# ThinklySEO/modules/keywords.py
"""
Keywords Rankings Module for SEO Report Generator
Implements PRD requirements with Competitors Ranking Keywords API and Google Keyword Insight API
"""

import os
import time
import hashlib
from typing import Dict, Any, List, Optional, Tuple
from urllib.parse import urlparse
from datetime import datetime
from dataclasses import dataclass

import requests

from utils import safe_pct
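
# Environment configuration read by this module (defaults shown are the ones
# used in __init__ below; RAPIDAPI_KEY has no default and must be set for
# live API calls, otherwise mock data is used):
#
#   RAPIDAPI_KEY             - RapidAPI key shared by both keyword APIs
#   KEYWORD_API_TIMEOUT      - per-request timeout in seconds (default 30)
#   KEYWORD_MAX_RETRIES      - maximum retry attempts (default 3)
#   KEYWORD_PAGINATION_LIMIT - keyword cap per domain (default 1000)
#   ENRICHMENT_BATCH_SIZE    - keywords per enrichment batch (default 50)
#   ENRICHMENT_CACHE_TTL     - enrichment cache TTL in seconds (default 86400)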


@dataclass
class ModuleResult:
    """Standard result object for SEO modules"""
    success: bool
    data: Dict[str, Any]
    error: Optional[str] = None
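
# A minimal sketch of how callers are expected to consume ModuleResult
# (the domain below is illustrative, not from the PRD):
#
#   result = KeywordsModule().analyze("https://example.com")
#   if result.success:
#       totals = result.data["totals"]
#   else:
#       print(result.error)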


class KeywordsModule:
    def __init__(self):
        # API configuration
        self.rapidapi_key = os.getenv('RAPIDAPI_KEY')

        # RapidAPI endpoints
        self.enrichment_api_host = "google-keyword-insight1.p.rapidapi.com"
        self.similarweb_url = "https://similarweb-traffic.p.rapidapi.com/traffic"

        # API priority order (tried in this order)
        self.api_sources = [
            {'name': 'SimilarWeb', 'available': bool(self.rapidapi_key)},     # Primary: SimilarWeb Traffic
            {'name': 'GoogleInsight', 'available': bool(self.rapidapi_key)},  # Fallback: Google Keyword Insight
        ]

        # Performance configuration
        self.timeout = int(os.getenv('KEYWORD_API_TIMEOUT', '30'))
        self.max_retries = int(os.getenv('KEYWORD_MAX_RETRIES', '3'))
        self.pagination_limit = int(os.getenv('KEYWORD_PAGINATION_LIMIT', '1000'))
        self.enrichment_batch_size = int(os.getenv('ENRICHMENT_BATCH_SIZE', '50'))
        self.enrichment_cache_ttl = int(os.getenv('ENRICHMENT_CACHE_TTL', '86400'))

        # Rate-limiting counters
        self.primary_api_calls = 0
        self.enrichment_api_calls = 0
        self.last_primary_call = 0
        self.last_enrichment_call = 0

        # In-memory cache for enrichment data
        self.enrichment_cache = {}
        self.cache_timestamps = {}

    def analyze(self, url: str, competitor_domains: Optional[List[str]] = None, quick_scan: bool = False) -> ModuleResult:
        """
        Analyze keyword rankings for the given URL and its competitors.

        Args:
            url: Target website URL
            competitor_domains: List of competitor domains to analyze
            quick_scan: If True, limit to 1000 keywords per domain

        Returns:
            ModuleResult with comprehensive keywords data
        """
        start_time = time.time()
        try:
            domain = self._extract_domain(url)
            competitor_domains = competitor_domains or []

            # Limit competitors for demo performance
            if len(competitor_domains) > 3:
                competitor_domains = competitor_domains[:3]

            # Call ALL APIs and combine real + mock data
            main_domain_data = self._fetch_from_all_apis(domain, quick_scan)

            # Fetch competitor data using the same all-APIs approach
            competitor_data = {}
            for comp_domain in competitor_domains:
                comp_result = self._fetch_from_all_apis(comp_domain, quick_scan)
                competitor_data[comp_domain] = comp_result['data']

            # Process and enrich data
            result_data = self._process_keywords_data(
                main_domain_data['data'],
                competitor_data,
                domain,
                competitor_domains
            )

            # Add metadata
            processing_time = time.time() - start_time
            result_data['meta'] = {
                'last_updated': datetime.now().isoformat(),
                'processing_time': round(processing_time, 2),
                'locale': 'en-US'
            }
            return ModuleResult(success=True, data=result_data)
        except Exception as e:
            return ModuleResult(
                success=False,
                data={},
                error=f"Keywords analysis failed: {str(e)}"
            )

    def _extract_domain(self, url: str) -> str:
        if not url.startswith(('http://', 'https://')):
            url = 'https://' + url
        return urlparse(url).netloc.replace('www.', '')
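
    # For illustration (hypothetical inputs):
    #   _extract_domain("https://www.example.com/pricing") -> "example.com"
    #   _extract_domain("example.com")                     -> "example.com"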

    def _fetch_from_all_apis(self, domain: str, quick_scan: bool) -> Dict[str, Any]:
        """Call ALL APIs and combine real data with mock data for any failures"""
        api_results = {}
        failed_apis = []

        if not self.rapidapi_key:
            failed_apis.extend(['SimilarWeb', 'GoogleInsight'])
            print("❌ No RAPIDAPI_KEY - using mock data for all keyword APIs")
        else:
            # Try SimilarWeb
            try:
                print("🔄 Trying SimilarWeb Traffic API...")
                similarweb_result = self._fetch_domain_keywords_similarweb(domain, quick_scan)
                if similarweb_result['success']:
                    api_results['SimilarWeb'] = similarweb_result['data']
                    print("✅ SimilarWeb Traffic API - SUCCESS")
                else:
                    failed_apis.append('SimilarWeb')
                    print(f"❌ SimilarWeb Traffic API - FAILED: {similarweb_result.get('error', 'Unknown error')}")
            except Exception as e:
                failed_apis.append('SimilarWeb')
                print(f"❌ SimilarWeb Traffic API - FAILED: {str(e)}")

            # Try Google Keyword Insight
            try:
                print("🔄 Trying Google Keyword Insight API...")
                google_result = self._fetch_keywords_enrichment_only(domain, quick_scan)
                if google_result['success']:
                    api_results['GoogleInsight'] = google_result['data']
                    print("✅ Google Keyword Insight API - SUCCESS")
                else:
                    failed_apis.append('GoogleInsight')
                    print(f"❌ Google Keyword Insight API - FAILED: {google_result.get('error', 'Unknown error')}")
            except Exception as e:
                failed_apis.append('GoogleInsight')
                print(f"❌ Google Keyword Insight API - FAILED: {str(e)}")

        # Combine all successful API data and generate mock data for failures
        combined_data = self._combine_all_keyword_apis(domain, api_results, failed_apis)
        return {
            'success': True,
            'data': combined_data,
            'failed_apis': failed_apis
        }

    def _combine_all_keyword_apis(self, domain: str, api_results: Dict, failed_apis: List[str]) -> Dict[str, Any]:
        """Combine real API data with mock data for failures"""
        # Start with the best available real data
        if 'SimilarWeb' in api_results:
            base_data = api_results['SimilarWeb']
            primary_source = 'SimilarWeb Traffic API'
        elif 'GoogleInsight' in api_results:
            base_data = api_results['GoogleInsight']
            primary_source = 'Google Keyword Insight API'
        else:
            # All APIs failed - use mock data
            base_data = self._generate_mock_domain_data(domain)
            primary_source = 'Mock data (all APIs failed)'

        # Add error tracking for failed APIs
        failed_api_messages = []
        for api in failed_apis:
            if api == 'SimilarWeb':
                failed_api_messages.append("❌ SimilarWeb Traffic API failed - using mock data")
            elif api == 'GoogleInsight':
                failed_api_messages.append("❌ Google Keyword Insight API failed - using mock data")

        # If multiple API sources worked, enrich the base data with extras
        if len(api_results) > 1:
            # Add traffic data from SimilarWeb if available
            if 'SimilarWeb' in api_results and 'traffic_data' in api_results['SimilarWeb']:
                base_data['traffic_data'] = api_results['SimilarWeb']['traffic_data']

        # Mark which parts are real vs mock
        base_data['api_status'] = {
            'working_apis': list(api_results.keys()),
            'failed_apis': failed_apis,
            'failed_messages': failed_api_messages,
            'primary_source': primary_source
        }
        return base_data

    def _fetch_domain_keywords_multi_api(self, domain: str, quick_scan: bool) -> Dict[str, Any]:
        """Try multiple API sources in order of preference"""
        available_apis = [api for api in self.api_sources if api['available']]
        if not available_apis:
            print("No keyword APIs configured")
            return {'success': False, 'error': 'No RAPIDAPI_KEY configured'}

        for api_source in available_apis:
            try:
                print(f"Trying {api_source['name']} for keyword data...")
                if api_source['name'] == 'SimilarWeb':
                    result = self._fetch_domain_keywords_similarweb(domain, quick_scan)
                elif api_source['name'] == 'GoogleInsight':
                    result = self._fetch_keywords_enrichment_only(domain, quick_scan)
                else:
                    continue

                # Track which API source was successfully used
                if result.get('success'):
                    self._current_api_source = api_source['name']
                    print(f"✅ Successfully using {api_source['name']} for keywords")
                    return result
            except Exception as e:
                print(f"{api_source['name']} failed: {str(e)}")
                continue

        print("All APIs failed")
        return {'success': False, 'error': 'All keyword APIs failed'}

    def _calculate_domain_statistics(self, keywords: List[Dict]) -> Dict[str, Any]:
        total_keywords = len(keywords)

        # Position distribution
        pos_1 = sum(1 for k in keywords if k.get('rank', 100) == 1)
        pos_2_3 = sum(1 for k in keywords if 2 <= k.get('rank', 100) <= 3)
        pos_4_10 = sum(1 for k in keywords if 4 <= k.get('rank', 100) <= 10)
        pos_11_20 = sum(1 for k in keywords if 11 <= k.get('rank', 100) <= 20)
        pos_21_50 = sum(1 for k in keywords if 21 <= k.get('rank', 100) <= 50)

        # Movement tracking (keywords without a previous rank are "new",
        # so they must not be counted as movers)
        new_keywords = sum(1 for k in keywords if k.get('previous_rank') is None)
        up_keywords = sum(
            1 for k in keywords
            if k.get('previous_rank') is not None and k.get('rank', 100) < k['previous_rank']
        )
        down_keywords = sum(
            1 for k in keywords
            if k.get('previous_rank') is not None and k.get('rank', 100) > k['previous_rank']
        )

        # Traffic estimation
        estimated_traffic = sum(k.get('estimated_traffic_volume', 0) for k in keywords)

        return {
            'organic': {
                'keywords_in_pos_1': pos_1,
                'keywords_in_pos_2_3': pos_2_3,
                'keywords_in_pos_4_10': pos_4_10,
                'keywords_in_pos_11_20': pos_11_20,
                'keywords_in_pos_21_50': pos_21_50,
                'total_keywords_count': total_keywords,
                'Estimated_traffic_volume': estimated_traffic,
                'is_new': new_keywords,
                'is_up': up_keywords,
                'is_down': down_keywords,
                'is_lost': 0
            }
        }

    def _process_keywords_data(self, main_data: Dict, competitor_data: Dict,
                               domain: str, competitor_domains: List[str]) -> Dict[str, Any]:
        stats = main_data['statistics']['organic']
        keywords = main_data['keywords']

        # Enrich keywords with volume/CPC data first, so the keyword lists
        # below are built from the enriched values
        enriched_keywords = self._enrich_keywords_data(keywords)

        # Calculate totals
        totals = {
            'keywords': stats['total_keywords_count'],
            'estimated_traffic': stats['Estimated_traffic_volume']
        }

        # Calculate position distribution (corrected Top-50 logic)
        top3 = stats['keywords_in_pos_1'] + stats['keywords_in_pos_2_3']
        top10 = top3 + stats['keywords_in_pos_4_10']
        p11_20 = stats['keywords_in_pos_11_20']
        p21_50 = sum(1 for k in enriched_keywords if 21 <= k.get('rank', 100) <= 50)
        top50 = top10 + p11_20 + p21_50
        distribution = {
            'top3': top3,
            'top10': top10,
            'top50': top50,
            'percentages': {
                'top3': safe_pct(top3, stats['total_keywords_count']),
                'top10': safe_pct(top10, stats['total_keywords_count']),
                'top50': safe_pct(top50, stats['total_keywords_count'])
            }
        }

        # Movement tracking
        movement = {
            'new': stats['is_new'],
            'up': stats['is_up'],
            'down': stats['is_down'],
            'lost': stats['is_lost']
        }

        # Identify best, declining, and worst-performing keywords
        best_keywords = self._identify_best_keywords(enriched_keywords)
        declining_keywords = self._identify_declining_keywords(enriched_keywords)
        worst_keywords = self._identify_worst_keywords(enriched_keywords)

        # Competitor gap analysis
        opportunities, competitor_summary = self._analyze_competitor_gaps(
            enriched_keywords, competitor_data, domain, competitor_domains
        )

        # Label the data source: prefer the combined API status set by
        # _combine_all_keyword_apis, then the multi-API tracker, then fall back
        api_status = main_data.get('api_status', {})
        if api_status.get('primary_source'):
            data_source = api_status['primary_source']
        elif hasattr(self, '_current_api_source'):
            if self._current_api_source == 'SimilarWeb':
                data_source = 'SimilarWeb Traffic API'
            elif self._current_api_source == 'GoogleInsight':
                data_source = 'Google Keyword Insight API (rankings estimated)'
            else:
                data_source = f'{self._current_api_source} API'
        else:
            data_source = 'Real API data unavailable'

        # Data sources tracking
        data_sources = {
            'positions': data_source,
            'volume': 'Google Keyword Insight API',
            'enrichment_rate': self._calculate_enrichment_rate(enriched_keywords)
        }

        return {
            'totals': totals,
            'distribution': distribution,
            'movement': movement,
            'best_keywords': best_keywords,
            'declining_keywords': declining_keywords,
            'worst_keywords': worst_keywords,
            'opportunities': opportunities,
            'competitor_summary': competitor_summary,
            'data_sources': data_sources,
            'data_source': data_source
        }

    def _identify_best_keywords(self, keywords: List[Dict]) -> List[Dict]:
        best_candidates = [
            k for k in keywords
            if k.get('rank', 100) <= 3 and k.get('estimated_traffic_volume', 0) > 10
        ]

        # Sort by estimated traffic volume
        best_candidates.sort(key=lambda x: x.get('estimated_traffic_volume', 0), reverse=True)

        return [
            {
                'keyword': k.get('keyword', ''),
                'rank': k.get('rank', 0),
                'url': k.get('url', ''),
                'volume': k.get('avg_search_volume', 0),
                'estimated_traffic': k.get('estimated_traffic_volume', 0),
                'trend': self._determine_trend(k)
            }
            for k in best_candidates[:15]
        ]

    def _identify_declining_keywords(self, keywords: List[Dict]) -> List[Dict]:
        declining_candidates = []
        for k in keywords:
            previous_rank = k.get('previous_rank')
            if previous_rank is None:
                continue  # no history to compare against
            current_rank = k.get('rank', 100)
            if current_rank > previous_rank and (current_rank - previous_rank) >= 5:
                declining_candidates.append({
                    'keyword': k.get('keyword', ''),
                    'rank': current_rank,
                    'previous_rank': previous_rank,
                    'rank_delta': current_rank - previous_rank,
                    'volume': k.get('avg_search_volume', 0)
                })

        # Sort by rank delta (biggest drops first)
        declining_candidates.sort(key=lambda x: x['rank_delta'], reverse=True)
        return declining_candidates[:15]

    def _analyze_competitor_gaps(self, main_keywords: List[Dict], competitor_data: Dict,
                                 domain: str, competitor_domains: List[str]) -> Tuple[List[Dict], List[Dict]]:
        opportunities = []
        competitor_summary = []

        # Normalize main domain keywords
        main_keyword_set = {k.get('keyword', '').lower().strip() for k in main_keywords}

        for comp_domain, comp_data in competitor_data.items():
            comp_keywords = comp_data.get('keywords', [])
            comp_stats = comp_data.get('statistics', {}).get('organic', {})

            # Find gaps: keywords where the competitor ranks well but the main domain doesn't
            gaps = []
            for k in comp_keywords:
                keyword = k.get('keyword', '').lower().strip()
                comp_rank = k.get('rank', 100)
                if keyword not in main_keyword_set and comp_rank <= 20:
                    gaps.append({
                        'keyword': k.get('keyword', ''),
                        'competitor_rank': comp_rank,
                        'competitor_domain': comp_domain,
                        'volume': k.get('avg_search_volume', 0),
                        'difficulty': self._estimate_difficulty(comp_rank, k.get('avg_search_volume', 0))
                    })

            # Calculate opportunity scores
            for gap in gaps:
                gap['priority_score'] = self._calculate_opportunity_score(
                    gap['competitor_rank'],
                    gap['volume'],
                    gap['difficulty']
                )

            # Sort by priority score
            gaps.sort(key=lambda x: x['priority_score'], reverse=True)
            opportunities.extend(gaps[:20])  # Top 20 per competitor

            # Competitor summary
            overlapping = len([k for k in comp_keywords if k.get('keyword', '').lower().strip() in main_keyword_set])
            competitor_summary.append({
                'domain': comp_domain,
                'total_keywords': comp_stats.get('total_keywords_count', 0),
                'overlapping_keywords': overlapping,
                'gaps_identified': len(gaps)
            })

        # Sort all opportunities by priority score
        opportunities.sort(key=lambda x: x['priority_score'], reverse=True)
        return opportunities[:50], competitor_summary

    def _calculate_opportunity_score(self, competitor_rank: int, search_volume: int, difficulty: int) -> float:
        # Approximate organic CTR by position; iterated in insertion order,
        # so the first threshold >= competitor_rank wins
        position_ctr = {1: 28, 2: 15, 3: 11, 4: 8, 5: 7, 10: 2, 20: 1}

        # Find the closest CTR value
        ctr_value = 1
        for pos, ctr in position_ctr.items():
            if competitor_rank <= pos:
                ctr_value = ctr
                break

        traffic_potential = ctr_value * search_volume / 100
        competition_factor = max(competitor_rank, 1)
        difficulty_factor = max(difficulty, 10) / 100
        score = traffic_potential / (competition_factor * difficulty_factor)
        return min(round(score, 1), 100)
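
    # Worked example (hypothetical numbers): competitor_rank=3, search_volume=1000,
    # difficulty=30 -> ctr_value=11, traffic_potential=110.0,
    # score = 110.0 / (3 * 0.30) = 122.2, capped at 100.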

    def _estimate_difficulty(self, rank: int, volume: int) -> int:
        # Simple heuristic - in practice, this would come from a keyword difficulty API
        if rank <= 3:
            return 20 + (volume // 1000) * 5
        elif rank <= 10:
            return 35 + (volume // 1000) * 3
        else:
            return 50 + (volume // 1000) * 2
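
    # For illustration (hypothetical inputs): rank=2, volume=5000
    # -> 20 + (5000 // 1000) * 5 = 45.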

    def _enrich_keywords_data(self, keywords: List[Dict]) -> List[Dict]:
        # Identify keywords needing enrichment
        keywords_to_enrich = [
            k for k in keywords
            if not k.get('avg_search_volume') or k.get('avg_search_volume', 0) == 0
        ]
        if not keywords_to_enrich:
            return keywords

        # Batch enrichment
        enriched_data = self._batch_enrich_keywords(
            [k.get('keyword', '') for k in keywords_to_enrich]
        )

        # Merge enriched data back
        enriched_keywords = keywords.copy()
        for keyword_data in keywords_to_enrich:
            keyword = keyword_data.get('keyword', '')
            if keyword in enriched_data:
                # Find the keyword in the original list and update it
                for j, k in enumerate(enriched_keywords):
                    if k.get('keyword', '') == keyword:
                        enriched_keywords[j].update(enriched_data[keyword])
                        break
        return enriched_keywords

    def _batch_enrich_keywords(self, keywords: List[str]) -> Dict[str, Dict]:
        enriched_data = {}

        # Process in batches
        for i in range(0, len(keywords), self.enrichment_batch_size):
            batch = keywords[i:i + self.enrichment_batch_size]

            # Check the cache first
            uncached_keywords = []
            for keyword in batch:
                cache_key = self._get_cache_key(keyword)
                if cache_key in self.enrichment_cache:
                    cache_age = time.time() - self.cache_timestamps.get(cache_key, 0)
                    if cache_age < self.enrichment_cache_ttl:
                        enriched_data[keyword] = self.enrichment_cache[cache_key]
                    else:
                        uncached_keywords.append(keyword)
                else:
                    uncached_keywords.append(keyword)

            if not uncached_keywords:
                continue

            # Enrich uncached keywords
            try:
                self._rate_limit_enrichment_api()
                url = "https://google-keyword-insight1.p.rapidapi.com/globalkey/"
                headers = {
                    "x-rapidapi-key": self.rapidapi_key,
                    "x-rapidapi-host": self.enrichment_api_host
                }
                for keyword in uncached_keywords:
                    params = {
                        "keyword": keyword,
                        "lang": "en"
                    }
                    response = requests.get(url, headers=headers, params=params, timeout=self.timeout)
                    self.enrichment_api_calls += 1
                    self.last_enrichment_call = time.time()

                    if response.status_code == 200:
                        data = response.json()
                        if data and isinstance(data, list) and len(data) > 0:
                            insight = data[0]
                            enriched_info = {
                                'avg_search_volume': insight.get('volume', 0),
                                'cpc_low': insight.get('low_bid', 0),
                                'cpc_high': insight.get('high_bid', 0),
                                'competition_level': insight.get('competition_level', 'UNKNOWN'),
                                'trend': insight.get('trend', 0)
                            }
                            enriched_data[keyword] = enriched_info

                            # Cache the result
                            cache_key = self._get_cache_key(keyword)
                            self.enrichment_cache[cache_key] = enriched_info
                            self.cache_timestamps[cache_key] = time.time()

                    # Small delay to respect rate limits
                    time.sleep(0.1)
            except Exception as e:
                # Continue processing even if enrichment fails
                print(f"Enrichment error: {e}")
                continue
        return enriched_data
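
    # Sketch of the per-keyword dict this returns (field names mirror the
    # parsing above; the keyword and values are made up for illustration):
    #   {"seo tools": {"avg_search_volume": 12000, "cpc_low": 0.8,
    #                  "cpc_high": 2.4, "competition_level": "HIGH", "trend": 5}}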

    def _get_cache_key(self, keyword: str) -> str:
        return hashlib.md5(keyword.lower().encode()).hexdigest()

    def _calculate_enrichment_rate(self, keywords: List[Dict]) -> float:
        enriched = sum(1 for k in keywords if k.get('avg_search_volume', 0) > 0)
        total = len(keywords)
        return round(enriched / total * 100, 1) if total > 0 else 0

    def _determine_trend(self, keyword_data: Dict) -> str:
        previous_rank = keyword_data.get('previous_rank')
        if previous_rank is None:
            return 'new'
        current_rank = keyword_data.get('rank', 100)
        if current_rank < previous_rank:
            return 'up'
        elif current_rank > previous_rank:
            return 'down'
        else:
            return 'stable'

    def _rate_limit_enrichment_api(self):
        # Keep at least 0.6s between enrichment calls
        elapsed = time.time() - self.last_enrichment_call
        if elapsed < 0.6:
            time.sleep(0.6 - elapsed)

    def _identify_worst_keywords(self, keywords: List[Dict]) -> Dict[str, List[Dict]]:
        """Identify worst-performing keywords by CTR and by position"""
        IMP_MIN = 500   # minimum impressions (search volume) to consider
        CTR_MIN = 1.0   # percent

        # Filter for keywords with sufficient impression data
        keywords_with_data = [
            k for k in keywords
            if k.get('avg_search_volume', 0) >= IMP_MIN
        ]

        # Worst by CTR (simulated - high impressions with low traffic suggests low CTR)
        worst_by_ctr = []
        for k in keywords_with_data:
            impressions = k.get('avg_search_volume', 0)
            traffic = k.get('estimated_traffic_volume', 0)
            if impressions > 0:
                estimated_ctr = (traffic / impressions) * 100
                if estimated_ctr < CTR_MIN:
                    worst_by_ctr.append({
                        'keyword': k.get('keyword', ''),
                        'rank': k.get('rank', 0),
                        'impressions': impressions,
                        'estimated_ctr': round(estimated_ctr, 2),
                        'volume': impressions
                    })

        # Worst by position
        worst_by_position = [
            {
                'keyword': k.get('keyword', ''),
                'rank': k.get('rank', 0),
                'impressions': k.get('avg_search_volume', 0),
                'volume': k.get('avg_search_volume', 0)
            }
            for k in keywords_with_data
            if k.get('rank', 100) > 30
        ]

        # Sort and limit
        worst_by_ctr.sort(key=lambda x: x['estimated_ctr'])
        worst_by_position.sort(key=lambda x: x['rank'], reverse=True)
        return {
            'by_ctr': worst_by_ctr[:20],
            'by_position': worst_by_position[:20]
        }

    def _generate_mock_keywords_data(self, domain: str, competitor_domains: List[str]) -> ModuleResult:
        """Generate realistic mock data when APIs are unavailable"""
        mock_data = self._generate_mock_domain_data(domain)
        result_data = self._process_keywords_data(
            mock_data,
            {},  # No competitor data for mock
            domain,
            []
        )

        # Add metadata
        result_data['meta'] = {
            'last_updated': datetime.now().isoformat(),
            'processing_time': 0.5,
            'locale': 'en-US'
        }
        return ModuleResult(success=True, data=result_data)

    def _generate_mock_domain_data(self, domain: str) -> Dict[str, Any]:
        """Generate mock domain data with realistic keywords, enriched if possible"""
        domain_clean = domain.replace('.', ' ')
        base_keywords = [
            f'{domain_clean} services', f'{domain_clean} reviews',
            f'best {domain_clean}', f'{domain_clean} pricing',
            f'how to use {domain_clean}', f'{domain_clean} alternatives',
            f'{domain_clean} login', f'{domain_clean} features',
            f'{domain_clean} support', f'{domain_clean} tutorial'
        ]

        # Try to get real search volumes from the enrichment API if available
        enriched_volumes = {}
        if self.rapidapi_key:
            print("Trying to get real search volumes from enrichment API...")
            enriched_volumes = self._batch_enrich_keywords(base_keywords[:5])  # Limit to save quota

        mock_keywords = []
        default_ranks = [5, 12, 23, 8, 35, 18, 2, 15, 42, 28]
        default_volumes = [1200, 890, 560, 720, 340, 480, 2100, 650, 290, 410]
        for i, keyword in enumerate(base_keywords):
            # Use the real volume if available, otherwise fall back to the default
            if keyword in enriched_volumes:
                volume = enriched_volumes[keyword].get('avg_search_volume', default_volumes[i])
                print(f"✅ Got real volume for '{keyword}': {volume}")
            else:
                volume = default_volumes[i]
            rank = default_ranks[i]

            # Estimate traffic based on position and CTR
            ctr_by_position = {1: 28, 2: 15, 3: 11, 5: 7, 8: 5, 12: 3, 15: 2, 18: 1.5, 23: 1, 28: 0.8, 35: 0.5, 42: 0.3}
            estimated_ctr = ctr_by_position.get(rank, 0.2)
            estimated_traffic = int(volume * estimated_ctr / 100)

            mock_keywords.append({
                'keyword': keyword,
                'rank': rank,
                'avg_search_volume': volume,
                'estimated_traffic_volume': estimated_traffic
            })

        # Derive the position buckets from the mock ranks so they stay
        # consistent with the generated keywords
        stats = {
            'organic': {
                'keywords_in_pos_1': sum(1 for k in mock_keywords if k['rank'] == 1),
                'keywords_in_pos_2_3': sum(1 for k in mock_keywords if 2 <= k['rank'] <= 3),
                'keywords_in_pos_4_10': sum(1 for k in mock_keywords if 4 <= k['rank'] <= 10),
                'keywords_in_pos_11_20': sum(1 for k in mock_keywords if 11 <= k['rank'] <= 20),
                'keywords_in_pos_21_50': sum(1 for k in mock_keywords if 21 <= k['rank'] <= 50),
                'total_keywords_count': len(mock_keywords),
                'Estimated_traffic_volume': sum(k['estimated_traffic_volume'] for k in mock_keywords),
                'is_new': 2,   # mock movement figures
                'is_up': 3,
                'is_down': 1,
                'is_lost': 0
            }
        }

        return {
            'domain': domain,
            'statistics': stats,
            'keywords': mock_keywords
        }

    def _fetch_keywords_enrichment_only(self, domain: str, quick_scan: bool) -> Dict[str, Any]:
        """Use only the enrichment API when the rankings API fails"""
        print(f"Using enrichment API only for {domain} (rankings API quota exceeded)")

        # Generate basic keyword ideas based on the domain
        domain_clean = domain.replace('.', ' ')
        keyword_ideas = [
            f"{domain_clean}", f"{domain_clean} login", f"{domain_clean} pricing",
            f"{domain_clean} features", f"{domain_clean} reviews", f"best {domain_clean}",
            f"{domain_clean} alternatives", f"how to use {domain_clean}",
            f"{domain_clean} tutorial", f"{domain_clean} support"
        ]

        # Get real search volumes from the enrichment API
        enriched_data = self._batch_enrich_keywords(keyword_ideas)

        # Build realistic keywords with real search volumes but estimated rankings
        keywords = []
        estimated_ranks = [2, 1, 8, 12, 15, 25, 18, 35, 28, 45]  # Mixed realistic ranks
        for i, keyword in enumerate(keyword_ideas):
            if keyword in enriched_data:
                volume = enriched_data[keyword].get('avg_search_volume', 500)
                competition = enriched_data[keyword].get('competition_level', 'MEDIUM')
            else:
                volume = max(100, 1000 - i * 80)  # Decreasing volume
                competition = 'MEDIUM'
            rank = estimated_ranks[i] if i < len(estimated_ranks) else 30 + i

            # Estimate traffic based on rank and volume
            ctr_by_position = {1: 28, 2: 15, 3: 11, 8: 5, 12: 3, 15: 2, 18: 1.5, 25: 1, 28: 0.8, 35: 0.5, 45: 0.3}
            estimated_ctr = ctr_by_position.get(rank, 0.2)
            estimated_traffic = int(volume * estimated_ctr / 100)

            keywords.append({
                'keyword': keyword,
                'rank': rank,
                'avg_search_volume': volume,
                'estimated_traffic_volume': estimated_traffic,
                'competition_level': competition
            })

        # Calculate domain statistics
        stats = {
            'organic': {
                'keywords_in_pos_1': sum(1 for k in keywords if k['rank'] == 1),
                'keywords_in_pos_2_3': sum(1 for k in keywords if 2 <= k['rank'] <= 3),
                'keywords_in_pos_4_10': sum(1 for k in keywords if 4 <= k['rank'] <= 10),
                'keywords_in_pos_11_20': sum(1 for k in keywords if 11 <= k['rank'] <= 20),
                'keywords_in_pos_21_50': sum(1 for k in keywords if 21 <= k['rank'] <= 50),
                'total_keywords_count': len(keywords),
                'Estimated_traffic_volume': sum(k['estimated_traffic_volume'] for k in keywords),
                'is_new': 1,
                'is_up': 2,
                'is_down': 1,
                'is_lost': 0
            }
        }

        return {
            'success': True,
            'data': {
                'domain': domain,
                'statistics': stats,
                'keywords': keywords
            }
        }

    def _fetch_domain_keywords_similarweb(self, domain: str, quick_scan: bool) -> Dict[str, Any]:
        """Fetch keyword data from the SimilarWeb Traffic API"""
        try:
            headers = {
                'x-rapidapi-key': self.rapidapi_key,
                'x-rapidapi-host': 'similarweb-traffic.p.rapidapi.com'
            }
            params = {'domain': domain}
            response = requests.get(self.similarweb_url, headers=headers, params=params, timeout=self.timeout)

            if response.status_code == 429:
                print("SimilarWeb API quota exceeded")
                raise Exception("Quota exceeded")
            elif response.status_code == 403:
                print("SimilarWeb API subscription required")
                raise Exception("Not subscribed to SimilarWeb API")
            elif response.status_code != 200:
                print(f"SimilarWeb API error {response.status_code}: {response.text}")
                raise Exception(f"API error {response.status_code}")

            data = response.json()

            # Extract top keywords from the SimilarWeb response
            top_keywords = data.get('TopKeywords', [])
            if not top_keywords:
                raise Exception("No keywords found in SimilarWeb response")

            # Transform SimilarWeb data into our format
            keywords = []
            for i, kw_data in enumerate(top_keywords[:20]):  # Limit to top 20
                keyword = kw_data.get('Name', '')
                volume = kw_data.get('Volume', 0)
                estimated_value = kw_data.get('EstimatedValue', 0)

                # Estimate ranking from the keyword's list position: SimilarWeb's
                # top keywords are assumed to rank well for the domain
                estimated_rank = min(i + 1, 10) if i < 10 else min(i + 5, 50)

                # Derive estimated traffic from the estimated value
                estimated_traffic = int(estimated_value / 10) if estimated_value else 0

                keywords.append({
                    'keyword': keyword,
                    'rank': estimated_rank,
                    'avg_search_volume': volume,
                    'estimated_traffic_volume': estimated_traffic,
                    'estimated_value': estimated_value
                })

            # Calculate domain statistics based on the SimilarWeb data
            total_keywords = len(keywords)

            # Pull additional traffic metrics (note: the SimilarWeb API
            # misspells the response key as "Engagments")
            engagements = data.get('Engagments', {})
            visits = int(engagements.get('Visits', 0))

            stats = {
                'organic': {
                    'keywords_in_pos_1': sum(1 for k in keywords if k['rank'] == 1),
                    'keywords_in_pos_2_3': sum(1 for k in keywords if 2 <= k['rank'] <= 3),
                    'keywords_in_pos_4_10': sum(1 for k in keywords if 4 <= k['rank'] <= 10),
                    'keywords_in_pos_11_20': sum(1 for k in keywords if 11 <= k['rank'] <= 20),
                    'keywords_in_pos_21_50': sum(1 for k in keywords if 21 <= k['rank'] <= 50),
                    'total_keywords_count': total_keywords,
                    'Estimated_traffic_volume': sum(k['estimated_traffic_volume'] for k in keywords),
                    'is_new': 0,  # SimilarWeb doesn't provide historical comparison
                    'is_up': 0,
                    'is_down': 0,
                    'is_lost': 0
                }
            }

            return {
                'success': True,
                'data': {
                    'domain': domain,
                    'statistics': stats,
                    'keywords': keywords,
                    'traffic_data': {
                        'monthly_visits': visits,
                        'global_rank': data.get('GlobalRank', {}).get('Rank', 0),
                        'bounce_rate': engagements.get('BounceRate', 0)
                    }
                }
            }
        except Exception as e:
            return {'success': False, 'error': str(e)}
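

# Minimal manual smoke test: a sketch, not part of the module's public API.
# The domains below are hypothetical; live data requires RAPIDAPI_KEY,
# otherwise the module falls back to mock data.
if __name__ == '__main__':
    module = KeywordsModule()
    result = module.analyze('https://example.com', competitor_domains=['competitor.com'])
    if result.success:
        print('Totals:', result.data['totals'])
        print('Distribution:', result.data['distribution'])
    else:
        print('Analysis failed:', result.error)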