remove msr
app.py CHANGED
@@ -48,6 +48,7 @@ DEBUG_REVIEW_METADATA_CACHE = defaultdict(list)
 
 AGENTS_REPO = "SWE-Arena/swe_agents"  # HuggingFace dataset for agent metadata
 REVIEW_METADATA_REPO = "SWE-Arena/review_metadata"  # HuggingFace dataset for review metadata
+LEADERBOARD_TIME_FRAME_DAYS = 180  # Time frame for leaderboard (past 6 months)
 
 LEADERBOARD_COLUMNS = [
     ("Agent Name", "string"),
@@ -770,17 +771,14 @@ def calculate_monthly_metrics_by_agent():
         }
     }
     """
-    # Get current year for loading metadata
-    current_year = datetime.now().year
-
     # Load ALL agents from HuggingFace agents repo
    agents = load_agents_from_hf()
 
     # Create mapping from agent_identifier to agent_name
     identifier_to_name = {agent.get('github_identifier'): agent.get('agent_name') for agent in agents if agent.get('github_identifier')}
 
-    # Load all review metadata
-    all_metadata = load_review_metadata_for_year(current_year)
+    # Load all review metadata from review_metadata dataset
+    all_metadata = load_review_metadata()
 
     if not all_metadata:
         return {'agents': [], 'months': [], 'data': {}}
@@ -967,27 +965,42 @@ def save_review_metadata_to_hf(metadata_list, agent_identifier):
     return False
 
 
-def load_review_metadata_for_year(year):
+def load_review_metadata():
     """
-    Load all review metadata for a specific year from HuggingFace.
-    Scans all agent folders and loads daily files matching the year.
-    In debug mode, loads from in-memory cache if available.
+    Load review metadata from the last LEADERBOARD_TIME_FRAME_DAYS.
+
+    In debug mode, loads from in-memory cache if available and filters by time frame.
 
     Structure: [agent_identifier]/YYYY.MM.DD.jsonl
 
     Returns:
         List of dictionaries with 'agent_identifier' added to each review metadata.
+        Only includes reviews from the last LEADERBOARD_TIME_FRAME_DAYS.
     """
+    # Calculate cutoff date based on LEADERBOARD_TIME_FRAME_DAYS
+    current_time = datetime.now(timezone.utc)
+    cutoff_date = current_time - timedelta(days=LEADERBOARD_TIME_FRAME_DAYS)
+
     # In debug mode, check in-memory cache first
     if DEBUG_MODE and DEBUG_REVIEW_METADATA_CACHE:
         all_metadata = []
         for agent_identifier, metadata_list in DEBUG_REVIEW_METADATA_CACHE.items():
             for review_meta in metadata_list:
+                # Filter by time frame
+                reviewed_at = review_meta.get('reviewed_at')
+                if reviewed_at:
+                    try:
+                        dt = datetime.fromisoformat(reviewed_at.replace('Z', '+00:00'))
+                        if dt < cutoff_date:
+                            continue  # Skip reviews older than time frame
+                    except Exception:
+                        pass  # Keep reviews with unparseable dates
+
                 review_with_agent = review_meta.copy()
                 review_with_agent['agent_identifier'] = agent_identifier
                 all_metadata.append(review_with_agent)
         if all_metadata:
-            print(f"🐛 DEBUG MODE: Loading review metadata from in-memory cache ({len(all_metadata)} reviews)")
+            print(f"🐛 DEBUG MODE: Loading review metadata from in-memory cache (last {LEADERBOARD_TIME_FRAME_DAYS} days, {len(all_metadata)} reviews)")
             return all_metadata
 
     try:
@@ -997,22 +1010,33 @@ def load_review_metadata_for_year(year):
         # List all files in the repository
         files = api.list_repo_files(repo_id=REVIEW_METADATA_REPO, repo_type="dataset")
 
-        # Filter for files matching the year pattern: [agent_identifier]/YYYY.MM.DD.jsonl
-        # Extract year from filename
-        year_str = str(year)
-        year_files = []
+        # Filter for files matching the pattern: [agent_identifier]/YYYY.MM.DD.jsonl
+        # AND within the time frame (parse date from filename)
+        time_frame_files = []
         for f in files:
             if f.endswith('.jsonl'):
                 parts = f.split('/')
                 if len(parts) == 2:  # [agent_identifier]/YYYY.MM.DD.jsonl
                     filename = parts[1]
-                    if filename.startswith(year_str + '.'):
-                        year_files.append(f)
+                    # Parse date from filename: YYYY.MM.DD.jsonl
+                    try:
+                        date_part = filename.replace('.jsonl', '')  # Get YYYY.MM.DD
+                        date_components = date_part.split('.')
+                        if len(date_components) == 3:
+                            file_year, file_month, file_day = map(int, date_components)
+                            file_date = datetime(file_year, file_month, file_day, tzinfo=timezone.utc)
+
+                            # Only include files within the time frame
+                            if file_date >= cutoff_date:
+                                time_frame_files.append(f)
+                    except Exception:
+                        # If we can't parse the date, skip this file
+                        continue
 
-        print(f"📥 Loading review metadata for {year} ({len(year_files)} daily files across all agents)...")
+        print(f"📥 Loading review metadata from last {LEADERBOARD_TIME_FRAME_DAYS} days ({len(time_frame_files)} daily files across all agents)...")
 
         all_metadata = []
-        for filename in year_files:
+        for filename in time_frame_files:
            try:
                 # Extract agent_identifier from path (first part)
                 # Format: agent_identifier/YYYY.MM.DD.jsonl
@@ -1031,20 +1055,32 @@ def load_review_metadata_for_year(year):
                 )
                 day_metadata = load_jsonl(file_path)
 
-                # Add agent_identifier to each review metadata for processing
+                # Add agent_identifier and filter by time frame (double-check)
+                filtered_count = 0
                 for review_meta in day_metadata:
+                    # Validate review date is within time frame
+                    reviewed_at = review_meta.get('reviewed_at')
+                    if reviewed_at:
+                        try:
+                            dt = datetime.fromisoformat(reviewed_at.replace('Z', '+00:00'))
+                            if dt < cutoff_date:
+                                continue  # Skip reviews older than time frame
+                        except Exception:
+                            pass  # Keep reviews with unparseable dates
+
                     review_meta['agent_identifier'] = agent_identifier
+                    all_metadata.append(review_meta)
+                    filtered_count += 1
 
-                all_metadata.extend(day_metadata)
-                print(f"  ✓ Loaded {len(day_metadata)} reviews from {filename}")
+                print(f"  ✓ Loaded {filtered_count} reviews from {filename}")
             except Exception as e:
                 print(f"  Warning: Could not load {filename}: {str(e)}")
 
-        print(f"✓ Loaded {len(all_metadata)} total reviews for {year}")
+        print(f"✓ Loaded {len(all_metadata)} total reviews from last {LEADERBOARD_TIME_FRAME_DAYS} days")
         return all_metadata
 
     except Exception as e:
-        print(f"✗ Error loading review metadata for {year}: {str(e)}")
+        print(f"✗ Error loading review metadata from last {LEADERBOARD_TIME_FRAME_DAYS} days: {str(e)}")
         return []
 
 
@@ -1532,7 +1568,6 @@ def update_all_agents_incremental():
     Returns dictionary of all agent data with current stats.
     """
     token = get_github_token()
-    current_year = datetime.now().year
 
     # Load agent metadata from HuggingFace
     agents = load_agents_from_hf()
@@ -1587,9 +1622,9 @@ def update_all_agents_incremental():
             else:
                 print(f"  No new reviews to save")
 
-            # Load ALL metadata
+            # Load ALL metadata to calculate stats (aggregates entire last 6 months)
             print(f"📊 Calculating statistics from ALL stored metadata (last 6 months)...")
-            all_year_metadata = load_review_metadata_for_year(current_year)
+            all_year_metadata = load_review_metadata()
 
             # Filter for this specific agent
             agent_metadata = [review for review in all_year_metadata if review.get("agent_identifier") == identifier]
@@ -1624,8 +1659,6 @@ def construct_leaderboard_from_metadata():
     Returns dictionary of agent stats.
     """
     print("📊 Constructing leaderboard from review metadata...")
-    current_year = datetime.now().year
-
     # Load agents
     agents = load_agents_from_hf()
     if not agents:
@@ -1633,7 +1666,7 @@ def construct_leaderboard_from_metadata():
         return {}
 
     # Load all review metadata for current year
-    all_metadata = load_review_metadata_for_year(current_year)
+    all_metadata = load_review_metadata()
 
     cache_dict = {}
 
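The substance of this change is the replacement of year-based loading (load_review_metadata_for_year) with a rolling window controlled by the new LEADERBOARD_TIME_FRAME_DAYS constant: daily files are first pre-filtered by the date encoded in their YYYY.MM.DD.jsonl filename, and each review's reviewed_at timestamp is then double-checked against the same cutoff. Below is a minimal standalone sketch of the filename-based part of that filter; the helper name and the sample paths are illustrative, not taken from the repository.

from datetime import datetime, timedelta, timezone

LEADERBOARD_TIME_FRAME_DAYS = 180  # same constant the commit adds to app.py

def files_within_time_frame(files, days=LEADERBOARD_TIME_FRAME_DAYS):
    """Keep only [agent_identifier]/YYYY.MM.DD.jsonl paths whose date is inside the window."""
    cutoff = datetime.now(timezone.utc) - timedelta(days=days)
    kept = []
    for path in files:
        parts = path.split('/')
        if len(parts) != 2 or not parts[1].endswith('.jsonl'):
            continue
        try:
            year, month, day = map(int, parts[1].replace('.jsonl', '').split('.'))
            if datetime(year, month, day, tzinfo=timezone.utc) >= cutoff:
                kept.append(path)
        except ValueError:
            continue  # unparseable date in the filename: skip the file, as the new code does
    return kept

# Files older than the window (or not daily JSONL files at all) are dropped:
recent = files_within_time_frame([
    "some-agent/2025.01.15.jsonl",
    "some-agent/2023.03.01.jsonl",
    "some-agent/README.md",
])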
msr.py DELETED
@@ -1,1224 +0,0 @@
"""
Standalone miner to fetch PR review metadata and update the leaderboard immediately.

This script reuses the same logic and on-disk/HuggingFace formats as app.py, but
has no UI or scheduler. You can run it once, or run it in a loop for hours.

Datasets used:
- Agents: SWE-Arena/swe_agents
- Review metadata: SWE-Arena/review_metadata

Environment:
- Requires HF_TOKEN (for HuggingFace uploads)
- Optional GITHUB_TOKEN (highly recommended to avoid low rate limits)
- Reads .env if present

CLI flags:
- --debug / --no-debug: Same semantics as app.py (debug limits to 10 PRs/pattern
  and DOES NOT save to HF, mirroring app.py behavior).
- --loop: Keep running in a loop.
- --interval-seconds N: Sleep between loops (default 3600 seconds).

Note: In production mode (default), data will be saved to HuggingFace datasets.
"""

import argparse
import json
import os
import random
import sys
import time
from collections import defaultdict
from datetime import datetime, timezone, timedelta

import pandas as pd
import requests
from dotenv import load_dotenv
from huggingface_hub import HfApi, hf_hub_download


# =============================================================================
# Environment & CLI
# =============================================================================

load_dotenv()

parser = argparse.ArgumentParser(description="Immediate PR review miner for SWE Arena")
parser.add_argument("--debug", "--DEBUG", action="store_true", help="Enable debug mode (limits PR retrieval to 10 per query; does NOT save to HF)")
parser.add_argument("--no-debug", "--production", action="store_true", help="Explicitly disable debug mode (force production mode)")
parser.add_argument("--loop", action="store_true", help="Run in a loop until interrupted")
parser.add_argument("--interval-seconds", type=int, default=3600, help="Sleep interval between loops in seconds (default: 3600)")
args = parser.parse_args()

# DEBUG MODE priority: 1) flags, 2) env var, 3) default False
if args.no_debug:
    DEBUG_MODE = False
elif args.debug:
    DEBUG_MODE = True
else:
    DEBUG_MODE = os.getenv("DEBUG_MODE", "False").lower() in ("true", "1", "yes")


# =============================================================================
# Constants (match app.py)
# =============================================================================

DEBUG_REVIEW_METADATA_CACHE = defaultdict(list)

AGENTS_REPO = "SWE-Arena/swe_agents"
REVIEW_METADATA_REPO = "SWE-Arena/review_metadata"

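Given the flags declared above, a typical one-off debug pass would have been `python msr.py --debug`, and a continuous production run `python msr.py --no-debug --loop --interval-seconds 3600`; the interval value shown here is simply the documented default, not a prescribed setting.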
| 71 |
-
|
| 72 |
-
# =============================================================================
|
| 73 |
-
# Utilities & I/O (match app.py behavior exactly)
|
| 74 |
-
# =============================================================================
|
| 75 |
-
|
| 76 |
-
def load_jsonl(filename):
|
| 77 |
-
"""Load JSONL file and return list of dictionaries."""
|
| 78 |
-
if not os.path.exists(filename):
|
| 79 |
-
return []
|
| 80 |
-
|
| 81 |
-
data = []
|
| 82 |
-
with open(filename, 'r', encoding='utf-8') as f:
|
| 83 |
-
for line in f:
|
| 84 |
-
line = line.strip()
|
| 85 |
-
if line:
|
| 86 |
-
try:
|
| 87 |
-
entry = json.loads(line)
|
| 88 |
-
data.append(entry)
|
| 89 |
-
except json.JSONDecodeError as e:
|
| 90 |
-
print(f"Warning: Skipping invalid JSON line: {e}")
|
| 91 |
-
return data
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
def save_jsonl(filename, data):
|
| 95 |
-
"""Save list of dictionaries to JSONL file."""
|
| 96 |
-
with open(filename, 'w', encoding='utf-8') as f:
|
| 97 |
-
for item in data:
|
| 98 |
-
f.write(json.dumps(item) + '\n')
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
def cache_to_dict(cache_list):
|
| 102 |
-
return {entry['github_identifier']: entry for entry in cache_list}
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
def dict_to_cache(cache_dict):
|
| 106 |
-
return list(cache_dict.values())
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
def get_github_token():
|
| 110 |
-
token = os.getenv('GITHUB_TOKEN')
|
| 111 |
-
if not token:
|
| 112 |
-
print("Warning: GITHUB_TOKEN not found. API rate limits: 60/hour (authenticated: 5000/hour)")
|
| 113 |
-
return token
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
def get_hf_token():
|
| 117 |
-
token = os.getenv('HF_TOKEN')
|
| 118 |
-
if not token:
|
| 119 |
-
print("Warning: HF_TOKEN not found in environment variables")
|
| 120 |
-
return token
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
def upload_with_retry(api, path_or_fileobj, path_in_repo, repo_id, repo_type, token, max_retries=5):
|
| 124 |
-
"""
|
| 125 |
-
Upload file to HuggingFace with exponential backoff retry logic.
|
| 126 |
-
|
| 127 |
-
Args:
|
| 128 |
-
api: HfApi instance
|
| 129 |
-
path_or_fileobj: Local file path to upload
|
| 130 |
-
path_in_repo: Target path in the repository
|
| 131 |
-
repo_id: Repository ID
|
| 132 |
-
repo_type: Type of repository (e.g., "dataset")
|
| 133 |
-
token: HuggingFace token
|
| 134 |
-
max_retries: Maximum number of retry attempts
|
| 135 |
-
|
| 136 |
-
Returns:
|
| 137 |
-
True if upload succeeded, raises exception if all retries failed
|
| 138 |
-
"""
|
| 139 |
-
delay = 2.0 # Initial delay in seconds
|
| 140 |
-
|
| 141 |
-
for attempt in range(max_retries):
|
| 142 |
-
try:
|
| 143 |
-
api.upload_file(
|
| 144 |
-
path_or_fileobj=path_or_fileobj,
|
| 145 |
-
path_in_repo=path_in_repo,
|
| 146 |
-
repo_id=repo_id,
|
| 147 |
-
repo_type=repo_type,
|
| 148 |
-
token=token
|
| 149 |
-
)
|
| 150 |
-
if attempt > 0:
|
| 151 |
-
print(f" ✓ Upload succeeded on attempt {attempt + 1}/{max_retries}")
|
| 152 |
-
return True
|
| 153 |
-
|
| 154 |
-
except Exception as e:
|
| 155 |
-
if attempt < max_retries - 1:
|
| 156 |
-
wait_time = delay + random.uniform(0, 1.0)
|
| 157 |
-
print(f" ⚠️ Upload failed (attempt {attempt + 1}/{max_retries}): {str(e)}")
|
| 158 |
-
print(f" ⏳ Retrying in {wait_time:.1f} seconds...")
|
| 159 |
-
time.sleep(wait_time)
|
| 160 |
-
delay = min(delay * 2, 60.0) # Exponential backoff, max 60s
|
| 161 |
-
else:
|
| 162 |
-
print(f" ✗ Upload failed after {max_retries} attempts: {str(e)}")
|
| 163 |
-
raise
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
# =============================================================================
|
| 167 |
-
# GitHub API with backoff (same as app.py)
|
| 168 |
-
# =============================================================================
|
| 169 |
-
|
| 170 |
-
def request_with_backoff(method, url, *, headers=None, params=None, json_body=None, data=None, max_retries=10, timeout=30):
|
| 171 |
-
delay = 1.0
|
| 172 |
-
for attempt in range(max_retries):
|
| 173 |
-
try:
|
| 174 |
-
resp = requests.request(
|
| 175 |
-
method,
|
| 176 |
-
url,
|
| 177 |
-
headers=headers or {},
|
| 178 |
-
params=params,
|
| 179 |
-
json=json_body,
|
| 180 |
-
data=data,
|
| 181 |
-
timeout=timeout
|
| 182 |
-
)
|
| 183 |
-
|
| 184 |
-
status = resp.status_code
|
| 185 |
-
|
| 186 |
-
if 200 <= status < 300:
|
| 187 |
-
return resp
|
| 188 |
-
|
| 189 |
-
if status in (403, 429) or 500 <= status < 600:
|
| 190 |
-
wait = None
|
| 191 |
-
retry_after = resp.headers.get('Retry-After') or resp.headers.get('retry-after')
|
| 192 |
-
if retry_after:
|
| 193 |
-
try:
|
| 194 |
-
wait = float(retry_after)
|
| 195 |
-
except Exception:
|
| 196 |
-
wait = None
|
| 197 |
-
if wait is None and status in (403, 429):
|
| 198 |
-
reset_hdr = resp.headers.get('X-RateLimit-Reset') or resp.headers.get('x-ratelimit-reset')
|
| 199 |
-
if reset_hdr:
|
| 200 |
-
try:
|
| 201 |
-
reset_ts = int(float(reset_hdr))
|
| 202 |
-
wait = max(reset_ts - time.time() + 2, 1)
|
| 203 |
-
except Exception:
|
| 204 |
-
wait = None
|
| 205 |
-
if wait is None:
|
| 206 |
-
wait = delay + random.uniform(0, 0.5)
|
| 207 |
-
wait = max(1.0, min(wait, 120.0))
|
| 208 |
-
print(f"GitHub API {status}. Backing off {wait:.1f}s (attempt {attempt + 1}/{max_retries})...")
|
| 209 |
-
time.sleep(wait)
|
| 210 |
-
delay = min(delay * 2, 60.0)
|
| 211 |
-
continue
|
| 212 |
-
|
| 213 |
-
return resp
|
| 214 |
-
|
| 215 |
-
except requests.RequestException as e:
|
| 216 |
-
wait = delay + random.uniform(0, 0.5)
|
| 217 |
-
wait = max(1.0, min(wait, 60.0))
|
| 218 |
-
print(f"Request error: {e}. Retrying in {wait:.1f}s (attempt {attempt + 1}/{max_retries})...")
|
| 219 |
-
time.sleep(wait)
|
| 220 |
-
delay = min(delay * 2, 60.0)
|
| 221 |
-
|
| 222 |
-
print(f"Exceeded max retries for {url}")
|
| 223 |
-
return None
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
def fetch_reviews_with_time_partition(base_query, start_date, end_date, headers, prs_by_url, debug_limit=None, depth=0):
|
| 227 |
-
"""
|
| 228 |
-
Fetch PR reviews within a specific time range using time-based partitioning.
|
| 229 |
-
Recursively splits the time range if hitting the 1000-result limit.
|
| 230 |
-
Supports splitting by day, hour, minute, and second as needed.
|
| 231 |
-
|
| 232 |
-
Args:
|
| 233 |
-
debug_limit: If set, stops fetching after this many NEW PRs total across all partitions (for testing)
|
| 234 |
-
depth: Current recursion depth (for tracking)
|
| 235 |
-
|
| 236 |
-
Returns the number of PRs found in this time partition.
|
| 237 |
-
"""
|
| 238 |
-
# Calculate time difference
|
| 239 |
-
time_diff = end_date - start_date
|
| 240 |
-
total_seconds = time_diff.total_seconds()
|
| 241 |
-
|
| 242 |
-
# Determine granularity and format dates accordingly
|
| 243 |
-
if total_seconds >= 86400: # >= 1 day
|
| 244 |
-
# Use day granularity (YYYY-MM-DD)
|
| 245 |
-
start_str = start_date.strftime('%Y-%m-%d')
|
| 246 |
-
end_str = end_date.strftime('%Y-%m-%d')
|
| 247 |
-
elif total_seconds >= 3600: # >= 1 hour but < 1 day
|
| 248 |
-
# Use hour granularity (YYYY-MM-DDTHH:MM:SSZ)
|
| 249 |
-
start_str = start_date.strftime('%Y-%m-%dT%H:00:00Z')
|
| 250 |
-
end_str = end_date.strftime('%Y-%m-%dT%H:59:59Z')
|
| 251 |
-
elif total_seconds >= 60: # >= 1 minute but < 1 hour
|
| 252 |
-
# Use minute granularity (YYYY-MM-DDTHH:MM:SSZ)
|
| 253 |
-
start_str = start_date.strftime('%Y-%m-%dT%H:%M:00Z')
|
| 254 |
-
end_str = end_date.strftime('%Y-%m-%dT%H:%M:59Z')
|
| 255 |
-
else: # < 1 minute
|
| 256 |
-
# Use second granularity (YYYY-MM-DDTHH:MM:SSZ)
|
| 257 |
-
start_str = start_date.strftime('%Y-%m-%dT%H:%M:%SZ')
|
| 258 |
-
end_str = end_date.strftime('%Y-%m-%dT%H:%M:%SZ')
|
| 259 |
-
|
| 260 |
-
query = f'{base_query} created:{start_str}..{end_str}'
|
| 261 |
-
|
| 262 |
-
indent = " " + " " * depth
|
| 263 |
-
print(f"{indent}Searching range {start_str} to {end_str}...")
|
| 264 |
-
|
| 265 |
-
page = 1
|
| 266 |
-
per_page = 100
|
| 267 |
-
total_in_partition = 0
|
| 268 |
-
|
| 269 |
-
while True:
|
| 270 |
-
# Check debug limit GLOBALLY (total unique PRs across all partitions)
|
| 271 |
-
if debug_limit is not None and len(prs_by_url) >= debug_limit:
|
| 272 |
-
print(f"{indent} 🐛 DEBUG MODE: Reached global limit of {debug_limit} PRs, stopping...")
|
| 273 |
-
return total_in_partition
|
| 274 |
-
|
| 275 |
-
url = 'https://api.github.com/search/issues' # Use issues endpoint for PR search
|
| 276 |
-
params = {
|
| 277 |
-
'q': query,
|
| 278 |
-
'per_page': per_page,
|
| 279 |
-
'page': page,
|
| 280 |
-
'sort': 'created',
|
| 281 |
-
'order': 'asc'
|
| 282 |
-
}
|
| 283 |
-
headers_with_accept = headers.copy() if headers else {}
|
| 284 |
-
|
| 285 |
-
try:
|
| 286 |
-
response = request_with_backoff('GET', url, headers=headers_with_accept, params=params)
|
| 287 |
-
if response is None:
|
| 288 |
-
print(f"{indent} Error: retries exhausted for range {start_str} to {end_str}")
|
| 289 |
-
return total_in_partition
|
| 290 |
-
|
| 291 |
-
if response.status_code != 200:
|
| 292 |
-
print(f"{indent} Error: HTTP {response.status_code} for range {start_str} to {end_str}")
|
| 293 |
-
return total_in_partition
|
| 294 |
-
|
| 295 |
-
data = response.json()
|
| 296 |
-
total_count = data.get('total_count', 0)
|
| 297 |
-
items = data.get('items', [])
|
| 298 |
-
|
| 299 |
-
if not items:
|
| 300 |
-
break
|
| 301 |
-
|
| 302 |
-
# Add PR reviews to global dict (keyed by PR URL)
|
| 303 |
-
for pr in items:
|
| 304 |
-
pr_url = pr.get('html_url')
|
| 305 |
-
pr_number = pr.get('number')
|
| 306 |
-
# Use PR URL as unique key (more reliable than number alone)
|
| 307 |
-
if pr_url and pr_url not in prs_by_url:
|
| 308 |
-
prs_by_url[pr_url] = pr
|
| 309 |
-
total_in_partition += 1
|
| 310 |
-
|
| 311 |
-
# Check if we hit the 1000-result limit
|
| 312 |
-
if total_count > 1000 and page == 10:
|
| 313 |
-
print(f"{indent} ⚠️ Hit 1000-result limit ({total_count} total). Splitting time range...")
|
| 314 |
-
|
| 315 |
-
# Determine how to split based on time range duration
|
| 316 |
-
if total_seconds < 2: # Less than 2 seconds - can't split further
|
| 317 |
-
print(f"{indent} ⚠️ Cannot split further (range < 2 seconds). Some results may be missing.")
|
| 318 |
-
break
|
| 319 |
-
|
| 320 |
-
elif total_seconds < 120: # Less than 2 minutes - split by seconds
|
| 321 |
-
# Split into 2-4 parts depending on range
|
| 322 |
-
num_splits = min(4, max(2, int(total_seconds / 30)))
|
| 323 |
-
split_duration = time_diff / num_splits
|
| 324 |
-
split_dates = [start_date + split_duration * i for i in range(num_splits + 1)]
|
| 325 |
-
|
| 326 |
-
total_from_splits = 0
|
| 327 |
-
for i in range(num_splits):
|
| 328 |
-
split_start = split_dates[i]
|
| 329 |
-
split_end = split_dates[i + 1]
|
| 330 |
-
# Avoid overlapping ranges (add 1 second to start)
|
| 331 |
-
if i > 0:
|
| 332 |
-
split_start = split_start + timedelta(seconds=1)
|
| 333 |
-
|
| 334 |
-
count = fetch_reviews_with_time_partition(
|
| 335 |
-
base_query, split_start, split_end, headers, prs_by_url, debug_limit, depth + 1
|
| 336 |
-
)
|
| 337 |
-
total_from_splits += count
|
| 338 |
-
|
| 339 |
-
return total_from_splits
|
| 340 |
-
|
| 341 |
-
elif total_seconds < 7200: # Less than 2 hours - split by minutes
|
| 342 |
-
# Split into 2-4 parts
|
| 343 |
-
num_splits = min(4, max(2, int(total_seconds / 1800)))
|
| 344 |
-
split_duration = time_diff / num_splits
|
| 345 |
-
split_dates = [start_date + split_duration * i for i in range(num_splits + 1)]
|
| 346 |
-
|
| 347 |
-
total_from_splits = 0
|
| 348 |
-
for i in range(num_splits):
|
| 349 |
-
split_start = split_dates[i]
|
| 350 |
-
split_end = split_dates[i + 1]
|
| 351 |
-
# Avoid overlapping ranges (add 1 minute to start)
|
| 352 |
-
if i > 0:
|
| 353 |
-
split_start = split_start + timedelta(minutes=1)
|
| 354 |
-
|
| 355 |
-
count = fetch_reviews_with_time_partition(
|
| 356 |
-
base_query, split_start, split_end, headers, prs_by_url, debug_limit, depth + 1
|
| 357 |
-
)
|
| 358 |
-
total_from_splits += count
|
| 359 |
-
|
| 360 |
-
return total_from_splits
|
| 361 |
-
|
| 362 |
-
elif total_seconds < 172800: # Less than 2 days - split by hours
|
| 363 |
-
# Split into 2-4 parts
|
| 364 |
-
num_splits = min(4, max(2, int(total_seconds / 43200)))
|
| 365 |
-
split_duration = time_diff / num_splits
|
| 366 |
-
split_dates = [start_date + split_duration * i for i in range(num_splits + 1)]
|
| 367 |
-
|
| 368 |
-
total_from_splits = 0
|
| 369 |
-
for i in range(num_splits):
|
| 370 |
-
split_start = split_dates[i]
|
| 371 |
-
split_end = split_dates[i + 1]
|
| 372 |
-
# Avoid overlapping ranges (add 1 hour to start)
|
| 373 |
-
if i > 0:
|
| 374 |
-
split_start = split_start + timedelta(hours=1)
|
| 375 |
-
|
| 376 |
-
count = fetch_reviews_with_time_partition(
|
| 377 |
-
base_query, split_start, split_end, headers, prs_by_url, debug_limit, depth + 1
|
| 378 |
-
)
|
| 379 |
-
total_from_splits += count
|
| 380 |
-
|
| 381 |
-
return total_from_splits
|
| 382 |
-
|
| 383 |
-
else: # 2+ days - split by days
|
| 384 |
-
days_diff = time_diff.days
|
| 385 |
-
|
| 386 |
-
# Use aggressive splitting for large ranges or deep recursion
|
| 387 |
-
# Split into 4 parts if range is > 30 days, otherwise split in half
|
| 388 |
-
if days_diff > 30 or depth > 5:
|
| 389 |
-
# Split into 4 parts for more aggressive partitioning
|
| 390 |
-
quarter_diff = time_diff / 4
|
| 391 |
-
split_dates = [
|
| 392 |
-
start_date,
|
| 393 |
-
start_date + quarter_diff,
|
| 394 |
-
start_date + quarter_diff * 2,
|
| 395 |
-
start_date + quarter_diff * 3,
|
| 396 |
-
end_date
|
| 397 |
-
]
|
| 398 |
-
|
| 399 |
-
total_from_splits = 0
|
| 400 |
-
for i in range(4):
|
| 401 |
-
split_start = split_dates[i]
|
| 402 |
-
split_end = split_dates[i + 1]
|
| 403 |
-
# Avoid overlapping ranges
|
| 404 |
-
if i > 0:
|
| 405 |
-
split_start = split_start + timedelta(days=1)
|
| 406 |
-
|
| 407 |
-
count = fetch_reviews_with_time_partition(
|
| 408 |
-
base_query, split_start, split_end, headers, prs_by_url, debug_limit, depth + 1
|
| 409 |
-
)
|
| 410 |
-
total_from_splits += count
|
| 411 |
-
|
| 412 |
-
return total_from_splits
|
| 413 |
-
else:
|
| 414 |
-
# Binary split for smaller ranges
|
| 415 |
-
mid_date = start_date + time_diff / 2
|
| 416 |
-
|
| 417 |
-
# Recursively fetch both halves
|
| 418 |
-
count1 = fetch_reviews_with_time_partition(
|
| 419 |
-
base_query, start_date, mid_date, headers, prs_by_url, debug_limit, depth + 1
|
| 420 |
-
)
|
| 421 |
-
count2 = fetch_reviews_with_time_partition(
|
| 422 |
-
base_query, mid_date + timedelta(days=1), end_date, headers, prs_by_url, debug_limit, depth + 1
|
| 423 |
-
)
|
| 424 |
-
|
| 425 |
-
return count1 + count2
|
| 426 |
-
|
| 427 |
-
# Normal pagination: check if there are more pages
|
| 428 |
-
if len(items) < per_page or page >= 10:
|
| 429 |
-
break
|
| 430 |
-
|
| 431 |
-
page += 1
|
| 432 |
-
time.sleep(0.5) # Courtesy delay between pages
|
| 433 |
-
|
| 434 |
-
except Exception as e:
|
| 435 |
-
print(f"{indent} Error fetching range {start_str} to {end_str}: {str(e)}")
|
| 436 |
-
return total_in_partition
|
| 437 |
-
|
| 438 |
-
if total_in_partition > 0:
|
| 439 |
-
print(f"{indent} ✓ Found {total_in_partition} PRs in range {start_str} to {end_str}")
|
| 440 |
-
|
| 441 |
-
return total_in_partition
|
| 442 |
-
|
| 443 |
-
|
| 444 |
-
def extract_review_metadata(pr):
|
| 445 |
-
"""
|
| 446 |
-
Extract minimal PR review metadata for efficient storage.
|
| 447 |
-
Only keeps essential fields: html_url, reviewed_at, pr_status, pr_merged, pr_closed_at.
|
| 448 |
-
Note: agent_name is not stored as it's inferred from the folder structure.
|
| 449 |
-
|
| 450 |
-
PR status:
|
| 451 |
-
- pr_status: 'open', 'merged', or 'closed'
|
| 452 |
-
- pr_merged: True if PR was merged, False otherwise
|
| 453 |
-
- pr_closed_at: Date when PR was closed/merged (if applicable)
|
| 454 |
-
|
| 455 |
-
merged PR = PR that was merged after agent review
|
| 456 |
-
Rejected PR = PR that was closed without merging after agent review
|
| 457 |
-
"""
|
| 458 |
-
# Extract PR metadata from search results
|
| 459 |
-
# The GitHub search API returns PR data from /search/issues endpoint
|
| 460 |
-
pr_url = pr.get('html_url')
|
| 461 |
-
pr_number = pr.get('number')
|
| 462 |
-
created_at = pr.get('created_at')
|
| 463 |
-
closed_at = pr.get('closed_at')
|
| 464 |
-
state = pr.get('state', 'open') # open or closed
|
| 465 |
-
|
| 466 |
-
# Check if PR has pull_request field (indicates it's a PR, not an issue)
|
| 467 |
-
pull_request_data = pr.get('pull_request', {})
|
| 468 |
-
|
| 469 |
-
# For initial extraction, we don't know if merged yet
|
| 470 |
-
# This will be updated by update_pr_status function
|
| 471 |
-
pr_merged = pull_request_data.get('merged_at') is not None if pull_request_data else False
|
| 472 |
-
|
| 473 |
-
# Determine initial status
|
| 474 |
-
if pr_merged:
|
| 475 |
-
status = 'merged'
|
| 476 |
-
elif state == 'closed':
|
| 477 |
-
status = 'closed'
|
| 478 |
-
else:
|
| 479 |
-
status = 'open'
|
| 480 |
-
|
| 481 |
-
return {
|
| 482 |
-
'html_url': pr_url,
|
| 483 |
-
'reviewed_at': created_at, # When the PR was created (agent reviewed it)
|
| 484 |
-
'pr_status': status,
|
| 485 |
-
'pr_merged': pr_merged,
|
| 486 |
-
'pr_closed_at': closed_at,
|
| 487 |
-
'pr_url': pr_url, # Store PR URL for tracking
|
| 488 |
-
'review_id': f"pr_{pr_number}" # Use PR number for deduplication
|
| 489 |
-
}
|
| 490 |
-
|
| 491 |
-
|
| 492 |
-
def update_pr_status(metadata_list, headers, token):
|
| 493 |
-
"""
|
| 494 |
-
Update PR status for reviews to get current merged/closed state.
|
| 495 |
-
|
| 496 |
-
For each PR associated with a review, fetch current status from GitHub API.
|
| 497 |
-
Updates metadata_list in-place with PR status information.
|
| 498 |
-
|
| 499 |
-
In DEBUG MODE: Skips status updates to avoid API rate limits.
|
| 500 |
-
|
| 501 |
-
Args:
|
| 502 |
-
metadata_list: List of review metadata dictionaries
|
| 503 |
-
headers: HTTP headers for GitHub API
|
| 504 |
-
token: GitHub API token
|
| 505 |
-
|
| 506 |
-
Returns:
|
| 507 |
-
Updated metadata_list with current PR status
|
| 508 |
-
"""
|
| 509 |
-
if not metadata_list:
|
| 510 |
-
return metadata_list
|
| 511 |
-
|
| 512 |
-
# In debug mode, skip status updates to avoid excessive API calls
|
| 513 |
-
if DEBUG_MODE:
|
| 514 |
-
print(f" 🐛 DEBUG MODE: Skipping PR status updates for {len(metadata_list)} reviews")
|
| 515 |
-
return metadata_list
|
| 516 |
-
|
| 517 |
-
# Track unique PRs to avoid duplicate API calls
|
| 518 |
-
pr_url_to_status = {}
|
| 519 |
-
updated_count = 0
|
| 520 |
-
|
| 521 |
-
for metadata in metadata_list:
|
| 522 |
-
pr_url = metadata.get('pr_url')
|
| 523 |
-
if not pr_url:
|
| 524 |
-
continue
|
| 525 |
-
|
| 526 |
-
# Skip if already fetched for this PR
|
| 527 |
-
if pr_url in pr_url_to_status:
|
| 528 |
-
status_info = pr_url_to_status[pr_url]
|
| 529 |
-
metadata['pr_status'] = status_info['status']
|
| 530 |
-
metadata['pr_merged'] = status_info['merged']
|
| 531 |
-
metadata['pr_closed_at'] = status_info['closed_at']
|
| 532 |
-
continue
|
| 533 |
-
|
| 534 |
-
try:
|
| 535 |
-
# Convert HTML URL to API URL
|
| 536 |
-
# https://github.com/owner/repo/pull/123 -> https://api.github.com/repos/owner/repo/pulls/123
|
| 537 |
-
parts = pr_url.replace('https://github.com/', '').split('/')
|
| 538 |
-
if len(parts) >= 4:
|
| 539 |
-
owner, repo, pull_word, pr_number = parts[0], parts[1], parts[2], parts[3]
|
| 540 |
-
api_url = f'https://api.github.com/repos/{owner}/{repo}/pulls/{pr_number}'
|
| 541 |
-
|
| 542 |
-
response = request_with_backoff('GET', api_url, headers=headers, max_retries=3)
|
| 543 |
-
|
| 544 |
-
if response and response.status_code == 200:
|
| 545 |
-
pr_data = response.json()
|
| 546 |
-
state = pr_data.get('state', 'open')
|
| 547 |
-
merged = pr_data.get('merged', False)
|
| 548 |
-
closed_at = pr_data.get('closed_at')
|
| 549 |
-
merged_at = pr_data.get('merged_at')
|
| 550 |
-
|
| 551 |
-
# Determine final status
|
| 552 |
-
if merged:
|
| 553 |
-
status = 'merged'
|
| 554 |
-
elif state == 'closed':
|
| 555 |
-
status = 'closed'
|
| 556 |
-
else:
|
| 557 |
-
status = 'open'
|
| 558 |
-
|
| 559 |
-
status_info = {
|
| 560 |
-
'status': status,
|
| 561 |
-
'merged': merged,
|
| 562 |
-
'closed_at': closed_at or merged_at
|
| 563 |
-
}
|
| 564 |
-
|
| 565 |
-
# Cache and update
|
| 566 |
-
pr_url_to_status[pr_url] = status_info
|
| 567 |
-
metadata['pr_status'] = status
|
| 568 |
-
metadata['pr_merged'] = merged
|
| 569 |
-
metadata['pr_closed_at'] = closed_at or merged_at
|
| 570 |
-
updated_count += 1
|
| 571 |
-
|
| 572 |
-
# Small delay to avoid rate limiting
|
| 573 |
-
time.sleep(0.1)
|
| 574 |
-
|
| 575 |
-
except Exception as e:
|
| 576 |
-
print(f" Warning: Could not check PR status for {pr_url}: {e}")
|
| 577 |
-
continue
|
| 578 |
-
|
| 579 |
-
if updated_count > 0:
|
| 580 |
-
print(f" ✓ Updated status for {updated_count} unique PRs")
|
| 581 |
-
|
| 582 |
-
return metadata_list
|
| 583 |
-
|
| 584 |
-
|
| 585 |
-
def fetch_all_reviews_metadata(identifier, agent_name, token=None, start_from_date=None, year=None, exclude_dates=None):
|
| 586 |
-
"""
|
| 587 |
-
Fetch PR reviews associated with a GitHub user or bot for the past 6 months.
|
| 588 |
-
Returns lightweight metadata instead of full review objects.
|
| 589 |
-
|
| 590 |
-
This function employs time-based partitioning to navigate GitHub's 1000-result limit per query.
|
| 591 |
-
It searches using the query pattern:
|
| 592 |
-
- reviewed-by:{identifier} (PR reviews by the agent)
|
| 593 |
-
|
| 594 |
-
After fetching reviews, it updates PR status to determine if PRs were merged or closed.
|
| 595 |
-
|
| 596 |
-
Args:
|
| 597 |
-
identifier: GitHub username or bot identifier
|
| 598 |
-
agent_name: Human-readable name of the agent for metadata purposes
|
| 599 |
-
token: GitHub API token for authentication
|
| 600 |
-
start_from_date: Only fetch reviews created after this date (for incremental updates)
|
| 601 |
-
year: Year parameter (deprecated, retained for compatibility but not utilized)
|
| 602 |
-
exclude_dates: Set of date objects to exclude from mining (dates that have already been processed)
|
| 603 |
-
|
| 604 |
-
Returns:
|
| 605 |
-
List of dictionaries containing minimal PR review metadata with PR status
|
| 606 |
-
"""
|
| 607 |
-
headers = {'Authorization': f'token {token}'} if token else {}
|
| 608 |
-
|
| 609 |
-
# Debug mode: limit review retrieval for testing
|
| 610 |
-
debug_limit_per_pattern = 10 if DEBUG_MODE else None
|
| 611 |
-
|
| 612 |
-
if DEBUG_MODE:
|
| 613 |
-
print(f"\n🐛 DEBUG MODE ENABLED: Limiting to {debug_limit_per_pattern} PRs per query pattern")
|
| 614 |
-
|
| 615 |
-
# Define query pattern for PR reviews:
|
| 616 |
-
query_patterns = []
|
| 617 |
-
|
| 618 |
-
# Add reviewed-by pattern for PR reviews
|
| 619 |
-
query_patterns.append(f'is:pr reviewed-by:{identifier}')
|
| 620 |
-
|
| 621 |
-
# Use a dict to deduplicate PRs by URL
|
| 622 |
-
prs_by_url = {}
|
| 623 |
-
|
| 624 |
-
# Define time range: past 6 months only (or from start_from_date if specified)
|
| 625 |
-
current_time = datetime.now(timezone.utc)
|
| 626 |
-
six_months_ago = current_time - timedelta(days=180) # ~6 months
|
| 627 |
-
|
| 628 |
-
if start_from_date:
|
| 629 |
-
# Use start_from_date but ensure it's not older than 6 months
|
| 630 |
-
start_date = max(start_from_date, six_months_ago)
|
| 631 |
-
else:
|
| 632 |
-
start_date = six_months_ago
|
| 633 |
-
|
| 634 |
-
# End date is current time
|
| 635 |
-
end_date = current_time
|
| 636 |
-
|
| 637 |
-
for query_pattern in query_patterns:
|
| 638 |
-
print(f"\n🔍 Searching with query: {query_pattern}")
|
| 639 |
-
print(f" Time range: {start_date.strftime('%Y-%m-%d')} to {end_date.strftime('%Y-%m-%d')}")
|
| 640 |
-
|
| 641 |
-
pattern_start_time = time.time()
|
| 642 |
-
initial_count = len(prs_by_url)
|
| 643 |
-
|
| 644 |
-
# Fetch with time partitioning
|
| 645 |
-
reviews_found = fetch_reviews_with_time_partition(
|
| 646 |
-
query_pattern,
|
| 647 |
-
start_date,
|
| 648 |
-
end_date,
|
| 649 |
-
headers,
|
| 650 |
-
prs_by_url,
|
| 651 |
-
debug_limit_per_pattern
|
| 652 |
-
)
|
| 653 |
-
|
| 654 |
-
pattern_duration = time.time() - pattern_start_time
|
| 655 |
-
new_reviews = len(prs_by_url) - initial_count
|
| 656 |
-
|
| 657 |
-
print(f" ✓ Pattern complete: {new_reviews} new PRs found ({reviews_found} total fetched, {len(prs_by_url) - initial_count - (reviews_found - new_reviews)} duplicates)")
|
| 658 |
-
print(f" ⏱️ Time taken: {pattern_duration:.1f} seconds")
|
| 659 |
-
|
| 660 |
-
# Delay between different query patterns (shorter in debug mode)
|
| 661 |
-
time.sleep(0.2 if DEBUG_MODE else 1.0)
|
| 662 |
-
|
| 663 |
-
# Convert to lightweight metadata
|
| 664 |
-
all_prs = list(prs_by_url.values())
|
| 665 |
-
|
| 666 |
-
# Filter out PRs from excluded dates if specified
|
| 667 |
-
if exclude_dates:
|
| 668 |
-
filtered_prs = []
|
| 669 |
-
excluded_count = 0
|
| 670 |
-
for pr in all_prs:
|
| 671 |
-
created_at = pr.get('created_at')
|
| 672 |
-
if created_at:
|
| 673 |
-
try:
|
| 674 |
-
dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
|
| 675 |
-
pr_date = dt.date()
|
| 676 |
-
if pr_date not in exclude_dates:
|
| 677 |
-
filtered_prs.append(pr)
|
| 678 |
-
else:
|
| 679 |
-
excluded_count += 1
|
| 680 |
-
except Exception:
|
| 681 |
-
filtered_prs.append(pr) # Keep PRs with unparseable dates
|
| 682 |
-
else:
|
| 683 |
-
filtered_prs.append(pr) # Keep PRs without created_at
|
| 684 |
-
|
| 685 |
-
if excluded_count > 0:
|
| 686 |
-
print(f" ⏭️ Skipped {excluded_count} PRs from already-mined dates")
|
| 687 |
-
all_prs = filtered_prs
|
| 688 |
-
|
| 689 |
-
if DEBUG_MODE:
|
| 690 |
-
print(f"\n✅ COMPLETE (DEBUG MODE): Found {len(all_prs)} unique PRs reviewed by {identifier}")
|
| 691 |
-
print(f" Note: In production mode, this would fetch ALL PRs")
|
| 692 |
-
else:
|
| 693 |
-
print(f"\n✅ COMPLETE: Found {len(all_prs)} unique PRs reviewed by {identifier}")
|
| 694 |
-
print(f"📦 Extracting minimal metadata and updating PR status...")
|
| 695 |
-
|
| 696 |
-
# Extract metadata for each PR review
|
| 697 |
-
metadata_list = [extract_review_metadata(pr) for pr in all_prs]
|
| 698 |
-
|
| 699 |
-
# Update PR status to get current merged/closed state
|
| 700 |
-
print(f"🔍 Updating PR status for reviewed PRs...")
|
| 701 |
-
metadata_list = update_pr_status(metadata_list, headers, token)
|
| 702 |
-
|
| 703 |
-
# Calculate memory savings
|
| 704 |
-
original_size = sys.getsizeof(str(all_prs))
|
| 705 |
-
metadata_size = sys.getsizeof(str(metadata_list))
|
| 706 |
-
savings_pct = ((original_size - metadata_size) / original_size * 100) if original_size > 0 else 0
|
| 707 |
-
|
| 708 |
-
print(f"💾 Memory efficiency: {original_size // 1024}KB → {metadata_size // 1024}KB (saved {savings_pct:.1f}%)")
|
| 709 |
-
|
| 710 |
-
return metadata_list
|
| 711 |
-
|
| 712 |
-
|
| 713 |
-
def group_metadata_by_date(metadata_list):
|
| 714 |
-
"""
|
| 715 |
-
Group review metadata by exact date (year.month.day) for efficient daily storage.
|
| 716 |
-
Returns dict: {(year, month, day): [metadata_list]}
|
| 717 |
-
"""
|
| 718 |
-
grouped = defaultdict(list)
|
| 719 |
-
|
| 720 |
-
for review_meta in metadata_list:
|
| 721 |
-
reviewed_at = review_meta.get('reviewed_at')
|
| 722 |
-
if not reviewed_at:
|
| 723 |
-
continue
|
| 724 |
-
|
| 725 |
-
try:
|
| 726 |
-
dt = datetime.fromisoformat(reviewed_at.replace('Z', '+00:00'))
|
| 727 |
-
key = (dt.year, dt.month, dt.day)
|
| 728 |
-
grouped[key].append(review_meta)
|
| 729 |
-
except Exception as e:
|
| 730 |
-
print(f"Warning: Could not parse date '{reviewed_at}': {e}")
|
| 731 |
-
|
| 732 |
-
return dict(grouped)
|
| 733 |
-
|
| 734 |
-
|
| 735 |
-
def save_review_metadata_to_hf(metadata_list, agent_identifier):
|
| 736 |
-
"""
|
| 737 |
-
Save review metadata to HuggingFace dataset, organized by [agent_identifier]/YYYY.MM.DD.jsonl.
|
| 738 |
-
Each file is stored in the agent's folder and named YYYY.MM.DD.jsonl for that day's reviews.
|
| 739 |
-
In debug mode, saves to in-memory cache only.
|
| 740 |
-
|
| 741 |
-
This function APPENDS new metadata and DEDUPLICATES by sha.
|
| 742 |
-
|
| 743 |
-
Args:
|
| 744 |
-
metadata_list: List of review metadata dictionaries
|
| 745 |
-
agent_identifier: GitHub identifier of the agent (used as folder name)
|
| 746 |
-
"""
|
| 747 |
-
# Skip saving to HF in debug mode - use in-memory cache instead
|
| 748 |
-
if DEBUG_MODE:
|
| 749 |
-
global DEBUG_REVIEW_METADATA_CACHE
|
| 750 |
-
# Merge with existing cache, deduplicating by review_id
|
| 751 |
-
existing = {review['review_id']: review for review in DEBUG_REVIEW_METADATA_CACHE[agent_identifier] if review.get('review_id')}
|
| 752 |
-
new = {review['review_id']: review for review in metadata_list if review.get('review_id')}
|
| 753 |
-
existing.update(new)
|
| 754 |
-
DEBUG_REVIEW_METADATA_CACHE[agent_identifier] = list(existing.values())
|
| 755 |
-
print(f"🐛 DEBUG MODE: Saved to in-memory cache only ({len(metadata_list)} reviews) - NOT saved to HuggingFace")
|
| 756 |
-
return True
|
| 757 |
-
|
| 758 |
-
try:
|
| 759 |
-
token = get_hf_token()
|
| 760 |
-
if not token:
|
| 761 |
-
raise Exception("No HuggingFace token found")
|
| 762 |
-
|
| 763 |
-
api = HfApi()
|
| 764 |
-
|
| 765 |
-
# Group by exact date (year, month, day)
|
| 766 |
-
grouped = group_metadata_by_date(metadata_list)
|
| 767 |
-
|
| 768 |
-
for (review_year, month, day), day_metadata in grouped.items():
|
| 769 |
-
# New structure: [agent_identifier]/YYYY.MM.DD.jsonl
|
| 770 |
-
filename = f"{agent_identifier}/{review_year}.{month:02d}.{day:02d}.jsonl"
|
| 771 |
-
local_filename = f"{review_year}.{month:02d}.{day:02d}.jsonl"
|
| 772 |
-
print(f"📤 Uploading {len(day_metadata)} reviews to {filename}...")
|
| 773 |
-
|
| 774 |
-
# Download existing file if it exists
|
| 775 |
-
existing_metadata = []
|
| 776 |
-
try:
|
| 777 |
-
file_path = hf_hub_download(
|
| 778 |
-
repo_id=REVIEW_METADATA_REPO,
|
| 779 |
-
filename=filename,
|
| 780 |
-
repo_type="dataset",
|
| 781 |
-
token=token
|
| 782 |
-
)
|
| 783 |
-
existing_metadata = load_jsonl(file_path)
|
| 784 |
-
print(f" Found {len(existing_metadata)} existing reviews in {filename}")
|
| 785 |
-
except Exception:
|
| 786 |
-
print(f" No existing file found for {filename}, creating new")
|
| 787 |
-
|
| 788 |
-
# Merge and deduplicate by review_id
|
| 789 |
-
existing_by_id = {meta['review_id']: meta for meta in existing_metadata if meta.get('review_id')}
|
| 790 |
-
new_by_id = {meta['review_id']: meta for meta in day_metadata if meta.get('review_id')}
|
| 791 |
-
|
| 792 |
-
# Update with new data (new data overwrites old)
|
| 793 |
-
existing_by_id.update(new_by_id)
|
| 794 |
-
merged_metadata = list(existing_by_id.values())
|
| 795 |
-
|
| 796 |
-
# Save locally
|
| 797 |
-
save_jsonl(local_filename, merged_metadata)
|
| 798 |
-
|
| 799 |
-
try:
|
| 800 |
-
# Upload to HuggingFace with folder path
|
| 801 |
-
upload_with_retry(
|
| 802 |
-
api=api,
|
| 803 |
-
path_or_fileobj=local_filename,
|
| 804 |
-
path_in_repo=filename,
|
| 805 |
-
repo_id=REVIEW_METADATA_REPO,
|
| 806 |
-
repo_type="dataset",
|
| 807 |
-
token=token
|
| 808 |
-
)
|
| 809 |
-
print(f" ✓ Saved {len(merged_metadata)} total reviews to {filename}")
|
| 810 |
-
finally:
|
| 811 |
-
# Always clean up local file, even if upload fails
|
| 812 |
-
if os.path.exists(local_filename):
|
| 813 |
-
os.remove(local_filename)
|
| 814 |
-
|
| 815 |
-
return True
|
| 816 |
-
|
| 817 |
-
except Exception as e:
|
| 818 |
-
print(f"✗ Error saving review metadata: {str(e)}")
|
| 819 |
-
return False
|
| 820 |
-
|
| 821 |
-
|
| 822 |
-
def load_agents_from_hf():
|
| 823 |
-
try:
|
| 824 |
-
api = HfApi()
|
| 825 |
-
agents = []
|
| 826 |
-
files = api.list_repo_files(repo_id=AGENTS_REPO, repo_type="dataset")
|
| 827 |
-
json_files = [f for f in files if f.endswith('.json')]
|
| 828 |
-
print(f"Found {len(json_files)} agent files in {AGENTS_REPO}")
|
| 829 |
-
for json_file in json_files:
|
| 830 |
-
try:
|
| 831 |
-
file_path = hf_hub_download(
|
| 832 |
-
repo_id=AGENTS_REPO,
|
| 833 |
-
filename=json_file,
|
| 834 |
-
repo_type="dataset"
|
| 835 |
-
)
|
| 836 |
-
with open(file_path, 'r') as f:
|
| 837 |
-
agent_data = json.load(f)
|
| 838 |
-
agents.append(agent_data)
|
| 839 |
-
except Exception as e:
|
| 840 |
-
print(f"Warning: Could not load {json_file}: {str(e)}")
|
| 841 |
-
continue
|
| 842 |
-
print(f"✓ Loaded {len(agents)} agents from HuggingFace")
|
| 843 |
-
return agents
|
| 844 |
-
except Exception as e:
|
| 845 |
-
print(f"Could not load agents from HuggingFace: {str(e)}")
|
| 846 |
-
return None
|
| 847 |
-
|
| 848 |
-
|
| 849 |
-
def load_review_metadata_for_year(year):
|
| 850 |
-
"""
|
| 851 |
-
Load all review metadata for a specific year from HuggingFace.
|
| 852 |
-
Scans all agent folders and loads daily files matching the year.
|
| 853 |
-
In debug mode, loads from in-memory cache if available.
|
| 854 |
-
|
| 855 |
-
Structure: [agent_identifier]/YYYY.MM.DD.jsonl
|
| 856 |
-
|
| 857 |
-
Returns:
|
| 858 |
-
List of dictionaries with 'agent_identifier' added to each review metadata.
|
| 859 |
-
"""
|
| 860 |
-
# In debug mode, check in-memory cache first
|
| 861 |
-
if DEBUG_MODE and DEBUG_REVIEW_METADATA_CACHE:
|
| 862 |
-
all_metadata = []
|
| 863 |
-
for agent_identifier, metadata_list in DEBUG_REVIEW_METADATA_CACHE.items():
|
| 864 |
-
for review_meta in metadata_list:
|
| 865 |
-
review_with_agent = review_meta.copy()
|
| 866 |
-
review_with_agent['agent_identifier'] = agent_identifier
|
| 867 |
-
all_metadata.append(review_with_agent)
|
| 868 |
-
if all_metadata:
|
| 869 |
-
print(f"🐛 DEBUG MODE: Loading review metadata from in-memory cache ({len(all_metadata)} reviews)")
|
| 870 |
-
return all_metadata
|
| 871 |
-
|
| 872 |
-
try:
|
| 873 |
-
api = HfApi()
|
| 874 |
-
token = get_hf_token()
|
| 875 |
-
|
| 876 |
-
# List all files in the repository
|
| 877 |
-
files = api.list_repo_files(repo_id=REVIEW_METADATA_REPO, repo_type="dataset")
|
| 878 |
-
|
| 879 |
-
# Filter for files matching the year pattern: [agent_identifier]/YYYY.MM.DD.jsonl
|
| 880 |
-
# Extract year from filename
|
| 881 |
-
year_str = str(year)
|
| 882 |
-
year_files = []
|
| 883 |
-
for f in files:
|
| 884 |
-
if f.endswith('.jsonl'):
|
| 885 |
-
parts = f.split('/')
|
| 886 |
-
if len(parts) == 2: # [agent_identifier]/YYYY.MM.DD.jsonl
|
| 887 |
-
filename = parts[1]
|
| 888 |
-
if filename.startswith(year_str + '.'):
|
| 889 |
-
year_files.append(f)
|
| 890 |
-
|
| 891 |
-
print(f"📥 Loading review metadata for {year} ({len(year_files)} daily files across all agents)...")
|
| 892 |
-
|
| 893 |
-
all_metadata = []
|
| 894 |
-
for filename in year_files:
|
| 895 |
-
try:
|
| 896 |
-
# Extract agent_identifier from path (first part)
|
| 897 |
-
# Format: agent_identifier/YYYY.MM.DD.jsonl
|
| 898 |
-
parts = filename.split('/')
|
| 899 |
-
if len(parts) != 2:
|
| 900 |
-
print(f" Warning: Unexpected filename format: {filename}")
|
| 901 |
-
continue
|
| 902 |
-
|
| 903 |
-
agent_identifier = parts[0]
|
| 904 |
-
|
| 905 |
-
file_path = hf_hub_download(
|
| 906 |
-
repo_id=REVIEW_METADATA_REPO,
|
| 907 |
-
filename=filename,
|
| 908 |
-
repo_type="dataset",
|
| 909 |
-
token=token
|
| 910 |
-
)
|
| 911 |
-
day_metadata = load_jsonl(file_path)
|
| 912 |
-
|
| 913 |
-
# Add agent_identifier to each review metadata for processing
|
| 914 |
-
for review_meta in day_metadata:
|
| 915 |
-
review_meta['agent_identifier'] = agent_identifier
|
| 916 |
-
|
| 917 |
-
all_metadata.extend(day_metadata)
|
| 918 |
-
print(f" ✓ Loaded {len(day_metadata)} reviews from {filename}")
|
| 919 |
-
except Exception as e:
|
| 920 |
-
print(f" Warning: Could not load {filename}: {str(e)}")
|
| 921 |
-
|
| 922 |
-
print(f"✓ Loaded {len(all_metadata)} total reviews for {year}")
|
| 923 |
-
return all_metadata
|
| 924 |
-
|
| 925 |
-
except Exception as e:
|
| 926 |
-
print(f"✗ Error loading review metadata for {year}: {str(e)}")
|
| 927 |
-
return []
|
| 928 |
-
|
| 929 |
-
|
| 930 |
-
def get_latest_review_date_for_agent(agent_identifier):
|
| 931 |
-
"""
|
| 932 |
-
Get the latest review creation date for an agent from stored metadata.
|
| 933 |
-
Used for incremental updates - only fetch reviews newer than this date.
|
| 934 |
-
|
| 935 |
-
Structure: [agent_identifier]/YYYY.MM.DD.jsonl
|
| 936 |
-
|
| 937 |
-
Args:
|
| 938 |
-
agent_identifier: GitHub identifier of the agent
|
| 939 |
-
|
| 940 |
-
Returns:
|
| 941 |
-
datetime or None if no existing reviews found.
|
| 942 |
-
"""
|
| 943 |
-
try:
|
| 944 |
-
api = HfApi()
|
| 945 |
-
token = get_hf_token()
|
| 946 |
-
|
| 947 |
-
# List all files in the repository
|
| 948 |
-
files = api.list_repo_files(repo_id=REVIEW_METADATA_REPO, repo_type="dataset")
|
| 949 |
-
|
| 950 |
-
# Filter for files in this agent's folder
|
| 951 |
-
# New structure: [agent_identifier]/YYYY.MM.DD.jsonl
|
| 952 |
-
agent_pattern = f"{agent_identifier}/"
|
| 953 |
-
agent_files = [f for f in files if f.startswith(agent_pattern) and f.endswith('.jsonl')]
|
| 954 |
-
|
| 955 |
-
if not agent_files:
|
| 956 |
-
return None
|
| 957 |
-
|
| 958 |
-
# Find latest review_at across all files
|
| 959 |
-
latest_date = None
|
| 960 |
-
for filename in agent_files:
|
| 961 |
-
try:
|
| 962 |
-
file_path = hf_hub_download(
|
| 963 |
-
repo_id=REVIEW_METADATA_REPO,
|
| 964 |
-
filename=filename,
|
| 965 |
-
repo_type="dataset",
|
| 966 |
-
token=token
|
| 967 |
-
)
|
| 968 |
-
metadata = load_jsonl(file_path)
|
| 969 |
-
|
| 970 |
-
for review_meta in metadata:
|
| 971 |
-
reviewed_at = review_meta.get("reviewed_at")
|
| 972 |
-
if reviewed_at:
|
| 973 |
-
try:
|
| 974 |
-
dt = datetime.fromisoformat(reviewed_at.replace("Z", "+00:00"))
|
| 975 |
-
if latest_date is None or dt > latest_date:
|
| 976 |
-
latest_date = dt
|
| 977 |
-
except Exception:
|
| 978 |
-
continue
|
| 979 |
-
except Exception:
|
| 980 |
-
continue
|
| 981 |
-
|
| 982 |
-
return latest_date
|
| 983 |
-
|
| 984 |
-
except Exception:
|
| 985 |
-
return None
|
| 986 |
-
|
| 987 |
-
|
| 988 |
-
def get_already_mined_dates(agent_identifier, n_months=6):
|
| 989 |
-
"""
|
| 990 |
-
Get set of dates that have already been mined for an agent.
|
| 991 |
-
|
| 992 |
-
Args:
|
| 993 |
-
agent_identifier: GitHub identifier of the agent
|
| 994 |
-
n_months: Number of months to look back (default: 6)
|
| 995 |
-
|
| 996 |
-
Returns:
|
| 997 |
-
Set of date objects (datetime.date) that already have data files
|
| 998 |
-
"""
|
| 999 |
-
try:
|
| 1000 |
-
api = HfApi()
|
| 1001 |
-
|
| 1002 |
-
# Calculate date range
|
| 1003 |
-
today = datetime.now(timezone.utc)
|
| 1004 |
-
n_months_ago = today - timedelta(days=30 * n_months)
|
| 1005 |
-
|
| 1006 |
-
# List all files in the repository
|
| 1007 |
-
files = api.list_repo_files(repo_id=REVIEW_METADATA_REPO, repo_type="dataset")
|
| 1008 |
-
|
| 1009 |
-
# Filter for files in this agent's folder
|
| 1010 |
-
agent_pattern = f"{agent_identifier}/"
|
| 1011 |
-
agent_files = [f for f in files if f.startswith(agent_pattern) and f.endswith('.jsonl')]
|
| 1012 |
-
|
| 1013 |
-
mined_dates = set()
|
| 1014 |
-
for filename in agent_files:
|
| 1015 |
-
try:
|
| 1016 |
-
# Extract date from filename: [agent_identifier]/YYYY.MM.DD.jsonl
|
| 1017 |
-
parts = filename.split('/')
|
| 1018 |
-
if len(parts) != 2:
|
| 1019 |
-
continue
|
| 1020 |
-
|
| 1021 |
-
date_part = parts[1].replace('.jsonl', '') # Get YYYY.MM.DD
|
| 1022 |
-
date_components = date_part.split('.')
|
| 1023 |
-
if len(date_components) != 3:
|
| 1024 |
-
continue
|
| 1025 |
-
|
| 1026 |
-
file_year, file_month, file_day = map(int, date_components)
|
| 1027 |
-
file_date = datetime(file_year, file_month, file_day, tzinfo=timezone.utc).date()
|
| 1028 |
-
|
| 1029 |
-
# Only include dates within the last n_months
|
| 1030 |
-
if n_months_ago.date() <= file_date <= today.date():
|
| 1031 |
-
mined_dates.add(file_date)
|
| 1032 |
-
except Exception as e:
|
| 1033 |
-
print(f" Warning: Could not parse date from filename {filename}: {e}")
|
| 1034 |
-
continue
|
| 1035 |
-
|
| 1036 |
-
return mined_dates
|
| 1037 |
-
|
| 1038 |
-
except Exception as e:
|
| 1039 |
-
print(f" Warning: Could not get already-mined dates for {agent_identifier}: {str(e)}")
|
| 1040 |
-
return set()
|
| 1041 |
-
|
| 1042 |
-
|
| 1043 |
-
|
| 1044 |
-
|
-def calculate_review_stats_from_metadata(metadata_list):
-    """
-    Calculate statistics from a list of review metadata (lightweight objects).
-    Works with minimal metadata: html_url, reviewed_at, pr_status, pr_merged, pr_closed_at.
-
-    Returns a dictionary with comprehensive review metrics.
-
-    Acceptance Rate is calculated as:
-        merged PRs / (merged PRs + rejected PRs) * 100
-
-    Merged PRs = PRs that were merged (pr_status='merged')
-    Rejected PRs = PRs that were closed without merging (pr_status='closed')
-    Pending PRs = PRs still open (pr_status='open') - excluded from acceptance rate
-    """
-    total_reviews = len(metadata_list)
-
-    # Count merged PRs
-    merged_prs = sum(1 for review_meta in metadata_list
-                     if review_meta.get('pr_status') == 'merged')
-
-    # Count rejected PRs (closed without merging)
-    rejected_prs = sum(1 for review_meta in metadata_list
-                       if review_meta.get('pr_status') == 'closed')
-
-    # Count pending PRs (still open)
-    pending_prs = sum(1 for review_meta in metadata_list
-                      if review_meta.get('pr_status') == 'open')
-
-    # Calculate acceptance rate (exclude pending PRs)
-    completed_prs = merged_prs + rejected_prs
-    acceptance_rate = (merged_prs / completed_prs * 100) if completed_prs > 0 else 0
-
-    return {
-        'total_reviews': total_reviews,
-        'merged_prs': merged_prs,
-        'rejected_prs': rejected_prs,
-        'pending_prs': pending_prs,
-        'acceptance_rate': round(acceptance_rate, 2),
-    }
-
-
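To make the acceptance-rate definition above concrete, a hedged usage sketch with made-up records (two merged, one rejected, one still open):

sample = [
    {'pr_status': 'merged'},
    {'pr_status': 'merged'},
    {'pr_status': 'closed'},
    {'pr_status': 'open'},
]

merged = sum(1 for m in sample if m.get('pr_status') == 'merged')    # 2
rejected = sum(1 for m in sample if m.get('pr_status') == 'closed')  # 1
completed = merged + rejected                                        # 3; the open PR is excluded
print(round(merged / completed * 100, 2))  # 66.67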
-def update_all_agents_incremental():
-    """
-    Memory-efficient incremental update of review statistics for all agents.
-
-    Strategy:
-    1. For each agent, load existing data from SWE-Arena/review_metadata
-    2. Identify already-mined dates (based on filename: YYYY.MM.DD.jsonl)
-    3. Only fetch reviews from dates that haven't been mined yet (within last 6 months)
-    4. If no data exists at all, mine everything from scratch
-    5. Store minimal metadata (not full review objects) to avoid storage limits
-    6. Construct leaderboard from ALL stored metadata (last 6 months)
-
-    Returns dictionary of all agent data with current stats.
-    """
-    token = get_github_token()
-    current_year = datetime.now().year
-
-    # Load agent metadata from HuggingFace
-    agents = load_agents_from_hf()
-    if not agents:
-        print("No agents found in HuggingFace dataset")
-        return {}
-
-    cache_dict = {}
-
-    # Update each agent
-    for agent in agents:
-        identifier = agent.get('github_identifier')
-        agent_name = agent.get('agent_name', 'Unknown')
-
-        if not identifier:
-            print(f"Warning: Skipping agent without identifier: {agent}")
-            continue
-
-        try:
-            print(f"\n{'='*80}")
-            print(f"Processing: {agent_name} ({identifier})")
-            print(f"{'='*80}")
-
-            # Get already-mined dates for this agent (last 6 months)
-            already_mined_dates = get_already_mined_dates(identifier, n_months=6)
-
-            if already_mined_dates:
-                print(f"📅 Found {len(already_mined_dates)} already-mined dates")
-                print(f" Skipping these dates and fetching only new data...")
-                # Fetch only reviews from dates not yet mined
-                new_metadata = fetch_all_reviews_metadata(
-                    identifier,
-                    agent_name,
-                    token,
-                    start_from_date=None,  # Use full 6-month range
-                    exclude_dates=already_mined_dates  # But exclude already-mined dates
-                )
-            else:
-                print(f"📅 No existing data found. Mining everything from scratch...")
-                # Mine everything from scratch (full 6-month range)
-                new_metadata = fetch_all_reviews_metadata(
-                    identifier,
-                    agent_name,
-                    token,
-                    start_from_date=None
-                )
-
-            if new_metadata:
-                # Save new metadata to HuggingFace (organized by agent_identifier/YYYY.MM.DD.jsonl)
-                print(f"💾 Saving {len(new_metadata)} new review records...")
-                save_review_metadata_to_hf(new_metadata, identifier)
-            else:
-                print(f" No new reviews to save")
-
-            # Load ALL metadata for current year to calculate stats (aggregates entire last 6 months)
-            print(f"📊 Calculating statistics from ALL stored metadata (last 6 months)...")
-            all_year_metadata = load_review_metadata_for_year(current_year)
-
-            # Filter for this specific agent
-            agent_metadata = [review for review in all_year_metadata if review.get("agent_identifier") == identifier]
-
-            # Calculate stats from metadata
-            stats = calculate_review_stats_from_metadata(agent_metadata)
-
-            # Merge metadata with stats
-            cache_dict[identifier] = {
-                'agent_name': agent_name,
-                'website': agent.get('website', 'N/A'),
-                'github_identifier': identifier,
-                **stats
-            }
-
-            print(f"✓ Updated {identifier}: {stats['total_reviews']} reviews, {stats['acceptance_rate']}% acceptance rate")
-
-        except Exception as e:
-            print(f"✗ Error updating {identifier}: {str(e)}")
-            import traceback
-            traceback.print_exc()
-            continue
-
-    return cache_dict
-
-
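The incremental strategy in the docstring above boils down to set arithmetic on days: candidate days in the look-back window minus days that already have a JSONL file. A standalone sketch under that assumption (the helper name and window length are illustrative, not part of the removed code):

from datetime import datetime, timedelta, timezone

def dates_to_mine(already_mined_dates, n_months=6):
    """Days in the look-back window that still need fetching."""
    today = datetime.now(timezone.utc).date()
    window_start = today - timedelta(days=30 * n_months)
    all_days = {window_start + timedelta(days=i)
                for i in range((today - window_start).days + 1)}
    return all_days - set(already_mined_dates)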
-def run_once():
-    print("\n🚀 Immediate mining run started")
-    cache_dict = update_all_agents_incremental()
-    if cache_dict:
-        print(f"✓ Updated {len(cache_dict)} agents")
-    print("✅ Immediate mining run completed\n")
-
-
-def main():
-    if DEBUG_MODE:
-        print("\n" + "="*80)
-        print("🐛 DEBUG MODE ENABLED 🐛")
-        print("="*80)
-        print("PR retrieval is limited to 10 PRs per query pattern per agent")
-        print("Data will NOT be saved to HuggingFace in debug mode.")
-        print("="*80 + "\n")
-    else:
-        print("\n🚀 Starting in PRODUCTION MODE - full review retrieval enabled")
-        print()
-
-    if not args.loop:
-        run_once()
-        return
-
-    print(f"🔁 Loop mode enabled. Interval: {args.interval_seconds} seconds")
-    try:
-        while True:
-            start = time.time()
-            run_once()
-            elapsed = time.time() - start
-            sleep_for = max(0, args.interval_seconds - int(elapsed))
-            if sleep_for > 0:
-                print(f"😴 Sleeping {sleep_for} seconds before next run...")
-                time.sleep(sleep_for)
-    except KeyboardInterrupt:
-        print("\n👋 Loop interrupted by user. Exiting...")
-
-
-if __name__ == "__main__":
-    main()
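As a closing note, the removed main loop uses the common fixed-cadence pattern: time each run and sleep only for the remainder of the interval, so run starts stay roughly interval_seconds apart. A generic standalone sketch (names are placeholders, not from the diff):

import time

def run_forever(job, interval_seconds=3600):
    # Keep roughly a fixed cadence between run starts, even when `job` is slow.
    while True:
        start = time.time()
        job()
        elapsed = time.time() - start
        sleep_for = max(0, interval_seconds - int(elapsed))
        if sleep_for > 0:
            time.sleep(sleep_for)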