"""Fix a malformed TunBERT evaluation-results JSON file and upload it.

Reads the original results file, normalizes known format problems
(null accuracy, emoji-prefixed model_type, float params-in-millions),
writes the fixed JSON next to the original, and uploads it to the
results dataset repo on the Hugging Face Hub when HF_TOKEN is set.
"""

import json
import os

from dotenv import load_dotenv
from huggingface_hub import HfApi

# Load environment variables (expects HF_TOKEN in .env or the environment).
load_dotenv()

# Configuration
HF_TOKEN = os.getenv("HF_TOKEN")
RESULTS_REPO = "hamzabouajila/results"


def read_results_file(file_path):
    """Return the parsed JSON payload stored at *file_path*."""
    # Explicit encoding: results files contain non-ASCII (emoji) text.
    with open(file_path, "r", encoding="utf-8") as f:
        return json.load(f)


def fix_results_format(results):
    """Normalize known format problems in a results dict, in place.

    - ``results['results']['accuracy']``: null -> 0.0 placeholder
    - ``model_type``: strip the leading '🟢 : ' status-emoji prefix
    - ``params``: float (millions) -> absolute integer parameter count

    Returns the same dict for call-chaining convenience.
    """
    # Fix null accuracy. Guard with .get so a missing 'results' key does
    # not raise KeyError. 0.0 is a placeholder; replace if the real
    # accuracy is known.
    if results.get('results', {}).get('accuracy') is None:
        results.setdefault('results', {})['accuracy'] = 0.0

    # Fix model_type format. NOTE: json.load decodes the JSON escape
    # \ud83d\udfe2 into the real character U+1F7E2 (🟢), so we must strip
    # the actual emoji. (A source literal '\ud83d\udfe2' is two lone
    # surrogates and would never match the loaded string.)
    if isinstance(results.get('model_type'), str):
        results['model_type'] = results['model_type'].replace('\U0001F7E2 : ', '').strip()

    # Convert params FROM millions (float) to an absolute integer count.
    if isinstance(results.get('params'), float):
        results['params'] = int(results['params'] * 1_000_000)

    return results


def upload_to_hf(results, file_name):
    """Upload *file_name* to RESULTS_REPO; return True on success.

    Best-effort: any upload error is reported and False is returned
    instead of crashing the script.
    """
    api = HfApi(token=HF_TOKEN)
    try:
        api.upload_file(
            path_or_fileobj=file_name,
            path_in_repo=os.path.basename(file_name),
            repo_id=RESULTS_REPO,
            repo_type="dataset",
            commit_message=f"Add evaluation results for {results['model']}",
        )
        print("Successfully uploaded to Hugging Face")
        return True
    except Exception as e:
        print(f"Error uploading to Hugging Face: {str(e)}")
        return False


if __name__ == "__main__":
    # Original file path
    original_file = "/teamspace/studios/this_studio/TunisianLeaderBoard/eval-results/tunis-ai/TunBERT_eval_request_False_float16_Original.json"

    # Read and fix the results
    results = read_results_file(original_file)
    fixed_results = fix_results_format(results)

    # Save the fixed version alongside the original.
    fixed_file = "/teamspace/studios/this_studio/TunisianLeaderBoard/eval-results/tunis-ai/TunBERT_eval_request_False_float16_Original_fixed.json"
    with open(fixed_file, "w", encoding="utf-8") as f:
        json.dump(fixed_results, f, indent=2)

    print(f"Fixed results saved to: {fixed_file}")

    # Try to upload to Hugging Face (skipped when no token is configured).
    if HF_TOKEN:
        upload_to_hf(fixed_results, fixed_file)
    else:
        print("No HF_TOKEN found. Skipping Hugging Face upload.")