Spaces:
Running
Running
import json
import os

# Load prompt templates from the PROMPT_TEMPLATES environment variable.
# The '{}' default guarantees json.loads() receives valid JSON even when
# the variable is unset.
templates_json = os.getenv('PROMPT_TEMPLATES', '{}')
try:
    prompt_data = json.loads(templates_json)
except json.JSONDecodeError:
    # Invalid JSON in the environment variable: fall back to no templates.
    prompt_data = {}
# Robustness fix: valid JSON may still be a list/str/number, which would
# break the .items() calls below — coerce anything non-dict to {}.
if not isinstance(prompt_data, dict):
    prompt_data = {}
print(prompt_data)

# Map each template key to its human-readable description.
# (The original trailing `if prompt_data else {}` was redundant: a dict
# comprehension over an empty dict already yields {}.)
metaprompt_explanations = {
    key: data.get("description", "No description available")
    for key, data in prompt_data.items()
}

# Render the explanations as a markdown bullet list (one "- **key**: value"
# line per template; empty string when there are no templates).
explanation_markdown = "".join(
    f"- **{key}**: {value}\n"
    for key, value in metaprompt_explanations.items()
)
# Model identifiers offered in the UI, grouped by family and ordered
# roughly largest-to-smallest within each group.
_llama_models = [
    "meta-llama/Meta-Llama-3-70B-Instruct",
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "meta-llama/Llama-3.1-70B-Instruct",
    "meta-llama/Llama-3.1-8B-Instruct",
    "meta-llama/Llama-3.2-3B-Instruct",
    "meta-llama/Llama-3.2-1B-Instruct",
    "meta-llama/Llama-2-13b-chat-hf",
    "meta-llama/Llama-2-7b-chat-hf",
]
_zephyr_models = [
    "HuggingFaceH4/zephyr-7b-beta",
    "HuggingFaceH4/zephyr-7b-alpha",
]
_qwen_models = [
    "Qwen/Qwen2.5-72B-Instruct",
    "Qwen/Qwen2.5-1.5B",
]
_phi_models = [
    "microsoft/Phi-3.5-mini-instruct",
]
# Flattened list in the original display order.
models = _llama_models + _zephyr_models + _qwen_models + _phi_models
# The Hugging Face API token is mandatory: fail fast at import time
# instead of on the first API call.
api_token = os.environ.get('HF_API_TOKEN')
if not api_token:
    raise ValueError("HF_API_TOKEN not found in environment variables")
# Map each template key to its raw template text, defaulting when a
# template entry lacks a "template" field.
# NOTE(review): this value appears to be a dead store — it is
# unconditionally overwritten by a later hard-coded meta_prompts
# assignment in this file; confirm which of the two assignments is
# intended before removing either.
meta_prompts = {
    key: data.get("template", "No template available")
    for key, data in prompt_data.items()
} if prompt_data else {}
# Refiner model selection: prefer the lowercase 'prompt_refiner_model'
# override, then the uppercase 'PROMPT_REFINER_MODEL' setting, then a
# hard-coded default.
# BUG FIX: the original assigned the defaulted value and then immediately
# clobbered it with os.getenv('prompt_refiner_model'), so the default was
# lost (result was None) whenever the lowercase variable was unset.
prompt_refiner_model = os.getenv(
    'prompt_refiner_model',
    os.getenv('PROMPT_REFINER_MODEL', 'meta-llama/Llama-3.1-8B-Instruct'),
)

# Individual meta-prompt templates, each supplied via its own environment
# variable. Any of these may be None when the variable is unset.
echo_prompt_refiner = os.getenv('echo_prompt_refiner')
metadone = os.getenv('metadone')
metaprompt1 = os.getenv('metaprompt1')
loic_metaprompt = os.getenv('loic_metaprompt')
openai_metaprompt = os.getenv('openai_metaprompt')
original_meta_prompt = os.getenv('original_meta_prompt')
new_meta_prompt = os.getenv('new_meta_prompt')
advanced_meta_prompt = os.getenv('advanced_meta_prompt')
math_meta_prompt = os.getenv('metamath')
autoregressive_metaprompt = os.getenv('autoregressive_metaprompt')
# Final mapping from each UI mode name to its meta-prompt template text
# (values come from the environment variables loaded above and may be None).
meta_prompts = dict(
    morphosis=original_meta_prompt,
    verse=new_meta_prompt,
    physics=metaprompt1,
    bolism=loic_metaprompt,
    done=metadone,
    star=echo_prompt_refiner,
    math=math_meta_prompt,
    arpe=autoregressive_metaprompt,
)