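"""Real-time Fact Checker & News Agent.

A Gradio app that queries Groq's compound models (groq/compound and
groq/compound-mini), which come with built-in web search, to answer
fact-checking and current-events questions.
"""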
import os
import gradio as gr
from groq import Groq
from datetime import datetime
import time


class RealTimeFactChecker:
    def __init__(self):
        self.client = None
        self.model_options = ["groq/compound", "groq/compound-mini"]

    def initialize_client(self, api_key):
        """Create the Groq client for the given API key."""
        try:
            self.client = Groq(api_key=api_key)
            return True, "✅ API Key validated successfully!"
        except Exception as e:
            return False, f"❌ Error initializing client: {str(e)}"

    def get_system_prompt(self):
        return """You are a Real-time Fact Checker and News Agent. Your primary role is to provide accurate, up-to-date information by leveraging web search when needed.

CORE RESPONSIBILITIES:
1. **Fact Verification**: Always verify claims with current, reliable sources
2. **Real-time Information**: Use web search for any information that changes frequently (news, stocks, weather, current events)
3. **Source Transparency**: When using web search, mention the sources or indicate that you've searched for current information
4. **Accuracy First**: If information is uncertain or conflicting, acknowledge this clearly

RESPONSE GUIDELINES:
- **Structure**: Start with a clear, direct answer, then provide supporting details
- **Recency**: Always prioritize the most recent, reliable information
- **Clarity**: Use clear, professional language while remaining accessible
- **Completeness**: Provide comprehensive answers but stay focused on the query
- **Source Awareness**: When you've searched for information, briefly indicate this (e.g., "Based on current reports..." or "Recent data shows...")

WHEN TO SEARCH:
- Breaking news or current events
- Stock prices, market data, or financial information
- Weather conditions or forecasts
- Recent scientific discoveries or research
- Current political developments
- Real-time statistics or data
- Verification of recent claims or rumors

RESPONSE FORMAT:
- Lead with key facts
- Include relevant context
- Mention timeframe when relevant (e.g., "as of today", "this week")
- If multiple sources conflict, acknowledge this
- End with a clear summary for complex topics

Remember: Your goal is to be the most reliable, up-to-date source of information possible."""

    def query_compound_model(self, query, model, temperature=0.7):
        """Query the selected compound model and return (response, tool info, response time)."""
        if not self.client:
            return "❌ Please set a valid API key first.", None, None
        try:
            start_time = time.time()
            system_prompt = self.get_system_prompt()
            chat_completion = self.client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": system_prompt
                    },
                    {
                        "role": "user",
                        "content": query,
                    },
                ],
                model=model,
                temperature=temperature,
                max_tokens=1500
            )
            end_time = time.time()
            response_time = round(end_time - start_time, 2)
            response_content = chat_completion.choices[0].message.content  # response to the question
            executed_tools = getattr(chat_completion.choices[0].message, 'executed_tools', None)
            tool_info = self.format_tool_info(executed_tools)
            return response_content, tool_info, response_time
        except Exception as e:
            return f"❌ Error querying model: {str(e)}", None, None

    def format_tool_info(self, executed_tools):
        """Format executed tools information for display"""
        if not executed_tools:
            return "🔍 Tools Used: None (Used existing knowledge)"
        tool_info = "🔍 Tools Used:\n"
        for i, tool in enumerate(executed_tools, 1):
            try:
                if hasattr(tool, 'name'):
                    tool_name = tool.name
                elif hasattr(tool, 'tool_name'):
                    tool_name = tool.tool_name
                elif isinstance(tool, dict):
                    tool_name = tool.get('name', 'Unknown')
                else:
                    tool_name = str(tool)
                tool_info += f"{i}. {tool_name}\n"
                if hasattr(tool, 'parameters'):
                    params = tool.parameters
                    if isinstance(params, dict):
                        for key, value in params.items():
                            tool_info += f" - {key}: {value}\n"
                elif hasattr(tool, 'input'):
                    tool_info += f" - Input: {tool.input}\n"
            except Exception:
                tool_info += f"{i}. Tool {i} (Error parsing details)\n"
        return tool_info
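
# Standalone usage sketch (kept as comments so nothing runs on import; it assumes a
# valid key is available in a GROQ_API_KEY environment variable, which is an
# assumption of this example rather than something the app itself reads):
#
#   checker = RealTimeFactChecker()
#   ok, msg = checker.initialize_client(os.environ["GROQ_API_KEY"])
#   if ok:
#       answer, tools, seconds = checker.query_compound_model(
#           "What are the latest AI developments today?", "groq/compound"
#       )
#       print(answer)
#       print(tools)
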
#### UI Design
def create_interface():
    """Build the Gradio interface for the fact checker."""
    fact_checker = RealTimeFactChecker()

    def validate_api_key(api_key):
        if not api_key or api_key.strip() == "":
            return "❌ Please enter a valid API key", False
        success, message = fact_checker.initialize_client(api_key.strip())
        return message, success

    def process_query(query, model, temperature, api_key):
        if not api_key or api_key.strip() == "":
            return "❌ Please set your API key first", "", ""
        if not query or query.strip() == "":
            return "❌ Please enter a query", "", ""
        if not fact_checker.client:
            success, message = fact_checker.initialize_client(api_key.strip())
            if not success:
                return message, "", ""
        response, tool_info, response_time = fact_checker.query_compound_model(
            query.strip(), model, temperature
        )
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        timing = f"{response_time}s" if response_time is not None else "n/a"  # response_time is None when the query errored
        formatted_response = f"**Query:** {query}\n\n**Response:**\n{response}\n\n---\n*Generated at {timestamp} in {timing}*"  # 11/11/2025 Query Response Time: 20s
        return formatted_response, tool_info or "", f"Response time: {timing}"

    with gr.Blocks(title="Real-time Fact Checker & News Agent", theme=gr.themes.Ocean()) as demo:
        gr.Markdown("# Real-time Fact Checker & News Agent")
        gr.Markdown("Powered by Groq's Compound Models with Built-in Web Search")
        with gr.Row():
            with gr.Column():
                api_key_input = gr.Textbox(
                    label="Groq API Key",
                    placeholder="Enter your Groq API key here...",
                    type="password",
                    info="Get your free API key from https://console.groq.com/"
                )
                api_status = gr.Textbox(
                    label="Status",
                    value="Please enter your API key",
                    interactive=False
                )
                validate_btn = gr.Button("Validate API Key")
                query_input = gr.Textbox(
                    label="Query",
                    placeholder="e.g., What are the latest AI developments today?",
                    lines=4
                )
                with gr.Row():
                    model_choice = gr.Dropdown(
                        choices=fact_checker.model_options,
                        value="groq/compound",
                        label="Model",
                        info="compound: More capable | compound-mini: Faster"
                    )
                    temperature = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.7,
                        step=0.1,
                        label="Temperature",
                        info="Higher = more creative, Lower = more focused"
                    )
                with gr.Row():
                    submit_btn = gr.Button("Get Real-time Information")
                    clear_btn = gr.Button("Clear")
            with gr.Column():
                response_output = gr.Markdown(
                    label="Response",
                    value="*Your response will appear here...*"
                )
                tool_info_output = gr.Markdown(
                    label="Tool Execution Info",
                    value="*Tool execution details will appear here...*"
                )
                performance_output = gr.Textbox(
                    label="Performance",
                    value="",
                    interactive=False
                )
        validate_btn.click(
            fn=validate_api_key,
            inputs=[api_key_input],
            outputs=[api_status, gr.State()]
        )
        submit_btn.click(
            fn=process_query,
            inputs=[query_input, model_choice, temperature, api_key_input],
            outputs=[response_output, tool_info_output, performance_output]
        )
        clear_btn.click(
            fn=lambda: ("", "*Your response will appear here...*", "*Tool execution details will appear here...*", ""),
            outputs=[query_input, response_output, tool_info_output, performance_output]
        )
    return demo


if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
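
# Optional launch variants (a sketch; the plain demo.launch() above is all the app needs):
#   demo.queue().launch()      # enable request queuing for concurrent users
#   demo.launch(share=True)    # additionally create a temporary public share link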