"""
🇬🇧 Module: comparison_table.py
Purpose: Generates comparative DataFrame for two portfolios and an LLM commentary.
🇷🇺 Модуль: comparison_table.py
Назначение: создаёт сравнительную таблицу метрик двух портфелей и комментарий LLM.
"""
import asyncio

import pandas as pd

from services.output_api import fetch_metrics_async, extract_portfolio_id
from services.llm_client import llm_service
from prompts.system_prompts import COMPARISON_SYSTEM_PROMPT
def show_comparison_table(portfolio_a: str, portfolio_b: str):
    """Public Gradio entry point: returns the comparison DataFrame and LLM commentary.

    Resolves both portfolio identifiers first; if either is invalid, or if
    building the comparison fails, user-facing error strings are returned
    in place of the two outputs instead of raising.
    """
    id_a = extract_portfolio_id(portfolio_a)
    id_b = extract_portfolio_id(portfolio_b)
    # Guard clause: bail out before touching the network if either ID is bad.
    if not (id_a and id_b):
        return "❌ Invalid portfolio IDs.", "No commentary available."
    try:
        table, comment = asyncio.run(_build_comparison_with_comment(id_a, id_b))
    except Exception as exc:
        # Boundary handler: surface the failure to the UI rather than crash.
        return f"❌ Error building comparison table: {exc}", "❌ LLM analysis failed."
    return table, comment
async def _build_comparison_with_comment(p1: str, p2: str):
    """Async helper: build the metric comparison table and its LLM commentary.

    Args:
        p1: First portfolio ID.
        p2: Second portfolio ID.

    Returns:
        tuple[pd.DataFrame, str]: comparison table and commentary text.

    Raises:
        ValueError: if metrics are unavailable for either portfolio.
    """
    # Fetch both metric sets concurrently instead of one after the other —
    # the calls are independent, so this halves the wall-clock wait.
    m1, m2 = await asyncio.gather(fetch_metrics_async(p1), fetch_metrics_async(p2))
    if not m1 or not m2:
        raise ValueError("Metrics unavailable for one or both portfolios.")

    # Union of metric names so a metric present in only one portfolio still
    # appears in the table; missing values default to 0.
    all_keys = sorted(set(m1) | set(m2))
    rows = []
    for k in all_keys:
        v1 = m1.get(k, 0)
        v2 = m2.get(k, 0)
        diff = v1 - v2
        symbol = "▲" if diff > 0 else "▼" if diff < 0 else "—"
        rows.append({
            "Metric": k,
            "Portfolio A": round(v1, 3),
            "Portfolio B": round(v2, 3),
            "Δ Difference": f"{symbol} {diff:+.3f}",
        })
    df = pd.DataFrame(rows, columns=["Metric", "Portfolio A", "Portfolio B", "Δ Difference"])

    # Generate LLM commentary from a compact per-metric delta summary.
    summary = "\n".join(f"{r['Metric']}: {r['Δ Difference']}" for r in rows)
    prompt = (
        f"{COMPARISON_SYSTEM_PROMPT}\n"
        f"Compare and explain the differences between Portfolio A and B:\n{summary}\n"
        f"Write your insights as a concise professional commentary."
    )
    # NOTE(review): stream_chat is iterated with a plain (non-async) for loop
    # inside an async function — presumably it is a synchronous generator of
    # text deltas; confirm it does not block the event loop for long.
    commentary = ""
    for delta in llm_service.stream_chat(
        messages=[
            {"role": "system", "content": "You are an investment portfolio analyst."},
            {"role": "user", "content": prompt},
        ],
        model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    ):
        commentary += delta
    return df, commentary