# FIN_ASSISTANT/core/analyzer.py
"""
🇬🇧 Module: analyzer.py()
Purpose: Handles single-portfolio analysis using LLM. Fetches metrics, builds prompt, streams reasoning.
🇷🇺 Модуль: analyzer.py
Назначение: анализ одного инвестиционного портфеля с использованием LLM. Получает метрики, формирует промпт, возвращает потоковый ответ.
"""
import asyncio
from typing import Generator
from services.output_api import extract_portfolio_id, fetch_metrics_async
from services.llm_client import llm_service
from prompts.system_prompts import ANALYSIS_SYSTEM_PROMPT
from prompts.reference_templates import REFERENCE_PROMPT
class PortfolioAnalyzer:
"""Main use-case class for analyzing a single portfolio."""
def __init__(self, llm=llm_service, model_name: str = "meta-llama/Meta-Llama-3.1-8B-Instruct"):
self.llm = llm
self.model_name = model_name
    def run(self, text: str) -> Generator[str, None, None]:
        """Stream the analysis, yielding the cumulative answer text at each step."""
        portfolio_id = extract_portfolio_id(text)
        if not portfolio_id:
            yield "❗ Please enter a portfolio ID."
            return
        yield "⏳ Working..."
        try:
            # fetch_metrics_async is a coroutine; this generator is synchronous,
            # so drive it to completion with asyncio.run().
            metrics = asyncio.run(fetch_metrics_async(portfolio_id))
        except Exception as e:
            yield f"❌ Failed to fetch metrics: {e}"
            return
        if not metrics:
            yield "❗ No metrics could be retrieved for this portfolio."
            return
        # Flatten the metrics dict into "key: value" pairs for the prompt.
        metrics_text = ", ".join(f"{k}: {v}" for k, v in metrics.items())
        prompt = f"{REFERENCE_PROMPT}\n\nUse this data for the analysis:\n{metrics_text}"
        try:
            messages = [
                {"role": "system", "content": ANALYSIS_SYSTEM_PROMPT},
                {"role": "user", "content": prompt},
            ]
            # Accumulate deltas and yield the growing text so a streaming UI
            # can re-render the full answer on every update.
            partial = ""
            for delta in self.llm.stream_chat(messages=messages, model=self.model_name):
                partial += delta
                yield partial
        except Exception as e:
            yield f"❌ LLM error: {e}"
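

# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal smoke test, assuming the backing services (fetch_metrics_async,
# llm_service) are reachable and that extract_portfolio_id can pull an ID out
# of free text. The portfolio ID below is made up. In the real app this
# generator is more likely consumed by a streaming UI that re-renders each
# cumulative chunk.
if __name__ == "__main__":
    analyzer = PortfolioAnalyzer()
    last_chunk = ""
    for chunk in analyzer.run("Analyze portfolio 12345"):
        last_chunk = chunk  # keep only the latest cumulative state
    print(last_chunk)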