"""
🇬🇧 Module: analyzer.py()
Purpose: Handles single-portfolio analysis using LLM. Fetches metrics, builds prompt, streams reasoning.

🇷🇺 Модуль: analyzer.py
Назначение: анализ одного инвестиционного портфеля с использованием LLM. Получает метрики, формирует промпт, возвращает потоковый ответ.
"""

import asyncio
from typing import Generator
from services.output_api import extract_portfolio_id, fetch_metrics_async
from services.llm_client import llm_service
from prompts.system_prompts import ANALYSIS_SYSTEM_PROMPT
from prompts.reference_templates import REFERENCE_PROMPT


class PortfolioAnalyzer:
    """Main use-case class for analyzing a single portfolio."""

    def __init__(self, llm=llm_service, model_name: str = "meta-llama/Meta-Llama-3.1-8B-Instruct"):
        self.llm = llm
        self.model_name = model_name

    def run(self, text: str) -> Generator[str, None, None]:
        """Yield status messages, then the progressively accumulated LLM response."""
        portfolio_id = extract_portfolio_id(text)
        if not portfolio_id:
            yield "❗ Please enter a portfolio ID."
            return

        yield "⏳ Working..."
        try:
            metrics = asyncio.run(fetch_metrics_async(portfolio_id))
        except Exception as e:
            yield f"❌ Fail to collect metrics: {e}"
            return

        if not metrics:
            yield "❗ No metrics were returned for this portfolio."
            return

        metrics_text = ", ".join(f"{k}: {v}" for k, v in metrics.items())
        prompt = f"{REFERENCE_PROMPT}\n\nИспользуй эти данные для анализа:\n{metrics_text}"

        try:
            messages = [
                {"role": "system", "content": ANALYSIS_SYSTEM_PROMPT},
                {"role": "user", "content": prompt},
            ]

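            # Accumulate deltas and yield the full text so far; a streaming
            # UI (e.g. Gradio) can simply re-render the growing response in
            # place.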
            partial = ""
            for delta in self.llm.stream_chat(messages=messages, model=self.model_name):
                partial += delta
                yield partial

        except Exception as e:
            yield f"❌ LLM's error: {e}"