Spaces:
Sleeping
Sleeping
File size: 2,732 Bytes
b2d5b74 1ed2dd4 b2d5b74 15e92d0 b2d5b74 7574047 678027d b2d5b74 678027d b2d5b74 175d4af b2d5b74 175d4af b2d5b74 175d4af b2d5b74 175d4af b2d5b74 175d4af b2d5b74 678027d b2d5b74 15e92d0 b2d5b74 678027d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 |
"""
🇬🇧 Module: comparer.py
Purpose: Compares two portfolios using LLM. Fetches metrics for both and builds a unified comparison prompt.
🇷🇺 Модуль: comparer.py
Назначение: сравнение двух инвестиционных портфелей с помощью LLM. Получает метрики обоих портфелей, формирует промпт и возвращает потоковый результат.
"""
import asyncio
from typing import Generator
from services.output_api import extract_portfolio_id, fetch_metrics_async
from services.llm_client import llm_service
from prompts.system_prompts import COMPARISON_SYSTEM_PROMPT
from prompts.reference_templates import REFERENCE_COMPARISON_PROMPT
class PortfolioComparer:
    """Main use-case class for comparing two investment portfolios via an LLM.

    Extracts a portfolio ID from each input text, fetches metrics for both
    portfolios concurrently, builds a unified comparison prompt, and streams
    the LLM's answer back to the caller.
    """

    def __init__(self, llm=llm_service, model_name: str = "meta-llama/Meta-Llama-3.1-8B-Instruct"):
        # llm: any client exposing stream_chat(messages=..., model=...) that
        # yields text deltas; defaults to the project-wide llm_service.
        self.llm = llm
        self.model_name = model_name

    @staticmethod
    async def _fetch_both(id1: str, id2: str):
        """Fetch metrics for both portfolio IDs concurrently in one event loop."""
        return await asyncio.gather(fetch_metrics_async(id1), fetch_metrics_async(id2))

    def run(self, text1: str, text2: str) -> Generator[str, None, None]:
        """Stream comparison results between two portfolios.

        Args:
            text1: Free-form text containing the first portfolio ID.
            text2: Free-form text containing the second portfolio ID.

        Yields:
            Status messages first, then progressively longer partial LLM
            output (each yield is the full accumulated answer so far).
        """
        id1 = extract_portfolio_id(text1)
        id2 = extract_portfolio_id(text2)
        # Validate before comparing: an unparseable input should report
        # "incorrect", not "same portfolio".
        if not id1 or not id2:
            yield "❗ One of two portfolios is empty or incorrect."
            return
        # BUG FIX: compare the extracted IDs, not the raw texts — two
        # differently-worded inputs can still name the same portfolio.
        if id1 == id2:
            yield "❗ Please provide two different portfolio IDs."
            return
        yield "⏳ Working..."
        try:
            # Single asyncio.run with gather instead of two sequential
            # event loops — both fetches overlap their I/O waits.
            m1, m2 = asyncio.run(self._fetch_both(id1, id2))
        except Exception as e:
            yield f"❌ There was an issue while collecting data: {e}"
            return
        if not m1 or not m2:
            yield "❗ One of two portfolios is empty or has incorrect data."
            return
        m1_text = ", ".join(f"{k}: {v}" for k, v in m1.items())
        m2_text = ", ".join(f"{k}: {v}" for k, v in m2.items())
        # Prompt body is intentionally in Russian to match the reference
        # template and system prompt the model was tuned against.
        prompt = (
            f"{REFERENCE_COMPARISON_PROMPT}\n"
            f"Используй эти данные для сравнения:\n"
            f"Портфель A: {m1_text}\n"
            f"Портфель B: {m2_text}"
        )
        try:
            messages = [
                {"role": "system", "content": COMPARISON_SYSTEM_PROMPT},
                {"role": "user", "content": prompt},
            ]
            partial = ""
            # Accumulate deltas and re-yield the growing text so UIs that
            # replace (rather than append) output render correctly.
            for delta in self.llm.stream_chat(messages=messages, model=self.model_name):
                partial += delta
                yield partial
        except Exception as e:
            yield f"❌ Ошибка при сравнении портфелей через LLM: {e}"
|