Spaces:
Sleeping
Sleeping
File size: 1,992 Bytes
06e5903 bd902db f4e5c13 06e5903 e545578 06e5903 fe31096 06e5903 e63adc7 bd902db e63adc7 380268b 06e5903 f4f3f95 e63adc7 23af882 e63adc7 f4e5c13 e63adc7 8219948 e63adc7 06e5903 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 |
import httpx
from prompts import ONE_PROMPT
from prompts import TEST1
from fetch import extract_portfolio_id
from openai import OpenAI
import asyncio
async def get_portfolio_metrics(portfolio_id: str) -> dict | None:
    """Fetch the numeric extended metrics for a TradeLink portfolio.

    Queries the public TradeLink API and keeps only the numeric
    (``int``/``float``) entries of the ``data.extended`` payload.

    Args:
        portfolio_id: TradeLink portfolio identifier to query.

    Returns:
        A dict mapping metric name -> numeric value, or ``None`` when the
        request fails, the server returns an error status, the body is not
        valid JSON, or the payload does not have the expected shape.
    """
    url = f"https://api.tradelink.pro/portfolio/get?portfolioId={portfolio_id}&extended=1"
    try:
        async with httpx.AsyncClient(timeout=10) as client:
            response = await client.get(url)
            # Bug fix: the original treated HTTP 4xx/5xx responses as
            # success and parsed their bodies; surface them as errors.
            response.raise_for_status()
            payload = response.json()
        extended = payload.get("data", {}).get("extended", {})
        # Guard against an unexpected payload shape (e.g. "extended" is a
        # list or null) rather than raising AttributeError on .items().
        if not isinstance(extended, dict):
            return None
        # Keep only numeric metrics; drop strings, nested objects, etc.
        return {k: v for k, v in extended.items() if isinstance(v, (int, float))}
    except Exception as e:  # boundary handler: report and degrade to None
        print(f"[API Error]: {e}")
        return None
def analyze_portfolio_streaming(text: str, client: OpenAI):
    """Stream an LLM analysis of a TradeLink portfolio as growing strings.

    Each yielded value is meant to fully replace the previous one in a
    streaming chat UI: first short status messages, then the accumulated
    model answer so far on every new token.

    Args:
        text: Free-form user input containing a portfolioId or a link.
        client: Configured OpenAI-compatible client used for completion.
    """
    pid = extract_portfolio_id(text)
    if not pid:
        yield "❗ Укажите корректный portfolioId или ссылку."
        return

    yield "⏳ Загружаю метрики портфеля..."
    # NOTE(review): asyncio.run assumes no event loop is already running
    # in this thread — confirm against the caller (e.g. a sync UI handler).
    metrics = asyncio.run(get_portfolio_metrics(pid))
    if not metrics:
        yield "❗ Не удалось получить метрики портфеля."
        return

    formatted = ", ".join(f"{name}: {value}" for name, value in metrics.items())
    prompt = f"Вот метрики портфеля: {formatted}. Проанализируй их. Дай общий отчёт на русском языке."

    try:
        stream = client.chat.completions.create(
            model="meta-llama/Meta-Llama-3.1-8B-Instruct",
            messages=[
                {"role": "system", "content": TEST1},
                {"role": "user", "content": prompt},
            ],
            stream=True,
        )
        accumulated = ""
        for part in stream:
            piece = part.choices[0].delta.content
            if not piece:  # skip keep-alive / empty deltas
                continue
            accumulated += piece
            yield accumulated
    except Exception as e:
        yield f"❌ Ошибка при генерации ответа: {e}"
|