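The core of the Space is a single generator function: given the user's free-form input, it extracts a portfolio ID, fetches that portfolio's metrics, and then streams an LLM-generated analysis back to the caller, yielding the partial answer as it grows.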
```python
from prompts import TEST1
from fetch import extract_portfolio_id, fetch_metrics_async
from useCasesReferences import REFERENCE_PROMPT
from openai import OpenAI
import asyncio


def analyze_portfolio_streaming(text: str, client: OpenAI):
    """Yield progress messages, then the LLM answer as it streams in."""
    portfolio_id = extract_portfolio_id(text)
    if not portfolio_id:
        yield "❗ Please provide a valid portfolioId or link."
        return

    yield "⏳ Loading portfolio metrics..."
    # Note: asyncio.run() raises RuntimeError if an event loop is already running.
    metrics = asyncio.run(fetch_metrics_async(portfolio_id))
    if not metrics:
        yield "❗ Could not fetch the portfolio metrics."
        return

    # Flatten the metrics into one line and append them to the reference prompt.
    metrics_text = ", ".join(f"{k}: {v}" for k, v in metrics.items())
    prompt = f"{REFERENCE_PROMPT} use these metrics for the analysis: {metrics_text}"

    try:
        response_llm = client.chat.completions.create(
            model="meta-llama/Meta-Llama-3.1-8B-Instruct",
            messages=[
                {"role": "system", "content": TEST1},
                {"role": "user", "content": prompt},
            ],
            stream=True,
        )
        # Accumulate the deltas and yield the growing text after each chunk,
        # so the caller can re-render the full answer on every update.
        partial = ""
        for chunk in response_llm:
            delta = chunk.choices[0].delta.content
            if delta:
                partial += delta
                yield partial
    except Exception as e:
        yield f"❌ Error while generating the response: {e}"
```