import os

import gradio as gr
from transformers import AutoTokenizer, pipeline
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from huggingface_hub import login

# Authenticate with the Hugging Face Hub using the Space's secret token
hf_token = os.environ["HF_TOKEN"]
login(token=hf_token)
# Load the GRPO fine-tuned model
model_name = "SelmaNajih001/GRPORagMinstral2"
tokenizer = AutoTokenizer.from_pretrained(model_name)

pipe = pipeline(
    "text-generation",
    model=model_name,
    tokenizer=tokenizer,  # reuse the tokenizer object loaded above
    max_new_tokens=400,
    do_sample=True,       # required for temperature to take effect
    temperature=0.5,
    num_beams=6,
    repetition_penalty=1.5,
)
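# Quick smoke test of the generation pipeline (hypothetical prompt, for
# illustration only; not part of the app's control flow):
#   print(pipe("The market reacted to the rate decision by")[0]["generated_text"])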
# Prompt template
prompt_template = """
You are a financial market analyst.
Before making a prediction you always analyze the past, which is given by the Context below.
Answer the Question based on what happened in the past.
Please respond with:
- Chosen Stock: (name)
- Prediction: (price change)
- Explanation: (brief and clear)
Context:
{context}
Question:
{question}
"""
prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
# The embedding model must match the one used to build the FAISS index
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/multi-qa-mpnet-base-dot-v1")
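# The app expects a prebuilt index at ./faiss_index. A minimal sketch of how
# such an index could be created with the embeddings above (the texts here are
# hypothetical placeholders, not the actual corpus behind this Space):
#
#   docs = ["2020-03-16: S&P 500 fell 12% after an emergency rate cut.", "..."]
#   index = FAISS.from_texts(docs, embeddings)
#   index.save_local("./faiss_index")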
local_faiss_dir = "./faiss_index"
vectorstore = FAISS.load_local(
    local_faiss_dir,
    embeddings,
    allow_dangerous_deserialization=True,  # required by recent langchain_community versions to unpickle the index
)
def analisi_finanziaria(query, k=4):
    # Retrieve the k most relevant documents
    docs_found = vectorstore.similarity_search(query, k=k)
    # Build the context by concatenating the retrieved documents
    context = "\n".join(doc.page_content for doc in docs_found)
    # Build the final prompt
    final_prompt = prompt.format(context=context, question=query)
    # Generate the answer; return_full_text=False strips the echoed prompt
    result = pipe(final_prompt, return_full_text=False)[0]["generated_text"]
    return result
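# Example invocation (hypothetical query, for illustration only):
#   analisi_finanziaria("The Fed announces an unexpected 50 bps rate hike")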
iface = gr.Interface(
    fn=analisi_finanziaria,
    inputs=gr.Textbox(label="Enter event or question"),
    outputs=gr.Textbox(label="Prediction"),
    title="GRPO Financial Analyst",
    description="Enter a financial event; the GRPO model will analyze the historical context and provide a prediction.",
)

iface.launch()