import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Load the locally saved fine-tuned model inside your Space
MODEL_DIR = "./laptop-tinyllama"
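# A minimal sketch of how this directory would have been produced upstream
# (an assumption, not part of this file; `trainer` is the Hugging Face
# Trainer from the fine-tuning run):
#
#     trainer.save_model(MODEL_DIR)
#     tokenizer.save_pretrained(MODEL_DIR)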
@st.cache_resource  # cache so the model loads once, not on every Streamlit rerun
def load_pipeline():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
    model = AutoModelForCausalLM.from_pretrained(MODEL_DIR)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

# Load the model pipeline
generator = load_pipeline()
# Streamlit UI
st.title("💻 Laptop Recommendation with TinyLlama")
st.write("Enter a question like: *Suggest a laptop for gaming under 1 lakh BDT.*")

# Prompt input
prompt = st.text_area("Enter your query", value="Suggest a laptop for programming under 70000 BDT.")

if st.button("Generate Response"):
    with st.spinner("Generating..."):
        # do_sample=True is needed for temperature to actually take effect
        result = generator(prompt, max_new_tokens=100, temperature=0.7, do_sample=True)
    st.success(result[0]["generated_text"])
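# Deployment note (an assumption, not from the original file): a Streamlit
# Space installs Python dependencies from a requirements.txt at the repo
# root. A minimal sketch of that file:
#
#     streamlit
#     transformers
#     torch
#
# To test locally before pushing to the Space: streamlit run app.py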