# Hugging Face Space app (the Space was previously showing "Runtime error").
from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from peft import PeftModel
import torch

app = FastAPI()

# Model identifiers on the Hugging Face Hub: a base causal LM plus a
# PEFT (LoRA-style) adapter fine-tuned on top of it.
base_model_path = "NousResearch/Hermes-3-Llama-3.2-3B"
adapter_path = "thinkingnew/llama_invs_adapter"

# Load the base model in fp16 and let `device_map="auto"` place it on the
# available device(s); then attach the fine-tuned adapter weights and the
# matching tokenizer. This runs once at import/startup time.
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_path, torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(base_model, adapter_path)
tokenizer = AutoTokenizer.from_pretrained(base_model_path)
@app.get("/")
async def root():
    """Health-check endpoint.

    Returns a short JSON message confirming the model is loaded and
    pointing callers at the /generate/ endpoint. The route decorator was
    missing, so FastAPI never registered this handler — restored here.
    """
    return {"message": "Model is running! Use /generate/ for text generation."}
@app.get("/generate/")
async def generate_text(prompt: str):
    """Generate a completion for *prompt* with the adapter-augmented model.

    Args:
        prompt: Raw user text; it is wrapped in the model's
            ``<s>[INST] ... [/INST]`` instruction template before generation.

    Returns:
        ``{"response": <generated text>}`` where the text is the first
        (and only) sequence produced by the pipeline.

    The route decorator was missing, so this handler was never registered;
    restored here. NOTE(review): the pipeline is rebuilt on every request —
    behavior-preserving but wasteful; consider hoisting it to module level.
    """
    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=512)
    result = pipe(f"<s>[INST] {prompt} [/INST]")
    return {"response": result[0]['generated_text']}