from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch

model_id = "saikrishnagorijala/friday-V1"

# Load the tokenizer and the model in half precision, letting device_map="auto"
# place the weights on the available GPU(s) / CPU.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
)

def chat(prompt):
    # Tokenize the prompt, generate up to 200 new tokens, and decode the result
    # (the returned text includes the original prompt).
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=200)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Expose the chat function through a simple text-in / text-out Gradio UI.
demo = gr.Interface(fn=chat, inputs="text", outputs="text", title="Friday-V1")
demo.launch()
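
If the tokenizer for this checkpoint ships a chat template (an assumption, not something the snippet above confirms), the prompt can be wrapped with apply_chat_template so generation sees the same formatting used during fine-tuning. A minimal sketch of an alternative chat function, reusing the tokenizer and model loaded above:

def chat(prompt):
    # Wrap the raw prompt as a single user turn using the tokenizer's chat template.
    messages = [{"role": "user", "content": prompt}]
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=200)
    # Decode only the newly generated tokens, dropping the prompt echo.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)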