# app.py — Gradio demo Space "qwenmodel" (by Shankarm08) serving Qwen/Qwen3-0.6B.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Load model and tokenizer from the Hugging Face Hub (downloads on first run).
model_id = "Qwen/Qwen3-0.6B"
# NOTE(review): trust_remote_code=True executes Python shipped inside the model
# repo — confirm this checkpoint actually requires it before keeping the flag.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
# Create pipeline
# Module-level generation pipeline reused by every chat() call; no explicit
# device is set, so placement falls back to the transformers default.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Chat function
def chat(prompt):
    """Generate a reply for *prompt* using the module-level `generator` pipeline.

    Args:
        prompt: User-supplied text to continue.

    Returns:
        Only the newly generated continuation. The original code returned
        `generated_text` with the default `return_full_text=True`, which
        echoes the user's prompt back at the start of every reply; passing
        `return_full_text=False` fixes that.
    """
    # do_sample=True + temperature=0.7 -> varied, non-deterministic replies.
    output = generator(
        prompt,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,  # fix: don't repeat the prompt in the answer
    )
    return output[0]["generated_text"]
# Gradio UI: single textbox in, plain text out, wired to chat().
demo = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=3, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Qwen3-0.6B Chatbot",
    description="A simple demo using Qwen3-0.6B from Hugging Face",
)
# Start the web server (blocks until the Space is shut down).
demo.launch()