import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the tokenizer and model once at startup
MODEL_NAME = "taufiqdp/indonesian-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
class_names = ['negatif', 'netral', 'positif']


def predict_sentiment(text):
    # Guard against empty input before running the model
    if not text or text.strip() == "":
        return "Teks kosong"
    # Tokenize; truncate so overly long inputs fit the model's maximum sequence length
    tokenized = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.inference_mode():
        logits = model(**tokenized).logits
    pred_id = logits.argmax(dim=1).item()
    sentiment = class_names[pred_id]
    # Softmax probability of the predicted class, reported as a confidence score
    confidence = torch.softmax(logits, dim=1)[0][pred_id].item()
    return f"{sentiment} ({confidence:.2%})"


# Gradio interface
demo = gr.Interface(
    fn=predict_sentiment,
    inputs=gr.Textbox(label="Masukkan teks"),
    outputs=gr.Textbox(label="Prediksi Sentimen"),
    title="Indonesian Sentiment Analysis",
    description="Model klasifikasi sentimen bahasa Indonesia (negatif, netral, positif).",
)

if __name__ == "__main__":
    demo.launch()
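# A minimal usage sketch (an assumption, not part of the original script):
# predict_sentiment can also be called directly, without the Gradio UI, as a
# quick sanity check. It returns the predicted label and its softmax
# confidence as a single string, e.g. "positif (…%)".
#
#   >>> predict_sentiment("Pelayanannya sangat memuaskan")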