import gradio as gr
from transformers import pipeline

# Hate-speech classifier (Arabic BERT fine-tune). return_all_scores=False
# returns only the top label per input (the flag is deprecated in newer
# transformers releases, but harmless here).
hate = pipeline(
    "text-classification",
    model="hossam87/bert-base-arabic-hate-speech",
    tokenizer="hossam87/bert-base-arabic-hate-speech",
    return_all_scores=False,
)

# Written Arabic dialect classifier built on MARBERTv2.
dialect = pipeline(
    "text-classification",
    model="IbrahimAmin/marbertv2-arabic-written-dialect-classifier",
    tokenizer="IbrahimAmin/marbertv2-arabic-written-dialect-classifier",
    return_all_scores=False,
)
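# Output shape (illustrative values, not real model output): a single-string
# call returns a one-element list of {"label", "score"} dicts, e.g.
#   hate("...")    -> [{"label": "Offensive", "score": 0.93}]
#   dialect("...") -> [{"label": "Egyptian",  "score": 0.88}]
# The actual label vocabularies come from each model's config on the Hub.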
def analyze(text):
    # Hate speech detection: top prediction for the input text
    hate_res = hate(text)[0]
    hate_label = hate_res["label"]
    hate_conf = hate_res["score"]

    # Dialect detection
    dial_res = dialect(text)[0]
    dial_label = dial_res["label"]
    dial_conf = dial_res["score"]
    # Threat score: scale the hate classifier's confidence by a per-label
    # severity weight, so severe categories outrank merely offensive ones.
    weight = {
        "Neutral": 0,
        "Offensive": 0.5,
        "Sexism": 1,
        "Racism": 1,
        "Religious Discrimination": 1,
    }
    score = hate_conf * weight.get(hate_label, 0)
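    # Worked example (illustrative numbers): an "Offensive" prediction at 0.90
    # confidence scores 0.90 * 0.5 = 0.45; a "Racism" prediction at the same
    # confidence scores 0.90 * 1 = 0.90 and crosses the escalation threshold.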
    # Recommended action: escalate when the weighted score is high; otherwise
    # flag any non-neutral label for manual review.
    if score >= 0.49:
        action = "🚨 Immediate Review Required: this content contains severe hate speech or threats and should be escalated to moderators immediately."
    elif hate_label != "Neutral":
        action = "⚠️ Potentially Harmful: the content may contain offensive or harmful language. Please review before taking further action."
    else:
        action = "✅ Safe Content: no harmful language detected. No moderation needed."

    return hate_label, f"{hate_conf:.2f}", dial_label, f"{dial_conf:.2f}", f"{score:.2f}", action
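# Illustrative result (made-up values): analyze("...") might return
# ("Offensive", "0.90", "Egyptian", "0.88", "0.45",
#  "⚠️ Potentially Harmful: ...").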
iface = gr.Interface(
    fn=analyze,
    inputs=gr.Textbox(lines=4, placeholder="اكتب هنا...", label="Arabic Text"),  # placeholder: "Write here..."
    outputs=[
        gr.Text(label="Hate Speech Label"),
        gr.Text(label="Hate Confidence"),
        gr.Text(label="Dialect"),
        gr.Text(label="Dialect Confidence"),
        gr.Text(label="Threat Score"),
        gr.Text(label="Recommended Action"),
    ],
    title="🛡️ Arabic Content Safety Analyzer",
    description="Classifies Arabic text for hate speech, detects dialect, assigns a threat severity score, and recommends a moderation action.",
    theme="default",
)
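# launch() serves the UI from a local Gradio server (http://127.0.0.1:7860 by
# default); on Hugging Face Spaces the platform exposes it automatically.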
iface.launch()