Update app.py
app.py CHANGED
@@ -16,9 +16,11 @@ def analyze(text):
     weight = {"Neutral":0, "Offensive":0.5, "Sexism":1, "Racism":1, "Religious Discrimination":1}
     score = hate_conf * weight.get(hate_label, 0)
 
+    keys_list = ["Offensive", "Sexism", "Racism", "Religious Discrimination"]
+
     action = "✅ Safe Content: No significant signs of hate speech detected. No further action required."
 
-    if score > 0.8:
+    if score > 0.8 or hate_label in keys_list:
         action = "🚨 Critical Content Alert: This text is highly likely to contain severe hate speech. Immediate review and removal is strongly advised."
     elif score >= 0.49:
         action = "⚠️ Potentially Harmful Content: This text may contain offensive or discriminatory language. Please have a moderator review it."
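
For reference, a minimal, self-contained sketch of the updated decision logic. The helper name `decide_action` is hypothetical; in app.py this branch lives inside `analyze()`, and `hate_label` / `hate_conf` are assumed to come from the upstream hate-speech classifier. The practical effect of the change: any non-neutral label now triggers the critical alert, regardless of the confidence-weighted score.

# Sketch of the updated branch (decide_action is a hypothetical name;
# in app.py, hate_label and hate_conf come from the classifier).
def decide_action(hate_label: str, hate_conf: float) -> str:
    weight = {"Neutral": 0, "Offensive": 0.5, "Sexism": 1,
              "Racism": 1, "Religious Discrimination": 1}
    score = hate_conf * weight.get(hate_label, 0)

    keys_list = ["Offensive", "Sexism", "Racism", "Religious Discrimination"]

    action = "✅ Safe Content"
    if score > 0.8 or hate_label in keys_list:
        action = "🚨 Critical Content Alert"
    elif score >= 0.49:
        action = "⚠️ Potentially Harmful Content"
    return action

# Any non-neutral label now escalates even at low confidence:
print(decide_action("Offensive", 0.30))  # 🚨 Critical (score is only 0.15;
                                         # the old score-only check said ✅ Safe)
print(decide_action("Neutral", 0.99))    # ✅ Safe (weight 0 keeps score at 0)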