streamlit app
- .gitignore +1 -0
- Dockerfile +18 -0
- app.py +55 -0
- requirements.txt +10 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+run.bat
Dockerfile
ADDED
@@ -0,0 +1,18 @@
+FROM continuumio/miniconda3
+
+WORKDIR /home/app
+
+RUN apt-get update
+RUN apt-get install -y nano unzip
+RUN apt-get install -y curl
+
+RUN curl -fsSL https://get.deta.dev/cli.sh | sh
+
+COPY requirements.txt /dependencies/requirements.txt
+RUN pip install --no-cache-dir -r /dependencies/requirements.txt
+
+
+COPY . /home/app
+
+# Use shell form of CMD to allow environment variable substitution
+CMD streamlit run app.py --server.port=${PORT} --server.address=0.0.0.0
app.py
ADDED
@@ -0,0 +1,55 @@
+import streamlit as st
+import pandas as pd
+import plotly.express as px
+import plotly.graph_objects as go
+import numpy as np
+import requests
+
+### Config
+st.set_page_config(
+    page_title="Hate Speech Recognition",
+    page_icon="⚠️",
+    layout="wide"
+)
+
+
+def hate_speech_detection(text):
+    url = "https://llepogam-hate-speech-detection-api.hf.space/predict"
+    headers = {
+        "accept": "application/json",
+        "Content-Type": "application/json"
+    }
+
+    # Define the payload
+    Text_to_predict = {
+        "Text": text
+    }
+
+    # Make the POST request
+    response = requests.post(url, headers=headers, json=Text_to_predict)
+
+    # Process the response
+    if response.status_code == 200:
+        return response.json()
+    else:
+        return f"Failed to get a response. Status code: {response.status_code}, Response: {response.text}"
+
+
+
+
+
+st.title("Hate Speech Detection")
+user_input = st.text_area("Enter a Tweet:")
+
+if user_input:
+    prediction = hate_speech_detection(user_input)
+    st.write(f"Prediction: {prediction['prediction']}")
+    st.write(f"Probability: {prediction['probability']}")
+
+
+
+### Footer
+empty_space, footer = st.columns([1, 2])
+
+with empty_space:
+    st.write("")
+
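Note on app.py: hate_speech_detection() returns the parsed JSON on success but a plain error string on failure, so prediction['prediction'] raises a TypeError whenever the API call fails. The sketch below is not part of the commit; it is a minimal, hypothetical defensive wrapper that assumes the API responds with a JSON object containing "prediction" and "probability" keys, as the display code above implies. Only the URL and payload shape are taken from the diff.

# Hypothetical defensive wrapper around the /predict call used in app.py.
# Assumes the API returns JSON like {"prediction": ..., "probability": ...}.
import requests

API_URL = "https://llepogam-hate-speech-detection-api.hf.space/predict"

def predict_safely(text: str, timeout: float = 10.0):
    """Return (result_dict, error_message); exactly one of them is None."""
    try:
        response = requests.post(
            API_URL,
            headers={"accept": "application/json", "Content-Type": "application/json"},
            json={"Text": text},
            timeout=timeout,
        )
        response.raise_for_status()
        return response.json(), None
    except (requests.exceptions.RequestException, ValueError) as exc:
        # Covers connection errors, timeouts, non-2xx responses, and invalid JSON.
        return None, f"Prediction request failed: {exc}"

# Example usage mirroring the Streamlit display logic:
# result, error = predict_safely(user_input)
# if error:
#     st.error(error)
# else:
#     st.write(f"Prediction: {result['prediction']}")
#     st.write(f"Probability: {result['probability']}")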
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+boto3
+pandas
+gunicorn
+streamlit
+scikit-learn
+matplotlib
+seaborn
+plotly
+huggingface_hub
+numpy