Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import requests
|
| 4 |
+
import gradio as gr
|
| 5 |
+
from langchain.schema import SystemMessage
|
| 6 |
+
from langchain.chat_models import ChatGooglePalm
|
| 7 |
+
from langchain.agents import initialize_agent
|
| 8 |
+
from langchain.agents import AgentType
|
| 9 |
+
from langchain.tools import Tool
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Credentials are read once at import time; a missing variable fails fast
# with KeyError rather than producing confusing auth errors later.
hf_api_key = os.environ["HF_API_KEY"]
google_key = os.environ["GOOGLE_API_KEY"]
| 14 |
+
|
| 15 |
+
def query(payload, model_id, api_token=hf_api_key):
    """POST *payload* to the Hugging Face Inference API and return parsed JSON.

    Args:
        payload: The text (model input) sent as the request's "inputs" field.
        model_id: Hub id of the hosted model, e.g. "fzanartu/flicc".
        api_token: Bearer token for the API; defaults to the module-level
            HF_API_KEY value read at import time.

    Returns:
        The decoded JSON body of the inference response.

    Raises:
        requests.HTTPError: if the endpoint answers with a non-2xx status.
        requests.Timeout: if the request exceeds the timeout.
    """
    headers = {"Authorization": f"Bearer {api_token}"}
    api_url = f"https://api-inference.huggingface.co/models/{model_id}"
    # wait_for_model avoids 503s while a cold model is still loading server-side.
    # Use a distinct name instead of shadow-reassigning the `payload` parameter.
    body = {
        "inputs": payload,
        "options": {"use_cache": True, "wait_for_model": True},
    }
    # A timeout keeps the Gradio worker from hanging forever on a stuck request.
    response = requests.post(api_url, headers=headers, json=body, timeout=120)
    # Surface HTTP failures explicitly instead of returning an error dict that
    # downstream code would index like a prediction list.
    response.raise_for_status()
    # response.json() replaces the manual json.loads(content.decode("utf-8")).
    return response.json()
| 22 |
+
|
| 23 |
+
# Hugging Face Hub ids for the two-stage fallacy pipeline (see fallacy_detector):
# cards_model screens for contrarian claims; flicc_model names the fallacy.
flicc_model = "fzanartu/flicc"
cards_model = "crarojasca/BinaryAugmentedCARDS"
+
def fallacy_detector(text):
    """Return the fallacy label for *text*, or a no-fallacy message.

    Two-stage pipeline: the binary CARDS model screens the text first, and
    only a 'Contrarian' verdict triggers the fine-grained FLICC classifier.
    """
    screening = query(text, model_id=cards_model)
    if screening[0][0].get('label') != 'Contrarian':
        return 'There is no fallacy detected in your text'
    detail = query(text, model_id=flicc_model)
    return detail[0][0].get('label')
| 34 |
+
|
| 35 |
+
# Agent persona and response contract. Fixes the grammar/spelling defects in
# the original prompt ("who provide ... consise" -> "who provides ... concise").
system_message = SystemMessage(
    content = """You are an expert climate analyst, who provides precise and concise responses to climate change misinformation.

Please make sure you complete the objective above with the following rules:

1/ Provide precise and concise replies to climate change misinformation using a structured "hamburger-style" response, including the myth, the fallacy, and the fact.
2/ You should use the provided fallacy detector tool to identify the correct fallacy contained in the text.
3/ Hamburger-Style Response: Your responses should follow a structured format commonly known as the "hamburger-style." This includes:
Top Bun (Myth): Start with the misinformation related to climate change.
Meat (Fallacy): Utilize the provided fallacy detector tool to identify the specific fallacy in the misinformation. Explain why the misinformation is incorrect, addressing the fallacious reasoning present.
Bottom Bun (Fact): End with a clear and concise presentation of the factual information that debunks the climate change myth.

Your task is complete once you have written all the elements of the hamburger-style response.
"""
)
| 50 |
+
|
| 51 |
+
# Tools exposed to the agent; the LLM decides when to invoke each one based
# on its description.
tools = [
    Tool(
        name="fallacy_detector",
        func=fallacy_detector,
        description="useful when you need to detect a fallacy",
    ),
]
| 58 |
+
|
| 59 |
+
# Extra keyword arguments forwarded into the agent's constructor; the custom
# system message replaces the agent's default prompt preamble.
agent_kwargs = {"system_message": system_message}
| 62 |
+
|
| 63 |
+
# Deterministic (temperature=0) Google PaLM chat model backing the agent.
llm = ChatGooglePalm(model_name="models/chat-bison-001", temperature=0.0, google_api_key=google_key)
| 64 |
+
|
| 65 |
+
# NOTE(review): AgentType.OPENAI_FUNCTIONS relies on the OpenAI function-calling
# API, which ChatGooglePalm does not implement — this pairing likely fails at
# runtime. Confirm, or switch to a ReAct-style agent type suitable for PaLM
# (which would also change how the system message is passed).
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.OPENAI_FUNCTIONS,
    verbose=False,
    agent_kwargs=agent_kwargs,
)
| 72 |
+
|
| 73 |
+
def get_reply(text):
    """Run the user's message through the agent and return its reply."""
    return agent.run(text)
| 76 |
+
|
| 77 |
+
# Minimal Gradio UI: one text box in, the agent's debunking reply out.
demo = gr.Interface(fn=get_reply, inputs="text", outputs="text")
demo.launch()