Commit 3d24b44 · Parent(s): 66964cc
Update app.py

app.py CHANGED
@@ -1,11 +1,13 @@
 from pyChatGPT import ChatGPT
+import openai
 import gradio as gr
 import os, sys, json
 from loguru import logger
 import paddlehub as hub
 import random
 
-
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
 language_translation_model = hub.Module(directory=f'./baidu_translate')
 def getTextTrans(text, source='zh', target='en'):
     def is_chinese(string):
@@ -32,14 +34,14 @@ def get_api():
         api = ChatGPT(session_token)
         # api.refresh_auth()
     except Exception as e:
-
+        logger.info(f'get_api_error: {e}')
         api = None
     return api
-
 
 def get_response_from_chatgpt(api, text):
     if api is None:
-        return "
+        # return "Sorry, I'm busy. Try again later.(1)"
+        return "Openai said: I'm too tired. Let me lie down for a few days. If you like, you can visit my home(1)."
     try:
         resp = api.send_message(text)
         # api.refresh_auth()
@@ -50,9 +52,37 @@ def get_response_from_chatgpt(api, text):
         # logger.info(f"response_: {response}")
         logger.info(f"conversation_id_: [{conversation_id}] / parent_id: [{parent_id}]")
     except:
-        response = "
+        # response = "Sorry, I'm busy. Try again later.(2)"
+        response = "Openai said: I'm so tired. Let me lie down for a few days. If you like, you can visit my home(2)."
     return response
 
+def get_response_from_openai(input, history):
+    def openai_create(prompt):
+        # no chatgpt, and from gpt-3
+        response = openai.Completion.create(
+            model="text-davinci-003",
+            prompt=prompt,
+            temperature=0.9,
+            max_tokens=2048,
+            top_p=1,
+            frequency_penalty=0,
+            presence_penalty=0.6,
+            stop=[" Human:", " AI:"]
+        )
+        ret = response.choices[0].text
+        if ret == '':
+            ret = "Openai said: I'm too tired. Let me lie down for a few days. If you like, you can visit my home(3)."
+
+        return ret
+
+    history = history or []
+    his= [tuple(item) for item in history]
+    s = list(sum(his, ()))
+    s.append(input)
+    inp = ' '.join(s)
+    output = openai_create(inp)
+    return output
+
 start_work = """async() => {
     function isMobile() {
         try {
@@ -82,18 +112,26 @@ start_work = """async() => {
         } else {
             valueSetter.call(element, value);
         }
+        element.dispatchEvent(new Event('input', { bubbles: true }));
     }
     function save_conversation(chatbot) {
         var conversations = new Array();
+        var conversations_noimg = new Array();
         for (var i = 0; i < chatbot.children.length; i++) {
-
+            innerHTML = chatbot.children[i].innerHTML;
+            conversations.push(innerHTML);
+            if (innerHTML.indexOf("<img ") == -1) {
+                conversations_noimg.push(innerHTML);
+            }
         }
         var json_str = JSON.stringify(conversations);
+        setNativeValue(window['chat_his'], JSON.stringify(conversations_noimg));
         localStorage.setItem('chatgpt_conversations', json_str);
     }
     function load_conversation(chatbot) {
         var json_str = localStorage.getItem('chatgpt_conversations');
         if (json_str) {
+            var conversations_noimg = new Array();
            conversations = JSON.parse(json_str);
            for (var i = 0; i < conversations.length; i++) {
                var new_div = document.createElement("div");
@@ -108,9 +146,15 @@ start_work = """async() => {
                        new_div.style.padding = "0.2rem";
                    }
                }
-
+                innerHTML = conversations[i];
+                new_div.innerHTML = innerHTML;
                chatbot.appendChild(new_div);
+
+                if (innerHTML.indexOf("<img ") == -1) {
+                    conversations_noimg.push(innerHTML);
+                }
            }
+            setNativeValue(window['chat_his'], JSON.stringify(conversations_noimg));
        }
    }
    var gradioEl = document.querySelector('body > gradio-app').shadowRoot;
@@ -128,7 +172,8 @@ start_work = """async() => {
        page2.style.display = "block";
        window['div_count'] = 0;
        window['chat_bot'] = window['gradioEl'].querySelectorAll('#chat_bot')[0];
-       window['chat_bot1'] = window['gradioEl'].querySelectorAll('#chat_bot1')[0];
+       window['chat_bot1'] = window['gradioEl'].querySelectorAll('#chat_bot1')[0];
+       window['chat_his'] = window['gradioEl'].querySelectorAll('#chat_history')[0].querySelectorAll('textarea')[0];
        chat_row = window['gradioEl'].querySelectorAll('#chat_row')[0];
        prompt_row = window['gradioEl'].querySelectorAll('#prompt_row')[0];
        window['chat_bot1'].children[1].textContent = '';
@@ -203,7 +248,6 @@ start_work = """async() => {
            for (var i = 0; i < tabitems.length; i++) {
                inputText = tabitems[i].children[0].children[1].children[0].querySelectorAll('.gr-text-input')[0];
                setNativeValue(inputText, text_value);
-               inputText.dispatchEvent(new Event('input', { bubbles: true }));
            }
            setTimeout(function() {
                btns = window['gradioEl'].querySelectorAll('button');
@@ -222,7 +266,7 @@ start_work = """async() => {
                var user_div = document.createElement("div");
                user_div.className = "px-3 py-2 rounded-[22px] rounded-br-none text-white text-sm chat-message svelte-rct66g";
                user_div.style.backgroundColor = "#16a34a";
-               user_div.innerHTML = "<p>" + text0.value + "</p>";
+               user_div.innerHTML = "<p>" + text0.value + "</p><img ></img>";
                window['chat_bot1'].children[2].children[0].appendChild(user_div);
                var bot_div = document.createElement("div");
                bot_div.className = "px-3 py-2 rounded-[22px] rounded-bl-none place-self-start text-white text-sm chat-message svelte-rct66g";
@@ -273,23 +317,29 @@ for space_id in space_ids.keys():
 
 def chat(api, input0, input1, chat_radio, chat_history):
     out_chat = []
-
-
-
+    chat_history = chat_history.replace('<p>', '').replace('</p>', '')
+    if chat_history != '':
+        out_chat_1 = json.loads(chat_history)
+        for i in range(int(len(out_chat_1)/2)):
+            out_chat.append([out_chat_1[2*i], out_chat_1[2*i+1]])
+
+    # logger.info(f"out_chat_: {len(out_chat)} / {chat_radio}")
     if chat_radio == "Talk to chatGPT":
-        response = get_response_from_chatgpt(api, input0)
+        # response = get_response_from_chatgpt(api, input0)
         # response = get_response_from_microsoft(input0)
         # response = get_response_from_skywork(input0)
+        response = get_response_from_openai(input0, out_chat)
         out_chat.append((input0, response))
-
-        return api, out_chat, input1
+        # logger.info(f'liuyz_5___{out_chat}__')
+        return api, out_chat, input1
     else:
        prompt_en = getTextTrans(input0, source='zh', target='en') + f',{random.randint(0,sys.maxsize)}'
-        return api, out_chat, prompt_en
+        return api, out_chat, prompt_en
 
-with gr.Blocks(title='Talk to chatGPT') as demo:
-    gr.
-
+with gr.Blocks(title='Talk to chatGPT') as demo:
+    with gr.Group(elem_id="page_0", visible=True) as page_0:
+        gr.HTML("<p>You can duplicating this space and use your own session token: <a style='display:inline-block' href='https://huggingface.co/spaces/yizhangliu/chatGPT?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>")
+        gr.HTML("<p> Instruction on how to get session token can be seen in video <a style='display:inline-block' href='https://www.youtube.com/watch?v=TdNSj_qgdFk'><font style='color:blue;weight:bold;'>here</font></a>. Add your session token by going to settings and add under secrets. </p>")
     with gr.Group(elem_id="page_1", visible=True) as page_1:
         with gr.Box():
             with gr.Row():
@@ -303,8 +353,8 @@ with gr.Blocks(title='Talk to chatGPT') as demo:
            with gr.Row(elem_id="prompt_row"):
                prompt_input0 = gr.Textbox(lines=2, label="prompt",show_label=False)
                prompt_input1 = gr.Textbox(lines=4, label="prompt", visible=False)
-               chat_history = gr.Textbox(lines=4, label="prompt", visible=False)
-               chat_radio = gr.Radio(["Talk to chatGPT", "Text to Image"], elem_id="chat_radio",value="Talk to chatGPT", show_label=False)
+               chat_history = gr.Textbox(lines=4, label="prompt", elem_id="chat_history", visible=False)
+               chat_radio = gr.Radio(["Talk to chatGPT", "Text to Image"], elem_id="chat_radio",value="Talk to chatGPT", show_label=False, visible=True)
            with gr.Row(elem_id="btns_row"):
                with gr.Column(id="submit_col"):
                    submit_btn = gr.Button(value = "submit",elem_id="submit-btn").style(
@@ -321,10 +371,9 @@ with gr.Blocks(title='Talk to chatGPT') as demo:
    api = gr.State(value=get_api())
    submit_btn.click(fn=chat,
                     inputs=[api, prompt_input0, prompt_input1, chat_radio, chat_history],
-                    outputs=[api, chatbot, prompt_input1
+                    outputs=[api, chatbot, prompt_input1],
                     )
    with gr.Row(elem_id='tab_img', visible=False).style(height=5):
        tab_img = gr.TabbedInterface(tab_actions, tab_titles)
 
demo.launch(debug = True)
-
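
The new chat() rebuilds the conversation from the hidden chat_history textbox that the front-end JS fills via setNativeValue(window['chat_his'], JSON.stringify(conversations_noimg)). Below is a minimal sketch of that parsing step, mirroring the loop added in chat(); the helper name rebuild_history and the sample strings are illustrative only and not part of the app.

import json

def rebuild_history(chat_history):
    # Strip the <p>...</p> wrappers the chatbot HTML adds, then parse the JSON
    # array written by save_conversation and pair up alternating user/bot turns.
    chat_history = chat_history.replace('<p>', '').replace('</p>', '')
    out_chat = []
    if chat_history != '':
        items = json.loads(chat_history)
        for i in range(int(len(items)/2)):
            out_chat.append([items[2*i], items[2*i+1]])
    return out_chat

print(rebuild_history('["<p>hi</p>", "<p>hello there</p>"]'))  # [['hi', 'hello there']]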
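
get_response_from_openai() then flattens those [user, bot] pairs into a single completion prompt before calling the pre-1.0 openai.Completion.create API with text-davinci-003, which relies on the OPENAI_API_KEY value read at the top of the file. A small sketch of just the flattening, with made-up history values; the API call itself is omitted here.

history = [['hi', 'hello there'], ['how are you', 'fine, thanks']]  # [user, bot] pairs
his = [tuple(item) for item in history]  # [('hi', 'hello there'), ('how are you', 'fine, thanks')]
s = list(sum(his, ()))                   # flatten the pairs into one list of turns
s.append('what can you do')              # append the new user input
inp = ' '.join(s)                        # single prompt string handed to openai_create()
print(inp)  # hi hello there how are you fine, thanks what can you do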