Increased temperature and added alternate openings
Browse files
app.py
CHANGED
|
@@ -748,8 +748,8 @@ discrim_weights=None
|
|
| 748 |
discrim_meta=None
|
| 749 |
class_label=0
|
| 750 |
length=100
|
| 751 |
-
stepsize=0.
|
| 752 |
-
temperature=1.
|
| 753 |
top_k=2
|
| 754 |
sample=True
|
| 755 |
num_iterations=10
|
|
@@ -811,17 +811,22 @@ tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model)
|
|
| 811 |
for param in model.parameters():
|
| 812 |
param.requires_grad = False
|
| 813 |
|
|
|
|
| 814 |
eot_token = "<|endoftext|>"
|
| 815 |
|
| 816 |
-
def get_reply(response, username = None, histories = {}, in_stepsize = 0.99, in_horizon_length = 1, in_num_iterations = 10, in_top_k = 2):
|
| 817 |
if username == None or username == "": return "<div class='chatbot'>Enter a username</div>", histories
|
| 818 |
stepsize = in_stepsize
|
| 819 |
horizon_length = int(in_horizon_length)
|
| 820 |
num_iterations = int(in_num_iterations)
|
| 821 |
top_k = int(in_top_k)
|
| 822 |
-
if response.endswith(("bye", "Bye", "bye.", "Bye.", "bye!", "Bye!")):
|
| 823 |
-
|
| 824 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 825 |
history = histories.get(username, None)
|
| 826 |
convo_hist = (history if history != None else "How are you?<|endoftext|>") + response + eot_token
|
| 827 |
# figure out conditioning text
|
|
@@ -877,8 +882,12 @@ def get_reply(response, username = None, histories = {}, in_stepsize = 0.99, in_
|
|
| 877 |
convo_hist = eot_token.join(convo_hist_split)
|
| 878 |
|
| 879 |
except:
|
| 880 |
-
|
| 881 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 882 |
histories[username] = convo_hist
|
| 883 |
return html, histories
|
| 884 |
|
|
@@ -896,4 +905,4 @@ gr.Interface(fn=get_reply,
|
|
| 896 |
gr.inputs.Textbox(label="Username"),
|
| 897 |
"state"],
|
| 898 |
outputs=["html", "state"],
|
| 899 |
-
css=css).launch(debug=True, enable_queue=True)
|
|
|
|
| 748 |
discrim_meta=None
|
| 749 |
class_label=0
|
| 750 |
length=100
|
| 751 |
+
stepsize=0.32
|
| 752 |
+
temperature=1.3
|
| 753 |
top_k=2
|
| 754 |
sample=True
|
| 755 |
num_iterations=10
|
|
|
|
| 811 |
for param in model.parameters():
|
| 812 |
param.requires_grad = False
|
| 813 |
|
| 814 |
+
starters = ["How are you feeling and why?", "Tell me about your day", "What would you like to talk about?"]
|
| 815 |
eot_token = "<|endoftext|>"
|
| 816 |
|
| 817 |
+
def get_reply(response, username = None, histories = {}, in_stepsize = 0.32, in_horizon_length = 1, in_num_iterations = 10, in_top_k = 2):
|
| 818 |
if username == None or username == "": return "<div class='chatbot'>Enter a username</div>", histories
|
| 819 |
stepsize = in_stepsize
|
| 820 |
horizon_length = int(in_horizon_length)
|
| 821 |
num_iterations = int(in_num_iterations)
|
| 822 |
top_k = int(in_top_k)
|
| 823 |
+
if response.endswith(("bye", "Bye", "bye.", "Bye.", "bye!", "Bye!","Hello", "Hi", "hello")):
|
| 824 |
+
starter = choice(starters)
|
| 825 |
+
histories[username] = starter+"<|endoftext|>"
|
| 826 |
+
html = "<div class='chatbot'> Chatbot restarted"
|
| 827 |
+
html += "<div class='msg user'>"+starter+"</div>"
|
| 828 |
+
html += "</div>"
|
| 829 |
+
return html, histories
|
| 830 |
history = histories.get(username, None)
|
| 831 |
convo_hist = (history if history != None else "How are you?<|endoftext|>") + response + eot_token
|
| 832 |
# figure out conditioning text
|
|
|
|
| 882 |
convo_hist = eot_token.join(convo_hist_split)
|
| 883 |
|
| 884 |
except:
|
| 885 |
+
starter = choice(starters)
|
| 886 |
+
histories[username] = starter+"<|endoftext|>"
|
| 887 |
+
html = "<div class='chatbot'> Chatbot restarted"
|
| 888 |
+
html += "<div class='msg user'>"+starter+"</div>"
|
| 889 |
+
html += "</div>"
|
| 890 |
+
return html, histories
|
| 891 |
histories[username] = convo_hist
|
| 892 |
return html, histories
|
| 893 |
|
|
|
|
| 905 |
gr.inputs.Textbox(label="Username"),
|
| 906 |
"state"],
|
| 907 |
outputs=["html", "state"],
|
| 908 |
+
css=css).launch(debug=True, enable_queue=True, share=True)
|