Update app.py
app.py    CHANGED
@@ -29,11 +29,11 @@ def load_models(inp):
     return gr.update(label=models[inp])
 
 def format_prompt(message, history, cust_p):
-    prompt = ""
+    prompt = "<s>"
     if history:
         for user_prompt, bot_response in history:
             prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
-            prompt += f"<start_of_turn>model{bot_response}<end_of_turn>"
+            prompt += f"<start_of_turn>model{bot_response}<end_of_turn></s>"
             if VERBOSE==True:
                 print(prompt)
     #prompt += f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"
@@ -78,7 +78,7 @@ def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,
     { "role": "user", "content": f"{formatted_prompt}" },
     ]
 
-    stream = client.text_generation(
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
     output = ""
     for response in stream:
         output += response.token.text