Spaces: Build error
Commit · 7144739
Parent(s): 8866047
sentence creating on stream
app.py CHANGED
@@ -12,7 +12,7 @@ model_path = hf_hub_download(model_name, filename=model_file)
 llm = AutoModelForCausalLM.from_pretrained(model_name, model_file=model_file,
                                            model_type="mistral", gpu_layers=0)
 def alternatingly_agree(message, history):
-
+    outputs = []
     prompt = """<s> சரியான பதிலுடன் வேலையை வெற்றிகரமாக முடிக்க. தேவையான தகவலை உள்ளிடவும்.
 
 ### Instruction:
@@ -24,8 +24,9 @@ def alternatingly_agree(message, history):
 
     prompt = prompt.format(message)
     result = llm(prompt,max_new_tokens=50,temperature=0.7,stream=True)
-    for
-
+    for token in result:
+        outputs.append(token)
+        yield "".join(outputs)
 
 
 
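The added loop turns alternatingly_agree into a generator: each streamed token is appended to outputs and the partial sentence built so far is yielded, which is what lets a chat UI render the reply word by word. (The Tamil prompt text reads roughly: "Complete the task successfully with the correct answer. Enter the required information.") Below is a minimal sketch of how such a generator is typically wired up end to end; the model_name/model_file values and the gr.ChatInterface call are assumptions for illustration, not part of this commit.

# Minimal sketch, not the Space's actual app.py: the model repo, model file,
# and the Gradio wiring below are assumed for illustration.
from ctransformers import AutoModelForCausalLM
import gradio as gr

model_name = "TheBloke/Mistral-7B-Instruct-v0.1-GGUF"   # hypothetical model repo
model_file = "mistral-7b-instruct-v0.1.Q4_K_M.gguf"     # hypothetical GGUF file

llm = AutoModelForCausalLM.from_pretrained(model_name, model_file=model_file,
                                           model_type="mistral", gpu_layers=0)

def stream_reply(message, history):
    outputs = []
    # stream=True makes ctransformers yield tokens one at a time instead of
    # returning the whole completion as a single string.
    for token in llm(message, max_new_tokens=50, temperature=0.7, stream=True):
        outputs.append(token)
        yield "".join(outputs)   # yield the sentence built so far on every token

# gr.ChatInterface accepts a generator fn(message, history) and re-renders the
# chat bubble each time it yields, which produces the streaming effect.
gr.ChatInterface(stream_reply).launch()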