Update app.py
app.py
CHANGED
@@ -16,14 +16,14 @@ else:
 @spaces.GPU
 def generate_response(passage: str, question: str) -> str:
     # Prepare the input text by combining the passage and question
-    message = [f"Passage: {passage}\nQuestion: {question}\nAnswer:
+    message = [f"Passage: {passage}\nQuestion: {question}\nAnswer:"]
     inputs = tokenizer(message, return_tensors='pt', return_token_type_ids=False).to('cuda')
 
     response = model.generate(**inputs, max_new_tokens=100)
 
     response = tokenizer.batch_decode(response, skip_special_tokens=True)[0]
 
-    response = response[len(message[0]):].strip()
+    response = response[len(message[0]):].strip().split('\n')[0]
 
     return response
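The commit makes two fixes to generate_response: it closes the previously unterminated f-string list on line 19 (a syntax error), and it truncates the decoded completion at the first newline on line 26 so only the first line of the model's answer is returned. For context, here is a minimal runnable sketch of how the fixed function might sit in the full app.py. The checkpoint name, the model/tokenizer setup, and the Gradio wiring are assumptions for illustration; the diff above only shows the function body, not the Space's actual surrounding code.

# Hypothetical surrounding app.py; checkpoint and Gradio wiring are
# assumptions, not part of the commit shown above.
import spaces
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "gpt2"  # placeholder; the Space's real checkpoint is not shown in the diff

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.float16).to("cuda")

@spaces.GPU
def generate_response(passage: str, question: str) -> str:
    # Prepare the input text by combining the passage and question
    message = [f"Passage: {passage}\nQuestion: {question}\nAnswer:"]
    inputs = tokenizer(message, return_tensors='pt', return_token_type_ids=False).to('cuda')

    response = model.generate(**inputs, max_new_tokens=100)

    response = tokenizer.batch_decode(response, skip_special_tokens=True)[0]

    # Strip the echoed prompt, then keep only the first line of the completion
    response = response[len(message[0]):].strip().split('\n')[0]

    return response

demo = gr.Interface(fn=generate_response, inputs=["text", "text"], outputs="text")
demo.launch()

Splitting on the first newline is a cheap way to stop a plain causal LM from rambling into a follow-on "Passage:"/"Question:" block of its own; a heavier-weight alternative would be a custom StoppingCriteria in transformers, but for a short extractive answer the string split is usually enough.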