Spaces: Running on Zero
Commit: Change both tokenizers (#8) (28b13b0e48cb0747a81c590071935e1b3c8c0237)
model.py
CHANGED
|
@@ -33,7 +33,7 @@ def get_prompt(message: str, chat_history: list[tuple[str, str]],
|
|
| 33 |
|
| 34 |
def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int:
    """Return the number of tokens in the fully assembled chat prompt.

    The prompt is built by get_prompt and — per this commit — tokenized with
    add_special_tokens=False, presumably because get_prompt already embeds the
    model's special tokens and adding them again would inflate the count
    (NOTE(review): confirm against get_prompt's template).

    Args:
        message: The latest user message.
        chat_history: Prior (user, assistant) message pairs.
        system_prompt: The system prompt to prepend.

    Returns:
        The token count of the encoded prompt.
    """
    prompt = get_prompt(message, chat_history, system_prompt)
    # 'np' tensors are enough here — we only need the sequence length,
    # so no GPU transfer is involved.
    input_ids = tokenizer([prompt], return_tensors='np', add_special_tokens=False)['input_ids']
    return input_ids.shape[-1]
|
@@ -45,7 +45,7 @@ def run(message: str,
|
|
| 45 |
top_p: float = 0.95,
|
| 46 |
top_k: int = 50) -> Iterator[str]:
|
| 47 |
prompt = get_prompt(message, chat_history, system_prompt)
|
| 48 |
-
inputs = tokenizer([prompt], return_tensors='pt').to('cuda')
|
| 49 |
|
| 50 |
streamer = TextIteratorStreamer(tokenizer,
|
| 51 |
timeout=10.,
|
|
|
|
| 33 |
|
| 34 |
def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int:
    """Count the tokens the assembled chat prompt occupies.

    Builds the prompt from the message, history and system prompt, encodes it
    without letting the tokenizer insert special tokens, and reports the
    length of the resulting id sequence.

    Args:
        message: The latest user message.
        chat_history: Prior (user, assistant) message pairs.
        system_prompt: The system prompt to prepend.

    Returns:
        Number of input ids produced for the prompt.
    """
    full_prompt = get_prompt(message, chat_history, system_prompt)
    encoded = tokenizer([full_prompt], return_tensors='np', add_special_tokens=False)
    return encoded['input_ids'].shape[-1]
|
|
|
|
| 45 |
top_p: float = 0.95,
|
| 46 |
top_k: int = 50) -> Iterator[str]:
|
| 47 |
prompt = get_prompt(message, chat_history, system_prompt)
|
| 48 |
+
inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to('cuda')
|
| 49 |
|
| 50 |
streamer = TextIteratorStreamer(tokenizer,
|
| 51 |
timeout=10.,
|