Update README.md
README.md CHANGED

@@ -39,7 +39,7 @@ text = tokenizer.apply_chat_template(
     messages,
     tokenize=False,
     add_generation_prompt=True,
-    enable_thinking=True
+    enable_thinking=True
 )
 model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
 
@@ -52,7 +52,6 @@ output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
 
 # parsing thinking content
 try:
-    # Find the </think> token (token id: 151668)
     index = len(output_ids) - output_ids[::-1].index(151668)
 except ValueError:
     index = 0
@@ -60,8 +59,8 @@ except ValueError:
 thinking_content = tokenizer.decode(output_ids[:index], skip_special_tokens=True).strip("\n")
 content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")
 
-print("thinking content:", thinking_content)
-print("content:", content)
+print("psychologist thinking content:", thinking_content)
+print("psychologist content:", content)
 ```
 
 ---