Update app.py
app.py CHANGED
@@ -144,17 +144,8 @@ class Translators:
         else:
             prompt = f"Translate the following segment into {self.tl}, without additional explanation.\n\n{self.input_text}."
         tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-        model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto",
+        model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto", dtype=torch.bfloat16)
         messages = [{"role": "user", "content": prompt}]
-        # tokenized_chat = tokenizer.apply_chat_template(
-        #     messages,
-        #     tokenize=True,
-        #     add_generation_prompt=True,
-        #     return_tensors="pt"
-        # )
-        # outputs = model.generate(tokenized_chat.to(model.device), max_new_tokens=512, top_k=20, top_p=0.6, repetition_penalty=1.05, temperature=0.7)
-        # output_text = tokenizer.decode(outputs[0])
-        # return output_text
         # Tokenize the conversation
         tokenized_chat = tokenizer.apply_chat_template(
             messages,
@@ -169,14 +160,18 @@ class Translators:
             tokenized_chat.to(model.device),
             max_new_tokens=512,
             temperature=temperature,
-
+            top_k=20,
+            top_p=0.95,
+            repetition_penalty=1.05,
             do_sample=True if temperature > 0 else False,
             pad_token_id=tokenizer.eos_token_id
         )
-
+
+        # outputs = model.generate(tokenized_chat.to(model.device), max_new_tokens=512, top_k=20, top_p=0.6, repetition_penalty=1.05, temperature=0.7)
+        output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
         # Decode only the new tokens
-
-        return
+        # output_text = tokenizer.decode(outputs[0][tokenized_chat.shape[-1]:], skip_special_tokens=True)
+        return output_text
 
     def HelsinkiNLP_mulroa(self):
         try: