from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "scb10x/llama-3-typhoon-v1.5-8b-instruct"

# Load the tokenizer and model; bfloat16 halves the memory footprint and
# device_map="auto" places the weights across available GPUs.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

messages = [
    {"role": "system", "content": "You are a helpful assistant who always speaks Thai."},
    {"role": "user", "content": "ขอสูตรไก่ย่าง"},  # Thai: "Please give me a grilled chicken recipe"
]

# Render the conversation with the model's chat template and append the
# assistant header so generation starts a fresh reply.
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

# Llama-3-style models end each turn with <|eot_id|>, so treat it as an
# additional stop token alongside the regular EOS token.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

# Sample a response; a low temperature with nucleus sampling keeps the
# output focused while still allowing some variety.
outputs = model.generate(
    input_ids,
    max_new_tokens=512,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.4,
    top_p=0.9,
)

# Slice off the prompt tokens so only the newly generated reply is decoded.
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
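
# Optional: stream the reply token by token instead of waiting for the full
# generation to finish. This is a minimal sketch using transformers'
# TextStreamer; it reuses the model, tokenizer, input_ids, and terminators
# defined above and keeps the same sampling settings.
from transformers import TextStreamer

# skip_prompt hides the input prompt; decoded text is printed as it arrives.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(
    input_ids,
    max_new_tokens=512,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.4,
    top_p=0.9,
    streamer=streamer,
)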