"""Quick generation demo for the ReDistill-Qwen-1.5B chat model.

Downloads the model, applies the chat template to a single user turn,
samples up to 1024 new tokens, and prints the decoded reply.
"""
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


def main() -> None:
    """Load the model, run one chat-templated generation, print the reply."""
    compute_dtype = torch.bfloat16
    # Fall back to CPU so the script still runs on machines without a GPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model_id = "mobiuslabsgmbh/DeepSeek-R1-ReDistill-Qwen-1.5B-v1.0"

    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=compute_dtype,
        attn_implementation="sdpa",
        device_map=device,
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # add_generation_prompt=True appends the assistant-turn marker so the
    # model continues as the assistant rather than echoing the user turn.
    chat = tokenizer.apply_chat_template(
        [{"role": "user", "content": "What is 1.5+102.2?"}],
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    )

    # inference_mode disables autograd bookkeeping — generation never
    # needs gradients, and this reduces memory use during decoding.
    with torch.inference_mode():
        outputs = model.generate(chat.to(device), max_new_tokens=1024, do_sample=True)

    # skip_special_tokens drops template/control tokens from the printout.
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))


if __name__ == "__main__":
    main()