import os

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

LORA_MODEL_PATH = "./checkpoints/archer_Qwen3-14B_rsa"
BASE_MODEL_NAME = "/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_rsa/merged_model"


def merge_lora(base_model_name, lora_path, output_path):
    """Merge a LoRA adapter into its base model and save the standalone result."""
    # Load the base model in fp16, sharding it across the available devices.
    base_model = AutoModelForCausalLM.from_pretrained(
        base_model_name,
        return_dict=True,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(base_model_name)

    # Attach the LoRA adapter, then fold its weights into the base model
    # and strip the PEFT wrappers so a plain transformers model remains.
    lora_model = PeftModel.from_pretrained(base_model, lora_path)
    merged_model = lora_model.merge_and_unload()

    # Save the merged weights together with the tokenizer.
    merged_model.save_pretrained(output_path)
    tokenizer.save_pretrained(output_path)
    print(f"Merged model saved to {output_path}")


if __name__ == "__main__":
    merge_lora(BASE_MODEL_NAME, LORA_MODEL_PATH, os.path.join(LORA_MODEL_PATH, "merged_model"))
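
# Usage (the script file name is illustrative):
#   python merge_lora.py
# The merged checkpoint is written to <LORA_MODEL_PATH>/merged_model.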