# OfflineArcher / test.py
# Uploaded by Jessie09 — commit "Upload dataset" (8014d08, verified)
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import os
# Configuration - replace these with your actual paths.
# Path to the trained LoRA adapter checkpoint (PEFT format).
LORA_MODEL_PATH = "./checkpoints/archer_Qwen3-14B_rsa" # e.g., "./lora_output"
# Base model the adapter was trained on (here: an already-merged checkpoint).
BASE_MODEL_NAME = "/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_rsa/merged_model" # e.g., "./merged_model"
def merge_lora(base_model_name, lora_path, output_path):
    """Merge a LoRA adapter into its base model and save a standalone copy.

    Loads the base causal LM in fp16, attaches the PEFT adapter from
    ``lora_path``, folds the adapter deltas into the base weights, and
    writes the merged model plus tokenizer to ``output_path``.
    """
    # Base model in fp16, auto-sharded across whatever devices are available.
    model = AutoModelForCausalLM.from_pretrained(
        base_model_name,
        return_dict=True,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(base_model_name)

    # Attach the adapter, then merge its weights into the base so the result
    # no longer depends on PEFT at inference time.
    adapter = PeftModel.from_pretrained(model, lora_path)
    merged = adapter.merge_and_unload()

    # Save model and tokenizer together so the output dir is self-contained.
    merged.save_pretrained(output_path)
    tokenizer.save_pretrained(output_path)
    print(f"Merged model saved to {output_path}")
if __name__ == "__main__":
    # Write the merged weights into a subfolder of the adapter checkpoint.
    merged_dir = os.path.join(LORA_MODEL_PATH, "merged_model")
    merge_lora(BASE_MODEL_NAME, LORA_MODEL_PATH, merged_dir)