Gresham committed · 18308c4
Parent(s): aecf6f1

fix: load dataset error

llama-fine-tuning-QLoRA.py  +13 -7
llama-fine-tuning-QLoRA.py  CHANGED

@@ -5,7 +5,8 @@ os.chdir(os.path.dirname(__file__))
 
 # Import the necessary libraries
 import torch
-
+import json
+from datasets import Dataset
 from transformers import (
     AutoModelForCausalLM,  # used to load the pretrained language model
     AutoTokenizer,  # used to load the tokenizer that matches the model
@@ -15,7 +16,8 @@ from transformers import (
     pipeline,  # used to build a pipeline for the model
     logging,  # used for logging
 )
-from 
+from huggingface_hub import hf_hub_download
+from peft import LoraConfig  # used to configure and load the QLoRA model
 from trl import SFTTrainer  # Trainer for supervised fine-tuning
 
 # Set the name of the pretrained model
@@ -108,12 +110,16 @@ packing = False
 device_map = {"": 0}
 
 # Load the dataset
-dataset = load_dataset(path="json", data_dir="./num_list", data_files="num_list_500_per_sample_100_length.json")
-fine_tune_dataset = []
 print("Loading dataset...")
-
-
-
+REPO_ID = "TreeAILab/NumericBench"
+dataset_name = 'num_list/num_list_500_per_sample_100_length.json'
+with open(hf_hub_download(repo_id=REPO_ID, filename=dataset_name, repo_type="dataset")) as f:
+    dataset = json.load(f)
+fine_tune_dataset = []
+
+for instance in dataset["data"]:
+    prompt = dataset["system_prompt"] + "\n\n" + dataset["description"] + "\nQuestion: " + instance["question"] + "\nData: " + instance["struct_data"]
+    answer = instance["answer"]
     completion = f"The answer is {answer}."
     fine_tune_dataset.append({"prompt": prompt, "completion": completion})
 
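
For context, the fix swaps the failing load_dataset(path="json", ...) call (the commit message reports a load error) for a direct hf_hub_download of the raw NumericBench JSON followed by json.load, and rebuilds the prompt/completion pairs from the parsed dict. Below is a minimal, self-contained sketch of that loading path, assuming the JSON layout implied by the diff (top-level "system_prompt", "description", and "data" keys; per-instance "question", "struct_data", and "answer" fields). Wrapping the list in a datasets.Dataset at the end is an assumption suggested by the new `from datasets import Dataset` import; that step is not shown in this diff.

import json

from datasets import Dataset
from huggingface_hub import hf_hub_download

REPO_ID = "TreeAILab/NumericBench"
dataset_name = "num_list/num_list_500_per_sample_100_length.json"

# Download the raw JSON file from the dataset repo and parse it directly,
# rather than going through load_dataset(path="json", ...).
local_path = hf_hub_download(repo_id=REPO_ID, filename=dataset_name, repo_type="dataset")
with open(local_path) as f:
    dataset = json.load(f)

# Build prompt/completion pairs for supervised fine-tuning.
fine_tune_dataset = []
for instance in dataset["data"]:
    prompt = (
        dataset["system_prompt"]
        + "\n\n" + dataset["description"]
        + "\nQuestion: " + instance["question"]
        + "\nData: " + instance["struct_data"]
    )
    completion = f"The answer is {instance['answer']}."
    fine_tune_dataset.append({"prompt": prompt, "completion": completion})

# Assumed follow-up (not in the diff): convert the list of dicts into a
# Dataset so it can be passed to the SFTTrainer as train_dataset.
train_dataset = Dataset.from_list(fine_tune_dataset)
print(f"Loaded {len(train_dataset)} prompt/completion pairs")

Recent versions of TRL's SFTTrainer accept records with "prompt" and "completion" columns directly, so Dataset.from_list(fine_tune_dataset) can plausibly be passed as train_dataset; treat the exact trainer wiring as an assumption, since it sits outside the hunks in this commit.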