Upload 36 files
Browse files- llava1_6-vicuna-7b-instruct/README.md +202 -0
- llava1_6-vicuna-7b-instruct/adapter_config.json +26 -0
- llava1_6-vicuna-7b-instruct/adapter_model.safetensors +3 -0
- llava1_6-vicuna-7b-instruct/additional_config.json +1 -0
- llava1_6-vicuna-7b-instruct/configuration.json +11 -0
- llava1_6-vicuna-7b-instruct/generation_config.json +7 -0
- llava1_6-vicuna-7b-instruct/optimizer.pt +3 -0
- llava1_6-vicuna-7b-instruct/rng_state.pth +3 -0
- llava1_6-vicuna-7b-instruct/scheduler.pt +3 -0
- llava1_6-vicuna-7b-instruct/sft_args.json +247 -0
- llava1_6-vicuna-7b-instruct/trainer_state.json +1084 -0
- llava1_6-vicuna-7b-instruct/training_args.bin +3 -0
- minicpm-v-v2_6-chat/README.md +202 -0
- minicpm-v-v2_6-chat/adapter_config.json +26 -0
- minicpm-v-v2_6-chat/adapter_model.safetensors +3 -0
- minicpm-v-v2_6-chat/additional_config.json +1 -0
- minicpm-v-v2_6-chat/configuration.json +13 -0
- minicpm-v-v2_6-chat/generation_config.json +7 -0
- minicpm-v-v2_6-chat/optimizer.pt +3 -0
- minicpm-v-v2_6-chat/rng_state.pth +3 -0
- minicpm-v-v2_6-chat/scheduler.pt +3 -0
- minicpm-v-v2_6-chat/sft_args.json +247 -0
- minicpm-v-v2_6-chat/trainer_state.json +1202 -0
- minicpm-v-v2_6-chat/training_args.bin +3 -0
- qwen2-vl-7b-instruct/README.md +202 -0
- qwen2-vl-7b-instruct/adapter_config.json +26 -0
- qwen2-vl-7b-instruct/adapter_model.safetensors +3 -0
- qwen2-vl-7b-instruct/additional_config.json +1 -0
- qwen2-vl-7b-instruct/configuration.json +14 -0
- qwen2-vl-7b-instruct/generation_config.json +11 -0
- qwen2-vl-7b-instruct/optimizer.pt +3 -0
- qwen2-vl-7b-instruct/rng_state.pth +3 -0
- qwen2-vl-7b-instruct/scheduler.pt +3 -0
- qwen2-vl-7b-instruct/sft_args.json +247 -0
- qwen2-vl-7b-instruct/trainer_state.json +1202 -0
- qwen2-vl-7b-instruct/training_args.bin +3 -0
llava1_6-vicuna-7b-instruct/README.md
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
base_model: /fs/clip-projects/geoguesser/vlms/llava/llava-v1.6-vicuna-7b-hf
|
| 3 |
+
library_name: peft
|
| 4 |
+
---
|
| 5 |
+
|
| 6 |
+
# Model Card for Model ID
|
| 7 |
+
|
| 8 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
## Model Details
|
| 13 |
+
|
| 14 |
+
### Model Description
|
| 15 |
+
|
| 16 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
- **Developed by:** [More Information Needed]
|
| 21 |
+
- **Funded by [optional]:** [More Information Needed]
|
| 22 |
+
- **Shared by [optional]:** [More Information Needed]
|
| 23 |
+
- **Model type:** [More Information Needed]
|
| 24 |
+
- **Language(s) (NLP):** [More Information Needed]
|
| 25 |
+
- **License:** [More Information Needed]
|
| 26 |
+
- **Finetuned from model [optional]:** [More Information Needed]
|
| 27 |
+
|
| 28 |
+
### Model Sources [optional]
|
| 29 |
+
|
| 30 |
+
<!-- Provide the basic links for the model. -->
|
| 31 |
+
|
| 32 |
+
- **Repository:** [More Information Needed]
|
| 33 |
+
- **Paper [optional]:** [More Information Needed]
|
| 34 |
+
- **Demo [optional]:** [More Information Needed]
|
| 35 |
+
|
| 36 |
+
## Uses
|
| 37 |
+
|
| 38 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
| 39 |
+
|
| 40 |
+
### Direct Use
|
| 41 |
+
|
| 42 |
+
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
|
| 43 |
+
|
| 44 |
+
[More Information Needed]
|
| 45 |
+
|
| 46 |
+
### Downstream Use [optional]
|
| 47 |
+
|
| 48 |
+
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
|
| 49 |
+
|
| 50 |
+
[More Information Needed]
|
| 51 |
+
|
| 52 |
+
### Out-of-Scope Use
|
| 53 |
+
|
| 54 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
| 55 |
+
|
| 56 |
+
[More Information Needed]
|
| 57 |
+
|
| 58 |
+
## Bias, Risks, and Limitations
|
| 59 |
+
|
| 60 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
| 61 |
+
|
| 62 |
+
[More Information Needed]
|
| 63 |
+
|
| 64 |
+
### Recommendations
|
| 65 |
+
|
| 66 |
+
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
|
| 67 |
+
|
| 68 |
+
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
|
| 69 |
+
|
| 70 |
+
## How to Get Started with the Model
|
| 71 |
+
|
| 72 |
+
Use the code below to get started with the model.
|
| 73 |
+
|
| 74 |
+
[More Information Needed]
|
| 75 |
+
|
| 76 |
+
## Training Details
|
| 77 |
+
|
| 78 |
+
### Training Data
|
| 79 |
+
|
| 80 |
+
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
|
| 81 |
+
|
| 82 |
+
[More Information Needed]
|
| 83 |
+
|
| 84 |
+
### Training Procedure
|
| 85 |
+
|
| 86 |
+
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
|
| 87 |
+
|
| 88 |
+
#### Preprocessing [optional]
|
| 89 |
+
|
| 90 |
+
[More Information Needed]
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
#### Training Hyperparameters
|
| 94 |
+
|
| 95 |
+
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
|
| 96 |
+
|
| 97 |
+
#### Speeds, Sizes, Times [optional]
|
| 98 |
+
|
| 99 |
+
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
|
| 100 |
+
|
| 101 |
+
[More Information Needed]
|
| 102 |
+
|
| 103 |
+
## Evaluation
|
| 104 |
+
|
| 105 |
+
<!-- This section describes the evaluation protocols and provides the results. -->
|
| 106 |
+
|
| 107 |
+
### Testing Data, Factors & Metrics
|
| 108 |
+
|
| 109 |
+
#### Testing Data
|
| 110 |
+
|
| 111 |
+
<!-- This should link to a Dataset Card if possible. -->
|
| 112 |
+
|
| 113 |
+
[More Information Needed]
|
| 114 |
+
|
| 115 |
+
#### Factors
|
| 116 |
+
|
| 117 |
+
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
|
| 118 |
+
|
| 119 |
+
[More Information Needed]
|
| 120 |
+
|
| 121 |
+
#### Metrics
|
| 122 |
+
|
| 123 |
+
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
|
| 124 |
+
|
| 125 |
+
[More Information Needed]
|
| 126 |
+
|
| 127 |
+
### Results
|
| 128 |
+
|
| 129 |
+
[More Information Needed]
|
| 130 |
+
|
| 131 |
+
#### Summary
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
## Model Examination [optional]
|
| 136 |
+
|
| 137 |
+
<!-- Relevant interpretability work for the model goes here -->
|
| 138 |
+
|
| 139 |
+
[More Information Needed]
|
| 140 |
+
|
| 141 |
+
## Environmental Impact
|
| 142 |
+
|
| 143 |
+
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
|
| 144 |
+
|
| 145 |
+
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
|
| 146 |
+
|
| 147 |
+
- **Hardware Type:** [More Information Needed]
|
| 148 |
+
- **Hours used:** [More Information Needed]
|
| 149 |
+
- **Cloud Provider:** [More Information Needed]
|
| 150 |
+
- **Compute Region:** [More Information Needed]
|
| 151 |
+
- **Carbon Emitted:** [More Information Needed]
|
| 152 |
+
|
| 153 |
+
## Technical Specifications [optional]
|
| 154 |
+
|
| 155 |
+
### Model Architecture and Objective
|
| 156 |
+
|
| 157 |
+
[More Information Needed]
|
| 158 |
+
|
| 159 |
+
### Compute Infrastructure
|
| 160 |
+
|
| 161 |
+
[More Information Needed]
|
| 162 |
+
|
| 163 |
+
#### Hardware
|
| 164 |
+
|
| 165 |
+
[More Information Needed]
|
| 166 |
+
|
| 167 |
+
#### Software
|
| 168 |
+
|
| 169 |
+
[More Information Needed]
|
| 170 |
+
|
| 171 |
+
## Citation [optional]
|
| 172 |
+
|
| 173 |
+
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
|
| 174 |
+
|
| 175 |
+
**BibTeX:**
|
| 176 |
+
|
| 177 |
+
[More Information Needed]
|
| 178 |
+
|
| 179 |
+
**APA:**
|
| 180 |
+
|
| 181 |
+
[More Information Needed]
|
| 182 |
+
|
| 183 |
+
## Glossary [optional]
|
| 184 |
+
|
| 185 |
+
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
|
| 186 |
+
|
| 187 |
+
[More Information Needed]
|
| 188 |
+
|
| 189 |
+
## More Information [optional]
|
| 190 |
+
|
| 191 |
+
[More Information Needed]
|
| 192 |
+
|
| 193 |
+
## Model Card Authors [optional]
|
| 194 |
+
|
| 195 |
+
[More Information Needed]
|
| 196 |
+
|
| 197 |
+
## Model Card Contact
|
| 198 |
+
|
| 199 |
+
[More Information Needed]
|
| 200 |
+
### Framework versions
|
| 201 |
+
|
| 202 |
+
- PEFT 0.12.0
|
llava1_6-vicuna-7b-instruct/adapter_config.json
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"alpha_pattern": {},
|
| 3 |
+
"auto_mapping": null,
|
| 4 |
+
"base_model_name_or_path": "/fs/clip-projects/geoguesser/vlms/llava/llava-v1.6-vicuna-7b-hf",
|
| 5 |
+
"bias": "none",
|
| 6 |
+
"fan_in_fan_out": false,
|
| 7 |
+
"inference_mode": true,
|
| 8 |
+
"init_lora_weights": true,
|
| 9 |
+
"layer_replication": null,
|
| 10 |
+
"layers_pattern": null,
|
| 11 |
+
"layers_to_transform": null,
|
| 12 |
+
"loftq_config": {},
|
| 13 |
+
"lora_alpha": 32,
|
| 14 |
+
"lora_dropout": 0.05,
|
| 15 |
+
"megatron_config": null,
|
| 16 |
+
"megatron_core": "megatron.core",
|
| 17 |
+
"modules_to_save": [],
|
| 18 |
+
"peft_type": "LORA",
|
| 19 |
+
"r": 8,
|
| 20 |
+
"rank_pattern": {},
|
| 21 |
+
"revision": null,
|
| 22 |
+
"target_modules": "^(language_model|multi_modal_projector)(?!.*(lm_head|output|emb|wte|shared)).*",
|
| 23 |
+
"task_type": "CAUSAL_LM",
|
| 24 |
+
"use_dora": false,
|
| 25 |
+
"use_rslora": false
|
| 26 |
+
}
|
llava1_6-vicuna-7b-instruct/adapter_model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8b66d04d5b5b9e30873fe7fbb4bd630ba8f28378484cdb0f6a6110e51a491d43
|
| 3 |
+
size 80446352
|
llava1_6-vicuna-7b-instruct/additional_config.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"lora_dtype": null, "lorap_lr_ratio": null, "lorap_emb_lr": 1e-06}
|
llava1_6-vicuna-7b-instruct/configuration.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"adapter_cfg": {
|
| 3 |
+
"model_id_or_path": "/fs/clip-projects/geoguesser/vlms/llava/llava-v1.6-vicuna-7b-hf",
|
| 4 |
+
"model_revision": "master",
|
| 5 |
+
"sft_type": "lora",
|
| 6 |
+
"tuner_backend": "peft",
|
| 7 |
+
"template_type": "llava-vicuna",
|
| 8 |
+
"dtype": "bf16",
|
| 9 |
+
"system": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."
|
| 10 |
+
}
|
| 11 |
+
}
|
llava1_6-vicuna-7b-instruct/generation_config.json
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bos_token_id": 1,
|
| 3 |
+
"eos_token_id": 2,
|
| 4 |
+
"max_new_tokens": 2048,
|
| 5 |
+
"pad_token_id": 0,
|
| 6 |
+
"transformers_version": "4.45.1"
|
| 7 |
+
}
|
llava1_6-vicuna-7b-instruct/optimizer.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:52792b98d009058a00eb4e56e3e140b84b88c4d07f4ba35a02deaa49a20f9351
|
| 3 |
+
size 161140126
|
llava1_6-vicuna-7b-instruct/rng_state.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cf85ab928113c942e29e418bc5b119c6dfca9cde55e4b30382d2ed66445b9953
|
| 3 |
+
size 14244
|
llava1_6-vicuna-7b-instruct/scheduler.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b75bb0cab5c1ac64f1f42e99abb3e1c6d095394f6dd0c73c81aee621b476d6d9
|
| 3 |
+
size 1064
|
llava1_6-vicuna-7b-instruct/sft_args.json
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"model_type": "llava1_6-vicuna-7b-instruct",
|
| 3 |
+
"model_id_or_path": "/fs/clip-projects/geoguesser/vlms/llava/llava-v1.6-vicuna-7b-hf",
|
| 4 |
+
"model_revision": "master",
|
| 5 |
+
"full_determinism": false,
|
| 6 |
+
"sft_type": "lora",
|
| 7 |
+
"freeze_parameters": [],
|
| 8 |
+
"freeze_vit": false,
|
| 9 |
+
"freeze_parameters_ratio": 0.0,
|
| 10 |
+
"additional_trainable_parameters": [],
|
| 11 |
+
"tuner_backend": "peft",
|
| 12 |
+
"template_type": "llava-vicuna",
|
| 13 |
+
"output_dir": "/fs/clip-projects/geoguesser/vlms/llava/output/llava1_6-vicuna-7b-instruct/v10-20241108-045625",
|
| 14 |
+
"add_output_dir_suffix": true,
|
| 15 |
+
"ddp_backend": null,
|
| 16 |
+
"ddp_find_unused_parameters": null,
|
| 17 |
+
"ddp_broadcast_buffers": null,
|
| 18 |
+
"ddp_timeout": 1800,
|
| 19 |
+
"seed": 42,
|
| 20 |
+
"resume_from_checkpoint": null,
|
| 21 |
+
"resume_only_model": false,
|
| 22 |
+
"ignore_data_skip": false,
|
| 23 |
+
"dtype": "bf16",
|
| 24 |
+
"packing": false,
|
| 25 |
+
"train_backend": "transformers",
|
| 26 |
+
"tp": 1,
|
| 27 |
+
"pp": 1,
|
| 28 |
+
"min_lr": null,
|
| 29 |
+
"sequence_parallel": false,
|
| 30 |
+
"model_kwargs": null,
|
| 31 |
+
"loss_name": null,
|
| 32 |
+
"dataset": [
|
| 33 |
+
"train_set_a.jsonl"
|
| 34 |
+
],
|
| 35 |
+
"val_dataset": [
|
| 36 |
+
"test_set_a.jsonl"
|
| 37 |
+
],
|
| 38 |
+
"dataset_seed": 42,
|
| 39 |
+
"dataset_test_ratio": 0.0,
|
| 40 |
+
"use_loss_scale": false,
|
| 41 |
+
"loss_scale_config_path": "/fs/clip-projects/geoguesser/zheyuan/DPO/swift/swift/llm/agent/default_loss_scale_config.json",
|
| 42 |
+
"system": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.",
|
| 43 |
+
"tools_prompt": "react_en",
|
| 44 |
+
"max_length": 2048,
|
| 45 |
+
"truncation_strategy": "delete",
|
| 46 |
+
"check_dataset_strategy": "none",
|
| 47 |
+
"streaming": false,
|
| 48 |
+
"streaming_val_size": 0,
|
| 49 |
+
"streaming_buffer_size": 16384,
|
| 50 |
+
"model_name": [
|
| 51 |
+
null,
|
| 52 |
+
null
|
| 53 |
+
],
|
| 54 |
+
"model_author": [
|
| 55 |
+
null,
|
| 56 |
+
null
|
| 57 |
+
],
|
| 58 |
+
"quant_method": null,
|
| 59 |
+
"quantization_bit": 0,
|
| 60 |
+
"hqq_axis": 0,
|
| 61 |
+
"hqq_dynamic_config_path": null,
|
| 62 |
+
"bnb_4bit_comp_dtype": "bf16",
|
| 63 |
+
"bnb_4bit_quant_type": "nf4",
|
| 64 |
+
"bnb_4bit_use_double_quant": true,
|
| 65 |
+
"bnb_4bit_quant_storage": null,
|
| 66 |
+
"rescale_image": -1,
|
| 67 |
+
"target_modules": "^(language_model|multi_modal_projector)(?!.*(lm_head|output|emb|wte|shared)).*",
|
| 68 |
+
"target_regex": null,
|
| 69 |
+
"modules_to_save": [],
|
| 70 |
+
"lora_rank": 8,
|
| 71 |
+
"lora_alpha": 32,
|
| 72 |
+
"lora_dropout": 0.05,
|
| 73 |
+
"lora_bias_trainable": "none",
|
| 74 |
+
"lora_dtype": null,
|
| 75 |
+
"lora_lr_ratio": null,
|
| 76 |
+
"use_rslora": false,
|
| 77 |
+
"use_dora": false,
|
| 78 |
+
"init_lora_weights": true,
|
| 79 |
+
"fourier_n_frequency": 2000,
|
| 80 |
+
"fourier_scaling": 300.0,
|
| 81 |
+
"rope_scaling": null,
|
| 82 |
+
"boft_block_size": 4,
|
| 83 |
+
"boft_block_num": 0,
|
| 84 |
+
"boft_n_butterfly_factor": 1,
|
| 85 |
+
"boft_dropout": 0.0,
|
| 86 |
+
"vera_rank": 256,
|
| 87 |
+
"vera_projection_prng_key": 0,
|
| 88 |
+
"vera_dropout": 0.0,
|
| 89 |
+
"vera_d_initial": 0.1,
|
| 90 |
+
"adapter_act": "gelu",
|
| 91 |
+
"adapter_length": 128,
|
| 92 |
+
"use_galore": false,
|
| 93 |
+
"galore_target_modules": null,
|
| 94 |
+
"galore_rank": 128,
|
| 95 |
+
"galore_update_proj_gap": 50,
|
| 96 |
+
"galore_scale": 1.0,
|
| 97 |
+
"galore_proj_type": "std",
|
| 98 |
+
"galore_optim_per_parameter": false,
|
| 99 |
+
"galore_with_embedding": false,
|
| 100 |
+
"galore_quantization": false,
|
| 101 |
+
"galore_proj_quant": false,
|
| 102 |
+
"galore_proj_bits": 4,
|
| 103 |
+
"galore_proj_group_size": 256,
|
| 104 |
+
"galore_cos_threshold": 0.4,
|
| 105 |
+
"galore_gamma_proj": 2,
|
| 106 |
+
"galore_queue_size": 5,
|
| 107 |
+
"adalora_target_r": 8,
|
| 108 |
+
"adalora_init_r": 12,
|
| 109 |
+
"adalora_tinit": 0,
|
| 110 |
+
"adalora_tfinal": 0,
|
| 111 |
+
"adalora_deltaT": 1,
|
| 112 |
+
"adalora_beta1": 0.85,
|
| 113 |
+
"adalora_beta2": 0.85,
|
| 114 |
+
"adalora_orth_reg_weight": 0.5,
|
| 115 |
+
"ia3_feedforward_modules": [],
|
| 116 |
+
"llamapro_num_new_blocks": 4,
|
| 117 |
+
"llamapro_num_groups": null,
|
| 118 |
+
"neftune_noise_alpha": null,
|
| 119 |
+
"neftune_backend": "transformers",
|
| 120 |
+
"lisa_activated_layers": 0,
|
| 121 |
+
"lisa_step_interval": 20,
|
| 122 |
+
"reft_layer_key": null,
|
| 123 |
+
"reft_layers": null,
|
| 124 |
+
"reft_rank": 4,
|
| 125 |
+
"reft_intervention_type": "LoreftIntervention",
|
| 126 |
+
"reft_args": null,
|
| 127 |
+
"use_liger": false,
|
| 128 |
+
"gradient_checkpointing": true,
|
| 129 |
+
"deepspeed": null,
|
| 130 |
+
"batch_size": 1,
|
| 131 |
+
"eval_batch_size": 1,
|
| 132 |
+
"auto_find_batch_size": false,
|
| 133 |
+
"num_train_epochs": 2,
|
| 134 |
+
"max_steps": -1,
|
| 135 |
+
"optim": "adamw_torch",
|
| 136 |
+
"adam_beta1": 0.9,
|
| 137 |
+
"adam_beta2": 0.95,
|
| 138 |
+
"adam_epsilon": 1e-08,
|
| 139 |
+
"learning_rate": 0.0001,
|
| 140 |
+
"weight_decay": 0.1,
|
| 141 |
+
"gradient_accumulation_steps": 4,
|
| 142 |
+
"max_grad_norm": 1,
|
| 143 |
+
"predict_with_generate": false,
|
| 144 |
+
"lr_scheduler_type": "cosine",
|
| 145 |
+
"lr_scheduler_kwargs": {},
|
| 146 |
+
"warmup_ratio": 0.05,
|
| 147 |
+
"warmup_steps": 0,
|
| 148 |
+
"eval_steps": 50,
|
| 149 |
+
"save_steps": 50,
|
| 150 |
+
"save_only_model": false,
|
| 151 |
+
"save_total_limit": 2,
|
| 152 |
+
"logging_steps": 5,
|
| 153 |
+
"acc_steps": 1,
|
| 154 |
+
"dataloader_num_workers": 1,
|
| 155 |
+
"dataloader_pin_memory": true,
|
| 156 |
+
"dataloader_drop_last": false,
|
| 157 |
+
"push_to_hub": false,
|
| 158 |
+
"hub_model_id": null,
|
| 159 |
+
"hub_token": null,
|
| 160 |
+
"hub_private_repo": false,
|
| 161 |
+
"hub_strategy": "every_save",
|
| 162 |
+
"test_oom_error": false,
|
| 163 |
+
"disable_tqdm": false,
|
| 164 |
+
"lazy_tokenize": true,
|
| 165 |
+
"preprocess_num_proc": 1,
|
| 166 |
+
"use_flash_attn": null,
|
| 167 |
+
"ignore_args_error": false,
|
| 168 |
+
"check_model_is_latest": true,
|
| 169 |
+
"logging_dir": "/fs/clip-projects/geoguesser/vlms/llava/output/llava1_6-vicuna-7b-instruct/v10-20241108-045625/runs",
|
| 170 |
+
"report_to": [
|
| 171 |
+
"tensorboard"
|
| 172 |
+
],
|
| 173 |
+
"acc_strategy": "token",
|
| 174 |
+
"save_on_each_node": false,
|
| 175 |
+
"evaluation_strategy": "steps",
|
| 176 |
+
"save_strategy": "steps",
|
| 177 |
+
"save_safetensors": true,
|
| 178 |
+
"gpu_memory_fraction": null,
|
| 179 |
+
"include_num_input_tokens_seen": false,
|
| 180 |
+
"local_repo_path": null,
|
| 181 |
+
"custom_register_path": null,
|
| 182 |
+
"custom_dataset_info": null,
|
| 183 |
+
"device_map_config": null,
|
| 184 |
+
"device_max_memory": [],
|
| 185 |
+
"max_new_tokens": 2048,
|
| 186 |
+
"do_sample": null,
|
| 187 |
+
"temperature": null,
|
| 188 |
+
"top_k": null,
|
| 189 |
+
"top_p": null,
|
| 190 |
+
"repetition_penalty": null,
|
| 191 |
+
"num_beams": 1,
|
| 192 |
+
"fsdp": "",
|
| 193 |
+
"fsdp_config": null,
|
| 194 |
+
"sequence_parallel_size": 1,
|
| 195 |
+
"model_layer_cls_name": null,
|
| 196 |
+
"metric_warmup_step": 0,
|
| 197 |
+
"fsdp_num": 1,
|
| 198 |
+
"per_device_train_batch_size": null,
|
| 199 |
+
"per_device_eval_batch_size": null,
|
| 200 |
+
"eval_strategy": null,
|
| 201 |
+
"self_cognition_sample": 0,
|
| 202 |
+
"train_dataset_mix_ratio": 0.0,
|
| 203 |
+
"train_dataset_mix_ds": [
|
| 204 |
+
"ms-bench"
|
| 205 |
+
],
|
| 206 |
+
"train_dataset_sample": -1,
|
| 207 |
+
"val_dataset_sample": null,
|
| 208 |
+
"safe_serialization": null,
|
| 209 |
+
"only_save_model": null,
|
| 210 |
+
"neftune_alpha": null,
|
| 211 |
+
"deepspeed_config_path": null,
|
| 212 |
+
"model_cache_dir": null,
|
| 213 |
+
"lora_dropout_p": null,
|
| 214 |
+
"lora_target_modules": [],
|
| 215 |
+
"lora_target_regex": null,
|
| 216 |
+
"lora_modules_to_save": [],
|
| 217 |
+
"boft_target_modules": [],
|
| 218 |
+
"boft_modules_to_save": [],
|
| 219 |
+
"vera_target_modules": [],
|
| 220 |
+
"vera_modules_to_save": [],
|
| 221 |
+
"ia3_target_modules": [],
|
| 222 |
+
"ia3_modules_to_save": [],
|
| 223 |
+
"custom_train_dataset_path": [],
|
| 224 |
+
"custom_val_dataset_path": [],
|
| 225 |
+
"device_map_config_path": null,
|
| 226 |
+
"push_hub_strategy": null,
|
| 227 |
+
"use_self_cognition": false,
|
| 228 |
+
"is_multimodal": true,
|
| 229 |
+
"is_vision": true,
|
| 230 |
+
"lora_use_embedding": false,
|
| 231 |
+
"lora_use_all": false,
|
| 232 |
+
"lora_m2s_use_embedding": false,
|
| 233 |
+
"lora_m2s_use_ln": false,
|
| 234 |
+
"torch_dtype": "torch.bfloat16",
|
| 235 |
+
"fp16": false,
|
| 236 |
+
"bf16": true,
|
| 237 |
+
"rank": -1,
|
| 238 |
+
"local_rank": -1,
|
| 239 |
+
"world_size": 1,
|
| 240 |
+
"local_world_size": 1,
|
| 241 |
+
"bnb_4bit_compute_dtype": "torch.bfloat16",
|
| 242 |
+
"load_in_4bit": false,
|
| 243 |
+
"load_in_8bit": false,
|
| 244 |
+
"train_sampler_random": true,
|
| 245 |
+
"train_type": "sft",
|
| 246 |
+
"training_args": "Seq2SeqTrainingArguments(output_dir='/fs/clip-projects/geoguesser/vlms/llava/output/llava1_6-vicuna-7b-instruct/v10-20241108-045625', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=1, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=4, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=0.0001, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1, num_train_epochs=2, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs={}, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/fs/clip-projects/geoguesser/vlms/llava/output/llava1_6-vicuna-7b-instruct/v10-20241108-045625/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<IntervalStrategy.STEPS: 'steps'>, save_steps=50, save_total_limit=2, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=50, dataloader_num_workers=1, dataloader_prefetch_factor=None, past_index=-1, run_name='/fs/clip-projects/geoguesser/vlms/llava/output/llava1_6-vicuna-7b-instruct/v10-20241108-045625', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed=None, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=False, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, eval_do_concat_batches=True, fp16_backend='auto', evaluation_strategy=None, push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, split_batches=None, include_tokens_per_second=False, 
include_num_input_tokens_seen=False, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=GenerationConfig {\n \"bos_token_id\": 1,\n \"eos_token_id\": 2,\n \"max_new_tokens\": 2048,\n \"pad_token_id\": 0\n}\n, acc_strategy='token', loss_name=None, additional_saved_files=[], train_sampler_random=True, metric_warmup_step=0, train_dataset_sample=-1)"
|
| 247 |
+
}
|
llava1_6-vicuna-7b-instruct/trainer_state.json
ADDED
|
@@ -0,0 +1,1084 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"best_metric": 1.41787565,
|
| 3 |
+
"best_model_checkpoint": "/fs/clip-projects/geoguesser/vlms/llava/output/llava1_6-vicuna-7b-instruct/v10-20241108-045625/checkpoint-534",
|
| 4 |
+
"epoch": 1.9962616822429906,
|
| 5 |
+
"eval_steps": 50,
|
| 6 |
+
"global_step": 534,
|
| 7 |
+
"is_hyper_param_search": false,
|
| 8 |
+
"is_local_process_zero": true,
|
| 9 |
+
"is_world_process_zero": true,
|
| 10 |
+
"log_history": [
|
| 11 |
+
{
|
| 12 |
+
"epoch": 0.003738317757009346,
|
| 13 |
+
"grad_norm": 0.7382091283798218,
|
| 14 |
+
"learning_rate": 3.7037037037037037e-06,
|
| 15 |
+
"loss": 1.75865197,
|
| 16 |
+
"memory(GiB)": 21.51,
|
| 17 |
+
"step": 1,
|
| 18 |
+
"train_speed(iter/s)": 0.03225
|
| 19 |
+
},
|
| 20 |
+
{
|
| 21 |
+
"epoch": 0.018691588785046728,
|
| 22 |
+
"grad_norm": 0.7008568048477173,
|
| 23 |
+
"learning_rate": 1.8518518518518518e-05,
|
| 24 |
+
"loss": 1.8970871,
|
| 25 |
+
"memory(GiB)": 21.51,
|
| 26 |
+
"step": 5,
|
| 27 |
+
"train_speed(iter/s)": 0.107257
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"epoch": 0.037383177570093455,
|
| 31 |
+
"grad_norm": 0.6195642948150635,
|
| 32 |
+
"learning_rate": 3.7037037037037037e-05,
|
| 33 |
+
"loss": 1.85505066,
|
| 34 |
+
"memory(GiB)": 26.65,
|
| 35 |
+
"step": 10,
|
| 36 |
+
"train_speed(iter/s)": 0.150961
|
| 37 |
+
},
|
| 38 |
+
{
|
| 39 |
+
"epoch": 0.056074766355140186,
|
| 40 |
+
"grad_norm": 0.8053833842277527,
|
| 41 |
+
"learning_rate": 5.555555555555556e-05,
|
| 42 |
+
"loss": 1.85264435,
|
| 43 |
+
"memory(GiB)": 26.65,
|
| 44 |
+
"step": 15,
|
| 45 |
+
"train_speed(iter/s)": 0.174044
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"epoch": 0.07476635514018691,
|
| 49 |
+
"grad_norm": 0.9945815205574036,
|
| 50 |
+
"learning_rate": 7.407407407407407e-05,
|
| 51 |
+
"loss": 1.75669136,
|
| 52 |
+
"memory(GiB)": 26.65,
|
| 53 |
+
"step": 20,
|
| 54 |
+
"train_speed(iter/s)": 0.188377
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"epoch": 0.09345794392523364,
|
| 58 |
+
"grad_norm": 1.0137534141540527,
|
| 59 |
+
"learning_rate": 9.25925925925926e-05,
|
| 60 |
+
"loss": 1.5954113,
|
| 61 |
+
"memory(GiB)": 26.65,
|
| 62 |
+
"step": 25,
|
| 63 |
+
"train_speed(iter/s)": 0.198196
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"epoch": 0.11214953271028037,
|
| 67 |
+
"grad_norm": 1.0105948448181152,
|
| 68 |
+
"learning_rate": 9.999136119166803e-05,
|
| 69 |
+
"loss": 1.67307549,
|
| 70 |
+
"memory(GiB)": 26.65,
|
| 71 |
+
"step": 30,
|
| 72 |
+
"train_speed(iter/s)": 0.205137
|
| 73 |
+
},
|
| 74 |
+
{
|
| 75 |
+
"epoch": 0.1308411214953271,
|
| 76 |
+
"grad_norm": 1.0798794031143188,
|
| 77 |
+
"learning_rate": 9.99385792841537e-05,
|
| 78 |
+
"loss": 1.68129864,
|
| 79 |
+
"memory(GiB)": 26.65,
|
| 80 |
+
"step": 35,
|
| 81 |
+
"train_speed(iter/s)": 0.210084
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"epoch": 0.14953271028037382,
|
| 85 |
+
"grad_norm": 1.0679413080215454,
|
| 86 |
+
"learning_rate": 9.983786540671051e-05,
|
| 87 |
+
"loss": 1.61183624,
|
| 88 |
+
"memory(GiB)": 26.65,
|
| 89 |
+
"step": 40,
|
| 90 |
+
"train_speed(iter/s)": 0.214094
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"epoch": 0.16822429906542055,
|
| 94 |
+
"grad_norm": 0.9876216053962708,
|
| 95 |
+
"learning_rate": 9.968931622637652e-05,
|
| 96 |
+
"loss": 1.5409358,
|
| 97 |
+
"memory(GiB)": 26.65,
|
| 98 |
+
"step": 45,
|
| 99 |
+
"train_speed(iter/s)": 0.217302
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"epoch": 0.18691588785046728,
|
| 103 |
+
"grad_norm": 0.979777991771698,
|
| 104 |
+
"learning_rate": 9.949307432339625e-05,
|
| 105 |
+
"loss": 1.60590649,
|
| 106 |
+
"memory(GiB)": 26.65,
|
| 107 |
+
"step": 50,
|
| 108 |
+
"train_speed(iter/s)": 0.219869
|
| 109 |
+
},
|
| 110 |
+
{
|
| 111 |
+
"epoch": 0.18691588785046728,
|
| 112 |
+
"eval_loss": 1.543888807296753,
|
| 113 |
+
"eval_runtime": 18.3158,
|
| 114 |
+
"eval_samples_per_second": 2.73,
|
| 115 |
+
"eval_steps_per_second": 2.73,
|
| 116 |
+
"step": 50
|
| 117 |
+
},
|
| 118 |
+
{
|
| 119 |
+
"epoch": 0.205607476635514,
|
| 120 |
+
"grad_norm": 1.0606234073638916,
|
| 121 |
+
"learning_rate": 9.924932805436949e-05,
|
| 122 |
+
"loss": 1.54525614,
|
| 123 |
+
"memory(GiB)": 26.65,
|
| 124 |
+
"step": 55,
|
| 125 |
+
"train_speed(iter/s)": 0.206189
|
| 126 |
+
},
|
| 127 |
+
{
|
| 128 |
+
"epoch": 0.22429906542056074,
|
| 129 |
+
"grad_norm": 1.0801302194595337,
|
| 130 |
+
"learning_rate": 9.895831137146318e-05,
|
| 131 |
+
"loss": 1.54319582,
|
| 132 |
+
"memory(GiB)": 26.65,
|
| 133 |
+
"step": 60,
|
| 134 |
+
"train_speed(iter/s)": 0.209015
|
| 135 |
+
},
|
| 136 |
+
{
|
| 137 |
+
"epoch": 0.24299065420560748,
|
| 138 |
+
"grad_norm": 1.0459623336791992,
|
| 139 |
+
"learning_rate": 9.862030359785981e-05,
|
| 140 |
+
"loss": 1.55986643,
|
| 141 |
+
"memory(GiB)": 26.65,
|
| 142 |
+
"step": 65,
|
| 143 |
+
"train_speed(iter/s)": 0.211483
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"epoch": 0.2616822429906542,
|
| 147 |
+
"grad_norm": 1.0878509283065796,
|
| 148 |
+
"learning_rate": 9.82356291596578e-05,
|
| 149 |
+
"loss": 1.54775982,
|
| 150 |
+
"memory(GiB)": 26.65,
|
| 151 |
+
"step": 70,
|
| 152 |
+
"train_speed(iter/s)": 0.213594
|
| 153 |
+
},
|
| 154 |
+
{
|
| 155 |
+
"epoch": 0.2803738317757009,
|
| 156 |
+
"grad_norm": 1.0929535627365112,
|
| 157 |
+
"learning_rate": 9.780465727448149e-05,
|
| 158 |
+
"loss": 1.60084972,
|
| 159 |
+
"memory(GiB)": 26.65,
|
| 160 |
+
"step": 75,
|
| 161 |
+
"train_speed(iter/s)": 0.215384
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"epoch": 0.29906542056074764,
|
| 165 |
+
"grad_norm": 1.0857256650924683,
|
| 166 |
+
"learning_rate": 9.732780159709912e-05,
|
| 167 |
+
"loss": 1.53291664,
|
| 168 |
+
"memory(GiB)": 26.65,
|
| 169 |
+
"step": 80,
|
| 170 |
+
"train_speed(iter/s)": 0.217022
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"epoch": 0.3177570093457944,
|
| 174 |
+
"grad_norm": 1.0876630544662476,
|
| 175 |
+
"learning_rate": 9.680551982238942e-05,
|
| 176 |
+
"loss": 1.49946527,
|
| 177 |
+
"memory(GiB)": 26.65,
|
| 178 |
+
"step": 85,
|
| 179 |
+
"train_speed(iter/s)": 0.218544
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"epoch": 0.3364485981308411,
|
| 183 |
+
"grad_norm": 1.0945876836776733,
|
| 184 |
+
"learning_rate": 9.623831324603754e-05,
|
| 185 |
+
"loss": 1.57413607,
|
| 186 |
+
"memory(GiB)": 26.65,
|
| 187 |
+
"step": 90,
|
| 188 |
+
"train_speed(iter/s)": 0.219824
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"epoch": 0.35514018691588783,
|
| 192 |
+
"grad_norm": 0.9552567601203918,
|
| 193 |
+
"learning_rate": 9.562672628338233e-05,
|
| 194 |
+
"loss": 1.47238646,
|
| 195 |
+
"memory(GiB)": 26.65,
|
| 196 |
+
"step": 95,
|
| 197 |
+
"train_speed(iter/s)": 0.221014
|
| 198 |
+
},
|
| 199 |
+
{
|
| 200 |
+
"epoch": 0.37383177570093457,
|
| 201 |
+
"grad_norm": 1.0762443542480469,
|
| 202 |
+
"learning_rate": 9.497134594687634e-05,
|
| 203 |
+
"loss": 1.60602245,
|
| 204 |
+
"memory(GiB)": 26.65,
|
| 205 |
+
"step": 100,
|
| 206 |
+
"train_speed(iter/s)": 0.222132
|
| 207 |
+
},
|
| 208 |
+
{
|
| 209 |
+
"epoch": 0.37383177570093457,
|
| 210 |
+
"eval_loss": 1.4929084777832031,
|
| 211 |
+
"eval_runtime": 14.0965,
|
| 212 |
+
"eval_samples_per_second": 3.547,
|
| 213 |
+
"eval_steps_per_second": 3.547,
|
| 214 |
+
"step": 100
|
| 215 |
+
},
|
| 216 |
+
{
|
| 217 |
+
"epoch": 0.3925233644859813,
|
| 218 |
+
"grad_norm": 0.985505998134613,
|
| 219 |
+
"learning_rate": 9.42728012826605e-05,
|
| 220 |
+
"loss": 1.53017511,
|
| 221 |
+
"memory(GiB)": 26.65,
|
| 222 |
+
"step": 105,
|
| 223 |
+
"train_speed(iter/s)": 0.216155
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"epoch": 0.411214953271028,
|
| 227 |
+
"grad_norm": 1.0371544361114502,
|
| 228 |
+
"learning_rate": 9.353176276679396e-05,
|
| 229 |
+
"loss": 1.55461969,
|
| 230 |
+
"memory(GiB)": 26.65,
|
| 231 |
+
"step": 110,
|
| 232 |
+
"train_speed(iter/s)": 0.217382
|
| 233 |
+
},
|
| 234 |
+
{
|
| 235 |
+
"epoch": 0.42990654205607476,
|
| 236 |
+
"grad_norm": 1.1553157567977905,
|
| 237 |
+
"learning_rate": 9.274894166171888e-05,
|
| 238 |
+
"loss": 1.53458586,
|
| 239 |
+
"memory(GiB)": 26.65,
|
| 240 |
+
"step": 115,
|
| 241 |
+
"train_speed(iter/s)": 0.218618
|
| 242 |
+
},
|
| 243 |
+
{
|
| 244 |
+
"epoch": 0.4485981308411215,
|
| 245 |
+
"grad_norm": 1.062723994255066,
|
| 246 |
+
"learning_rate": 9.192508933357753e-05,
|
| 247 |
+
"loss": 1.56342993,
|
| 248 |
+
"memory(GiB)": 26.65,
|
| 249 |
+
"step": 120,
|
| 250 |
+
"train_speed(iter/s)": 0.21963
|
| 251 |
+
},
|
| 252 |
+
{
|
| 253 |
+
"epoch": 0.4672897196261682,
|
| 254 |
+
"grad_norm": 0.9741066098213196,
|
| 255 |
+
"learning_rate": 9.106099653103728e-05,
|
| 256 |
+
"loss": 1.46541033,
|
| 257 |
+
"memory(GiB)": 26.65,
|
| 258 |
+
"step": 125,
|
| 259 |
+
"train_speed(iter/s)": 0.220578
|
| 260 |
+
},
|
| 261 |
+
{
|
| 262 |
+
"epoch": 0.48598130841121495,
|
| 263 |
+
"grad_norm": 1.1155296564102173,
|
| 264 |
+
"learning_rate": 9.015749262631536e-05,
|
| 265 |
+
"loss": 1.45173082,
|
| 266 |
+
"memory(GiB)": 26.65,
|
| 267 |
+
"step": 130,
|
| 268 |
+
"train_speed(iter/s)": 0.221485
|
| 269 |
+
},
|
| 270 |
+
{
|
| 271 |
+
"epoch": 0.5046728971962616,
|
| 272 |
+
"grad_norm": 1.3632838726043701,
|
| 273 |
+
"learning_rate": 8.921544481913218e-05,
|
| 274 |
+
"loss": 1.51770496,
|
| 275 |
+
"memory(GiB)": 26.65,
|
| 276 |
+
"step": 135,
|
| 277 |
+
"train_speed(iter/s)": 0.222401
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"epoch": 0.5233644859813084,
|
| 281 |
+
"grad_norm": 1.165434718132019,
|
| 282 |
+
"learning_rate": 8.823575730435693e-05,
|
| 283 |
+
"loss": 1.55217724,
|
| 284 |
+
"memory(GiB)": 32.07,
|
| 285 |
+
"step": 140,
|
| 286 |
+
"train_speed(iter/s)": 0.223153
|
| 287 |
+
},
|
| 288 |
+
{
|
| 289 |
+
"epoch": 0.5420560747663551,
|
| 290 |
+
"grad_norm": 1.1032906770706177,
|
| 291 |
+
"learning_rate": 8.721937040414481e-05,
|
| 292 |
+
"loss": 1.43740101,
|
| 293 |
+
"memory(GiB)": 32.07,
|
| 294 |
+
"step": 145,
|
| 295 |
+
"train_speed(iter/s)": 0.223845
|
| 296 |
+
},
|
| 297 |
+
{
|
| 298 |
+
"epoch": 0.5607476635514018,
|
| 299 |
+
"grad_norm": 1.1984739303588867,
|
| 300 |
+
"learning_rate": 8.616725966539832e-05,
|
| 301 |
+
"loss": 1.58604784,
|
| 302 |
+
"memory(GiB)": 32.07,
|
| 303 |
+
"step": 150,
|
| 304 |
+
"train_speed(iter/s)": 0.224618
|
| 305 |
+
},
|
| 306 |
+
{
|
| 307 |
+
"epoch": 0.5607476635514018,
|
| 308 |
+
"eval_loss": 1.4658682346343994,
|
| 309 |
+
"eval_runtime": 14.0407,
|
| 310 |
+
"eval_samples_per_second": 3.561,
|
| 311 |
+
"eval_steps_per_second": 3.561,
|
| 312 |
+
"step": 150
|
| 313 |
+
},
|
| 314 |
+
{
|
| 315 |
+
"epoch": 0.5794392523364486,
|
| 316 |
+
"grad_norm": 1.154517650604248,
|
| 317 |
+
"learning_rate": 8.508043492341944e-05,
|
| 318 |
+
"loss": 1.49082041,
|
| 319 |
+
"memory(GiB)": 32.07,
|
| 320 |
+
"step": 155,
|
| 321 |
+
"train_speed(iter/s)": 0.220462
|
| 322 |
+
},
|
| 323 |
+
{
|
| 324 |
+
"epoch": 0.5981308411214953,
|
| 325 |
+
"grad_norm": 1.2047632932662964,
|
| 326 |
+
"learning_rate": 8.395993933265101e-05,
|
| 327 |
+
"loss": 1.53753242,
|
| 328 |
+
"memory(GiB)": 32.07,
|
| 329 |
+
"step": 160,
|
| 330 |
+
"train_speed(iter/s)": 0.221167
|
| 331 |
+
},
|
| 332 |
+
{
|
| 333 |
+
"epoch": 0.616822429906542,
|
| 334 |
+
"grad_norm": 0.9952251315116882,
|
| 335 |
+
"learning_rate": 8.280684836543794e-05,
|
| 336 |
+
"loss": 1.49997816,
|
| 337 |
+
"memory(GiB)": 32.07,
|
| 338 |
+
"step": 165,
|
| 339 |
+
"train_speed(iter/s)": 0.22173
|
| 340 |
+
},
|
| 341 |
+
{
|
| 342 |
+
"epoch": 0.6355140186915887,
|
| 343 |
+
"grad_norm": 1.1730362176895142,
|
| 344 |
+
"learning_rate": 8.162226877976887e-05,
|
| 345 |
+
"loss": 1.50385504,
|
| 346 |
+
"memory(GiB)": 32.07,
|
| 347 |
+
"step": 170,
|
| 348 |
+
"train_speed(iter/s)": 0.222363
|
| 349 |
+
},
|
| 350 |
+
{
|
| 351 |
+
"epoch": 0.6542056074766355,
|
| 352 |
+
"grad_norm": 1.066243052482605,
|
| 353 |
+
"learning_rate": 8.040733755698955e-05,
|
| 354 |
+
"loss": 1.4824049,
|
| 355 |
+
"memory(GiB)": 32.07,
|
| 356 |
+
"step": 175,
|
| 357 |
+
"train_speed(iter/s)": 0.22299
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"epoch": 0.6728971962616822,
|
| 361 |
+
"grad_norm": 1.2189449071884155,
|
| 362 |
+
"learning_rate": 7.916322081050709e-05,
|
| 363 |
+
"loss": 1.49032326,
|
| 364 |
+
"memory(GiB)": 32.07,
|
| 365 |
+
"step": 180,
|
| 366 |
+
"train_speed(iter/s)": 0.223605
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"epoch": 0.6915887850467289,
|
| 370 |
+
"grad_norm": 1.07020103931427,
|
| 371 |
+
"learning_rate": 7.789111266653285e-05,
|
| 372 |
+
"loss": 1.46754303,
|
| 373 |
+
"memory(GiB)": 32.07,
|
| 374 |
+
"step": 185,
|
| 375 |
+
"train_speed(iter/s)": 0.224145
|
| 376 |
+
},
|
| 377 |
+
{
|
| 378 |
+
"epoch": 0.7102803738317757,
|
| 379 |
+
"grad_norm": 1.226481318473816,
|
| 380 |
+
"learning_rate": 7.659223411793798e-05,
|
| 381 |
+
"loss": 1.42194347,
|
| 382 |
+
"memory(GiB)": 32.07,
|
| 383 |
+
"step": 190,
|
| 384 |
+
"train_speed(iter/s)": 0.224687
|
| 385 |
+
},
|
| 386 |
+
{
|
| 387 |
+
"epoch": 0.7289719626168224,
|
| 388 |
+
"grad_norm": 1.111670732498169,
|
| 389 |
+
"learning_rate": 7.526783185232207e-05,
|
| 390 |
+
"loss": 1.50790215,
|
| 391 |
+
"memory(GiB)": 32.07,
|
| 392 |
+
"step": 195,
|
| 393 |
+
"train_speed(iter/s)": 0.225152
|
| 394 |
+
},
|
| 395 |
+
{
|
| 396 |
+
"epoch": 0.7476635514018691,
|
| 397 |
+
"grad_norm": 1.1171320676803589,
|
| 398 |
+
"learning_rate": 7.391917705541927e-05,
|
| 399 |
+
"loss": 1.51145458,
|
| 400 |
+
"memory(GiB)": 32.07,
|
| 401 |
+
"step": 200,
|
| 402 |
+
"train_speed(iter/s)": 0.22563
|
| 403 |
+
},
|
| 404 |
+
{
|
| 405 |
+
"epoch": 0.7476635514018691,
|
| 406 |
+
"eval_loss": 1.4480363130569458,
|
| 407 |
+
"eval_runtime": 14.0508,
|
| 408 |
+
"eval_samples_per_second": 3.559,
|
| 409 |
+
"eval_steps_per_second": 3.559,
|
| 410 |
+
"step": 200
|
| 411 |
+
},
|
| 412 |
+
{
|
| 413 |
+
"epoch": 0.7663551401869159,
|
| 414 |
+
"grad_norm": 0.9992289543151855,
|
| 415 |
+
"learning_rate": 7.254756419099074e-05,
|
| 416 |
+
"loss": 1.53672495,
|
| 417 |
+
"memory(GiB)": 32.07,
|
| 418 |
+
"step": 205,
|
| 419 |
+
"train_speed(iter/s)": 0.222373
|
| 420 |
+
},
|
| 421 |
+
{
|
| 422 |
+
"epoch": 0.7850467289719626,
|
| 423 |
+
"grad_norm": 1.076946496963501,
|
| 424 |
+
"learning_rate": 7.115430975837457e-05,
|
| 425 |
+
"loss": 1.51113377,
|
| 426 |
+
"memory(GiB)": 32.07,
|
| 427 |
+
"step": 210,
|
| 428 |
+
"train_speed(iter/s)": 0.222912
|
| 429 |
+
},
|
| 430 |
+
{
|
| 431 |
+
"epoch": 0.8037383177570093,
|
| 432 |
+
"grad_norm": 1.3144261837005615,
|
| 433 |
+
"learning_rate": 6.974075102888536e-05,
|
| 434 |
+
"loss": 1.51253147,
|
| 435 |
+
"memory(GiB)": 32.07,
|
| 436 |
+
"step": 215,
|
| 437 |
+
"train_speed(iter/s)": 0.223388
|
| 438 |
+
},
|
| 439 |
+
{
|
| 440 |
+
"epoch": 0.822429906542056,
|
| 441 |
+
"grad_norm": 1.2429286241531372,
|
| 442 |
+
"learning_rate": 6.830824476227646e-05,
|
| 443 |
+
"loss": 1.49584999,
|
| 444 |
+
"memory(GiB)": 32.07,
|
| 445 |
+
"step": 220,
|
| 446 |
+
"train_speed(iter/s)": 0.22384
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"epoch": 0.8411214953271028,
|
| 450 |
+
"grad_norm": 1.213188886642456,
|
| 451 |
+
"learning_rate": 6.685816590449708e-05,
|
| 452 |
+
"loss": 1.4517292,
|
| 453 |
+
"memory(GiB)": 32.07,
|
| 454 |
+
"step": 225,
|
| 455 |
+
"train_speed(iter/s)": 0.224262
|
| 456 |
+
},
|
| 457 |
+
{
|
| 458 |
+
"epoch": 0.8598130841121495,
|
| 459 |
+
"grad_norm": 1.1008031368255615,
|
| 460 |
+
"learning_rate": 6.539190626799366e-05,
|
| 461 |
+
"loss": 1.44860907,
|
| 462 |
+
"memory(GiB)": 32.07,
|
| 463 |
+
"step": 230,
|
| 464 |
+
"train_speed(iter/s)": 0.224691
|
| 465 |
+
},
|
| 466 |
+
{
|
| 467 |
+
"epoch": 0.8785046728971962,
|
| 468 |
+
"grad_norm": 1.105083703994751,
|
| 469 |
+
"learning_rate": 6.391087319582264e-05,
|
| 470 |
+
"loss": 1.45654058,
|
| 471 |
+
"memory(GiB)": 32.07,
|
| 472 |
+
"step": 235,
|
| 473 |
+
"train_speed(iter/s)": 0.225105
|
| 474 |
+
},
|
| 475 |
+
{
|
| 476 |
+
"epoch": 0.897196261682243,
|
| 477 |
+
"grad_norm": 1.1485651731491089,
|
| 478 |
+
"learning_rate": 6.241648821085666e-05,
|
| 479 |
+
"loss": 1.4626853,
|
| 480 |
+
"memory(GiB)": 32.07,
|
| 481 |
+
"step": 240,
|
| 482 |
+
"train_speed(iter/s)": 0.225456
|
| 483 |
+
},
|
| 484 |
+
{
|
| 485 |
+
"epoch": 0.9158878504672897,
|
| 486 |
+
"grad_norm": 1.2288539409637451,
|
| 487 |
+
"learning_rate": 6.0910185651380626e-05,
|
| 488 |
+
"loss": 1.41080866,
|
| 489 |
+
"memory(GiB)": 32.07,
|
| 490 |
+
"step": 245,
|
| 491 |
+
"train_speed(iter/s)": 0.225881
|
| 492 |
+
},
|
| 493 |
+
{
|
| 494 |
+
"epoch": 0.9345794392523364,
|
| 495 |
+
"grad_norm": 1.2186890840530396,
|
| 496 |
+
"learning_rate": 5.939341129438739e-05,
|
| 497 |
+
"loss": 1.53512402,
|
| 498 |
+
"memory(GiB)": 32.07,
|
| 499 |
+
"step": 250,
|
| 500 |
+
"train_speed(iter/s)": 0.226215
|
| 501 |
+
},
|
| 502 |
+
{
|
| 503 |
+
"epoch": 0.9345794392523364,
|
| 504 |
+
"eval_loss": 1.438408374786377,
|
| 505 |
+
"eval_runtime": 14.1598,
|
| 506 |
+
"eval_samples_per_second": 3.531,
|
| 507 |
+
"eval_steps_per_second": 3.531,
|
| 508 |
+
"step": 250
|
| 509 |
+
},
|
| 510 |
+
{
|
| 511 |
+
"epoch": 0.9532710280373832,
|
| 512 |
+
"grad_norm": 1.1940230131149292,
|
| 513 |
+
"learning_rate": 5.786762096789431e-05,
|
| 514 |
+
"loss": 1.55513544,
|
| 515 |
+
"memory(GiB)": 32.07,
|
| 516 |
+
"step": 255,
|
| 517 |
+
"train_speed(iter/s)": 0.223566
|
| 518 |
+
},
|
| 519 |
+
{
|
| 520 |
+
"epoch": 0.9719626168224299,
|
| 521 |
+
"grad_norm": 1.0835857391357422,
|
| 522 |
+
"learning_rate": 5.633427915361261e-05,
|
| 523 |
+
"loss": 1.51988029,
|
| 524 |
+
"memory(GiB)": 32.07,
|
| 525 |
+
"step": 260,
|
| 526 |
+
"train_speed(iter/s)": 0.22394
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"epoch": 0.9906542056074766,
|
| 530 |
+
"grad_norm": 1.170660376548767,
|
| 531 |
+
"learning_rate": 5.479485758131089e-05,
|
| 532 |
+
"loss": 1.56143446,
|
| 533 |
+
"memory(GiB)": 32.07,
|
| 534 |
+
"step": 265,
|
| 535 |
+
"train_speed(iter/s)": 0.224337
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"epoch": 1.0093457943925233,
|
| 539 |
+
"grad_norm": 1.0278513431549072,
|
| 540 |
+
"learning_rate": 5.325083381622165e-05,
|
| 541 |
+
"loss": 1.43758631,
|
| 542 |
+
"memory(GiB)": 32.07,
|
| 543 |
+
"step": 270,
|
| 544 |
+
"train_speed(iter/s)": 0.224666
|
| 545 |
+
},
|
| 546 |
+
{
|
| 547 |
+
"epoch": 1.02803738317757,
|
| 548 |
+
"grad_norm": 1.13231360912323,
|
| 549 |
+
"learning_rate": 5.1703689840846945e-05,
|
| 550 |
+
"loss": 1.34864044,
|
| 551 |
+
"memory(GiB)": 32.07,
|
| 552 |
+
"step": 275,
|
| 553 |
+
"train_speed(iter/s)": 0.225034
|
| 554 |
+
},
|
| 555 |
+
{
|
| 556 |
+
"epoch": 1.0467289719626167,
|
| 557 |
+
"grad_norm": 1.2419425249099731,
|
| 558 |
+
"learning_rate": 5.01549106325243e-05,
|
| 559 |
+
"loss": 1.38481417,
|
| 560 |
+
"memory(GiB)": 32.07,
|
| 561 |
+
"step": 280,
|
| 562 |
+
"train_speed(iter/s)": 0.225399
|
| 563 |
+
},
|
| 564 |
+
{
|
| 565 |
+
"epoch": 1.0654205607476634,
|
| 566 |
+
"grad_norm": 1.336288332939148,
|
| 567 |
+
"learning_rate": 4.860598273811792e-05,
|
| 568 |
+
"loss": 1.24492655,
|
| 569 |
+
"memory(GiB)": 32.07,
|
| 570 |
+
"step": 285,
|
| 571 |
+
"train_speed(iter/s)": 0.225789
|
| 572 |
+
},
|
| 573 |
+
{
|
| 574 |
+
"epoch": 1.0841121495327102,
|
| 575 |
+
"grad_norm": 1.241809368133545,
|
| 576 |
+
"learning_rate": 4.705839284720376e-05,
|
| 577 |
+
"loss": 1.36301146,
|
| 578 |
+
"memory(GiB)": 32.07,
|
| 579 |
+
"step": 290,
|
| 580 |
+
"train_speed(iter/s)": 0.226105
|
| 581 |
+
},
|
| 582 |
+
{
|
| 583 |
+
"epoch": 1.102803738317757,
|
| 584 |
+
"grad_norm": 1.4412420988082886,
|
| 585 |
+
"learning_rate": 4.55136263651172e-05,
|
| 586 |
+
"loss": 1.39876356,
|
| 587 |
+
"memory(GiB)": 32.07,
|
| 588 |
+
"step": 295,
|
| 589 |
+
"train_speed(iter/s)": 0.226405
|
| 590 |
+
},
|
| 591 |
+
{
|
| 592 |
+
"epoch": 1.1214953271028036,
|
| 593 |
+
"grad_norm": 1.6165404319763184,
|
| 594 |
+
"learning_rate": 4.397316598723385e-05,
|
| 595 |
+
"loss": 1.32808571,
|
| 596 |
+
"memory(GiB)": 32.07,
|
| 597 |
+
"step": 300,
|
| 598 |
+
"train_speed(iter/s)": 0.226709
|
| 599 |
+
},
|
| 600 |
+
{
|
| 601 |
+
"epoch": 1.1214953271028036,
|
| 602 |
+
"eval_loss": 1.4294430017471313,
|
| 603 |
+
"eval_runtime": 14.1178,
|
| 604 |
+
"eval_samples_per_second": 3.542,
|
| 605 |
+
"eval_steps_per_second": 3.542,
|
| 606 |
+
"step": 300
|
| 607 |
+
},
|
| 608 |
+
{
|
| 609 |
+
"epoch": 1.1401869158878504,
|
| 610 |
+
"grad_norm": 1.4734883308410645,
|
| 611 |
+
"learning_rate": 4.243849027585096e-05,
|
| 612 |
+
"loss": 1.37022314,
|
| 613 |
+
"memory(GiB)": 32.07,
|
| 614 |
+
"step": 305,
|
| 615 |
+
"train_speed(iter/s)": 0.224508
|
| 616 |
+
},
|
| 617 |
+
{
|
| 618 |
+
"epoch": 1.158878504672897,
|
| 619 |
+
"grad_norm": 1.5161515474319458,
|
| 620 |
+
"learning_rate": 4.0911072241036194e-05,
|
| 621 |
+
"loss": 1.40692539,
|
| 622 |
+
"memory(GiB)": 32.07,
|
| 623 |
+
"step": 310,
|
| 624 |
+
"train_speed(iter/s)": 0.224822
|
| 625 |
+
},
|
| 626 |
+
{
|
| 627 |
+
"epoch": 1.1775700934579438,
|
| 628 |
+
"grad_norm": 1.4354695081710815,
|
| 629 |
+
"learning_rate": 3.9392377926805226e-05,
|
| 630 |
+
"loss": 1.31709337,
|
| 631 |
+
"memory(GiB)": 32.07,
|
| 632 |
+
"step": 315,
|
| 633 |
+
"train_speed(iter/s)": 0.225147
|
| 634 |
+
},
|
| 635 |
+
{
|
| 636 |
+
"epoch": 1.1962616822429906,
|
| 637 |
+
"grad_norm": 1.5612841844558716,
|
| 638 |
+
"learning_rate": 3.788386500398583e-05,
|
| 639 |
+
"loss": 1.38046598,
|
| 640 |
+
"memory(GiB)": 32.07,
|
| 641 |
+
"step": 320,
|
| 642 |
+
"train_speed(iter/s)": 0.225425
|
| 643 |
+
},
|
| 644 |
+
{
|
| 645 |
+
"epoch": 1.2149532710280373,
|
| 646 |
+
"grad_norm": 1.353385090827942,
|
| 647 |
+
"learning_rate": 3.6386981371118355e-05,
|
| 648 |
+
"loss": 1.29831305,
|
| 649 |
+
"memory(GiB)": 32.07,
|
| 650 |
+
"step": 325,
|
| 651 |
+
"train_speed(iter/s)": 0.225693
|
| 652 |
+
},
|
| 653 |
+
{
|
| 654 |
+
"epoch": 1.233644859813084,
|
| 655 |
+
"grad_norm": 1.6214525699615479,
|
| 656 |
+
"learning_rate": 3.49031637647361e-05,
|
| 657 |
+
"loss": 1.33498459,
|
| 658 |
+
"memory(GiB)": 32.07,
|
| 659 |
+
"step": 330,
|
| 660 |
+
"train_speed(iter/s)": 0.225991
|
| 661 |
+
},
|
| 662 |
+
{
|
| 663 |
+
"epoch": 1.2523364485981308,
|
| 664 |
+
"grad_norm": 1.441267490386963,
|
| 665 |
+
"learning_rate": 3.343383638035902e-05,
|
| 666 |
+
"loss": 1.2935997,
|
| 667 |
+
"memory(GiB)": 32.07,
|
| 668 |
+
"step": 335,
|
| 669 |
+
"train_speed(iter/s)": 0.226241
|
| 670 |
+
},
|
| 671 |
+
{
|
| 672 |
+
"epoch": 1.2710280373831775,
|
| 673 |
+
"grad_norm": 1.5621421337127686,
|
| 674 |
+
"learning_rate": 3.1980409505524544e-05,
|
| 675 |
+
"loss": 1.32472296,
|
| 676 |
+
"memory(GiB)": 32.07,
|
| 677 |
+
"step": 340,
|
| 678 |
+
"train_speed(iter/s)": 0.226507
|
| 679 |
+
},
|
| 680 |
+
{
|
| 681 |
+
"epoch": 1.2897196261682242,
|
| 682 |
+
"grad_norm": 1.7050727605819702,
|
| 683 |
+
"learning_rate": 3.054427816616773e-05,
|
| 684 |
+
"loss": 1.25045223,
|
| 685 |
+
"memory(GiB)": 32.07,
|
| 686 |
+
"step": 345,
|
| 687 |
+
"train_speed(iter/s)": 0.226762
|
| 688 |
+
},
|
| 689 |
+
{
|
| 690 |
+
"epoch": 1.308411214953271,
|
| 691 |
+
"grad_norm": 1.5206207036972046,
|
| 692 |
+
"learning_rate": 2.91268207876494e-05,
|
| 693 |
+
"loss": 1.33886337,
|
| 694 |
+
"memory(GiB)": 32.07,
|
| 695 |
+
"step": 350,
|
| 696 |
+
"train_speed(iter/s)": 0.226984
|
| 697 |
+
},
|
| 698 |
+
{
|
| 699 |
+
"epoch": 1.308411214953271,
|
| 700 |
+
"eval_loss": 1.4251823425292969,
|
| 701 |
+
"eval_runtime": 14.0593,
|
| 702 |
+
"eval_samples_per_second": 3.556,
|
| 703 |
+
"eval_steps_per_second": 3.556,
|
| 704 |
+
"step": 350
|
| 705 |
+
},
|
| 706 |
+
{
|
| 707 |
+
"epoch": 1.3271028037383177,
|
| 708 |
+
"grad_norm": 1.5205532312393188,
|
| 709 |
+
"learning_rate": 2.7729397871718304e-05,
|
| 710 |
+
"loss": 1.28512764,
|
| 711 |
+
"memory(GiB)": 32.07,
|
| 712 |
+
"step": 355,
|
| 713 |
+
"train_speed(iter/s)": 0.225063
|
| 714 |
+
},
|
| 715 |
+
{
|
| 716 |
+
"epoch": 1.3457943925233644,
|
| 717 |
+
"grad_norm": 1.5533926486968994,
|
| 718 |
+
"learning_rate": 2.635335069067617e-05,
|
| 719 |
+
"loss": 1.30997047,
|
| 720 |
+
"memory(GiB)": 32.07,
|
| 721 |
+
"step": 360,
|
| 722 |
+
"train_speed(iter/s)": 0.225339
|
| 723 |
+
},
|
| 724 |
+
{
|
| 725 |
+
"epoch": 1.3644859813084111,
|
| 726 |
+
"grad_norm": 1.581883192062378,
|
| 727 |
+
"learning_rate": 2.500000000000001e-05,
|
| 728 |
+
"loss": 1.28296366,
|
| 729 |
+
"memory(GiB)": 32.07,
|
| 730 |
+
"step": 365,
|
| 731 |
+
"train_speed(iter/s)": 0.225599
|
| 732 |
+
},
|
| 733 |
+
{
|
| 734 |
+
"epoch": 1.3831775700934579,
|
| 735 |
+
"grad_norm": 1.4634901285171509,
|
| 736 |
+
"learning_rate": 2.367064477065652e-05,
|
| 737 |
+
"loss": 1.31434088,
|
| 738 |
+
"memory(GiB)": 32.07,
|
| 739 |
+
"step": 370,
|
| 740 |
+
"train_speed(iter/s)": 0.225861
|
| 741 |
+
},
|
| 742 |
+
{
|
| 743 |
+
"epoch": 1.4018691588785046,
|
| 744 |
+
"grad_norm": 1.706288456916809,
|
| 745 |
+
"learning_rate": 2.2366560942325832e-05,
|
| 746 |
+
"loss": 1.30933113,
|
| 747 |
+
"memory(GiB)": 32.07,
|
| 748 |
+
"step": 375,
|
| 749 |
+
"train_speed(iter/s)": 0.226141
|
| 750 |
+
},
|
| 751 |
+
{
|
| 752 |
+
"epoch": 1.4205607476635513,
|
| 753 |
+
"grad_norm": 1.6696898937225342,
|
| 754 |
+
"learning_rate": 2.108900019873103e-05,
|
| 755 |
+
"loss": 1.32816324,
|
| 756 |
+
"memory(GiB)": 32.07,
|
| 757 |
+
"step": 380,
|
| 758 |
+
"train_speed(iter/s)": 0.226392
|
| 759 |
+
},
|
| 760 |
+
{
|
| 761 |
+
"epoch": 1.439252336448598,
|
| 762 |
+
"grad_norm": 1.590394377708435,
|
| 763 |
+
"learning_rate": 1.983918876624902e-05,
|
| 764 |
+
"loss": 1.26775227,
|
| 765 |
+
"memory(GiB)": 32.07,
|
| 766 |
+
"step": 385,
|
| 767 |
+
"train_speed(iter/s)": 0.226637
|
| 768 |
+
},
|
| 769 |
+
{
|
| 770 |
+
"epoch": 1.4579439252336448,
|
| 771 |
+
"grad_norm": 1.7391793727874756,
|
| 772 |
+
"learning_rate": 1.8618326236955907e-05,
|
| 773 |
+
"loss": 1.33946781,
|
| 774 |
+
"memory(GiB)": 32.07,
|
| 775 |
+
"step": 390,
|
| 776 |
+
"train_speed(iter/s)": 0.226874
|
| 777 |
+
},
|
| 778 |
+
{
|
| 779 |
+
"epoch": 1.4766355140186915,
|
| 780 |
+
"grad_norm": 1.7035928964614868,
|
| 781 |
+
"learning_rate": 1.7427584417236194e-05,
|
| 782 |
+
"loss": 1.34862604,
|
| 783 |
+
"memory(GiB)": 32.07,
|
| 784 |
+
"step": 395,
|
| 785 |
+
"train_speed(iter/s)": 0.227069
|
| 786 |
+
},
|
| 787 |
+
{
|
| 788 |
+
"epoch": 1.4953271028037383,
|
| 789 |
+
"grad_norm": 1.5830693244934082,
|
| 790 |
+
"learning_rate": 1.626810620306163e-05,
|
| 791 |
+
"loss": 1.27288446,
|
| 792 |
+
"memory(GiB)": 32.07,
|
| 793 |
+
"step": 400,
|
| 794 |
+
"train_speed(iter/s)": 0.227266
|
| 795 |
+
},
|
| 796 |
+
{
|
| 797 |
+
"epoch": 1.4953271028037383,
|
| 798 |
+
"eval_loss": 1.4195191860198975,
|
| 799 |
+
"eval_runtime": 14.0879,
|
| 800 |
+
"eval_samples_per_second": 3.549,
|
| 801 |
+
"eval_steps_per_second": 3.549,
|
| 802 |
+
"step": 400
|
| 803 |
+
},
|
| 804 |
+
{
|
| 805 |
+
"epoch": 1.514018691588785,
|
| 806 |
+
"grad_norm": 1.4809561967849731,
|
| 807 |
+
"learning_rate": 1.5141004483018323e-05,
|
| 808 |
+
"loss": 1.31938076,
|
| 809 |
+
"memory(GiB)": 32.07,
|
| 810 |
+
"step": 405,
|
| 811 |
+
"train_speed(iter/s)": 0.225559
|
| 812 |
+
},
|
| 813 |
+
{
|
| 814 |
+
"epoch": 1.5327102803738317,
|
| 815 |
+
"grad_norm": 1.8556567430496216,
|
| 816 |
+
"learning_rate": 1.4047361070135995e-05,
|
| 817 |
+
"loss": 1.33600292,
|
| 818 |
+
"memory(GiB)": 32.07,
|
| 819 |
+
"step": 410,
|
| 820 |
+
"train_speed(iter/s)": 0.225804
|
| 821 |
+
},
|
| 822 |
+
{
|
| 823 |
+
"epoch": 1.5514018691588785,
|
| 824 |
+
"grad_norm": 1.5470691919326782,
|
| 825 |
+
"learning_rate": 1.2988225663543602e-05,
|
| 826 |
+
"loss": 1.40292425,
|
| 827 |
+
"memory(GiB)": 32.07,
|
| 828 |
+
"step": 415,
|
| 829 |
+
"train_speed(iter/s)": 0.226027
|
| 830 |
+
},
|
| 831 |
+
{
|
| 832 |
+
"epoch": 1.5700934579439252,
|
| 833 |
+
"grad_norm": 1.8364381790161133,
|
| 834 |
+
"learning_rate": 1.1964614840949002e-05,
|
| 835 |
+
"loss": 1.32833939,
|
| 836 |
+
"memory(GiB)": 32.07,
|
| 837 |
+
"step": 420,
|
| 838 |
+
"train_speed(iter/s)": 0.226267
|
| 839 |
+
},
|
| 840 |
+
{
|
| 841 |
+
"epoch": 1.588785046728972,
|
| 842 |
+
"grad_norm": 1.6938135623931885,
|
| 843 |
+
"learning_rate": 1.097751108290867e-05,
|
| 844 |
+
"loss": 1.35209036,
|
| 845 |
+
"memory(GiB)": 32.07,
|
| 846 |
+
"step": 425,
|
| 847 |
+
"train_speed(iter/s)": 0.226474
|
| 848 |
+
},
|
| 849 |
+
{
|
| 850 |
+
"epoch": 1.6074766355140186,
|
| 851 |
+
"grad_norm": 1.7861816883087158,
|
| 852 |
+
"learning_rate": 1.0027861829824952e-05,
|
| 853 |
+
"loss": 1.27312994,
|
| 854 |
+
"memory(GiB)": 32.07,
|
| 855 |
+
"step": 430,
|
| 856 |
+
"train_speed(iter/s)": 0.226695
|
| 857 |
+
},
|
| 858 |
+
{
|
| 859 |
+
"epoch": 1.6261682242990654,
|
| 860 |
+
"grad_norm": 1.6619056463241577,
|
| 861 |
+
"learning_rate": 9.11657857257509e-06,
|
| 862 |
+
"loss": 1.35062437,
|
| 863 |
+
"memory(GiB)": 32.07,
|
| 864 |
+
"step": 435,
|
| 865 |
+
"train_speed(iter/s)": 0.22691
|
| 866 |
+
},
|
| 867 |
+
{
|
| 868 |
+
"epoch": 1.644859813084112,
|
| 869 |
+
"grad_norm": 1.7696343660354614,
|
| 870 |
+
"learning_rate": 8.244535977645585e-06,
|
| 871 |
+
"loss": 1.32785254,
|
| 872 |
+
"memory(GiB)": 32.07,
|
| 873 |
+
"step": 440,
|
| 874 |
+
"train_speed(iter/s)": 0.227108
|
| 875 |
+
},
|
| 876 |
+
{
|
| 877 |
+
"epoch": 1.6635514018691588,
|
| 878 |
+
"grad_norm": 1.6938729286193848,
|
| 879 |
+
"learning_rate": 7.412571047611155e-06,
|
| 880 |
+
"loss": 1.3087183,
|
| 881 |
+
"memory(GiB)": 32.07,
|
| 882 |
+
"step": 445,
|
| 883 |
+
"train_speed(iter/s)": 0.227305
|
| 884 |
+
},
|
| 885 |
+
{
|
| 886 |
+
"epoch": 1.6822429906542056,
|
| 887 |
+
"grad_norm": 1.7258585691452026,
|
| 888 |
+
"learning_rate": 6.621482317764105e-06,
|
| 889 |
+
"loss": 1.30971994,
|
| 890 |
+
"memory(GiB)": 32.07,
|
| 891 |
+
"step": 450,
|
| 892 |
+
"train_speed(iter/s)": 0.227507
|
| 893 |
+
},
|
| 894 |
+
{
|
| 895 |
+
"epoch": 1.6822429906542056,
|
| 896 |
+
"eval_loss": 1.4193787574768066,
|
| 897 |
+
"eval_runtime": 17.3944,
|
| 898 |
+
"eval_samples_per_second": 2.874,
|
| 899 |
+
"eval_steps_per_second": 2.874,
|
| 900 |
+
"step": 450
|
| 901 |
+
},
|
| 902 |
+
{
|
| 903 |
+
"epoch": 1.7009345794392523,
|
| 904 |
+
"grad_norm": 1.8556472063064575,
|
| 905 |
+
"learning_rate": 5.872029089665587e-06,
|
| 906 |
+
"loss": 1.26630516,
|
| 907 |
+
"memory(GiB)": 32.07,
|
| 908 |
+
"step": 455,
|
| 909 |
+
"train_speed(iter/s)": 0.225602
|
| 910 |
+
},
|
| 911 |
+
{
|
| 912 |
+
"epoch": 1.719626168224299,
|
| 913 |
+
"grad_norm": 1.852525234222412,
|
| 914 |
+
"learning_rate": 5.164930702353782e-06,
|
| 915 |
+
"loss": 1.34138193,
|
| 916 |
+
"memory(GiB)": 32.07,
|
| 917 |
+
"step": 460,
|
| 918 |
+
"train_speed(iter/s)": 0.225826
|
| 919 |
+
},
|
| 920 |
+
{
|
| 921 |
+
"epoch": 1.7383177570093458,
|
| 922 |
+
"grad_norm": 1.557905673980713,
|
| 923 |
+
"learning_rate": 4.500865841909168e-06,
|
| 924 |
+
"loss": 1.30747194,
|
| 925 |
+
"memory(GiB)": 32.07,
|
| 926 |
+
"step": 465,
|
| 927 |
+
"train_speed(iter/s)": 0.225996
|
| 928 |
+
},
|
| 929 |
+
{
|
| 930 |
+
"epoch": 1.7570093457943925,
|
| 931 |
+
"grad_norm": 1.7627642154693604,
|
| 932 |
+
"learning_rate": 3.880471890038967e-06,
|
| 933 |
+
"loss": 1.34135695,
|
| 934 |
+
"memory(GiB)": 32.07,
|
| 935 |
+
"step": 470,
|
| 936 |
+
"train_speed(iter/s)": 0.226199
|
| 937 |
+
},
|
| 938 |
+
{
|
| 939 |
+
"epoch": 1.7757009345794392,
|
| 940 |
+
"grad_norm": 1.4336940050125122,
|
| 941 |
+
"learning_rate": 3.3043443123065286e-06,
|
| 942 |
+
"loss": 1.38070517,
|
| 943 |
+
"memory(GiB)": 32.07,
|
| 944 |
+
"step": 475,
|
| 945 |
+
"train_speed(iter/s)": 0.226358
|
| 946 |
+
},
|
| 947 |
+
{
|
| 948 |
+
"epoch": 1.794392523364486,
|
| 949 |
+
"grad_norm": 1.906886339187622,
|
| 950 |
+
"learning_rate": 2.7730360865923956e-06,
|
| 951 |
+
"loss": 1.34674683,
|
| 952 |
+
"memory(GiB)": 32.07,
|
| 953 |
+
"step": 480,
|
| 954 |
+
"train_speed(iter/s)": 0.226536
|
| 955 |
+
},
|
| 956 |
+
{
|
| 957 |
+
"epoch": 1.8130841121495327,
|
| 958 |
+
"grad_norm": 1.7454955577850342,
|
| 959 |
+
"learning_rate": 2.287057172336021e-06,
|
| 960 |
+
"loss": 1.38749065,
|
| 961 |
+
"memory(GiB)": 32.07,
|
| 962 |
+
"step": 485,
|
| 963 |
+
"train_speed(iter/s)": 0.226699
|
| 964 |
+
},
|
| 965 |
+
{
|
| 966 |
+
"epoch": 1.8317757009345794,
|
| 967 |
+
"grad_norm": 1.7366608381271362,
|
| 968 |
+
"learning_rate": 1.8468740210672076e-06,
|
| 969 |
+
"loss": 1.30795374,
|
| 970 |
+
"memory(GiB)": 32.07,
|
| 971 |
+
"step": 490,
|
| 972 |
+
"train_speed(iter/s)": 0.226882
|
| 973 |
+
},
|
| 974 |
+
{
|
| 975 |
+
"epoch": 1.8504672897196262,
|
| 976 |
+
"grad_norm": 1.5829346179962158,
|
| 977 |
+
"learning_rate": 1.4529091286973995e-06,
|
| 978 |
+
"loss": 1.32902784,
|
| 979 |
+
"memory(GiB)": 32.07,
|
| 980 |
+
"step": 495,
|
| 981 |
+
"train_speed(iter/s)": 0.227039
|
| 982 |
+
},
|
| 983 |
+
{
|
| 984 |
+
"epoch": 1.8691588785046729,
|
| 985 |
+
"grad_norm": 1.6908546686172485,
|
| 986 |
+
"learning_rate": 1.1055406300002347e-06,
|
| 987 |
+
"loss": 1.33979492,
|
| 988 |
+
"memory(GiB)": 32.07,
|
| 989 |
+
"step": 500,
|
| 990 |
+
"train_speed(iter/s)": 0.227213
|
| 991 |
+
},
|
| 992 |
+
{
|
| 993 |
+
"epoch": 1.8691588785046729,
|
| 994 |
+
"eval_loss": 1.4183509349822998,
|
| 995 |
+
"eval_runtime": 14.1519,
|
| 996 |
+
"eval_samples_per_second": 3.533,
|
| 997 |
+
"eval_steps_per_second": 3.533,
|
| 998 |
+
"step": 500
|
| 999 |
+
},
|
| 1000 |
+
{
|
| 1001 |
+
"epoch": 1.8878504672897196,
|
| 1002 |
+
"grad_norm": 1.915726900100708,
|
| 1003 |
+
"learning_rate": 8.0510193567086e-07,
|
| 1004 |
+
"loss": 1.30009985,
|
| 1005 |
+
"memory(GiB)": 32.07,
|
| 1006 |
+
"step": 505,
|
| 1007 |
+
"train_speed(iter/s)": 0.225856
|
| 1008 |
+
},
|
| 1009 |
+
{
|
| 1010 |
+
"epoch": 1.9065420560747663,
|
| 1011 |
+
"grad_norm": 1.6646161079406738,
|
| 1012 |
+
"learning_rate": 5.518814123121885e-07,
|
| 1013 |
+
"loss": 1.37087755,
|
| 1014 |
+
"memory(GiB)": 32.07,
|
| 1015 |
+
"step": 510,
|
| 1016 |
+
"train_speed(iter/s)": 0.226034
|
| 1017 |
+
},
|
| 1018 |
+
{
|
| 1019 |
+
"epoch": 1.925233644859813,
|
| 1020 |
+
"grad_norm": 1.7108522653579712,
|
| 1021 |
+
"learning_rate": 3.4612210565528326e-07,
|
| 1022 |
+
"loss": 1.35631628,
|
| 1023 |
+
"memory(GiB)": 32.07,
|
| 1024 |
+
"step": 515,
|
| 1025 |
+
"train_speed(iter/s)": 0.22621
|
| 1026 |
+
},
|
| 1027 |
+
{
|
| 1028 |
+
"epoch": 1.9439252336448598,
|
| 1029 |
+
"grad_norm": 1.7579667568206787,
|
| 1030 |
+
"learning_rate": 1.8802150727962876e-07,
|
| 1031 |
+
"loss": 1.24607553,
|
| 1032 |
+
"memory(GiB)": 32.07,
|
| 1033 |
+
"step": 520,
|
| 1034 |
+
"train_speed(iter/s)": 0.226384
|
| 1035 |
+
},
|
| 1036 |
+
{
|
| 1037 |
+
"epoch": 1.9626168224299065,
|
| 1038 |
+
"grad_norm": 1.634746789932251,
|
| 1039 |
+
"learning_rate": 7.773136505700995e-08,
|
| 1040 |
+
"loss": 1.27467356,
|
| 1041 |
+
"memory(GiB)": 32.07,
|
| 1042 |
+
"step": 525,
|
| 1043 |
+
"train_speed(iter/s)": 0.226543
|
| 1044 |
+
},
|
| 1045 |
+
{
|
| 1046 |
+
"epoch": 1.9813084112149533,
|
| 1047 |
+
"grad_norm": 1.620557188987732,
|
| 1048 |
+
"learning_rate": 1.5357537501159423e-08,
|
| 1049 |
+
"loss": 1.318472,
|
| 1050 |
+
"memory(GiB)": 32.07,
|
| 1051 |
+
"step": 530,
|
| 1052 |
+
"train_speed(iter/s)": 0.226703
|
| 1053 |
+
},
|
| 1054 |
+
{
|
| 1055 |
+
"epoch": 1.9962616822429906,
|
| 1056 |
+
"eval_loss": 1.4178756475448608,
|
| 1057 |
+
"eval_runtime": 14.1624,
|
| 1058 |
+
"eval_samples_per_second": 3.53,
|
| 1059 |
+
"eval_steps_per_second": 3.53,
|
| 1060 |
+
"step": 534
|
| 1061 |
+
}
|
| 1062 |
+
],
|
| 1063 |
+
"logging_steps": 5,
|
| 1064 |
+
"max_steps": 534,
|
| 1065 |
+
"num_input_tokens_seen": 0,
|
| 1066 |
+
"num_train_epochs": 2,
|
| 1067 |
+
"save_steps": 50,
|
| 1068 |
+
"stateful_callbacks": {
|
| 1069 |
+
"TrainerControl": {
|
| 1070 |
+
"args": {
|
| 1071 |
+
"should_epoch_stop": false,
|
| 1072 |
+
"should_evaluate": false,
|
| 1073 |
+
"should_log": false,
|
| 1074 |
+
"should_save": true,
|
| 1075 |
+
"should_training_stop": true
|
| 1076 |
+
},
|
| 1077 |
+
"attributes": {}
|
| 1078 |
+
}
|
| 1079 |
+
},
|
| 1080 |
+
"total_flos": 2.73270917085696e+16,
|
| 1081 |
+
"train_batch_size": 1,
|
| 1082 |
+
"trial_name": null,
|
| 1083 |
+
"trial_params": null
|
| 1084 |
+
}
|
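The trainer_state.json above is plain JSON, so its logged metrics can be read back directly. A minimal sketch (the relative path is assumed to be a local copy of this folder, not something shipped with the upload) that pulls out the eval_loss checkpoints recorded every 50 steps:

```python
import json

# Hedged sketch, not part of the upload: inspect the eval_loss curve logged in
# trainer_state.json (eval_steps=50, max_steps=534 for this run).
with open("llava1_6-vicuna-7b-instruct/trainer_state.json") as f:
    state = json.load(f)

eval_points = [(e["step"], e["eval_loss"])
               for e in state["log_history"] if "eval_loss" in e]
for step, loss in eval_points:
    print(f"step {step:>3}: eval_loss {loss:.4f}")
# For this adapter the curve flattens near 1.42 between steps 400 and 534,
# matching the entries shown above.
```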
llava1_6-vicuna-7b-instruct/training_args.bin
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7f1ee500bc16818ccb6af6404742071ed8aa5c214d899905af5c3d73f16eae7b
|
| 3 |
+
size 7416
|
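training_args.bin, like the other binary artifacts in this upload, is stored as a Git LFS pointer (the three version/oid/size lines above) rather than the payload itself; `git lfs pull` is the usual way to materialize it. A small illustrative parser follows; parse_lfs_pointer is a hypothetical helper written for this sketch, not something in the repository.

```python
# Hedged sketch: read the three "key value" lines of a Git LFS pointer file.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

ptr = parse_lfs_pointer("llava1_6-vicuna-7b-instruct/training_args.bin")
print(ptr["oid"], ptr["size"])  # sha256:7f1ee500... 7416, matching the pointer above
```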
minicpm-v-v2_6-chat/README.md
ADDED
|
@@ -0,0 +1,202 @@
|
| 1 |
+
---
|
| 2 |
+
base_model: /fs/clip-projects/geoguesser/vlms/cpm/MiniCPM-V-2_6
|
| 3 |
+
library_name: peft
|
| 4 |
+
---
|
| 5 |
+
|
| 6 |
+
# Model Card for Model ID
|
| 7 |
+
|
| 8 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
## Model Details
|
| 13 |
+
|
| 14 |
+
### Model Description
|
| 15 |
+
|
| 16 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
- **Developed by:** [More Information Needed]
|
| 21 |
+
- **Funded by [optional]:** [More Information Needed]
|
| 22 |
+
- **Shared by [optional]:** [More Information Needed]
|
| 23 |
+
- **Model type:** [More Information Needed]
|
| 24 |
+
- **Language(s) (NLP):** [More Information Needed]
|
| 25 |
+
- **License:** [More Information Needed]
|
| 26 |
+
- **Finetuned from model [optional]:** [More Information Needed]
|
| 27 |
+
|
| 28 |
+
### Model Sources [optional]
|
| 29 |
+
|
| 30 |
+
<!-- Provide the basic links for the model. -->
|
| 31 |
+
|
| 32 |
+
- **Repository:** [More Information Needed]
|
| 33 |
+
- **Paper [optional]:** [More Information Needed]
|
| 34 |
+
- **Demo [optional]:** [More Information Needed]
|
| 35 |
+
|
| 36 |
+
## Uses
|
| 37 |
+
|
| 38 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
| 39 |
+
|
| 40 |
+
### Direct Use
|
| 41 |
+
|
| 42 |
+
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
|
| 43 |
+
|
| 44 |
+
[More Information Needed]
|
| 45 |
+
|
| 46 |
+
### Downstream Use [optional]
|
| 47 |
+
|
| 48 |
+
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
|
| 49 |
+
|
| 50 |
+
[More Information Needed]
|
| 51 |
+
|
| 52 |
+
### Out-of-Scope Use
|
| 53 |
+
|
| 54 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
| 55 |
+
|
| 56 |
+
[More Information Needed]
|
| 57 |
+
|
| 58 |
+
## Bias, Risks, and Limitations
|
| 59 |
+
|
| 60 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
| 61 |
+
|
| 62 |
+
[More Information Needed]
|
| 63 |
+
|
| 64 |
+
### Recommendations
|
| 65 |
+
|
| 66 |
+
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
|
| 67 |
+
|
| 68 |
+
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
|
| 69 |
+
|
| 70 |
+
## How to Get Started with the Model
|
| 71 |
+
|
| 72 |
+
Use the code below to get started with the model.
|
| 73 |
+
|
| 74 |
+
[More Information Needed]
|
| 75 |
+
|
| 76 |
+
## Training Details
|
| 77 |
+
|
| 78 |
+
### Training Data
|
| 79 |
+
|
| 80 |
+
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
|
| 81 |
+
|
| 82 |
+
[More Information Needed]
|
| 83 |
+
|
| 84 |
+
### Training Procedure
|
| 85 |
+
|
| 86 |
+
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
|
| 87 |
+
|
| 88 |
+
#### Preprocessing [optional]
|
| 89 |
+
|
| 90 |
+
[More Information Needed]
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
#### Training Hyperparameters
|
| 94 |
+
|
| 95 |
+
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
|
| 96 |
+
|
| 97 |
+
#### Speeds, Sizes, Times [optional]
|
| 98 |
+
|
| 99 |
+
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
|
| 100 |
+
|
| 101 |
+
[More Information Needed]
|
| 102 |
+
|
| 103 |
+
## Evaluation
|
| 104 |
+
|
| 105 |
+
<!-- This section describes the evaluation protocols and provides the results. -->
|
| 106 |
+
|
| 107 |
+
### Testing Data, Factors & Metrics
|
| 108 |
+
|
| 109 |
+
#### Testing Data
|
| 110 |
+
|
| 111 |
+
<!-- This should link to a Dataset Card if possible. -->
|
| 112 |
+
|
| 113 |
+
[More Information Needed]
|
| 114 |
+
|
| 115 |
+
#### Factors
|
| 116 |
+
|
| 117 |
+
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
|
| 118 |
+
|
| 119 |
+
[More Information Needed]
|
| 120 |
+
|
| 121 |
+
#### Metrics
|
| 122 |
+
|
| 123 |
+
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
|
| 124 |
+
|
| 125 |
+
[More Information Needed]
|
| 126 |
+
|
| 127 |
+
### Results
|
| 128 |
+
|
| 129 |
+
[More Information Needed]
|
| 130 |
+
|
| 131 |
+
#### Summary
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
## Model Examination [optional]
|
| 136 |
+
|
| 137 |
+
<!-- Relevant interpretability work for the model goes here -->
|
| 138 |
+
|
| 139 |
+
[More Information Needed]
|
| 140 |
+
|
| 141 |
+
## Environmental Impact
|
| 142 |
+
|
| 143 |
+
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
|
| 144 |
+
|
| 145 |
+
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
|
| 146 |
+
|
| 147 |
+
- **Hardware Type:** [More Information Needed]
|
| 148 |
+
- **Hours used:** [More Information Needed]
|
| 149 |
+
- **Cloud Provider:** [More Information Needed]
|
| 150 |
+
- **Compute Region:** [More Information Needed]
|
| 151 |
+
- **Carbon Emitted:** [More Information Needed]
|
| 152 |
+
|
| 153 |
+
## Technical Specifications [optional]
|
| 154 |
+
|
| 155 |
+
### Model Architecture and Objective
|
| 156 |
+
|
| 157 |
+
[More Information Needed]
|
| 158 |
+
|
| 159 |
+
### Compute Infrastructure
|
| 160 |
+
|
| 161 |
+
[More Information Needed]
|
| 162 |
+
|
| 163 |
+
#### Hardware
|
| 164 |
+
|
| 165 |
+
[More Information Needed]
|
| 166 |
+
|
| 167 |
+
#### Software
|
| 168 |
+
|
| 169 |
+
[More Information Needed]
|
| 170 |
+
|
| 171 |
+
## Citation [optional]
|
| 172 |
+
|
| 173 |
+
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
|
| 174 |
+
|
| 175 |
+
**BibTeX:**
|
| 176 |
+
|
| 177 |
+
[More Information Needed]
|
| 178 |
+
|
| 179 |
+
**APA:**
|
| 180 |
+
|
| 181 |
+
[More Information Needed]
|
| 182 |
+
|
| 183 |
+
## Glossary [optional]
|
| 184 |
+
|
| 185 |
+
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
|
| 186 |
+
|
| 187 |
+
[More Information Needed]
|
| 188 |
+
|
| 189 |
+
## More Information [optional]
|
| 190 |
+
|
| 191 |
+
[More Information Needed]
|
| 192 |
+
|
| 193 |
+
## Model Card Authors [optional]
|
| 194 |
+
|
| 195 |
+
[More Information Needed]
|
| 196 |
+
|
| 197 |
+
## Model Card Contact
|
| 198 |
+
|
| 199 |
+
[More Information Needed]
|
| 200 |
+
### Framework versions
|
| 201 |
+
|
| 202 |
+
- PEFT 0.12.0
|
minicpm-v-v2_6-chat/adapter_config.json
ADDED
|
@@ -0,0 +1,26 @@
|
| 1 |
+
{
|
| 2 |
+
"alpha_pattern": {},
|
| 3 |
+
"auto_mapping": null,
|
| 4 |
+
"base_model_name_or_path": "/fs/clip-projects/geoguesser/vlms/cpm/MiniCPM-V-2_6",
|
| 5 |
+
"bias": "none",
|
| 6 |
+
"fan_in_fan_out": false,
|
| 7 |
+
"inference_mode": true,
|
| 8 |
+
"init_lora_weights": true,
|
| 9 |
+
"layer_replication": null,
|
| 10 |
+
"layers_pattern": null,
|
| 11 |
+
"layers_to_transform": null,
|
| 12 |
+
"loftq_config": {},
|
| 13 |
+
"lora_alpha": 32,
|
| 14 |
+
"lora_dropout": 0.05,
|
| 15 |
+
"megatron_config": null,
|
| 16 |
+
"megatron_core": "megatron.core",
|
| 17 |
+
"modules_to_save": [],
|
| 18 |
+
"peft_type": "LORA",
|
| 19 |
+
"r": 8,
|
| 20 |
+
"rank_pattern": {},
|
| 21 |
+
"revision": null,
|
| 22 |
+
"target_modules": "^(llm|resampler)(?!.*(lm_head|output|emb|wte|shared)).*",
|
| 23 |
+
"task_type": "CAUSAL_LM",
|
| 24 |
+
"use_dora": false,
|
| 25 |
+
"use_rslora": false
|
| 26 |
+
}
|
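adapter_config.json above describes a PEFT LoRA adapter (r=8, alpha=32, dropout 0.05) whose target_modules regex covers the LLM and resampler weights. A minimal loading sketch, assuming the hub id openbmb/MiniCPM-V-2_6 stands in for the local base_model_name_or_path (that id is an assumption, not taken from this upload):

```python
import torch
from transformers import AutoModel, AutoTokenizer
from peft import PeftModel

# Assumed hub copy of the same base weights as base_model_name_or_path above.
base = AutoModel.from_pretrained("openbmb/MiniCPM-V-2_6",
                                 trust_remote_code=True,
                                 torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained("openbmb/MiniCPM-V-2_6",
                                          trust_remote_code=True)

# The adapter weights and config live in this minicpm-v-v2_6-chat/ folder.
model = PeftModel.from_pretrained(base, "minicpm-v-v2_6-chat").eval()
```

If a standalone checkpoint is preferred, model.merge_and_unload() can fold the LoRA deltas back into the base weights.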
minicpm-v-v2_6-chat/adapter_model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ba0c23a6f4291a7bf0ac08aadc2782defa427169e319594f0cc95f379f5c2a6f
|
| 3 |
+
size 81175080
|
minicpm-v-v2_6-chat/additional_config.json
ADDED
|
@@ -0,0 +1 @@
|
| 1 |
+
{"lora_dtype": null, "lorap_lr_ratio": null, "lorap_emb_lr": 1e-06}
|
minicpm-v-v2_6-chat/configuration.json
ADDED
|
@@ -0,0 +1,13 @@
|
| 1 |
+
{
|
| 2 |
+
"framework": "Pytorch",
|
| 3 |
+
"task": "visual-question-answering",
|
| 4 |
+
"adapter_cfg": {
|
| 5 |
+
"model_id_or_path": "/fs/clip-projects/geoguesser/vlms/cpm/MiniCPM-V-2_6",
|
| 6 |
+
"model_revision": "master",
|
| 7 |
+
"sft_type": "lora",
|
| 8 |
+
"tuner_backend": "peft",
|
| 9 |
+
"template_type": "minicpm-v-v2_6",
|
| 10 |
+
"dtype": "bf16",
|
| 11 |
+
"system": "You are a helpful assistant."
|
| 12 |
+
}
|
| 13 |
+
}
|
minicpm-v-v2_6-chat/generation_config.json
ADDED
|
@@ -0,0 +1,7 @@
|
| 1 |
+
{
|
| 2 |
+
"bos_token_id": 151643,
|
| 3 |
+
"eos_token_id": 151645,
|
| 4 |
+
"max_new_tokens": 2048,
|
| 5 |
+
"pad_token_id": 151643,
|
| 6 |
+
"transformers_version": "4.45.1"
|
| 7 |
+
}
|
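generation_config.json above is a standard transformers GenerationConfig (bos/eos/pad ids from the backbone tokenizer, 2048 new tokens by default). A hedged sketch of loading it, with the directory path assumed to be a local copy of this folder:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("minicpm-v-v2_6-chat")
print(gen_cfg.max_new_tokens, gen_cfg.eos_token_id)  # 2048 151645
# Typically passed straight to generate():
# outputs = model.generate(**inputs, generation_config=gen_cfg)
```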
minicpm-v-v2_6-chat/optimizer.pt
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6f804033311c05e15514241ec05e2702e77e1ea22c7ddc34080f9960abca0dd0
|
| 3 |
+
size 162575542
|
minicpm-v-v2_6-chat/rng_state.pth
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c267f7c126c2e718bba599dcd30e9c477a5b3cf23ec7812edda9d522e67d4a1b
|
| 3 |
+
size 14244
|
minicpm-v-v2_6-chat/scheduler.pt
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b75bb0cab5c1ac64f1f42e99abb3e1c6d095394f6dd0c73c81aee621b476d6d9
|
| 3 |
+
size 1064
|
minicpm-v-v2_6-chat/sft_args.json
ADDED
|
@@ -0,0 +1,247 @@
|
| 1 |
+
{
|
| 2 |
+
"model_type": "minicpm-v-v2_6-chat",
|
| 3 |
+
"model_id_or_path": "/fs/clip-projects/geoguesser/vlms/cpm/MiniCPM-V-2_6",
|
| 4 |
+
"model_revision": "master",
|
| 5 |
+
"full_determinism": false,
|
| 6 |
+
"sft_type": "lora",
|
| 7 |
+
"freeze_parameters": [],
|
| 8 |
+
"freeze_vit": false,
|
| 9 |
+
"freeze_parameters_ratio": 0.0,
|
| 10 |
+
"additional_trainable_parameters": [],
|
| 11 |
+
"tuner_backend": "peft",
|
| 12 |
+
"template_type": "minicpm-v-v2_6",
|
| 13 |
+
"output_dir": "/fs/clip-projects/geoguesser/vlms/cpm/output/minicpm-v-v2_6-chat/v3-20241108-065955",
|
| 14 |
+
"add_output_dir_suffix": true,
|
| 15 |
+
"ddp_backend": null,
|
| 16 |
+
"ddp_find_unused_parameters": null,
|
| 17 |
+
"ddp_broadcast_buffers": null,
|
| 18 |
+
"ddp_timeout": 1800,
|
| 19 |
+
"seed": 42,
|
| 20 |
+
"resume_from_checkpoint": null,
|
| 21 |
+
"resume_only_model": false,
|
| 22 |
+
"ignore_data_skip": false,
|
| 23 |
+
"dtype": "bf16",
|
| 24 |
+
"packing": false,
|
| 25 |
+
"train_backend": "transformers",
|
| 26 |
+
"tp": 1,
|
| 27 |
+
"pp": 1,
|
| 28 |
+
"min_lr": null,
|
| 29 |
+
"sequence_parallel": false,
|
| 30 |
+
"model_kwargs": null,
|
| 31 |
+
"loss_name": null,
|
| 32 |
+
"dataset": [
|
| 33 |
+
"train_set_a.jsonl"
|
| 34 |
+
],
|
| 35 |
+
"val_dataset": [
|
| 36 |
+
"test_set_a.jsonl"
|
| 37 |
+
],
|
| 38 |
+
"dataset_seed": 42,
|
| 39 |
+
"dataset_test_ratio": 0.0,
|
| 40 |
+
"use_loss_scale": false,
|
| 41 |
+
"loss_scale_config_path": "/fs/clip-projects/geoguesser/zheyuan/DPO/swift/swift/llm/agent/default_loss_scale_config.json",
|
| 42 |
+
"system": "You are a helpful assistant.",
|
| 43 |
+
"tools_prompt": "react_en",
|
| 44 |
+
"max_length": 2048,
|
| 45 |
+
"truncation_strategy": "delete",
|
| 46 |
+
"check_dataset_strategy": "none",
|
| 47 |
+
"streaming": false,
|
| 48 |
+
"streaming_val_size": 0,
|
| 49 |
+
"streaming_buffer_size": 16384,
|
| 50 |
+
"model_name": [
|
| 51 |
+
null,
|
| 52 |
+
null
|
| 53 |
+
],
|
| 54 |
+
"model_author": [
|
| 55 |
+
null,
|
| 56 |
+
null
|
| 57 |
+
],
|
| 58 |
+
"quant_method": null,
|
| 59 |
+
"quantization_bit": 0,
|
| 60 |
+
"hqq_axis": 0,
|
| 61 |
+
"hqq_dynamic_config_path": null,
|
| 62 |
+
"bnb_4bit_comp_dtype": "bf16",
|
| 63 |
+
"bnb_4bit_quant_type": "nf4",
|
| 64 |
+
"bnb_4bit_use_double_quant": true,
|
| 65 |
+
"bnb_4bit_quant_storage": null,
|
| 66 |
+
"rescale_image": -1,
|
| 67 |
+
"target_modules": "^(llm|resampler)(?!.*(lm_head|output|emb|wte|shared)).*",
|
| 68 |
+
"target_regex": null,
|
| 69 |
+
"modules_to_save": [],
|
| 70 |
+
"lora_rank": 8,
|
| 71 |
+
"lora_alpha": 32,
|
| 72 |
+
"lora_dropout": 0.05,
|
| 73 |
+
"lora_bias_trainable": "none",
|
| 74 |
+
"lora_dtype": null,
|
| 75 |
+
"lora_lr_ratio": null,
|
| 76 |
+
"use_rslora": false,
|
| 77 |
+
"use_dora": false,
|
| 78 |
+
"init_lora_weights": true,
|
| 79 |
+
"fourier_n_frequency": 2000,
|
| 80 |
+
"fourier_scaling": 300.0,
|
| 81 |
+
"rope_scaling": null,
|
| 82 |
+
"boft_block_size": 4,
|
| 83 |
+
"boft_block_num": 0,
|
| 84 |
+
"boft_n_butterfly_factor": 1,
|
| 85 |
+
"boft_dropout": 0.0,
|
| 86 |
+
"vera_rank": 256,
|
| 87 |
+
"vera_projection_prng_key": 0,
|
| 88 |
+
"vera_dropout": 0.0,
|
| 89 |
+
"vera_d_initial": 0.1,
|
| 90 |
+
"adapter_act": "gelu",
|
| 91 |
+
"adapter_length": 128,
|
| 92 |
+
"use_galore": false,
|
| 93 |
+
"galore_target_modules": null,
|
| 94 |
+
"galore_rank": 128,
|
| 95 |
+
"galore_update_proj_gap": 50,
|
| 96 |
+
"galore_scale": 1.0,
|
| 97 |
+
"galore_proj_type": "std",
|
| 98 |
+
"galore_optim_per_parameter": false,
|
| 99 |
+
"galore_with_embedding": false,
|
| 100 |
+
"galore_quantization": false,
|
| 101 |
+
"galore_proj_quant": false,
|
| 102 |
+
"galore_proj_bits": 4,
|
| 103 |
+
"galore_proj_group_size": 256,
|
| 104 |
+
"galore_cos_threshold": 0.4,
|
| 105 |
+
"galore_gamma_proj": 2,
|
| 106 |
+
"galore_queue_size": 5,
|
| 107 |
+
"adalora_target_r": 8,
|
| 108 |
+
"adalora_init_r": 12,
|
| 109 |
+
"adalora_tinit": 0,
|
| 110 |
+
"adalora_tfinal": 0,
|
| 111 |
+
"adalora_deltaT": 1,
|
| 112 |
+
"adalora_beta1": 0.85,
|
| 113 |
+
"adalora_beta2": 0.85,
|
| 114 |
+
"adalora_orth_reg_weight": 0.5,
|
| 115 |
+
"ia3_feedforward_modules": [],
|
| 116 |
+
"llamapro_num_new_blocks": 4,
|
| 117 |
+
"llamapro_num_groups": null,
|
| 118 |
+
"neftune_noise_alpha": null,
|
| 119 |
+
"neftune_backend": "transformers",
|
| 120 |
+
"lisa_activated_layers": 0,
|
| 121 |
+
"lisa_step_interval": 20,
|
| 122 |
+
"reft_layer_key": null,
|
| 123 |
+
"reft_layers": null,
|
| 124 |
+
"reft_rank": 4,
|
| 125 |
+
"reft_intervention_type": "LoreftIntervention",
|
| 126 |
+
"reft_args": null,
|
| 127 |
+
"use_liger": false,
|
| 128 |
+
"gradient_checkpointing": true,
|
| 129 |
+
"deepspeed": null,
|
| 130 |
+
"batch_size": 1,
|
| 131 |
+
"eval_batch_size": 1,
|
| 132 |
+
"auto_find_batch_size": false,
|
| 133 |
+
"num_train_epochs": 2,
|
| 134 |
+
"max_steps": -1,
|
| 135 |
+
"optim": "adamw_torch",
|
| 136 |
+
"adam_beta1": 0.9,
|
| 137 |
+
"adam_beta2": 0.95,
|
| 138 |
+
"adam_epsilon": 1e-08,
|
| 139 |
+
"learning_rate": 0.0001,
|
| 140 |
+
"weight_decay": 0.1,
|
| 141 |
+
"gradient_accumulation_steps": 4,
|
| 142 |
+
"max_grad_norm": 1,
|
| 143 |
+
"predict_with_generate": false,
|
| 144 |
+
"lr_scheduler_type": "cosine",
|
| 145 |
+
"lr_scheduler_kwargs": {},
|
| 146 |
+
"warmup_ratio": 0.05,
|
| 147 |
+
"warmup_steps": 0,
|
| 148 |
+
"eval_steps": 50,
|
| 149 |
+
"save_steps": 50,
|
| 150 |
+
"save_only_model": false,
|
| 151 |
+
"save_total_limit": 2,
|
| 152 |
+
"logging_steps": 5,
|
| 153 |
+
"acc_steps": 1,
|
| 154 |
+
"dataloader_num_workers": 1,
|
| 155 |
+
"dataloader_pin_memory": true,
|
| 156 |
+
"dataloader_drop_last": false,
|
| 157 |
+
"push_to_hub": false,
|
| 158 |
+
"hub_model_id": null,
|
| 159 |
+
"hub_token": null,
|
| 160 |
+
"hub_private_repo": false,
|
| 161 |
+
"hub_strategy": "every_save",
|
| 162 |
+
"test_oom_error": false,
|
| 163 |
+
"disable_tqdm": false,
|
| 164 |
+
"lazy_tokenize": true,
|
| 165 |
+
"preprocess_num_proc": 1,
|
| 166 |
+
"use_flash_attn": null,
|
| 167 |
+
"ignore_args_error": false,
|
| 168 |
+
"check_model_is_latest": true,
|
| 169 |
+
"logging_dir": "/fs/clip-projects/geoguesser/vlms/cpm/output/minicpm-v-v2_6-chat/v3-20241108-065955/runs",
|
| 170 |
+
"report_to": [
|
| 171 |
+
"tensorboard"
|
| 172 |
+
],
|
| 173 |
+
"acc_strategy": "token",
|
| 174 |
+
"save_on_each_node": false,
|
| 175 |
+
"evaluation_strategy": "steps",
|
| 176 |
+
"save_strategy": "steps",
|
| 177 |
+
"save_safetensors": true,
|
| 178 |
+
"gpu_memory_fraction": null,
|
| 179 |
+
"include_num_input_tokens_seen": false,
|
| 180 |
+
"local_repo_path": null,
|
| 181 |
+
"custom_register_path": null,
|
| 182 |
+
"custom_dataset_info": null,
|
| 183 |
+
"device_map_config": null,
|
| 184 |
+
"device_max_memory": [],
|
| 185 |
+
"max_new_tokens": 2048,
|
| 186 |
+
"do_sample": null,
|
| 187 |
+
"temperature": null,
|
| 188 |
+
"top_k": null,
|
| 189 |
+
"top_p": null,
|
| 190 |
+
"repetition_penalty": null,
|
| 191 |
+
"num_beams": 1,
|
| 192 |
+
"fsdp": "",
|
| 193 |
+
"fsdp_config": null,
|
| 194 |
+
"sequence_parallel_size": 1,
|
| 195 |
+
"model_layer_cls_name": null,
|
| 196 |
+
"metric_warmup_step": 0,
|
| 197 |
+
"fsdp_num": 1,
|
| 198 |
+
"per_device_train_batch_size": null,
|
| 199 |
+
"per_device_eval_batch_size": null,
|
| 200 |
+
"eval_strategy": null,
|
| 201 |
+
"self_cognition_sample": 0,
|
| 202 |
+
"train_dataset_mix_ratio": 0.0,
|
| 203 |
+
"train_dataset_mix_ds": [
|
| 204 |
+
"ms-bench"
|
| 205 |
+
],
|
| 206 |
+
"train_dataset_sample": -1,
|
| 207 |
+
"val_dataset_sample": null,
|
| 208 |
+
"safe_serialization": null,
|
| 209 |
+
"only_save_model": null,
|
| 210 |
+
"neftune_alpha": null,
|
| 211 |
+
"deepspeed_config_path": null,
|
| 212 |
+
"model_cache_dir": null,
|
| 213 |
+
"lora_dropout_p": null,
|
| 214 |
+
"lora_target_modules": [],
|
| 215 |
+
"lora_target_regex": null,
|
| 216 |
+
"lora_modules_to_save": [],
|
| 217 |
+
"boft_target_modules": [],
|
| 218 |
+
"boft_modules_to_save": [],
|
| 219 |
+
"vera_target_modules": [],
|
| 220 |
+
"vera_modules_to_save": [],
|
| 221 |
+
"ia3_target_modules": [],
|
| 222 |
+
"ia3_modules_to_save": [],
|
| 223 |
+
"custom_train_dataset_path": [],
|
| 224 |
+
"custom_val_dataset_path": [],
|
| 225 |
+
"device_map_config_path": null,
|
| 226 |
+
"push_hub_strategy": null,
|
| 227 |
+
"use_self_cognition": false,
|
| 228 |
+
"is_multimodal": true,
|
| 229 |
+
"is_vision": true,
|
| 230 |
+
"lora_use_embedding": false,
|
| 231 |
+
"lora_use_all": false,
|
| 232 |
+
"lora_m2s_use_embedding": false,
|
| 233 |
+
"lora_m2s_use_ln": false,
|
| 234 |
+
"torch_dtype": "torch.bfloat16",
|
| 235 |
+
"fp16": false,
|
| 236 |
+
"bf16": true,
|
| 237 |
+
"rank": -1,
|
| 238 |
+
"local_rank": -1,
|
| 239 |
+
"world_size": 1,
|
| 240 |
+
"local_world_size": 1,
|
| 241 |
+
"bnb_4bit_compute_dtype": "torch.bfloat16",
|
| 242 |
+
"load_in_4bit": false,
|
| 243 |
+
"load_in_8bit": false,
|
| 244 |
+
"train_sampler_random": true,
|
| 245 |
+
"train_type": "sft",
|
| 246 |
+
"training_args": "Seq2SeqTrainingArguments(output_dir='/fs/clip-projects/geoguesser/vlms/cpm/output/minicpm-v-v2_6-chat/v3-20241108-065955', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=1, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=4, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=0.0001, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1, num_train_epochs=2, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs={}, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/fs/clip-projects/geoguesser/vlms/cpm/output/minicpm-v-v2_6-chat/v3-20241108-065955/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<IntervalStrategy.STEPS: 'steps'>, save_steps=50, save_total_limit=2, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=50, dataloader_num_workers=1, dataloader_prefetch_factor=None, past_index=-1, run_name='/fs/clip-projects/geoguesser/vlms/cpm/output/minicpm-v-v2_6-chat/v3-20241108-065955', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed=None, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=False, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, eval_do_concat_batches=True, fp16_backend='auto', evaluation_strategy=None, push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, split_batches=None, include_tokens_per_second=False, include_num_input_tokens_seen=False, 
neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=GenerationConfig {\n \"bos_token_id\": 151643,\n \"eos_token_id\": 151645,\n \"max_new_tokens\": 2048,\n \"pad_token_id\": 151643\n}\n, acc_strategy='token', loss_name=None, additional_saved_files=[], train_sampler_random=True, metric_warmup_step=0, train_dataset_sample=-1)"
|
| 247 |
+
}
|
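sft_args.json above records the full SFT configuration used for this adapter. A minimal sketch (path assumed to be a local copy) that recovers the headline hyperparameters, including the effective batch size of 1 sample times 4 gradient-accumulation steps = 4 and the cosine schedule with 5% warmup:

```python
import json

# Hedged sketch, not part of the upload: summarize the key training settings.
with open("minicpm-v-v2_6-chat/sft_args.json") as f:
    args = json.load(f)

effective_batch = args["batch_size"] * args["gradient_accumulation_steps"]  # 1 * 4 = 4
print(args["sft_type"], args["lora_rank"], args["lora_alpha"], args["lora_dropout"])  # lora 8 32 0.05
print(args["lr_scheduler_type"], args["learning_rate"], args["warmup_ratio"])         # cosine 0.0001 0.05
print("effective batch size:", effective_batch)
```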
minicpm-v-v2_6-chat/trainer_state.json
ADDED
|
@@ -0,0 +1,1202 @@
|
| 1 |
+
{
|
| 2 |
+
"best_metric": 1.53044581,
|
| 3 |
+
"best_model_checkpoint": "/fs/clip-projects/geoguesser/vlms/cpm/output/minicpm-v-v2_6-chat/v3-20241108-065955/checkpoint-534",
|
| 4 |
+
"epoch": 1.9962616822429906,
|
| 5 |
+
"eval_steps": 50,
|
| 6 |
+
"global_step": 534,
|
| 7 |
+
"is_hyper_param_search": false,
|
| 8 |
+
"is_local_process_zero": true,
|
| 9 |
+
"is_world_process_zero": true,
|
| 10 |
+
"log_history": [
|
| 11 |
+
{
|
| 12 |
+
"acc": 0.55045807,
|
| 13 |
+
"epoch": 0.003738317757009346,
|
| 14 |
+
"grad_norm": 1.613571286201477,
|
| 15 |
+
"learning_rate": 3.7037037037037037e-06,
|
| 16 |
+
"loss": 1.95084548,
|
| 17 |
+
"memory(GiB)": 18.97,
|
| 18 |
+
"step": 1,
|
| 19 |
+
"train_speed(iter/s)": 0.133639
|
| 20 |
+
},
|
| 21 |
+
{
|
| 22 |
+
"acc": 0.50916213,
|
| 23 |
+
"epoch": 0.018691588785046728,
|
| 24 |
+
"grad_norm": 1.757776141166687,
|
| 25 |
+
"learning_rate": 1.8518518518518518e-05,
|
| 26 |
+
"loss": 2.09828925,
|
| 27 |
+
"memory(GiB)": 19.98,
|
| 28 |
+
"step": 5,
|
| 29 |
+
"train_speed(iter/s)": 0.287053
|
| 30 |
+
},
|
| 31 |
+
{
|
| 32 |
+
"acc": 0.54273653,
|
| 33 |
+
"epoch": 0.037383177570093455,
|
| 34 |
+
"grad_norm": 1.2750011682510376,
|
| 35 |
+
"learning_rate": 3.7037037037037037e-05,
|
| 36 |
+
"loss": 1.98593979,
|
| 37 |
+
"memory(GiB)": 19.98,
|
| 38 |
+
"step": 10,
|
| 39 |
+
"train_speed(iter/s)": 0.333912
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"acc": 0.53456535,
|
| 43 |
+
"epoch": 0.056074766355140186,
|
| 44 |
+
"grad_norm": 1.1665784120559692,
|
| 45 |
+
"learning_rate": 5.555555555555556e-05,
|
| 46 |
+
"loss": 1.90109081,
|
| 47 |
+
"memory(GiB)": 21.0,
|
| 48 |
+
"step": 15,
|
| 49 |
+
"train_speed(iter/s)": 0.352318
|
| 50 |
+
},
|
| 51 |
+
{
|
| 52 |
+
"acc": 0.54925652,
|
| 53 |
+
"epoch": 0.07476635514018691,
|
| 54 |
+
"grad_norm": 1.3744975328445435,
|
| 55 |
+
"learning_rate": 7.407407407407407e-05,
|
| 56 |
+
"loss": 1.76896648,
|
| 57 |
+
"memory(GiB)": 21.0,
|
| 58 |
+
"step": 20,
|
| 59 |
+
"train_speed(iter/s)": 0.362181
|
| 60 |
+
},
|
| 61 |
+
{
|
| 62 |
+
"acc": 0.5564671,
|
| 63 |
+
"epoch": 0.09345794392523364,
|
| 64 |
+
"grad_norm": 1.2068527936935425,
|
| 65 |
+
"learning_rate": 9.25925925925926e-05,
|
| 66 |
+
"loss": 1.65932693,
|
| 67 |
+
"memory(GiB)": 21.0,
|
| 68 |
+
"step": 25,
|
| 69 |
+
"train_speed(iter/s)": 0.368232
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"acc": 0.55754151,
|
| 73 |
+
"epoch": 0.11214953271028037,
|
| 74 |
+
"grad_norm": 0.974327802658081,
|
| 75 |
+
"learning_rate": 9.999136119166803e-05,
|
| 76 |
+
"loss": 1.7156683,
|
| 77 |
+
"memory(GiB)": 22.04,
|
| 78 |
+
"step": 30,
|
| 79 |
+
"train_speed(iter/s)": 0.3718
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"acc": 0.554459,
|
| 83 |
+
"epoch": 0.1308411214953271,
|
| 84 |
+
"grad_norm": 1.1023714542388916,
|
| 85 |
+
"learning_rate": 9.99385792841537e-05,
|
| 86 |
+
"loss": 1.76657219,
|
| 87 |
+
"memory(GiB)": 22.04,
|
| 88 |
+
"step": 35,
|
| 89 |
+
"train_speed(iter/s)": 0.37456
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"acc": 0.55972748,
|
| 93 |
+
"epoch": 0.14953271028037382,
|
| 94 |
+
"grad_norm": 1.0901970863342285,
|
| 95 |
+
"learning_rate": 9.983786540671051e-05,
|
| 96 |
+
"loss": 1.64107857,
|
| 97 |
+
"memory(GiB)": 23.08,
|
| 98 |
+
"step": 40,
|
| 99 |
+
"train_speed(iter/s)": 0.376463
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"acc": 0.5773067,
|
| 103 |
+
"epoch": 0.16822429906542055,
|
| 104 |
+
"grad_norm": 1.043094277381897,
|
| 105 |
+
"learning_rate": 9.968931622637652e-05,
|
| 106 |
+
"loss": 1.62755222,
|
| 107 |
+
"memory(GiB)": 23.08,
|
| 108 |
+
"step": 45,
|
| 109 |
+
"train_speed(iter/s)": 0.378202
|
| 110 |
+
},
|
| 111 |
+
{
|
| 112 |
+
"acc": 0.5643084,
|
| 113 |
+
"epoch": 0.18691588785046728,
|
| 114 |
+
"grad_norm": 1.1167865991592407,
|
| 115 |
+
"learning_rate": 9.949307432339625e-05,
|
| 116 |
+
"loss": 1.67872505,
|
| 117 |
+
"memory(GiB)": 23.08,
|
| 118 |
+
"step": 50,
|
| 119 |
+
"train_speed(iter/s)": 0.379293
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"epoch": 0.18691588785046728,
|
| 123 |
+
"eval_acc": 0.5715999400209927,
|
| 124 |
+
"eval_loss": 1.6141570806503296,
|
| 125 |
+
"eval_runtime": 19.3426,
|
| 126 |
+
"eval_samples_per_second": 2.585,
|
| 127 |
+
"eval_steps_per_second": 2.585,
|
| 128 |
+
"step": 50
|
| 129 |
+
},
|
| 130 |
+
{
|
| 131 |
+
"acc": 0.57612801,
|
| 132 |
+
"epoch": 0.205607476635514,
|
| 133 |
+
"grad_norm": 1.0558923482894897,
|
| 134 |
+
"learning_rate": 9.924932805436949e-05,
|
| 135 |
+
"loss": 1.57275066,
|
| 136 |
+
"memory(GiB)": 23.08,
|
| 137 |
+
"step": 55,
|
| 138 |
+
"train_speed(iter/s)": 0.333772
|
| 139 |
+
},
|
| 140 |
+
{
|
| 141 |
+
"acc": 0.56456318,
|
| 142 |
+
"epoch": 0.22429906542056074,
|
| 143 |
+
"grad_norm": 1.2267849445343018,
|
| 144 |
+
"learning_rate": 9.895831137146318e-05,
|
| 145 |
+
"loss": 1.62593994,
|
| 146 |
+
"memory(GiB)": 23.08,
|
| 147 |
+
"step": 60,
|
| 148 |
+
"train_speed(iter/s)": 0.337909
|
| 149 |
+
},
|
| 150 |
+
{
|
| 151 |
+
"acc": 0.57293587,
|
| 152 |
+
"epoch": 0.24299065420560748,
|
| 153 |
+
"grad_norm": 1.203539252281189,
|
| 154 |
+
"learning_rate": 9.862030359785981e-05,
|
| 155 |
+
"loss": 1.62202415,
|
| 156 |
+
"memory(GiB)": 23.08,
|
| 157 |
+
"step": 65,
|
| 158 |
+
"train_speed(iter/s)": 0.341402
|
| 159 |
+
},
|
| 160 |
+
{
|
| 161 |
+
"acc": 0.56972237,
|
| 162 |
+
"epoch": 0.2616822429906542,
|
| 163 |
+
"grad_norm": 1.1968094110488892,
|
| 164 |
+
"learning_rate": 9.82356291596578e-05,
|
| 165 |
+
"loss": 1.62049065,
|
| 166 |
+
"memory(GiB)": 23.08,
|
| 167 |
+
"step": 70,
|
| 168 |
+
"train_speed(iter/s)": 0.344462
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"acc": 0.56793709,
|
| 172 |
+
"epoch": 0.2803738317757009,
|
| 173 |
+
"grad_norm": 1.1416860818862915,
|
| 174 |
+
"learning_rate": 9.780465727448149e-05,
|
| 175 |
+
"loss": 1.68797512,
|
| 176 |
+
"memory(GiB)": 24.13,
|
| 177 |
+
"step": 75,
|
| 178 |
+
"train_speed(iter/s)": 0.346849
|
| 179 |
+
},
|
| 180 |
+
{
|
| 181 |
+
"acc": 0.58297682,
|
| 182 |
+
"epoch": 0.29906542056074764,
|
| 183 |
+
"grad_norm": 1.1054881811141968,
|
| 184 |
+
"learning_rate": 9.732780159709912e-05,
|
| 185 |
+
"loss": 1.57775593,
|
| 186 |
+
"memory(GiB)": 24.13,
|
| 187 |
+
"step": 80,
|
| 188 |
+
"train_speed(iter/s)": 0.349274
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"acc": 0.57728238,
|
| 192 |
+
"epoch": 0.3177570093457944,
|
| 193 |
+
"grad_norm": 1.1610243320465088,
|
| 194 |
+
"learning_rate": 9.680551982238942e-05,
|
| 195 |
+
"loss": 1.59094667,
|
| 196 |
+
"memory(GiB)": 24.13,
|
| 197 |
+
"step": 85,
|
| 198 |
+
"train_speed(iter/s)": 0.351479
|
| 199 |
+
},
|
| 200 |
+
{
|
| 201 |
+
"acc": 0.5719296,
|
| 202 |
+
"epoch": 0.3364485981308411,
|
| 203 |
+
"grad_norm": 1.1896332502365112,
|
| 204 |
+
"learning_rate": 9.623831324603754e-05,
|
| 205 |
+
"loss": 1.65499535,
|
| 206 |
+
"memory(GiB)": 24.13,
|
| 207 |
+
"step": 90,
|
| 208 |
+
"train_speed(iter/s)": 0.353415
|
| 209 |
+
},
|
| 210 |
+
{
|
| 211 |
+
"acc": 0.58935571,
|
| 212 |
+
"epoch": 0.35514018691588783,
|
| 213 |
+
"grad_norm": 1.0724711418151855,
|
| 214 |
+
"learning_rate": 9.562672628338233e-05,
|
| 215 |
+
"loss": 1.58491564,
|
| 216 |
+
"memory(GiB)": 24.13,
|
| 217 |
+
"step": 95,
|
| 218 |
+
"train_speed(iter/s)": 0.355195
|
| 219 |
+
},
|
| 220 |
+
{
|
| 221 |
+
"acc": 0.56192031,
|
| 222 |
+
"epoch": 0.37383177570093457,
|
| 223 |
+
"grad_norm": 1.0967603921890259,
|
| 224 |
+
"learning_rate": 9.497134594687634e-05,
|
| 225 |
+
"loss": 1.68490829,
|
| 226 |
+
"memory(GiB)": 24.13,
|
| 227 |
+
"step": 100,
|
| 228 |
+
"train_speed(iter/s)": 0.356737
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"epoch": 0.37383177570093457,
|
| 232 |
+
"eval_acc": 0.5846453741190584,
|
| 233 |
+
"eval_loss": 1.5773682594299316,
|
| 234 |
+
"eval_runtime": 19.3689,
|
| 235 |
+
"eval_samples_per_second": 2.581,
|
| 236 |
+
"eval_steps_per_second": 2.581,
|
| 237 |
+
"step": 100
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"acc": 0.58094668,
|
| 241 |
+
"epoch": 0.3925233644859813,
|
| 242 |
+
"grad_norm": 1.0203641653060913,
|
| 243 |
+
"learning_rate": 9.42728012826605e-05,
|
| 244 |
+
"loss": 1.63088989,
|
| 245 |
+
"memory(GiB)": 24.13,
|
| 246 |
+
"step": 105,
|
| 247 |
+
"train_speed(iter/s)": 0.334857
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"acc": 0.56821561,
|
| 251 |
+
"epoch": 0.411214953271028,
|
| 252 |
+
"grad_norm": 1.0685813426971436,
|
| 253 |
+
"learning_rate": 9.353176276679396e-05,
|
| 254 |
+
"loss": 1.67582684,
|
| 255 |
+
"memory(GiB)": 24.13,
|
| 256 |
+
"step": 110,
|
| 257 |
+
"train_speed(iter/s)": 0.33707
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"acc": 0.56454325,
|
| 261 |
+
"epoch": 0.42990654205607476,
|
| 262 |
+
"grad_norm": 1.1684739589691162,
|
| 263 |
+
"learning_rate": 9.274894166171888e-05,
|
| 264 |
+
"loss": 1.63463154,
|
| 265 |
+
"memory(GiB)": 24.13,
|
| 266 |
+
"step": 115,
|
| 267 |
+
"train_speed(iter/s)": 0.33915
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"acc": 0.57855415,
|
| 271 |
+
"epoch": 0.4485981308411215,
|
| 272 |
+
"grad_norm": 1.077269434928894,
|
| 273 |
+
"learning_rate": 9.192508933357753e-05,
|
| 274 |
+
"loss": 1.67274055,
|
| 275 |
+
"memory(GiB)": 24.13,
|
| 276 |
+
"step": 120,
|
| 277 |
+
"train_speed(iter/s)": 0.341042
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"acc": 0.57130666,
|
| 281 |
+
"epoch": 0.4672897196261682,
|
| 282 |
+
"grad_norm": 1.066927194595337,
|
| 283 |
+
"learning_rate": 9.106099653103728e-05,
|
| 284 |
+
"loss": 1.57755241,
|
| 285 |
+
"memory(GiB)": 24.13,
|
| 286 |
+
"step": 125,
|
| 287 |
+
"train_speed(iter/s)": 0.34276
|
| 288 |
+
},
|
| 289 |
+
{
|
| 290 |
+
"acc": 0.57805595,
|
| 291 |
+
"epoch": 0.48598130841121495,
|
| 292 |
+
"grad_norm": 1.214378833770752,
|
| 293 |
+
"learning_rate": 9.015749262631536e-05,
|
| 294 |
+
"loss": 1.5515789,
|
| 295 |
+
"memory(GiB)": 24.13,
|
| 296 |
+
"step": 130,
|
| 297 |
+
"train_speed(iter/s)": 0.344333
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"acc": 0.58489175,
|
| 301 |
+
"epoch": 0.5046728971962616,
|
| 302 |
+
"grad_norm": 1.2008962631225586,
|
| 303 |
+
"learning_rate": 8.921544481913218e-05,
|
| 304 |
+
"loss": 1.60749855,
|
| 305 |
+
"memory(GiB)": 24.13,
|
| 306 |
+
"step": 135,
|
| 307 |
+
"train_speed(iter/s)": 0.345892
|
| 308 |
+
},
|
| 309 |
+
{
|
| 310 |
+
"acc": 0.55853381,
|
| 311 |
+
"epoch": 0.5233644859813084,
|
| 312 |
+
"grad_norm": 1.1639275550842285,
|
| 313 |
+
"learning_rate": 8.823575730435693e-05,
|
| 314 |
+
"loss": 1.63589153,
|
| 315 |
+
"memory(GiB)": 25.22,
|
| 316 |
+
"step": 140,
|
| 317 |
+
"train_speed(iter/s)": 0.3473
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"acc": 0.57799473,
|
| 321 |
+
"epoch": 0.5420560747663551,
|
| 322 |
+
"grad_norm": 1.0502570867538452,
|
| 323 |
+
"learning_rate": 8.721937040414481e-05,
|
| 324 |
+
"loss": 1.55451593,
|
| 325 |
+
"memory(GiB)": 25.22,
|
| 326 |
+
"step": 145,
|
| 327 |
+
"train_speed(iter/s)": 0.348596
|
| 328 |
+
},
|
| 329 |
+
{
|
| 330 |
+
"acc": 0.56694794,
|
| 331 |
+
"epoch": 0.5607476635514018,
|
| 332 |
+
"grad_norm": 1.2373900413513184,
|
| 333 |
+
"learning_rate": 8.616725966539832e-05,
|
| 334 |
+
"loss": 1.64150391,
|
| 335 |
+
"memory(GiB)": 25.22,
|
| 336 |
+
"step": 150,
|
| 337 |
+
"train_speed(iter/s)": 0.349855
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"epoch": 0.5607476635514018,
|
| 341 |
+
"eval_acc": 0.5879442195231669,
|
| 342 |
+
"eval_loss": 1.5577113628387451,
|
| 343 |
+
"eval_runtime": 19.4081,
|
| 344 |
+
"eval_samples_per_second": 2.576,
|
| 345 |
+
"eval_steps_per_second": 2.576,
|
| 346 |
+
"step": 150
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"acc": 0.57931132,
|
| 350 |
+
"epoch": 0.5794392523364486,
|
| 351 |
+
"grad_norm": 1.148728847503662,
|
| 352 |
+
"learning_rate": 8.508043492341944e-05,
|
| 353 |
+
"loss": 1.59622688,
|
| 354 |
+
"memory(GiB)": 25.22,
|
| 355 |
+
"step": 155,
|
| 356 |
+
"train_speed(iter/s)": 0.335587
|
| 357 |
+
},
|
| 358 |
+
{
|
| 359 |
+
"acc": 0.57598162,
|
| 360 |
+
"epoch": 0.5981308411214953,
|
| 361 |
+
"grad_norm": 1.1714155673980713,
|
| 362 |
+
"learning_rate": 8.395993933265101e-05,
|
| 363 |
+
"loss": 1.63730679,
|
| 364 |
+
"memory(GiB)": 25.22,
|
| 365 |
+
"step": 160,
|
| 366 |
+
"train_speed(iter/s)": 0.337039
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"acc": 0.56635065,
|
| 370 |
+
"epoch": 0.616822429906542,
|
| 371 |
+
"grad_norm": 1.000349521636963,
|
| 372 |
+
"learning_rate": 8.280684836543794e-05,
|
| 373 |
+
"loss": 1.61145477,
|
| 374 |
+
"memory(GiB)": 25.22,
|
| 375 |
+
"step": 165,
|
| 376 |
+
"train_speed(iter/s)": 0.338357
|
| 377 |
+
},
|
| 378 |
+
{
|
| 379 |
+
"acc": 0.57298255,
|
| 380 |
+
"epoch": 0.6355140186915887,
|
| 381 |
+
"grad_norm": 1.1170839071273804,
|
| 382 |
+
"learning_rate": 8.162226877976887e-05,
|
| 383 |
+
"loss": 1.59238987,
|
| 384 |
+
"memory(GiB)": 25.22,
|
| 385 |
+
"step": 170,
|
| 386 |
+
"train_speed(iter/s)": 0.339705
|
| 387 |
+
},
|
| 388 |
+
{
|
| 389 |
+
"acc": 0.58343244,
|
| 390 |
+
"epoch": 0.6542056074766355,
|
| 391 |
+
"grad_norm": 1.0932862758636475,
|
| 392 |
+
"learning_rate": 8.040733755698955e-05,
|
| 393 |
+
"loss": 1.58805714,
|
| 394 |
+
"memory(GiB)": 25.22,
|
| 395 |
+
"step": 175,
|
| 396 |
+
"train_speed(iter/s)": 0.340942
|
| 397 |
+
},
|
| 398 |
+
{
|
| 399 |
+
"acc": 0.5746861,
|
| 400 |
+
"epoch": 0.6728971962616822,
|
| 401 |
+
"grad_norm": 1.147817850112915,
|
| 402 |
+
"learning_rate": 7.916322081050709e-05,
|
| 403 |
+
"loss": 1.58162947,
|
| 404 |
+
"memory(GiB)": 25.22,
|
| 405 |
+
"step": 180,
|
| 406 |
+
"train_speed(iter/s)": 0.342139
|
| 407 |
+
},
|
| 408 |
+
{
|
| 409 |
+
"acc": 0.57306166,
|
| 410 |
+
"epoch": 0.6915887850467289,
|
| 411 |
+
"grad_norm": 1.076221227645874,
|
| 412 |
+
"learning_rate": 7.789111266653285e-05,
|
| 413 |
+
"loss": 1.58194542,
|
| 414 |
+
"memory(GiB)": 25.22,
|
| 415 |
+
"step": 185,
|
| 416 |
+
"train_speed(iter/s)": 0.343249
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"acc": 0.58896809,
|
| 420 |
+
"epoch": 0.7102803738317757,
|
| 421 |
+
"grad_norm": 1.1743425130844116,
|
| 422 |
+
"learning_rate": 7.659223411793798e-05,
|
| 423 |
+
"loss": 1.53554783,
|
| 424 |
+
"memory(GiB)": 25.22,
|
| 425 |
+
"step": 190,
|
| 426 |
+
"train_speed(iter/s)": 0.344334
|
| 427 |
+
},
|
| 428 |
+
{
|
| 429 |
+
"acc": 0.57240195,
|
| 430 |
+
"epoch": 0.7289719626168224,
|
| 431 |
+
"grad_norm": 1.0945876836776733,
|
| 432 |
+
"learning_rate": 7.526783185232207e-05,
|
| 433 |
+
"loss": 1.59289436,
|
| 434 |
+
"memory(GiB)": 25.22,
|
| 435 |
+
"step": 195,
|
| 436 |
+
"train_speed(iter/s)": 0.345336
|
| 437 |
+
},
|
| 438 |
+
{
|
| 439 |
+
"acc": 0.5794302,
|
| 440 |
+
"epoch": 0.7476635514018691,
|
| 441 |
+
"grad_norm": 1.1055279970169067,
|
| 442 |
+
"learning_rate": 7.391917705541927e-05,
|
| 443 |
+
"loss": 1.621562,
|
| 444 |
+
"memory(GiB)": 25.22,
|
| 445 |
+
"step": 200,
|
| 446 |
+
"train_speed(iter/s)": 0.346324
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"epoch": 0.7476635514018691,
|
| 450 |
+
"eval_acc": 0.5871944819313241,
|
| 451 |
+
"eval_loss": 1.5511505603790283,
|
| 452 |
+
"eval_runtime": 19.403,
|
| 453 |
+
"eval_samples_per_second": 2.577,
|
| 454 |
+
"eval_steps_per_second": 2.577,
|
| 455 |
+
"step": 200
|
| 456 |
+
},
|
| 457 |
+
{
|
| 458 |
+
"acc": 0.56806307,
|
| 459 |
+
"epoch": 0.7663551401869159,
|
| 460 |
+
"grad_norm": 1.017061471939087,
|
| 461 |
+
"learning_rate": 7.254756419099074e-05,
|
| 462 |
+
"loss": 1.6349041,
|
| 463 |
+
"memory(GiB)": 25.22,
|
| 464 |
+
"step": 205,
|
| 465 |
+
"train_speed(iter/s)": 0.335703
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"acc": 0.56848702,
|
| 469 |
+
"epoch": 0.7850467289719626,
|
| 470 |
+
"grad_norm": 1.1138246059417725,
|
| 471 |
+
"learning_rate": 7.115430975837457e-05,
|
| 472 |
+
"loss": 1.60775127,
|
| 473 |
+
"memory(GiB)": 25.22,
|
| 474 |
+
"step": 210,
|
| 475 |
+
"train_speed(iter/s)": 0.336869
|
| 476 |
+
},
|
| 477 |
+
{
|
| 478 |
+
"acc": 0.58039517,
|
| 479 |
+
"epoch": 0.8037383177570093,
|
| 480 |
+
"grad_norm": 1.2759917974472046,
|
| 481 |
+
"learning_rate": 6.974075102888536e-05,
|
| 482 |
+
"loss": 1.59430618,
|
| 483 |
+
"memory(GiB)": 25.22,
|
| 484 |
+
"step": 215,
|
| 485 |
+
"train_speed(iter/s)": 0.337979
|
| 486 |
+
},
|
| 487 |
+
{
|
| 488 |
+
"acc": 0.56688986,
|
| 489 |
+
"epoch": 0.822429906542056,
|
| 490 |
+
"grad_norm": 1.259171724319458,
|
| 491 |
+
"learning_rate": 6.830824476227646e-05,
|
| 492 |
+
"loss": 1.61376076,
|
| 493 |
+
"memory(GiB)": 25.22,
|
| 494 |
+
"step": 220,
|
| 495 |
+
"train_speed(iter/s)": 0.339033
|
| 496 |
+
},
|
| 497 |
+
{
|
| 498 |
+
"acc": 0.58966751,
|
| 499 |
+
"epoch": 0.8411214953271028,
|
| 500 |
+
"grad_norm": 1.1861165761947632,
|
| 501 |
+
"learning_rate": 6.685816590449708e-05,
|
| 502 |
+
"loss": 1.56043501,
|
| 503 |
+
"memory(GiB)": 25.22,
|
| 504 |
+
"step": 225,
|
| 505 |
+
"train_speed(iter/s)": 0.340035
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"acc": 0.58497729,
|
| 509 |
+
"epoch": 0.8598130841121495,
|
| 510 |
+
"grad_norm": 1.1015815734863281,
|
| 511 |
+
"learning_rate": 6.539190626799366e-05,
|
| 512 |
+
"loss": 1.57877932,
|
| 513 |
+
"memory(GiB)": 25.22,
|
| 514 |
+
"step": 230,
|
| 515 |
+
"train_speed(iter/s)": 0.341016
|
| 516 |
+
},
|
| 517 |
+
{
|
| 518 |
+
"acc": 0.58781128,
|
| 519 |
+
"epoch": 0.8785046728971962,
|
| 520 |
+
"grad_norm": 1.2020829916000366,
|
| 521 |
+
"learning_rate": 6.391087319582264e-05,
|
| 522 |
+
"loss": 1.57381382,
|
| 523 |
+
"memory(GiB)": 25.22,
|
| 524 |
+
"step": 235,
|
| 525 |
+
"train_speed(iter/s)": 0.341901
|
| 526 |
+
},
|
| 527 |
+
{
|
| 528 |
+
"acc": 0.57930512,
|
| 529 |
+
"epoch": 0.897196261682243,
|
| 530 |
+
"grad_norm": 1.1107765436172485,
|
| 531 |
+
"learning_rate": 6.241648821085666e-05,
|
| 532 |
+
"loss": 1.56744556,
|
| 533 |
+
"memory(GiB)": 25.22,
|
| 534 |
+
"step": 240,
|
| 535 |
+
"train_speed(iter/s)": 0.338406
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"acc": 0.57636843,
|
| 539 |
+
"epoch": 0.9158878504672897,
|
| 540 |
+
"grad_norm": 1.1953294277191162,
|
| 541 |
+
"learning_rate": 6.0910185651380626e-05,
|
| 542 |
+
"loss": 1.53063288,
|
| 543 |
+
"memory(GiB)": 25.22,
|
| 544 |
+
"step": 245,
|
| 545 |
+
"train_speed(iter/s)": 0.339394
|
| 546 |
+
},
|
| 547 |
+
{
|
| 548 |
+
"acc": 0.57735896,
|
| 549 |
+
"epoch": 0.9345794392523364,
|
| 550 |
+
"grad_norm": 1.1470882892608643,
|
| 551 |
+
"learning_rate": 5.939341129438739e-05,
|
| 552 |
+
"loss": 1.64163361,
|
| 553 |
+
"memory(GiB)": 25.22,
|
| 554 |
+
"step": 250,
|
| 555 |
+
"train_speed(iter/s)": 0.340265
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"epoch": 0.9345794392523364,
|
| 559 |
+
"eval_acc": 0.5865946918578497,
|
| 560 |
+
"eval_loss": 1.539453148841858,
|
| 561 |
+
"eval_runtime": 19.2796,
|
| 562 |
+
"eval_samples_per_second": 2.593,
|
| 563 |
+
"eval_steps_per_second": 2.593,
|
| 564 |
+
"step": 250
|
| 565 |
+
},
|
| 566 |
+
{
|
| 567 |
+
"acc": 0.55690722,
|
| 568 |
+
"epoch": 0.9532710280373832,
|
| 569 |
+
"grad_norm": 1.225205421447754,
|
| 570 |
+
"learning_rate": 5.786762096789431e-05,
|
| 571 |
+
"loss": 1.68273106,
|
| 572 |
+
"memory(GiB)": 25.22,
|
| 573 |
+
"step": 255,
|
| 574 |
+
"train_speed(iter/s)": 0.332148
|
| 575 |
+
},
|
| 576 |
+
{
|
| 577 |
+
"acc": 0.56964278,
|
| 578 |
+
"epoch": 0.9719626168224299,
|
| 579 |
+
"grad_norm": 1.0283957719802856,
|
| 580 |
+
"learning_rate": 5.633427915361261e-05,
|
| 581 |
+
"loss": 1.64168797,
|
| 582 |
+
"memory(GiB)": 25.22,
|
| 583 |
+
"step": 260,
|
| 584 |
+
"train_speed(iter/s)": 0.333054
|
| 585 |
+
},
|
| 586 |
+
{
|
| 587 |
+
"acc": 0.55817146,
|
| 588 |
+
"epoch": 0.9906542056074766,
|
| 589 |
+
"grad_norm": 1.0989590883255005,
|
| 590 |
+
"learning_rate": 5.479485758131089e-05,
|
| 591 |
+
"loss": 1.61564522,
|
| 592 |
+
"memory(GiB)": 25.22,
|
| 593 |
+
"step": 265,
|
| 594 |
+
"train_speed(iter/s)": 0.333997
|
| 595 |
+
},
|
| 596 |
+
{
|
| 597 |
+
"acc": 0.59431157,
|
| 598 |
+
"epoch": 1.0093457943925233,
|
| 599 |
+
"grad_norm": 1.0156042575836182,
|
| 600 |
+
"learning_rate": 5.325083381622165e-05,
|
| 601 |
+
"loss": 1.54522419,
|
| 602 |
+
"memory(GiB)": 25.22,
|
| 603 |
+
"step": 270,
|
| 604 |
+
"train_speed(iter/s)": 0.334781
|
| 605 |
+
},
|
| 606 |
+
{
|
| 607 |
+
"acc": 0.59816217,
|
| 608 |
+
"epoch": 1.02803738317757,
|
| 609 |
+
"grad_norm": 1.1333341598510742,
|
| 610 |
+
"learning_rate": 5.1703689840846945e-05,
|
| 611 |
+
"loss": 1.43400564,
|
| 612 |
+
"memory(GiB)": 25.22,
|
| 613 |
+
"step": 275,
|
| 614 |
+
"train_speed(iter/s)": 0.335687
|
| 615 |
+
},
|
| 616 |
+
{
|
| 617 |
+
"acc": 0.5830586,
|
| 618 |
+
"epoch": 1.0467289719626167,
|
| 619 |
+
"grad_norm": 1.289959192276001,
|
| 620 |
+
"learning_rate": 5.01549106325243e-05,
|
| 621 |
+
"loss": 1.52126703,
|
| 622 |
+
"memory(GiB)": 25.22,
|
| 623 |
+
"step": 280,
|
| 624 |
+
"train_speed(iter/s)": 0.336526
|
| 625 |
+
},
|
| 626 |
+
{
|
| 627 |
+
"acc": 0.62401681,
|
| 628 |
+
"epoch": 1.0654205607476634,
|
| 629 |
+
"grad_norm": 1.3222240209579468,
|
| 630 |
+
"learning_rate": 4.860598273811792e-05,
|
| 631 |
+
"loss": 1.36042976,
|
| 632 |
+
"memory(GiB)": 25.22,
|
| 633 |
+
"step": 285,
|
| 634 |
+
"train_speed(iter/s)": 0.337368
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"acc": 0.57799129,
|
| 638 |
+
"epoch": 1.0841121495327102,
|
| 639 |
+
"grad_norm": 1.261518120765686,
|
| 640 |
+
"learning_rate": 4.705839284720376e-05,
|
| 641 |
+
"loss": 1.48789501,
|
| 642 |
+
"memory(GiB)": 25.22,
|
| 643 |
+
"step": 290,
|
| 644 |
+
"train_speed(iter/s)": 0.338162
|
| 645 |
+
},
|
| 646 |
+
{
|
| 647 |
+
"acc": 0.60333071,
|
| 648 |
+
"epoch": 1.102803738317757,
|
| 649 |
+
"grad_norm": 1.351491928100586,
|
| 650 |
+
"learning_rate": 4.55136263651172e-05,
|
| 651 |
+
"loss": 1.49587126,
|
| 652 |
+
"memory(GiB)": 25.22,
|
| 653 |
+
"step": 295,
|
| 654 |
+
"train_speed(iter/s)": 0.338912
|
| 655 |
+
},
|
| 656 |
+
{
|
| 657 |
+
"acc": 0.61315393,
|
| 658 |
+
"epoch": 1.1214953271028036,
|
| 659 |
+
"grad_norm": 1.6314853429794312,
|
| 660 |
+
"learning_rate": 4.397316598723385e-05,
|
| 661 |
+
"loss": 1.43088112,
|
| 662 |
+
"memory(GiB)": 25.22,
|
| 663 |
+
"step": 300,
|
| 664 |
+
"train_speed(iter/s)": 0.339653
|
| 665 |
+
},
|
| 666 |
+
{
|
| 667 |
+
"epoch": 1.1214953271028036,
|
| 668 |
+
"eval_acc": 0.5912430649272754,
|
| 669 |
+
"eval_loss": 1.5384690761566162,
|
| 670 |
+
"eval_runtime": 19.3435,
|
| 671 |
+
"eval_samples_per_second": 2.585,
|
| 672 |
+
"eval_steps_per_second": 2.585,
|
| 673 |
+
"step": 300
|
| 674 |
+
},
|
| 675 |
+
{
|
| 676 |
+
"acc": 0.60021753,
|
| 677 |
+
"epoch": 1.1401869158878504,
|
| 678 |
+
"grad_norm": 1.4269309043884277,
|
| 679 |
+
"learning_rate": 4.243849027585096e-05,
|
| 680 |
+
"loss": 1.47466078,
|
| 681 |
+
"memory(GiB)": 25.22,
|
| 682 |
+
"step": 305,
|
| 683 |
+
"train_speed(iter/s)": 0.332888
|
| 684 |
+
},
|
| 685 |
+
{
|
| 686 |
+
"acc": 0.58526664,
|
| 687 |
+
"epoch": 1.158878504672897,
|
| 688 |
+
"grad_norm": 1.4827656745910645,
|
| 689 |
+
"learning_rate": 4.0911072241036194e-05,
|
| 690 |
+
"loss": 1.53870859,
|
| 691 |
+
"memory(GiB)": 25.22,
|
| 692 |
+
"step": 310,
|
| 693 |
+
"train_speed(iter/s)": 0.333651
|
| 694 |
+
},
|
| 695 |
+
{
|
| 696 |
+
"acc": 0.6124382,
|
| 697 |
+
"epoch": 1.1775700934579438,
|
| 698 |
+
"grad_norm": 1.428358793258667,
|
| 699 |
+
"learning_rate": 3.9392377926805226e-05,
|
| 700 |
+
"loss": 1.44478369,
|
| 701 |
+
"memory(GiB)": 25.22,
|
| 702 |
+
"step": 315,
|
| 703 |
+
"train_speed(iter/s)": 0.334437
|
| 704 |
+
},
|
| 705 |
+
{
|
| 706 |
+
"acc": 0.58961325,
|
| 707 |
+
"epoch": 1.1962616822429906,
|
| 708 |
+
"grad_norm": 1.4965459108352661,
|
| 709 |
+
"learning_rate": 3.788386500398583e-05,
|
| 710 |
+
"loss": 1.49487057,
|
| 711 |
+
"memory(GiB)": 25.22,
|
| 712 |
+
"step": 320,
|
| 713 |
+
"train_speed(iter/s)": 0.335206
|
| 714 |
+
},
|
| 715 |
+
{
|
| 716 |
+
"acc": 0.58942304,
|
| 717 |
+
"epoch": 1.2149532710280373,
|
| 718 |
+
"grad_norm": 1.3801062107086182,
|
| 719 |
+
"learning_rate": 3.6386981371118355e-05,
|
| 720 |
+
"loss": 1.44996719,
|
| 721 |
+
"memory(GiB)": 25.22,
|
| 722 |
+
"step": 325,
|
| 723 |
+
"train_speed(iter/s)": 0.335913
|
| 724 |
+
},
|
| 725 |
+
{
|
| 726 |
+
"acc": 0.5930171,
|
| 727 |
+
"epoch": 1.233644859813084,
|
| 728 |
+
"grad_norm": 1.550034523010254,
|
| 729 |
+
"learning_rate": 3.49031637647361e-05,
|
| 730 |
+
"loss": 1.4618269,
|
| 731 |
+
"memory(GiB)": 25.22,
|
| 732 |
+
"step": 330,
|
| 733 |
+
"train_speed(iter/s)": 0.336661
|
| 734 |
+
},
|
| 735 |
+
{
|
| 736 |
+
"acc": 0.61733942,
|
| 737 |
+
"epoch": 1.2523364485981308,
|
| 738 |
+
"grad_norm": 1.467942237854004,
|
| 739 |
+
"learning_rate": 3.343383638035902e-05,
|
| 740 |
+
"loss": 1.37453032,
|
| 741 |
+
"memory(GiB)": 25.22,
|
| 742 |
+
"step": 335,
|
| 743 |
+
"train_speed(iter/s)": 0.337353
|
| 744 |
+
},
|
| 745 |
+
{
|
| 746 |
+
"acc": 0.60188942,
|
| 747 |
+
"epoch": 1.2710280373831775,
|
| 748 |
+
"grad_norm": 1.571946620941162,
|
| 749 |
+
"learning_rate": 3.1980409505524544e-05,
|
| 750 |
+
"loss": 1.42003136,
|
| 751 |
+
"memory(GiB)": 25.22,
|
| 752 |
+
"step": 340,
|
| 753 |
+
"train_speed(iter/s)": 0.33804
|
| 754 |
+
},
|
| 755 |
+
{
|
| 756 |
+
"acc": 0.60350924,
|
| 757 |
+
"epoch": 1.2897196261682242,
|
| 758 |
+
"grad_norm": 1.773979663848877,
|
| 759 |
+
"learning_rate": 3.054427816616773e-05,
|
| 760 |
+
"loss": 1.40252638,
|
| 761 |
+
"memory(GiB)": 25.22,
|
| 762 |
+
"step": 345,
|
| 763 |
+
"train_speed(iter/s)": 0.338706
|
| 764 |
+
},
|
| 765 |
+
{
|
| 766 |
+
"acc": 0.59472365,
|
| 767 |
+
"epoch": 1.308411214953271,
|
| 768 |
+
"grad_norm": 1.5724050998687744,
|
| 769 |
+
"learning_rate": 2.91268207876494e-05,
|
| 770 |
+
"loss": 1.46759853,
|
| 771 |
+
"memory(GiB)": 25.22,
|
| 772 |
+
"step": 350,
|
| 773 |
+
"train_speed(iter/s)": 0.339365
|
| 774 |
+
},
|
| 775 |
+
{
|
| 776 |
+
"epoch": 1.308411214953271,
|
| 777 |
+
"eval_acc": 0.5880941670415355,
|
| 778 |
+
"eval_loss": 1.5372613668441772,
|
| 779 |
+
"eval_runtime": 19.309,
|
| 780 |
+
"eval_samples_per_second": 2.589,
|
| 781 |
+
"eval_steps_per_second": 2.589,
|
| 782 |
+
"step": 350
|
| 783 |
+
},
|
| 784 |
+
{
|
| 785 |
+
"acc": 0.60742517,
|
| 786 |
+
"epoch": 1.3271028037383177,
|
| 787 |
+
"grad_norm": 1.6696964502334595,
|
| 788 |
+
"learning_rate": 2.7729397871718304e-05,
|
| 789 |
+
"loss": 1.41251793,
|
| 790 |
+
"memory(GiB)": 25.22,
|
| 791 |
+
"step": 355,
|
| 792 |
+
"train_speed(iter/s)": 0.333524
|
| 793 |
+
},
|
| 794 |
+
{
|
| 795 |
+
"acc": 0.59874382,
|
| 796 |
+
"epoch": 1.3457943925233644,
|
| 797 |
+
"grad_norm": 1.631954550743103,
|
| 798 |
+
"learning_rate": 2.635335069067617e-05,
|
| 799 |
+
"loss": 1.43565807,
|
| 800 |
+
"memory(GiB)": 25.22,
|
| 801 |
+
"step": 360,
|
| 802 |
+
"train_speed(iter/s)": 0.33422
|
| 803 |
+
},
|
| 804 |
+
{
|
| 805 |
+
"acc": 0.62206426,
|
| 806 |
+
"epoch": 1.3644859813084111,
|
| 807 |
+
"grad_norm": 1.6964654922485352,
|
| 808 |
+
"learning_rate": 2.500000000000001e-05,
|
| 809 |
+
"loss": 1.40124302,
|
| 810 |
+
"memory(GiB)": 25.22,
|
| 811 |
+
"step": 365,
|
| 812 |
+
"train_speed(iter/s)": 0.334875
|
| 813 |
+
},
|
| 814 |
+
{
|
| 815 |
+
"acc": 0.60629749,
|
| 816 |
+
"epoch": 1.3831775700934579,
|
| 817 |
+
"grad_norm": 1.4727040529251099,
|
| 818 |
+
"learning_rate": 2.367064477065652e-05,
|
| 819 |
+
"loss": 1.4464427,
|
| 820 |
+
"memory(GiB)": 25.22,
|
| 821 |
+
"step": 370,
|
| 822 |
+
"train_speed(iter/s)": 0.335544
|
| 823 |
+
},
|
| 824 |
+
{
|
| 825 |
+
"acc": 0.60391579,
|
| 826 |
+
"epoch": 1.4018691588785046,
|
| 827 |
+
"grad_norm": 1.7280242443084717,
|
| 828 |
+
"learning_rate": 2.2366560942325832e-05,
|
| 829 |
+
"loss": 1.41403561,
|
| 830 |
+
"memory(GiB)": 25.22,
|
| 831 |
+
"step": 375,
|
| 832 |
+
"train_speed(iter/s)": 0.336204
|
| 833 |
+
},
|
| 834 |
+
{
|
| 835 |
+
"acc": 0.60924401,
|
| 836 |
+
"epoch": 1.4205607476635513,
|
| 837 |
+
"grad_norm": 1.7434614896774292,
|
| 838 |
+
"learning_rate": 2.108900019873103e-05,
|
| 839 |
+
"loss": 1.44466,
|
| 840 |
+
"memory(GiB)": 25.22,
|
| 841 |
+
"step": 380,
|
| 842 |
+
"train_speed(iter/s)": 0.336821
|
| 843 |
+
},
|
| 844 |
+
{
|
| 845 |
+
"acc": 0.62293906,
|
| 846 |
+
"epoch": 1.439252336448598,
|
| 847 |
+
"grad_norm": 1.5931147336959839,
|
| 848 |
+
"learning_rate": 1.983918876624902e-05,
|
| 849 |
+
"loss": 1.36733408,
|
| 850 |
+
"memory(GiB)": 25.22,
|
| 851 |
+
"step": 385,
|
| 852 |
+
"train_speed(iter/s)": 0.337406
|
| 853 |
+
},
|
| 854 |
+
{
|
| 855 |
+
"acc": 0.60391083,
|
| 856 |
+
"epoch": 1.4579439252336448,
|
| 857 |
+
"grad_norm": 1.8774250745773315,
|
| 858 |
+
"learning_rate": 1.8618326236955907e-05,
|
| 859 |
+
"loss": 1.47434216,
|
| 860 |
+
"memory(GiB)": 25.22,
|
| 861 |
+
"step": 390,
|
| 862 |
+
"train_speed(iter/s)": 0.337991
|
| 863 |
+
},
|
| 864 |
+
{
|
| 865 |
+
"acc": 0.59494829,
|
| 866 |
+
"epoch": 1.4766355140186915,
|
| 867 |
+
"grad_norm": 1.6903536319732666,
|
| 868 |
+
"learning_rate": 1.7427584417236194e-05,
|
| 869 |
+
"loss": 1.4961113,
|
| 870 |
+
"memory(GiB)": 25.22,
|
| 871 |
+
"step": 395,
|
| 872 |
+
"train_speed(iter/s)": 0.338556
|
| 873 |
+
},
|
| 874 |
+
{
|
| 875 |
+
"acc": 0.61160607,
|
| 876 |
+
"epoch": 1.4953271028037383,
|
| 877 |
+
"grad_norm": 1.6840981245040894,
|
| 878 |
+
"learning_rate": 1.626810620306163e-05,
|
| 879 |
+
"loss": 1.3965476,
|
| 880 |
+
"memory(GiB)": 25.22,
|
| 881 |
+
"step": 400,
|
| 882 |
+
"train_speed(iter/s)": 0.339121
|
| 883 |
+
},
|
| 884 |
+
{
|
| 885 |
+
"epoch": 1.4953271028037383,
|
| 886 |
+
"eval_acc": 0.5915429599640126,
|
| 887 |
+
"eval_loss": 1.5317269563674927,
|
| 888 |
+
"eval_runtime": 19.3286,
|
| 889 |
+
"eval_samples_per_second": 2.587,
|
| 890 |
+
"eval_steps_per_second": 2.587,
|
| 891 |
+
"step": 400
|
| 892 |
+
},
|
| 893 |
+
{
|
| 894 |
+
"acc": 0.59379582,
|
| 895 |
+
"epoch": 1.514018691588785,
|
| 896 |
+
"grad_norm": 1.6205955743789673,
|
| 897 |
+
"learning_rate": 1.5141004483018323e-05,
|
| 898 |
+
"loss": 1.44787579,
|
| 899 |
+
"memory(GiB)": 25.22,
|
| 900 |
+
"step": 405,
|
| 901 |
+
"train_speed(iter/s)": 0.333982
|
| 902 |
+
},
|
| 903 |
+
{
|
| 904 |
+
"acc": 0.61180854,
|
| 905 |
+
"epoch": 1.5327102803738317,
|
| 906 |
+
"grad_norm": 1.9331731796264648,
|
| 907 |
+
"learning_rate": 1.4047361070135995e-05,
|
| 908 |
+
"loss": 1.46465635,
|
| 909 |
+
"memory(GiB)": 25.22,
|
| 910 |
+
"step": 410,
|
| 911 |
+
"train_speed(iter/s)": 0.334588
|
| 912 |
+
},
|
| 913 |
+
{
|
| 914 |
+
"acc": 0.59493322,
|
| 915 |
+
"epoch": 1.5514018691588785,
|
| 916 |
+
"grad_norm": 1.579399824142456,
|
| 917 |
+
"learning_rate": 1.2988225663543602e-05,
|
| 918 |
+
"loss": 1.51334658,
|
| 919 |
+
"memory(GiB)": 25.22,
|
| 920 |
+
"step": 415,
|
| 921 |
+
"train_speed(iter/s)": 0.335141
|
| 922 |
+
},
|
| 923 |
+
{
|
| 924 |
+
"acc": 0.60085406,
|
| 925 |
+
"epoch": 1.5700934579439252,
|
| 926 |
+
"grad_norm": 1.7813141345977783,
|
| 927 |
+
"learning_rate": 1.1964614840949002e-05,
|
| 928 |
+
"loss": 1.44739676,
|
| 929 |
+
"memory(GiB)": 25.22,
|
| 930 |
+
"step": 420,
|
| 931 |
+
"train_speed(iter/s)": 0.335707
|
| 932 |
+
},
|
| 933 |
+
{
|
| 934 |
+
"acc": 0.59427462,
|
| 935 |
+
"epoch": 1.588785046728972,
|
| 936 |
+
"grad_norm": 1.6966168880462646,
|
| 937 |
+
"learning_rate": 1.097751108290867e-05,
|
| 938 |
+
"loss": 1.47780085,
|
| 939 |
+
"memory(GiB)": 25.22,
|
| 940 |
+
"step": 425,
|
| 941 |
+
"train_speed(iter/s)": 0.336254
|
| 942 |
+
},
|
| 943 |
+
{
|
| 944 |
+
"acc": 0.623209,
|
| 945 |
+
"epoch": 1.6074766355140186,
|
| 946 |
+
"grad_norm": 1.801193118095398,
|
| 947 |
+
"learning_rate": 1.0027861829824952e-05,
|
| 948 |
+
"loss": 1.37850494,
|
| 949 |
+
"memory(GiB)": 25.22,
|
| 950 |
+
"step": 430,
|
| 951 |
+
"train_speed(iter/s)": 0.336818
|
| 952 |
+
},
|
| 953 |
+
{
|
| 954 |
+
"acc": 0.60559597,
|
| 955 |
+
"epoch": 1.6261682242990654,
|
| 956 |
+
"grad_norm": 1.694907307624817,
|
| 957 |
+
"learning_rate": 9.11657857257509e-06,
|
| 958 |
+
"loss": 1.47119045,
|
| 959 |
+
"memory(GiB)": 25.22,
|
| 960 |
+
"step": 435,
|
| 961 |
+
"train_speed(iter/s)": 0.337357
|
| 962 |
+
},
|
| 963 |
+
{
|
| 964 |
+
"acc": 0.60444808,
|
| 965 |
+
"epoch": 1.644859813084112,
|
| 966 |
+
"grad_norm": 1.8144758939743042,
|
| 967 |
+
"learning_rate": 8.244535977645585e-06,
|
| 968 |
+
"loss": 1.46991854,
|
| 969 |
+
"memory(GiB)": 25.22,
|
| 970 |
+
"step": 440,
|
| 971 |
+
"train_speed(iter/s)": 0.337883
|
| 972 |
+
},
|
| 973 |
+
{
|
| 974 |
+
"acc": 0.61087198,
|
| 975 |
+
"epoch": 1.6635514018691588,
|
| 976 |
+
"grad_norm": 1.8258271217346191,
|
| 977 |
+
"learning_rate": 7.412571047611155e-06,
|
| 978 |
+
"loss": 1.39234638,
|
| 979 |
+
"memory(GiB)": 25.22,
|
| 980 |
+
"step": 445,
|
| 981 |
+
"train_speed(iter/s)": 0.3384
|
| 982 |
+
},
|
| 983 |
+
{
|
| 984 |
+
"acc": 0.60143399,
|
| 985 |
+
"epoch": 1.6822429906542056,
|
| 986 |
+
"grad_norm": 1.8941428661346436,
|
| 987 |
+
"learning_rate": 6.621482317764105e-06,
|
| 988 |
+
"loss": 1.43530188,
|
| 989 |
+
"memory(GiB)": 25.22,
|
| 990 |
+
"step": 450,
|
| 991 |
+
"train_speed(iter/s)": 0.338913
|
| 992 |
+
},
|
| 993 |
+
{
|
| 994 |
+
"epoch": 1.6822429906542056,
|
| 995 |
+
"eval_acc": 0.5921427500374868,
|
| 996 |
+
"eval_loss": 1.532676100730896,
|
| 997 |
+
"eval_runtime": 19.3483,
|
| 998 |
+
"eval_samples_per_second": 2.584,
|
| 999 |
+
"eval_steps_per_second": 2.584,
|
| 1000 |
+
"step": 450
|
| 1001 |
+
},
|
| 1002 |
+
{
|
| 1003 |
+
"acc": 0.61480565,
|
| 1004 |
+
"epoch": 1.7009345794392523,
|
| 1005 |
+
"grad_norm": 1.939122200012207,
|
| 1006 |
+
"learning_rate": 5.872029089665587e-06,
|
| 1007 |
+
"loss": 1.39058199,
|
| 1008 |
+
"memory(GiB)": 25.22,
|
| 1009 |
+
"step": 455,
|
| 1010 |
+
"train_speed(iter/s)": 0.33434
|
| 1011 |
+
},
|
| 1012 |
+
{
|
| 1013 |
+
"acc": 0.60005183,
|
| 1014 |
+
"epoch": 1.719626168224299,
|
| 1015 |
+
"grad_norm": 1.91712486743927,
|
| 1016 |
+
"learning_rate": 5.164930702353782e-06,
|
| 1017 |
+
"loss": 1.45630856,
|
| 1018 |
+
"memory(GiB)": 25.22,
|
| 1019 |
+
"step": 460,
|
| 1020 |
+
"train_speed(iter/s)": 0.334872
|
| 1021 |
+
},
|
| 1022 |
+
{
|
| 1023 |
+
"acc": 0.59249868,
|
| 1024 |
+
"epoch": 1.7383177570093458,
|
| 1025 |
+
"grad_norm": 1.6661227941513062,
|
| 1026 |
+
"learning_rate": 4.500865841909168e-06,
|
| 1027 |
+
"loss": 1.46352968,
|
| 1028 |
+
"memory(GiB)": 25.22,
|
| 1029 |
+
"step": 465,
|
| 1030 |
+
"train_speed(iter/s)": 0.335375
|
| 1031 |
+
},
|
| 1032 |
+
{
|
| 1033 |
+
"acc": 0.6019537,
|
| 1034 |
+
"epoch": 1.7570093457943925,
|
| 1035 |
+
"grad_norm": 1.7562310695648193,
|
| 1036 |
+
"learning_rate": 3.880471890038967e-06,
|
| 1037 |
+
"loss": 1.4511817,
|
| 1038 |
+
"memory(GiB)": 25.22,
|
| 1039 |
+
"step": 470,
|
| 1040 |
+
"train_speed(iter/s)": 0.335886
|
| 1041 |
+
},
|
| 1042 |
+
{
|
| 1043 |
+
"acc": 0.60008221,
|
| 1044 |
+
"epoch": 1.7757009345794392,
|
| 1045 |
+
"grad_norm": 1.6243735551834106,
|
| 1046 |
+
"learning_rate": 3.3043443123065286e-06,
|
| 1047 |
+
"loss": 1.49449492,
|
| 1048 |
+
"memory(GiB)": 25.22,
|
| 1049 |
+
"step": 475,
|
| 1050 |
+
"train_speed(iter/s)": 0.336362
|
| 1051 |
+
},
|
| 1052 |
+
{
|
| 1053 |
+
"acc": 0.59502878,
|
| 1054 |
+
"epoch": 1.794392523364486,
|
| 1055 |
+
"grad_norm": 2.1284008026123047,
|
| 1056 |
+
"learning_rate": 2.7730360865923956e-06,
|
| 1057 |
+
"loss": 1.46432257,
|
| 1058 |
+
"memory(GiB)": 25.22,
|
| 1059 |
+
"step": 480,
|
| 1060 |
+
"train_speed(iter/s)": 0.336856
|
| 1061 |
+
},
|
| 1062 |
+
{
|
| 1063 |
+
"acc": 0.58453884,
|
| 1064 |
+
"epoch": 1.8130841121495327,
|
| 1065 |
+
"grad_norm": 1.7423293590545654,
|
| 1066 |
+
"learning_rate": 2.287057172336021e-06,
|
| 1067 |
+
"loss": 1.53189554,
|
| 1068 |
+
"memory(GiB)": 25.22,
|
| 1069 |
+
"step": 485,
|
| 1070 |
+
"train_speed(iter/s)": 0.337332
|
| 1071 |
+
},
|
| 1072 |
+
{
|
| 1073 |
+
"acc": 0.59840698,
|
| 1074 |
+
"epoch": 1.8317757009345794,
|
| 1075 |
+
"grad_norm": 1.787650227546692,
|
| 1076 |
+
"learning_rate": 1.8468740210672076e-06,
|
| 1077 |
+
"loss": 1.44042816,
|
| 1078 |
+
"memory(GiB)": 25.22,
|
| 1079 |
+
"step": 490,
|
| 1080 |
+
"train_speed(iter/s)": 0.337808
|
| 1081 |
+
},
|
| 1082 |
+
{
|
| 1083 |
+
"acc": 0.60692272,
|
| 1084 |
+
"epoch": 1.8504672897196262,
|
| 1085 |
+
"grad_norm": 1.7444405555725098,
|
| 1086 |
+
"learning_rate": 1.4529091286973995e-06,
|
| 1087 |
+
"loss": 1.42584867,
|
| 1088 |
+
"memory(GiB)": 25.22,
|
| 1089 |
+
"step": 495,
|
| 1090 |
+
"train_speed(iter/s)": 0.338284
|
| 1091 |
+
},
|
| 1092 |
+
{
|
| 1093 |
+
"acc": 0.58918037,
|
| 1094 |
+
"epoch": 1.8691588785046729,
|
| 1095 |
+
"grad_norm": 1.7888891696929932,
|
| 1096 |
+
"learning_rate": 1.1055406300002347e-06,
|
| 1097 |
+
"loss": 1.48684044,
|
| 1098 |
+
"memory(GiB)": 25.22,
|
| 1099 |
+
"step": 500,
|
| 1100 |
+
"train_speed(iter/s)": 0.338742
|
| 1101 |
+
},
|
| 1102 |
+
{
|
| 1103 |
+
"epoch": 1.8691588785046729,
|
| 1104 |
+
"eval_acc": 0.5931923826660669,
|
| 1105 |
+
"eval_loss": 1.5313353538513184,
|
| 1106 |
+
"eval_runtime": 19.3381,
|
| 1107 |
+
"eval_samples_per_second": 2.586,
|
| 1108 |
+
"eval_steps_per_second": 2.586,
|
| 1109 |
+
"step": 500
|
| 1110 |
+
},
|
| 1111 |
+
{
|
| 1112 |
+
"acc": 0.61618199,
|
| 1113 |
+
"epoch": 1.8878504672897196,
|
| 1114 |
+
"grad_norm": 1.9864728450775146,
|
| 1115 |
+
"learning_rate": 8.0510193567086e-07,
|
| 1116 |
+
"loss": 1.43374748,
|
| 1117 |
+
"memory(GiB)": 25.22,
|
| 1118 |
+
"step": 505,
|
| 1119 |
+
"train_speed(iter/s)": 0.334578
|
| 1120 |
+
},
|
| 1121 |
+
{
|
| 1122 |
+
"acc": 0.60259299,
|
| 1123 |
+
"epoch": 1.9065420560747663,
|
| 1124 |
+
"grad_norm": 1.688388705253601,
|
| 1125 |
+
"learning_rate": 5.518814123121885e-07,
|
| 1126 |
+
"loss": 1.49144144,
|
| 1127 |
+
"memory(GiB)": 25.22,
|
| 1128 |
+
"step": 510,
|
| 1129 |
+
"train_speed(iter/s)": 0.335056
|
| 1130 |
+
},
|
| 1131 |
+
{
|
| 1132 |
+
"acc": 0.61285515,
|
| 1133 |
+
"epoch": 1.925233644859813,
|
| 1134 |
+
"grad_norm": 1.6542000770568848,
|
| 1135 |
+
"learning_rate": 3.4612210565528326e-07,
|
| 1136 |
+
"loss": 1.44376268,
|
| 1137 |
+
"memory(GiB)": 25.22,
|
| 1138 |
+
"step": 515,
|
| 1139 |
+
"train_speed(iter/s)": 0.335522
|
| 1140 |
+
},
|
| 1141 |
+
{
|
| 1142 |
+
"acc": 0.61628981,
|
| 1143 |
+
"epoch": 1.9439252336448598,
|
| 1144 |
+
"grad_norm": 2.033604145050049,
|
| 1145 |
+
"learning_rate": 1.8802150727962876e-07,
|
| 1146 |
+
"loss": 1.39236612,
|
| 1147 |
+
"memory(GiB)": 25.22,
|
| 1148 |
+
"step": 520,
|
| 1149 |
+
"train_speed(iter/s)": 0.335983
|
| 1150 |
+
},
|
| 1151 |
+
{
|
| 1152 |
+
"acc": 0.62410831,
|
| 1153 |
+
"epoch": 1.9626168224299065,
|
| 1154 |
+
"grad_norm": 1.6692521572113037,
|
| 1155 |
+
"learning_rate": 7.773136505700995e-08,
|
| 1156 |
+
"loss": 1.36968622,
|
| 1157 |
+
"memory(GiB)": 25.22,
|
| 1158 |
+
"step": 525,
|
| 1159 |
+
"train_speed(iter/s)": 0.336429
|
| 1160 |
+
},
|
| 1161 |
+
{
|
| 1162 |
+
"acc": 0.60063834,
|
| 1163 |
+
"epoch": 1.9813084112149533,
|
| 1164 |
+
"grad_norm": 1.7963093519210815,
|
| 1165 |
+
"learning_rate": 1.5357537501159423e-08,
|
| 1166 |
+
"loss": 1.46681242,
|
| 1167 |
+
"memory(GiB)": 25.22,
|
| 1168 |
+
"step": 530,
|
| 1169 |
+
"train_speed(iter/s)": 0.336698
|
| 1170 |
+
},
|
| 1171 |
+
{
|
| 1172 |
+
"epoch": 1.9962616822429906,
|
| 1173 |
+
"eval_acc": 0.5915429599640126,
|
| 1174 |
+
"eval_loss": 1.5304458141326904,
|
| 1175 |
+
"eval_runtime": 20.9568,
|
| 1176 |
+
"eval_samples_per_second": 2.386,
|
| 1177 |
+
"eval_steps_per_second": 2.386,
|
| 1178 |
+
"step": 534
|
| 1179 |
+
}
|
| 1180 |
+
],
|
| 1181 |
+
"logging_steps": 5,
|
| 1182 |
+
"max_steps": 534,
|
| 1183 |
+
"num_input_tokens_seen": 0,
|
| 1184 |
+
"num_train_epochs": 2,
|
| 1185 |
+
"save_steps": 50,
|
| 1186 |
+
"stateful_callbacks": {
|
| 1187 |
+
"TrainerControl": {
|
| 1188 |
+
"args": {
|
| 1189 |
+
"should_epoch_stop": false,
|
| 1190 |
+
"should_evaluate": false,
|
| 1191 |
+
"should_log": false,
|
| 1192 |
+
"should_save": true,
|
| 1193 |
+
"should_training_stop": true
|
| 1194 |
+
},
|
| 1195 |
+
"attributes": {}
|
| 1196 |
+
}
|
| 1197 |
+
},
|
| 1198 |
+
"total_flos": 8.23057811304193e+16,
|
| 1199 |
+
"train_batch_size": 1,
|
| 1200 |
+
"trial_name": null,
|
| 1201 |
+
"trial_params": null
|
| 1202 |
+
}
|
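The `trainer_state.json` files in this upload are the standard Hugging Face `Trainer` log histories: each entry is either a training log (with `loss`, `acc`, `grad_norm`, `learning_rate`, `step`) or an evaluation log (with `eval_loss`, `eval_acc`, `eval_runtime`). A minimal sketch for pulling the curves back out of one of these files, assuming the standard `log_history` key and an illustrative local path:

```python
import json

# Illustrative path: point this at any trainer_state.json from this upload.
with open("llava1_6-vicuna-7b-instruct/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"logged every {state['logging_steps']} steps, {state['max_steps']} steps total")
for e in eval_logs:
    print(f"step {e['step']:>4}: eval_loss={e['eval_loss']:.4f}  eval_acc={e['eval_acc']:.4f}")
```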
minicpm-v-v2_6-chat/training_args.bin
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e3af870f81f5afa455e78d0a60a99ba632dc9eca9239b594c659caed624b59bf
|
| 3 |
+
size 7416
|
qwen2-vl-7b-instruct/README.md
ADDED
|
@@ -0,0 +1,202 @@
|
| 1 |
+
---
|
| 2 |
+
base_model: /fs/clip-projects/geoguesser/vlms/qwen/Qwen2-VL-7B-Instruct
|
| 3 |
+
library_name: peft
|
| 4 |
+
---
|
| 5 |
+
|
| 6 |
+
# Model Card for Model ID
|
| 7 |
+
|
| 8 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
## Model Details
|
| 13 |
+
|
| 14 |
+
### Model Description
|
| 15 |
+
|
| 16 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
- **Developed by:** [More Information Needed]
|
| 21 |
+
- **Funded by [optional]:** [More Information Needed]
|
| 22 |
+
- **Shared by [optional]:** [More Information Needed]
|
| 23 |
+
- **Model type:** [More Information Needed]
|
| 24 |
+
- **Language(s) (NLP):** [More Information Needed]
|
| 25 |
+
- **License:** [More Information Needed]
|
| 26 |
+
- **Finetuned from model [optional]:** [More Information Needed]
|
| 27 |
+
|
| 28 |
+
### Model Sources [optional]
|
| 29 |
+
|
| 30 |
+
<!-- Provide the basic links for the model. -->
|
| 31 |
+
|
| 32 |
+
- **Repository:** [More Information Needed]
|
| 33 |
+
- **Paper [optional]:** [More Information Needed]
|
| 34 |
+
- **Demo [optional]:** [More Information Needed]
|
| 35 |
+
|
| 36 |
+
## Uses
|
| 37 |
+
|
| 38 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
| 39 |
+
|
| 40 |
+
### Direct Use
|
| 41 |
+
|
| 42 |
+
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
|
| 43 |
+
|
| 44 |
+
[More Information Needed]
|
| 45 |
+
|
| 46 |
+
### Downstream Use [optional]
|
| 47 |
+
|
| 48 |
+
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
|
| 49 |
+
|
| 50 |
+
[More Information Needed]
|
| 51 |
+
|
| 52 |
+
### Out-of-Scope Use
|
| 53 |
+
|
| 54 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
| 55 |
+
|
| 56 |
+
[More Information Needed]
|
| 57 |
+
|
| 58 |
+
## Bias, Risks, and Limitations
|
| 59 |
+
|
| 60 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
| 61 |
+
|
| 62 |
+
[More Information Needed]
|
| 63 |
+
|
| 64 |
+
### Recommendations
|
| 65 |
+
|
| 66 |
+
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
|
| 67 |
+
|
| 68 |
+
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
|
| 69 |
+
|
| 70 |
+
## How to Get Started with the Model
|
| 71 |
+
|
| 72 |
+
Use the code below to get started with the model.
|
| 73 |
+
|
| 74 |
+
[More Information Needed]
|
| 75 |
+
|
| 76 |
+
## Training Details
|
| 77 |
+
|
| 78 |
+
### Training Data
|
| 79 |
+
|
| 80 |
+
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
|
| 81 |
+
|
| 82 |
+
[More Information Needed]
|
| 83 |
+
|
| 84 |
+
### Training Procedure
|
| 85 |
+
|
| 86 |
+
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
|
| 87 |
+
|
| 88 |
+
#### Preprocessing [optional]
|
| 89 |
+
|
| 90 |
+
[More Information Needed]
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
#### Training Hyperparameters
|
| 94 |
+
|
| 95 |
+
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
|
| 96 |
+
|
| 97 |
+
#### Speeds, Sizes, Times [optional]
|
| 98 |
+
|
| 99 |
+
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
|
| 100 |
+
|
| 101 |
+
[More Information Needed]
|
| 102 |
+
|
| 103 |
+
## Evaluation
|
| 104 |
+
|
| 105 |
+
<!-- This section describes the evaluation protocols and provides the results. -->
|
| 106 |
+
|
| 107 |
+
### Testing Data, Factors & Metrics
|
| 108 |
+
|
| 109 |
+
#### Testing Data
|
| 110 |
+
|
| 111 |
+
<!-- This should link to a Dataset Card if possible. -->
|
| 112 |
+
|
| 113 |
+
[More Information Needed]
|
| 114 |
+
|
| 115 |
+
#### Factors
|
| 116 |
+
|
| 117 |
+
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
|
| 118 |
+
|
| 119 |
+
[More Information Needed]
|
| 120 |
+
|
| 121 |
+
#### Metrics
|
| 122 |
+
|
| 123 |
+
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
|
| 124 |
+
|
| 125 |
+
[More Information Needed]
|
| 126 |
+
|
| 127 |
+
### Results
|
| 128 |
+
|
| 129 |
+
[More Information Needed]
|
| 130 |
+
|
| 131 |
+
#### Summary
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
## Model Examination [optional]
|
| 136 |
+
|
| 137 |
+
<!-- Relevant interpretability work for the model goes here -->
|
| 138 |
+
|
| 139 |
+
[More Information Needed]
|
| 140 |
+
|
| 141 |
+
## Environmental Impact
|
| 142 |
+
|
| 143 |
+
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
|
| 144 |
+
|
| 145 |
+
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
|
| 146 |
+
|
| 147 |
+
- **Hardware Type:** [More Information Needed]
|
| 148 |
+
- **Hours used:** [More Information Needed]
|
| 149 |
+
- **Cloud Provider:** [More Information Needed]
|
| 150 |
+
- **Compute Region:** [More Information Needed]
|
| 151 |
+
- **Carbon Emitted:** [More Information Needed]
|
| 152 |
+
|
| 153 |
+
## Technical Specifications [optional]
|
| 154 |
+
|
| 155 |
+
### Model Architecture and Objective
|
| 156 |
+
|
| 157 |
+
[More Information Needed]
|
| 158 |
+
|
| 159 |
+
### Compute Infrastructure
|
| 160 |
+
|
| 161 |
+
[More Information Needed]
|
| 162 |
+
|
| 163 |
+
#### Hardware
|
| 164 |
+
|
| 165 |
+
[More Information Needed]
|
| 166 |
+
|
| 167 |
+
#### Software
|
| 168 |
+
|
| 169 |
+
[More Information Needed]
|
| 170 |
+
|
| 171 |
+
## Citation [optional]
|
| 172 |
+
|
| 173 |
+
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
|
| 174 |
+
|
| 175 |
+
**BibTeX:**
|
| 176 |
+
|
| 177 |
+
[More Information Needed]
|
| 178 |
+
|
| 179 |
+
**APA:**
|
| 180 |
+
|
| 181 |
+
[More Information Needed]
|
| 182 |
+
|
| 183 |
+
## Glossary [optional]
|
| 184 |
+
|
| 185 |
+
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
|
| 186 |
+
|
| 187 |
+
[More Information Needed]
|
| 188 |
+
|
| 189 |
+
## More Information [optional]
|
| 190 |
+
|
| 191 |
+
[More Information Needed]
|
| 192 |
+
|
| 193 |
+
## Model Card Authors [optional]
|
| 194 |
+
|
| 195 |
+
[More Information Needed]
|
| 196 |
+
|
| 197 |
+
## Model Card Contact
|
| 198 |
+
|
| 199 |
+
[More Information Needed]
|
| 200 |
+
### Framework versions
|
| 201 |
+
|
| 202 |
+
- PEFT 0.12.0
|
qwen2-vl-7b-instruct/adapter_config.json
ADDED
|
@@ -0,0 +1,26 @@
|
| 1 |
+
{
|
| 2 |
+
"alpha_pattern": {},
|
| 3 |
+
"auto_mapping": null,
|
| 4 |
+
"base_model_name_or_path": "/fs/clip-projects/geoguesser/vlms/qwen/Qwen2-VL-7B-Instruct",
|
| 5 |
+
"bias": "none",
|
| 6 |
+
"fan_in_fan_out": false,
|
| 7 |
+
"inference_mode": true,
|
| 8 |
+
"init_lora_weights": true,
|
| 9 |
+
"layer_replication": null,
|
| 10 |
+
"layers_pattern": null,
|
| 11 |
+
"layers_to_transform": null,
|
| 12 |
+
"loftq_config": {},
|
| 13 |
+
"lora_alpha": 32,
|
| 14 |
+
"lora_dropout": 0.05,
|
| 15 |
+
"megatron_config": null,
|
| 16 |
+
"megatron_core": "megatron.core",
|
| 17 |
+
"modules_to_save": [],
|
| 18 |
+
"peft_type": "LORA",
|
| 19 |
+
"r": 8,
|
| 20 |
+
"rank_pattern": {},
|
| 21 |
+
"revision": null,
|
| 22 |
+
"target_modules": "^(model)(?!.*(lm_head|output|emb|wte|shared)).*",
|
| 23 |
+
"task_type": "CAUSAL_LM",
|
| 24 |
+
"use_dora": false,
|
| 25 |
+
"use_rslora": false
|
| 26 |
+
}
|
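The adapter_config.json above describes a rank-8 LoRA adapter (alpha 32, dropout 0.05) whose `target_modules` regex selects the language-model blocks of Qwen2-VL-7B-Instruct while excluding embeddings and the LM head. A minimal sketch, assuming illustrative local paths, for attaching this adapter directory to the base model with PEFT:

```python
import torch
from peft import PeftModel
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

base_path = "Qwen/Qwen2-VL-7B-Instruct"   # or the local path from base_model_name_or_path
adapter_path = "qwen2-vl-7b-instruct"     # this folder: adapter_config.json + adapter_model.safetensors

# Load the base vision-language model in bf16, matching the fine-tuning dtype.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    base_path, torch_dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained(base_path)

# Attach the LoRA adapter; merge_and_unload() folds the low-rank weights into the base model.
model = PeftModel.from_pretrained(model, adapter_path)
model = model.merge_and_unload()
```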
qwen2-vl-7b-instruct/adapter_model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ad3e18d84609148116c11627b4ee17df62b366c21aa774b2f935d104dc175a31
|
| 3 |
+
size 80792096
|
qwen2-vl-7b-instruct/additional_config.json
ADDED
|
@@ -0,0 +1 @@
|
| 1 |
+
{"lora_dtype": null, "lorap_lr_ratio": null, "lorap_emb_lr": 1e-06}
|
qwen2-vl-7b-instruct/configuration.json
ADDED
|
@@ -0,0 +1,14 @@
|
| 1 |
+
{
|
| 2 |
+
"framework": "pytorch",
|
| 3 |
+
"task": "image-text-to-text",
|
| 4 |
+
"allow_remote": true,
|
| 5 |
+
"adapter_cfg": {
|
| 6 |
+
"model_id_or_path": "/fs/clip-projects/geoguesser/vlms/qwen/Qwen2-VL-7B-Instruct",
|
| 7 |
+
"model_revision": "master",
|
| 8 |
+
"sft_type": "lora",
|
| 9 |
+
"tuner_backend": "peft",
|
| 10 |
+
"template_type": "qwen2-vl",
|
| 11 |
+
"dtype": "bf16",
|
| 12 |
+
"system": "You are a helpful assistant."
|
| 13 |
+
}
|
| 14 |
+
}
|
qwen2-vl-7b-instruct/generation_config.json
ADDED
|
@@ -0,0 +1,11 @@
|
| 1 |
+
{
|
| 2 |
+
"bos_token_id": 151643,
|
| 3 |
+
"do_sample": true,
|
| 4 |
+
"eos_token_id": 151645,
|
| 5 |
+
"max_new_tokens": 2048,
|
| 6 |
+
"pad_token_id": 151643,
|
| 7 |
+
"temperature": 0.01,
|
| 8 |
+
"top_k": 1,
|
| 9 |
+
"top_p": 0.001,
|
| 10 |
+
"transformers_version": "4.45.1"
|
| 11 |
+
}
|
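The generation_config.json above pins near-greedy decoding (top_k 1, temperature 0.01, top_p 0.001) with up to 2048 new tokens. A short sketch, with an illustrative path, showing how that file can be read back as a `GenerationConfig` and applied at inference time; `model` and `inputs` are assumed to come from the loading sketch above:

```python
from transformers import GenerationConfig

# Reads generation_config.json from this adapter folder (path is illustrative).
gen_cfg = GenerationConfig.from_pretrained("qwen2-vl-7b-instruct")
print(gen_cfg.max_new_tokens, gen_cfg.temperature, gen_cfg.top_k, gen_cfg.top_p)

# At inference time (model and processor-built inputs as in the previous sketch):
# outputs = model.generate(**inputs, generation_config=gen_cfg)
```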
qwen2-vl-7b-instruct/optimizer.pt
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:47a76e0fedff89408fd4f5f5cc90b9d92a914f72baf530908a077c32e47e894e
|
| 3 |
+
size 161810282
|
qwen2-vl-7b-instruct/rng_state.pth
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:13685db7db07a31ba75d2613df933051064ed0b3f50d2ac2fbbb543b65f88f0c
|
| 3 |
+
size 14244
|
qwen2-vl-7b-instruct/scheduler.pt
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b75bb0cab5c1ac64f1f42e99abb3e1c6d095394f6dd0c73c81aee621b476d6d9
|
| 3 |
+
size 1064
|
qwen2-vl-7b-instruct/sft_args.json
ADDED
|
@@ -0,0 +1,247 @@
|
| 1 |
+
{
|
| 2 |
+
"model_type": "qwen2-vl-7b-instruct",
|
| 3 |
+
"model_id_or_path": "/fs/clip-projects/geoguesser/vlms/qwen/Qwen2-VL-7B-Instruct",
|
| 4 |
+
"model_revision": "master",
|
| 5 |
+
"full_determinism": false,
|
| 6 |
+
"sft_type": "lora",
|
| 7 |
+
"freeze_parameters": [],
|
| 8 |
+
"freeze_vit": false,
|
| 9 |
+
"freeze_parameters_ratio": 0.0,
|
| 10 |
+
"additional_trainable_parameters": [],
|
| 11 |
+
"tuner_backend": "peft",
|
| 12 |
+
"template_type": "qwen2-vl",
|
| 13 |
+
"output_dir": "/fs/clip-projects/geoguesser/vlms/qwen/output/qwen2-vl-7b-instruct/v5-20241108-053635",
|
| 14 |
+
"add_output_dir_suffix": true,
|
| 15 |
+
"ddp_backend": null,
|
| 16 |
+
"ddp_find_unused_parameters": null,
|
| 17 |
+
"ddp_broadcast_buffers": null,
|
| 18 |
+
"ddp_timeout": 1800,
|
| 19 |
+
"seed": 42,
|
| 20 |
+
"resume_from_checkpoint": null,
|
| 21 |
+
"resume_only_model": false,
|
| 22 |
+
"ignore_data_skip": false,
|
| 23 |
+
"dtype": "bf16",
|
| 24 |
+
"packing": false,
|
| 25 |
+
"train_backend": "transformers",
|
| 26 |
+
"tp": 1,
|
| 27 |
+
"pp": 1,
|
| 28 |
+
"min_lr": null,
|
| 29 |
+
"sequence_parallel": false,
|
| 30 |
+
"model_kwargs": null,
|
| 31 |
+
"loss_name": null,
|
| 32 |
+
"dataset": [
|
| 33 |
+
"train_set_a.jsonl"
|
| 34 |
+
],
|
| 35 |
+
"val_dataset": [
|
| 36 |
+
"test_set_a.jsonl"
|
| 37 |
+
],
|
| 38 |
+
"dataset_seed": 42,
|
| 39 |
+
"dataset_test_ratio": 0.0,
|
| 40 |
+
"use_loss_scale": false,
|
| 41 |
+
"loss_scale_config_path": "/fs/clip-projects/geoguesser/zheyuan/DPO/swift/swift/llm/agent/default_loss_scale_config.json",
|
| 42 |
+
"system": "You are a helpful assistant.",
|
| 43 |
+
"tools_prompt": "react_en",
|
| 44 |
+
"max_length": 2048,
|
| 45 |
+
"truncation_strategy": "delete",
|
| 46 |
+
"check_dataset_strategy": "none",
|
| 47 |
+
"streaming": false,
|
| 48 |
+
"streaming_val_size": 0,
|
| 49 |
+
"streaming_buffer_size": 16384,
|
| 50 |
+
"model_name": [
|
| 51 |
+
null,
|
| 52 |
+
null
|
| 53 |
+
],
|
| 54 |
+
"model_author": [
|
| 55 |
+
null,
|
| 56 |
+
null
|
| 57 |
+
],
|
| 58 |
+
"quant_method": null,
|
| 59 |
+
"quantization_bit": 0,
|
| 60 |
+
"hqq_axis": 0,
|
| 61 |
+
"hqq_dynamic_config_path": null,
|
| 62 |
+
"bnb_4bit_comp_dtype": "bf16",
|
| 63 |
+
"bnb_4bit_quant_type": "nf4",
|
| 64 |
+
"bnb_4bit_use_double_quant": true,
|
| 65 |
+
"bnb_4bit_quant_storage": null,
|
| 66 |
+
"rescale_image": -1,
|
| 67 |
+
"target_modules": "^(model)(?!.*(lm_head|output|emb|wte|shared)).*",
|
| 68 |
+
"target_regex": null,
|
| 69 |
+
"modules_to_save": [],
|
| 70 |
+
"lora_rank": 8,
|
| 71 |
+
"lora_alpha": 32,
|
| 72 |
+
"lora_dropout": 0.05,
|
| 73 |
+
"lora_bias_trainable": "none",
|
| 74 |
+
"lora_dtype": null,
|
| 75 |
+
"lora_lr_ratio": null,
|
| 76 |
+
"use_rslora": false,
|
| 77 |
+
"use_dora": false,
|
| 78 |
+
"init_lora_weights": true,
|
| 79 |
+
"fourier_n_frequency": 2000,
|
| 80 |
+
"fourier_scaling": 300.0,
|
| 81 |
+
"rope_scaling": null,
|
| 82 |
+
"boft_block_size": 4,
|
| 83 |
+
"boft_block_num": 0,
|
| 84 |
+
"boft_n_butterfly_factor": 1,
|
| 85 |
+
"boft_dropout": 0.0,
|
| 86 |
+
"vera_rank": 256,
|
| 87 |
+
"vera_projection_prng_key": 0,
|
| 88 |
+
"vera_dropout": 0.0,
|
| 89 |
+
"vera_d_initial": 0.1,
|
| 90 |
+
"adapter_act": "gelu",
|
| 91 |
+
"adapter_length": 128,
|
| 92 |
+
"use_galore": false,
|
| 93 |
+
"galore_target_modules": null,
|
| 94 |
+
"galore_rank": 128,
|
| 95 |
+
"galore_update_proj_gap": 50,
|
| 96 |
+
"galore_scale": 1.0,
|
| 97 |
+
"galore_proj_type": "std",
|
| 98 |
+
"galore_optim_per_parameter": false,
|
| 99 |
+
"galore_with_embedding": false,
|
| 100 |
+
"galore_quantization": false,
|
| 101 |
+
"galore_proj_quant": false,
|
| 102 |
+
"galore_proj_bits": 4,
|
| 103 |
+
"galore_proj_group_size": 256,
|
| 104 |
+
"galore_cos_threshold": 0.4,
|
| 105 |
+
"galore_gamma_proj": 2,
|
| 106 |
+
"galore_queue_size": 5,
|
| 107 |
+
"adalora_target_r": 8,
|
| 108 |
+
"adalora_init_r": 12,
|
| 109 |
+
"adalora_tinit": 0,
|
| 110 |
+
"adalora_tfinal": 0,
|
| 111 |
+
"adalora_deltaT": 1,
|
| 112 |
+
"adalora_beta1": 0.85,
|
| 113 |
+
"adalora_beta2": 0.85,
|
| 114 |
+
"adalora_orth_reg_weight": 0.5,
|
| 115 |
+
"ia3_feedforward_modules": [],
|
| 116 |
+
"llamapro_num_new_blocks": 4,
|
| 117 |
+
"llamapro_num_groups": null,
|
| 118 |
+
"neftune_noise_alpha": null,
|
| 119 |
+
"neftune_backend": "transformers",
|
| 120 |
+
"lisa_activated_layers": 0,
|
| 121 |
+
"lisa_step_interval": 20,
|
| 122 |
+
"reft_layer_key": null,
|
| 123 |
+
"reft_layers": null,
|
| 124 |
+
"reft_rank": 4,
|
| 125 |
+
"reft_intervention_type": "LoreftIntervention",
|
| 126 |
+
"reft_args": null,
|
| 127 |
+
"use_liger": false,
|
| 128 |
+
"gradient_checkpointing": true,
|
| 129 |
+
"deepspeed": null,
|
| 130 |
+
"batch_size": 1,
|
| 131 |
+
"eval_batch_size": 1,
|
| 132 |
+
"auto_find_batch_size": false,
|
| 133 |
+
"num_train_epochs": 2,
|
| 134 |
+
"max_steps": -1,
|
| 135 |
+
"optim": "adamw_torch",
|
| 136 |
+
"adam_beta1": 0.9,
|
| 137 |
+
"adam_beta2": 0.95,
|
| 138 |
+
"adam_epsilon": 1e-08,
|
| 139 |
+
"learning_rate": 0.0001,
|
| 140 |
+
"weight_decay": 0.1,
|
| 141 |
+
"gradient_accumulation_steps": 4,
|
| 142 |
+
"max_grad_norm": 1,
|
| 143 |
+
"predict_with_generate": false,
|
| 144 |
+
"lr_scheduler_type": "cosine",
|
| 145 |
+
"lr_scheduler_kwargs": {},
|
| 146 |
+
"warmup_ratio": 0.05,
|
| 147 |
+
"warmup_steps": 0,
|
| 148 |
+
"eval_steps": 50,
|
| 149 |
+
"save_steps": 50,
|
| 150 |
+
"save_only_model": false,
|
| 151 |
+
"save_total_limit": 2,
|
| 152 |
+
"logging_steps": 5,
|
| 153 |
+
"acc_steps": 1,
|
| 154 |
+
"dataloader_num_workers": 1,
|
| 155 |
+
"dataloader_pin_memory": true,
|
| 156 |
+
"dataloader_drop_last": false,
|
| 157 |
+
"push_to_hub": false,
|
| 158 |
+
"hub_model_id": null,
|
| 159 |
+
"hub_token": null,
|
| 160 |
+
"hub_private_repo": false,
|
| 161 |
+
"hub_strategy": "every_save",
|
| 162 |
+
"test_oom_error": false,
|
| 163 |
+
"disable_tqdm": false,
|
| 164 |
+
"lazy_tokenize": true,
|
| 165 |
+
"preprocess_num_proc": 1,
|
| 166 |
+
"use_flash_attn": null,
|
| 167 |
+
"ignore_args_error": false,
|
| 168 |
+
"check_model_is_latest": true,
|
| 169 |
+
"logging_dir": "/fs/clip-projects/geoguesser/vlms/qwen/output/qwen2-vl-7b-instruct/v5-20241108-053635/runs",
|
| 170 |
+
"report_to": [
|
| 171 |
+
"tensorboard"
|
| 172 |
+
],
|
| 173 |
+
"acc_strategy": "token",
|
| 174 |
+
"save_on_each_node": false,
|
| 175 |
+
"evaluation_strategy": "steps",
|
| 176 |
+
"save_strategy": "steps",
|
| 177 |
+
"save_safetensors": true,
|
| 178 |
+
"gpu_memory_fraction": null,
|
| 179 |
+
"include_num_input_tokens_seen": false,
|
| 180 |
+
"local_repo_path": null,
|
| 181 |
+
"custom_register_path": null,
|
| 182 |
+
"custom_dataset_info": null,
|
| 183 |
+
"device_map_config": null,
|
| 184 |
+
"device_max_memory": [],
|
| 185 |
+
"max_new_tokens": 2048,
|
| 186 |
+
"do_sample": null,
|
| 187 |
+
"temperature": null,
|
| 188 |
+
"top_k": null,
|
| 189 |
+
"top_p": null,
|
| 190 |
+
"repetition_penalty": null,
|
| 191 |
+
"num_beams": 1,
|
| 192 |
+
"fsdp": "",
|
| 193 |
+
"fsdp_config": null,
|
| 194 |
+
"sequence_parallel_size": 1,
|
| 195 |
+
"model_layer_cls_name": null,
|
| 196 |
+
"metric_warmup_step": 0,
|
| 197 |
+
"fsdp_num": 1,
|
| 198 |
+
"per_device_train_batch_size": null,
|
| 199 |
+
"per_device_eval_batch_size": null,
|
| 200 |
+
"eval_strategy": null,
|
| 201 |
+
"self_cognition_sample": 0,
|
| 202 |
+
"train_dataset_mix_ratio": 0.0,
|
| 203 |
+
"train_dataset_mix_ds": [
|
| 204 |
+
"ms-bench"
|
| 205 |
+
],
|
| 206 |
+
"train_dataset_sample": -1,
|
| 207 |
+
"val_dataset_sample": null,
|
| 208 |
+
"safe_serialization": null,
|
| 209 |
+
"only_save_model": null,
|
| 210 |
+
"neftune_alpha": null,
|
| 211 |
+
"deepspeed_config_path": null,
|
| 212 |
+
"model_cache_dir": null,
|
| 213 |
+
"lora_dropout_p": null,
|
| 214 |
+
"lora_target_modules": [],
|
| 215 |
+
"lora_target_regex": null,
|
| 216 |
+
"lora_modules_to_save": [],
|
| 217 |
+
"boft_target_modules": [],
|
| 218 |
+
"boft_modules_to_save": [],
|
| 219 |
+
"vera_target_modules": [],
|
| 220 |
+
"vera_modules_to_save": [],
|
| 221 |
+
"ia3_target_modules": [],
|
| 222 |
+
"ia3_modules_to_save": [],
|
| 223 |
+
"custom_train_dataset_path": [],
|
| 224 |
+
"custom_val_dataset_path": [],
|
| 225 |
+
"device_map_config_path": null,
|
| 226 |
+
"push_hub_strategy": null,
|
| 227 |
+
"use_self_cognition": false,
|
| 228 |
+
"is_multimodal": true,
|
| 229 |
+
"is_vision": true,
|
| 230 |
+
"lora_use_embedding": false,
|
| 231 |
+
"lora_use_all": false,
|
| 232 |
+
"lora_m2s_use_embedding": false,
|
| 233 |
+
"lora_m2s_use_ln": false,
|
| 234 |
+
"torch_dtype": "torch.bfloat16",
|
| 235 |
+
"fp16": false,
|
| 236 |
+
"bf16": true,
|
| 237 |
+
"rank": -1,
|
| 238 |
+
"local_rank": -1,
|
| 239 |
+
"world_size": 1,
|
| 240 |
+
"local_world_size": 1,
|
| 241 |
+
"bnb_4bit_compute_dtype": "torch.bfloat16",
|
| 242 |
+
"load_in_4bit": false,
|
| 243 |
+
"load_in_8bit": false,
|
| 244 |
+
"train_sampler_random": true,
|
| 245 |
+
"train_type": "sft",
|
| 246 |
+
"training_args": "Seq2SeqTrainingArguments(output_dir='/fs/clip-projects/geoguesser/vlms/qwen/output/qwen2-vl-7b-instruct/v5-20241108-053635', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=1, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=4, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=0.0001, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1, num_train_epochs=2, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs={}, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/fs/clip-projects/geoguesser/vlms/qwen/output/qwen2-vl-7b-instruct/v5-20241108-053635/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<IntervalStrategy.STEPS: 'steps'>, save_steps=50, save_total_limit=2, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=50, dataloader_num_workers=1, dataloader_prefetch_factor=None, past_index=-1, run_name='/fs/clip-projects/geoguesser/vlms/qwen/output/qwen2-vl-7b-instruct/v5-20241108-053635', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed=None, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=False, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, eval_do_concat_batches=True, fp16_backend='auto', evaluation_strategy=None, push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, split_batches=None, include_tokens_per_second=False, 
include_num_input_tokens_seen=False, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=GenerationConfig {\n \"bos_token_id\": 151643,\n \"do_sample\": true,\n \"eos_token_id\": 151645,\n \"max_new_tokens\": 2048,\n \"pad_token_id\": 151643,\n \"temperature\": 0.01,\n \"top_k\": 1,\n \"top_p\": 0.001\n}\n, acc_strategy='token', loss_name=None, additional_saved_files=[], train_sampler_random=True, metric_warmup_step=0, train_dataset_sample=-1)"
|
| 247 |
+
}
|
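The tail of sft_args.json above pins down the optimization recipe for this SFT run: AdamW (beta1 0.9, beta2 0.95, eps 1e-8) at a peak learning rate of 1e-4 with weight decay 0.1, a cosine schedule with 5% warmup, per-device batch size 1 with 4 gradient-accumulation steps, bf16 plus gradient checkpointing, and near-greedy generation for eval (temperature 0.01, top_k 1, top_p 0.001). The snippet below is not part of the upload; it is a minimal sketch that reconstructs the learning-rate schedule these settings imply, assuming the 534 optimizer steps reported in trainer_state.json further down and a throwaway placeholder model.

```python
# Sketch only: rebuild the warmup + cosine LR schedule implied by the arguments above.
# The linear layer is a stand-in model; 534 steps is taken from trainer_state.json below.
import math

import torch
from transformers import get_cosine_schedule_with_warmup

model = torch.nn.Linear(8, 8)                  # placeholder, assumption for illustration
total_steps = 534                              # max_steps / global_step in trainer_state.json
warmup_steps = math.ceil(0.05 * total_steps)   # warmup_ratio=0.05 -> ~27 steps

optimizer = torch.optim.AdamW(
    model.parameters(),
    lr=1e-4,                                   # learning_rate
    betas=(0.9, 0.95),                         # adam_beta1, adam_beta2
    eps=1e-8,                                  # adam_epsilon
    weight_decay=0.1,                          # weight_decay
)
scheduler = get_cosine_schedule_with_warmup(optimizer, warmup_steps, total_steps)

for step in range(total_steps):
    optimizer.step()                           # no gradients here; only the schedule matters
    scheduler.step()
    if step in (0, warmup_steps, total_steps - 1):
        print(f"step {step}: lr = {scheduler.get_last_lr()[0]:.2e}")
```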
qwen2-vl-7b-instruct/trainer_state.json
ADDED
|
@@ -0,0 +1,1202 @@
|
| 1 |
+
{
|
| 2 |
+
"best_metric": 1.55006742,
|
| 3 |
+
"best_model_checkpoint": "/fs/clip-projects/geoguesser/vlms/qwen/output/qwen2-vl-7b-instruct/v5-20241108-053635/checkpoint-500",
|
| 4 |
+
"epoch": 1.9962616822429906,
|
| 5 |
+
"eval_steps": 50,
|
| 6 |
+
"global_step": 534,
|
| 7 |
+
"is_hyper_param_search": false,
|
| 8 |
+
"is_local_process_zero": true,
|
| 9 |
+
"is_world_process_zero": true,
|
| 10 |
+
"log_history": [
|
| 11 |
+
{
|
| 12 |
+
"acc": 0.55171263,
|
| 13 |
+
"epoch": 0.003738317757009346,
|
| 14 |
+
"grad_norm": 1.868323802947998,
|
| 15 |
+
"learning_rate": 3.7037037037037037e-06,
|
| 16 |
+
"loss": 1.850384,
|
| 17 |
+
"memory(GiB)": 31.32,
|
| 18 |
+
"step": 1,
|
| 19 |
+
"train_speed(iter/s)": 0.082547
|
| 20 |
+
},
|
| 21 |
+
{
|
| 22 |
+
"acc": 0.51965243,
|
| 23 |
+
"epoch": 0.018691588785046728,
|
| 24 |
+
"grad_norm": 1.9730364084243774,
|
| 25 |
+
"learning_rate": 1.8518518518518518e-05,
|
| 26 |
+
"loss": 2.06072664,
|
| 27 |
+
"memory(GiB)": 39.03,
|
| 28 |
+
"step": 5,
|
| 29 |
+
"train_speed(iter/s)": 0.11522
|
| 30 |
+
},
|
| 31 |
+
{
|
| 32 |
+
"acc": 0.54611473,
|
| 33 |
+
"epoch": 0.037383177570093455,
|
| 34 |
+
"grad_norm": 1.2351425886154175,
|
| 35 |
+
"learning_rate": 3.7037037037037037e-05,
|
| 36 |
+
"loss": 1.92748413,
|
| 37 |
+
"memory(GiB)": 40.38,
|
| 38 |
+
"step": 10,
|
| 39 |
+
"train_speed(iter/s)": 0.120543
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"acc": 0.53391666,
|
| 43 |
+
"epoch": 0.056074766355140186,
|
| 44 |
+
"grad_norm": 1.3700778484344482,
|
| 45 |
+
"learning_rate": 5.555555555555556e-05,
|
| 46 |
+
"loss": 1.8874958,
|
| 47 |
+
"memory(GiB)": 41.74,
|
| 48 |
+
"step": 15,
|
| 49 |
+
"train_speed(iter/s)": 0.122255
|
| 50 |
+
},
|
| 51 |
+
{
|
| 52 |
+
"acc": 0.53817282,
|
| 53 |
+
"epoch": 0.07476635514018691,
|
| 54 |
+
"grad_norm": 1.5009832382202148,
|
| 55 |
+
"learning_rate": 7.407407407407407e-05,
|
| 56 |
+
"loss": 1.8099781,
|
| 57 |
+
"memory(GiB)": 41.74,
|
| 58 |
+
"step": 20,
|
| 59 |
+
"train_speed(iter/s)": 0.123099
|
| 60 |
+
},
|
| 61 |
+
{
|
| 62 |
+
"acc": 0.55321841,
|
| 63 |
+
"epoch": 0.09345794392523364,
|
| 64 |
+
"grad_norm": 1.3406466245651245,
|
| 65 |
+
"learning_rate": 9.25925925925926e-05,
|
| 66 |
+
"loss": 1.6683075,
|
| 67 |
+
"memory(GiB)": 41.74,
|
| 68 |
+
"step": 25,
|
| 69 |
+
"train_speed(iter/s)": 0.123552
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"acc": 0.55131054,
|
| 73 |
+
"epoch": 0.11214953271028037,
|
| 74 |
+
"grad_norm": 1.2054580450057983,
|
| 75 |
+
"learning_rate": 9.999136119166803e-05,
|
| 76 |
+
"loss": 1.78306332,
|
| 77 |
+
"memory(GiB)": 43.11,
|
| 78 |
+
"step": 30,
|
| 79 |
+
"train_speed(iter/s)": 0.120219
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"acc": 0.54619265,
|
| 83 |
+
"epoch": 0.1308411214953271,
|
| 84 |
+
"grad_norm": 1.3058720827102661,
|
| 85 |
+
"learning_rate": 9.99385792841537e-05,
|
| 86 |
+
"loss": 1.81088448,
|
| 87 |
+
"memory(GiB)": 43.11,
|
| 88 |
+
"step": 35,
|
| 89 |
+
"train_speed(iter/s)": 0.120961
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"acc": 0.56460981,
|
| 93 |
+
"epoch": 0.14953271028037382,
|
| 94 |
+
"grad_norm": 1.2871434688568115,
|
| 95 |
+
"learning_rate": 9.983786540671051e-05,
|
| 96 |
+
"loss": 1.69376984,
|
| 97 |
+
"memory(GiB)": 44.48,
|
| 98 |
+
"step": 40,
|
| 99 |
+
"train_speed(iter/s)": 0.121524
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"acc": 0.57231364,
|
| 103 |
+
"epoch": 0.16822429906542055,
|
| 104 |
+
"grad_norm": 1.1757748126983643,
|
| 105 |
+
"learning_rate": 9.968931622637652e-05,
|
| 106 |
+
"loss": 1.6526125,
|
| 107 |
+
"memory(GiB)": 44.48,
|
| 108 |
+
"step": 45,
|
| 109 |
+
"train_speed(iter/s)": 0.121972
|
| 110 |
+
},
|
| 111 |
+
{
|
| 112 |
+
"acc": 0.56770124,
|
| 113 |
+
"epoch": 0.18691588785046728,
|
| 114 |
+
"grad_norm": 1.3221774101257324,
|
| 115 |
+
"learning_rate": 9.949307432339625e-05,
|
| 116 |
+
"loss": 1.70850391,
|
| 117 |
+
"memory(GiB)": 44.48,
|
| 118 |
+
"step": 50,
|
| 119 |
+
"train_speed(iter/s)": 0.122298
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"epoch": 0.18691588785046728,
|
| 123 |
+
"eval_acc": 0.5727995201679412,
|
| 124 |
+
"eval_loss": 1.6454861164093018,
|
| 125 |
+
"eval_runtime": 60.9474,
|
| 126 |
+
"eval_samples_per_second": 0.82,
|
| 127 |
+
"eval_steps_per_second": 0.82,
|
| 128 |
+
"step": 50
|
| 129 |
+
},
|
| 130 |
+
{
|
| 131 |
+
"acc": 0.56792145,
|
| 132 |
+
"epoch": 0.205607476635514,
|
| 133 |
+
"grad_norm": 1.3802762031555176,
|
| 134 |
+
"learning_rate": 9.924932805436949e-05,
|
| 135 |
+
"loss": 1.61318073,
|
| 136 |
+
"memory(GiB)": 44.48,
|
| 137 |
+
"step": 55,
|
| 138 |
+
"train_speed(iter/s)": 0.10774
|
| 139 |
+
},
|
| 140 |
+
{
|
| 141 |
+
"acc": 0.56201911,
|
| 142 |
+
"epoch": 0.22429906542056074,
|
| 143 |
+
"grad_norm": 1.4624619483947754,
|
| 144 |
+
"learning_rate": 9.895831137146318e-05,
|
| 145 |
+
"loss": 1.68176594,
|
| 146 |
+
"memory(GiB)": 44.48,
|
| 147 |
+
"step": 60,
|
| 148 |
+
"train_speed(iter/s)": 0.109037
|
| 149 |
+
},
|
| 150 |
+
{
|
| 151 |
+
"acc": 0.56515856,
|
| 152 |
+
"epoch": 0.24299065420560748,
|
| 153 |
+
"grad_norm": 1.3205868005752563,
|
| 154 |
+
"learning_rate": 9.862030359785981e-05,
|
| 155 |
+
"loss": 1.65190887,
|
| 156 |
+
"memory(GiB)": 44.48,
|
| 157 |
+
"step": 65,
|
| 158 |
+
"train_speed(iter/s)": 0.110146
|
| 159 |
+
},
|
| 160 |
+
{
|
| 161 |
+
"acc": 0.55511956,
|
| 162 |
+
"epoch": 0.2616822429906542,
|
| 163 |
+
"grad_norm": 1.3879112005233765,
|
| 164 |
+
"learning_rate": 9.82356291596578e-05,
|
| 165 |
+
"loss": 1.6682188,
|
| 166 |
+
"memory(GiB)": 44.48,
|
| 167 |
+
"step": 70,
|
| 168 |
+
"train_speed(iter/s)": 0.111111
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"acc": 0.56104274,
|
| 172 |
+
"epoch": 0.2803738317757009,
|
| 173 |
+
"grad_norm": 1.3009270429611206,
|
| 174 |
+
"learning_rate": 9.780465727448149e-05,
|
| 175 |
+
"loss": 1.7461134,
|
| 176 |
+
"memory(GiB)": 45.86,
|
| 177 |
+
"step": 75,
|
| 178 |
+
"train_speed(iter/s)": 0.110878
|
| 179 |
+
},
|
| 180 |
+
{
|
| 181 |
+
"acc": 0.57672982,
|
| 182 |
+
"epoch": 0.29906542056074764,
|
| 183 |
+
"grad_norm": 1.3524978160858154,
|
| 184 |
+
"learning_rate": 9.732780159709912e-05,
|
| 185 |
+
"loss": 1.61342182,
|
| 186 |
+
"memory(GiB)": 45.86,
|
| 187 |
+
"step": 80,
|
| 188 |
+
"train_speed(iter/s)": 0.111688
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"acc": 0.56858454,
|
| 192 |
+
"epoch": 0.3177570093457944,
|
| 193 |
+
"grad_norm": 1.368619680404663,
|
| 194 |
+
"learning_rate": 9.680551982238942e-05,
|
| 195 |
+
"loss": 1.62513695,
|
| 196 |
+
"memory(GiB)": 45.86,
|
| 197 |
+
"step": 85,
|
| 198 |
+
"train_speed(iter/s)": 0.112415
|
| 199 |
+
},
|
| 200 |
+
{
|
| 201 |
+
"acc": 0.56374822,
|
| 202 |
+
"epoch": 0.3364485981308411,
|
| 203 |
+
"grad_norm": 1.397831916809082,
|
| 204 |
+
"learning_rate": 9.623831324603754e-05,
|
| 205 |
+
"loss": 1.69306774,
|
| 206 |
+
"memory(GiB)": 45.86,
|
| 207 |
+
"step": 90,
|
| 208 |
+
"train_speed(iter/s)": 0.113063
|
| 209 |
+
},
|
| 210 |
+
{
|
| 211 |
+
"acc": 0.57811651,
|
| 212 |
+
"epoch": 0.35514018691588783,
|
| 213 |
+
"grad_norm": 1.271440029144287,
|
| 214 |
+
"learning_rate": 9.562672628338233e-05,
|
| 215 |
+
"loss": 1.63228798,
|
| 216 |
+
"memory(GiB)": 45.86,
|
| 217 |
+
"step": 95,
|
| 218 |
+
"train_speed(iter/s)": 0.113645
|
| 219 |
+
},
|
| 220 |
+
{
|
| 221 |
+
"acc": 0.5570353,
|
| 222 |
+
"epoch": 0.37383177570093457,
|
| 223 |
+
"grad_norm": 1.2964327335357666,
|
| 224 |
+
"learning_rate": 9.497134594687634e-05,
|
| 225 |
+
"loss": 1.72664585,
|
| 226 |
+
"memory(GiB)": 45.86,
|
| 227 |
+
"step": 100,
|
| 228 |
+
"train_speed(iter/s)": 0.114174
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"epoch": 0.37383177570093457,
|
| 232 |
+
"eval_acc": 0.5792472634577898,
|
| 233 |
+
"eval_loss": 1.6085342168807983,
|
| 234 |
+
"eval_runtime": 62.4797,
|
| 235 |
+
"eval_samples_per_second": 0.8,
|
| 236 |
+
"eval_steps_per_second": 0.8,
|
| 237 |
+
"step": 100
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"acc": 0.57630959,
|
| 241 |
+
"epoch": 0.3925233644859813,
|
| 242 |
+
"grad_norm": 1.244130253791809,
|
| 243 |
+
"learning_rate": 9.42728012826605e-05,
|
| 244 |
+
"loss": 1.64715214,
|
| 245 |
+
"memory(GiB)": 45.86,
|
| 246 |
+
"step": 105,
|
| 247 |
+
"train_speed(iter/s)": 0.107229
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"acc": 0.5584549,
|
| 251 |
+
"epoch": 0.411214953271028,
|
| 252 |
+
"grad_norm": 1.3243989944458008,
|
| 253 |
+
"learning_rate": 9.353176276679396e-05,
|
| 254 |
+
"loss": 1.68698692,
|
| 255 |
+
"memory(GiB)": 45.86,
|
| 256 |
+
"step": 110,
|
| 257 |
+
"train_speed(iter/s)": 0.107951
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"acc": 0.5546257,
|
| 261 |
+
"epoch": 0.42990654205607476,
|
| 262 |
+
"grad_norm": 1.437445878982544,
|
| 263 |
+
"learning_rate": 9.274894166171888e-05,
|
| 264 |
+
"loss": 1.66922894,
|
| 265 |
+
"memory(GiB)": 45.86,
|
| 266 |
+
"step": 115,
|
| 267 |
+
"train_speed(iter/s)": 0.10862
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"acc": 0.57244515,
|
| 271 |
+
"epoch": 0.4485981308411215,
|
| 272 |
+
"grad_norm": 1.3543046712875366,
|
| 273 |
+
"learning_rate": 9.192508933357753e-05,
|
| 274 |
+
"loss": 1.70311775,
|
| 275 |
+
"memory(GiB)": 45.86,
|
| 276 |
+
"step": 120,
|
| 277 |
+
"train_speed(iter/s)": 0.109232
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"acc": 0.56850109,
|
| 281 |
+
"epoch": 0.4672897196261682,
|
| 282 |
+
"grad_norm": 1.287984013557434,
|
| 283 |
+
"learning_rate": 9.106099653103728e-05,
|
| 284 |
+
"loss": 1.61406059,
|
| 285 |
+
"memory(GiB)": 45.86,
|
| 286 |
+
"step": 125,
|
| 287 |
+
"train_speed(iter/s)": 0.109801
|
| 288 |
+
},
|
| 289 |
+
{
|
| 290 |
+
"acc": 0.56755419,
|
| 291 |
+
"epoch": 0.48598130841121495,
|
| 292 |
+
"grad_norm": 1.4639618396759033,
|
| 293 |
+
"learning_rate": 9.015749262631536e-05,
|
| 294 |
+
"loss": 1.57637978,
|
| 295 |
+
"memory(GiB)": 45.86,
|
| 296 |
+
"step": 130,
|
| 297 |
+
"train_speed(iter/s)": 0.110329
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"acc": 0.58115373,
|
| 301 |
+
"epoch": 0.5046728971962616,
|
| 302 |
+
"grad_norm": 1.5570566654205322,
|
| 303 |
+
"learning_rate": 8.921544481913218e-05,
|
| 304 |
+
"loss": 1.62401295,
|
| 305 |
+
"memory(GiB)": 45.86,
|
| 306 |
+
"step": 135,
|
| 307 |
+
"train_speed(iter/s)": 0.110827
|
| 308 |
+
},
|
| 309 |
+
{
|
| 310 |
+
"acc": 0.55897279,
|
| 311 |
+
"epoch": 0.5233644859813084,
|
| 312 |
+
"grad_norm": 1.4730037450790405,
|
| 313 |
+
"learning_rate": 8.823575730435693e-05,
|
| 314 |
+
"loss": 1.66579857,
|
| 315 |
+
"memory(GiB)": 52.51,
|
| 316 |
+
"step": 140,
|
| 317 |
+
"train_speed(iter/s)": 0.111291
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"acc": 0.56799178,
|
| 321 |
+
"epoch": 0.5420560747663551,
|
| 322 |
+
"grad_norm": 1.350874423980713,
|
| 323 |
+
"learning_rate": 8.721937040414481e-05,
|
| 324 |
+
"loss": 1.60019073,
|
| 325 |
+
"memory(GiB)": 52.51,
|
| 326 |
+
"step": 145,
|
| 327 |
+
"train_speed(iter/s)": 0.111724
|
| 328 |
+
},
|
| 329 |
+
{
|
| 330 |
+
"acc": 0.55238876,
|
| 331 |
+
"epoch": 0.5607476635514018,
|
| 332 |
+
"grad_norm": 1.5056456327438354,
|
| 333 |
+
"learning_rate": 8.616725966539832e-05,
|
| 334 |
+
"loss": 1.68097,
|
| 335 |
+
"memory(GiB)": 52.51,
|
| 336 |
+
"step": 150,
|
| 337 |
+
"train_speed(iter/s)": 0.11205
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"epoch": 0.5607476635514018,
|
| 341 |
+
"eval_acc": 0.5831458989353726,
|
| 342 |
+
"eval_loss": 1.588950753211975,
|
| 343 |
+
"eval_runtime": 60.5954,
|
| 344 |
+
"eval_samples_per_second": 0.825,
|
| 345 |
+
"eval_steps_per_second": 0.825,
|
| 346 |
+
"step": 150
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"acc": 0.56648855,
|
| 350 |
+
"epoch": 0.5794392523364486,
|
| 351 |
+
"grad_norm": 1.4353731870651245,
|
| 352 |
+
"learning_rate": 8.508043492341944e-05,
|
| 353 |
+
"loss": 1.61546593,
|
| 354 |
+
"memory(GiB)": 52.51,
|
| 355 |
+
"step": 155,
|
| 356 |
+
"train_speed(iter/s)": 0.107639
|
| 357 |
+
},
|
| 358 |
+
{
|
| 359 |
+
"acc": 0.57423716,
|
| 360 |
+
"epoch": 0.5981308411214953,
|
| 361 |
+
"grad_norm": 1.514600396156311,
|
| 362 |
+
"learning_rate": 8.395993933265101e-05,
|
| 363 |
+
"loss": 1.65116329,
|
| 364 |
+
"memory(GiB)": 52.51,
|
| 365 |
+
"step": 160,
|
| 366 |
+
"train_speed(iter/s)": 0.108123
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"acc": 0.56541142,
|
| 370 |
+
"epoch": 0.616822429906542,
|
| 371 |
+
"grad_norm": 1.3241384029388428,
|
| 372 |
+
"learning_rate": 8.280684836543794e-05,
|
| 373 |
+
"loss": 1.65839729,
|
| 374 |
+
"memory(GiB)": 52.51,
|
| 375 |
+
"step": 165,
|
| 376 |
+
"train_speed(iter/s)": 0.108573
|
| 377 |
+
},
|
| 378 |
+
{
|
| 379 |
+
"acc": 0.57026463,
|
| 380 |
+
"epoch": 0.6355140186915887,
|
| 381 |
+
"grad_norm": 1.3388739824295044,
|
| 382 |
+
"learning_rate": 8.162226877976887e-05,
|
| 383 |
+
"loss": 1.61102333,
|
| 384 |
+
"memory(GiB)": 52.51,
|
| 385 |
+
"step": 170,
|
| 386 |
+
"train_speed(iter/s)": 0.109007
|
| 387 |
+
},
|
| 388 |
+
{
|
| 389 |
+
"acc": 0.57890859,
|
| 390 |
+
"epoch": 0.6542056074766355,
|
| 391 |
+
"grad_norm": 1.37869131565094,
|
| 392 |
+
"learning_rate": 8.040733755698955e-05,
|
| 393 |
+
"loss": 1.60712547,
|
| 394 |
+
"memory(GiB)": 52.51,
|
| 395 |
+
"step": 175,
|
| 396 |
+
"train_speed(iter/s)": 0.109415
|
| 397 |
+
},
|
| 398 |
+
{
|
| 399 |
+
"acc": 0.57019186,
|
| 400 |
+
"epoch": 0.6728971962616822,
|
| 401 |
+
"grad_norm": 1.4313998222351074,
|
| 402 |
+
"learning_rate": 7.916322081050709e-05,
|
| 403 |
+
"loss": 1.62115898,
|
| 404 |
+
"memory(GiB)": 52.51,
|
| 405 |
+
"step": 180,
|
| 406 |
+
"train_speed(iter/s)": 0.109805
|
| 407 |
+
},
|
| 408 |
+
{
|
| 409 |
+
"acc": 0.57807865,
|
| 410 |
+
"epoch": 0.6915887850467289,
|
| 411 |
+
"grad_norm": 1.3123388290405273,
|
| 412 |
+
"learning_rate": 7.789111266653285e-05,
|
| 413 |
+
"loss": 1.63029137,
|
| 414 |
+
"memory(GiB)": 52.51,
|
| 415 |
+
"step": 185,
|
| 416 |
+
"train_speed(iter/s)": 0.110173
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"acc": 0.58090611,
|
| 420 |
+
"epoch": 0.7102803738317757,
|
| 421 |
+
"grad_norm": 1.460463047027588,
|
| 422 |
+
"learning_rate": 7.659223411793798e-05,
|
| 423 |
+
"loss": 1.57071505,
|
| 424 |
+
"memory(GiB)": 52.51,
|
| 425 |
+
"step": 190,
|
| 426 |
+
"train_speed(iter/s)": 0.110531
|
| 427 |
+
},
|
| 428 |
+
{
|
| 429 |
+
"acc": 0.57307801,
|
| 430 |
+
"epoch": 0.7289719626168224,
|
| 431 |
+
"grad_norm": 1.3995453119277954,
|
| 432 |
+
"learning_rate": 7.526783185232207e-05,
|
| 433 |
+
"loss": 1.61080112,
|
| 434 |
+
"memory(GiB)": 52.51,
|
| 435 |
+
"step": 195,
|
| 436 |
+
"train_speed(iter/s)": 0.110867
|
| 437 |
+
},
|
| 438 |
+
{
|
| 439 |
+
"acc": 0.5799108,
|
| 440 |
+
"epoch": 0.7476635514018691,
|
| 441 |
+
"grad_norm": 1.4361484050750732,
|
| 442 |
+
"learning_rate": 7.391917705541927e-05,
|
| 443 |
+
"loss": 1.64733868,
|
| 444 |
+
"memory(GiB)": 52.51,
|
| 445 |
+
"step": 200,
|
| 446 |
+
"train_speed(iter/s)": 0.111188
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"epoch": 0.7476635514018691,
|
| 450 |
+
"eval_acc": 0.5834457939721097,
|
| 451 |
+
"eval_loss": 1.570568561553955,
|
| 452 |
+
"eval_runtime": 60.5903,
|
| 453 |
+
"eval_samples_per_second": 0.825,
|
| 454 |
+
"eval_steps_per_second": 0.825,
|
| 455 |
+
"step": 200
|
| 456 |
+
},
|
| 457 |
+
{
|
| 458 |
+
"acc": 0.56698923,
|
| 459 |
+
"epoch": 0.7663551401869159,
|
| 460 |
+
"grad_norm": 1.3287904262542725,
|
| 461 |
+
"learning_rate": 7.254756419099074e-05,
|
| 462 |
+
"loss": 1.64705162,
|
| 463 |
+
"memory(GiB)": 52.51,
|
| 464 |
+
"step": 205,
|
| 465 |
+
"train_speed(iter/s)": 0.107887
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"acc": 0.57151198,
|
| 469 |
+
"epoch": 0.7850467289719626,
|
| 470 |
+
"grad_norm": 1.38331139087677,
|
| 471 |
+
"learning_rate": 7.115430975837457e-05,
|
| 472 |
+
"loss": 1.64652443,
|
| 473 |
+
"memory(GiB)": 52.51,
|
| 474 |
+
"step": 210,
|
| 475 |
+
"train_speed(iter/s)": 0.108252
|
| 476 |
+
},
|
| 477 |
+
{
|
| 478 |
+
"acc": 0.58841505,
|
| 479 |
+
"epoch": 0.8037383177570093,
|
| 480 |
+
"grad_norm": 1.5937939882278442,
|
| 481 |
+
"learning_rate": 6.974075102888536e-05,
|
| 482 |
+
"loss": 1.61707039,
|
| 483 |
+
"memory(GiB)": 52.51,
|
| 484 |
+
"step": 215,
|
| 485 |
+
"train_speed(iter/s)": 0.108603
|
| 486 |
+
},
|
| 487 |
+
{
|
| 488 |
+
"acc": 0.55511918,
|
| 489 |
+
"epoch": 0.822429906542056,
|
| 490 |
+
"grad_norm": 1.6487551927566528,
|
| 491 |
+
"learning_rate": 6.830824476227646e-05,
|
| 492 |
+
"loss": 1.65553608,
|
| 493 |
+
"memory(GiB)": 52.51,
|
| 494 |
+
"step": 220,
|
| 495 |
+
"train_speed(iter/s)": 0.108935
|
| 496 |
+
},
|
| 497 |
+
{
|
| 498 |
+
"acc": 0.58533549,
|
| 499 |
+
"epoch": 0.8411214953271028,
|
| 500 |
+
"grad_norm": 1.4343266487121582,
|
| 501 |
+
"learning_rate": 6.685816590449708e-05,
|
| 502 |
+
"loss": 1.58468885,
|
| 503 |
+
"memory(GiB)": 52.51,
|
| 504 |
+
"step": 225,
|
| 505 |
+
"train_speed(iter/s)": 0.109256
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"acc": 0.57694592,
|
| 509 |
+
"epoch": 0.8598130841121495,
|
| 510 |
+
"grad_norm": 1.368004560470581,
|
| 511 |
+
"learning_rate": 6.539190626799366e-05,
|
| 512 |
+
"loss": 1.60840836,
|
| 513 |
+
"memory(GiB)": 52.51,
|
| 514 |
+
"step": 230,
|
| 515 |
+
"train_speed(iter/s)": 0.109563
|
| 516 |
+
},
|
| 517 |
+
{
|
| 518 |
+
"acc": 0.57554379,
|
| 519 |
+
"epoch": 0.8785046728971962,
|
| 520 |
+
"grad_norm": 1.513482928276062,
|
| 521 |
+
"learning_rate": 6.391087319582264e-05,
|
| 522 |
+
"loss": 1.59513159,
|
| 523 |
+
"memory(GiB)": 52.51,
|
| 524 |
+
"step": 235,
|
| 525 |
+
"train_speed(iter/s)": 0.109855
|
| 526 |
+
},
|
| 527 |
+
{
|
| 528 |
+
"acc": 0.56200686,
|
| 529 |
+
"epoch": 0.897196261682243,
|
| 530 |
+
"grad_norm": 1.447696566581726,
|
| 531 |
+
"learning_rate": 6.241648821085666e-05,
|
| 532 |
+
"loss": 1.61208496,
|
| 533 |
+
"memory(GiB)": 52.51,
|
| 534 |
+
"step": 240,
|
| 535 |
+
"train_speed(iter/s)": 0.110135
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"acc": 0.57686815,
|
| 539 |
+
"epoch": 0.9158878504672897,
|
| 540 |
+
"grad_norm": 1.4834848642349243,
|
| 541 |
+
"learning_rate": 6.0910185651380626e-05,
|
| 542 |
+
"loss": 1.56525345,
|
| 543 |
+
"memory(GiB)": 52.51,
|
| 544 |
+
"step": 245,
|
| 545 |
+
"train_speed(iter/s)": 0.110415
|
| 546 |
+
},
|
| 547 |
+
{
|
| 548 |
+
"acc": 0.57838049,
|
| 549 |
+
"epoch": 0.9345794392523364,
|
| 550 |
+
"grad_norm": 1.4449986219406128,
|
| 551 |
+
"learning_rate": 5.939341129438739e-05,
|
| 552 |
+
"loss": 1.66088371,
|
| 553 |
+
"memory(GiB)": 52.51,
|
| 554 |
+
"step": 250,
|
| 555 |
+
"train_speed(iter/s)": 0.110677
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"epoch": 0.9345794392523364,
|
| 559 |
+
"eval_acc": 0.5871944819313241,
|
| 560 |
+
"eval_loss": 1.5589616298675537,
|
| 561 |
+
"eval_runtime": 60.6063,
|
| 562 |
+
"eval_samples_per_second": 0.825,
|
| 563 |
+
"eval_steps_per_second": 0.825,
|
| 564 |
+
"step": 250
|
| 565 |
+
},
|
| 566 |
+
{
|
| 567 |
+
"acc": 0.56091037,
|
| 568 |
+
"epoch": 0.9532710280373832,
|
| 569 |
+
"grad_norm": 1.4498945474624634,
|
| 570 |
+
"learning_rate": 5.786762096789431e-05,
|
| 571 |
+
"loss": 1.6876915,
|
| 572 |
+
"memory(GiB)": 52.51,
|
| 573 |
+
"step": 255,
|
| 574 |
+
"train_speed(iter/s)": 0.108045
|
| 575 |
+
},
|
| 576 |
+
{
|
| 577 |
+
"acc": 0.57053814,
|
| 578 |
+
"epoch": 0.9719626168224299,
|
| 579 |
+
"grad_norm": 1.2757234573364258,
|
| 580 |
+
"learning_rate": 5.633427915361261e-05,
|
| 581 |
+
"loss": 1.65799484,
|
| 582 |
+
"memory(GiB)": 52.51,
|
| 583 |
+
"step": 260,
|
| 584 |
+
"train_speed(iter/s)": 0.108333
|
| 585 |
+
},
|
| 586 |
+
{
|
| 587 |
+
"acc": 0.56272326,
|
| 588 |
+
"epoch": 0.9906542056074766,
|
| 589 |
+
"grad_norm": 1.4214109182357788,
|
| 590 |
+
"learning_rate": 5.479485758131089e-05,
|
| 591 |
+
"loss": 1.64700985,
|
| 592 |
+
"memory(GiB)": 52.51,
|
| 593 |
+
"step": 265,
|
| 594 |
+
"train_speed(iter/s)": 0.108614
|
| 595 |
+
},
|
| 596 |
+
{
|
| 597 |
+
"acc": 0.59196057,
|
| 598 |
+
"epoch": 1.0093457943925233,
|
| 599 |
+
"grad_norm": 1.255962610244751,
|
| 600 |
+
"learning_rate": 5.325083381622165e-05,
|
| 601 |
+
"loss": 1.56780367,
|
| 602 |
+
"memory(GiB)": 52.51,
|
| 603 |
+
"step": 270,
|
| 604 |
+
"train_speed(iter/s)": 0.108871
|
| 605 |
+
},
|
| 606 |
+
{
|
| 607 |
+
"acc": 0.60109649,
|
| 608 |
+
"epoch": 1.02803738317757,
|
| 609 |
+
"grad_norm": 1.4240363836288452,
|
| 610 |
+
"learning_rate": 5.1703689840846945e-05,
|
| 611 |
+
"loss": 1.45532875,
|
| 612 |
+
"memory(GiB)": 52.51,
|
| 613 |
+
"step": 275,
|
| 614 |
+
"train_speed(iter/s)": 0.109138
|
| 615 |
+
},
|
| 616 |
+
{
|
| 617 |
+
"acc": 0.59727616,
|
| 618 |
+
"epoch": 1.0467289719626167,
|
| 619 |
+
"grad_norm": 1.5935661792755127,
|
| 620 |
+
"learning_rate": 5.01549106325243e-05,
|
| 621 |
+
"loss": 1.51683445,
|
| 622 |
+
"memory(GiB)": 52.51,
|
| 623 |
+
"step": 280,
|
| 624 |
+
"train_speed(iter/s)": 0.109392
|
| 625 |
+
},
|
| 626 |
+
{
|
| 627 |
+
"acc": 0.62937155,
|
| 628 |
+
"epoch": 1.0654205607476634,
|
| 629 |
+
"grad_norm": 1.6722455024719238,
|
| 630 |
+
"learning_rate": 4.860598273811792e-05,
|
| 631 |
+
"loss": 1.35466251,
|
| 632 |
+
"memory(GiB)": 52.51,
|
| 633 |
+
"step": 285,
|
| 634 |
+
"train_speed(iter/s)": 0.109642
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"acc": 0.58850698,
|
| 638 |
+
"epoch": 1.0841121495327102,
|
| 639 |
+
"grad_norm": 1.524778127670288,
|
| 640 |
+
"learning_rate": 4.705839284720376e-05,
|
| 641 |
+
"loss": 1.48758812,
|
| 642 |
+
"memory(GiB)": 52.51,
|
| 643 |
+
"step": 290,
|
| 644 |
+
"train_speed(iter/s)": 0.109812
|
| 645 |
+
},
|
| 646 |
+
{
|
| 647 |
+
"acc": 0.60075417,
|
| 648 |
+
"epoch": 1.102803738317757,
|
| 649 |
+
"grad_norm": 1.757370114326477,
|
| 650 |
+
"learning_rate": 4.55136263651172e-05,
|
| 651 |
+
"loss": 1.50896826,
|
| 652 |
+
"memory(GiB)": 52.51,
|
| 653 |
+
"step": 295,
|
| 654 |
+
"train_speed(iter/s)": 0.110044
|
| 655 |
+
},
|
| 656 |
+
{
|
| 657 |
+
"acc": 0.61744561,
|
| 658 |
+
"epoch": 1.1214953271028036,
|
| 659 |
+
"grad_norm": 2.0011301040649414,
|
| 660 |
+
"learning_rate": 4.397316598723385e-05,
|
| 661 |
+
"loss": 1.42747393,
|
| 662 |
+
"memory(GiB)": 52.51,
|
| 663 |
+
"step": 300,
|
| 664 |
+
"train_speed(iter/s)": 0.110269
|
| 665 |
+
},
|
| 666 |
+
{
|
| 667 |
+
"epoch": 1.1214953271028036,
|
| 668 |
+
"eval_acc": 0.5852451641925326,
|
| 669 |
+
"eval_loss": 1.5581213235855103,
|
| 670 |
+
"eval_runtime": 60.6131,
|
| 671 |
+
"eval_samples_per_second": 0.825,
|
| 672 |
+
"eval_steps_per_second": 0.825,
|
| 673 |
+
"step": 300
|
| 674 |
+
},
|
| 675 |
+
{
|
| 676 |
+
"acc": 0.59082665,
|
| 677 |
+
"epoch": 1.1401869158878504,
|
| 678 |
+
"grad_norm": 1.8005529642105103,
|
| 679 |
+
"learning_rate": 4.243849027585096e-05,
|
| 680 |
+
"loss": 1.49810066,
|
| 681 |
+
"memory(GiB)": 52.51,
|
| 682 |
+
"step": 305,
|
| 683 |
+
"train_speed(iter/s)": 0.108082
|
| 684 |
+
},
|
| 685 |
+
{
|
| 686 |
+
"acc": 0.58803234,
|
| 687 |
+
"epoch": 1.158878504672897,
|
| 688 |
+
"grad_norm": 1.8836215734481812,
|
| 689 |
+
"learning_rate": 4.0911072241036194e-05,
|
| 690 |
+
"loss": 1.53769073,
|
| 691 |
+
"memory(GiB)": 52.51,
|
| 692 |
+
"step": 310,
|
| 693 |
+
"train_speed(iter/s)": 0.108325
|
| 694 |
+
},
|
| 695 |
+
{
|
| 696 |
+
"acc": 0.61663084,
|
| 697 |
+
"epoch": 1.1775700934579438,
|
| 698 |
+
"grad_norm": 1.7952263355255127,
|
| 699 |
+
"learning_rate": 3.9392377926805226e-05,
|
| 700 |
+
"loss": 1.44214535,
|
| 701 |
+
"memory(GiB)": 52.51,
|
| 702 |
+
"step": 315,
|
| 703 |
+
"train_speed(iter/s)": 0.108566
|
| 704 |
+
},
|
| 705 |
+
{
|
| 706 |
+
"acc": 0.59271388,
|
| 707 |
+
"epoch": 1.1962616822429906,
|
| 708 |
+
"grad_norm": 1.8852580785751343,
|
| 709 |
+
"learning_rate": 3.788386500398583e-05,
|
| 710 |
+
"loss": 1.49927893,
|
| 711 |
+
"memory(GiB)": 52.51,
|
| 712 |
+
"step": 320,
|
| 713 |
+
"train_speed(iter/s)": 0.108797
|
| 714 |
+
},
|
| 715 |
+
{
|
| 716 |
+
"acc": 0.60236468,
|
| 717 |
+
"epoch": 1.2149532710280373,
|
| 718 |
+
"grad_norm": 1.737602949142456,
|
| 719 |
+
"learning_rate": 3.6386981371118355e-05,
|
| 720 |
+
"loss": 1.42521906,
|
| 721 |
+
"memory(GiB)": 52.51,
|
| 722 |
+
"step": 325,
|
| 723 |
+
"train_speed(iter/s)": 0.109019
|
| 724 |
+
},
|
| 725 |
+
{
|
| 726 |
+
"acc": 0.6055068,
|
| 727 |
+
"epoch": 1.233644859813084,
|
| 728 |
+
"grad_norm": 1.914955496788025,
|
| 729 |
+
"learning_rate": 3.49031637647361e-05,
|
| 730 |
+
"loss": 1.47248116,
|
| 731 |
+
"memory(GiB)": 52.51,
|
| 732 |
+
"step": 330,
|
| 733 |
+
"train_speed(iter/s)": 0.109238
|
| 734 |
+
},
|
| 735 |
+
{
|
| 736 |
+
"acc": 0.61518903,
|
| 737 |
+
"epoch": 1.2523364485981308,
|
| 738 |
+
"grad_norm": 1.7206995487213135,
|
| 739 |
+
"learning_rate": 3.343383638035902e-05,
|
| 740 |
+
"loss": 1.38390493,
|
| 741 |
+
"memory(GiB)": 52.51,
|
| 742 |
+
"step": 335,
|
| 743 |
+
"train_speed(iter/s)": 0.109447
|
| 744 |
+
},
|
| 745 |
+
{
|
| 746 |
+
"acc": 0.60801978,
|
| 747 |
+
"epoch": 1.2710280373831775,
|
| 748 |
+
"grad_norm": 1.9262409210205078,
|
| 749 |
+
"learning_rate": 3.1980409505524544e-05,
|
| 750 |
+
"loss": 1.41381416,
|
| 751 |
+
"memory(GiB)": 52.51,
|
| 752 |
+
"step": 340,
|
| 753 |
+
"train_speed(iter/s)": 0.109652
|
| 754 |
+
},
|
| 755 |
+
{
|
| 756 |
+
"acc": 0.60384398,
|
| 757 |
+
"epoch": 1.2897196261682242,
|
| 758 |
+
"grad_norm": 2.144967794418335,
|
| 759 |
+
"learning_rate": 3.054427816616773e-05,
|
| 760 |
+
"loss": 1.40025005,
|
| 761 |
+
"memory(GiB)": 52.51,
|
| 762 |
+
"step": 345,
|
| 763 |
+
"train_speed(iter/s)": 0.109855
|
| 764 |
+
},
|
| 765 |
+
{
|
| 766 |
+
"acc": 0.60187116,
|
| 767 |
+
"epoch": 1.308411214953271,
|
| 768 |
+
"grad_norm": 2.0876433849334717,
|
| 769 |
+
"learning_rate": 2.91268207876494e-05,
|
| 770 |
+
"loss": 1.44376688,
|
| 771 |
+
"memory(GiB)": 52.51,
|
| 772 |
+
"step": 350,
|
| 773 |
+
"train_speed(iter/s)": 0.110051
|
| 774 |
+
},
|
| 775 |
+
{
|
| 776 |
+
"epoch": 1.308411214953271,
|
| 777 |
+
"eval_acc": 0.5856950067476383,
|
| 778 |
+
"eval_loss": 1.5565516948699951,
|
| 779 |
+
"eval_runtime": 60.5775,
|
| 780 |
+
"eval_samples_per_second": 0.825,
|
| 781 |
+
"eval_steps_per_second": 0.825,
|
| 782 |
+
"step": 350
|
| 783 |
+
},
|
| 784 |
+
{
|
| 785 |
+
"acc": 0.61420636,
|
| 786 |
+
"epoch": 1.3271028037383177,
|
| 787 |
+
"grad_norm": 1.9940565824508667,
|
| 788 |
+
"learning_rate": 2.7729397871718304e-05,
|
| 789 |
+
"loss": 1.40987692,
|
| 790 |
+
"memory(GiB)": 52.51,
|
| 791 |
+
"step": 355,
|
| 792 |
+
"train_speed(iter/s)": 0.108178
|
| 793 |
+
},
|
| 794 |
+
{
|
| 795 |
+
"acc": 0.59907641,
|
| 796 |
+
"epoch": 1.3457943925233644,
|
| 797 |
+
"grad_norm": 1.9915255308151245,
|
| 798 |
+
"learning_rate": 2.635335069067617e-05,
|
| 799 |
+
"loss": 1.44835072,
|
| 800 |
+
"memory(GiB)": 52.51,
|
| 801 |
+
"step": 360,
|
| 802 |
+
"train_speed(iter/s)": 0.108387
|
| 803 |
+
},
|
| 804 |
+
{
|
| 805 |
+
"acc": 0.62042379,
|
| 806 |
+
"epoch": 1.3644859813084111,
|
| 807 |
+
"grad_norm": 2.130258798599243,
|
| 808 |
+
"learning_rate": 2.500000000000001e-05,
|
| 809 |
+
"loss": 1.41162367,
|
| 810 |
+
"memory(GiB)": 52.51,
|
| 811 |
+
"step": 365,
|
| 812 |
+
"train_speed(iter/s)": 0.108589
|
| 813 |
+
},
|
| 814 |
+
{
|
| 815 |
+
"acc": 0.6139565,
|
| 816 |
+
"epoch": 1.3831775700934579,
|
| 817 |
+
"grad_norm": 1.8815335035324097,
|
| 818 |
+
"learning_rate": 2.367064477065652e-05,
|
| 819 |
+
"loss": 1.41061649,
|
| 820 |
+
"memory(GiB)": 52.51,
|
| 821 |
+
"step": 370,
|
| 822 |
+
"train_speed(iter/s)": 0.108788
|
| 823 |
+
},
|
| 824 |
+
{
|
| 825 |
+
"acc": 0.60995245,
|
| 826 |
+
"epoch": 1.4018691588785046,
|
| 827 |
+
"grad_norm": 2.237551689147949,
|
| 828 |
+
"learning_rate": 2.2366560942325832e-05,
|
| 829 |
+
"loss": 1.41165752,
|
| 830 |
+
"memory(GiB)": 52.51,
|
| 831 |
+
"step": 375,
|
| 832 |
+
"train_speed(iter/s)": 0.108982
|
| 833 |
+
},
|
| 834 |
+
{
|
| 835 |
+
"acc": 0.61310611,
|
| 836 |
+
"epoch": 1.4205607476635513,
|
| 837 |
+
"grad_norm": 2.2738187313079834,
|
| 838 |
+
"learning_rate": 2.108900019873103e-05,
|
| 839 |
+
"loss": 1.46329918,
|
| 840 |
+
"memory(GiB)": 52.51,
|
| 841 |
+
"step": 380,
|
| 842 |
+
"train_speed(iter/s)": 0.109169
|
| 843 |
+
},
|
| 844 |
+
{
|
| 845 |
+
"acc": 0.61972389,
|
| 846 |
+
"epoch": 1.439252336448598,
|
| 847 |
+
"grad_norm": 2.050431966781616,
|
| 848 |
+
"learning_rate": 1.983918876624902e-05,
|
| 849 |
+
"loss": 1.39380827,
|
| 850 |
+
"memory(GiB)": 52.51,
|
| 851 |
+
"step": 385,
|
| 852 |
+
"train_speed(iter/s)": 0.109353
|
| 853 |
+
},
|
| 854 |
+
{
|
| 855 |
+
"acc": 0.60818005,
|
| 856 |
+
"epoch": 1.4579439252336448,
|
| 857 |
+
"grad_norm": 2.2794229984283447,
|
| 858 |
+
"learning_rate": 1.8618326236955907e-05,
|
| 859 |
+
"loss": 1.46415033,
|
| 860 |
+
"memory(GiB)": 52.51,
|
| 861 |
+
"step": 390,
|
| 862 |
+
"train_speed(iter/s)": 0.109532
|
| 863 |
+
},
|
| 864 |
+
{
|
| 865 |
+
"acc": 0.59707479,
|
| 866 |
+
"epoch": 1.4766355140186915,
|
| 867 |
+
"grad_norm": 2.2006595134735107,
|
| 868 |
+
"learning_rate": 1.7427584417236194e-05,
|
| 869 |
+
"loss": 1.49114666,
|
| 870 |
+
"memory(GiB)": 52.51,
|
| 871 |
+
"step": 395,
|
| 872 |
+
"train_speed(iter/s)": 0.109705
|
| 873 |
+
},
|
| 874 |
+
{
|
| 875 |
+
"acc": 0.6111486,
|
| 876 |
+
"epoch": 1.4953271028037383,
|
| 877 |
+
"grad_norm": 2.0496108531951904,
|
| 878 |
+
"learning_rate": 1.626810620306163e-05,
|
| 879 |
+
"loss": 1.38812447,
|
| 880 |
+
"memory(GiB)": 52.51,
|
| 881 |
+
"step": 400,
|
| 882 |
+
"train_speed(iter/s)": 0.109877
|
| 883 |
+
},
|
| 884 |
+
{
|
| 885 |
+
"epoch": 1.4953271028037383,
|
| 886 |
+
"eval_acc": 0.5873444294496926,
|
| 887 |
+
"eval_loss": 1.5544381141662598,
|
| 888 |
+
"eval_runtime": 60.5613,
|
| 889 |
+
"eval_samples_per_second": 0.826,
|
| 890 |
+
"eval_steps_per_second": 0.826,
|
| 891 |
+
"step": 400
|
| 892 |
+
},
|
| 893 |
+
{
|
| 894 |
+
"acc": 0.60279655,
|
| 895 |
+
"epoch": 1.514018691588785,
|
| 896 |
+
"grad_norm": 1.954108476638794,
|
| 897 |
+
"learning_rate": 1.5141004483018323e-05,
|
| 898 |
+
"loss": 1.44826994,
|
| 899 |
+
"memory(GiB)": 52.51,
|
| 900 |
+
"step": 405,
|
| 901 |
+
"train_speed(iter/s)": 0.108237
|
| 902 |
+
},
|
| 903 |
+
{
|
| 904 |
+
"acc": 0.60491271,
|
| 905 |
+
"epoch": 1.5327102803738317,
|
| 906 |
+
"grad_norm": 2.4498937129974365,
|
| 907 |
+
"learning_rate": 1.4047361070135995e-05,
|
| 908 |
+
"loss": 1.4636652,
|
| 909 |
+
"memory(GiB)": 52.51,
|
| 910 |
+
"step": 410,
|
| 911 |
+
"train_speed(iter/s)": 0.108423
|
| 912 |
+
},
|
| 913 |
+
{
|
| 914 |
+
"acc": 0.59805059,
|
| 915 |
+
"epoch": 1.5514018691588785,
|
| 916 |
+
"grad_norm": 1.9891496896743774,
|
| 917 |
+
"learning_rate": 1.2988225663543602e-05,
|
| 918 |
+
"loss": 1.51361618,
|
| 919 |
+
"memory(GiB)": 52.51,
|
| 920 |
+
"step": 415,
|
| 921 |
+
"train_speed(iter/s)": 0.108601
|
| 922 |
+
},
|
| 923 |
+
{
|
| 924 |
+
"acc": 0.60604153,
|
| 925 |
+
"epoch": 1.5700934579439252,
|
| 926 |
+
"grad_norm": 2.281243324279785,
|
| 927 |
+
"learning_rate": 1.1964614840949002e-05,
|
| 928 |
+
"loss": 1.43464155,
|
| 929 |
+
"memory(GiB)": 52.51,
|
| 930 |
+
"step": 420,
|
| 931 |
+
"train_speed(iter/s)": 0.108777
|
| 932 |
+
},
|
| 933 |
+
{
|
| 934 |
+
"acc": 0.59663863,
|
| 935 |
+
"epoch": 1.588785046728972,
|
| 936 |
+
"grad_norm": 2.1692161560058594,
|
| 937 |
+
"learning_rate": 1.097751108290867e-05,
|
| 938 |
+
"loss": 1.47755518,
|
| 939 |
+
"memory(GiB)": 52.51,
|
| 940 |
+
"step": 425,
|
| 941 |
+
"train_speed(iter/s)": 0.108947
|
| 942 |
+
},
|
| 943 |
+
{
|
| 944 |
+
"acc": 0.62566915,
|
| 945 |
+
"epoch": 1.6074766355140186,
|
| 946 |
+
"grad_norm": 2.370448112487793,
|
| 947 |
+
"learning_rate": 1.0027861829824952e-05,
|
| 948 |
+
"loss": 1.36240664,
|
| 949 |
+
"memory(GiB)": 52.51,
|
| 950 |
+
"step": 430,
|
| 951 |
+
"train_speed(iter/s)": 0.109117
|
| 952 |
+
},
|
| 953 |
+
{
|
| 954 |
+
"acc": 0.60366473,
|
| 955 |
+
"epoch": 1.6261682242990654,
|
| 956 |
+
"grad_norm": 2.143240451812744,
|
| 957 |
+
"learning_rate": 9.11657857257509e-06,
|
| 958 |
+
"loss": 1.49398394,
|
| 959 |
+
"memory(GiB)": 52.51,
|
| 960 |
+
"step": 435,
|
| 961 |
+
"train_speed(iter/s)": 0.109226
|
| 962 |
+
},
|
| 963 |
+
{
|
| 964 |
+
"acc": 0.60729022,
|
| 965 |
+
"epoch": 1.644859813084112,
|
| 966 |
+
"grad_norm": 2.266324758529663,
|
| 967 |
+
"learning_rate": 8.244535977645585e-06,
|
| 968 |
+
"loss": 1.4582058,
|
| 969 |
+
"memory(GiB)": 52.51,
|
| 970 |
+
"step": 440,
|
| 971 |
+
"train_speed(iter/s)": 0.109388
|
| 972 |
+
},
|
| 973 |
+
{
|
| 974 |
+
"acc": 0.61000395,
|
| 975 |
+
"epoch": 1.6635514018691588,
|
| 976 |
+
"grad_norm": 2.243384599685669,
|
| 977 |
+
"learning_rate": 7.412571047611155e-06,
|
| 978 |
+
"loss": 1.39406261,
|
| 979 |
+
"memory(GiB)": 52.51,
|
| 980 |
+
"step": 445,
|
| 981 |
+
"train_speed(iter/s)": 0.109547
|
| 982 |
+
},
|
| 983 |
+
{
|
| 984 |
+
"acc": 0.60550241,
|
| 985 |
+
"epoch": 1.6822429906542056,
|
| 986 |
+
"grad_norm": 2.3402411937713623,
|
| 987 |
+
"learning_rate": 6.621482317764105e-06,
|
| 988 |
+
"loss": 1.44629755,
|
| 989 |
+
"memory(GiB)": 52.51,
|
| 990 |
+
"step": 450,
|
| 991 |
+
"train_speed(iter/s)": 0.109702
|
| 992 |
+
},
|
| 993 |
+
{
|
| 994 |
+
"epoch": 1.6822429906542056,
|
| 995 |
+
"eval_acc": 0.5838956365272154,
|
| 996 |
+
"eval_loss": 1.5518497228622437,
|
| 997 |
+
"eval_runtime": 60.463,
|
| 998 |
+
"eval_samples_per_second": 0.827,
|
| 999 |
+
"eval_steps_per_second": 0.827,
|
| 1000 |
+
"step": 450
|
| 1001 |
+
},
|
| 1002 |
+
{
|
| 1003 |
+
"acc": 0.62354083,
|
| 1004 |
+
"epoch": 1.7009345794392523,
|
| 1005 |
+
"grad_norm": 2.3499748706817627,
|
| 1006 |
+
"learning_rate": 5.872029089665587e-06,
|
| 1007 |
+
"loss": 1.36534414,
|
| 1008 |
+
"memory(GiB)": 52.51,
|
| 1009 |
+
"step": 455,
|
| 1010 |
+
"train_speed(iter/s)": 0.108251
|
| 1011 |
+
},
|
| 1012 |
+
{
|
| 1013 |
+
"acc": 0.60730128,
|
| 1014 |
+
"epoch": 1.719626168224299,
|
| 1015 |
+
"grad_norm": 2.479720115661621,
|
| 1016 |
+
"learning_rate": 5.164930702353782e-06,
|
| 1017 |
+
"loss": 1.44677553,
|
| 1018 |
+
"memory(GiB)": 52.51,
|
| 1019 |
+
"step": 460,
|
| 1020 |
+
"train_speed(iter/s)": 0.108417
|
| 1021 |
+
},
|
| 1022 |
+
{
|
| 1023 |
+
"acc": 0.59804258,
|
| 1024 |
+
"epoch": 1.7383177570093458,
|
| 1025 |
+
"grad_norm": 2.117152214050293,
|
| 1026 |
+
"learning_rate": 4.500865841909168e-06,
|
| 1027 |
+
"loss": 1.46659861,
|
| 1028 |
+
"memory(GiB)": 52.51,
|
| 1029 |
+
"step": 465,
|
| 1030 |
+
"train_speed(iter/s)": 0.108577
|
| 1031 |
+
},
|
| 1032 |
+
{
|
| 1033 |
+
"acc": 0.60334945,
|
| 1034 |
+
"epoch": 1.7570093457943925,
|
| 1035 |
+
"grad_norm": 2.2500483989715576,
|
| 1036 |
+
"learning_rate": 3.880471890038967e-06,
|
| 1037 |
+
"loss": 1.4467123,
|
| 1038 |
+
"memory(GiB)": 52.51,
|
| 1039 |
+
"step": 470,
|
| 1040 |
+
"train_speed(iter/s)": 0.108736
|
| 1041 |
+
},
|
| 1042 |
+
{
|
| 1043 |
+
"acc": 0.60877209,
|
| 1044 |
+
"epoch": 1.7757009345794392,
|
| 1045 |
+
"grad_norm": 2.166339635848999,
|
| 1046 |
+
"learning_rate": 3.3043443123065286e-06,
|
| 1047 |
+
"loss": 1.49398079,
|
| 1048 |
+
"memory(GiB)": 52.51,
|
| 1049 |
+
"step": 475,
|
| 1050 |
+
"train_speed(iter/s)": 0.108888
|
| 1051 |
+
},
|
| 1052 |
+
{
|
| 1053 |
+
"acc": 0.59179163,
|
| 1054 |
+
"epoch": 1.794392523364486,
|
| 1055 |
+
"grad_norm": 2.554819107055664,
|
| 1056 |
+
"learning_rate": 2.7730360865923956e-06,
|
| 1057 |
+
"loss": 1.47536173,
|
| 1058 |
+
"memory(GiB)": 52.51,
|
| 1059 |
+
"step": 480,
|
| 1060 |
+
"train_speed(iter/s)": 0.109034
|
| 1061 |
+
},
|
| 1062 |
+
{
|
| 1063 |
+
"acc": 0.58686528,
|
| 1064 |
+
"epoch": 1.8130841121495327,
|
| 1065 |
+
"grad_norm": 2.176454544067383,
|
| 1066 |
+
"learning_rate": 2.287057172336021e-06,
|
| 1067 |
+
"loss": 1.51853113,
|
| 1068 |
+
"memory(GiB)": 52.51,
|
| 1069 |
+
"step": 485,
|
| 1070 |
+
"train_speed(iter/s)": 0.10918
|
| 1071 |
+
},
|
| 1072 |
+
{
|
| 1073 |
+
"acc": 0.61157169,
|
| 1074 |
+
"epoch": 1.8317757009345794,
|
| 1075 |
+
"grad_norm": 2.2419204711914062,
|
| 1076 |
+
"learning_rate": 1.8468740210672076e-06,
|
| 1077 |
+
"loss": 1.45838099,
|
| 1078 |
+
"memory(GiB)": 52.51,
|
| 1079 |
+
"step": 490,
|
| 1080 |
+
"train_speed(iter/s)": 0.109326
|
| 1081 |
+
},
|
| 1082 |
+
{
|
| 1083 |
+
"acc": 0.604812,
|
| 1084 |
+
"epoch": 1.8504672897196262,
|
| 1085 |
+
"grad_norm": 2.1367015838623047,
|
| 1086 |
+
"learning_rate": 1.4529091286973995e-06,
|
| 1087 |
+
"loss": 1.42373133,
|
| 1088 |
+
"memory(GiB)": 52.51,
|
| 1089 |
+
"step": 495,
|
| 1090 |
+
"train_speed(iter/s)": 0.10947
|
| 1091 |
+
},
|
| 1092 |
+
{
|
| 1093 |
+
"acc": 0.59049854,
|
| 1094 |
+
"epoch": 1.8691588785046729,
|
| 1095 |
+
"grad_norm": 2.212156057357788,
|
| 1096 |
+
"learning_rate": 1.1055406300002347e-06,
|
| 1097 |
+
"loss": 1.47500782,
|
| 1098 |
+
"memory(GiB)": 52.51,
|
| 1099 |
+
"step": 500,
|
| 1100 |
+
"train_speed(iter/s)": 0.10961
|
| 1101 |
+
},
|
| 1102 |
+
{
|
| 1103 |
+
"epoch": 1.8691588785046729,
|
| 1104 |
+
"eval_acc": 0.5858449542660069,
|
| 1105 |
+
"eval_loss": 1.55006742477417,
|
| 1106 |
+
"eval_runtime": 60.5418,
|
| 1107 |
+
"eval_samples_per_second": 0.826,
|
| 1108 |
+
"eval_steps_per_second": 0.826,
|
| 1109 |
+
"step": 500
|
| 1110 |
+
},
|
| 1111 |
+
{
|
| 1112 |
+
"acc": 0.61764479,
|
| 1113 |
+
"epoch": 1.8878504672897196,
|
| 1114 |
+
"grad_norm": 2.375039577484131,
|
| 1115 |
+
"learning_rate": 8.0510193567086e-07,
|
| 1116 |
+
"loss": 1.4303463,
|
| 1117 |
+
"memory(GiB)": 52.51,
|
| 1118 |
+
"step": 505,
|
| 1119 |
+
"train_speed(iter/s)": 0.108303
|
| 1120 |
+
},
|
| 1121 |
+
{
|
| 1122 |
+
"acc": 0.60544062,
|
| 1123 |
+
"epoch": 1.9065420560747663,
|
| 1124 |
+
"grad_norm": 2.1975295543670654,
|
| 1125 |
+
"learning_rate": 5.518814123121885e-07,
|
| 1126 |
+
"loss": 1.48970194,
|
| 1127 |
+
"memory(GiB)": 52.51,
|
| 1128 |
+
"step": 510,
|
| 1129 |
+
"train_speed(iter/s)": 0.108451
|
| 1130 |
+
},
|
| 1131 |
+
{
|
| 1132 |
+
"acc": 0.60760684,
|
| 1133 |
+
"epoch": 1.925233644859813,
|
| 1134 |
+
"grad_norm": 2.173210859298706,
|
| 1135 |
+
"learning_rate": 3.4612210565528326e-07,
|
| 1136 |
+
"loss": 1.43905754,
|
| 1137 |
+
"memory(GiB)": 52.51,
|
| 1138 |
+
"step": 515,
|
| 1139 |
+
"train_speed(iter/s)": 0.108595
|
| 1140 |
+
},
|
| 1141 |
+
{
|
| 1142 |
+
"acc": 0.61625342,
|
| 1143 |
+
"epoch": 1.9439252336448598,
|
| 1144 |
+
"grad_norm": 2.543931245803833,
|
| 1145 |
+
"learning_rate": 1.8802150727962876e-07,
|
| 1146 |
+
"loss": 1.40175552,
|
| 1147 |
+
"memory(GiB)": 52.51,
|
| 1148 |
+
"step": 520,
|
| 1149 |
+
"train_speed(iter/s)": 0.108738
|
| 1150 |
+
},
|
| 1151 |
+
{
|
| 1152 |
+
"acc": 0.61394835,
|
| 1153 |
+
"epoch": 1.9626168224299065,
|
| 1154 |
+
"grad_norm": 2.0409328937530518,
|
| 1155 |
+
"learning_rate": 7.773136505700995e-08,
|
| 1156 |
+
"loss": 1.36281643,
|
| 1157 |
+
"memory(GiB)": 52.51,
|
| 1158 |
+
"step": 525,
|
| 1159 |
+
"train_speed(iter/s)": 0.108834
|
| 1160 |
+
},
|
| 1161 |
+
{
|
| 1162 |
+
"acc": 0.60506306,
|
| 1163 |
+
"epoch": 1.9813084112149533,
|
| 1164 |
+
"grad_norm": 2.187635898590088,
|
| 1165 |
+
"learning_rate": 1.5357537501159423e-08,
|
| 1166 |
+
"loss": 1.45838461,
|
| 1167 |
+
"memory(GiB)": 52.51,
|
| 1168 |
+
"step": 530,
|
| 1169 |
+
"train_speed(iter/s)": 0.10897
|
| 1170 |
+
},
|
| 1171 |
+
{
|
| 1172 |
+
"epoch": 1.9962616822429906,
|
| 1173 |
+
"eval_acc": 0.5853951117109012,
|
| 1174 |
+
"eval_loss": 1.550318956375122,
|
| 1175 |
+
"eval_runtime": 60.5393,
|
| 1176 |
+
"eval_samples_per_second": 0.826,
|
| 1177 |
+
"eval_steps_per_second": 0.826,
|
| 1178 |
+
"step": 534
|
| 1179 |
+
}
|
| 1180 |
+
],
|
| 1181 |
+
"logging_steps": 5,
|
| 1182 |
+
"max_steps": 534,
|
| 1183 |
+
"num_input_tokens_seen": 0,
|
| 1184 |
+
"num_train_epochs": 2,
|
| 1185 |
+
"save_steps": 50,
|
| 1186 |
+
"stateful_callbacks": {
|
| 1187 |
+
"TrainerControl": {
|
| 1188 |
+
"args": {
|
| 1189 |
+
"should_epoch_stop": false,
|
| 1190 |
+
"should_evaluate": false,
|
| 1191 |
+
"should_log": false,
|
| 1192 |
+
"should_save": true,
|
| 1193 |
+
"should_training_stop": true
|
| 1194 |
+
},
|
| 1195 |
+
"attributes": {}
|
| 1196 |
+
}
|
| 1197 |
+
},
|
| 1198 |
+
"total_flos": 2.3344607126351155e+17,
|
| 1199 |
+
"train_batch_size": 1,
|
| 1200 |
+
"trial_name": null,
|
| 1201 |
+
"trial_params": null
|
| 1202 |
+
}
|
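trainer_state.json above records the whole trajectory: 534 optimizer steps over 2 epochs, a training log every 5 steps, an evaluation and a checkpoint every 50 steps, and a best eval loss of about 1.5501 at checkpoint-500. A minimal sketch for reading the evaluation curve back out of the file, assuming it has been downloaded locally under the same relative path:

```python
# Sketch only: pull the eval curve out of trainer_state.json. Eval entries in
# "log_history" are the ones that carry an "eval_loss" key (written every 50 steps).
import json

with open("qwen2-vl-7b-instruct/trainer_state.json") as f:
    state = json.load(f)

eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]
for entry in eval_logs:
    print(f"step {entry['step']:>4}  eval_loss {entry['eval_loss']:.4f}  eval_acc {entry['eval_acc']:.4f}")

print("best:", state["best_metric"], "->", state["best_model_checkpoint"])
```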
qwen2-vl-7b-instruct/training_args.bin
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4d6094c64b2cfee66d8dd4d404b6e3d87c884814eed3dac6a3df1297447208dc
|
| 3 |
+
size 7416
|
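training_args.bin is tracked with Git LFS, so the three lines above are only the pointer file; the actual object is 7,416 bytes. Since the Trainer writes this file with torch.save, it unpickles back into the same Seq2SeqTrainingArguments dumped in sft_args.json. A minimal sketch, assuming the repository has been cloned, `git lfs pull` has been run so the real file is present, and a compatible transformers version is installed for unpickling:

```python
# Sketch only: load the pickled training arguments. weights_only=False is needed on
# newer PyTorch releases because this is an arbitrary pickled object, not a tensor file.
import torch

args = torch.load("qwen2-vl-7b-instruct/training_args.bin", weights_only=False)
print(type(args).__name__)                     # expected: Seq2SeqTrainingArguments
print(args.learning_rate, args.lr_scheduler_type, args.num_train_epochs)
```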