OfficerChul committed · verified · Commit 28fc6cf · 1 Parent(s): 21ce0db

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
Modelfile ADDED
@@ -0,0 +1,14 @@
+ # ollama modelfile auto-generated by llamafactory
+
+ FROM .
+
+ TEMPLATE """<bos>{{ if .System }}{{ .System }}
+
+ {{ end }}{{ range .Messages }}{{ if eq .Role "user" }}<start_of_turn>user
+ {{ .Content }}<end_of_turn>
+ <start_of_turn>model
+ {{ else if eq .Role "assistant" }}{{ .Content }}<end_of_turn>
+ {{ end }}{{ end }}"""
+
+ PARAMETER stop "<end_of_turn>"
+ PARAMETER num_ctx 4096
README.md CHANGED
@@ -1,3 +1,85 @@
- ---
- license: apache-2.0
- ---
+ # Gemma-3n-E2B-it Android Control LoRA Fine-tuned Model
+
+ ## Model Overview
+ This model is a LoRA fine-tune of Google's `gemma-3n-E2B-it` base model, adapted for Android UI control tasks.
+
+ ## Training Data
+ - **Dataset**: [OfficerChul/gemma-3n-E2B-it-Antroid-Control-84k](https://huggingface.co/OfficerChul/gemma-3n-E2B-it-Antroid-Control-84k)
+ - **Data Format**: Mobile UI screenshots paired with user instructions and the corresponding target actions (click, scroll, text input, etc.)
+
+ ### Training Data Format Example
+ ```json
+ {
+   "messages": [
+     {
+       "role": "system",
+       "content": "You are a helpful assistant that can identify what action to perform on mobile UI Screenshot given the user instruction."
+     },
+     {
+       "role": "user",
+       "content": "<image>Click on the Recording 2"
+     },
+     {
+       "role": "assistant",
+       "content": "{\"action_type\": \"click\", \"x\": 561, \"y\": 535}"
+     }
+   ],
+   "images": ["and_ctrl/out_episode_18557_step_001.png"]
+ }
+ ```
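+
+ In this LLaMA-Factory-style multimodal format, each `<image>` placeholder in a user turn corresponds positionally to a path in the top-level `images` list, and the assistant turn is the JSON-encoded target action the model learns to emit.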
+
+ ## Training Method
+ LoRA fine-tuning was performed with the [LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory) framework.
+
+ ### 1. Training Configuration (`gemma3n-e2b-it.yaml`)
+ - **Base Model**: `google/gemma-3n-E2B-it`
+ - **Training Method**: LoRA (Low-Rank Adaptation)
+ - **LoRA Configuration**:
+   - Rank: 32
+   - Target modules: `q_proj, k_proj, v_proj, o_proj`
+ - **Training Parameters**:
+   - Per-device batch size: 4 (gradient accumulation: 48)
+   - Learning rate: 2e-5
+   - Epochs: 5
+   - LR scheduler: cosine
+   - Optimizer: AdamW (fused)
+   - Precision: bf16
+ - **Additional Settings**:
+   - Gradient checkpointing enabled
+   - Vision tower, multi-modal projector, and language model all trainable
+   - DeepSpeed ZeRO-2
+
+ ### 2. Model Merging (`gemma3n-e2b-it_lora_sft_merge.yaml`)
+ The trained LoRA adapter was merged into the base model:
+ - **Base Model**: `google/gemma-3n-E2B-it`
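+
+ With LLaMA-Factory installed, this merge step is typically run as `llamafactory-cli export gemma3n-e2b-it_lora_sft_merge.yaml` (the export config included in this commit); it applies the adapter to the base model and writes the merged weights to `output/gemma-e2b-lora-sft`.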
+
+ ## Supported Action Types
+ - `click`: Click on specific coordinates
+ - `long_press`: Long press action
+ - `scroll`: Scroll (up/down)
+ - `input_text`: Text input
+ - `navigate_back`: Navigate back
+ - `navigate_home`: Navigate to the home screen
+ - `open_app`: Open an application
+ - `wait`: Wait action
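+
+ Because the model emits each action as a single JSON object (see the training example above), its output can be parsed directly by downstream automation. A minimal sketch, assuming the raw generated text is available; the `parse_action` helper is illustrative, not part of the released code:
+
+ ```python
+ import json
+
+ SUPPORTED_ACTIONS = {
+     "click", "long_press", "scroll", "input_text",
+     "navigate_back", "navigate_home", "open_app", "wait",
+ }
+
+ def parse_action(output_text: str) -> dict:
+     """Parse the model's generated action string into a dict."""
+     action = json.loads(output_text.strip())
+     if action.get("action_type") not in SUPPORTED_ACTIONS:
+         raise ValueError(f"Unexpected action: {action!r}")
+     return action
+
+ # Example: the assistant turn from the training data above.
+ print(parse_action('{"action_type": "click", "x": 561, "y": 535}'))
+ # -> {'action_type': 'click', 'x': 561, 'y': 535}
+ ```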
+
+ ## Usage
+ The merged model can be loaded directly with the Hugging Face Transformers library.
+
+ ```python
+ from transformers import AutoModelForImageTextToText, AutoProcessor
+
+ # The checkpoint is saved as Gemma3nForConditionalGeneration (a multimodal
+ # model), so the image-text-to-text auto classes are used rather than
+ # AutoModelForCausalLM/AutoTokenizer.
+ model_path = "output/gemma-e2b-lora-sft"
+ processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
+ model = AutoModelForImageTextToText.from_pretrained(
+     model_path,
+     trust_remote_code=True,
+ )
+ ```
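+
+ For end-to-end inference, the screenshot and instruction go through the processor's chat template. A minimal sketch, assuming the multimodal chat-template API of recent Transformers releases; the screenshot path and instruction are placeholders:
+
+ ```python
+ from PIL import Image
+
+ image = Image.open("screenshot.png")  # placeholder path
+ messages = [
+     {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant that can identify what action to perform on mobile UI Screenshot given the user instruction."}]},
+     {"role": "user", "content": [
+         {"type": "image", "image": image},
+         {"type": "text", "text": "Click on the Recording 2"},
+     ]},
+ ]
+ inputs = processor.apply_chat_template(
+     messages,
+     add_generation_prompt=True,
+     tokenize=True,
+     return_dict=True,
+     return_tensors="pt",
+ ).to(model.device)
+ output_ids = model.generate(**inputs, max_new_tokens=64)
+ # Decode only the newly generated tokens after the prompt.
+ new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
+ print(processor.decode(new_tokens, skip_special_tokens=True))
+ # e.g. {"action_type": "click", "x": 561, "y": 535}
+ ```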
+
+ ## License
+ This model follows the license terms of the Google Gemma models.
+
+ ## Notes
+ - This model was developed for research on mobile UI automation and accessibility enhancement
+ - Validate the model thoroughly before using it in production environments
chat_template.jinja ADDED
@@ -0,0 +1,49 @@
+ {{ bos_token }}
+ {%- if messages[0]['role'] == 'system' -%}
+ {%- if messages[0]['content'] is string -%}
+ {%- set first_user_prefix = messages[0]['content'] + '
+
+ ' -%}
+ {%- else -%}
+ {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+ ' -%}
+ {%- endif -%}
+ {%- set loop_messages = messages[1:] -%}
+ {%- else -%}
+ {%- set first_user_prefix = "" -%}
+ {%- set loop_messages = messages -%}
+ {%- endif -%}
+ {%- for message in loop_messages -%}
+ {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+ {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+ {%- endif -%}
+ {%- if (message['role'] == 'assistant') -%}
+ {%- set role = "model" -%}
+ {%- else -%}
+ {%- set role = message['role'] -%}
+ {%- endif -%}
+ {{ '<start_of_turn>' + role + '
+ ' + (first_user_prefix if loop.first else "") }}
+ {%- if message['content'] is string -%}
+ {{ message['content'] | trim }}
+ {%- elif message['content'] is iterable -%}
+ {%- for item in message['content'] -%}
+ {%- if item['type'] == 'audio' -%}
+ {{ '<audio_soft_token>' }}
+ {%- elif item['type'] == 'image' -%}
+ {{ '<image_soft_token>' }}
+ {%- elif item['type'] == 'text' -%}
+ {{ item['text'] | trim }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{ raise_exception("Invalid content type") }}
+ {%- endif -%}
+ {{ '<end_of_turn>
+ ' }}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+ {{'<start_of_turn>model
+ '}}
+ {%- endif -%}
config.json ADDED
@@ -0,0 +1,210 @@
+ {
+   "architectures": [
+     "Gemma3nForConditionalGeneration"
+   ],
+   "audio_config": {
+     "conf_attention_chunk_size": 12,
+     "conf_attention_context_left": 13,
+     "conf_attention_context_right": 0,
+     "conf_attention_logit_cap": 50.0,
+     "conf_conv_kernel_size": 5,
+     "conf_num_attention_heads": 8,
+     "conf_num_hidden_layers": 12,
+     "conf_reduction_factor": 4,
+     "conf_residual_weight": 0.5,
+     "dtype": "bfloat16",
+     "gradient_clipping": 10000000000.0,
+     "hidden_size": 1536,
+     "input_feat_size": 128,
+     "model_type": "gemma3n_audio",
+     "rms_norm_eps": 1e-06,
+     "sscp_conv_channel_size": [128, 32],
+     "sscp_conv_group_norm_eps": 0.001,
+     "sscp_conv_kernel_size": [[3, 3], [3, 3]],
+     "sscp_conv_stride_size": [[2, 2], [2, 2]],
+     "vocab_offset": 262272,
+     "vocab_size": 128
+   },
+   "audio_soft_tokens_per_image": 188,
+   "audio_token_id": 262273,
+   "boa_token_id": 256000,
+   "boi_token_id": 255999,
+   "dtype": "bfloat16",
+   "eoa_token_id": 262272,
+   "eoi_token_id": 262144,
+   "eos_token_id": [1, 106],
+   "hidden_size": 2048,
+   "image_token_id": 262145,
+   "initializer_range": 0.02,
+   "model_type": "gemma3n",
+   "text_config": {
+     "activation_sparsity_pattern": [
+       0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95,
+       0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+       0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
+     ],
+     "altup_active_idx": 0,
+     "altup_coef_clip": 120.0,
+     "altup_correct_scale": true,
+     "altup_num_inputs": 4,
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "dtype": "bfloat16",
+     "final_logit_softcapping": 30.0,
+     "head_dim": 256,
+     "hidden_activation": "gelu_pytorch_tanh",
+     "hidden_size": 2048,
+     "hidden_size_per_layer_input": 256,
+     "initializer_range": 0.02,
+     "intermediate_size": [
+       8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+       8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+       8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192
+     ],
+     "laurel_rank": 64,
+     "layer_types": [
+       "sliding_attention", "sliding_attention", "sliding_attention", "sliding_attention", "full_attention",
+       "sliding_attention", "sliding_attention", "sliding_attention", "sliding_attention", "full_attention",
+       "sliding_attention", "sliding_attention", "sliding_attention", "sliding_attention", "full_attention",
+       "sliding_attention", "sliding_attention", "sliding_attention", "sliding_attention", "full_attention",
+       "sliding_attention", "sliding_attention", "sliding_attention", "sliding_attention", "full_attention",
+       "sliding_attention", "sliding_attention", "sliding_attention", "sliding_attention", "full_attention"
+     ],
+     "max_position_embeddings": 32768,
+     "model_type": "gemma3n_text",
+     "num_attention_heads": 8,
+     "num_hidden_layers": 30,
+     "num_key_value_heads": 2,
+     "num_kv_shared_layers": 10,
+     "rms_norm_eps": 1e-06,
+     "rope_local_base_freq": 10000.0,
+     "rope_scaling": null,
+     "rope_theta": 1000000.0,
+     "sliding_window": 512,
+     "use_cache": true,
+     "vocab_size": 262400,
+     "vocab_size_per_layer_input": 262144
+   },
+   "transformers_version": "4.57.0",
+   "use_cache": true,
+   "vision_config": {
+     "architecture": "mobilenetv5_300m_enc",
+     "do_pooling": false,
+     "dtype": "bfloat16",
+     "hidden_size": 2048,
+     "initializer_range": 0.02,
+     "label_names": ["LABEL_0", "LABEL_1"],
+     "model_args": null,
+     "model_type": "gemma3n_vision",
+     "num_classes": 2,
+     "rms_norm_eps": 1e-06,
+     "vocab_offset": 262144,
+     "vocab_size": 128
+   },
+   "vision_soft_tokens_per_image": 256
+ }
gemma3n-e2b-it.yaml ADDED
@@ -0,0 +1,57 @@
+ ### model
+ model_name_or_path: google/gemma-3n-E2B-it
+ image_max_pixels: 1048576
+ trust_remote_code: true
+
+ ### method
+ stage: sft
+ do_train: true
+ finetuning_type: lora
+ freeze_vision_tower: false
+ freeze_multi_modal_projector: false
+ freeze_language_model: false
+ lora_rank: 32
+ lora_target: "q_proj, k_proj, v_proj, o_proj"
+ deepspeed: examples/deepspeed/ds_z2_config.json
+
+ ### dataset
+ dataset: and_ctrl_train  # video: mllm_video_demo
+ template: gemma3n
+ cutoff_len: 1024  # 2048 -> 1024 (shorter sequences save time and memory)
+ # max_samples: 1000  # remove/comment out for full-dataset training
+ overwrite_cache: true
+ preprocessing_num_workers: 8
+ dataloader_num_workers: 8  # 8-16 recommended; set to a middle value
+
+ ### output
+ output_dir: saves/gemma-e2b/lora/sft/train
+ logging_steps: 1
+ save_steps: 300
+ plot_loss: true
+ overwrite_output_dir: true
+ save_only_model: false
+ report_to: wandb  # choices: [none, wandb, tensorboard, swanlab, mlflow]
+ run_name: and_ctrl-skt-gemma-e2b-lora-sft-train
+
+ ### train
+ per_device_train_batch_size: 4
+ gradient_accumulation_steps: 48
+ learning_rate: 2e-5
+ num_train_epochs: 5.0
+ lr_scheduler_type: cosine
+ warmup_ratio: 0.1
+ bf16: true
+ ddp_timeout: 180000000
+ resume_from_checkpoint: null
+ seed: 42
+
+ # additional settings (as requested)
+ gradient_checkpointing: true
+ optim: adamw_torch_fused
+ weight_decay: 0.01
+
+ ### eval
+ val_size: 0.05
+ per_device_eval_batch_size: 1
+ eval_strategy: steps
+ eval_steps: 500
gemma3n-e2b-it_lora_sft_merge.yaml ADDED
@@ -0,0 +1,11 @@
+ ### model
+ model_name_or_path: google/gemma-3n-E2B-it
+ adapter_name_or_path: saves/gemma-e2b/lora/sft/train
+ template: gemma3n
+ trust_remote_code: true
+
+ ### export
+ export_dir: output/gemma-e2b-lora-sft
+ export_size: 5
+ export_device: auto
+ export_legacy_format: false
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 2,
+   "cache_implementation": "hybrid",
+   "do_sample": true,
+   "eos_token_id": [1, 106],
+   "pad_token_id": 0,
+   "top_k": 64,
+   "top_p": 0.95,
+   "transformers_version": "4.57.0"
+ }
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a1a78f91baca05bb933d67e3dad9e643b56b5b45c3384463879c0ed59a3a8e4
+ size 4983256208
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af670d3e63e9afd9feb88a3f044bc9b6a046a18b232b5a054f1c912c68d21fe8
+ size 4992754064
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f6c2f72292cb7b0b2081dde3451cbe423e47288dd8409f851f3785c3ce38bf0
+ size 903075744
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "crop_size": null,
+   "data_format": "channels_first",
+   "default_to_square": false,
+   "device": null,
+   "disable_grouping": null,
+   "dither": 0.0,
+   "do_center_crop": null,
+   "do_convert_rgb": null,
+   "do_normalize": false,
+   "do_pad": null,
+   "do_rescale": true,
+   "do_resize": true,
+   "feature_size": 128,
+   "fft_length": 1024,
+   "fft_overdrive": true,
+   "frame_length": 512,
+   "hop_length": 160,
+   "image_mean": [0.5, 0.5, 0.5],
+   "image_processor_type": "SiglipImageProcessorFast",
+   "image_seq_length": 256,
+   "image_std": [0.5, 0.5, 0.5],
+   "input_data_format": null,
+   "input_scale_factor": 1.0,
+   "max_frequency": 7600.0,
+   "mel_floor": 1e-05,
+   "min_frequency": 125.0,
+   "pad_size": null,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "per_bin_mean": null,
+   "per_bin_stddev": null,
+   "preemphasis": 0.97,
+   "preemphasis_htk_flavor": true,
+   "processor_class": "Gemma3nProcessor",
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "return_attention_mask": true,
+   "return_tensors": null,
+   "sampling_rate": 16000,
+   "size": {
+     "height": 768,
+     "width": 768
+   }
+ }
processor_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "audio_seq_length": 188,
+   "image_seq_length": 256,
+   "processor_class": "Gemma3nProcessor"
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "audio_token": "<audio_soft_token>",
+   "boa_token": "<start_of_audio>",
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoa_token": "<end_of_audio>",
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<end_of_turn>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6c35ee648c07754b44cd9e371c75d4caa05c4504910b7ad29b1847ee9d8ba5d
+ size 33442553
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea5f0cc48abfbfc04d14562270a32e02149a3e7035f368cc5a462786f4a59961
+ size 4696020
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff