Image-Text-to-Text
Transformers
Safetensors
GGUF
gemma3
turkish
türkiye
english
ai
lamapi
next
next-x1
efficient
text-generation
open-source
4b
huggingface
large-language-model
llm
causal
transformer
artificial-intelligence
machine-learning
ai-research
natural-language-processing
language
multilingual
multimodal
nlp
finetuned
lightweight
creative
summarization
question-answering
chat
generative-ai
optimized
unsloth
trl
sft
chemistry
code
biology
finance
legal
music
art
state-of-the-art
climate
medical
agent
text-generation-inference
Merge
dense
conversational
Upload 3 files
- tokenizer.json +3 -0
- tokenizer.model +3 -0
- tokenizer_config.json +3 -3
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+size 33384568
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+size 4689074
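Both new files are Git LFS pointers rather than the tokenizer payloads themselves: the commit records only the pointer spec version, a sha256 object ID, and the blob size (about 33 MB for tokenizer.json, 4.7 MB for tokenizer.model), while the actual content lives in LFS storage. As a minimal sketch, with hypothetical local file names, a client could parse such a pointer and verify a downloaded blob against it:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer ('key value' per line) into a dict."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def blob_matches_pointer(blob_path: str, fields: dict) -> bool:
    """Check a downloaded blob against the pointer's sha256 oid and size."""
    data = Path(blob_path).read_bytes()
    algo, _, expected = fields["oid"].partition(":")
    return (
        algo == "sha256"
        and len(data) == int(fields["size"])
        and hashlib.sha256(data).hexdigest() == expected
    )

# Hypothetical names: the pointer text as committed here, and the
# resolved blob as fetched from LFS storage.
fields = parse_lfs_pointer("tokenizer.json.pointer")
print(blob_matches_pointer("tokenizer.json", fields))
```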
tokenizer_config.json
CHANGED
@@ -51325,18 +51325,18 @@
 },
 "boi_token": "<start_of_image>",
 "bos_token": "<bos>",
+"chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
 "clean_up_tokenization_spaces": false,
 "eoi_token": "<end_of_image>",
-"eos_token": "<
+"eos_token": "<eos>",
 "extra_special_tokens": {
 "boi_token": "<start_of_image>",
 "eoi_token": "<end_of_image>",
 "image_token": "<image_soft_token>"
 },
 "image_token": "<image_soft_token>",
-"model_max_length":
+"model_max_length": 1000000000000000019884624838656,
 "pad_token": "<pad>",
-"padding_side": "right",
 "processor_class": "Gemma3Processor",
 "sp_model_kwargs": null,
 "spaces_between_special_tokens": false,
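The added chat_template is Gemma 3's conversation format: an optional system message is folded into the first user turn, the assistant role is rendered as model, image content items become <start_of_image>, each turn is wrapped in <start_of_turn>…<end_of_turn>, and <start_of_turn>model is appended when a generation prompt is requested. A minimal sketch of applying it through transformers' standard apply_chat_template API (the repo ID Lamapi/next-x1 is inferred from the tags above, not confirmed):

```python
from transformers import AutoTokenizer

# Repo ID inferred from the model-card tags; substitute the real one.
tokenizer = AutoTokenizer.from_pretrained("Lamapi/next-x1")

messages = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Summarize Git LFS in one sentence."},
]

# tokenize=False returns the rendered prompt string;
# add_generation_prompt=True appends '<start_of_turn>model\n'.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
print(tokenizer.eos_token)  # '<eos>' after this commit, per the diff above
```

The new model_max_length, 1000000000000000019884624838656, is simply int(1e30), the nearest IEEE-754 double to 10^30; this matches transformers' VERY_LARGE_INTEGER sentinel, used when a tokenizer config records no practical context limit.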