Wajdi Ghezaiel
committed
Update README.md
README.md CHANGED

@@ -10,7 +10,7 @@ tags:
 - llama
 - trl
 datasets:
--
+- linagora/Tunisian_Derja_Dataset
 library_name: transformers
 ---
 ## Model Overview
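The front-matter change above links the model card to its training corpus. As a quick sanity check that the reference resolves, the snippet below is a minimal sketch that loads the dataset with the Hugging Face `datasets` library; the `train` split name is an assumption, since the card does not state which splits the repo exposes.

```python
# Minimal sketch: pull the dataset referenced in the card's front matter.
# Assumes the repo is public and exposes a default "train" split.
from datasets import load_dataset

ds = load_dataset("linagora/Tunisian_Derja_Dataset", split="train")
print(ds)     # dataset size and column names
print(ds[0])  # first example, to inspect the text field
```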
@@ -35,7 +35,7 @@ max_seq_length = 128 # Choose any! We auto support RoPE Scaling internally!
 dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
 load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
 model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name = "
+    model_name = "linagora/Labess-7b-chat-v1",
     max_seq_length = max_seq_length,
     dtype = dtype,
     load_in_4bit = load_in_4bit,
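With this edit, the README's snippet points Unsloth at the published checkpoint. For context, a minimal end-to-end sketch of the same load path, plus one generation call, might look like the following; the chat-template usage, the example prompt, and the decoding settings are assumptions added for illustration, not part of the diff.

```python
# Minimal sketch: load the checkpoint named in the diff with Unsloth and
# run a single generation. Prompt and decoding settings are assumptions.
from unsloth import FastLanguageModel

max_seq_length = 128   # as in the README snippet
dtype = None           # auto-detect; float16 on T4/V100, bfloat16 on Ampere+
load_in_4bit = True    # 4-bit quantization to reduce memory usage

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "linagora/Labess-7b-chat-v1",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)
FastLanguageModel.for_inference(model)  # switch Unsloth to inference mode

# Hypothetical Tunisian Derja prompt ("How are you?").
messages = [{"role": "user", "content": "شنو أحوالك؟"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(input_ids=inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```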
|