Commit 2c97e39 (verified), committed by spamacc · Parent: b7cf15a

End of training

README.md CHANGED
@@ -14,8 +14,6 @@ should probably proofread and complete it, then remove this comment. -->
 # t5base-fine-tuned
 
 This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the None dataset.
-It achieves the following results on the evaluation set:
-- Loss: 0.8941
 
 ## Model description
 
@@ -35,8 +33,8 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 2e-05
-- train_batch_size: 6
-- eval_batch_size: 6
+- train_batch_size: 2
+- eval_batch_size: 2
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
@@ -45,15 +43,11 @@ The following hyperparameters were used during training:
 
 ### Training results
 
-| Training Loss | Epoch | Step  | Validation Loss |
-|:-------------:|:-----:|:-----:|:---------------:|
-| 0.7534        | 0.86  | 10000 | 0.6521          |
-| 0.9127        | 1.72  | 20000 | 0.8941          |
 
 
 ### Framework versions
 
-- Transformers 4.37.0
+- Transformers 4.38.2
 - Pytorch 2.1.2
 - Datasets 2.1.0
-- Tokenizers 0.15.1
+- Tokenizers 0.15.2
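
For reference, below is a minimal sketch of training arguments consistent with the updated card. Only the hyperparameters listed in the README hunk above (learning rate, batch sizes, seed, Adam betas/epsilon, linear schedule) come from the card; the output directory and everything else is an assumption, not the author's actual training script.

```python
# Sketch only: Seq2SeqTrainingArguments matching the hyperparameters in the
# updated README. output_dir is assumed; epoch count and dataset wiring are
# intentionally omitted because they are not shown in this diff.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="t5base-fine-tuned",   # assumed name
    learning_rate=2e-5,
    per_device_train_batch_size=2,    # train_batch_size: 2
    per_device_eval_batch_size=2,     # eval_batch_size: 2
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
)
```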
config.json CHANGED
@@ -55,7 +55,7 @@
     }
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.37.0",
+  "transformers_version": "4.38.2",
   "use_cache": true,
   "vocab_size": 32128
 }
generation_config.json CHANGED
@@ -3,5 +3,5 @@
   "decoder_start_token_id": 0,
   "eos_token_id": 1,
   "pad_token_id": 0,
-  "transformers_version": "4.37.0"
+  "transformers_version": "4.38.2"
 }
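
As a hedged illustration, the file above corresponds to the following GenerationConfig; the special-token ids are taken from generation_config.json itself, while the repo id is an assumption inferred from the commit author and model name.

```python
# Sketch: GenerationConfig equivalent to the updated generation_config.json.
from transformers import GenerationConfig

gen_config = GenerationConfig(
    decoder_start_token_id=0,
    eos_token_id=1,
    pad_token_id=0,
)
# Or load it from the Hub (repo id assumed):
# gen_config = GenerationConfig.from_pretrained("spamacc/t5base-fine-tuned")
```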
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:21c743a9551de0d8498e60c57de9217b98e3a45f7af2c2ae696377d6616110e3
+oid sha256:fabb244f1f9bb3bc93cc6d45cfaba9c7fae9d1bb5f81d4400ce25c80a744c213
 size 891644712
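
This is the Git LFS pointer for the new weights; the file size is unchanged, only the checkpoint contents differ. A short sketch of loading the updated checkpoint follows; `from_pretrained` downloads the resolved safetensors file from the Hub, and the repo id is again an assumption.

```python
# Sketch: loading the updated model.safetensors checkpoint from the Hub.
from transformers import T5ForConditionalGeneration, T5Tokenizer

model = T5ForConditionalGeneration.from_pretrained("spamacc/t5base-fine-tuned")
tokenizer = T5Tokenizer.from_pretrained("spamacc/t5base-fine-tuned")
```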
runs/Apr05_20-39-18_00b1e96a9200/events.out.tfevents.1712349562.00b1e96a9200.34.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b6805d17455faeafac3275399850f1980c8f59d33e225f0263d6093d51c817e
+size 5570
runs/Apr05_20-48-50_00b1e96a9200/events.out.tfevents.1712350138.00b1e96a9200.34.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bef8cb27017dcd3fdd3bfe0dd2b88a3ec147c018f1578331b53cf2fab4b304d
+size 16519
runs/Apr05_20-48-50_00b1e96a9200/events.out.tfevents.1712350499.00b1e96a9200.34.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:466f7322e7e11b89f8f37f085d1ae5d6ac65bc13a4af721170cd6dab9d15ff41
+size 4184
runs/Apr05_20-48-50_00b1e96a9200/events.out.tfevents.1712350534.00b1e96a9200.34.3 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b92900b9f33568cfce725683d35450c998055d030c4c14b885da6e5393d6278e
+size 4184
runs/Apr05_20-56-09_00b1e96a9200/events.out.tfevents.1712350571.00b1e96a9200.261.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ebe3fe8a1d8a32ee83b74339e02423c5ace55a519ba5f2107a19e1e471ae45b
+size 5565
runs/Apr05_21-14-14_00b1e96a9200/events.out.tfevents.1712351659.00b1e96a9200.261.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61e48f2c9366bcd2663c94dc9b24f90b00068941d96a07705dbbb6e18f357076
+size 5919
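
The added files above are TensorBoard event logs (Git LFS pointers) recorded during the training runs on host 00b1e96a9200; after cloning the repository they can be inspected locally by pointing TensorBoard at the runs/ directory.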
tokenizer.json CHANGED
@@ -2,13 +2,13 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length": 512,
+    "max_length": 128,
     "strategy": "LongestFirst",
     "stride": 0
   },
   "padding": {
     "strategy": {
-      "Fixed": 512
+      "Fixed": 128
     },
     "direction": "Right",
     "pad_to_multiple_of": null,
tokenizer_config.json CHANGED
@@ -930,7 +930,7 @@
   "clean_up_tokenization_spaces": true,
   "eos_token": "</s>",
   "extra_ids": 100,
-  "model_max_length": 512,
+  "model_max_length": 128,
   "pad_token": "<pad>",
   "tokenizer_class": "T5Tokenizer",
   "unk_token": "<unk>"
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e86fa45c23b1f841a99f409b1f0b7cdc6488be4fa7a84ff40fa0775ce20e33bb
+oid sha256:f3984ce4a61f9e260566ba58eeabc858d1197f2762f4822ebaf1832b44ff65dd
-size 4856
+size 5048