Bonnief committed on
Commit
787278a
·
verified ·
1 Parent(s): b0721e3

End of training

Browse files
Files changed (5) hide show
  1. README.md +5 -0
  2. all_results.json +16 -0
  3. eval_results.json +10 -0
  4. train_results.json +9 -0
  5. trainer_state.json +113 -0
README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
3
  base_model: castorini/afriberta_small
4
  tags:
5
  - generated_from_trainer
 
 
6
  model-index:
7
  - name: afriberta-om-flores
8
  results: []
@@ -14,6 +16,9 @@ should probably proofread and complete it, then remove this comment. -->
14
  # afriberta-om-flores
15
 
16
  This model is a fine-tuned version of [castorini/afriberta_small](https://huggingface.co/castorini/afriberta_small) on an unknown dataset.
 
 
 
17
 
18
  ## Model description
19
 
 
3
  base_model: castorini/afriberta_small
4
  tags:
5
  - generated_from_trainer
6
+ metrics:
7
+ - accuracy
8
  model-index:
9
  - name: afriberta-om-flores
10
  results: []
 
16
  # afriberta-om-flores
17
 
18
  This model is a fine-tuned version of [castorini/afriberta_small](https://huggingface.co/castorini/afriberta_small) on an unknown dataset.
19
+ It achieves the following results on the evaluation set:
20
+ - Loss: 3.6838
21
+ - Accuracy: 0.3806
22
 
23
  ## Model description
24
 
all_results.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.092050209205021,
3
+ "eval_accuracy": 0.3805601317957166,
4
+ "eval_loss": 3.6838042736053467,
5
+ "eval_runtime": 0.597,
6
+ "eval_samples": 100,
7
+ "eval_samples_per_second": 167.494,
8
+ "eval_steps_per_second": 41.874,
9
+ "perplexity": 39.7975070584133,
10
+ "total_flos": 38337612204768.0,
11
+ "train_loss": 3.658340881347656,
12
+ "train_runtime": 58.6954,
13
+ "train_samples": 1909,
14
+ "train_samples_per_second": 68.148,
15
+ "train_steps_per_second": 8.519
16
+ }
eval_results.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.092050209205021,
3
+ "eval_accuracy": 0.3805601317957166,
4
+ "eval_loss": 3.6838042736053467,
5
+ "eval_runtime": 0.597,
6
+ "eval_samples": 100,
7
+ "eval_samples_per_second": 167.494,
8
+ "eval_steps_per_second": 41.874,
9
+ "perplexity": 39.7975070584133
10
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.092050209205021,
3
+ "total_flos": 38337612204768.0,
4
+ "train_loss": 3.658340881347656,
5
+ "train_runtime": 58.6954,
6
+ "train_samples": 1909,
7
+ "train_samples_per_second": 68.148,
8
+ "train_steps_per_second": 8.519
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 2.092050209205021,
6
+ "eval_steps": 250,
7
+ "global_step": 500,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.20920502092050208,
14
+ "grad_norm": 19.030786514282227,
15
+ "learning_rate": 1.8833333333333335e-05,
16
+ "loss": 3.7517,
17
+ "step": 50
18
+ },
19
+ {
20
+ "epoch": 0.41841004184100417,
21
+ "grad_norm": 20.634578704833984,
22
+ "learning_rate": 1.675e-05,
23
+ "loss": 3.6778,
24
+ "step": 100
25
+ },
26
+ {
27
+ "epoch": 0.6276150627615062,
28
+ "grad_norm": 17.215373992919922,
29
+ "learning_rate": 1.4666666666666666e-05,
30
+ "loss": 3.7684,
31
+ "step": 150
32
+ },
33
+ {
34
+ "epoch": 0.8368200836820083,
35
+ "grad_norm": 17.602941513061523,
36
+ "learning_rate": 1.2583333333333334e-05,
37
+ "loss": 3.7645,
38
+ "step": 200
39
+ },
40
+ {
41
+ "epoch": 1.0460251046025104,
42
+ "grad_norm": 20.645366668701172,
43
+ "learning_rate": 1.0500000000000001e-05,
44
+ "loss": 3.6481,
45
+ "step": 250
46
+ },
47
+ {
48
+ "epoch": 1.2552301255230125,
49
+ "grad_norm": 17.175325393676758,
50
+ "learning_rate": 8.416666666666667e-06,
51
+ "loss": 3.7027,
52
+ "step": 300
53
+ },
54
+ {
55
+ "epoch": 1.4644351464435146,
56
+ "grad_norm": 21.4941349029541,
57
+ "learning_rate": 6.333333333333333e-06,
58
+ "loss": 3.4982,
59
+ "step": 350
60
+ },
61
+ {
62
+ "epoch": 1.6736401673640167,
63
+ "grad_norm": 17.862903594970703,
64
+ "learning_rate": 4.25e-06,
65
+ "loss": 3.4812,
66
+ "step": 400
67
+ },
68
+ {
69
+ "epoch": 1.8828451882845187,
70
+ "grad_norm": 17.78984260559082,
71
+ "learning_rate": 2.166666666666667e-06,
72
+ "loss": 3.5468,
73
+ "step": 450
74
+ },
75
+ {
76
+ "epoch": 2.092050209205021,
77
+ "grad_norm": 22.636425018310547,
78
+ "learning_rate": 8.333333333333334e-08,
79
+ "loss": 3.744,
80
+ "step": 500
81
+ },
82
+ {
83
+ "epoch": 2.092050209205021,
84
+ "step": 500,
85
+ "total_flos": 38337612204768.0,
86
+ "train_loss": 3.658340881347656,
87
+ "train_runtime": 58.6954,
88
+ "train_samples_per_second": 68.148,
89
+ "train_steps_per_second": 8.519
90
+ }
91
+ ],
92
+ "logging_steps": 50,
93
+ "max_steps": 500,
94
+ "num_input_tokens_seen": 0,
95
+ "num_train_epochs": 3,
96
+ "save_steps": 250,
97
+ "stateful_callbacks": {
98
+ "TrainerControl": {
99
+ "args": {
100
+ "should_epoch_stop": false,
101
+ "should_evaluate": false,
102
+ "should_log": false,
103
+ "should_save": true,
104
+ "should_training_stop": true
105
+ },
106
+ "attributes": {}
107
+ }
108
+ },
109
+ "total_flos": 38337612204768.0,
110
+ "train_batch_size": 4,
111
+ "trial_name": null,
112
+ "trial_params": null
113
+ }