add tucano results to experiments
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- bootstrap_confidence_intervals-00000-of-00001.parquet +2 -2
- bootstrap_confidence_intervals_Macro_F1_table.txt +8 -0
- bootstrap_confidence_intervals_QWK_table.txt +8 -0
- bootstrap_confidence_intervals_Weighted_F1_table.txt +8 -0
- create_latex_tables.py +1 -1
- create_parquet_files.py +8 -0
- evaluation_results-00000-of-00001.parquet +2 -2
- evaluation_table_avg_only.txt +8 -0
- evaluation_table_full.txt +20 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/.hydra/config.yaml +46 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/.hydra/hydra.yaml +157 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/.hydra/overrides.yaml +1 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/bootstrap_confidence_intervals.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/evaluation_results.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16_inference_results.jsonl +0 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/run_inference_experiment.log +166 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/.hydra/config.yaml +46 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/.hydra/hydra.yaml +157 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/.hydra/overrides.yaml +1 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/bootstrap_confidence_intervals.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/evaluation_results.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8_inference_results.jsonl +0 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/run_inference_experiment.log +166 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/.hydra/config.yaml +46 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/.hydra/hydra.yaml +157 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/.hydra/overrides.yaml +1 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/bootstrap_confidence_intervals.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/evaluation_results.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16_inference_results.jsonl +0 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/run_inference_experiment.log +166 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/.hydra/config.yaml +46 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/.hydra/hydra.yaml +157 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/.hydra/overrides.yaml +1 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/bootstrap_confidence_intervals.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/evaluation_results.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8_inference_results.jsonl +0 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/run_inference_experiment.log +166 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/.hydra/config.yaml +46 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/.hydra/hydra.yaml +157 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/.hydra/overrides.yaml +1 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/bootstrap_confidence_intervals.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/evaluation_results.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16_inference_results.jsonl +0 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/run_inference_experiment.log +166 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/.hydra/config.yaml +46 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/.hydra/hydra.yaml +157 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/.hydra/overrides.yaml +1 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/bootstrap_confidence_intervals.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/evaluation_results.csv +2 -0
- runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8_inference_results.jsonl +0 -0
bootstrap_confidence_intervals-00000-of-00001.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4225cace413654252cbb06c68342c266aa8dc12eb4c2ab878cdace0b985b9d1e
+size 33112
bootstrap_confidence_intervals_Macro_F1_table.txt
CHANGED
@@ -35,6 +35,14 @@ phi4-lora-essay-only-r8 & 0.37 & 0.64 & 0.21 & 0.39 & 0.18 & 0.32 & 0.19 & 0.42
 phi4-lora-full-context-r16 & 0.31 & 0.55 & 0.32 & 0.54 & 0.32 & 0.53 & 0.23 & 0.48 & 0.21 & 0.39 \\
 \hline
 phi4-lora-full-context-r8 & 0.36 & 0.61 & 0.31 & 0.54 & 0.27 & 0.48 & 0.24 & 0.48 & 0.23 & 0.37 \\
+\hline
+tucano2b4-lora-essay-only-r16 & 0.38 & 0.65 & 0.12 & 0.23 & 0.18 & 0.32 & 0.23 & 0.56 & 0.19 & 0.35 \\
+\hline
+tucano2b4-lora-essay-only-r8 & 0.38 & 0.66 & 0.11 & 0.20 & 0.18 & 0.34 & 0.22 & 0.46 & 0.19 & 0.31 \\
+\hline
+tucano2b4-lora-full-context-r16 & 0.31 & 0.51 & 0.13 & 0.28 & 0.15 & 0.30 & 0.27 & 0.52 & 0.19 & 0.31 \\
+\hline
+tucano2b4-lora-full-context-r8 & 0.31 & 0.55 & 0.16 & 0.30 & 0.17 & 0.34 & 0.22 & 0.50 & 0.14 & 0.25 \\
 \hline\hline
 deepseekR1-extractor-essay\_only & 0.03 & 0.14 & 0.00 & 0.03 & 0.19 & 0.37 & 0.23 & 0.46 & 0.28 & 0.42 \\
 deepseekR1-extractor-full\_context & 0.01 & 0.06 & 0.19 & 0.38 & 0.24 & 0.40 & 0.26 & 0.50 & 0.30 & 0.45 \\
bootstrap_confidence_intervals_QWK_table.txt
CHANGED
@@ -35,6 +35,14 @@ phi4-lora-essay-only-r8 & 0.57 & 0.75 & 0.18 & 0.47 & 0.17 & 0.48 & 0.35 & 0.60
 phi4-lora-full-context-r16 & 0.51 & 0.70 & 0.47 & 0.71 & 0.53 & 0.76 & 0.49 & 0.68 & 0.40 & 0.64 \\
 \hline
 phi4-lora-full-context-r8 & 0.57 & 0.75 & 0.46 & 0.70 & 0.45 & 0.68 & 0.43 & 0.65 & 0.38 & 0.64 \\
+\hline
+tucano2b4-lora-essay-only-r16 & 0.56 & 0.76 & 0.07 & 0.35 & 0.21 & 0.49 & 0.36 & 0.63 & 0.40 & 0.65 \\
+\hline
+tucano2b4-lora-essay-only-r8 & 0.54 & 0.75 & 0.07 & 0.38 & 0.22 & 0.50 & 0.36 & 0.61 & 0.45 & 0.68 \\
+\hline
+tucano2b4-lora-full-context-r16 & 0.43 & 0.65 & 0.07 & 0.39 & 0.07 & 0.39 & 0.39 & 0.64 & 0.28 & 0.56 \\
+\hline
+tucano2b4-lora-full-context-r8 & 0.45 & 0.67 & 0.10 & 0.44 & 0.07 & 0.39 & 0.39 & 0.63 & 0.08 & 0.39 \\
 \hline\hline
 deepseekR1-extractor-essay\_only & 0.15 & 0.27 & -0.02 & 0.00 & 0.27 & 0.53 & 0.42 & 0.62 & 0.49 & 0.73 \\
 deepseekR1-extractor-full\_context & 0.15 & 0.26 & 0.39 & 0.63 & 0.40 & 0.64 & 0.45 & 0.66 & 0.48 & 0.73 \\
bootstrap_confidence_intervals_Weighted_F1_table.txt
CHANGED
@@ -35,6 +35,14 @@ phi4-lora-essay-only-r8 & 0.53 & 0.69 & 0.28 & 0.45 & 0.23 & 0.39 & 0.49 & 0.67
 phi4-lora-full-context-r16 & 0.49 & 0.66 & 0.41 & 0.59 & 0.39 & 0.56 & 0.42 & 0.59 & 0.24 & 0.40 \\
 \hline
 phi4-lora-full-context-r8 & 0.54 & 0.70 & 0.39 & 0.56 & 0.29 & 0.46 & 0.49 & 0.66 & 0.23 & 0.39 \\
+\hline
+tucano2b4-lora-essay-only-r16 & 0.57 & 0.74 & 0.15 & 0.29 & 0.22 & 0.38 & 0.50 & 0.66 & 0.21 & 0.36 \\
+\hline
+tucano2b4-lora-essay-only-r8 & 0.56 & 0.72 & 0.12 & 0.27 & 0.26 & 0.42 & 0.48 & 0.65 & 0.22 & 0.37 \\
+\hline
+tucano2b4-lora-full-context-r16 & 0.50 & 0.67 & 0.18 & 0.33 & 0.19 & 0.34 & 0.51 & 0.67 & 0.21 & 0.37 \\
+\hline
+tucano2b4-lora-full-context-r8 & 0.47 & 0.64 & 0.25 & 0.41 & 0.21 & 0.37 & 0.42 & 0.59 & 0.16 & 0.31 \\
 \hline\hline
 deepseekR1-extractor-essay\_only & 0.06 & 0.19 & 0.00 & 0.04 & 0.24 & 0.40 & 0.44 & 0.61 & 0.30 & 0.47 \\
 deepseekR1-extractor-full\_context & 0.03 & 0.14 & 0.23 & 0.41 & 0.31 & 0.49 & 0.51 & 0.67 & 0.33 & 0.51 \\
create_latex_tables.py
CHANGED
@@ -99,7 +99,7 @@ class ExperimentIdParser:
 
 # Group definitions
 GROUP_1_PREFIXES = ["bertimbau", "bertugues", "mbert", "albertina"]
-GROUP_2_PREFIXES = ["phi3.5", "phi4", "llama3.1"]
+GROUP_2_PREFIXES = ["phi3.5", "phi4", "llama3.1", "tucano2b4"]
 GROUP_3_PREFIXES = ["sabia3", "deepseekr1", "gpt4o"]
 
 
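Note: the commit only extends GROUP_2_PREFIXES; the ExperimentIdParser logic that consumes these lists is not part of this diff. As a rough, hypothetical sketch of how such prefix lists are typically used to route a simplified experiment id into one of the three table groups (the function name and matching rule below are assumptions, not the repository's code):

# Hypothetical sketch: map a simplified experiment id such as
# "tucano2b4-lora-essay-only-r16" to a table group via its model prefix.
GROUP_1_PREFIXES = ["bertimbau", "bertugues", "mbert", "albertina"]
GROUP_2_PREFIXES = ["phi3.5", "phi4", "llama3.1", "tucano2b4"]
GROUP_3_PREFIXES = ["sabia3", "deepseekr1", "gpt4o"]

def experiment_group(experiment_id: str) -> int:
    """Return 1, 2, or 3 for the first matching prefix list, 0 if none matches."""
    lowered = experiment_id.lower()
    for group, prefixes in enumerate(
        (GROUP_1_PREFIXES, GROUP_2_PREFIXES, GROUP_3_PREFIXES), start=1
    ):
        if any(lowered.startswith(prefix) for prefix in prefixes):
            return group
    return 0

# With the new prefix, the Tucano runs land in group 2 alongside the other SLM decoders.
assert experiment_group("tucano2b4-lora-essay-only-r16") == 2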
create_parquet_files.py
CHANGED
@@ -83,6 +83,7 @@ def simplify_experiment_name(name):
         'phi35_classification_lora',
         'phi4_classification_lora',
         'encoder_classification'
+        'tucano_classification_lora'
     ]
 
     for pattern in duplication_patterns:
@@ -112,6 +113,12 @@ def simplify_experiment_name(name):
         name = name.replace('Llama-3.1-8B-llama31_classification_lora', 'llama3.1-8b-lora')
     elif 'Llama-3.1-8B' in name:
         name = name.replace('Llama-3.1-8B', 'llama3.1-8b-lora')
+
+    # Handle Tucano variants
+    elif 'Tucano-2b4-Instruct-tucano_classification_lora' in name:
+        name = name.replace('Tucano-2b4-Instruct-tucano_classification_lora', 'tucano2b4-lora')
+    elif 'Tucano-2b4-Instruct' in name:
+        name = name.replace('Tucano-2b4-Instruct', 'tucano2b4-lora')
 
     # Handle Phi variants
     elif 'Phi-3.5-mini-instruct-phi35_classification_lora' in name:
@@ -129,6 +136,7 @@ def simplify_experiment_name(name):
     name = name.replace('-llama31', '')
     name = name.replace('-phi35', '')
     name = name.replace('-phi4', '')
+    name = name.replace('-tucano', '')
 
     # Extract components and reorder
     parts = name.split('-')
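To see what the new Tucano branch does in isolation, here is a small standalone reproduction of just the substitutions visible in this hunk (the full simplify_experiment_name has further duplication-stripping and reordering steps that are not shown here):

# Illustrative reproduction of the Tucano-specific renaming added in this commit.
def simplify_tucano_name(name: str) -> str:
    if 'Tucano-2b4-Instruct-tucano_classification_lora' in name:
        name = name.replace('Tucano-2b4-Instruct-tucano_classification_lora', 'tucano2b4-lora')
    elif 'Tucano-2b4-Instruct' in name:
        name = name.replace('Tucano-2b4-Instruct', 'tucano2b4-lora')
    name = name.replace('-tucano', '')
    return name

print(simplify_tucano_name(
    'jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16'
))
# -> 'jbcs2025_tucano2b4-lora-C1-essay_only-r16' (before the later cleanup/reordering steps)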
evaluation_results-00000-of-00001.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:84870958125768914592b2ff017e851dd47749364b49123bef1ce60d0c094d15
+size 77396
evaluation_table_avg_only.txt
CHANGED
@@ -35,6 +35,14 @@ phi4-lora-essay-only-r8-avg(A,B) & 0.50 & 0.61 & 0.67 & 0.28 & 0.39 & 0.34 & 0.2
 phi4-lora-full-context-r16-avg(A,B) & 0.41 & 0.58 & 0.61 & 0.42 & 0.52 & 0.60 & 0.42 & 0.48 & 0.66 & 0.35 & 0.50 & 0.60 & 0.29 & 0.33 & 0.53 \\
 \hline
 phi4-lora-full-context-r8-avg(A,B) & 0.48 & 0.63 & 0.67 & 0.40 & 0.50 & 0.59 & 0.37 & 0.38 & 0.57 & 0.37 & 0.58 & 0.55 & 0.29 & 0.32 & 0.52 \\
+\hline
+tucano2b4-lora-essay-only-r16-avg(A,B) & 0.52 & 0.66 & 0.67 & 0.15 & 0.24 & 0.22 & 0.24 & 0.31 & 0.36 & 0.40 & 0.58 & 0.50 & 0.26 & 0.29 & 0.54 \\
+\hline
+tucano2b4-lora-essay-only-r8-avg(A,B) & 0.52 & 0.64 & 0.66 & 0.14 & 0.22 & 0.23 & 0.25 & 0.35 & 0.36 & 0.34 & 0.57 & 0.49 & 0.25 & 0.30 & 0.58 \\
+\hline
+tucano2b4-lora-full-context-r16-avg(A,B) & 0.40 & 0.59 & 0.54 & 0.19 & 0.27 & 0.24 & 0.22 & 0.27 & 0.24 & 0.39 & 0.59 & 0.52 & 0.25 & 0.29 & 0.43 \\
+\hline
+tucano2b4-lora-full-context-r8-avg(A,B) & 0.43 & 0.56 & 0.57 & 0.21 & 0.35 & 0.28 & 0.24 & 0.31 & 0.24 & 0.37 & 0.51 & 0.52 & 0.19 & 0.24 & 0.24 \\
 \hline\hline
 deepseekR1-extractor-essay\_only-avg(A,B) & 0.07 & 0.12 & 0.21 & 0.01 & 0.01 & -0.01 & 0.27 & 0.34 & 0.41 & 0.33 & 0.53 & 0.53 & 0.35 & 0.39 & 0.62 \\
 deepseekR1-extractor-full\_context-avg(A,B) & 0.04 & 0.08 & 0.20 & 0.27 & 0.34 & 0.51 & 0.30 & 0.41 & 0.52 & 0.37 & 0.60 & 0.56 & 0.37 & 0.42 & 0.61 \\
evaluation_table_full.txt
CHANGED
@@ -86,6 +86,26 @@ phi4-lora-full-context-r8-avg(A,B) & 0.48 & 0.63 & 0.67 & 0.40 & 0.50 & 0.59 & 0
 phi4-lora-full-context-r8-concat(A,B) & 0.43 & 0.62 & 0.67 & 0.39 & 0.47 & 0.59 & 0.34 & 0.37 & 0.57 & 0.33 & 0.58 & 0.55 & 0.30 & 0.31 & 0.52 \\
 phi4-lora-full-context-r8-onlyA & 0.45 & 0.61 & 0.66 & 0.28 & 0.46 & 0.56 & 0.25 & 0.33 & 0.53 & 0.30 & 0.58 & 0.50 & 0.25 & 0.28 & 0.49 \\
 phi4-lora-full-context-r8-onlyB & 0.52 & 0.64 & 0.68 & 0.53 & 0.54 & 0.62 & 0.49 & 0.44 & 0.61 & 0.44 & 0.57 & 0.60 & 0.34 & 0.35 & 0.55 \\
+\hline
+tucano2b4-lora-essay-only-r16-avg(A,B) & 0.52 & 0.66 & 0.67 & 0.15 & 0.24 & 0.22 & 0.24 & 0.31 & 0.36 & 0.40 & 0.58 & 0.50 & 0.26 & 0.29 & 0.54 \\
+tucano2b4-lora-essay-only-r16-concat(A,B) & 0.46 & 0.66 & 0.67 & 0.17 & 0.22 & 0.22 & 0.24 & 0.30 & 0.36 & 0.39 & 0.58 & 0.50 & 0.27 & 0.28 & 0.54 \\
+tucano2b4-lora-essay-only-r16-onlyA & 0.42 & 0.62 & 0.62 & 0.15 & 0.23 & 0.20 & 0.20 & 0.28 & 0.33 & 0.21 & 0.52 & 0.38 & 0.23 & 0.29 & 0.52 \\
+tucano2b4-lora-essay-only-r16-onlyB & 0.63 & 0.70 & 0.72 & 0.16 & 0.25 & 0.24 & 0.28 & 0.34 & 0.39 & 0.59 & 0.64 & 0.62 & 0.30 & 0.29 & 0.55 \\
+\hline
+tucano2b4-lora-essay-only-r8-avg(A,B) & 0.52 & 0.64 & 0.66 & 0.14 & 0.22 & 0.23 & 0.25 & 0.35 & 0.36 & 0.34 & 0.57 & 0.49 & 0.25 & 0.30 & 0.58 \\
+tucano2b4-lora-essay-only-r8-concat(A,B) & 0.47 & 0.64 & 0.65 & 0.15 & 0.20 & 0.23 & 0.26 & 0.34 & 0.36 & 0.32 & 0.56 & 0.50 & 0.25 & 0.29 & 0.58 \\
+tucano2b4-lora-essay-only-r8-onlyA & 0.47 & 0.62 & 0.62 & 0.08 & 0.12 & 0.17 & 0.25 & 0.39 & 0.36 & 0.23 & 0.52 & 0.41 & 0.26 & 0.33 & 0.56 \\
+tucano2b4-lora-essay-only-r8-onlyB & 0.58 & 0.66 & 0.69 & 0.20 & 0.32 & 0.30 & 0.25 & 0.30 & 0.36 & 0.45 & 0.61 & 0.58 & 0.23 & 0.27 & 0.59 \\
+\hline
+tucano2b4-lora-full-context-r16-avg(A,B) & 0.40 & 0.59 & 0.54 & 0.19 & 0.27 & 0.24 & 0.22 & 0.27 & 0.24 & 0.39 & 0.59 & 0.52 & 0.25 & 0.29 & 0.43 \\
+tucano2b4-lora-full-context-r16-concat(A,B) & 0.40 & 0.59 & 0.54 & 0.19 & 0.25 & 0.23 & 0.21 & 0.26 & 0.24 & 0.40 & 0.59 & 0.52 & 0.25 & 0.29 & 0.42 \\
+tucano2b4-lora-full-context-r16-onlyA & 0.37 & 0.54 & 0.47 & 0.14 & 0.25 & 0.16 & 0.18 & 0.28 & 0.25 & 0.35 & 0.58 & 0.44 & 0.28 & 0.33 & 0.43 \\
+tucano2b4-lora-full-context-r16-onlyB & 0.43 & 0.64 & 0.61 & 0.24 & 0.29 & 0.31 & 0.26 & 0.26 & 0.22 & 0.43 & 0.61 & 0.60 & 0.21 & 0.26 & 0.42 \\
+\hline
+tucano2b4-lora-full-context-r8-avg(A,B) & 0.43 & 0.56 & 0.57 & 0.21 & 0.35 & 0.28 & 0.24 & 0.31 & 0.24 & 0.37 & 0.51 & 0.52 & 0.19 & 0.24 & 0.24 \\
+tucano2b4-lora-full-context-r8-concat(A,B) & 0.38 & 0.56 & 0.57 & 0.21 & 0.33 & 0.28 & 0.24 & 0.29 & 0.23 & 0.34 & 0.50 & 0.52 & 0.20 & 0.23 & 0.24 \\
+tucano2b4-lora-full-context-r8-onlyA & 0.39 & 0.55 & 0.57 & 0.22 & 0.41 & 0.24 & 0.20 & 0.35 & 0.30 & 0.28 & 0.47 & 0.45 & 0.20 & 0.23 & 0.21 \\
+tucano2b4-lora-full-context-r8-onlyB & 0.46 & 0.58 & 0.57 & 0.21 & 0.28 & 0.32 & 0.29 & 0.27 & 0.18 & 0.46 & 0.54 & 0.59 & 0.19 & 0.24 & 0.26 \\
 \hline\hline
 deepseekR1-extractor-essay\_only-avg(A,B) & 0.07 & 0.12 & 0.21 & 0.01 & 0.01 & -0.01 & 0.27 & 0.34 & 0.41 & 0.33 & 0.53 & 0.53 & 0.35 & 0.39 & 0.62 \\
 deepseekR1-extractor-essay\_only-concat(A,B) & 0.07 & 0.12 & 0.21 & 0.01 & 0.01 & -0.01 & 0.25 & 0.32 & 0.40 & 0.28 & 0.53 & 0.53 & 0.36 & 0.38 & 0.62 \\
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/.hydra/config.yaml
ADDED
@@ -0,0 +1,46 @@
cache_dir: /tmp/
dataset:
  name: kamel-usp/aes_enem_dataset
  split: JBCS2025
training_params:
  seed: 42
  num_train_epochs: 20
  logging_steps: 100
  metric_for_best_model: QWK
  bf16: true
bootstrap:
  enabled: true
  n_bootstrap: 10000
  bootstrap_seed: 42
  metrics:
  - QWK
  - Macro_F1
  - Weighted_F1
post_training_results:
  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
  model:
    name: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
    type: tucano_classification_lora
    num_labels: 6
    output_dir: ./results/
    logging_dir: ./logs/
    best_model_dir: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
    lora_r: 16
    lora_dropout: 0.1
    lora_alpha: 32
    lora_target_modules: all-linear
    checkpoint_path: ''
  tokenizer:
    name: TucanoBR/Tucano-2b4-Instruct
  dataset:
    grade_index: 0
    use_full_context: false
  training_params:
    weight_decay: 0.01
    warmup_ratio: 0.1
    learning_rate: 5.0e-05
    train_batch_size: 8
    eval_batch_size: 4
    gradient_accumulation_steps: 2
    gradient_checkpointing: true
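The config above records the LoRA hyperparameters used for this run. As a hedged sketch (the repository's model-building code is not part of this diff, so the exact construction may differ), these fields would map onto transformers/PEFT roughly as follows:

# Sketch only: turning the lora_* and num_labels fields from the Hydra config into a
# LoRA-wrapped sequence-classification model. Assumes recent transformers/peft releases.
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import LoraConfig, TaskType, get_peft_model

base_model_name = "TucanoBR/Tucano-2b4-Instruct"    # experiments.tokenizer.name
tokenizer = AutoTokenizer.from_pretrained(base_model_name)
model = AutoModelForSequenceClassification.from_pretrained(
    base_model_name,
    num_labels=6,                                    # experiments.model.num_labels
)
lora_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    r=16,                                            # experiments.model.lora_r
    lora_alpha=32,                                   # experiments.model.lora_alpha
    lora_dropout=0.1,                                # experiments.model.lora_dropout
    target_modules="all-linear",                     # experiments.model.lora_target_modules
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()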
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,157 @@
hydra:
  run:
    dir: inference_output/2025-07-13/19-05-14
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][HYDRA] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    loggers:
      logging_example:
        level: DEBUG
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.run.dir=inference_output/2025-07-13/19-05-14
    - hydra.mode=RUN
    task:
    - experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
  job:
    name: run_inference_experiment
    chdir: null
    override_dirname: experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
    id: ???
    num: ???
    config_name: config
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.1'
    cwd: /workspace/jbcs2025
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: /workspace/jbcs2025/configs
      schema: file
      provider: main
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/jbcs2025/inference_output/2025-07-13/19-05-14
    choices:
      experiments: temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: default
      hydra/hydra_logging: default
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/bootstrap_confidence_intervals.csv
ADDED
@@ -0,0 +1,2 @@
experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16,2025-07-13 19:05:20,0.6660605871795818,0.5649087373336189,0.759754258752704,0.19484552141908507,0.4960154879519803,0.3776119517656355,0.6508110988620321,0.27319914709639664,0.6561547817502508,0.5735970409220825,0.7351317388784108,0.16153469795632824
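This CSV reports 95% bootstrap intervals computed with n_bootstrap: 10000 and bootstrap_seed: 42 (per the config above) over the 138 test essays. A minimal percentile-bootstrap sketch that produces numbers of this shape, assuming scikit-learn's quadratic-weighted kappa for QWK (the repository's own metric code is not in this diff):

# Minimal percentile-bootstrap sketch for a 95% CI on QWK; illustrative only.
import numpy as np
from sklearn.metrics import cohen_kappa_score

def bootstrap_ci(y_true, y_pred, n_bootstrap=10000, seed=42, alpha=0.05):
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    rng = np.random.default_rng(seed)
    n = len(y_true)
    stats = np.empty(n_bootstrap)
    for b in range(n_bootstrap):
        idx = rng.integers(0, n, n)  # resample essays with replacement
        stats[b] = cohen_kappa_score(y_true[idx], y_pred[idx], weights="quadratic")
    lower, upper = np.percentile(stats, [100 * alpha / 2, 100 * (1 - alpha / 2)])
    return stats.mean(), lower, upper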
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/evaluation_results.csv
ADDED
@@ -0,0 +1,2 @@
accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
0.6521739130434783,26.811202503993453,0.6671853119651471,0.007246376811594235,0.4596197967879384,0.6521739130434783,0.6556581591210294,0,137,0,1,0,138,0,0,8,110,18,2,35,65,7,31,43,68,19,8,4,124,4,6,2025-07-13 19:05:20,jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16
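The TP_i/TN_i/FP_i/FN_i columns look like one-vs-rest confusion counts per score level (each quadruple sums to the 138 test essays). A hedged sketch of how such columns can be derived from predictions; the column naming convention here mirrors the CSV but the helper itself is illustrative:

# Sketch: derive per-class TP/TN/FP/FN columns like those in evaluation_results.csv.
# multilabel_confusion_matrix returns, per class, [[TN, FP], [FN, TP]].
from sklearn.metrics import multilabel_confusion_matrix

def per_class_counts(y_true, y_pred, labels=range(6)):
    counts = {}
    matrices = multilabel_confusion_matrix(y_true, y_pred, labels=list(labels))
    for label, cm in zip(labels, matrices):
        (tn, fp), (fn, tp) = cm
        counts.update({f"TP_{label}": tp, f"TN_{label}": tn,
                       f"FP_{label}": fp, f"FN_{label}": fn})
    return counts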
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16_inference_results.jsonl
ADDED
The diff for this file is too large to render; see the raw diff.
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16/run_inference_experiment.log
ADDED
@@ -0,0 +1,166 @@
[2025-07-13 19:05:20,174][__main__][INFO] - Starting inference experiment
[2025-07-13 19:05:20,176][__main__][INFO] - cache_dir: /tmp/
dataset:
  name: kamel-usp/aes_enem_dataset
  split: JBCS2025
training_params:
  seed: 42
  num_train_epochs: 20
  logging_steps: 100
  metric_for_best_model: QWK
  bf16: true
bootstrap:
  enabled: true
  n_bootstrap: 10000
  bootstrap_seed: 42
  metrics:
  - QWK
  - Macro_F1
  - Weighted_F1
post_training_results:
  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
  model:
    name: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
    type: tucano_classification_lora
    num_labels: 6
    output_dir: ./results/
    logging_dir: ./logs/
    best_model_dir: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
    lora_r: 16
    lora_dropout: 0.1
    lora_alpha: 32
    lora_target_modules: all-linear
    checkpoint_path: ''
  tokenizer:
    name: TucanoBR/Tucano-2b4-Instruct
  dataset:
    grade_index: 0
    use_full_context: false
  training_params:
    weight_decay: 0.01
    warmup_ratio: 0.1
    learning_rate: 5.0e-05
    train_batch_size: 8
    eval_batch_size: 4
    gradient_accumulation_steps: 2
    gradient_checkpointing: true

[2025-07-13 19:05:20,178][__main__][INFO] - Running inference with fine-tuned HF model
[2025-07-13 19:05:21,196][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None
[2025-07-13 19:05:21,197][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/tokenizer.json
[2025-07-13 19:05:21,197][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
[2025-07-13 19:05:21,197][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/special_tokens_map.json
[2025-07-13 19:05:21,197][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/tokenizer_config.json
[2025-07-13 19:05:21,197][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
[2025-07-13 19:05:21,247][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
[2025-07-13 19:05:21,255][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False; Use Full Context: False
[2025-07-13 19:05:22,535][__main__][INFO] -
Token statistics for 'train' split:
[2025-07-13 19:05:22,535][__main__][INFO] - Total examples: 500
[2025-07-13 19:05:22,535][__main__][INFO] - Min tokens: 1878
[2025-07-13 19:05:22,535][__main__][INFO] - Max tokens: 1878
[2025-07-13 19:05:22,535][__main__][INFO] - Avg tokens: 1878.00
[2025-07-13 19:05:22,535][__main__][INFO] - Std tokens: 0.00
[2025-07-13 19:05:22,787][__main__][INFO] -
Token statistics for 'validation' split:
[2025-07-13 19:05:22,787][__main__][INFO] - Total examples: 132
[2025-07-13 19:05:22,787][__main__][INFO] - Min tokens: 1620
[2025-07-13 19:05:22,787][__main__][INFO] - Max tokens: 1620
[2025-07-13 19:05:22,787][__main__][INFO] - Avg tokens: 1620.00
[2025-07-13 19:05:22,787][__main__][INFO] - Std tokens: 0.00
[2025-07-13 19:05:23,058][__main__][INFO] -
Token statistics for 'test' split:
[2025-07-13 19:05:23,058][__main__][INFO] - Total examples: 138
[2025-07-13 19:05:23,058][__main__][INFO] - Min tokens: 1673
[2025-07-13 19:05:23,058][__main__][INFO] - Max tokens: 1673
[2025-07-13 19:05:23,058][__main__][INFO] - Avg tokens: 1673.00
[2025-07-13 19:05:23,058][__main__][INFO] - Std tokens: 0.00
[2025-07-13 19:05:23,058][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
[2025-07-13 19:05:23,058][__main__][INFO] - Model max length: 4096. If it is the same as stats, then there is a high chance that sequences are being truncated.
[2025-07-13 19:05:23,058][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
[2025-07-13 19:05:23,058][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
[2025-07-13 19:05:23,627][__main__][INFO] - Model need ≈ 14.65 GiB to run inference and 42.44 for training
[2025-07-13 19:05:23,683][__main__][INFO] - Loading PEFT model configuration from kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
[2025-07-13 19:05:23,683][__main__][INFO] - Base model name: TucanoBR/Tucano-2b4-Instruct
[2025-07-13 19:05:23,724][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/config.json
[2025-07-13 19:05:23,726][transformers.configuration_utils][INFO] - Model config LlamaConfig {
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "head_dim": 160,
  "hidden_act": "silu",
  "hidden_size": 2560,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2",
    "3": "LABEL_3",
    "4": "LABEL_4",
    "5": "LABEL_5"
  },
  "initializer_range": 0.02,
  "intermediate_size": 10240,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2,
    "LABEL_3": 3,
    "LABEL_4": 4,
    "LABEL_5": 5
  },
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "num_key_value_heads": 4,
  "pad_token_id": 3,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.53.2",
  "use_cache": false,
  "vocab_size": 32002
}

[2025-07-13 19:05:23,885][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/model.safetensors.index.json
[2025-07-13 19:05:23,886][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
[2025-07-13 19:05:23,886][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.float32.
[2025-07-13 19:05:23,887][transformers.modeling_utils][WARNING] - Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but the current dype in LlamaForSequenceClassification is torch.float32. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` decorator, or load the model with the `torch_dtype` argument. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)`
[2025-07-13 19:05:25,484][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at TucanoBR/Tucano-2b4-Instruct were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
[2025-07-13 19:05:25,484][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at TucanoBR/Tucano-2b4-Instruct and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
[2025-07-13 19:05:28,236][__main__][INFO] - Loaded fine-tuned PEFT model from kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16
[2025-07-13 19:05:28,239][__main__][INFO] - None
[2025-07-13 19:05:28,251][transformers.training_args][INFO] - PyTorch: setting up devices
[2025-07-13 19:05:28,274][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
[2025-07-13 19:05:28,283][accelerate.utils.other][WARNING] - Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
[2025-07-13 19:05:28,284][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
[2025-07-13 19:05:28,307][transformers.trainer][INFO] - Using auto half precision backend
[2025-07-13 19:05:28,307][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
[2025-07-13 19:05:28,600][__main__][INFO] - Running inference on test dataset
[2025-07-13 19:05:28,602][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id_prompt, essay_year, id, reference, grades, prompt, essay_text, supporting_text. If id_prompt, essay_year, id, reference, grades, prompt, essay_text, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-13 19:05:28,623][transformers.trainer][INFO] -
***** Running Prediction *****
[2025-07-13 19:05:28,623][transformers.trainer][INFO] - Num examples = 138
[2025-07-13 19:05:28,623][transformers.trainer][INFO] - Batch size = 4
[2025-07-13 19:05:28,880][transformers.modeling_flash_attention_utils][WARNING] - The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in torch.bfloat16.
[2025-07-13 19:05:48,706][__main__][INFO] - Inference results saved to jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16-tucano_classification_lora-C1-essay_only-r16_inference_results.jsonl
[2025-07-13 19:05:48,711][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
[2025-07-13 19:07:35,230][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
[2025-07-13 19:07:35,230][__main__][INFO] - Bootstrap Confidence Intervals (95%):
[2025-07-13 19:07:35,230][__main__][INFO] - QWK: 0.6661 [0.5649, 0.7598]
[2025-07-13 19:07:35,230][__main__][INFO] - Macro_F1: 0.4960 [0.3776, 0.6508]
[2025-07-13 19:07:35,230][__main__][INFO] - Weighted_F1: 0.6562 [0.5736, 0.7351]
[2025-07-13 19:07:35,230][__main__][INFO] - Inference results: {'accuracy': 0.6521739130434783, 'RMSE': 26.811202503993453, 'QWK': 0.6671853119651471, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.4596197967879384, 'Micro_F1': 0.6521739130434783, 'Weighted_F1': 0.6556581591210294, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(8), 'TN_2': np.int64(110), 'FP_2': np.int64(18), 'FN_2': np.int64(2), 'TP_3': np.int64(35), 'TN_3': np.int64(65), 'FP_3': np.int64(7), 'FN_3': np.int64(31), 'TP_4': np.int64(43), 'TN_4': np.int64(68), 'FP_4': np.int64(19), 'FN_4': np.int64(8), 'TP_5': np.int64(4), 'TN_5': np.int64(124), 'FP_5': np.int64(4), 'FN_5': np.int64(6)}
[2025-07-13 19:07:35,230][__main__][INFO] - Inference experiment completed
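The log shows the published adapter being pulled from the Hub and applied on top of the base Tucano checkpoint before prediction. A rough equivalent, as a sketch only (the repository's run_inference_experiment.py is not part of this diff, so details such as padding and batching are omitted):

# Sketch: reload the published adapter for inference, mirroring what the log reports.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

adapter_id = "kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r16"
base_id = "TucanoBR/Tucano-2b4-Instruct"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForSequenceClassification.from_pretrained(base_id, num_labels=6)
model = PeftModel.from_pretrained(base, adapter_id).eval()

inputs = tokenizer("texto do ensaio ...", return_tensors="pt")
with torch.no_grad():
    predicted_level = model(**inputs).logits.argmax(dim=-1).item()  # score index 0..5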
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/.hydra/config.yaml
ADDED
@@ -0,0 +1,46 @@
cache_dir: /tmp/
dataset:
  name: kamel-usp/aes_enem_dataset
  split: JBCS2025
training_params:
  seed: 42
  num_train_epochs: 20
  logging_steps: 100
  metric_for_best_model: QWK
  bf16: true
bootstrap:
  enabled: true
  n_bootstrap: 10000
  bootstrap_seed: 42
  metrics:
  - QWK
  - Macro_F1
  - Weighted_F1
post_training_results:
  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
  model:
    name: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
    type: tucano_classification_lora
    num_labels: 6
    output_dir: ./results/
    logging_dir: ./logs/
    best_model_dir: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
    lora_r: 8
    lora_dropout: 0.05
    lora_alpha: 16
    lora_target_modules: all-linear
    checkpoint_path: ''
  tokenizer:
    name: TucanoBR/Tucano-2b4-Instruct
  dataset:
    grade_index: 0
    use_full_context: false
  training_params:
    weight_decay: 0.01
    warmup_ratio: 0.1
    learning_rate: 5.0e-05
    train_batch_size: 8
    eval_batch_size: 4
    gradient_accumulation_steps: 2
    gradient_checkpointing: true
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,157 @@
hydra:
  run:
    dir: inference_output/2025-07-13/19-02-42
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][HYDRA] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    loggers:
      logging_example:
        level: DEBUG
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.run.dir=inference_output/2025-07-13/19-02-42
    - hydra.mode=RUN
    task:
    - experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
  job:
    name: run_inference_experiment
    chdir: null
    override_dirname: experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
    id: ???
    num: ???
    config_name: config
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.1'
    cwd: /workspace/jbcs2025
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: /workspace/jbcs2025/configs
      schema: file
      provider: main
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/jbcs2025/inference_output/2025-07-13/19-02-42
    choices:
      experiments: temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: default
      hydra/hydra_logging: default
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/bootstrap_confidence_intervals.csv
ADDED
@@ -0,0 +1,2 @@
experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8,2025-07-13 19:02:48,0.6507828121779963,0.5448168161247899,0.7481935868380011,0.20337677071321114,0.5021917733557918,0.38358598323817006,0.6558073073196127,0.2722213240814426,0.6419113348079484,0.5607762751113807,0.7198630642668818,0.15908678915550112
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/evaluation_results.csv
ADDED
@@ -0,0 +1,2 @@
accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
0.6376811594202898,27.240208984279956,0.6531573986804902,0.007246376811594235,0.4658552631578948,0.6376811594202898,0.641843058733791,0,137,0,1,0,138,0,0,6,120,8,4,46,56,16,20,29,72,15,22,7,117,11,3,2025-07-13 19:02:48,jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8_inference_results.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8/run_inference_experiment.log
ADDED
@@ -0,0 +1,166 @@
1 |
+
[2025-07-13 19:02:48,480][__main__][INFO] - Starting inference experiment
|
2 |
+
[2025-07-13 19:02:48,481][__main__][INFO] - cache_dir: /tmp/
|
3 |
+
dataset:
|
4 |
+
name: kamel-usp/aes_enem_dataset
|
5 |
+
split: JBCS2025
|
6 |
+
training_params:
|
7 |
+
seed: 42
|
8 |
+
num_train_epochs: 20
|
9 |
+
logging_steps: 100
|
10 |
+
metric_for_best_model: QWK
|
11 |
+
bf16: true
|
12 |
+
bootstrap:
|
13 |
+
enabled: true
|
14 |
+
n_bootstrap: 10000
|
15 |
+
bootstrap_seed: 42
|
16 |
+
metrics:
|
17 |
+
- QWK
|
18 |
+
- Macro_F1
|
19 |
+
- Weighted_F1
|
20 |
+
post_training_results:
|
21 |
+
model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
|
22 |
+
experiments:
|
23 |
+
model:
|
24 |
+
name: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
|
25 |
+
type: tucano_classification_lora
|
26 |
+
num_labels: 6
|
27 |
+
output_dir: ./results/
|
28 |
+
logging_dir: ./logs/
|
29 |
+
best_model_dir: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
|
30 |
+
lora_r: 8
|
31 |
+
lora_dropout: 0.05
|
32 |
+
lora_alpha: 16
|
33 |
+
lora_target_modules: all-linear
|
34 |
+
checkpoint_path: ''
|
35 |
+
tokenizer:
|
36 |
+
name: TucanoBR/Tucano-2b4-Instruct
|
37 |
+
dataset:
|
38 |
+
grade_index: 0
|
39 |
+
use_full_context: false
|
40 |
+
training_params:
|
41 |
+
weight_decay: 0.01
|
42 |
+
warmup_ratio: 0.1
|
43 |
+
learning_rate: 5.0e-05
|
44 |
+
train_batch_size: 8
|
45 |
+
eval_batch_size: 4
|
46 |
+
gradient_accumulation_steps: 2
|
47 |
+
gradient_checkpointing: true
|
48 |
+
|
49 |
+
[2025-07-13 19:02:48,483][__main__][INFO] - Running inference with fine-tuned HF model
|
50 |
+
[2025-07-13 19:02:51,357][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None
|
51 |
+
[2025-07-13 19:02:51,357][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/tokenizer.json
|
52 |
+
[2025-07-13 19:02:51,357][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
|
53 |
+
[2025-07-13 19:02:51,357][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/special_tokens_map.json
|
54 |
+
[2025-07-13 19:02:51,357][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/tokenizer_config.json
|
55 |
+
[2025-07-13 19:02:51,357][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
|
56 |
+
[2025-07-13 19:02:51,405][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
57 |
+
[2025-07-13 19:02:51,477][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False; Use Full Context: False
|
58 |
+
[2025-07-13 19:02:53,323][__main__][INFO] -
|
59 |
+
Token statistics for 'train' split:
|
60 |
+
[2025-07-13 19:02:53,323][__main__][INFO] - Total examples: 500
|
61 |
+
[2025-07-13 19:02:53,323][__main__][INFO] - Min tokens: 1878
|
62 |
+
[2025-07-13 19:02:53,323][__main__][INFO] - Max tokens: 1878
|
63 |
+
[2025-07-13 19:02:53,324][__main__][INFO] - Avg tokens: 1878.00
|
64 |
+
[2025-07-13 19:02:53,324][__main__][INFO] - Std tokens: 0.00
|
65 |
+
[2025-07-13 19:02:53,566][__main__][INFO] -
|
66 |
+
Token statistics for 'validation' split:
|
67 |
+
[2025-07-13 19:02:53,566][__main__][INFO] - Total examples: 132
|
68 |
+
[2025-07-13 19:02:53,566][__main__][INFO] - Min tokens: 1620
|
69 |
+
[2025-07-13 19:02:53,566][__main__][INFO] - Max tokens: 1620
|
70 |
+
[2025-07-13 19:02:53,566][__main__][INFO] - Avg tokens: 1620.00
|
71 |
+
[2025-07-13 19:02:53,566][__main__][INFO] - Std tokens: 0.00
|
72 |
+
[2025-07-13 19:02:53,828][__main__][INFO] -
|
73 |
+
Token statistics for 'test' split:
|
74 |
+
[2025-07-13 19:02:53,828][__main__][INFO] - Total examples: 138
|
75 |
+
[2025-07-13 19:02:53,828][__main__][INFO] - Min tokens: 1673
|
76 |
+
[2025-07-13 19:02:53,828][__main__][INFO] - Max tokens: 1673
|
77 |
+
[2025-07-13 19:02:53,828][__main__][INFO] - Avg tokens: 1673.00
|
78 |
+
[2025-07-13 19:02:53,828][__main__][INFO] - Std tokens: 0.00
|
79 |
+
[2025-07-13 19:02:53,829][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
|
80 |
+
[2025-07-13 19:02:53,829][__main__][INFO] - Model max length: 4096. If it is the same as stats, then there is a high chance that sequences are being truncated.
|
81 |
+
[2025-07-13 19:02:53,829][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
|
82 |
+
[2025-07-13 19:02:53,829][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
|
83 |
+
[2025-07-13 19:02:54,562][__main__][INFO] - Model need ≈ 14.53 GiB to run inference and 42.09 for training
|
84 |
+
[2025-07-13 19:02:54,620][__main__][INFO] - Loading PEFT model configuration from kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
|
85 |
+
[2025-07-13 19:02:54,620][__main__][INFO] - Base model name: TucanoBR/Tucano-2b4-Instruct
|
86 |
+
[2025-07-13 19:02:54,732][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/config.json
|
87 |
+
[2025-07-13 19:02:54,734][transformers.configuration_utils][INFO] - Model config LlamaConfig {
|
88 |
+
"architectures": [
|
89 |
+
"LlamaForCausalLM"
|
90 |
+
],
|
91 |
+
"attention_bias": false,
|
92 |
+
"attention_dropout": 0.0,
|
93 |
+
"bos_token_id": 1,
|
94 |
+
"eos_token_id": 2,
|
95 |
+
"head_dim": 160,
|
96 |
+
"hidden_act": "silu",
|
97 |
+
"hidden_size": 2560,
|
98 |
+
"id2label": {
|
99 |
+
"0": "LABEL_0",
|
100 |
+
"1": "LABEL_1",
|
101 |
+
"2": "LABEL_2",
|
102 |
+
"3": "LABEL_3",
|
103 |
+
"4": "LABEL_4",
|
104 |
+
"5": "LABEL_5"
|
105 |
+
},
|
106 |
+
"initializer_range": 0.02,
|
107 |
+
"intermediate_size": 10240,
|
108 |
+
"label2id": {
|
109 |
+
"LABEL_0": 0,
|
110 |
+
"LABEL_1": 1,
|
111 |
+
"LABEL_2": 2,
|
112 |
+
"LABEL_3": 3,
|
113 |
+
"LABEL_4": 4,
|
114 |
+
"LABEL_5": 5
|
115 |
+
},
|
116 |
+
"max_position_embeddings": 4096,
|
117 |
+
"mlp_bias": false,
|
118 |
+
"model_type": "llama",
|
119 |
+
"num_attention_heads": 16,
|
120 |
+
"num_hidden_layers": 24,
|
121 |
+
"num_key_value_heads": 4,
|
122 |
+
"pad_token_id": 3,
|
123 |
+
"pretraining_tp": 1,
|
124 |
+
"rms_norm_eps": 1e-05,
|
125 |
+
"rope_scaling": null,
|
126 |
+
"rope_theta": 10000.0,
|
127 |
+
"tie_word_embeddings": false,
|
128 |
+
"torch_dtype": "float32",
|
129 |
+
"transformers_version": "4.53.2",
|
130 |
+
"use_cache": false,
|
131 |
+
"vocab_size": 32002
|
132 |
+
}
|
133 |
+
|
134 |
+
[2025-07-13 19:02:55,093][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/model.safetensors.index.json
|
135 |
+
[2025-07-13 19:02:58,720][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
|
136 |
+
[2025-07-13 19:02:58,720][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.float32.
|
137 |
+
[2025-07-13 19:02:58,721][transformers.modeling_utils][WARNING] - Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but the current dype in LlamaForSequenceClassification is torch.float32. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` decorator, or load the model with the `torch_dtype` argument. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)`
|
138 |
+
[2025-07-13 19:03:00,188][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at TucanoBR/Tucano-2b4-Instruct were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
|
139 |
+
- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
|
140 |
+
- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
|
141 |
+
[2025-07-13 19:03:00,188][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at TucanoBR/Tucano-2b4-Instruct and are newly initialized: ['score.weight']
|
142 |
+
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
|
143 |
+
[2025-07-13 19:03:01,711][__main__][INFO] - Loaded fine-tuned PEFT model from kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8
|
144 |
+
[2025-07-13 19:03:01,713][__main__][INFO] - None
|
145 |
+
[2025-07-13 19:03:01,726][transformers.training_args][INFO] - PyTorch: setting up devices
|
146 |
+
[2025-07-13 19:03:01,755][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
|
147 |
+
[2025-07-13 19:03:01,774][accelerate.utils.other][WARNING] - Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
|
148 |
+
[2025-07-13 19:03:01,774][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
|
149 |
+
[2025-07-13 19:03:01,867][transformers.trainer][INFO] - Using auto half precision backend
|
150 |
+
[2025-07-13 19:03:01,868][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
|
151 |
+
[2025-07-13 19:03:05,171][__main__][INFO] - Running inference on test dataset
|
152 |
+
[2025-07-13 19:03:05,173][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_text, id_prompt, id, grades, essay_year, reference, supporting_text, prompt. If essay_text, id_prompt, id, grades, essay_year, reference, supporting_text, prompt are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
|
153 |
+
[2025-07-13 19:03:05,193][transformers.trainer][INFO] -
|
154 |
+
***** Running Prediction *****
|
155 |
+
[2025-07-13 19:03:05,193][transformers.trainer][INFO] - Num examples = 138
|
156 |
+
[2025-07-13 19:03:05,193][transformers.trainer][INFO] - Batch size = 4
|
157 |
+
[2025-07-13 19:03:05,456][transformers.modeling_flash_attention_utils][WARNING] - The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in torch.bfloat16.
|
158 |
+
[2025-07-13 19:03:25,314][__main__][INFO] - Inference results saved to jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-essay_only-r8-tucano_classification_lora-C1-essay_only-r8_inference_results.jsonl
|
159 |
+
[2025-07-13 19:03:25,327][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
|
160 |
+
[2025-07-13 19:05:11,669][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
|
161 |
+
[2025-07-13 19:05:11,669][__main__][INFO] - Bootstrap Confidence Intervals (95%):
|
162 |
+
[2025-07-13 19:05:11,669][__main__][INFO] - QWK: 0.6508 [0.5448, 0.7482]
|
163 |
+
[2025-07-13 19:05:11,669][__main__][INFO] - Macro_F1: 0.5022 [0.3836, 0.6558]
|
164 |
+
[2025-07-13 19:05:11,669][__main__][INFO] - Weighted_F1: 0.6419 [0.5608, 0.7199]
|
165 |
+
[2025-07-13 19:05:11,669][__main__][INFO] - Inference results: {'accuracy': 0.6376811594202898, 'RMSE': 27.240208984279956, 'QWK': 0.6531573986804902, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.4658552631578948, 'Micro_F1': 0.6376811594202898, 'Weighted_F1': 0.641843058733791, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(6), 'TN_2': np.int64(120), 'FP_2': np.int64(8), 'FN_2': np.int64(4), 'TP_3': np.int64(46), 'TN_3': np.int64(56), 'FP_3': np.int64(16), 'FN_3': np.int64(20), 'TP_4': np.int64(29), 'TN_4': np.int64(72), 'FP_4': np.int64(15), 'FN_4': np.int64(22), 'TP_5': np.int64(7), 'TN_5': np.int64(117), 'FP_5': np.int64(11), 'FN_5': np.int64(3)}
|
166 |
+
[2025-07-13 19:05:11,669][__main__][INFO] - Inference experiment completed
|
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/.hydra/config.yaml
ADDED
@@ -0,0 +1,46 @@
cache_dir: /tmp/
dataset:
name: kamel-usp/aes_enem_dataset
split: JBCS2025
training_params:
seed: 42
num_train_epochs: 20
logging_steps: 100
metric_for_best_model: QWK
bf16: true
bootstrap:
enabled: true
n_bootstrap: 10000
bootstrap_seed: 42
metrics:
- QWK
- Macro_F1
- Weighted_F1
post_training_results:
model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
model:
name: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
type: tucano_classification_lora
num_labels: 6
output_dir: ./results/
logging_dir: ./logs/
best_model_dir: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
lora_r: 16
lora_dropout: 0.1
lora_alpha: 32
lora_target_modules: all-linear
checkpoint_path: ''
tokenizer:
name: TucanoBR/Tucano-2b4-Instruct
dataset:
grade_index: 0
use_full_context: true
training_params:
weight_decay: 0.01
warmup_ratio: 0.1
learning_rate: 5.0e-05
train_batch_size: 8
eval_batch_size: 4
gradient_accumulation_steps: 2
gradient_checkpointing: true
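As an illustration of how the LoRA hyperparameters in the config above (lora_r: 16, lora_alpha: 32, lora_dropout: 0.1, lora_target_modules: all-linear) could be wired into a PEFT sequence-classification setup: the sketch below is only an assumption of the general pattern, not the repository's training code, and assumes a recent peft version that accepts target_modules="all-linear".

```python
# Illustrative sketch of applying the LoRA settings above with PEFT (not the repo's actual code).
from peft import LoraConfig, TaskType, get_peft_model
from transformers import AutoModelForSequenceClassification

base = AutoModelForSequenceClassification.from_pretrained(
    "TucanoBR/Tucano-2b4-Instruct", num_labels=6
)
lora_cfg = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    r=16,                         # lora_r
    lora_alpha=32,                # lora_alpha
    lora_dropout=0.1,             # lora_dropout
    target_modules="all-linear",  # lora_target_modules
)
model = get_peft_model(base, lora_cfg)
model.print_trainable_parameters()
```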
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,157 @@
1 |
+
hydra:
|
2 |
+
run:
|
3 |
+
dir: inference_output/2025-07-13/19-10-18
|
4 |
+
sweep:
|
5 |
+
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
|
6 |
+
subdir: ${hydra.job.num}
|
7 |
+
launcher:
|
8 |
+
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
|
9 |
+
sweeper:
|
10 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
11 |
+
max_batch_size: null
|
12 |
+
params: null
|
13 |
+
help:
|
14 |
+
app_name: ${hydra.job.name}
|
15 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
16 |
+
|
17 |
+
'
|
18 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
19 |
+
|
20 |
+
Use --hydra-help to view Hydra specific help
|
21 |
+
|
22 |
+
'
|
23 |
+
template: '${hydra.help.header}
|
24 |
+
|
25 |
+
== Configuration groups ==
|
26 |
+
|
27 |
+
Compose your configuration from those groups (group=option)
|
28 |
+
|
29 |
+
|
30 |
+
$APP_CONFIG_GROUPS
|
31 |
+
|
32 |
+
|
33 |
+
== Config ==
|
34 |
+
|
35 |
+
Override anything in the config (foo.bar=value)
|
36 |
+
|
37 |
+
|
38 |
+
$CONFIG
|
39 |
+
|
40 |
+
|
41 |
+
${hydra.help.footer}
|
42 |
+
|
43 |
+
'
|
44 |
+
hydra_help:
|
45 |
+
template: 'Hydra (${hydra.runtime.version})
|
46 |
+
|
47 |
+
See https://hydra.cc for more info.
|
48 |
+
|
49 |
+
|
50 |
+
== Flags ==
|
51 |
+
|
52 |
+
$FLAGS_HELP
|
53 |
+
|
54 |
+
|
55 |
+
== Configuration groups ==
|
56 |
+
|
57 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
58 |
+
to command line)
|
59 |
+
|
60 |
+
|
61 |
+
$HYDRA_CONFIG_GROUPS
|
62 |
+
|
63 |
+
|
64 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
65 |
+
|
66 |
+
'
|
67 |
+
hydra_help: ???
|
68 |
+
hydra_logging:
|
69 |
+
version: 1
|
70 |
+
formatters:
|
71 |
+
simple:
|
72 |
+
format: '[%(asctime)s][HYDRA] %(message)s'
|
73 |
+
handlers:
|
74 |
+
console:
|
75 |
+
class: logging.StreamHandler
|
76 |
+
formatter: simple
|
77 |
+
stream: ext://sys.stdout
|
78 |
+
root:
|
79 |
+
level: INFO
|
80 |
+
handlers:
|
81 |
+
- console
|
82 |
+
loggers:
|
83 |
+
logging_example:
|
84 |
+
level: DEBUG
|
85 |
+
disable_existing_loggers: false
|
86 |
+
job_logging:
|
87 |
+
version: 1
|
88 |
+
formatters:
|
89 |
+
simple:
|
90 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
91 |
+
handlers:
|
92 |
+
console:
|
93 |
+
class: logging.StreamHandler
|
94 |
+
formatter: simple
|
95 |
+
stream: ext://sys.stdout
|
96 |
+
file:
|
97 |
+
class: logging.FileHandler
|
98 |
+
formatter: simple
|
99 |
+
filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
|
100 |
+
root:
|
101 |
+
level: INFO
|
102 |
+
handlers:
|
103 |
+
- console
|
104 |
+
- file
|
105 |
+
disable_existing_loggers: false
|
106 |
+
env: {}
|
107 |
+
mode: RUN
|
108 |
+
searchpath: []
|
109 |
+
callbacks: {}
|
110 |
+
output_subdir: .hydra
|
111 |
+
overrides:
|
112 |
+
hydra:
|
113 |
+
- hydra.run.dir=inference_output/2025-07-13/19-10-18
|
114 |
+
- hydra.mode=RUN
|
115 |
+
task:
|
116 |
+
- experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
|
117 |
+
job:
|
118 |
+
name: run_inference_experiment
|
119 |
+
chdir: null
|
120 |
+
override_dirname: experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
|
121 |
+
id: ???
|
122 |
+
num: ???
|
123 |
+
config_name: config
|
124 |
+
env_set: {}
|
125 |
+
env_copy: []
|
126 |
+
config:
|
127 |
+
override_dirname:
|
128 |
+
kv_sep: '='
|
129 |
+
item_sep: ','
|
130 |
+
exclude_keys: []
|
131 |
+
runtime:
|
132 |
+
version: 1.3.2
|
133 |
+
version_base: '1.1'
|
134 |
+
cwd: /workspace/jbcs2025
|
135 |
+
config_sources:
|
136 |
+
- path: hydra.conf
|
137 |
+
schema: pkg
|
138 |
+
provider: hydra
|
139 |
+
- path: /workspace/jbcs2025/configs
|
140 |
+
schema: file
|
141 |
+
provider: main
|
142 |
+
- path: ''
|
143 |
+
schema: structured
|
144 |
+
provider: schema
|
145 |
+
output_dir: /workspace/jbcs2025/inference_output/2025-07-13/19-10-18
|
146 |
+
choices:
|
147 |
+
experiments: temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
|
148 |
+
hydra/env: default
|
149 |
+
hydra/callbacks: null
|
150 |
+
hydra/job_logging: default
|
151 |
+
hydra/hydra_logging: default
|
152 |
+
hydra/hydra_help: default
|
153 |
+
hydra/help: default
|
154 |
+
hydra/sweeper: basic
|
155 |
+
hydra/launcher: basic
|
156 |
+
hydra/output: default
|
157 |
+
verbose: false
|
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/bootstrap_confidence_intervals.csv
ADDED
@@ -0,0 +1,2 @@
experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16,2025-07-13 19:10:23,0.5374147344079784,0.42826449121883836,0.6486915411102406,0.22042704989140227,0.39899885708948146,0.3116464511412663,0.5077752976328532,0.1961288464915869,0.5897030082205146,0.5029569255639786,0.6727103842494871,0.1697534586855085
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/evaluation_results.csv
ADDED
@@ -0,0 +1,2 @@
accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
0.5942028985507246,31.576913232223355,0.5379943942696979,0.021739130434782594,0.3995024077046549,0.5942028985507246,0.589919045292763,0,135,2,1,0,138,0,0,7,117,11,3,48,46,26,18,25,74,13,26,2,124,4,8,2025-07-13 19:10:23,jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16_inference_results.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16/run_inference_experiment.log
ADDED
@@ -0,0 +1,166 @@
1 |
+
[2025-07-13 19:10:23,944][__main__][INFO] - Starting inference experiment
|
2 |
+
[2025-07-13 19:10:23,946][__main__][INFO] - cache_dir: /tmp/
|
3 |
+
dataset:
|
4 |
+
name: kamel-usp/aes_enem_dataset
|
5 |
+
split: JBCS2025
|
6 |
+
training_params:
|
7 |
+
seed: 42
|
8 |
+
num_train_epochs: 20
|
9 |
+
logging_steps: 100
|
10 |
+
metric_for_best_model: QWK
|
11 |
+
bf16: true
|
12 |
+
bootstrap:
|
13 |
+
enabled: true
|
14 |
+
n_bootstrap: 10000
|
15 |
+
bootstrap_seed: 42
|
16 |
+
metrics:
|
17 |
+
- QWK
|
18 |
+
- Macro_F1
|
19 |
+
- Weighted_F1
|
20 |
+
post_training_results:
|
21 |
+
model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
|
22 |
+
experiments:
|
23 |
+
model:
|
24 |
+
name: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
|
25 |
+
type: tucano_classification_lora
|
26 |
+
num_labels: 6
|
27 |
+
output_dir: ./results/
|
28 |
+
logging_dir: ./logs/
|
29 |
+
best_model_dir: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
|
30 |
+
lora_r: 16
|
31 |
+
lora_dropout: 0.1
|
32 |
+
lora_alpha: 32
|
33 |
+
lora_target_modules: all-linear
|
34 |
+
checkpoint_path: ''
|
35 |
+
tokenizer:
|
36 |
+
name: TucanoBR/Tucano-2b4-Instruct
|
37 |
+
dataset:
|
38 |
+
grade_index: 0
|
39 |
+
use_full_context: true
|
40 |
+
training_params:
|
41 |
+
weight_decay: 0.01
|
42 |
+
warmup_ratio: 0.1
|
43 |
+
learning_rate: 5.0e-05
|
44 |
+
train_batch_size: 8
|
45 |
+
eval_batch_size: 4
|
46 |
+
gradient_accumulation_steps: 2
|
47 |
+
gradient_checkpointing: true
|
48 |
+
|
49 |
+
[2025-07-13 19:10:23,948][__main__][INFO] - Running inference with fine-tuned HF model
|
50 |
+
[2025-07-13 19:10:25,230][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None
|
51 |
+
[2025-07-13 19:10:25,230][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/tokenizer.json
|
52 |
+
[2025-07-13 19:10:25,230][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
|
53 |
+
[2025-07-13 19:10:25,230][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/special_tokens_map.json
|
54 |
+
[2025-07-13 19:10:25,230][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/tokenizer_config.json
|
55 |
+
[2025-07-13 19:10:25,230][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
|
56 |
+
[2025-07-13 19:10:25,278][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
57 |
+
[2025-07-13 19:10:25,286][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False; Use Full Context: True
|
58 |
+
[2025-07-13 19:10:27,036][__main__][INFO] -
|
59 |
+
Token statistics for 'train' split:
|
60 |
+
[2025-07-13 19:10:27,036][__main__][INFO] - Total examples: 500
|
61 |
+
[2025-07-13 19:10:27,036][__main__][INFO] - Min tokens: 2685
|
62 |
+
[2025-07-13 19:10:27,037][__main__][INFO] - Max tokens: 2685
|
63 |
+
[2025-07-13 19:10:27,037][__main__][INFO] - Avg tokens: 2685.00
|
64 |
+
[2025-07-13 19:10:27,037][__main__][INFO] - Std tokens: 0.00
|
65 |
+
[2025-07-13 19:10:27,461][__main__][INFO] -
|
66 |
+
Token statistics for 'validation' split:
|
67 |
+
[2025-07-13 19:10:27,461][__main__][INFO] - Total examples: 132
|
68 |
+
[2025-07-13 19:10:27,461][__main__][INFO] - Min tokens: 2887
|
69 |
+
[2025-07-13 19:10:27,461][__main__][INFO] - Max tokens: 2887
|
70 |
+
[2025-07-13 19:10:27,461][__main__][INFO] - Avg tokens: 2887.00
|
71 |
+
[2025-07-13 19:10:27,461][__main__][INFO] - Std tokens: 0.00
|
72 |
+
[2025-07-13 19:10:27,912][__main__][INFO] -
|
73 |
+
Token statistics for 'test' split:
|
74 |
+
[2025-07-13 19:10:27,912][__main__][INFO] - Total examples: 138
|
75 |
+
[2025-07-13 19:10:27,912][__main__][INFO] - Min tokens: 2910
|
76 |
+
[2025-07-13 19:10:27,912][__main__][INFO] - Max tokens: 2910
|
77 |
+
[2025-07-13 19:10:27,912][__main__][INFO] - Avg tokens: 2910.00
|
78 |
+
[2025-07-13 19:10:27,912][__main__][INFO] - Std tokens: 0.00
|
79 |
+
[2025-07-13 19:10:27,912][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
|
80 |
+
[2025-07-13 19:10:27,912][__main__][INFO] - Model max length: 4096. If it is the same as stats, then there is a high chance that sequences are being truncated.
|
81 |
+
[2025-07-13 19:10:27,912][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
|
82 |
+
[2025-07-13 19:10:27,912][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
|
83 |
+
[2025-07-13 19:10:28,832][__main__][INFO] - Model need ≈ 14.65 GiB to run inference and 42.44 for training
|
84 |
+
[2025-07-13 19:10:28,916][__main__][INFO] - Loading PEFT model configuration from kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
|
85 |
+
[2025-07-13 19:10:28,916][__main__][INFO] - Base model name: TucanoBR/Tucano-2b4-Instruct
|
86 |
+
[2025-07-13 19:10:28,953][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/config.json
|
87 |
+
[2025-07-13 19:10:28,955][transformers.configuration_utils][INFO] - Model config LlamaConfig {
|
88 |
+
"architectures": [
|
89 |
+
"LlamaForCausalLM"
|
90 |
+
],
|
91 |
+
"attention_bias": false,
|
92 |
+
"attention_dropout": 0.0,
|
93 |
+
"bos_token_id": 1,
|
94 |
+
"eos_token_id": 2,
|
95 |
+
"head_dim": 160,
|
96 |
+
"hidden_act": "silu",
|
97 |
+
"hidden_size": 2560,
|
98 |
+
"id2label": {
|
99 |
+
"0": "LABEL_0",
|
100 |
+
"1": "LABEL_1",
|
101 |
+
"2": "LABEL_2",
|
102 |
+
"3": "LABEL_3",
|
103 |
+
"4": "LABEL_4",
|
104 |
+
"5": "LABEL_5"
|
105 |
+
},
|
106 |
+
"initializer_range": 0.02,
|
107 |
+
"intermediate_size": 10240,
|
108 |
+
"label2id": {
|
109 |
+
"LABEL_0": 0,
|
110 |
+
"LABEL_1": 1,
|
111 |
+
"LABEL_2": 2,
|
112 |
+
"LABEL_3": 3,
|
113 |
+
"LABEL_4": 4,
|
114 |
+
"LABEL_5": 5
|
115 |
+
},
|
116 |
+
"max_position_embeddings": 4096,
|
117 |
+
"mlp_bias": false,
|
118 |
+
"model_type": "llama",
|
119 |
+
"num_attention_heads": 16,
|
120 |
+
"num_hidden_layers": 24,
|
121 |
+
"num_key_value_heads": 4,
|
122 |
+
"pad_token_id": 3,
|
123 |
+
"pretraining_tp": 1,
|
124 |
+
"rms_norm_eps": 1e-05,
|
125 |
+
"rope_scaling": null,
|
126 |
+
"rope_theta": 10000.0,
|
127 |
+
"tie_word_embeddings": false,
|
128 |
+
"torch_dtype": "float32",
|
129 |
+
"transformers_version": "4.53.2",
|
130 |
+
"use_cache": false,
|
131 |
+
"vocab_size": 32002
|
132 |
+
}
|
133 |
+
|
134 |
+
[2025-07-13 19:10:29,115][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/model.safetensors.index.json
|
135 |
+
[2025-07-13 19:10:29,115][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
|
136 |
+
[2025-07-13 19:10:29,115][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.float32.
|
137 |
+
[2025-07-13 19:10:29,117][transformers.modeling_utils][WARNING] - Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but the current dype in LlamaForSequenceClassification is torch.float32. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` decorator, or load the model with the `torch_dtype` argument. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)`
|
138 |
+
[2025-07-13 19:10:30,587][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at TucanoBR/Tucano-2b4-Instruct were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
|
139 |
+
- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
|
140 |
+
- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
|
141 |
+
[2025-07-13 19:10:30,587][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at TucanoBR/Tucano-2b4-Instruct and are newly initialized: ['score.weight']
|
142 |
+
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
|
143 |
+
[2025-07-13 19:10:32,995][__main__][INFO] - Loaded fine-tuned PEFT model from kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16
|
144 |
+
[2025-07-13 19:10:32,997][__main__][INFO] - None
|
145 |
+
[2025-07-13 19:10:33,010][transformers.training_args][INFO] - PyTorch: setting up devices
|
146 |
+
[2025-07-13 19:10:33,035][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
|
147 |
+
[2025-07-13 19:10:33,044][accelerate.utils.other][WARNING] - Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
|
148 |
+
[2025-07-13 19:10:33,044][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
|
149 |
+
[2025-07-13 19:10:33,067][transformers.trainer][INFO] - Using auto half precision backend
|
150 |
+
[2025-07-13 19:10:33,067][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
|
151 |
+
[2025-07-13 19:10:36,364][__main__][INFO] - Running inference on test dataset
|
152 |
+
[2025-07-13 19:10:36,366][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: id, id_prompt, prompt, essay_text, essay_year, supporting_text, reference, grades. If id, id_prompt, prompt, essay_text, essay_year, supporting_text, reference, grades are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
|
153 |
+
[2025-07-13 19:10:36,386][transformers.trainer][INFO] -
|
154 |
+
***** Running Prediction *****
|
155 |
+
[2025-07-13 19:10:36,386][transformers.trainer][INFO] - Num examples = 138
|
156 |
+
[2025-07-13 19:10:36,386][transformers.trainer][INFO] - Batch size = 4
|
157 |
+
[2025-07-13 19:10:36,657][transformers.modeling_flash_attention_utils][WARNING] - The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in torch.bfloat16.
|
158 |
+
[2025-07-13 19:11:10,065][__main__][INFO] - Inference results saved to jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r16-tucano_classification_lora-C1-full_context-r16_inference_results.jsonl
|
159 |
+
[2025-07-13 19:11:10,071][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
|
160 |
+
[2025-07-13 19:12:55,261][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
|
161 |
+
[2025-07-13 19:12:55,261][__main__][INFO] - Bootstrap Confidence Intervals (95%):
|
162 |
+
[2025-07-13 19:12:55,261][__main__][INFO] - QWK: 0.5374 [0.4283, 0.6487]
|
163 |
+
[2025-07-13 19:12:55,261][__main__][INFO] - Macro_F1: 0.3990 [0.3116, 0.5078]
|
164 |
+
[2025-07-13 19:12:55,261][__main__][INFO] - Weighted_F1: 0.5897 [0.5030, 0.6727]
|
165 |
+
[2025-07-13 19:12:55,261][__main__][INFO] - Inference results: {'accuracy': 0.5942028985507246, 'RMSE': 31.576913232223355, 'QWK': 0.5379943942696979, 'HDIV': 0.021739130434782594, 'Macro_F1': 0.3995024077046549, 'Micro_F1': 0.5942028985507246, 'Weighted_F1': 0.589919045292763, 'TP_0': np.int64(0), 'TN_0': np.int64(135), 'FP_0': np.int64(2), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(7), 'TN_2': np.int64(117), 'FP_2': np.int64(11), 'FN_2': np.int64(3), 'TP_3': np.int64(48), 'TN_3': np.int64(46), 'FP_3': np.int64(26), 'FN_3': np.int64(18), 'TP_4': np.int64(25), 'TN_4': np.int64(74), 'FP_4': np.int64(13), 'FN_4': np.int64(26), 'TP_5': np.int64(2), 'TN_5': np.int64(124), 'FP_5': np.int64(4), 'FN_5': np.int64(8)}
|
166 |
+
[2025-07-13 19:12:55,261][__main__][INFO] - Inference experiment completed
|
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/.hydra/config.yaml
ADDED
@@ -0,0 +1,46 @@
cache_dir: /tmp/
dataset:
name: kamel-usp/aes_enem_dataset
split: JBCS2025
training_params:
seed: 42
num_train_epochs: 20
logging_steps: 100
metric_for_best_model: QWK
bf16: true
bootstrap:
enabled: true
n_bootstrap: 10000
bootstrap_seed: 42
metrics:
- QWK
- Macro_F1
- Weighted_F1
post_training_results:
model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
model:
name: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
type: tucano_classification_lora
num_labels: 6
output_dir: ./results/
logging_dir: ./logs/
best_model_dir: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
lora_r: 8
lora_dropout: 0.05
lora_alpha: 16
lora_target_modules: all-linear
checkpoint_path: ''
tokenizer:
name: TucanoBR/Tucano-2b4-Instruct
dataset:
grade_index: 0
use_full_context: true
training_params:
weight_decay: 0.01
warmup_ratio: 0.1
learning_rate: 5.0e-05
train_batch_size: 8
eval_batch_size: 4
gradient_accumulation_steps: 2
gradient_checkpointing: true
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,157 @@
1 |
+
hydra:
|
2 |
+
run:
|
3 |
+
dir: inference_output/2025-07-13/19-07-39
|
4 |
+
sweep:
|
5 |
+
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
|
6 |
+
subdir: ${hydra.job.num}
|
7 |
+
launcher:
|
8 |
+
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
|
9 |
+
sweeper:
|
10 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
11 |
+
max_batch_size: null
|
12 |
+
params: null
|
13 |
+
help:
|
14 |
+
app_name: ${hydra.job.name}
|
15 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
16 |
+
|
17 |
+
'
|
18 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
19 |
+
|
20 |
+
Use --hydra-help to view Hydra specific help
|
21 |
+
|
22 |
+
'
|
23 |
+
template: '${hydra.help.header}
|
24 |
+
|
25 |
+
== Configuration groups ==
|
26 |
+
|
27 |
+
Compose your configuration from those groups (group=option)
|
28 |
+
|
29 |
+
|
30 |
+
$APP_CONFIG_GROUPS
|
31 |
+
|
32 |
+
|
33 |
+
== Config ==
|
34 |
+
|
35 |
+
Override anything in the config (foo.bar=value)
|
36 |
+
|
37 |
+
|
38 |
+
$CONFIG
|
39 |
+
|
40 |
+
|
41 |
+
${hydra.help.footer}
|
42 |
+
|
43 |
+
'
|
44 |
+
hydra_help:
|
45 |
+
template: 'Hydra (${hydra.runtime.version})
|
46 |
+
|
47 |
+
See https://hydra.cc for more info.
|
48 |
+
|
49 |
+
|
50 |
+
== Flags ==
|
51 |
+
|
52 |
+
$FLAGS_HELP
|
53 |
+
|
54 |
+
|
55 |
+
== Configuration groups ==
|
56 |
+
|
57 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
58 |
+
to command line)
|
59 |
+
|
60 |
+
|
61 |
+
$HYDRA_CONFIG_GROUPS
|
62 |
+
|
63 |
+
|
64 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
65 |
+
|
66 |
+
'
|
67 |
+
hydra_help: ???
|
68 |
+
hydra_logging:
|
69 |
+
version: 1
|
70 |
+
formatters:
|
71 |
+
simple:
|
72 |
+
format: '[%(asctime)s][HYDRA] %(message)s'
|
73 |
+
handlers:
|
74 |
+
console:
|
75 |
+
class: logging.StreamHandler
|
76 |
+
formatter: simple
|
77 |
+
stream: ext://sys.stdout
|
78 |
+
root:
|
79 |
+
level: INFO
|
80 |
+
handlers:
|
81 |
+
- console
|
82 |
+
loggers:
|
83 |
+
logging_example:
|
84 |
+
level: DEBUG
|
85 |
+
disable_existing_loggers: false
|
86 |
+
job_logging:
|
87 |
+
version: 1
|
88 |
+
formatters:
|
89 |
+
simple:
|
90 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
91 |
+
handlers:
|
92 |
+
console:
|
93 |
+
class: logging.StreamHandler
|
94 |
+
formatter: simple
|
95 |
+
stream: ext://sys.stdout
|
96 |
+
file:
|
97 |
+
class: logging.FileHandler
|
98 |
+
formatter: simple
|
99 |
+
filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
|
100 |
+
root:
|
101 |
+
level: INFO
|
102 |
+
handlers:
|
103 |
+
- console
|
104 |
+
- file
|
105 |
+
disable_existing_loggers: false
|
106 |
+
env: {}
|
107 |
+
mode: RUN
|
108 |
+
searchpath: []
|
109 |
+
callbacks: {}
|
110 |
+
output_subdir: .hydra
|
111 |
+
overrides:
|
112 |
+
hydra:
|
113 |
+
- hydra.run.dir=inference_output/2025-07-13/19-07-39
|
114 |
+
- hydra.mode=RUN
|
115 |
+
task:
|
116 |
+
- experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
|
117 |
+
job:
|
118 |
+
name: run_inference_experiment
|
119 |
+
chdir: null
|
120 |
+
override_dirname: experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
|
121 |
+
id: ???
|
122 |
+
num: ???
|
123 |
+
config_name: config
|
124 |
+
env_set: {}
|
125 |
+
env_copy: []
|
126 |
+
config:
|
127 |
+
override_dirname:
|
128 |
+
kv_sep: '='
|
129 |
+
item_sep: ','
|
130 |
+
exclude_keys: []
|
131 |
+
runtime:
|
132 |
+
version: 1.3.2
|
133 |
+
version_base: '1.1'
|
134 |
+
cwd: /workspace/jbcs2025
|
135 |
+
config_sources:
|
136 |
+
- path: hydra.conf
|
137 |
+
schema: pkg
|
138 |
+
provider: hydra
|
139 |
+
- path: /workspace/jbcs2025/configs
|
140 |
+
schema: file
|
141 |
+
provider: main
|
142 |
+
- path: ''
|
143 |
+
schema: structured
|
144 |
+
provider: schema
|
145 |
+
output_dir: /workspace/jbcs2025/inference_output/2025-07-13/19-07-39
|
146 |
+
choices:
|
147 |
+
experiments: temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
|
148 |
+
hydra/env: default
|
149 |
+
hydra/callbacks: null
|
150 |
+
hydra/job_logging: default
|
151 |
+
hydra/hydra_logging: default
|
152 |
+
hydra/hydra_help: default
|
153 |
+
hydra/help: default
|
154 |
+
hydra/sweeper: basic
|
155 |
+
hydra/launcher: basic
|
156 |
+
hydra/output: default
|
157 |
+
verbose: false
|
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/bootstrap_confidence_intervals.csv
ADDED
@@ -0,0 +1,2 @@
experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8,2025-07-13 19:07:44,0.5629665177856308,0.45023226528655275,0.6661682802919398,0.215936015005387,0.4127433561801439,0.30884451396084633,0.5475081624419281,0.2386636484810818,0.558846735395311,0.4730360557696718,0.6439182617161876,0.17088220594651582
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/evaluation_results.csv
ADDED
@@ -0,0 +1,2 @@
accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
0.5579710144927537,31.20757990421976,0.5655172413793104,0.007246376811594235,0.3825386450876646,0.5579710144927537,0.5584592935402825,0,137,0,1,0,138,0,0,7,115,13,3,47,49,23,19,19,76,11,32,4,114,14,6,2025-07-13 19:07:44,jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8_inference_results.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8/run_inference_experiment.log
ADDED
@@ -0,0 +1,166 @@
1 |
+
[2025-07-13 19:07:44,364][__main__][INFO] - Starting inference experiment
|
2 |
+
[2025-07-13 19:07:44,366][__main__][INFO] - cache_dir: /tmp/
|
3 |
+
dataset:
|
4 |
+
name: kamel-usp/aes_enem_dataset
|
5 |
+
split: JBCS2025
|
6 |
+
training_params:
|
7 |
+
seed: 42
|
8 |
+
num_train_epochs: 20
|
9 |
+
logging_steps: 100
|
10 |
+
metric_for_best_model: QWK
|
11 |
+
bf16: true
|
12 |
+
bootstrap:
|
13 |
+
enabled: true
|
14 |
+
n_bootstrap: 10000
|
15 |
+
bootstrap_seed: 42
|
16 |
+
metrics:
|
17 |
+
- QWK
|
18 |
+
- Macro_F1
|
19 |
+
- Weighted_F1
|
20 |
+
post_training_results:
|
21 |
+
model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
|
22 |
+
experiments:
|
23 |
+
model:
|
24 |
+
name: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
|
25 |
+
type: tucano_classification_lora
|
26 |
+
num_labels: 6
|
27 |
+
output_dir: ./results/
|
28 |
+
logging_dir: ./logs/
|
29 |
+
best_model_dir: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
|
30 |
+
lora_r: 8
|
31 |
+
lora_dropout: 0.05
|
32 |
+
lora_alpha: 16
|
33 |
+
lora_target_modules: all-linear
|
34 |
+
checkpoint_path: ''
|
35 |
+
tokenizer:
|
36 |
+
name: TucanoBR/Tucano-2b4-Instruct
|
37 |
+
dataset:
|
38 |
+
grade_index: 0
|
39 |
+
use_full_context: true
|
40 |
+
training_params:
|
41 |
+
weight_decay: 0.01
|
42 |
+
warmup_ratio: 0.1
|
43 |
+
learning_rate: 5.0e-05
|
44 |
+
train_batch_size: 8
|
45 |
+
eval_batch_size: 4
|
46 |
+
gradient_accumulation_steps: 2
|
47 |
+
gradient_checkpointing: true
|
48 |
+
|
49 |
+
[2025-07-13 19:07:44,368][__main__][INFO] - Running inference with fine-tuned HF model
|
50 |
+
[2025-07-13 19:07:45,523][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None
|
51 |
+
[2025-07-13 19:07:45,523][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/tokenizer.json
|
52 |
+
[2025-07-13 19:07:45,523][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
|
53 |
+
[2025-07-13 19:07:45,523][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/special_tokens_map.json
|
54 |
+
[2025-07-13 19:07:45,523][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/tokenizer_config.json
|
55 |
+
[2025-07-13 19:07:45,523][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
|
56 |
+
[2025-07-13 19:07:45,573][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
57 |
+
[2025-07-13 19:07:45,581][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False; Use Full Context: True
|
58 |
+
[2025-07-13 19:07:48,329][__main__][INFO] -
|
59 |
+
Token statistics for 'train' split:
|
60 |
+
[2025-07-13 19:07:48,329][__main__][INFO] - Total examples: 500
|
61 |
+
[2025-07-13 19:07:48,329][__main__][INFO] - Min tokens: 2685
|
62 |
+
[2025-07-13 19:07:48,329][__main__][INFO] - Max tokens: 2685
|
63 |
+
[2025-07-13 19:07:48,329][__main__][INFO] - Avg tokens: 2685.00
|
64 |
+
[2025-07-13 19:07:48,329][__main__][INFO] - Std tokens: 0.00
|
65 |
+
[2025-07-13 19:07:48,751][__main__][INFO] -
|
66 |
+
Token statistics for 'validation' split:
|
67 |
+
[2025-07-13 19:07:48,752][__main__][INFO] - Total examples: 132
|
68 |
+
[2025-07-13 19:07:48,752][__main__][INFO] - Min tokens: 2887
|
69 |
+
[2025-07-13 19:07:48,752][__main__][INFO] - Max tokens: 2887
|
70 |
+
[2025-07-13 19:07:48,752][__main__][INFO] - Avg tokens: 2887.00
|
71 |
+
[2025-07-13 19:07:48,752][__main__][INFO] - Std tokens: 0.00
|
72 |
+
[2025-07-13 19:07:49,195][__main__][INFO] -
|
73 |
+
Token statistics for 'test' split:
|
74 |
+
[2025-07-13 19:07:49,195][__main__][INFO] - Total examples: 138
[2025-07-13 19:07:49,195][__main__][INFO] - Min tokens: 2910
[2025-07-13 19:07:49,195][__main__][INFO] - Max tokens: 2910
[2025-07-13 19:07:49,195][__main__][INFO] - Avg tokens: 2910.00
[2025-07-13 19:07:49,195][__main__][INFO] - Std tokens: 0.00
[2025-07-13 19:07:49,195][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
[2025-07-13 19:07:49,195][__main__][INFO] - Model max length: 4096. If it is the same as stats, then there is a high chance that sequences are being truncated.
[2025-07-13 19:07:49,196][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
[2025-07-13 19:07:49,196][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
[2025-07-13 19:07:50,228][__main__][INFO] - Model need ≈ 14.53 GiB to run inference and 42.09 for training
[2025-07-13 19:07:50,286][__main__][INFO] - Loading PEFT model configuration from kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
[2025-07-13 19:07:50,286][__main__][INFO] - Base model name: TucanoBR/Tucano-2b4-Instruct
[2025-07-13 19:07:50,328][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/config.json
[2025-07-13 19:07:50,330][transformers.configuration_utils][INFO] - Model config LlamaConfig {
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "head_dim": 160,
  "hidden_act": "silu",
  "hidden_size": 2560,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2",
    "3": "LABEL_3",
    "4": "LABEL_4",
    "5": "LABEL_5"
  },
  "initializer_range": 0.02,
  "intermediate_size": 10240,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2,
    "LABEL_3": 3,
    "LABEL_4": 4,
    "LABEL_5": 5
  },
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "num_key_value_heads": 4,
  "pad_token_id": 3,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.53.2",
  "use_cache": false,
  "vocab_size": 32002
}

[2025-07-13 19:07:50,493][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/model.safetensors.index.json
[2025-07-13 19:07:50,493][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
[2025-07-13 19:07:50,494][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.float32.
[2025-07-13 19:07:50,495][transformers.modeling_utils][WARNING] - Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but the current dype in LlamaForSequenceClassification is torch.float32. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` decorator, or load the model with the `torch_dtype` argument. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)`
[2025-07-13 19:07:51,961][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at TucanoBR/Tucano-2b4-Instruct were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
[2025-07-13 19:07:51,961][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at TucanoBR/Tucano-2b4-Instruct and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
[2025-07-13 19:07:53,447][__main__][INFO] - Loaded fine-tuned PEFT model from kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8
[2025-07-13 19:07:53,449][__main__][INFO] - None
[2025-07-13 19:07:53,462][transformers.training_args][INFO] - PyTorch: setting up devices
[2025-07-13 19:07:53,500][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
[2025-07-13 19:07:53,510][accelerate.utils.other][WARNING] - Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
[2025-07-13 19:07:53,510][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
[2025-07-13 19:07:53,533][transformers.trainer][INFO] - Using auto half precision backend
[2025-07-13 19:07:53,533][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
[2025-07-13 19:07:56,900][__main__][INFO] - Running inference on test dataset
[2025-07-13 19:07:56,902][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: prompt, id_prompt, grades, supporting_text, reference, essay_text, essay_year, id. If prompt, id_prompt, grades, supporting_text, reference, essay_text, essay_year, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-13 19:07:56,922][transformers.trainer][INFO] -
***** Running Prediction *****
[2025-07-13 19:07:56,922][transformers.trainer][INFO] - Num examples = 138
[2025-07-13 19:07:56,922][transformers.trainer][INFO] - Batch size = 4
[2025-07-13 19:07:57,205][transformers.modeling_flash_attention_utils][WARNING] - The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in torch.bfloat16.
[2025-07-13 19:08:30,602][__main__][INFO] - Inference results saved to jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C1-full_context-r8-tucano_classification_lora-C1-full_context-r8_inference_results.jsonl
[2025-07-13 19:08:30,607][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
[2025-07-13 19:10:15,450][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
[2025-07-13 19:10:15,451][__main__][INFO] - Bootstrap Confidence Intervals (95%):
[2025-07-13 19:10:15,451][__main__][INFO] - QWK: 0.5630 [0.4502, 0.6662]
[2025-07-13 19:10:15,451][__main__][INFO] - Macro_F1: 0.4127 [0.3088, 0.5475]
[2025-07-13 19:10:15,451][__main__][INFO] - Weighted_F1: 0.5588 [0.4730, 0.6439]
[2025-07-13 19:10:15,451][__main__][INFO] - Inference results: {'accuracy': 0.5579710144927537, 'RMSE': 31.20757990421976, 'QWK': 0.5655172413793104, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.3825386450876646, 'Micro_F1': 0.5579710144927537, 'Weighted_F1': 0.5584592935402825, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(7), 'TN_2': np.int64(115), 'FP_2': np.int64(13), 'FN_2': np.int64(3), 'TP_3': np.int64(47), 'TN_3': np.int64(49), 'FP_3': np.int64(23), 'FN_3': np.int64(19), 'TP_4': np.int64(19), 'TN_4': np.int64(76), 'FP_4': np.int64(11), 'FN_4': np.int64(32), 'TP_5': np.int64(4), 'TN_5': np.int64(114), 'FP_5': np.int64(14), 'FN_5': np.int64(6)}
[2025-07-13 19:10:15,451][__main__][INFO] - Inference experiment completed
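The intervals above are percentile bootstrap estimates over the 138 test predictions (n_bootstrap: 10000, bootstrap_seed: 42 in the run config). A minimal sketch of how such intervals can be computed is shown below; the helper name and resampling details are illustrative assumptions, not the repository's exact implementation.

import numpy as np
from sklearn.metrics import cohen_kappa_score, f1_score

def bootstrap_ci(y_true, y_pred, metric_fn, n_bootstrap=10000, seed=42, alpha=0.05):
    # Percentile bootstrap: resample (gold, prediction) pairs with replacement
    # and recompute the metric on each resample.
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    rng = np.random.default_rng(seed)
    n = len(y_true)
    scores = []
    for _ in range(n_bootstrap):
        idx = rng.integers(0, n, size=n)
        scores.append(metric_fn(y_true[idx], y_pred[idx]))
    lower, upper = np.percentile(scores, [100 * alpha / 2, 100 * (1 - alpha / 2)])
    return float(np.mean(scores)), float(lower), float(upper)

# Metric definitions matching the names reported in the log.
qwk = lambda t, p: cohen_kappa_score(t, p, weights="quadratic")
macro_f1 = lambda t, p: f1_score(t, p, average="macro")
weighted_f1 = lambda t, p: f1_score(t, p, average="weighted")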
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/.hydra/config.yaml
ADDED
@@ -0,0 +1,46 @@
cache_dir: /tmp/
dataset:
  name: kamel-usp/aes_enem_dataset
  split: JBCS2025
training_params:
  seed: 42
  num_train_epochs: 20
  logging_steps: 100
  metric_for_best_model: QWK
  bf16: true
bootstrap:
  enabled: true
  n_bootstrap: 10000
  bootstrap_seed: 42
  metrics:
  - QWK
  - Macro_F1
  - Weighted_F1
post_training_results:
  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
  model:
    name: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
    type: tucano_classification_lora
    num_labels: 6
    output_dir: ./results/
    logging_dir: ./logs/
    best_model_dir: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
    lora_r: 16
    lora_dropout: 0.1
    lora_alpha: 32
    lora_target_modules: all-linear
    checkpoint_path: ''
  tokenizer:
    name: TucanoBR/Tucano-2b4-Instruct
  dataset:
    grade_index: 1
    use_full_context: false
  training_params:
    weight_decay: 0.01
    warmup_ratio: 0.1
    learning_rate: 5.0e-05
    train_batch_size: 8
    eval_batch_size: 4
    gradient_accumulation_steps: 2
    gradient_checkpointing: true
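For reference, the adapter hyperparameters recorded above (lora_r: 16, lora_alpha: 32, lora_dropout: 0.1, lora_target_modules: all-linear, num_labels: 6) correspond roughly to the PEFT configuration sketched below. The training wrapper itself is not part of this diff, so this is an illustration under those assumptions rather than the project's actual code.

from peft import LoraConfig, TaskType, get_peft_model
from transformers import AutoModelForSequenceClassification

# Base model with a 6-way classification head (score levels 0-5).
base = AutoModelForSequenceClassification.from_pretrained(
    "TucanoBR/Tucano-2b4-Instruct", num_labels=6
)
lora_cfg = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    r=16,                         # lora_r
    lora_alpha=32,                # lora_alpha
    lora_dropout=0.1,             # lora_dropout
    target_modules="all-linear",  # lora_target_modules
)
model = get_peft_model(base, lora_cfg)  # only adapter + head weights are trainable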
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,157 @@
hydra:
  run:
    dir: inference_output/2025-07-13/19-15-36
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][HYDRA] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    loggers:
      logging_example:
        level: DEBUG
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.run.dir=inference_output/2025-07-13/19-15-36
    - hydra.mode=RUN
    task:
    - experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
  job:
    name: run_inference_experiment
    chdir: null
    override_dirname: experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
    id: ???
    num: ???
    config_name: config
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.1'
    cwd: /workspace/jbcs2025
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: /workspace/jbcs2025/configs
      schema: file
      provider: main
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/jbcs2025/inference_output/2025-07-13/19-15-36
    choices:
      experiments: temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: default
      hydra/hydra_logging: default
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
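The single task override stored here is what selects the experiment config group when the run is launched. A minimal Hydra entry point that would consume such an override could look like the sketch below; the script body and the version_base argument are assumptions, while config_path, config_name and the override string come from the files above.

# Launched roughly as:
#   python run_inference_experiment.py \
#     experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
import hydra
from omegaconf import DictConfig, OmegaConf

@hydra.main(version_base=None, config_path="configs", config_name="config")
def main(cfg: DictConfig) -> None:
    # The chosen experiment group is composed under cfg.experiments.
    print(OmegaConf.to_yaml(cfg.experiments.model))

if __name__ == "__main__":
    main()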
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/bootstrap_confidence_intervals.csv
ADDED
@@ -0,0 +1,2 @@
experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16,2025-07-13 19:15:41,0.21268663521047046,0.06707541515778438,0.3508818060178967,0.28380639086011233,0.17118416853070587,0.11503203355555276,0.23195845400407022,0.11692642044851746,0.22042911158108158,0.15073799122838333,0.2949646121134494,0.14422662088506605
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/evaluation_results.csv
ADDED
@@ -0,0 +1,2 @@
accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
0.21739130434782608,70.60812450686403,0.2155774111675125,0.1376811594202898,0.17402959084916517,0.21739130434782608,0.22084770067813536,0,133,4,1,12,75,28,23,2,109,24,3,7,62,25,44,6,96,16,20,3,107,11,17,2025-07-13 19:15:41,jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16
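The TP_k/TN_k/FP_k/FN_k columns are one-vs-rest counts for each score level k in 0..5 over the 138 test essays. They can be recomputed from gold and predicted labels with scikit-learn, as in this sketch (function and variable names are illustrative):

import numpy as np
from sklearn.metrics import multilabel_confusion_matrix

def per_class_counts(y_true, y_pred, labels=range(6)):
    # Returns {label: (TP, TN, FP, FN)} from one-vs-rest confusion matrices.
    counts = {}
    matrices = multilabel_confusion_matrix(y_true, y_pred, labels=list(labels))
    for label, m in zip(labels, matrices):
        tn, fp, fn, tp = m.ravel()  # sklearn orders each 2x2 matrix as [[TN, FP], [FN, TP]]
        counts[label] = (int(tp), int(tn), int(fp), int(fn))
    return counts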
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16_inference_results.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16/run_inference_experiment.log
ADDED
@@ -0,0 +1,166 @@
[2025-07-13 19:15:41,342][__main__][INFO] - Starting inference experiment
[2025-07-13 19:15:41,344][__main__][INFO] - cache_dir: /tmp/
dataset:
  name: kamel-usp/aes_enem_dataset
  split: JBCS2025
training_params:
  seed: 42
  num_train_epochs: 20
  logging_steps: 100
  metric_for_best_model: QWK
  bf16: true
bootstrap:
  enabled: true
  n_bootstrap: 10000
  bootstrap_seed: 42
  metrics:
  - QWK
  - Macro_F1
  - Weighted_F1
post_training_results:
  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
  model:
    name: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
    type: tucano_classification_lora
    num_labels: 6
    output_dir: ./results/
    logging_dir: ./logs/
    best_model_dir: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
    lora_r: 16
    lora_dropout: 0.1
    lora_alpha: 32
    lora_target_modules: all-linear
    checkpoint_path: ''
  tokenizer:
    name: TucanoBR/Tucano-2b4-Instruct
  dataset:
    grade_index: 1
    use_full_context: false
  training_params:
    weight_decay: 0.01
    warmup_ratio: 0.1
    learning_rate: 5.0e-05
    train_batch_size: 8
    eval_batch_size: 4
    gradient_accumulation_steps: 2
    gradient_checkpointing: true

[2025-07-13 19:15:41,346][__main__][INFO] - Running inference with fine-tuned HF model
[2025-07-13 19:15:42,420][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None
[2025-07-13 19:15:42,421][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/tokenizer.json
[2025-07-13 19:15:42,421][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
[2025-07-13 19:15:42,421][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/special_tokens_map.json
[2025-07-13 19:15:42,421][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/tokenizer_config.json
[2025-07-13 19:15:42,421][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
[2025-07-13 19:15:42,469][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
[2025-07-13 19:15:42,477][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False; Use Full Context: False
[2025-07-13 19:15:44,250][__main__][INFO] -
Token statistics for 'train' split:
[2025-07-13 19:15:44,250][__main__][INFO] - Total examples: 500
[2025-07-13 19:15:44,250][__main__][INFO] - Min tokens: 2750
[2025-07-13 19:15:44,250][__main__][INFO] - Max tokens: 2750
[2025-07-13 19:15:44,251][__main__][INFO] - Avg tokens: 2750.00
[2025-07-13 19:15:44,251][__main__][INFO] - Std tokens: 0.00
[2025-07-13 19:15:44,621][__main__][INFO] -
Token statistics for 'validation' split:
[2025-07-13 19:15:44,621][__main__][INFO] - Total examples: 132
[2025-07-13 19:15:44,621][__main__][INFO] - Min tokens: 2492
[2025-07-13 19:15:44,621][__main__][INFO] - Max tokens: 2492
[2025-07-13 19:15:44,621][__main__][INFO] - Avg tokens: 2492.00
[2025-07-13 19:15:44,621][__main__][INFO] - Std tokens: 0.00
[2025-07-13 19:15:45,015][__main__][INFO] -
Token statistics for 'test' split:
[2025-07-13 19:15:45,015][__main__][INFO] - Total examples: 138
[2025-07-13 19:15:45,015][__main__][INFO] - Min tokens: 2545
[2025-07-13 19:15:45,015][__main__][INFO] - Max tokens: 2545
[2025-07-13 19:15:45,015][__main__][INFO] - Avg tokens: 2545.00
[2025-07-13 19:15:45,015][__main__][INFO] - Std tokens: 0.00
[2025-07-13 19:15:45,015][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
[2025-07-13 19:15:45,015][__main__][INFO] - Model max length: 4096. If it is the same as stats, then there is a high chance that sequences are being truncated.
[2025-07-13 19:15:45,015][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
[2025-07-13 19:15:45,015][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
[2025-07-13 19:15:47,736][__main__][INFO] - Model need ≈ 14.65 GiB to run inference and 42.44 for training
[2025-07-13 19:15:47,790][__main__][INFO] - Loading PEFT model configuration from kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
[2025-07-13 19:15:47,790][__main__][INFO] - Base model name: TucanoBR/Tucano-2b4-Instruct
[2025-07-13 19:15:47,825][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/config.json
[2025-07-13 19:15:47,827][transformers.configuration_utils][INFO] - Model config LlamaConfig {
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "head_dim": 160,
  "hidden_act": "silu",
  "hidden_size": 2560,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2",
    "3": "LABEL_3",
    "4": "LABEL_4",
    "5": "LABEL_5"
  },
  "initializer_range": 0.02,
  "intermediate_size": 10240,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2,
    "LABEL_3": 3,
    "LABEL_4": 4,
    "LABEL_5": 5
  },
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "num_key_value_heads": 4,
  "pad_token_id": 3,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.53.2",
  "use_cache": false,
  "vocab_size": 32002
}

[2025-07-13 19:15:47,997][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--TucanoBR--Tucano-2b4-Instruct/snapshots/d763c3ed97909de3b664742dd955bf35d1cca620/model.safetensors.index.json
[2025-07-13 19:15:47,998][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
[2025-07-13 19:15:47,998][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.float32.
[2025-07-13 19:15:47,999][transformers.modeling_utils][WARNING] - Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but the current dype in LlamaForSequenceClassification is torch.float32. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` decorator, or load the model with the `torch_dtype` argument. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)`
[2025-07-13 19:15:49,389][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at TucanoBR/Tucano-2b4-Instruct were not used when initializing LlamaForSequenceClassification: ['lm_head.weight']
- This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
[2025-07-13 19:15:49,389][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at TucanoBR/Tucano-2b4-Instruct and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
[2025-07-13 19:15:51,973][__main__][INFO] - Loaded fine-tuned PEFT model from kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16
[2025-07-13 19:15:51,975][__main__][INFO] - None
[2025-07-13 19:15:51,987][transformers.training_args][INFO] - PyTorch: setting up devices
[2025-07-13 19:15:52,011][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
[2025-07-13 19:15:52,020][accelerate.utils.other][WARNING] - Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
[2025-07-13 19:15:52,021][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
[2025-07-13 19:15:52,043][transformers.trainer][INFO] - Using auto half precision backend
[2025-07-13 19:15:52,044][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
[2025-07-13 19:15:52,310][__main__][INFO] - Running inference on test dataset
[2025-07-13 19:15:52,312][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_text, reference, id_prompt, prompt, essay_year, grades, supporting_text, id. If essay_text, reference, id_prompt, prompt, essay_year, grades, supporting_text, id are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-13 19:15:52,334][transformers.trainer][INFO] -
***** Running Prediction *****
[2025-07-13 19:15:52,334][transformers.trainer][INFO] - Num examples = 138
[2025-07-13 19:15:52,334][transformers.trainer][INFO] - Batch size = 4
[2025-07-13 19:15:52,609][transformers.modeling_flash_attention_utils][WARNING] - The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in torch.bfloat16.
[2025-07-13 19:16:22,331][__main__][INFO] - Inference results saved to jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r16-tucano_classification_lora-C2-essay_only-r16_inference_results.jsonl
[2025-07-13 19:16:22,336][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
[2025-07-13 19:18:07,639][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
[2025-07-13 19:18:07,639][__main__][INFO] - Bootstrap Confidence Intervals (95%):
[2025-07-13 19:18:07,639][__main__][INFO] - QWK: 0.2127 [0.0671, 0.3509]
[2025-07-13 19:18:07,639][__main__][INFO] - Macro_F1: 0.1712 [0.1150, 0.2320]
[2025-07-13 19:18:07,639][__main__][INFO] - Weighted_F1: 0.2204 [0.1507, 0.2950]
[2025-07-13 19:18:07,639][__main__][INFO] - Inference results: {'accuracy': 0.21739130434782608, 'RMSE': 70.60812450686403, 'QWK': 0.2155774111675125, 'HDIV': 0.1376811594202898, 'Macro_F1': 0.17402959084916517, 'Micro_F1': 0.21739130434782608, 'Weighted_F1': 0.22084770067813536, 'TP_0': np.int64(0), 'TN_0': np.int64(133), 'FP_0': np.int64(4), 'FN_0': np.int64(1), 'TP_1': np.int64(12), 'TN_1': np.int64(75), 'FP_1': np.int64(28), 'FN_1': np.int64(23), 'TP_2': np.int64(2), 'TN_2': np.int64(109), 'FP_2': np.int64(24), 'FN_2': np.int64(3), 'TP_3': np.int64(7), 'TN_3': np.int64(62), 'FP_3': np.int64(25), 'FN_3': np.int64(44), 'TP_4': np.int64(6), 'TN_4': np.int64(96), 'FP_4': np.int64(16), 'FN_4': np.int64(20), 'TP_5': np.int64(3), 'TN_5': np.int64(107), 'FP_5': np.int64(11), 'FN_5': np.int64(17)}
[2025-07-13 19:18:07,639][__main__][INFO] - Inference experiment completed
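The constant min/max/avg token counts reported per split in this log follow from the tokenizer settings noted earlier (Padding:longest, Truncation: False): statistics are taken after padding, so every sequence in a split reports the length of the longest one. A small sketch of that effect, assuming plain text inputs (helper and variable names are illustrative):

import numpy as np
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("TucanoBR/Tucano-2b4-Instruct")

def token_stats(texts):
    # Padding to the longest sequence in the batch makes min == max == avg.
    enc = tokenizer(list(texts), padding="longest", truncation=False)
    lengths = np.array([len(ids) for ids in enc["input_ids"]])
    return {
        "total": len(lengths),
        "min": int(lengths.min()),
        "max": int(lengths.max()),
        "avg": float(lengths.mean()),
        "std": float(lengths.std()),
    }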
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/.hydra/config.yaml
ADDED
@@ -0,0 +1,46 @@
cache_dir: /tmp/
dataset:
  name: kamel-usp/aes_enem_dataset
  split: JBCS2025
training_params:
  seed: 42
  num_train_epochs: 20
  logging_steps: 100
  metric_for_best_model: QWK
  bf16: true
bootstrap:
  enabled: true
  n_bootstrap: 10000
  bootstrap_seed: 42
  metrics:
  - QWK
  - Macro_F1
  - Weighted_F1
post_training_results:
  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
  model:
    name: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8
    type: tucano_classification_lora
    num_labels: 6
    output_dir: ./results/
    logging_dir: ./logs/
    best_model_dir: kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8
    lora_r: 8
    lora_dropout: 0.05
    lora_alpha: 16
    lora_target_modules: all-linear
    checkpoint_path: ''
  tokenizer:
    name: TucanoBR/Tucano-2b4-Instruct
  dataset:
    grade_index: 1
    use_full_context: false
  training_params:
    weight_decay: 0.01
    warmup_ratio: 0.1
    learning_rate: 5.0e-05
    train_batch_size: 8
    eval_batch_size: 4
    gradient_accumulation_steps: 2
    gradient_checkpointing: true
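At inference time the run logs show the base checkpoint instantiated as LlamaForSequenceClassification with 6 labels and the LoRA adapter applied on top of it. A minimal way to reproduce that loading path with PEFT is sketched below; dtype and device handling in the actual script may differ.

import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

adapter_id = "kamel-usp/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8"
# Base classification model, then the fine-tuned adapter weights on top.
base = AutoModelForSequenceClassification.from_pretrained(
    "TucanoBR/Tucano-2b4-Instruct", num_labels=6, torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(base, adapter_id)
model.eval()
tokenizer = AutoTokenizer.from_pretrained("TucanoBR/Tucano-2b4-Instruct")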
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,157 @@
hydra:
  run:
    dir: inference_output/2025-07-13/19-12-58
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][HYDRA] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    loggers:
      logging_example:
        level: DEBUG
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.run.dir=inference_output/2025-07-13/19-12-58
    - hydra.mode=RUN
    task:
    - experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8
  job:
    name: run_inference_experiment
    chdir: null
    override_dirname: experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8
    id: ???
    num: ???
    config_name: config
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.1'
    cwd: /workspace/jbcs2025
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: /workspace/jbcs2025/configs
      schema: file
      provider: main
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/jbcs2025/inference_output/2025-07-13/19-12-58
    choices:
      experiments: temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: default
      hydra/hydra_logging: default
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- experiments=temp_inference/kamel-usp_jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/bootstrap_confidence_intervals.csv
ADDED
@@ -0,0 +1,2 @@
experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8,2025-07-13 19:13:03,0.22765543842637567,0.06634735802075493,0.38464170683555055,0.3182943488147956,0.15226069620931507,0.10668661958018352,0.2003830920118047,0.09369647243162119,0.19524247402313993,0.12271293814431007,0.2705875619228997,0.14787462377858962
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/evaluation_results.csv
ADDED
@@ -0,0 +1,2 @@
accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
0.2246376811594203,67.75916430879359,0.22918077183480012,0.1811594202898551,0.15410549674983645,0.2246376811594203,0.19513756825779216,0,131,6,1,9,90,13,26,3,94,39,2,3,86,1,48,16,68,44,10,0,114,4,20,2025-07-13 19:13:03,jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8
runs/slm_decoder_models/tucano2b4/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8/jbcs2025_Tucano-2b4-Instruct-tucano_classification_lora-C2-essay_only-r8-tucano_classification_lora-C2-essay_only-r8_inference_results.jsonl
ADDED
The diff for this file is too large to render.
See raw diff