abarbosa committed on
Commit
1626023
·
1 Parent(s): 5c85311

update files

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. runs/api_models/compute_bootstrap_ci.py +100 -40
  2. runs/api_models/deepseek-r1/deepseek-reasoner-zero-shot-C1/bootstrap_confidence_intervals.csv +1 -1
  3. runs/api_models/deepseek-r1/deepseek-reasoner-zero-shot-C2/bootstrap_confidence_intervals.csv +1 -1
  4. runs/api_models/deepseek-r1/deepseek-reasoner-zero-shot-C3/bootstrap_confidence_intervals.csv +1 -1
  5. runs/api_models/deepseek-r1/deepseek-reasoner-zero-shot-C4/bootstrap_confidence_intervals.csv +1 -1
  6. runs/api_models/deepseek-r1/deepseek-reasoner-zero-shot-C5/bootstrap_confidence_intervals.csv +1 -1
  7. runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C1/bootstrap_confidence_intervals.csv +1 -1
  8. runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C2/bootstrap_confidence_intervals.csv +1 -1
  9. runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C3/bootstrap_confidence_intervals.csv +1 -1
  10. runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C4/bootstrap_confidence_intervals.csv +1 -1
  11. runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C5/bootstrap_confidence_intervals.csv +1 -1
  12. runs/api_models/sabia-3/sabia-3-zero-shot-C1/bootstrap_confidence_intervals.csv +1 -1
  13. runs/api_models/sabia-3/sabia-3-zero-shot-C2/bootstrap_confidence_intervals.csv +1 -1
  14. runs/api_models/sabia-3/sabia-3-zero-shot-C3/bootstrap_confidence_intervals.csv +1 -1
  15. runs/api_models/sabia-3/sabia-3-zero-shot-C4/bootstrap_confidence_intervals.csv +1 -1
  16. runs/api_models/sabia-3/sabia-3-zero-shot-C5/bootstrap_confidence_intervals.csv +1 -1
  17. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C1-encoder_classification-C1 → jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only}/.hydra/config.yaml +1 -0
  18. runs/base_models/{mbert/jbcs2025_mbert_base-C1-encoder_classification-C1 → bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only}/.hydra/hydra.yaml +3 -3
  19. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C1-encoder_classification-C1 → jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only}/.hydra/overrides.yaml +0 -0
  20. runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only/bootstrap_confidence_intervals.csv +2 -0
  21. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C1-encoder_classification-C1 → jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only}/evaluation_results.csv +1 -1
  22. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C1-encoder_classification-C1/jbcs2025_bertimbau_base-C1-encoder_classification-C1_inference_results.jsonl → jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only/jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only_inference_results.jsonl} +0 -0
  23. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C1-encoder_classification-C1 → jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only}/run_inference_experiment.log +49 -46
  24. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2 → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only}/.hydra/config.yaml +1 -0
  25. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2 → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only}/.hydra/hydra.yaml +3 -3
  26. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2 → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only}/.hydra/overrides.yaml +0 -0
  27. runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only/bootstrap_confidence_intervals.csv +2 -0
  28. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2 → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only}/evaluation_results.csv +1 -1
  29. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2/jbcs2025_bertimbau_base-C2-encoder_classification-C2_inference_results.jsonl → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only/jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only_inference_results.jsonl} +0 -0
  30. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2 → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only}/run_inference_experiment.log +49 -46
  31. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C3-encoder_classification-C3 → jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only}/.hydra/config.yaml +1 -0
  32. runs/base_models/{mbert/jbcs2025_mbert_base-C3-encoder_classification-C3 → bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only}/.hydra/hydra.yaml +3 -3
  33. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C3-encoder_classification-C3 → jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only}/.hydra/overrides.yaml +0 -0
  34. runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only/bootstrap_confidence_intervals.csv +2 -0
  35. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C3-encoder_classification-C3 → jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only}/evaluation_results.csv +1 -1
  36. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C3-encoder_classification-C3/jbcs2025_bertimbau_base-C3-encoder_classification-C3_inference_results.jsonl → jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only/jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only_inference_results.jsonl} +0 -0
  37. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C3-encoder_classification-C3 → jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only}/run_inference_experiment.log +49 -46
  38. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4 → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only}/.hydra/config.yaml +1 -0
  39. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4 → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only}/.hydra/hydra.yaml +3 -3
  40. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4 → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only}/.hydra/overrides.yaml +0 -0
  41. runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only/bootstrap_confidence_intervals.csv +2 -0
  42. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4 → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only}/evaluation_results.csv +1 -1
  43. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4/jbcs2025_bertimbau_base-C4-encoder_classification-C4_inference_results.jsonl → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only/jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only_inference_results.jsonl} +0 -0
  44. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4 → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only}/run_inference_experiment.log +49 -46
  45. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C5-encoder_classification-C5 → jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only}/.hydra/config.yaml +1 -0
  46. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C5-encoder_classification-C5 → jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only}/.hydra/hydra.yaml +3 -3
  47. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C5-encoder_classification-C5 → jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only}/.hydra/overrides.yaml +0 -0
  48. runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only/bootstrap_confidence_intervals.csv +2 -0
  49. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C5-encoder_classification-C5 → jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only}/evaluation_results.csv +1 -1
  50. runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C5-encoder_classification-C5/jbcs2025_bertimbau_base-C5-encoder_classification-C5_inference_results.jsonl → jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only/jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only_inference_results.jsonl} +0 -0
runs/api_models/compute_bootstrap_ci.py CHANGED
@@ -1,7 +1,5 @@
1
  import argparse
2
  import json
3
- import re
4
- import sys
5
  from pathlib import Path
6
  from typing import Dict, List, Tuple
7
 
@@ -45,29 +43,38 @@ def extract_config_from_log(log_path: Path) -> Dict:
45
  return OmegaConf.create(config_dict)
46
 
47
 
48
- def load_inference_results(jsonl_path: Path) -> Tuple[np.ndarray, np.ndarray]:
49
  """
50
- Load predictions and labels from the inference results JSONL file.
51
 
52
  Returns:
53
- predictions: Array of predicted scores (from 'pontuacao' field)
54
- labels: Array of true labels (from 'label' field)
55
  """
56
- predictions = []
57
- labels = []
58
 
59
  with open(jsonl_path, 'r', encoding='utf-8') as f:
60
  for line in f:
61
  data = json.loads(line.strip())
62
- predictions.append(data['pontuacao'])
63
- labels.append(data['label'])
64
-
65
- return np.array(predictions), np.array(labels)
 
 
 
 
 
 
 
 
 
66
 
67
 
68
- def compute_bootstrap_confidence_intervals(
69
- predictions: np.ndarray,
70
- labels: np.ndarray,
71
  metrics_to_compute: List[str],
72
  cfg: DictConfig,
73
  n_bootstrap: int = 1000,
@@ -75,11 +82,17 @@ def compute_bootstrap_confidence_intervals(
75
  random_state: int = 42,
76
  ) -> Dict[str, Tuple[float, float, float]]:
77
  """
78
- Compute bootstrap confidence intervals for specified metrics.
 
 
 
 
 
 
79
 
80
  Parameters:
81
- predictions: Model predictions
82
- labels: Ground truth labels
83
  metrics_to_compute: List of metric names to compute CIs for
84
  cfg: Configuration object
85
  n_bootstrap: Number of bootstrap samples
@@ -92,24 +105,46 @@ def compute_bootstrap_confidence_intervals(
92
  if random_state is not None:
93
  np.random.seed(random_state)
94
 
95
- n_samples = len(predictions)
 
 
 
 
96
  bootstrap_metrics = {metric: [] for metric in metrics_to_compute}
97
 
98
  # Perform bootstrap sampling
99
  for _ in tqdm(range(n_bootstrap), desc="Performing Bootstrap samples"):
100
- # Sample with replacement
101
- indices = np.random.choice(n_samples, size=n_samples, replace=True)
102
- boot_predictions = predictions[indices]
103
- boot_labels = labels[indices]
 
 
 
 
104
 
105
- # Compute metrics for this bootstrap sample
106
- # For API models, predictions are already final values, not logits
107
- boot_metrics = compute_metrics((boot_predictions, boot_labels), cfg)
 
 
 
108
 
109
- # Store only the requested metrics
 
 
 
 
 
 
 
 
 
 
110
  for metric in metrics_to_compute:
111
- if metric in boot_metrics:
112
- bootstrap_metrics[metric].append(boot_metrics[metric])
 
113
 
114
  # Calculate confidence intervals
115
  alpha = 1 - confidence_level
@@ -171,7 +206,7 @@ def main():
171
  --confidence-level 0.99
172
  """
173
  parser = argparse.ArgumentParser(
174
- description='Compute bootstrap confidence intervals for API model inference results'
175
  )
176
  parser.add_argument(
177
  'experiment_dir',
@@ -238,14 +273,14 @@ def main():
238
  except Exception as e:
239
  raise RuntimeError(f"Failed to extract configuration from log file: {e}")
240
 
241
- # Load predictions and labels
242
- predictions, labels = load_inference_results(results_path)
243
- print(f"Loaded {len(predictions)} samples")
244
 
245
- # Compute bootstrap confidence intervals
246
- ci_results = compute_bootstrap_confidence_intervals(
247
- predictions=predictions,
248
- labels=labels,
249
  metrics_to_compute=args.metrics,
250
  cfg=cfg,
251
  n_bootstrap=args.n_bootstrap,
@@ -253,13 +288,38 @@ def main():
253
  random_state=seed,
254
  )
255
 
256
- # Display results
257
- print("\nBootstrap Confidence Intervals (95%):")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
258
  for metric, (mean_val, lower, upper) in ci_results.items():
259
  print(f" {metric}: {mean_val:.4f} [{lower:.4f}, {upper:.4f}]")
260
 
261
  # Save results
262
- output_path = exp_dir / f"bootstrap_confidence_intervals.csv"
263
  save_results_to_csv(experiment_id, ci_results, output_path)
264
 
265
 
 
1
  import argparse
2
  import json
 
 
3
  from pathlib import Path
4
  from typing import Dict, List, Tuple
5
 
 
43
  return OmegaConf.create(config_dict)
44
 
45
 
46
+ def load_inference_results_by_grader(jsonl_path: Path) -> Tuple[Dict[str, Dict], Dict[str, Dict]]:
47
  """
48
+ Load predictions and labels from the inference results JSONL file, organized by grader.
49
 
50
  Returns:
51
+ grader_a_data: Dictionary mapping essay_id to {'prediction': score, 'label': label}
52
+ grader_b_data: Dictionary mapping essay_id to {'prediction': score, 'label': label}
53
  """
54
+ grader_a_data = {}
55
+ grader_b_data = {}
56
 
57
  with open(jsonl_path, 'r', encoding='utf-8') as f:
58
  for line in f:
59
  data = json.loads(line.strip())
60
+ essay_id = (data['id'], data['id_prompt'], data['essay_text'])
61
+
62
+ essay_data = {
63
+ 'prediction': data['pontuacao'],
64
+ 'label': data['label']
65
+ }
66
+
67
+ if data['reference'] == 'grader_a':
68
+ grader_a_data[essay_id] = essay_data
69
+ elif data['reference'] == 'grader_b':
70
+ grader_b_data[essay_id] = essay_data
71
+ assert len(grader_a_data) == len(grader_b_data), "Mismatch in number of essays graded by A and B"
72
+ return grader_a_data, grader_b_data
73
 
74
 
75
+ def compute_bootstrap_confidence_intervals_two_graders(
76
+ grader_a_data: Dict[str, Dict],
77
+ grader_b_data: Dict[str, Dict],
78
  metrics_to_compute: List[str],
79
  cfg: DictConfig,
80
  n_bootstrap: int = 1000,
 
82
  random_state: int = 42,
83
  ) -> Dict[str, Tuple[float, float, float]]:
84
  """
85
+ Compute bootstrap confidence intervals for specified metrics using two-grader structure.
86
+
87
+ For each bootstrap sample:
88
+ 1. Sample essay IDs with replacement
89
+ 2. For each sampled essay ID, get both grader A and grader B predictions/labels
90
+ 3. Compute metrics separately for grader A and grader B
91
+ 4. Take the mean of the two grader metrics
92
 
93
  Parameters:
94
+ grader_a_data: Dictionary mapping essay_id to prediction/label for grader A
95
+ grader_b_data: Dictionary mapping essay_id to prediction/label for grader B
96
  metrics_to_compute: List of metric names to compute CIs for
97
  cfg: Configuration object
98
  n_bootstrap: Number of bootstrap samples
 
105
  if random_state is not None:
106
  np.random.seed(random_state)
107
 
108
+ # Get common essay IDs (should be the same for both graders)
109
+ essay_ids = list(grader_a_data.keys())
110
+ assert set(essay_ids) == set(grader_b_data.keys()), "Essay IDs don't match between graders"
111
+
112
+ n_essays = len(essay_ids)
113
  bootstrap_metrics = {metric: [] for metric in metrics_to_compute}
114
 
115
  # Perform bootstrap sampling
116
  for _ in tqdm(range(n_bootstrap), desc="Performing Bootstrap samples"):
117
+ # Sample indices with replacement
118
+ sampled_indices = np.random.choice(n_essays, size=n_essays, replace=True)
119
+
120
+ # Collect predictions and labels for both graders
121
+ grader_a_predictions = []
122
+ grader_a_labels = []
123
+ grader_b_predictions = []
124
+ grader_b_labels = []
125
 
126
+ for idx in sampled_indices:
127
+ essay_id = essay_ids[idx]
128
+ grader_a_predictions.append(grader_a_data[essay_id]['prediction'])
129
+ grader_a_labels.append(grader_a_data[essay_id]['label'])
130
+ grader_b_predictions.append(grader_b_data[essay_id]['prediction'])
131
+ grader_b_labels.append(grader_b_data[essay_id]['label'])
132
 
133
+ # Convert to numpy arrays
134
+ grader_a_predictions = np.array(grader_a_predictions)
135
+ grader_a_labels = np.array(grader_a_labels)
136
+ grader_b_predictions = np.array(grader_b_predictions)
137
+ grader_b_labels = np.array(grader_b_labels)
138
+
139
+ # Compute metrics for each grader
140
+ metrics_a = compute_metrics((grader_a_predictions, grader_a_labels), cfg)
141
+ metrics_b = compute_metrics((grader_b_predictions, grader_b_labels), cfg)
142
+
143
+ # Compute mean of the two grader metrics
144
  for metric in metrics_to_compute:
145
+ if metric in metrics_a and metric in metrics_b:
146
+ mean_metric = (metrics_a[metric] + metrics_b[metric]) / 2
147
+ bootstrap_metrics[metric].append(mean_metric)
148
 
149
  # Calculate confidence intervals
150
  alpha = 1 - confidence_level
 
206
  --confidence-level 0.99
207
  """
208
  parser = argparse.ArgumentParser(
209
+ description='Compute bootstrap confidence intervals for API model inference results with two-grader structure'
210
  )
211
  parser.add_argument(
212
  'experiment_dir',
 
273
  except Exception as e:
274
  raise RuntimeError(f"Failed to extract configuration from log file: {e}")
275
 
276
+ # Load predictions and labels by grader
277
+ grader_a_data, grader_b_data = load_inference_results_by_grader(results_path)
278
+ print(f"Loaded {len(grader_a_data)} essays with data from both graders")
279
 
280
+ # Compute bootstrap confidence intervals with two-grader structure
281
+ ci_results = compute_bootstrap_confidence_intervals_two_graders(
282
+ grader_a_data=grader_a_data,
283
+ grader_b_data=grader_b_data,
284
  metrics_to_compute=args.metrics,
285
  cfg=cfg,
286
  n_bootstrap=args.n_bootstrap,
 
288
  random_state=seed,
289
  )
290
 
291
+ # Also compute metrics for the full dataset (without bootstrap) for reference
292
+ all_predictions_a = np.array([data['prediction'] for data in grader_a_data.values()])
293
+ all_labels_a = np.array([data['label'] for data in grader_a_data.values()])
294
+ all_predictions_b = np.array([data['prediction'] for data in grader_b_data.values()])
295
+ all_labels_b = np.array([data['label'] for data in grader_b_data.values()])
296
+
297
+ metrics_full_a = compute_metrics((all_predictions_a, all_labels_a), cfg)
298
+ metrics_full_b = compute_metrics((all_predictions_b, all_labels_b), cfg)
299
+
300
+ print("\nFull Dataset Metrics:")
301
+ print(" Grader A:")
302
+ for metric in args.metrics:
303
+ if metric in metrics_full_a:
304
+ print(f" {metric}: {metrics_full_a[metric]:.4f}")
305
+ print(" Grader B:")
306
+ for metric in args.metrics:
307
+ if metric in metrics_full_b:
308
+ print(f" {metric}: {metrics_full_b[metric]:.4f}")
309
+ print(" Mean (A+B)/2:")
310
+ for metric in args.metrics:
311
+ if metric in metrics_full_a and metric in metrics_full_b:
312
+ mean_val = (metrics_full_a[metric] + metrics_full_b[metric]) / 2
313
+ print(f" {metric}: {mean_val:.4f}")
314
+
315
+ # Display bootstrap results
316
+ print(f"\nBootstrap Confidence Intervals ({args.confidence_level*100:.0f}%):")
317
+ print(" (Based on mean of grader A and B metrics)")
318
  for metric, (mean_val, lower, upper) in ci_results.items():
319
  print(f" {metric}: {mean_val:.4f} [{lower:.4f}, {upper:.4f}]")
320
 
321
  # Save results
322
+ output_path = exp_dir / "bootstrap_confidence_intervals.csv"
323
  save_results_to_csv(experiment_id, ci_results, output_path)
324
 
325
 
runs/api_models/deepseek-r1/deepseek-reasoner-zero-shot-C1/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- deepseek-reasoner-zero-shot-C1,2025-06-30 15:33:44,0.5065391840566376,0.42139117620280725,0.5858167517132609,0.16442557551045361,0.2215346775433057,0.15611671900668242,0.306723118942748,0.1506063999360656,0.40940278020811266,0.32520094685192064,0.49356231540594575,0.16836136855402511
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ deepseek-reasoner-zero-shot-C1,2025-06-30 19:24:13,0.5031813172396491,0.39612234405215646,0.6029211931389544,0.20679884908679796,0.2274033422818625,0.1601275797791265,0.30359755990743703,0.14346998012831053,0.4095234288772193,0.31337848309417465,0.5061183175473122,0.19273983445313753
runs/api_models/deepseek-r1/deepseek-reasoner-zero-shot-C2/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- deepseek-reasoner-zero-shot-C2,2025-06-30 15:34:23,0.01473848598300359,-0.014940334195692621,0.055896445993668054,0.07083678018936068,0.09512985172272563,0.020127342058186615,0.18882669143307435,0.16869934937488773,0.06962107856931386,0.023982700826140426,0.12434890820191224,0.10036620737577182
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ deepseek-reasoner-zero-shot-C2,2025-06-30 19:25:29,0.015085628670856775,-0.01609536786795569,0.052453535386255155,0.06854890325421084,0.07182213834437161,0.011318407960199004,0.13775287212787207,0.12643446416767307,0.06959656298664449,0.012077294685990336,0.14131656764438613,0.1292392729583958
runs/api_models/deepseek-r1/deepseek-reasoner-zero-shot-C3/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- deepseek-reasoner-zero-shot-C3,2025-06-30 15:35:03,0.42427887678877213,0.29278650188137595,0.5449246220699722,0.2521381201885962,0.24709564450221505,0.18539374481613408,0.3228920638060504,0.13749831898991632,0.3378260927471412,0.2574970316843851,0.42096414890304595,0.16346711721866086
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ deepseek-reasoner-zero-shot-C3,2025-06-30 19:26:47,0.41836228230962436,0.2489692256251884,0.5721549000685506,0.3231856744433622,0.2508176762202804,0.19202954797363142,0.31841585804142564,0.12638631006779422,0.3489492834022517,0.26249349858090687,0.4409494892377097,0.17845599065680284
runs/api_models/deepseek-r1/deepseek-reasoner-zero-shot-C4/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- deepseek-reasoner-zero-shot-C4,2025-06-30 15:35:42,0.3815712950108138,0.2803760092271128,0.47899580849660867,0.19861979926949586,0.17144527260462014,0.1258957932112163,0.2285557981324928,0.1026600049212765,0.37175290907977915,0.2863213955266201,0.45743767424989523,0.17111627872327512
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ deepseek-reasoner-zero-shot-C4,2025-06-30 19:28:03,0.38116513528991663,0.2524444433299289,0.4995197940323231,0.24707535070239423,0.18445354750863818,0.12960402116931058,0.2520732864669916,0.12246926529768104,0.3717237598742471,0.2708849808787627,0.47315576881465315,0.20227078793589043
runs/api_models/deepseek-r1/deepseek-reasoner-zero-shot-C5/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- deepseek-reasoner-zero-shot-C5,2025-06-30 15:36:21,0.5064905297336298,0.37585136953931625,0.627167414884462,0.2513160453451458,0.2822318244472843,0.216820362063635,0.3513804341294417,0.1345600720658067,0.3339847771542349,0.2537047920593666,0.41843901801470434,0.16473422595533777
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ deepseek-reasoner-zero-shot-C5,2025-06-30 19:29:20,0.5044185730458359,0.3386976025293209,0.6493071919406782,0.3106095894113573,0.2750475967576382,0.19692221304008536,0.35612601878145045,0.15920380574136508,0.34002870708452154,0.24892362385780695,0.4303997744623871,0.18147615060458014
runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C1/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- gpt-4o-2024-11-20-zero-shot-C1,2025-06-30 15:30:28,0.4875098128776781,0.4075366331732076,0.5661412785954112,0.1586046454222036,0.22039074946734208,0.15490609880315764,0.3037138278889899,0.14880772908583223,0.4310748797592803,0.34464224298367047,0.5155400944584835,0.170897851474813
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ gpt-4o-2024-11-20-zero-shot-C1,2025-06-30 19:17:49,0.48433658533825436,0.3938363183140824,0.5711858066254478,0.17734948831136543,0.22518881129681048,0.16630298512225883,0.2878183259188317,0.12151534079657289,0.4300958775306066,0.3296266094763709,0.5319377997290938,0.20231119025272293
runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C2/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- gpt-4o-2024-11-20-zero-shot-C2,2025-06-30 15:31:07,0.2072455118121436,0.05778147725394796,0.34982293350689964,0.2920414562529517,0.17292647136998113,0.1110580864444951,0.24655129780363258,0.1354932113591375,0.20964837378050505,0.13828835193510916,0.28382396163802226,0.1455356097029131
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ gpt-4o-2024-11-20-zero-shot-C2,2025-06-30 19:19:06,0.2057111902943848,0.0156862847643509,0.3869618990819648,0.3712756143176139,0.15361878269998214,0.09770302074727197,0.21631382644102345,0.11861080569375149,0.2219144484635966,0.1413571234157044,0.30646439739268355,0.16510727397697916
runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C3/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- gpt-4o-2024-11-20-zero-shot-C3,2025-06-30 15:31:46,0.3874602413243428,0.2481470939391138,0.5176722378471288,0.26952514390801496,0.24471418829414107,0.1736233183958159,0.32888693913009015,0.15526362073427424,0.26652211762769756,0.19183379129568326,0.34306527287253047,0.1512314815768472
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ gpt-4o-2024-11-20-zero-shot-C3,2025-06-30 19:20:22,0.3840952652326012,0.20135412416730708,0.5521159645854775,0.35076184041817043,0.24817043489029247,0.1736164217858921,0.32927018371031214,0.15565376192442004,0.2805536033062157,0.19552156653388297,0.36858673687915616,0.1730651703452732
runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C4/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- gpt-4o-2024-11-20-zero-shot-C4,2025-06-30 15:32:25,0.5116996703669392,0.4104212578835423,0.6037155276446341,0.19329426976109182,0.2807685632339878,0.1704651875949637,0.42070727894369825,0.25024209134873454,0.3843695067989506,0.298747939199668,0.4701118175919892,0.1713638783923212
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ gpt-4o-2024-11-20-zero-shot-C4,2025-06-30 19:21:39,0.5089531567133179,0.3888031492362075,0.6152417779964572,0.22643862876024967,0.28852928653851234,0.18681228454562476,0.3824251224257857,0.19561283788016093,0.3902781779473028,0.2826152500998546,0.49870073612105204,0.21608548602119743
runs/api_models/gpt-4o/gpt-4o-2024-11-20-zero-shot-C5/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- gpt-4o-2024-11-20-zero-shot-C5,2025-06-30 15:33:04,0.5428095384041859,0.4066260449774681,0.6651585702907594,0.2585325253132913,0.27362099303431836,0.21423232459778377,0.3347781839450608,0.12054585934727705,0.2828746236369826,0.2096485443838562,0.3605551612749459,0.15090661689108972
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ gpt-4o-2024-11-20-zero-shot-C5,2025-06-30 19:22:56,0.5394873809979558,0.3575120262106833,0.698971508885962,0.34145948267527876,0.2658013896856878,0.20354630700238951,0.3238312656226109,0.12028495862022137,0.2925305547803538,0.20843460191112617,0.38070279562069903,0.17226819370957286
runs/api_models/sabia-3/sabia-3-zero-shot-C1/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- sabia-3-zero-shot-C1,2025-06-30 15:27:09,0.6774316249330261,0.6057142857142858,0.7444916275847978,0.13877734187051205,0.3322749233375538,0.25107884260922797,0.44217262102209304,0.19109377841286507,0.6468183948171619,0.5651409581904621,0.728388842853713,0.1632478846632509
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ sabia-3-zero-shot-C1,2025-06-30 19:11:26,0.6758141520978987,0.5821561906848018,0.7574582527396938,0.17530206205489196,0.347325574090326,0.2672238537073168,0.45622434106717363,0.1890004873598568,0.6477251551391441,0.5570079559770961,0.7353169437026666,0.17830898772557058
runs/api_models/sabia-3/sabia-3-zero-shot-C2/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- sabia-3-zero-shot-C2,2025-06-30 15:27:48,0.03192278656645976,-0.014177957722872423,0.07661827209946197,0.09079622982233439,0.0752924263856506,0.043123040752351106,0.11350596521980306,0.07038292446745195,0.09842797080806479,0.05146133128447315,0.15524936783957105,0.1037880365550979
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ sabia-3-zero-shot-C2,2025-06-30 19:12:42,0.028512195484997012,-0.030817136956953983,0.08987572236215731,0.12069285931911129,0.08821109362412456,0.03657240950873027,0.15393705068531627,0.11736464117658599,0.09941741515103789,0.03854145956316684,0.1757644223312846,0.13722296276811774
runs/api_models/sabia-3/sabia-3-zero-shot-C3/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- sabia-3-zero-shot-C3,2025-06-30 15:28:28,0.3246568788369811,0.19279666995851927,0.45209812867633853,0.2593014587178193,0.21136810332780584,0.14867370449328218,0.28738962256439154,0.13871591807110936,0.285006859113549,0.21002954980503674,0.36795514373849075,0.157925593933454
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ sabia-3-zero-shot-C3,2025-06-30 19:13:59,0.32252754287380525,0.1419556961526131,0.4922296040905474,0.3502739079379343,0.21004968949354197,0.1469750587962625,0.2809477206064128,0.13397266181015033,0.3011921097511097,0.2194068220641772,0.3865792849068527,0.16717246284267553
runs/api_models/sabia-3/sabia-3-zero-shot-C4/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- sabia-3-zero-shot-C4,2025-06-30 15:29:08,0.4653994276483699,0.34018819834002006,0.5772301426746734,0.23704194433465337,0.26759160010047583,0.17743920345301964,0.3846598028320292,0.20722059937900958,0.5124443951217361,0.42347344832630857,0.6024732123251144,0.17899976399880585
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ sabia-3-zero-shot-C4,2025-06-30 19:15:16,0.4644634004271149,0.30569116709717425,0.6074495907906716,0.30175842369349737,0.2767955968709957,0.18925636717607686,0.3717433634379734,0.18248699626189652,0.5163666279003479,0.4031141694720424,0.6250066983091925,0.2218925288371501
runs/api_models/sabia-3/sabia-3-zero-shot-C5/bootstrap_confidence_intervals.csv CHANGED
@@ -1,2 +1,2 @@
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
- sabia-3-zero-shot-C5,2025-06-30 15:29:48,0.5451644712018167,0.40711410991535146,0.6741883926545115,0.26707428273916,0.3689488072678057,0.3005606300203075,0.43848899339416186,0.1379283633738544,0.4279799537243523,0.343551042281439,0.5146665075184903,0.1711154652370513
 
1
  experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ sabia-3-zero-shot-C5,2025-06-30 19:16:33,0.5392831394260602,0.35046363402046493,0.7055545310294663,0.35509089700900137,0.36099722283776575,0.28393184893449175,0.4381994677690793,0.15426761883458756,0.43190209919436773,0.33418961241763373,0.5281077284227921,0.19391811600515835
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C1-encoder_classification-C1 → jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only}/.hydra/config.yaml RENAMED
@@ -30,6 +30,7 @@ experiments:
30
  name: neuralmind/bert-base-portuguese-cased
31
  dataset:
32
  grade_index: 0
 
33
  training_params:
34
  weight_decay: 0.01
35
  warmup_ratio: 0.1
 
30
  name: neuralmind/bert-base-portuguese-cased
31
  dataset:
32
  grade_index: 0
33
+ use_full_context: false
34
  training_params:
35
  weight_decay: 0.01
36
  warmup_ratio: 0.1
runs/base_models/{mbert/jbcs2025_mbert_base-C1-encoder_classification-C1 → bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only}/.hydra/hydra.yaml RENAMED
@@ -130,18 +130,18 @@ hydra:
130
  runtime:
131
  version: 1.3.2
132
  version_base: '1.1'
133
- cwd: /home/andrebarbosa/jbcs2025
134
  config_sources:
135
  - path: hydra.conf
136
  schema: pkg
137
  provider: hydra
138
- - path: /home/andrebarbosa/jbcs2025/configs
139
  schema: file
140
  provider: main
141
  - path: ''
142
  schema: structured
143
  provider: schema
144
- output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-45-08
145
  choices:
146
  experiments: base_models/C1
147
  hydra/env: default
 
130
  runtime:
131
  version: 1.3.2
132
  version_base: '1.1'
133
+ cwd: /workspace/jbcs2025
134
  config_sources:
135
  - path: hydra.conf
136
  schema: pkg
137
  provider: hydra
138
+ - path: /workspace/jbcs2025/configs
139
  schema: file
140
  provider: main
141
  - path: ''
142
  schema: structured
143
  provider: schema
144
+ output_dir: /workspace/jbcs2025/outputs/2025-06-30/23-51-41
145
  choices:
146
  experiments: base_models/C1
147
  hydra/env: default
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C1-encoder_classification-C1 → jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only}/.hydra/overrides.yaml RENAMED
File without changes
runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only/bootstrap_confidence_intervals.csv ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only,2025-06-30 23:51:41,0.6726698793738349,0.5786694701512399,0.7587417074110893,0.18007223725984933,0.4756728951042896,0.36004609141863914,0.6232464233862081,0.2632003319675689,0.6413009122974154,0.556374600523932,0.7241688998827073,0.16779429935877532
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C1-encoder_classification-C1 → jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only}/evaluation_results.csv RENAMED
@@ -1,2 +1,2 @@
1
  accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
- 0.644927536231884,26.37521893583148,0.6742722265932337,0.007246376811594235,0.44138845418188133,0.644927536231884,0.6413771139990777,0,137,0,1,0,138,0,0,5,123,5,5,56,52,20,10,22,79,8,29,6,112,16,4,2025-06-28 17:36:00,jbcs2025_bertimbau_base-C1-encoder_classification-C1
 
1
  accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
+ 0.644927536231884,26.37521893583148,0.6742722265932337,0.007246376811594235,0.44138845418188133,0.644927536231884,0.6413771139990777,0,137,0,1,0,138,0,0,5,123,5,5,56,52,20,10,22,79,8,29,6,112,16,4,2025-06-30 23:51:41,jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C1-encoder_classification-C1/jbcs2025_bertimbau_base-C1-encoder_classification-C1_inference_results.jsonl → jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only/jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only_inference_results.jsonl} RENAMED
File without changes
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C1-encoder_classification-C1 → jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only}/run_inference_experiment.log RENAMED
@@ -1,5 +1,5 @@
1
- [2025-06-28 17:36:00,040][__main__][INFO] - Starting inference experiment
2
- [2025-06-28 17:36:00,041][__main__][INFO] - cache_dir: /tmp/
3
  dataset:
4
  name: kamel-usp/aes_enem_dataset
5
  split: JBCS2025
@@ -31,6 +31,7 @@ experiments:
31
  name: neuralmind/bert-base-portuguese-cased
32
  dataset:
33
  grade_index: 0
 
34
  training_params:
35
  weight_decay: 0.01
36
  warmup_ratio: 0.1
@@ -40,9 +41,9 @@ experiments:
40
  gradient_accumulation_steps: 1
41
  gradient_checkpointing: false
42
 
43
- [2025-06-28 17:36:00,055][__main__][INFO] - Running inference with fine-tuned HF model
44
- [2025-06-28 17:36:04,586][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
45
- [2025-06-28 17:36:04,587][transformers.configuration_utils][INFO] - Model config BertConfig {
46
  "architectures": [
47
  "BertForMaskedLM"
48
  ],
@@ -67,20 +68,20 @@ experiments:
67
  "pooler_size_per_head": 128,
68
  "pooler_type": "first_token_transform",
69
  "position_embedding_type": "absolute",
70
- "transformers_version": "4.50.3",
71
  "type_vocab_size": 2,
72
  "use_cache": true,
73
  "vocab_size": 29794
74
  }
75
 
76
- [2025-06-28 17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt
77
- [2025-06-28 17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
78
- [2025-06-28 17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json
79
- [2025-06-28 17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json
80
- [2025-06-28 17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json
81
- [2025-06-28 17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
82
- [2025-06-28 17:36:04,590][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
83
- [2025-06-28 17:36:04,591][transformers.configuration_utils][INFO] - Model config BertConfig {
84
  "architectures": [
85
  "BertForMaskedLM"
86
  ],
@@ -105,14 +106,14 @@ experiments:
105
  "pooler_size_per_head": 128,
106
  "pooler_type": "first_token_transform",
107
  "position_embedding_type": "absolute",
108
- "transformers_version": "4.50.3",
109
  "type_vocab_size": 2,
110
  "use_cache": true,
111
  "vocab_size": 29794
112
  }
113
 
114
- [2025-06-28 17:36:04,639][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
115
- [2025-06-28 17:36:04,639][transformers.configuration_utils][INFO] - Model config BertConfig {
116
  "architectures": [
117
  "BertForMaskedLM"
118
  ],
@@ -137,16 +138,18 @@ experiments:
137
  "pooler_size_per_head": 128,
138
  "pooler_type": "first_token_transform",
139
  "position_embedding_type": "absolute",
140
- "transformers_version": "4.50.3",
141
  "type_vocab_size": 2,
142
  "use_cache": true,
143
  "vocab_size": 29794
144
  }
145
 
146
- [2025-06-28 17:36:04,653][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True
147
- [2025-06-28 17:36:04,665][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C1
148
- [2025-06-28 17:36:04,870][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C1/snapshots/1ad2e0f61009276ce3c1d23b24b6f55e0eb102d8/config.json
149
- [2025-06-28 17:36:04,874][transformers.configuration_utils][INFO] - Model config BertConfig {
 
 
150
  "architectures": [
151
  "BertForSequenceClassification"
152
  ],
@@ -189,35 +192,35 @@ experiments:
189
  "position_embedding_type": "absolute",
190
  "problem_type": "single_label_classification",
191
  "torch_dtype": "float32",
192
- "transformers_version": "4.50.3",
193
  "type_vocab_size": 2,
194
  "use_cache": true,
195
  "vocab_size": 29794
196
  }
197
 
198
- [2025-06-28 17:36:04,937][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C1/snapshots/1ad2e0f61009276ce3c1d23b24b6f55e0eb102d8/model.safetensors
199
- [2025-06-28 17:36:04,937][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
200
- [2025-06-28 17:36:04,937][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
201
- [2025-06-28 17:36:05,142][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.
202
 
203
- [2025-06-28 17:36:05,142][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C1.
204
  If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
205
- [2025-06-28 17:36:05,148][transformers.training_args][INFO] - PyTorch: setting up devices
206
- [2025-06-28 17:36:05,160][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
207
- [2025-06-28 17:36:05,164][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
208
- [2025-06-28 17:36:05,180][transformers.trainer][INFO] - Using auto half precision backend
209
- [2025-06-28 17:36:08,901][__main__][INFO] - Running inference on test dataset
210
- [2025-06-28 17:36:08,902][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: prompt, reference, id, supporting_text, essay_text, essay_year, id_prompt, grades. If prompt, reference, id, supporting_text, essay_text, essay_year, id_prompt, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
211
- [2025-06-28 17:36:08,906][transformers.trainer][INFO] -
212
  ***** Running Prediction *****
213
- [2025-06-28 17:36:08,906][transformers.trainer][INFO] - Num examples = 138
214
- [2025-06-28 17:36:08,906][transformers.trainer][INFO] - Batch size = 16
215
- [2025-06-28 17:36:12,220][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C1-encoder_classification-C1_inference_results.jsonl
216
- [2025-06-28 17:36:12,224][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
217
- [2025-06-28 17:37:02,235][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
218
- [2025-06-28 17:37:02,235][__main__][INFO] - Bootstrap Confidence Intervals (95%):
219
- [2025-06-28 17:37:02,235][__main__][INFO] - QWK: 0.6727 [0.5787, 0.7587]
220
- [2025-06-28 17:37:02,235][__main__][INFO] - Macro_F1: 0.4757 [0.3600, 0.6232]
221
- [2025-06-28 17:37:02,235][__main__][INFO] - Weighted_F1: 0.6413 [0.5564, 0.7242]
222
- [2025-06-28 17:37:02,235][__main__][INFO] - Inference results: {'accuracy': 0.644927536231884, 'RMSE': 26.37521893583148, 'QWK': 0.6742722265932337, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.44138845418188133, 'Micro_F1': 0.644927536231884, 'Weighted_F1': 0.6413771139990777, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(5), 'TN_2': np.int64(123), 'FP_2': np.int64(5), 'FN_2': np.int64(5), 'TP_3': np.int64(56), 'TN_3': np.int64(52), 'FP_3': np.int64(20), 'FN_3': np.int64(10), 'TP_4': np.int64(22), 'TN_4': np.int64(79), 'FP_4': np.int64(8), 'FN_4': np.int64(29), 'TP_5': np.int64(6), 'TN_5': np.int64(112), 'FP_5': np.int64(16), 'FN_5': np.int64(4)}
223
- [2025-06-28 17:37:02,235][__main__][INFO] - Inference experiment completed
 
1
+ [2025-06-30 23:51:41,386][__main__][INFO] - Starting inference experiment
2
+ [2025-06-30 23:51:41,387][__main__][INFO] - cache_dir: /tmp/
3
  dataset:
4
  name: kamel-usp/aes_enem_dataset
5
  split: JBCS2025
 
31
  name: neuralmind/bert-base-portuguese-cased
32
  dataset:
33
  grade_index: 0
34
+ use_full_context: false
35
  training_params:
36
  weight_decay: 0.01
37
  warmup_ratio: 0.1
 
41
  gradient_accumulation_steps: 1
42
  gradient_checkpointing: false
43
 
44
+ [2025-06-30 23:51:41,389][__main__][INFO] - Running inference with fine-tuned HF model
45
+ [2025-06-30 23:51:46,517][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
46
+ [2025-06-30 23:51:46,518][transformers.configuration_utils][INFO] - Model config BertConfig {
47
  "architectures": [
48
  "BertForMaskedLM"
49
  ],
 
68
  "pooler_size_per_head": 128,
69
  "pooler_type": "first_token_transform",
70
  "position_embedding_type": "absolute",
71
+ "transformers_version": "4.53.0",
72
  "type_vocab_size": 2,
73
  "use_cache": true,
74
  "vocab_size": 29794
75
  }
76
 
77
+ [2025-06-30 23:51:46,722][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt
78
+ [2025-06-30 23:51:46,722][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
79
+ [2025-06-30 23:51:46,722][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json
80
+ [2025-06-30 23:51:46,722][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json
81
+ [2025-06-30 23:51:46,722][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json
82
+ [2025-06-30 23:51:46,722][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
83
+ [2025-06-30 23:51:46,722][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
84
+ [2025-06-30 23:51:46,723][transformers.configuration_utils][INFO] - Model config BertConfig {
85
  "architectures": [
86
  "BertForMaskedLM"
87
  ],
 
106
  "pooler_size_per_head": 128,
107
  "pooler_type": "first_token_transform",
108
  "position_embedding_type": "absolute",
109
+ "transformers_version": "4.53.0",
110
  "type_vocab_size": 2,
111
  "use_cache": true,
112
  "vocab_size": 29794
113
  }
114
 
115
+ [2025-06-30 23:51:46,749][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
116
+ [2025-06-30 23:51:46,749][transformers.configuration_utils][INFO] - Model config BertConfig {
117
  "architectures": [
118
  "BertForMaskedLM"
119
  ],
 
138
  "pooler_size_per_head": 128,
139
  "pooler_type": "first_token_transform",
140
  "position_embedding_type": "absolute",
141
+ "transformers_version": "4.53.0",
142
  "type_vocab_size": 2,
143
  "use_cache": true,
144
  "vocab_size": 29794
145
  }
146
 
147
+ [2025-06-30 23:51:46,765][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True; Use Full Context: False
148
+ [2025-06-30 23:51:46,816][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C1
149
+ [2025-06-30 23:51:46,816][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C1
150
+ [2025-06-30 23:51:47,523][__main__][INFO] - Model need 1.36 GiB to run inference and 2.58 for training
151
+ [2025-06-30 23:51:47,758][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C1/snapshots/1ad2e0f61009276ce3c1d23b24b6f55e0eb102d8/config.json
152
+ [2025-06-30 23:51:47,758][transformers.configuration_utils][INFO] - Model config BertConfig {
153
  "architectures": [
154
  "BertForSequenceClassification"
155
  ],
 
192
  "position_embedding_type": "absolute",
193
  "problem_type": "single_label_classification",
194
  "torch_dtype": "float32",
195
+ "transformers_version": "4.53.0",
196
  "type_vocab_size": 2,
197
  "use_cache": true,
198
  "vocab_size": 29794
199
  }
200
 
201
+ [2025-06-30 23:51:47,903][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C1/snapshots/1ad2e0f61009276ce3c1d23b24b6f55e0eb102d8/model.safetensors
202
+ [2025-06-30 23:51:47,903][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
203
+ [2025-06-30 23:51:47,903][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
204
+ [2025-06-30 23:51:48,334][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.
205
 
206
+ [2025-06-30 23:51:48,334][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C1.
207
  If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
208
+ [2025-06-30 23:51:48,339][transformers.training_args][INFO] - PyTorch: setting up devices
209
+ [2025-06-30 23:51:48,372][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
210
+ [2025-06-30 23:51:48,376][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
211
+ [2025-06-30 23:51:48,396][transformers.trainer][INFO] - Using auto half precision backend
212
+ [2025-06-30 23:51:51,813][__main__][INFO] - Running inference on test dataset
213
+ [2025-06-30 23:51:51,814][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: supporting_text, prompt, grades, reference, essay_text, essay_year, id, id_prompt. If supporting_text, prompt, grades, reference, essay_text, essay_year, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
214
+ [2025-06-30 23:51:51,818][transformers.trainer][INFO] -
215
  ***** Running Prediction *****
216
+ [2025-06-30 23:51:51,818][transformers.trainer][INFO] - Num examples = 138
217
+ [2025-06-30 23:51:51,818][transformers.trainer][INFO] - Batch size = 16
218
+ [2025-06-30 23:51:52,209][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C1-encoder_classification-C1-essay_only_inference_results.jsonl
219
+ [2025-06-30 23:51:52,214][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
220
+ [2025-06-30 23:53:26,617][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
221
+ [2025-06-30 23:53:26,617][__main__][INFO] - Bootstrap Confidence Intervals (95%):
222
+ [2025-06-30 23:53:26,617][__main__][INFO] - QWK: 0.6727 [0.5787, 0.7587]
223
+ [2025-06-30 23:53:26,617][__main__][INFO] - Macro_F1: 0.4757 [0.3600, 0.6232]
224
+ [2025-06-30 23:53:26,617][__main__][INFO] - Weighted_F1: 0.6413 [0.5564, 0.7242]
225
+ [2025-06-30 23:53:26,617][__main__][INFO] - Inference results: {'accuracy': 0.644927536231884, 'RMSE': 26.37521893583148, 'QWK': 0.6742722265932337, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.44138845418188133, 'Micro_F1': 0.644927536231884, 'Weighted_F1': 0.6413771139990777, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(5), 'TN_2': np.int64(123), 'FP_2': np.int64(5), 'FN_2': np.int64(5), 'TP_3': np.int64(56), 'TN_3': np.int64(52), 'FP_3': np.int64(20), 'FN_3': np.int64(10), 'TP_4': np.int64(22), 'TN_4': np.int64(79), 'FP_4': np.int64(8), 'FN_4': np.int64(29), 'TP_5': np.int64(6), 'TN_5': np.int64(112), 'FP_5': np.int64(16), 'FN_5': np.int64(4)}
226
+ [2025-06-30 23:53:26,617][__main__][INFO] - Inference experiment completed
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2 → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only}/.hydra/config.yaml RENAMED
@@ -30,6 +30,7 @@ experiments:
30
  name: neuralmind/bert-base-portuguese-cased
31
  dataset:
32
  grade_index: 1
 
33
  training_params:
34
  weight_decay: 0.01
35
  warmup_ratio: 0.1
 
30
  name: neuralmind/bert-base-portuguese-cased
31
  dataset:
32
  grade_index: 1
33
+ use_full_context: false
34
  training_params:
35
  weight_decay: 0.01
36
  warmup_ratio: 0.1
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2 → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only}/.hydra/hydra.yaml RENAMED
@@ -130,18 +130,18 @@ hydra:
130
  runtime:
131
  version: 1.3.2
132
  version_base: '1.1'
133
- cwd: /home/andrebarbosa/jbcs2025
134
  config_sources:
135
  - path: hydra.conf
136
  schema: pkg
137
  provider: hydra
138
- - path: /home/andrebarbosa/jbcs2025/configs
139
  schema: file
140
  provider: main
141
  - path: ''
142
  schema: structured
143
  provider: schema
144
- output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-37-11
145
  choices:
146
  experiments: base_models/C2
147
  hydra/env: default
 
130
  runtime:
131
  version: 1.3.2
132
  version_base: '1.1'
133
+ cwd: /workspace/jbcs2025
134
  config_sources:
135
  - path: hydra.conf
136
  schema: pkg
137
  provider: hydra
138
+ - path: /workspace/jbcs2025/configs
139
  schema: file
140
  provider: main
141
  - path: ''
142
  schema: structured
143
  provider: schema
144
+ output_dir: /workspace/jbcs2025/outputs/2025-06-30/23-53-32
145
  choices:
146
  experiments: base_models/C2
147
  hydra/env: default
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2 → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only}/.hydra/overrides.yaml RENAMED
File without changes
runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only/bootstrap_confidence_intervals.csv ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only,2025-06-30 23:53:32,0.41819188204779456,0.27759865754644286,0.5466018786751335,0.2690032211286907,0.29623085261327686,0.21542890620802888,0.3976815226515651,0.18225261644353621,0.3817868369579885,0.2993269590182539,0.46412896590642116,0.16480200688816726
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2 → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only}/evaluation_results.csv RENAMED
@@ -1,2 +1,2 @@
1
  accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
- 0.37681159420289856,55.32512598464997,0.4220445459737294,0.06521739130434778,0.2801049472150572,0.37681159420289856,0.38226236003582026,0,137,0,1,13,90,13,22,3,112,21,2,25,56,31,26,5,99,13,21,6,110,8,14,2025-06-28 17:37:11,jbcs2025_bertimbau_base-C2-encoder_classification-C2
 
1
  accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
+ 0.37681159420289856,55.32512598464997,0.4220445459737294,0.06521739130434778,0.2801049472150572,0.37681159420289856,0.38226236003582026,0,137,0,1,13,90,13,22,3,112,21,2,25,56,31,26,5,99,13,21,6,110,8,14,2025-06-30 23:53:32,jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2/jbcs2025_bertimbau_base-C2-encoder_classification-C2_inference_results.jsonl → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only/jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only_inference_results.jsonl} RENAMED
File without changes
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C2-encoder_classification-C2 → jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only}/run_inference_experiment.log RENAMED
@@ -1,5 +1,5 @@
1
- [2025-06-28 17:37:11,852][__main__][INFO] - Starting inference experiment
2
- [2025-06-28 17:37:11,853][__main__][INFO] - cache_dir: /tmp/
3
  dataset:
4
  name: kamel-usp/aes_enem_dataset
5
  split: JBCS2025
@@ -31,6 +31,7 @@ experiments:
31
  name: neuralmind/bert-base-portuguese-cased
32
  dataset:
33
  grade_index: 1
 
34
  training_params:
35
  weight_decay: 0.01
36
  warmup_ratio: 0.1
@@ -40,9 +41,9 @@ experiments:
40
  gradient_accumulation_steps: 1
41
  gradient_checkpointing: false
42
 
43
- [2025-06-28 17:37:11,866][__main__][INFO] - Running inference with fine-tuned HF model
44
- [2025-06-28 17:37:16,964][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
45
- [2025-06-28 17:37:16,965][transformers.configuration_utils][INFO] - Model config BertConfig {
46
  "architectures": [
47
  "BertForMaskedLM"
48
  ],
@@ -67,20 +68,20 @@ experiments:
67
  "pooler_size_per_head": 128,
68
  "pooler_type": "first_token_transform",
69
  "position_embedding_type": "absolute",
70
- "transformers_version": "4.50.3",
71
  "type_vocab_size": 2,
72
  "use_cache": true,
73
  "vocab_size": 29794
74
  }
75
 
76
- [2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt
77
- [2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
78
- [2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json
79
- [2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json
80
- [2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json
81
- [2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
82
- [2025-06-28 17:37:16,966][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
83
- [2025-06-28 17:37:16,967][transformers.configuration_utils][INFO] - Model config BertConfig {
84
  "architectures": [
85
  "BertForMaskedLM"
86
  ],
@@ -105,14 +106,14 @@ experiments:
105
  "pooler_size_per_head": 128,
106
  "pooler_type": "first_token_transform",
107
  "position_embedding_type": "absolute",
108
- "transformers_version": "4.50.3",
109
  "type_vocab_size": 2,
110
  "use_cache": true,
111
  "vocab_size": 29794
112
  }
113
 
114
- [2025-06-28 17:37:16,994][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
115
- [2025-06-28 17:37:16,994][transformers.configuration_utils][INFO] - Model config BertConfig {
116
  "architectures": [
117
  "BertForMaskedLM"
118
  ],
@@ -137,16 +138,18 @@ experiments:
137
  "pooler_size_per_head": 128,
138
  "pooler_type": "first_token_transform",
139
  "position_embedding_type": "absolute",
140
- "transformers_version": "4.50.3",
141
  "type_vocab_size": 2,
142
  "use_cache": true,
143
  "vocab_size": 29794
144
  }
145
 
146
- [2025-06-28 17:37:17,008][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True
147
- [2025-06-28 17:37:17,060][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C2
148
- [2025-06-28 17:37:17,290][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C2/snapshots/3afae7b80c36bf0042b19778620a0ad1135b7135/config.json
149
- [2025-06-28 17:37:17,293][transformers.configuration_utils][INFO] - Model config BertConfig {
 
 
150
  "architectures": [
151
  "BertForSequenceClassification"
152
  ],
@@ -189,35 +192,35 @@ experiments:
189
  "position_embedding_type": "absolute",
190
  "problem_type": "single_label_classification",
191
  "torch_dtype": "float32",
192
- "transformers_version": "4.50.3",
193
  "type_vocab_size": 2,
194
  "use_cache": true,
195
  "vocab_size": 29794
196
  }
197
 
198
- [2025-06-28 17:37:17,352][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C2/snapshots/3afae7b80c36bf0042b19778620a0ad1135b7135/model.safetensors
199
- [2025-06-28 17:37:17,353][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
200
- [2025-06-28 17:37:17,353][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
201
- [2025-06-28 17:37:17,545][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.
202
 
203
- [2025-06-28 17:37:17,545][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C2.
204
  If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
205
- [2025-06-28 17:37:17,551][transformers.training_args][INFO] - PyTorch: setting up devices
206
- [2025-06-28 17:37:17,563][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
207
- [2025-06-28 17:37:17,567][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
208
- [2025-06-28 17:37:17,583][transformers.trainer][INFO] - Using auto half precision backend
209
- [2025-06-28 17:37:21,074][__main__][INFO] - Running inference on test dataset
210
- [2025-06-28 17:37:21,075][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_text, id, prompt, id_prompt, supporting_text, essay_year, reference. If grades, essay_text, id, prompt, id_prompt, supporting_text, essay_year, reference are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
211
- [2025-06-28 17:37:21,079][transformers.trainer][INFO] -
212
  ***** Running Prediction *****
213
- [2025-06-28 17:37:21,079][transformers.trainer][INFO] - Num examples = 138
214
- [2025-06-28 17:37:21,079][transformers.trainer][INFO] - Batch size = 16
215
- [2025-06-28 17:37:24,400][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C2-encoder_classification-C2_inference_results.jsonl
216
- [2025-06-28 17:37:24,403][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
217
- [2025-06-28 17:38:13,623][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
218
- [2025-06-28 17:38:13,623][__main__][INFO] - Bootstrap Confidence Intervals (95%):
219
- [2025-06-28 17:38:13,623][__main__][INFO] - QWK: 0.4182 [0.2776, 0.5466]
220
- [2025-06-28 17:38:13,623][__main__][INFO] - Macro_F1: 0.2962 [0.2154, 0.3977]
221
- [2025-06-28 17:38:13,624][__main__][INFO] - Weighted_F1: 0.3818 [0.2993, 0.4641]
222
- [2025-06-28 17:38:13,624][__main__][INFO] - Inference results: {'accuracy': 0.37681159420289856, 'RMSE': 55.32512598464997, 'QWK': 0.4220445459737294, 'HDIV': 0.06521739130434778, 'Macro_F1': 0.2801049472150572, 'Micro_F1': 0.37681159420289856, 'Weighted_F1': 0.38226236003582026, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(13), 'TN_1': np.int64(90), 'FP_1': np.int64(13), 'FN_1': np.int64(22), 'TP_2': np.int64(3), 'TN_2': np.int64(112), 'FP_2': np.int64(21), 'FN_2': np.int64(2), 'TP_3': np.int64(25), 'TN_3': np.int64(56), 'FP_3': np.int64(31), 'FN_3': np.int64(26), 'TP_4': np.int64(5), 'TN_4': np.int64(99), 'FP_4': np.int64(13), 'FN_4': np.int64(21), 'TP_5': np.int64(6), 'TN_5': np.int64(110), 'FP_5': np.int64(8), 'FN_5': np.int64(14)}
223
- [2025-06-28 17:38:13,624][__main__][INFO] - Inference experiment completed
 
1
+ [2025-06-30 23:53:32,300][__main__][INFO] - Starting inference experiment
2
+ [2025-06-30 23:53:32,301][__main__][INFO] - cache_dir: /tmp/
3
  dataset:
4
  name: kamel-usp/aes_enem_dataset
5
  split: JBCS2025
 
31
  name: neuralmind/bert-base-portuguese-cased
32
  dataset:
33
  grade_index: 1
34
+ use_full_context: false
35
  training_params:
36
  weight_decay: 0.01
37
  warmup_ratio: 0.1
 
41
  gradient_accumulation_steps: 1
42
  gradient_checkpointing: false
43
 
44
+ [2025-06-30 23:53:32,303][__main__][INFO] - Running inference with fine-tuned HF model
45
+ [2025-06-30 23:53:37,072][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
46
+ [2025-06-30 23:53:37,073][transformers.configuration_utils][INFO] - Model config BertConfig {
47
  "architectures": [
48
  "BertForMaskedLM"
49
  ],
 
68
  "pooler_size_per_head": 128,
69
  "pooler_type": "first_token_transform",
70
  "position_embedding_type": "absolute",
71
+ "transformers_version": "4.53.0",
72
  "type_vocab_size": 2,
73
  "use_cache": true,
74
  "vocab_size": 29794
75
  }
76
 
77
+ [2025-06-30 23:53:37,279][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt
78
+ [2025-06-30 23:53:37,279][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
79
+ [2025-06-30 23:53:37,279][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json
80
+ [2025-06-30 23:53:37,279][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json
81
+ [2025-06-30 23:53:37,279][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json
82
+ [2025-06-30 23:53:37,279][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
83
+ [2025-06-30 23:53:37,279][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
84
+ [2025-06-30 23:53:37,280][transformers.configuration_utils][INFO] - Model config BertConfig {
85
  "architectures": [
86
  "BertForMaskedLM"
87
  ],
 
106
  "pooler_size_per_head": 128,
107
  "pooler_type": "first_token_transform",
108
  "position_embedding_type": "absolute",
109
+ "transformers_version": "4.53.0",
110
  "type_vocab_size": 2,
111
  "use_cache": true,
112
  "vocab_size": 29794
113
  }
114
 
115
+ [2025-06-30 23:53:37,305][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
116
+ [2025-06-30 23:53:37,305][transformers.configuration_utils][INFO] - Model config BertConfig {
117
  "architectures": [
118
  "BertForMaskedLM"
119
  ],
 
138
  "pooler_size_per_head": 128,
139
  "pooler_type": "first_token_transform",
140
  "position_embedding_type": "absolute",
141
+ "transformers_version": "4.53.0",
142
  "type_vocab_size": 2,
143
  "use_cache": true,
144
  "vocab_size": 29794
145
  }
146
 
147
+ [2025-06-30 23:53:37,322][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True; Use Full Context: False
148
+ [2025-06-30 23:53:37,526][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C2
149
+ [2025-06-30 23:53:37,526][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C2
150
+ [2025-06-30 23:53:38,363][__main__][INFO] - Model need 1.36 GiB to run inference and 2.58 for training
151
+ [2025-06-30 23:53:39,154][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C2/snapshots/3afae7b80c36bf0042b19778620a0ad1135b7135/config.json
152
+ [2025-06-30 23:53:39,155][transformers.configuration_utils][INFO] - Model config BertConfig {
153
  "architectures": [
154
  "BertForSequenceClassification"
155
  ],
 
192
  "position_embedding_type": "absolute",
193
  "problem_type": "single_label_classification",
194
  "torch_dtype": "float32",
195
+ "transformers_version": "4.53.0",
196
  "type_vocab_size": 2,
197
  "use_cache": true,
198
  "vocab_size": 29794
199
  }
200
 
201
+ [2025-06-30 23:53:51,890][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C2/snapshots/3afae7b80c36bf0042b19778620a0ad1135b7135/model.safetensors
202
+ [2025-06-30 23:53:51,891][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
203
+ [2025-06-30 23:53:51,891][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
204
+ [2025-06-30 23:53:52,245][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.
205
 
206
+ [2025-06-30 23:53:52,245][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C2.
207
  If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
208
+ [2025-06-30 23:53:52,251][transformers.training_args][INFO] - PyTorch: setting up devices
209
+ [2025-06-30 23:53:52,307][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
210
+ [2025-06-30 23:53:52,312][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
211
+ [2025-06-30 23:53:52,330][transformers.trainer][INFO] - Using auto half precision backend
212
+ [2025-06-30 23:53:55,801][__main__][INFO] - Running inference on test dataset
213
+ [2025-06-30 23:53:55,802][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id, reference, essay_year, supporting_text, prompt, grades, essay_text, id_prompt. If id, reference, essay_year, supporting_text, prompt, grades, essay_text, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
214
+ [2025-06-30 23:53:55,806][transformers.trainer][INFO] -
215
  ***** Running Prediction *****
216
+ [2025-06-30 23:53:55,806][transformers.trainer][INFO] - Num examples = 138
217
+ [2025-06-30 23:53:55,806][transformers.trainer][INFO] - Batch size = 16
218
+ [2025-06-30 23:53:56,214][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C2-encoder_classification-C2-essay_only_inference_results.jsonl
219
+ [2025-06-30 23:53:56,220][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
220
+ [2025-06-30 23:55:32,845][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
221
+ [2025-06-30 23:55:32,845][__main__][INFO] - Bootstrap Confidence Intervals (95%):
222
+ [2025-06-30 23:55:32,845][__main__][INFO] - QWK: 0.4182 [0.2776, 0.5466]
223
+ [2025-06-30 23:55:32,845][__main__][INFO] - Macro_F1: 0.2962 [0.2154, 0.3977]
224
+ [2025-06-30 23:55:32,845][__main__][INFO] - Weighted_F1: 0.3818 [0.2993, 0.4641]
225
+ [2025-06-30 23:55:32,845][__main__][INFO] - Inference results: {'accuracy': 0.37681159420289856, 'RMSE': 55.32512598464997, 'QWK': 0.4220445459737294, 'HDIV': 0.06521739130434778, 'Macro_F1': 0.2801049472150572, 'Micro_F1': 0.37681159420289856, 'Weighted_F1': 0.38226236003582026, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(13), 'TN_1': np.int64(90), 'FP_1': np.int64(13), 'FN_1': np.int64(22), 'TP_2': np.int64(3), 'TN_2': np.int64(112), 'FP_2': np.int64(21), 'FN_2': np.int64(2), 'TP_3': np.int64(25), 'TN_3': np.int64(56), 'FP_3': np.int64(31), 'FN_3': np.int64(26), 'TP_4': np.int64(5), 'TN_4': np.int64(99), 'FP_4': np.int64(13), 'FN_4': np.int64(21), 'TP_5': np.int64(6), 'TN_5': np.int64(110), 'FP_5': np.int64(8), 'FN_5': np.int64(14)}
226
+ [2025-06-30 23:55:32,845][__main__][INFO] - Inference experiment completed
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C3-encoder_classification-C3 → jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only}/.hydra/config.yaml RENAMED
@@ -30,6 +30,7 @@ experiments:
30
  name: neuralmind/bert-base-portuguese-cased
31
  dataset:
32
  grade_index: 2
 
33
  training_params:
34
  weight_decay: 0.01
35
  warmup_ratio: 0.1
 
30
  name: neuralmind/bert-base-portuguese-cased
31
  dataset:
32
  grade_index: 2
33
+ use_full_context: false
34
  training_params:
35
  weight_decay: 0.01
36
  warmup_ratio: 0.1
runs/base_models/{mbert/jbcs2025_mbert_base-C3-encoder_classification-C3 → bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only}/.hydra/hydra.yaml RENAMED
@@ -130,18 +130,18 @@ hydra:
130
  runtime:
131
  version: 1.3.2
132
  version_base: '1.1'
133
- cwd: /home/andrebarbosa/jbcs2025
134
  config_sources:
135
  - path: hydra.conf
136
  schema: pkg
137
  provider: hydra
138
- - path: /home/andrebarbosa/jbcs2025/configs
139
  schema: file
140
  provider: main
141
  - path: ''
142
  schema: structured
143
  provider: schema
144
- output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-49-33
145
  choices:
146
  experiments: base_models/C3
147
  hydra/env: default
 
130
  runtime:
131
  version: 1.3.2
132
  version_base: '1.1'
133
+ cwd: /workspace/jbcs2025
134
  config_sources:
135
  - path: hydra.conf
136
  schema: pkg
137
  provider: hydra
138
+ - path: /workspace/jbcs2025/configs
139
  schema: file
140
  provider: main
141
  - path: ''
142
  schema: structured
143
  provider: schema
144
+ output_dir: /workspace/jbcs2025/outputs/2025-06-30/23-55-38
145
  choices:
146
  experiments: base_models/C3
147
  hydra/env: default
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C3-encoder_classification-C3 → jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only}/.hydra/overrides.yaml RENAMED
File without changes
runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only/bootstrap_confidence_intervals.csv ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only,2025-06-30 23:55:38,0.3442546344979946,0.20848447033589465,0.47933895194622367,0.270854481610329,0.27540748660610137,0.20263838658028993,0.36522069296926984,0.1625823063889799,0.33565410439112764,0.25734749784644845,0.4165551974170723,0.15920769957062386
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C3-encoder_classification-C3 → jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only}/evaluation_results.csv RENAMED
@@ -1,2 +1,2 @@
1
  accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
- 0.37681159420289856,52.64042641120627,0.3452054794520547,0.09420289855072461,0.25943499029705924,0.37681159420289856,0.33380294701134283,0,137,0,1,0,109,0,29,13,101,19,5,20,71,22,25,17,67,33,21,2,119,12,5,2025-06-28 17:38:23,jbcs2025_bertimbau_base-C3-encoder_classification-C3
 
1
  accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
+ 0.37681159420289856,52.64042641120627,0.3452054794520547,0.09420289855072461,0.25943499029705924,0.37681159420289856,0.33380294701134283,0,137,0,1,0,109,0,29,13,101,19,5,20,71,22,25,17,67,33,21,2,119,12,5,2025-06-30 23:55:38,jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C3-encoder_classification-C3/jbcs2025_bertimbau_base-C3-encoder_classification-C3_inference_results.jsonl → jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only/jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only_inference_results.jsonl} RENAMED
File without changes
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C3-encoder_classification-C3 → jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only}/run_inference_experiment.log RENAMED
@@ -1,5 +1,5 @@
1
- [2025-06-28 17:38:23,230][__main__][INFO] - Starting inference experiment
2
- [2025-06-28 17:38:23,232][__main__][INFO] - cache_dir: /tmp/
3
  dataset:
4
  name: kamel-usp/aes_enem_dataset
5
  split: JBCS2025
@@ -31,6 +31,7 @@ experiments:
31
  name: neuralmind/bert-base-portuguese-cased
32
  dataset:
33
  grade_index: 2
 
34
  training_params:
35
  weight_decay: 0.01
36
  warmup_ratio: 0.1
@@ -40,9 +41,9 @@ experiments:
40
  gradient_accumulation_steps: 1
41
  gradient_checkpointing: false
42
 
43
- [2025-06-28 17:38:23,244][__main__][INFO] - Running inference with fine-tuned HF model
44
- [2025-06-28 17:38:28,780][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
45
- [2025-06-28 17:38:28,784][transformers.configuration_utils][INFO] - Model config BertConfig {
46
  "architectures": [
47
  "BertForMaskedLM"
48
  ],
@@ -67,20 +68,20 @@ experiments:
67
  "pooler_size_per_head": 128,
68
  "pooler_type": "first_token_transform",
69
  "position_embedding_type": "absolute",
70
- "transformers_version": "4.50.3",
71
  "type_vocab_size": 2,
72
  "use_cache": true,
73
  "vocab_size": 29794
74
  }
75
 
76
- [2025-06-28 17:38:28,787][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt
77
- [2025-06-28 17:38:28,787][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
78
- [2025-06-28 17:38:28,787][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json
79
- [2025-06-28 17:38:28,787][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json
80
- [2025-06-28 17:38:28,787][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json
81
- [2025-06-28 17:38:28,788][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
82
- [2025-06-28 17:38:28,789][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
83
- [2025-06-28 17:38:28,791][transformers.configuration_utils][INFO] - Model config BertConfig {
84
  "architectures": [
85
  "BertForMaskedLM"
86
  ],
@@ -105,14 +106,14 @@ experiments:
105
  "pooler_size_per_head": 128,
106
  "pooler_type": "first_token_transform",
107
  "position_embedding_type": "absolute",
108
- "transformers_version": "4.50.3",
109
  "type_vocab_size": 2,
110
  "use_cache": true,
111
  "vocab_size": 29794
112
  }
113
 
114
- [2025-06-28 17:38:28,843][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
115
- [2025-06-28 17:38:28,844][transformers.configuration_utils][INFO] - Model config BertConfig {
116
  "architectures": [
117
  "BertForMaskedLM"
118
  ],
@@ -137,16 +138,18 @@ experiments:
137
  "pooler_size_per_head": 128,
138
  "pooler_type": "first_token_transform",
139
  "position_embedding_type": "absolute",
140
- "transformers_version": "4.50.3",
141
  "type_vocab_size": 2,
142
  "use_cache": true,
143
  "vocab_size": 29794
144
  }
145
 
146
- [2025-06-28 17:38:28,858][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True
147
- [2025-06-28 17:38:28,913][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C3
148
- [2025-06-28 17:38:29,119][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C3/snapshots/bad03f1db697f1fb612e4d74bb55d6f0e8cd7a16/config.json
149
- [2025-06-28 17:38:29,122][transformers.configuration_utils][INFO] - Model config BertConfig {
 
 
150
  "architectures": [
151
  "BertForSequenceClassification"
152
  ],
@@ -189,35 +192,35 @@ experiments:
189
  "position_embedding_type": "absolute",
190
  "problem_type": "single_label_classification",
191
  "torch_dtype": "float32",
192
- "transformers_version": "4.50.3",
193
  "type_vocab_size": 2,
194
  "use_cache": true,
195
  "vocab_size": 29794
196
  }
197
 
198
- [2025-06-28 17:38:29,182][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C3/snapshots/bad03f1db697f1fb612e4d74bb55d6f0e8cd7a16/model.safetensors
199
- [2025-06-28 17:38:29,183][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
200
- [2025-06-28 17:38:29,183][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
201
- [2025-06-28 17:38:29,376][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.
202
 
203
- [2025-06-28 17:38:29,376][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C3.
204
  If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
205
- [2025-06-28 17:38:29,382][transformers.training_args][INFO] - PyTorch: setting up devices
206
- [2025-06-28 17:38:29,395][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
207
- [2025-06-28 17:38:29,398][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
208
- [2025-06-28 17:38:29,414][transformers.trainer][INFO] - Using auto half precision backend
209
- [2025-06-28 17:38:32,887][__main__][INFO] - Running inference on test dataset
210
- [2025-06-28 17:38:32,890][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, prompt, id, id_prompt, supporting_text, essay_text, essay_year, reference. If grades, prompt, id, id_prompt, supporting_text, essay_text, essay_year, reference are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
211
- [2025-06-28 17:38:32,909][transformers.trainer][INFO] -
212
  ***** Running Prediction *****
213
- [2025-06-28 17:38:32,909][transformers.trainer][INFO] - Num examples = 138
214
- [2025-06-28 17:38:32,910][transformers.trainer][INFO] - Batch size = 16
215
- [2025-06-28 17:38:36,240][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C3-encoder_classification-C3_inference_results.jsonl
216
- [2025-06-28 17:38:36,244][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
217
- [2025-06-28 17:39:26,821][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
218
- [2025-06-28 17:39:26,821][__main__][INFO] - Bootstrap Confidence Intervals (95%):
219
- [2025-06-28 17:39:26,821][__main__][INFO] - QWK: 0.3443 [0.2085, 0.4793]
220
- [2025-06-28 17:39:26,821][__main__][INFO] - Macro_F1: 0.2754 [0.2026, 0.3652]
221
- [2025-06-28 17:39:26,821][__main__][INFO] - Weighted_F1: 0.3357 [0.2573, 0.4166]
222
- [2025-06-28 17:39:26,821][__main__][INFO] - Inference results: {'accuracy': 0.37681159420289856, 'RMSE': 52.64042641120627, 'QWK': 0.3452054794520547, 'HDIV': 0.09420289855072461, 'Macro_F1': 0.25943499029705924, 'Micro_F1': 0.37681159420289856, 'Weighted_F1': 0.33380294701134283, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(109), 'FP_1': np.int64(0), 'FN_1': np.int64(29), 'TP_2': np.int64(13), 'TN_2': np.int64(101), 'FP_2': np.int64(19), 'FN_2': np.int64(5), 'TP_3': np.int64(20), 'TN_3': np.int64(71), 'FP_3': np.int64(22), 'FN_3': np.int64(25), 'TP_4': np.int64(17), 'TN_4': np.int64(67), 'FP_4': np.int64(33), 'FN_4': np.int64(21), 'TP_5': np.int64(2), 'TN_5': np.int64(119), 'FP_5': np.int64(12), 'FN_5': np.int64(5)}
223
- [2025-06-28 17:39:26,821][__main__][INFO] - Inference experiment completed
 
1
+ [2025-06-30 23:55:38,582][__main__][INFO] - Starting inference experiment
2
+ [2025-06-30 23:55:38,583][__main__][INFO] - cache_dir: /tmp/
3
  dataset:
4
  name: kamel-usp/aes_enem_dataset
5
  split: JBCS2025
 
31
  name: neuralmind/bert-base-portuguese-cased
32
  dataset:
33
  grade_index: 2
34
+ use_full_context: false
35
  training_params:
36
  weight_decay: 0.01
37
  warmup_ratio: 0.1
 
41
  gradient_accumulation_steps: 1
42
  gradient_checkpointing: false
43
 
44
+ [2025-06-30 23:55:38,585][__main__][INFO] - Running inference with fine-tuned HF model
45
+ [2025-06-30 23:55:44,174][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
46
+ [2025-06-30 23:55:44,176][transformers.configuration_utils][INFO] - Model config BertConfig {
47
  "architectures": [
48
  "BertForMaskedLM"
49
  ],
 
68
  "pooler_size_per_head": 128,
69
  "pooler_type": "first_token_transform",
70
  "position_embedding_type": "absolute",
71
+ "transformers_version": "4.53.0",
72
  "type_vocab_size": 2,
73
  "use_cache": true,
74
  "vocab_size": 29794
75
  }
76
 
77
+ [2025-06-30 23:55:44,390][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt
78
+ [2025-06-30 23:55:44,390][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
79
+ [2025-06-30 23:55:44,390][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json
80
+ [2025-06-30 23:55:44,390][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json
81
+ [2025-06-30 23:55:44,390][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json
82
+ [2025-06-30 23:55:44,391][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
83
+ [2025-06-30 23:55:44,391][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
84
+ [2025-06-30 23:55:44,391][transformers.configuration_utils][INFO] - Model config BertConfig {
85
  "architectures": [
86
  "BertForMaskedLM"
87
  ],
 
106
  "pooler_size_per_head": 128,
107
  "pooler_type": "first_token_transform",
108
  "position_embedding_type": "absolute",
109
+ "transformers_version": "4.53.0",
110
  "type_vocab_size": 2,
111
  "use_cache": true,
112
  "vocab_size": 29794
113
  }
114
 
115
+ [2025-06-30 23:55:44,421][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
116
+ [2025-06-30 23:55:44,422][transformers.configuration_utils][INFO] - Model config BertConfig {
117
  "architectures": [
118
  "BertForMaskedLM"
119
  ],
 
138
  "pooler_size_per_head": 128,
139
  "pooler_type": "first_token_transform",
140
  "position_embedding_type": "absolute",
141
+ "transformers_version": "4.53.0",
142
  "type_vocab_size": 2,
143
  "use_cache": true,
144
  "vocab_size": 29794
145
  }
146
 
147
+ [2025-06-30 23:55:44,438][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True; Use Full Context: False
148
+ [2025-06-30 23:55:44,646][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C3
149
+ [2025-06-30 23:55:44,646][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C3
150
+ [2025-06-30 23:55:45,504][__main__][INFO] - Model need 1.36 GiB to run inference and 2.58 for training
151
+ [2025-06-30 23:55:46,269][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C3/snapshots/bad03f1db697f1fb612e4d74bb55d6f0e8cd7a16/config.json
152
+ [2025-06-30 23:55:46,270][transformers.configuration_utils][INFO] - Model config BertConfig {
153
  "architectures": [
154
  "BertForSequenceClassification"
155
  ],
 
192
  "position_embedding_type": "absolute",
193
  "problem_type": "single_label_classification",
194
  "torch_dtype": "float32",
195
+ "transformers_version": "4.53.0",
196
  "type_vocab_size": 2,
197
  "use_cache": true,
198
  "vocab_size": 29794
199
  }
200
 
201
+ [2025-06-30 23:55:59,432][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C3/snapshots/bad03f1db697f1fb612e4d74bb55d6f0e8cd7a16/model.safetensors
202
+ [2025-06-30 23:55:59,433][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
203
+ [2025-06-30 23:55:59,433][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
204
+ [2025-06-30 23:55:59,824][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.
205
 
206
+ [2025-06-30 23:55:59,825][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C3.
207
  If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
208
+ [2025-06-30 23:55:59,830][transformers.training_args][INFO] - PyTorch: setting up devices
209
+ [2025-06-30 23:55:59,868][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
210
+ [2025-06-30 23:55:59,872][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
211
+ [2025-06-30 23:55:59,891][transformers.trainer][INFO] - Using auto half precision backend
212
+ [2025-06-30 23:56:03,371][__main__][INFO] - Running inference on test dataset
213
+ [2025-06-30 23:56:03,372][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: reference, essay_text, essay_year, id, supporting_text, id_prompt, grades, prompt. If reference, essay_text, essay_year, id, supporting_text, id_prompt, grades, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
214
+ [2025-06-30 23:56:03,376][transformers.trainer][INFO] -
215
  ***** Running Prediction *****
216
+ [2025-06-30 23:56:03,376][transformers.trainer][INFO] - Num examples = 138
217
+ [2025-06-30 23:56:03,377][transformers.trainer][INFO] - Batch size = 16
218
+ [2025-06-30 23:56:03,760][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C3-encoder_classification-C3-essay_only_inference_results.jsonl
219
+ [2025-06-30 23:56:03,767][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
220
+ [2025-06-30 23:57:39,277][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
221
+ [2025-06-30 23:57:39,277][__main__][INFO] - Bootstrap Confidence Intervals (95%):
222
+ [2025-06-30 23:57:39,277][__main__][INFO] - QWK: 0.3443 [0.2085, 0.4793]
223
+ [2025-06-30 23:57:39,277][__main__][INFO] - Macro_F1: 0.2754 [0.2026, 0.3652]
224
+ [2025-06-30 23:57:39,277][__main__][INFO] - Weighted_F1: 0.3357 [0.2573, 0.4166]
225
+ [2025-06-30 23:57:39,277][__main__][INFO] - Inference results: {'accuracy': 0.37681159420289856, 'RMSE': 52.64042641120627, 'QWK': 0.3452054794520547, 'HDIV': 0.09420289855072461, 'Macro_F1': 0.25943499029705924, 'Micro_F1': 0.37681159420289856, 'Weighted_F1': 0.33380294701134283, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(109), 'FP_1': np.int64(0), 'FN_1': np.int64(29), 'TP_2': np.int64(13), 'TN_2': np.int64(101), 'FP_2': np.int64(19), 'FN_2': np.int64(5), 'TP_3': np.int64(20), 'TN_3': np.int64(71), 'FP_3': np.int64(22), 'FN_3': np.int64(25), 'TP_4': np.int64(17), 'TN_4': np.int64(67), 'FP_4': np.int64(33), 'FN_4': np.int64(21), 'TP_5': np.int64(2), 'TN_5': np.int64(119), 'FP_5': np.int64(12), 'FN_5': np.int64(5)}
226
+ [2025-06-30 23:57:39,277][__main__][INFO] - Inference experiment completed
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4 → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only}/.hydra/config.yaml RENAMED
@@ -30,6 +30,7 @@ experiments:
30
  name: neuralmind/bert-base-portuguese-cased
31
  dataset:
32
  grade_index: 3
 
33
  training_params:
34
  weight_decay: 0.01
35
  warmup_ratio: 0.1
 
30
  name: neuralmind/bert-base-portuguese-cased
31
  dataset:
32
  grade_index: 3
33
+ use_full_context: false
34
  training_params:
35
  weight_decay: 0.01
36
  warmup_ratio: 0.1
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4 → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only}/.hydra/hydra.yaml RENAMED
@@ -130,18 +130,18 @@ hydra:
130
  runtime:
131
  version: 1.3.2
132
  version_base: '1.1'
133
- cwd: /home/andrebarbosa/jbcs2025
134
  config_sources:
135
  - path: hydra.conf
136
  schema: pkg
137
  provider: hydra
138
- - path: /home/andrebarbosa/jbcs2025/configs
139
  schema: file
140
  provider: main
141
  - path: ''
142
  schema: structured
143
  provider: schema
144
- output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-39-36
145
  choices:
146
  experiments: base_models/C4
147
  hydra/env: default
 
130
  runtime:
131
  version: 1.3.2
132
  version_base: '1.1'
133
+ cwd: /workspace/jbcs2025
134
  config_sources:
135
  - path: hydra.conf
136
  schema: pkg
137
  provider: hydra
138
+ - path: /workspace/jbcs2025/configs
139
  schema: file
140
  provider: main
141
  - path: ''
142
  schema: structured
143
  provider: schema
144
+ output_dir: /workspace/jbcs2025/outputs/2025-06-30/23-57-45
145
  choices:
146
  experiments: base_models/C4
147
  hydra/env: default
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4 → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only}/.hydra/overrides.yaml RENAMED
File without changes
runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only/bootstrap_confidence_intervals.csv ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only,2025-06-30 23:57:45,0.623338029229533,0.5110704244499952,0.7250524714839471,0.21398204703395196,0.41365346789602125,0.2906398052196123,0.5906355015808844,0.2999956963612721,0.6556936287214997,0.5748725140399749,0.7321161735801723,0.15724365954019748
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4 → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only}/evaluation_results.csv RENAMED
@@ -1,2 +1,2 @@
1
  accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
- 0.644927536231884,26.37521893583148,0.6258134490238612,0.007246376811594235,0.36114488348530904,0.644927536231884,0.6545879036165807,0,137,0,1,0,137,0,1,5,118,11,4,51,49,13,25,30,74,18,16,3,126,7,2,2025-06-28 17:39:36,jbcs2025_bertimbau_base-C4-encoder_classification-C4
 
1
  accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
+ 0.644927536231884,26.37521893583148,0.6258134490238612,0.007246376811594235,0.36114488348530904,0.644927536231884,0.6545879036165807,0,137,0,1,0,137,0,1,5,118,11,4,51,49,13,25,30,74,18,16,3,126,7,2,2025-06-30 23:57:45,jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4/jbcs2025_bertimbau_base-C4-encoder_classification-C4_inference_results.jsonl → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only/jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only_inference_results.jsonl} RENAMED
File without changes
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C4-encoder_classification-C4 → jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only}/run_inference_experiment.log RENAMED
@@ -1,5 +1,5 @@
1
- [2025-06-28 17:39:36,464][__main__][INFO] - Starting inference experiment
2
- [2025-06-28 17:39:36,466][__main__][INFO] - cache_dir: /tmp/
3
  dataset:
4
  name: kamel-usp/aes_enem_dataset
5
  split: JBCS2025
@@ -31,6 +31,7 @@ experiments:
31
  name: neuralmind/bert-base-portuguese-cased
32
  dataset:
33
  grade_index: 3
 
34
  training_params:
35
  weight_decay: 0.01
36
  warmup_ratio: 0.1
@@ -40,9 +41,9 @@ experiments:
40
  gradient_accumulation_steps: 1
41
  gradient_checkpointing: false
42
 
43
- [2025-06-28 17:39:36,479][__main__][INFO] - Running inference with fine-tuned HF model
44
- [2025-06-28 17:39:41,572][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
45
- [2025-06-28 17:39:41,573][transformers.configuration_utils][INFO] - Model config BertConfig {
46
  "architectures": [
47
  "BertForMaskedLM"
48
  ],
@@ -67,20 +68,20 @@ experiments:
67
  "pooler_size_per_head": 128,
68
  "pooler_type": "first_token_transform",
69
  "position_embedding_type": "absolute",
70
- "transformers_version": "4.50.3",
71
  "type_vocab_size": 2,
72
  "use_cache": true,
73
  "vocab_size": 29794
74
  }
75
 
76
- [2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt
77
- [2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
78
- [2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json
79
- [2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json
80
- [2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json
81
- [2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
82
- [2025-06-28 17:39:41,574][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
83
- [2025-06-28 17:39:41,574][transformers.configuration_utils][INFO] - Model config BertConfig {
84
  "architectures": [
85
  "BertForMaskedLM"
86
  ],
@@ -105,14 +106,14 @@ experiments:
105
  "pooler_size_per_head": 128,
106
  "pooler_type": "first_token_transform",
107
  "position_embedding_type": "absolute",
108
- "transformers_version": "4.50.3",
109
  "type_vocab_size": 2,
110
  "use_cache": true,
111
  "vocab_size": 29794
112
  }
113
 
114
- [2025-06-28 17:39:41,599][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
115
- [2025-06-28 17:39:41,599][transformers.configuration_utils][INFO] - Model config BertConfig {
116
  "architectures": [
117
  "BertForMaskedLM"
118
  ],
@@ -137,16 +138,18 @@ experiments:
137
  "pooler_size_per_head": 128,
138
  "pooler_type": "first_token_transform",
139
  "position_embedding_type": "absolute",
140
- "transformers_version": "4.50.3",
141
  "type_vocab_size": 2,
142
  "use_cache": true,
143
  "vocab_size": 29794
144
  }
145
 
146
- [2025-06-28 17:39:41,613][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True
147
- [2025-06-28 17:39:41,666][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C4
148
- [2025-06-28 17:39:41,892][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C4/snapshots/be129129fc134c0e782ae9f62b33da331367ab7b/config.json
149
- [2025-06-28 17:39:41,893][transformers.configuration_utils][INFO] - Model config BertConfig {
 
 
150
  "architectures": [
151
  "BertForSequenceClassification"
152
  ],
@@ -189,35 +192,35 @@ experiments:
189
  "position_embedding_type": "absolute",
190
  "problem_type": "single_label_classification",
191
  "torch_dtype": "float32",
192
- "transformers_version": "4.50.3",
193
  "type_vocab_size": 2,
194
  "use_cache": true,
195
  "vocab_size": 29794
196
  }
197
 
198
- [2025-06-28 17:39:41,925][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C4/snapshots/be129129fc134c0e782ae9f62b33da331367ab7b/model.safetensors
199
- [2025-06-28 17:39:41,926][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
200
- [2025-06-28 17:39:41,926][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
201
- [2025-06-28 17:39:42,130][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.
202
 
203
- [2025-06-28 17:39:42,131][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C4.
204
  If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
205
- [2025-06-28 17:39:42,136][transformers.training_args][INFO] - PyTorch: setting up devices
206
- [2025-06-28 17:39:42,149][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
207
- [2025-06-28 17:39:42,152][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
208
- [2025-06-28 17:39:42,169][transformers.trainer][INFO] - Using auto half precision backend
209
- [2025-06-28 17:39:45,638][__main__][INFO] - Running inference on test dataset
210
- [2025-06-28 17:39:45,640][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, supporting_text, prompt, essay_text, essay_year, id, id_prompt, reference. If grades, supporting_text, prompt, essay_text, essay_year, id, id_prompt, reference are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
211
- [2025-06-28 17:39:45,644][transformers.trainer][INFO] -
212
  ***** Running Prediction *****
213
- [2025-06-28 17:39:45,644][transformers.trainer][INFO] - Num examples = 138
214
- [2025-06-28 17:39:45,644][transformers.trainer][INFO] - Batch size = 16
215
- [2025-06-28 17:39:48,945][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C4-encoder_classification-C4_inference_results.jsonl
216
- [2025-06-28 17:39:48,948][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
217
- [2025-06-28 17:40:38,116][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
218
- [2025-06-28 17:40:38,116][__main__][INFO] - Bootstrap Confidence Intervals (95%):
219
- [2025-06-28 17:40:38,116][__main__][INFO] - QWK: 0.6233 [0.5111, 0.7251]
220
- [2025-06-28 17:40:38,116][__main__][INFO] - Macro_F1: 0.4137 [0.2906, 0.5906]
221
- [2025-06-28 17:40:38,116][__main__][INFO] - Weighted_F1: 0.6557 [0.5749, 0.7321]
222
- [2025-06-28 17:40:38,116][__main__][INFO] - Inference results: {'accuracy': 0.644927536231884, 'RMSE': 26.37521893583148, 'QWK': 0.6258134490238612, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.36114488348530904, 'Micro_F1': 0.644927536231884, 'Weighted_F1': 0.6545879036165807, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(137), 'FP_1': np.int64(0), 'FN_1': np.int64(1), 'TP_2': np.int64(5), 'TN_2': np.int64(118), 'FP_2': np.int64(11), 'FN_2': np.int64(4), 'TP_3': np.int64(51), 'TN_3': np.int64(49), 'FP_3': np.int64(13), 'FN_3': np.int64(25), 'TP_4': np.int64(30), 'TN_4': np.int64(74), 'FP_4': np.int64(18), 'FN_4': np.int64(16), 'TP_5': np.int64(3), 'TN_5': np.int64(126), 'FP_5': np.int64(7), 'FN_5': np.int64(2)}
223
- [2025-06-28 17:40:38,116][__main__][INFO] - Inference experiment completed
 
1
+ [2025-06-30 23:57:45,116][__main__][INFO] - Starting inference experiment
2
+ [2025-06-30 23:57:45,117][__main__][INFO] - cache_dir: /tmp/
3
  dataset:
4
  name: kamel-usp/aes_enem_dataset
5
  split: JBCS2025
 
31
  name: neuralmind/bert-base-portuguese-cased
32
  dataset:
33
  grade_index: 3
34
+ use_full_context: false
35
  training_params:
36
  weight_decay: 0.01
37
  warmup_ratio: 0.1
 
41
  gradient_accumulation_steps: 1
42
  gradient_checkpointing: false
43
 
44
+ [2025-06-30 23:57:45,119][__main__][INFO] - Running inference with fine-tuned HF model
45
+ [2025-06-30 23:57:50,144][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
46
+ [2025-06-30 23:57:50,145][transformers.configuration_utils][INFO] - Model config BertConfig {
47
  "architectures": [
48
  "BertForMaskedLM"
49
  ],
 
68
  "pooler_size_per_head": 128,
69
  "pooler_type": "first_token_transform",
70
  "position_embedding_type": "absolute",
71
+ "transformers_version": "4.53.0",
72
  "type_vocab_size": 2,
73
  "use_cache": true,
74
  "vocab_size": 29794
75
  }
76
 
77
+ [2025-06-30 23:57:50,350][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt
78
+ [2025-06-30 23:57:50,350][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
79
+ [2025-06-30 23:57:50,350][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json
80
+ [2025-06-30 23:57:50,350][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json
81
+ [2025-06-30 23:57:50,350][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json
82
+ [2025-06-30 23:57:50,350][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
83
+ [2025-06-30 23:57:50,350][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
84
+ [2025-06-30 23:57:50,351][transformers.configuration_utils][INFO] - Model config BertConfig {
85
  "architectures": [
86
  "BertForMaskedLM"
87
  ],
 
106
  "pooler_size_per_head": 128,
107
  "pooler_type": "first_token_transform",
108
  "position_embedding_type": "absolute",
109
+ "transformers_version": "4.53.0",
110
  "type_vocab_size": 2,
111
  "use_cache": true,
112
  "vocab_size": 29794
113
  }
114
 
115
+ [2025-06-30 23:57:50,376][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json
116
+ [2025-06-30 23:57:50,376][transformers.configuration_utils][INFO] - Model config BertConfig {
117
  "architectures": [
118
  "BertForMaskedLM"
119
  ],
 
138
  "pooler_size_per_head": 128,
139
  "pooler_type": "first_token_transform",
140
  "position_embedding_type": "absolute",
141
+ "transformers_version": "4.53.0",
142
  "type_vocab_size": 2,
143
  "use_cache": true,
144
  "vocab_size": 29794
145
  }
146
 
147
+ [2025-06-30 23:57:50,392][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True; Use Full Context: False
148
+ [2025-06-30 23:57:50,599][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C4
149
+ [2025-06-30 23:57:50,599][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C4
150
+ [2025-06-30 23:57:51,501][__main__][INFO] - Model need 1.36 GiB to run inference and 2.58 for training
151
+ [2025-06-30 23:57:52,494][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C4/snapshots/be129129fc134c0e782ae9f62b33da331367ab7b/config.json
152
+ [2025-06-30 23:57:52,494][transformers.configuration_utils][INFO] - Model config BertConfig {
153
  "architectures": [
154
  "BertForSequenceClassification"
155
  ],
 
192
  "position_embedding_type": "absolute",
193
  "problem_type": "single_label_classification",
194
  "torch_dtype": "float32",
195
+ "transformers_version": "4.53.0",
196
  "type_vocab_size": 2,
197
  "use_cache": true,
198
  "vocab_size": 29794
199
  }
200
 
201
+ [2025-06-30 23:58:07,769][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C4/snapshots/be129129fc134c0e782ae9f62b33da331367ab7b/model.safetensors
202
+ [2025-06-30 23:58:07,770][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
203
+ [2025-06-30 23:58:07,770][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
204
+ [2025-06-30 23:58:08,132][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification.
205
 
206
+ [2025-06-30 23:58:08,133][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C4.
207
  If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training.
208
+ [2025-06-30 23:58:08,138][transformers.training_args][INFO] - PyTorch: setting up devices
209
+ [2025-06-30 23:58:08,191][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
210
+ [2025-06-30 23:58:08,196][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
211
+ [2025-06-30 23:58:08,215][transformers.trainer][INFO] - Using auto half precision backend
212
+ [2025-06-30 23:58:11,691][__main__][INFO] - Running inference on test dataset
213
+ [2025-06-30 23:58:11,692][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: reference, essay_text, supporting_text, grades, id_prompt, id, essay_year, prompt. If reference, essay_text, supporting_text, grades, id_prompt, id, essay_year, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
214
+ [2025-06-30 23:58:11,696][transformers.trainer][INFO] -
215
  ***** Running Prediction *****
216
+ [2025-06-30 23:58:11,696][transformers.trainer][INFO] - Num examples = 138
217
+ [2025-06-30 23:58:11,696][transformers.trainer][INFO] - Batch size = 16
218
+ [2025-06-30 23:58:12,089][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C4-encoder_classification-C4-essay_only_inference_results.jsonl
219
+ [2025-06-30 23:58:12,096][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1']
220
+ [2025-06-30 23:59:49,674][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv
221
+ [2025-06-30 23:59:49,674][__main__][INFO] - Bootstrap Confidence Intervals (95%):
222
+ [2025-06-30 23:59:49,674][__main__][INFO] - QWK: 0.6233 [0.5111, 0.7251]
223
+ [2025-06-30 23:59:49,674][__main__][INFO] - Macro_F1: 0.4137 [0.2906, 0.5906]
224
+ [2025-06-30 23:59:49,674][__main__][INFO] - Weighted_F1: 0.6557 [0.5749, 0.7321]
225
+ [2025-06-30 23:59:49,674][__main__][INFO] - Inference results: {'accuracy': 0.644927536231884, 'RMSE': 26.37521893583148, 'QWK': 0.6258134490238612, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.36114488348530904, 'Micro_F1': 0.644927536231884, 'Weighted_F1': 0.6545879036165807, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(137), 'FP_1': np.int64(0), 'FN_1': np.int64(1), 'TP_2': np.int64(5), 'TN_2': np.int64(118), 'FP_2': np.int64(11), 'FN_2': np.int64(4), 'TP_3': np.int64(51), 'TN_3': np.int64(49), 'FP_3': np.int64(13), 'FN_3': np.int64(25), 'TP_4': np.int64(30), 'TN_4': np.int64(74), 'FP_4': np.int64(18), 'FN_4': np.int64(16), 'TP_5': np.int64(3), 'TN_5': np.int64(126), 'FP_5': np.int64(7), 'FN_5': np.int64(2)}
226
+ [2025-06-30 23:59:49,675][__main__][INFO] - Inference experiment completed
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C5-encoder_classification-C5 → jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only}/.hydra/config.yaml RENAMED
@@ -30,6 +30,7 @@ experiments:
30
  name: neuralmind/bert-base-portuguese-cased
31
  dataset:
32
  grade_index: 4
 
33
  training_params:
34
  weight_decay: 0.01
35
  warmup_ratio: 0.1
 
30
  name: neuralmind/bert-base-portuguese-cased
31
  dataset:
32
  grade_index: 4
33
+ use_full_context: false
34
  training_params:
35
  weight_decay: 0.01
36
  warmup_ratio: 0.1
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C5-encoder_classification-C5 → jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only}/.hydra/hydra.yaml RENAMED
@@ -130,18 +130,18 @@ hydra:
130
  runtime:
131
  version: 1.3.2
132
  version_base: '1.1'
133
- cwd: /home/andrebarbosa/jbcs2025
134
  config_sources:
135
  - path: hydra.conf
136
  schema: pkg
137
  provider: hydra
138
- - path: /home/andrebarbosa/jbcs2025/configs
139
  schema: file
140
  provider: main
141
  - path: ''
142
  schema: structured
143
  provider: schema
144
- output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-40-47
145
  choices:
146
  experiments: base_models/C5
147
  hydra/env: default
 
130
  runtime:
131
  version: 1.3.2
132
  version_base: '1.1'
133
+ cwd: /workspace/jbcs2025
134
  config_sources:
135
  - path: hydra.conf
136
  schema: pkg
137
  provider: hydra
138
+ - path: /workspace/jbcs2025/configs
139
  schema: file
140
  provider: main
141
  - path: ''
142
  schema: structured
143
  provider: schema
144
+ output_dir: /workspace/jbcs2025/outputs/2025-06-30/23-59-55
145
  choices:
146
  experiments: base_models/C5
147
  hydra/env: default
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C5-encoder_classification-C5 → jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only}/.hydra/overrides.yaml RENAMED
File without changes
runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only/bootstrap_confidence_intervals.csv ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width
2
+ jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only,2025-06-30 23:59:55,0.47349799901126716,0.3401973117894254,0.5947975929869902,0.2546002811975648,0.20469588256838514,0.14697576658446224,0.27274642041824704,0.1257706538337848,0.25750931482031114,0.18034272476682853,0.33952288243091566,0.15918015766408714
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C5-encoder_classification-C5 → jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only}/evaluation_results.csv RENAMED
@@ -1,2 +1,2 @@
1
  accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
- 0.3188405797101449,61.2904702146299,0.476219483623073,0.13043478260869568,0.2055897809038726,0.3188405797101449,0.25808413038205613,3,113,3,19,9,71,35,23,3,103,11,21,1,108,5,24,28,66,40,4,0,135,0,3,2025-06-28 17:40:47,jbcs2025_bertimbau_base-C5-encoder_classification-C5
 
1
  accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id
2
+ 0.3188405797101449,61.2904702146299,0.476219483623073,0.13043478260869568,0.2055897809038726,0.3188405797101449,0.25808413038205613,3,113,3,19,9,71,35,23,3,103,11,21,1,108,5,24,28,66,40,4,0,135,0,3,2025-06-30 23:59:55,jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only
runs/base_models/bertimbau/{jbcs2025_bertimbau_base-C5-encoder_classification-C5/jbcs2025_bertimbau_base-C5-encoder_classification-C5_inference_results.jsonl → jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only/jbcs2025_bertimbau_base-C5-encoder_classification-C5-essay_only_inference_results.jsonl} RENAMED
File without changes