abarbosa committed
Commit ec168b6 · 1 Parent(s): 4ccf071

update parquet tables and fix typo

.gitignore CHANGED
@@ -1 +1,2 @@
  runs/api_models/__pycache__/*.pyc
+ logs/**/*
README.md CHANGED
@@ -8,8 +8,8 @@ configs:
      path: evaluation_results-*.parquet
  - config_name: bootstrap_confidence_intervals
    data_files:
-   - split: boostrap_confidence_intervals
-     path: boostrap_confidence_intervals-*.parquet
+   - split: bootstrap_confidence_intervals
+     path: bootstrap_confidence_intervals-*.parquet
  tags:
  - automatic-essay-scoring
  - portuguese
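With the split and path typos fixed, the bootstrap_confidence_intervals config points at the renamed parquet shard. A minimal loading sketch using the Hugging Face datasets library; the repo id below is a placeholder, not something stated in this commit:

    from datasets import load_dataset

    # "USER/DATASET" is a placeholder repo id; substitute the actual dataset repository.
    ci = load_dataset("USER/DATASET", "bootstrap_confidence_intervals")
    print(ci["bootstrap_confidence_intervals"].column_names)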
boostrap_confidence_intervals-00000-of-00001.parquet → bootstrap_confidence_intervals-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:689e3cce6e89d56a60154dd1d32b8d5690d4850b5bdb322451ad39912bf4e129
- size 29456
+ oid sha256:90fa615f5ff10a5ff533b5c8e65df895ba313844d9e5d708573f7d3e81787fc4
+ size 28166
create_parquet_files.py CHANGED
@@ -1,40 +1,391 @@
  import pandas as pd
  from pathlib import Path
  import pyarrow  # ensures pyarrow is installed for Parquet support
+ import numpy as np
+ import sys
+ from tqdm.auto import tqdm
+ import logging
+ from datetime import datetime

+ # Add the api_models directory to the Python path to import existing modules
+ sys.path.append(str(Path(__file__).parent / "runs" / "api_models"))
+
+ from compute_bootstrap_ci import (
+     load_inference_results_by_grader,
+     extract_config_from_log,
+ )
+ from metrics import compute_metrics
+ from omegaconf import OmegaConf
+
+ # Set up logging
+ log_dir = Path("logs")
+ log_dir.mkdir(exist_ok=True)
+ log_file = log_dir / f"create_parquet_files_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
+
+ # Configure logging to write to both file and console
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(levelname)s - %(message)s',
+     handlers=[
+         logging.FileHandler(log_file),
+         logging.StreamHandler(sys.stdout)
+     ]
+ )
+ logger = logging.getLogger(__name__)
+
+ # Also create a separate error-only log file
+ error_log_file = log_dir / f"create_parquet_files_errors_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
+ error_handler = logging.FileHandler(error_log_file)
+ error_handler.setLevel(logging.ERROR)
+ error_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+ logger.addHandler(error_handler)
+
+ def simplify_experiment_name(name):
+     """Simplify experiment names according to the mapping rules."""
+     if pd.isna(name):
+         return name
+
+     # Convert to string to handle any non-string inputs
+     name = str(name)
+
+     # Define the mapping rules
+     mappings = {
+         # Sabia-3 mappings
+         'sabia-3-zero-shot': 'sabia3-studentPrompt',
+         'sabia-3-extractor-zero-shot': 'sabia3-extractor',
+         'sabia-3-grader-zero-shot': 'sabia3-graderPrompt',
+
+         # Deepseek mappings
+         'deepseek-reasoner-zero-shot': 'deepseekR1-studentPrompt',
+         'deepseek-reasoner-extractor-zero-shot': 'deepseekR1-extractor',
+         'deepseek-reasoner-grader-zero-shot': 'deepseekR1-graderPrompt',
+
+         # GPT-4o mappings
+         'gpt-4o-2024-11-20-zero-shot': 'gpt4o-studentPrompt',
+         'gpt-4o-2024-11-20-extractor-zero-shot': 'gpt4o-extractor',
+         'gpt-4o-2024-11-20-grader-zero-shot': 'gpt4o-graderPrompt',
+     }
+
+     # Apply direct mappings first
+     for pattern, replacement in mappings.items():
+         if pattern in name:
+             name = name.replace(pattern, replacement)
+
+     # Handle jbcs2025 prefixed names
+     if name.startswith('jbcs2025_'):
+         # Remove the prefix
+         name = name[9:]
+
+     # First, remove any duplicated model-specific patterns that appear multiple times
+     # These patterns indicate the experiment setup was duplicated in the name
+     duplication_patterns = [
+         'llama31_classification_lora',
+         'phi35_classification_lora',
+         'phi4_classification_lora',
+         'encoder_classification'
+     ]
+
+     for pattern in duplication_patterns:
+         # Count occurrences
+         count = name.count(f'-{pattern}-')
+         if count > 1:
+             # Replace all but keep track of components
+             parts = name.split(f'-{pattern}-')
+             # Keep the first part and the last part (which has the config)
+             if len(parts) > 2:
+                 name = parts[0] + '-' + parts[-1]
+
+     # Handle BERT variants
+     if 'bert-base-portuguese-cased-encoder_classification' in name:
+         name = name.replace('bert-base-portuguese-cased-encoder_classification', 'bertimbau-base')
+     elif 'BERTugues-base-portuguese-cased-encoder_classification' in name:
+         name = name.replace('BERTugues-base-portuguese-cased-encoder_classification', 'bertugues-base')
+     elif 'bert-base-multilingual-cased-encoder_classification' in name:
+         name = name.replace('bert-base-multilingual-cased-encoder_classification', 'mbert-base')
+     elif 'bert-large-portuguese-cased-encoder_classification' in name:
+         name = name.replace('bert-large-portuguese-cased-encoder_classification', 'bertimbau-large')
+
+     # Handle Llama variants
+     elif 'Llama-3.1-8B-llama31_classification_lora' in name:
+         name = name.replace('Llama-3.1-8B-llama31_classification_lora', 'llama3.1-8b-lora')
+     elif 'Llama-3.1-8B' in name:
+         name = name.replace('Llama-3.1-8B', 'llama3.1-8b-lora')
+
+     # Handle Phi variants
+     elif 'Phi-3.5-mini-instruct-phi35_classification_lora' in name:
+         name = name.replace('Phi-3.5-mini-instruct-phi35_classification_lora', 'phi3.5-mini-lora')
+     elif 'Phi-3.5-mini-instruct' in name:
+         name = name.replace('Phi-3.5-mini-instruct', 'phi3.5-mini-lora')
+     elif 'phi-4-phi4_classification_lora' in name:
+         name = name.replace('phi-4-phi4_classification_lora', 'phi4-lora')
+     elif 'phi-4' in name:
+         name = name.replace('phi-4', 'phi4-lora')
+
+     # Clean up any remaining classification patterns
+     name = name.replace('-encoder_classification', '')
+     name = name.replace('_classification_lora', '')
+     name = name.replace('-llama31', '')
+     name = name.replace('-phi35', '')
+     name = name.replace('-phi4', '')
+
+     # Extract components and reorder
+     parts = name.split('-')
+
+     # Look for competency (C1-C5), context type, and LoRA rank
+     competency = None
+     context = None
+     lora_rank = None
+     model_parts = []
+
+     i = 0
+     while i < len(parts):
+         part = parts[i]
+         if part in ['C1', 'C2', 'C3', 'C4', 'C5']:
+             competency = part
+         elif part == 'essay_only':
+             context = 'essay-only'
+         elif part == 'full_context':
+             context = 'full-context'
+         elif part in ['essay', 'full'] and i + 1 < len(parts):
+             # Handle split context names
+             if parts[i+1] == 'only':
+                 context = 'essay-only'
+                 i += 1  # Skip next part
+             elif parts[i+1] == 'context':
+                 context = 'full-context'
+                 i += 1  # Skip next part
+         elif part in ['r8', 'r16']:
+             lora_rank = part
+         elif part and part not in ['only', 'context']:  # Skip empty parts and orphaned context words
+             model_parts.append(part)
+         i += 1
+
+     # Reconstruct the name in the desired order: model-competency-context-rank
+     new_parts = model_parts
+     if competency:
+         new_parts.append(competency)
+     if context:
+         new_parts.append(context)
+     if lora_rank:
+         new_parts.append(lora_rank)
+
+     name = '-'.join(new_parts)
+
+     # Final cleanup: remove any double dashes
+     while '--' in name:
+         name = name.replace('--', '-')
+
+     return name

  def find_and_group_csvs():
      base = Path(".")
      groups = {
          "evaluation_results": sorted(base.rglob("evaluation_results.csv")),
-         "bootstrap_confidence_intervals": sorted(base.rglob("bootstrap_confidence_intervals.csv")),
+         "bootstrap_confidence_intervals": sorted(
+             base.rglob("bootstrap_confidence_intervals.csv")
+         ),
      }
      for name, paths in groups.items():
-         print(f"[INFO] Found {len(paths)} files for '{name}'")
+         logger.info(f"Found {len(paths)} files for '{name}'")
          if not paths:
-             print(f"[WARNING] No files found for '{name}'")
+             logger.warning(f"No files found for '{name}'")
      return groups


+ def enhance_evaluation_results(eval_df, csv_paths):
+     """Enhance evaluation results with additional metrics from JSONL files."""
+     enhanced_rows = []
+     failed_count = 0
+
+     # Create a mapping from row index to CSV path
+     # Since we're processing multiple CSVs that get concatenated,
+     # we need to track which rows came from which CSV file
+     row_to_path = {}
+     current_idx = 0
+
+     for path in csv_paths:
+         df = pd.read_csv(path)
+         for i in range(len(df)):
+             row_to_path[current_idx + i] = path
+         current_idx += len(df)
+
+     for idx, row in tqdm(
+         eval_df.iterrows(), desc="Processing evaluation rows", total=len(eval_df)
+     ):
+         # Get the CSV path for this row
+         csv_path = row_to_path.get(idx)
+
+         if csv_path is None:
+             error_msg = f"CSV file not found for row {idx}"
+             logger.error(error_msg)
+             failed_count += 1
+             continue
+
+         try:
+             # Extract experiment ID from the path
+             # The experiment ID is typically the parent directory name
+             experiment_id = csv_path.parent.name
+
+             # Simplify the experiment ID
+             experiment_id = simplify_experiment_name(experiment_id)
+
+             # Find corresponding JSONL file in the same directory
+             jsonl_path = csv_path.parent / "inference_results.jsonl"
+             if not jsonl_path.exists():
+                 # Try with experiment name prefix
+                 jsonl_files = list(csv_path.parent.glob("*_inference_results.jsonl"))
+                 if jsonl_files:
+                     jsonl_path = jsonl_files[0]
+                 else:
+                     raise FileNotFoundError(f"JSONL file not found in {csv_path.parent}")
+
+             # Find log file to extract configuration
+             log_files = list(csv_path.parent.glob("*run_inference_experiment.log"))
+             if not log_files:
+                 raise FileNotFoundError(f"Log file not found in {csv_path.parent}")
+
+             log_path = log_files[0]
+
+             # Load inference results and compute metrics
+             # Extract configuration from log file
+             config_dict = extract_config_from_log(log_path)
+             # Convert to OmegaConf DictConfig for compatibility with compute_metrics
+             cfg = OmegaConf.create(config_dict)
+
+             # Load data using the existing function
+             grader_a_data, grader_b_data = load_inference_results_by_grader(jsonl_path)
+
+             # Extract predictions and labels for each grader
+             all_predictions_a = np.array(
+                 [data["prediction"] for data in grader_a_data.values()]
+             )
+             all_labels_a = np.array([data["label"] for data in grader_a_data.values()])
+             all_predictions_b = np.array(
+                 [data["prediction"] for data in grader_b_data.values()]
+             )
+             all_labels_b = np.array([data["label"] for data in grader_b_data.values()])
+
+             # Compute concat(A,B) metrics for verification
+             # Concatenate predictions and labels from both graders
+             concat_predictions = np.concatenate([all_predictions_a, all_predictions_b])
+             concat_labels = np.concatenate([all_labels_a, all_labels_b])
+             metrics_concat = compute_metrics((concat_predictions, concat_labels), cfg)
+
+             # Verify that computed concat metrics match original CSV values
+             # Check a few key metrics with some tolerance for floating point comparison
+             tolerance = 1e-6
+             for metric in ["accuracy", "QWK", "Macro_F1", "Weighted_F1"]:
+                 if metric in row and metric in metrics_concat:
+                     original_value = row[metric]
+                     computed_value = metrics_concat[metric]
+                     # You can make this a hard assertion if needed:
+                     assert abs(original_value - computed_value) <= tolerance, (
+                         f"Metric {metric} mismatch: CSV={original_value}, Computed={computed_value}"
+                     )
+
+             # 1. Add original row with concat(A,B) metrics
+             concat_row = row.copy()
+             concat_row["experiment_id"] = experiment_id
+             concat_row["metric_group"] = "concat(A,B)"
+             enhanced_rows.append(concat_row)
+
+             # 2. Compute metrics for A and B separately first
+             metrics_a = compute_metrics((all_predictions_a, all_labels_a), cfg)
+             metrics_b = compute_metrics((all_predictions_b, all_labels_b), cfg)
+
+             # 3. Compute avg(A,B) as the average of metrics, not metrics of averaged predictions
+             avg_row = row.copy()
+             avg_row["experiment_id"] = experiment_id
+             avg_row["metric_group"] = "avg(A,B)"
+             # Average the metrics from A and B
+             for metric in metrics_a:
+                 if metric in metrics_b and metric in avg_row:
+                     avg_value = (metrics_a[metric] + metrics_b[metric]) / 2
+                     avg_row[metric] = avg_value
+             enhanced_rows.append(avg_row)
+
+             # 4. Add onlyA metrics
+             only_a_row = row.copy()
+             only_a_row["experiment_id"] = experiment_id
+             only_a_row["metric_group"] = "onlyA"
+             # Update metric columns with onlyA values
+             for metric, value in metrics_a.items():
+                 if metric in only_a_row:
+                     only_a_row[metric] = value
+             enhanced_rows.append(only_a_row)
+
+             # 5. Add onlyB metrics
+             only_b_row = row.copy()
+             only_b_row["experiment_id"] = experiment_id
+             only_b_row["metric_group"] = "onlyB"
+             # Update metric columns with onlyB values
+             for metric, value in metrics_b.items():
+                 if metric in only_b_row:
+                     only_b_row[metric] = value
+             enhanced_rows.append(only_b_row)
+
+         except Exception as e:
+             failed_count += 1
+             error_msg = f"Failed to process {csv_path.parent if csv_path else 'unknown path'}: {str(e)}"
+             logger.error(error_msg)
+             # Log full traceback for debugging
+             import traceback
+             logger.error(f"Traceback:\n{traceback.format_exc()}")
+             # Skip this row and continue with the next one
+             continue
+
+     logger.info(f"Successfully processed {len(enhanced_rows)//4} out of {len(eval_df)} rows")
+     if failed_count > 0:
+         logger.warning(f"Failed to process {failed_count} rows. Check error log: {error_log_file}")
+
+     return pd.DataFrame(enhanced_rows)
+
+
  def combine(paths, out_path):
      if not paths:
-         print(f"[SKIP] No files to combine for {out_path}")
+         logger.info(f"No files to combine for {out_path}")
          return

-     print(f"[INFO] Combining {len(paths)} files into {out_path}")
-     dfs = [pd.read_csv(p) for p in paths]
+     logger.info(f"Combining {len(paths)} files into {out_path}")
+     dfs = []
+
+     for p in paths:
+         df = pd.read_csv(p)
+
+         # Add experiment_id column based on the parent directory name
+         experiment_id = p.parent.name
+         experiment_id = simplify_experiment_name(experiment_id)
+         df["experiment_id"] = experiment_id
+
+         dfs.append(df)

      # Basic schema validation
      cols = {tuple(df.columns) for df in dfs}
      if len(cols) > 1:
-         raise ValueError(f"[ERROR] {out_path}: header mismatch across shards")
+         error_msg = f"{out_path}: header mismatch across shards"
+         logger.error(error_msg)
+         raise ValueError(error_msg)

      combined = pd.concat(dfs, ignore_index=True)
+
+     # Enhance evaluation results with additional metrics
+     if "evaluation_results" in out_path:
+         logger.info("Enhancing evaluation results with additional metrics...")
+         combined = enhance_evaluation_results(combined, paths)
+
      combined.to_parquet(out_path, engine="pyarrow", index=False)
-     print(f"[SUCCESS] Written {out_path} with {len(combined)} rows")
+     logger.info(f"Successfully written {out_path} with {len(combined)} rows")


  if __name__ == "__main__":
+     logger.info(f"Starting parquet file creation. Logs will be saved to: {log_file}")
+     logger.info(f"Error-only log will be saved to: {error_log_file}")
+
      groups = find_and_group_csvs()
      combine(groups["evaluation_results"], "evaluation_results-00000-of-00001.parquet")
-     combine(groups["bootstrap_confidence_intervals"], "boostrap_confidence_intervals-00000-of-00001.parquet")
+     combine(
+         groups["bootstrap_confidence_intervals"],
+         "bootstrap_confidence_intervals-00000-of-00001.parquet",
+     )
+
+     logger.info("Parquet file creation completed")
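The regenerated evaluation_results table adds experiment_id and metric_group columns, with four rows per original result (concat(A,B), avg(A,B), onlyA, onlyB). A small pandas sketch of how the new parquet can be sliced; the QWK column name is taken from the verification loop above, the rest is generic pandas:

    import pandas as pd

    df = pd.read_parquet("evaluation_results-00000-of-00001.parquet")

    # Keep only the concatenated-grader rows produced by enhance_evaluation_results.
    concat_view = df[df["metric_group"] == "concat(A,B)"]

    # Rank experiments by QWK (one of the metrics checked in the verification loop).
    print(concat_view.sort_values("QWK", ascending=False)[["experiment_id", "QWK"]].head())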
evaluation_results-00000-of-00001.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:15b923fed30c883c4775e5f8cc310fe8e6b0bf1d2faa65c5149b59f7dc2cfa89
- size 37257
+ oid sha256:31558c69a4e80b9e1dac959968c45fefec8401758dec2bb47732f374908a0960
+ size 65235
runs/api_models/compute_bootstrap_ci.py CHANGED
@@ -16,7 +16,7 @@ def extract_config_from_log(log_path: Path) -> Dict:
      Parse the experiment configuration from run_experiment.log file.
      The config is in YAML format at the beginning of the log file.
      """
-     with open(log_path, 'r') as f:
+     with open(log_path, 'r', encoding="latin1") as f:
          lines = f.readlines()

      # Find the start of the YAML config (after the first log line)
@@ -58,9 +58,12 @@ def load_inference_results_by_grader(jsonl_path: Path) -> Tuple[Dict[str, Dict],
          for line in f:
              data = json.loads(line.strip())
              essay_id = (data['id'], data['id_prompt'], data['essay_text'])
-
+             # Determine prediction field based on model type
+             model_types = ["gpt", "sabia", "deepseek"]
+             prediction_field = "pontuacao" if any(model in jsonl_path.name for model in model_types) else "prediction"
+             prediction = data[prediction_field]
              essay_data = {
-                 'prediction': data['pontuacao'],
+                 'prediction': prediction,
                  'label': data['label']
              }

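The prediction_field logic assumes two JSONL layouts: API-model result files (with gpt, sabia, or deepseek in the file name) store the score under pontuacao, while other runs use prediction. A standalone sketch of that selection on made-up records; the field values and file names are invented for illustration:

    # Hypothetical records mimicking the two layouts handled above.
    api_record = {"id": "1", "id_prompt": "p1", "essay_text": "...", "pontuacao": 120, "label": 160}
    local_record = {"id": "1", "id_prompt": "p1", "essay_text": "...", "prediction": 120, "label": 160}

    def pick_prediction(record, jsonl_name):
        models = ["gpt", "sabia", "deepseek"]
        field = "pontuacao" if any(m in jsonl_name for m in models) else "prediction"
        return record[field]

    assert pick_prediction(api_record, "gpt-4o_inference_results.jsonl") == 120
    assert pick_prediction(local_record, "encoder_inference_results.jsonl") == 120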
 
runs/api_models/metrics.py CHANGED
@@ -57,12 +57,15 @@ def _process_predictions(eval_pred, model_type: str) -> Tuple[List[int], List[in

      elif _is_classification_model(model_type):
          # Classification and ordinal models return logits
-         logits, all_true_labels = eval_pred
+         all_predictions, all_true_labels = eval_pred

          # Ensure true labels are in the correct format (original scale)
          if isinstance(all_true_labels[0], (int, np.integer)) and max(all_true_labels) <= 5:
              all_true_labels = all_true_labels * 40
-
+         # Ensure predictions are in the correct format (original scale)
+         if isinstance(all_predictions[0], (int, np.integer)) and max(all_predictions) <= 5:
+             all_predictions = all_predictions * 40
+
          return all_predictions.tolist(), all_true_labels.tolist()

      else:
@@ -73,9 +76,9 @@ def compute_metrics(eval_pred, cfg):
      """Compute evaluation metrics for the model."""
      transformers_logger = logging.getLogger("transformers")
      model_type = cfg.experiments.model.type
-
      try:
          # Process predictions based on model type
+
          all_predictions, all_true_labels = _process_predictions(eval_pred, model_type)

          # Compute metrics
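The added block mirrors the existing label handling: when a classification model yields class indices no larger than 5, multiplying by 40 maps them back onto the original score scale before metrics are computed. A tiny worked sketch of that rescaling (sample values invented):

    import numpy as np

    # Class indices as a classification head would emit them.
    all_predictions = np.array([0, 1, 3, 5])

    # Same check and rescaling as in _process_predictions above.
    if isinstance(all_predictions[0], (int, np.integer)) and max(all_predictions) <= 5:
        all_predictions = all_predictions * 40

    print(all_predictions.tolist())  # [0, 40, 120, 200]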