gardarjuto committed on
Commit
9e6a3bf
·
1 Parent(s): a0ee03a

remove debug prints

Browse files
Files changed (2) hide show
  1. src/leaderboard/read_evals.py +0 -5
  2. src/populate.py +0 -2
src/leaderboard/read_evals.py CHANGED
@@ -127,9 +127,6 @@ class EvalResult:
127
  }
128
 
129
  for task in Tasks:
130
- print(f"task: {task}")
131
- print(f"task.value.col_name: {task.value.col_name}")
132
- print(f"task.value.benchmark: {task.value.benchmark}")
133
  data_dict[task.value.col_name] = self.results[task.value.benchmark]
134
 
135
  return data_dict
@@ -189,8 +186,6 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
189
  else:
190
  eval_results[eval_name] = eval_result
191
 
192
- print(f"Eval results: {eval_results}")
193
-
194
  results = []
195
  for v in eval_results.values():
196
  try:
 
127
  }
128
 
129
  for task in Tasks:
 
 
 
130
  data_dict[task.value.col_name] = self.results[task.value.benchmark]
131
 
132
  return data_dict
 
186
  else:
187
  eval_results[eval_name] = eval_result
188
 
 
 
189
  results = []
190
  for v in eval_results.values():
191
  try:
src/populate.py CHANGED
@@ -12,8 +12,6 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
12
  """Creates a dataframe from all the individual experiment results"""
13
  raw_data = get_raw_eval_results(results_path, requests_path)
14
  all_data_json = [v.to_dict() for v in raw_data]
15
- print("LOADED JSON DATA:")
16
- print(all_data_json)
17
 
18
  df = pd.DataFrame.from_records(all_data_json)
19
  df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
 
12
  """Creates a dataframe from all the individual experiment results"""
13
  raw_data = get_raw_eval_results(results_path, requests_path)
14
  all_data_json = [v.to_dict() for v in raw_data]
 
 
15
 
16
  df = pd.DataFrame.from_records(all_data_json)
17
  df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)