debug print
Browse files
src/leaderboard/read_evals.py
CHANGED
@@ -175,6 +175,8 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
|
|
175 |
for file in files:
|
176 |
model_result_filepaths.append(os.path.join(root, file))
|
177 |
|
|
|
|
|
178 |
eval_results = {}
|
179 |
for model_result_filepath in model_result_filepaths:
|
180 |
# Creation of result
|
@@ -188,6 +190,8 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
|
|
188 |
else:
|
189 |
eval_results[eval_name] = eval_result
|
190 |
|
|
|
|
|
191 |
results = []
|
192 |
for v in eval_results.values():
|
193 |
try:
|
|
|
175 |
for file in files:
|
176 |
model_result_filepaths.append(os.path.join(root, file))
|
177 |
|
178 |
+
print(f"Model results: {model_result_filepaths}")
|
179 |
+
|
180 |
eval_results = {}
|
181 |
for model_result_filepath in model_result_filepaths:
|
182 |
# Creation of result
|
|
|
190 |
else:
|
191 |
eval_results[eval_name] = eval_result
|
192 |
|
193 |
+
print(f"Eval results: {eval_results}")
|
194 |
+
|
195 |
results = []
|
196 |
for v in eval_results.values():
|
197 |
try:
|