Upload folder using huggingface_hub
- .DS_Store +0 -0
- README.md +0 -5
- matematik_och_fysikprovet.csv +0 -0
- src/.DS_Store +0 -0
- src/combine.py +24 -0
- src/run_tests.sh +15 -0
.DS_Store
ADDED
Binary file (6.15 kB).
README.md
CHANGED
@@ -32,11 +32,6 @@ from datasets import load_dataset
 ds = load_dataset("Ekgren/swedish_skolprov", "all")
 ```
 
-You can find an evaluation script that uses OpenRouter here: [src/evaluate_data.py](src/evaluate_data.py)
-
-```bash
-python evaluate_data.py --model_name "mistralai/mistral-small-3.1-24b-instruct" --eval_subset "all" --output_path "./results"
-```
 
 ## Högskoleprovet
 - https://www.studera.nu/hogskoleprov/forbered/tidigare-hogskoleprov/
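As a quick sanity check, here is a minimal sketch of the dataset load shown in the kept context line of the hunk above; the repo id and the "all" configuration come from the README, while the printed structure is simply whatever `load_dataset` returns (no column names are assumed).

```python
from datasets import load_dataset

# Load every subset of the benchmark, exactly as the README context line shows.
ds = load_dataset("Ekgren/swedish_skolprov", "all")

# Inspect splits and features without assuming any particular column names.
print(ds)
```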
matematik_och_fysikprovet.csv
ADDED
The diff for this file is too large to render.
src/.DS_Store
ADDED
Binary file (6.15 kB).
src/combine.py
ADDED
@@ -0,0 +1,24 @@
import pandas as pd
import json
import glob
import os

json_folder = "results"
output_csv = "results/combined.csv"

data_rows = []

for file in glob.glob(os.path.join(json_folder, "*.json")):
    with open(file, 'r') as f:
        content = json.load(f)
    flat_row = {'sampled_model_name': content['sampled_model_name']}

    for key, value in content.items():
        if key != 'sampled_model_name':
            flat_row[f"{key}_points"] = value['points']
            flat_row[f"{key}_total"] = value['total']

    data_rows.append(flat_row)

df = pd.DataFrame(data_rows)
df.to_csv(output_csv, index=False)
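For orientation, combine.py expects each JSON file under results/ to hold one object with a sampled_model_name key plus one entry per test subset containing points and total, and it flattens those into one CSV row per model. The sketch below fabricates a single result file in that shape so the flattening can be tried in isolation; the subset name "hogskoleprovet" and the scores are placeholders, not values taken from the repository.

```python
import json
import os

os.makedirs("results", exist_ok=True)

# Hypothetical result file in the shape combine.py reads:
# a 'sampled_model_name' key plus {'points': ..., 'total': ...} per subset.
example = {
    "sampled_model_name": "openai/gpt-4o-mini",    # model id as listed in run_tests.sh
    "hogskoleprovet": {"points": 0, "total": 0},   # placeholder subset name and scores
}

with open("results/example.json", "w") as f:
    json.dump(example, f)

# Running combine.py afterwards would produce results/combined.csv with columns
# sampled_model_name, hogskoleprovet_points, hogskoleprovet_total.
```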
src/run_tests.sh
ADDED
@@ -0,0 +1,15 @@
#!/bin/bash

# List of model names
models=(
    "openai/gpt-4o-mini"
    "openai/o3-mini-high"
    "google/gemini-2.0-flash-001"
)

# Loop through each model and run evaluation
for model in "${models[@]}"; do
    echo "Evaluating model: ${model}"
    python evaluate_data.py --model_name "${model}" --eval_subset "all" --output_path "./results"
done
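Taken together, the two added scripts sketch a small pipeline: run_tests.sh calls evaluate_data.py once per model with --output_path "./results", and combine.py then flattens the per-model JSON files into results/combined.csv. A minimal Python driver for that flow might look like the sketch below; it assumes both scripts are executed from the directory that also contains evaluate_data.py, which the scripts' relative paths suggest but the commit does not state.

```python
import subprocess

# Hypothetical driver: run the evaluation loop, then merge the JSON results.
# Assumes the working directory contains evaluate_data.py, run_tests.sh and
# combine.py, and that evaluate_data.py writes its JSON output into ./results.
subprocess.run(["bash", "run_tests.sh"], check=True)
subprocess.run(["python", "combine.py"], check=True)
```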