Update README.md
---
license: cc-by-nc-4.0
language:
- ro
base_model:
- google/gemma-2-9b-it
datasets:
- OpenLLM-Ro/ro_sft_alpaca
- OpenLLM-Ro/ro_sft_alpaca_gpt4
- OpenLLM-Ro/ro_sft_dolly
- OpenLLM-Ro/ro_sft_selfinstruct_gpt4
- OpenLLM-Ro/ro_sft_norobots
- OpenLLM-Ro/ro_sft_orca
- OpenLLM-Ro/ro_sft_camel
- OpenLLM-Ro/ro_sft_oasst
- OpenLLM-Ro/ro_sft_ultrachat
- OpenLLM-Ro/ro_sft_magpie_mt
- OpenLLM-Ro/ro_sft_magpie_reasoning
model-index:
- name: OpenLLM-Ro/RoGemma2-9b-Instruct-2025-04-23
  results:
  - task:
      type: text-generation
    dataset:
      name: RoMT-Bench
      type: RoMT-Bench
    metrics:
    - name: Score
      type: Score
      value: 6.78
  - task:
      type: text-generation
    dataset:
      name: RoCulturaBench
      type: RoCulturaBench
    metrics:
    - name: Score
      type: Score
      value: 4.89
  - task:
      type: text-generation
    dataset:
      name: Romanian_Academic_Benchmarks
      type: Romanian_Academic_Benchmarks
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 54.39
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_arc_challenge
      type: OpenLLM-Ro/ro_arc_challenge
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 50.24
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_mmlu
      type: OpenLLM-Ro/ro_mmlu
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 62.00
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_winogrande
      type: OpenLLM-Ro/ro_winogrande
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 70.38
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_hellaswag
      type: OpenLLM-Ro/ro_hellaswag
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 52.25
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_gsm8k
      type: OpenLLM-Ro/ro_gsm8k
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 40.51
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_truthfulqa
      type: OpenLLM-Ro/ro_truthfulqa
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 50.97
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary
      type: LaRoSeDa_binary
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 84.23
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass
      type: LaRoSeDa_multiclass
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 60.14
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO
      type: WMT_EN-RO
    metrics:
    - name: Average bleu
      type: bleu
      value: 17.78
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN
      type: WMT_RO-EN
    metrics:
    - name: Average bleu
      type: bleu
      value: 18.24
  - task:
      type: text-generation
    dataset:
      name: XQuAD
      type: XQuAD
    metrics:
    - name: Average exact_match
      type: exact_match
      value: 49.22
  - task:
      type: text-generation
    dataset:
      name: XQuAD
      type: XQuAD
    metrics:
    - name: Average f1
      type: f1
      value: 66.33
  - task:
      type: text-generation
    dataset:
      name: STS
      type: STS
    metrics:
    - name: Average spearman
      type: spearman
      value: 70.17
  - task:
      type: text-generation
    dataset:
      name: STS
      type: STS
    metrics:
    - name: Average pearson
      type: pearson
      value: 70.80
  - task:
      type: text-generation
    dataset:
      name: RoMT-Bench
      type: RoMT-Bench
    metrics:
    - name: First turn
      type: Score
      value: 7.00
    - name: Second turn
      type: Score
      value: 6.55
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_arc_challenge
      type: OpenLLM-Ro/ro_arc_challenge
    metrics:
    - name: 0-shot
      type: accuracy
      value: 47.47
    - name: 1-shot
      type: accuracy
      value: 50.56
    - name: 3-shot
      type: accuracy
      value: 50.73
    - name: 5-shot
      type: accuracy
      value: 50.39
    - name: 10-shot
      type: accuracy
      value: 50.99
    - name: 25-shot
      type: accuracy
      value: 51.33
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_mmlu
      type: OpenLLM-Ro/ro_mmlu
    metrics:
    - name: 0-shot
      type: accuracy
      value: 58.73
    - name: 1-shot
      type: accuracy
      value: 60.12
    - name: 3-shot
      type: accuracy
      value: 64.93
    - name: 5-shot
      type: accuracy
      value: 64.21
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_winogrande
      type: OpenLLM-Ro/ro_winogrande
    metrics:
    - name: 0-shot
      type: accuracy
      value: 66.06
    - name: 1-shot
      type: accuracy
      value: 70.40
    - name: 3-shot
      type: accuracy
      value: 72.30
    - name: 5-shot
      type: accuracy
      value: 72.77
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_hellaswag
      type: OpenLLM-Ro/ro_hellaswag
    metrics:
    - name: 0-shot
      type: accuracy
      value: 56.30
    - name: 1-shot
      type: accuracy
      value: 58.29
    - name: 3-shot
      type: accuracy
      value: 50.88
    - name: 5-shot
      type: accuracy
      value: 44.38
    - name: 10-shot
      type: accuracy
      value: 51.41
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_gsm8k
      type: OpenLLM-Ro/ro_gsm8k
    metrics:
    - name: 1-shot
      type: accuracy
      value: 27.29
    - name: 3-shot
      type: accuracy
      value: 39.04
    - name: 5-shot
      type: accuracy
      value: 55.19
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary
      type: LaRoSeDa_binary
    metrics:
    - name: 0-shot
      type: macro-f1
      value: 59.19
    - name: 1-shot
      type: macro-f1
      value: 94.22
    - name: 3-shot
      type: macro-f1
      value: 93.24
    - name: 5-shot
      type: macro-f1
      value: 90.27
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass
      type: LaRoSeDa_multiclass
    metrics:
    - name: 0-shot
      type: macro-f1
      value: 32.52
    - name: 1-shot
      type: macro-f1
      value: 68.64
    - name: 3-shot
      type: macro-f1
      value: 70.14
    - name: 5-shot
      type: macro-f1
      value: 69.26
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO
      type: WMT_EN-RO
    metrics:
    - name: 0-shot
      type: bleu
      value: 1.96
    - name: 1-shot
      type: bleu
      value: 27.30
    - name: 3-shot
      type: bleu
      value: 28.31
    - name: 5-shot
      type: bleu
      value: 13.56
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN
      type: WMT_RO-EN
    metrics:
    - name: 0-shot
      type: bleu
      value: 0.66
    - name: 1-shot
      type: bleu
      value: 26.76
    - name: 3-shot
      type: bleu
      value: 31.88
    - name: 5-shot
      type: bleu
      value: 13.66
  - task:
      type: text-generation
    dataset:
      name: XQuAD_EM
      type: XQuAD_EM
    metrics:
    - name: 0-shot
      type: exact_match
      value: 49.92
    - name: 1-shot
      type: exact_match
      value: 47.98
    - name: 3-shot
      type: exact_match
      value: 45.71
    - name: 5-shot
      type: exact_match
      value: 53.28
  - task:
      type: text-generation
    dataset:
      name: XQuAD_F1
      type: XQuAD_F1
    metrics:
    - name: 0-shot
      type: f1
      value: 67.52
    - name: 1-shot
      type: f1
      value: 63.97
    - name: 3-shot
      type: f1
      value: 62.39
    - name: 5-shot
      type: f1
      value: 71.43
  - task:
      type: text-generation
    dataset:
      name: STS_Spearman
      type: STS_Spearman
    metrics:
    - name: 1-shot
      type: spearman
      value: 82.53
    - name: 3-shot
      type: spearman
      value: 65.73
    - name: 5-shot
      type: spearman
      value: 62.25
  - task:
      type: text-generation
    dataset:
      name: STS_Pearson
      type: STS_Pearson
    metrics:
    - name: 1-shot
      type: pearson
      value: 82.89
    - name: 3-shot
      type: pearson
      value: 66.26
    - name: 5-shot
      type: pearson
      value: 63.25
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->
This model is identical to, and points to, [RoGemma2-9b-Instruct-2025-04-23](https://huggingface.co/OpenLLM-Ro/RoGemma2-9b-Instruct-2025-04-23).

RoGemma2 is a family of pretrained and fine-tuned generative text models for Romanian. This is the repository for the **instruct 9B model**. Links to other models can be found at the bottom of this page.

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->
OpenLLM-Ro represents the first open-source effort to build an LLM specialized for Romanian. OpenLLM-Ro develops and publicly releases a collection of Romanian LLMs, both as foundational models and as instruct and chat variants.


- **Developed by:** OpenLLM-Ro
<!-- - **Funded by [optional]:** [More Information Needed] -->
<!-- - **Shared by [optional]:** [More Information Needed] -->
<!-- - **Model type:** [More Information Needed] -->
- **Language(s):** Romanian
- **License:** cc-by-nc-4.0
- **Finetuned from model:** [gemma-2-9b-it](https://huggingface.co/google/gemma-2-9b-it)
- **Trained using:** [RoAlpaca](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_alpaca), [RoAlpacaGPT4](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_alpaca_gpt4), [RoDolly](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_dolly), [RoSelfInstruct](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_selfinstruct_gpt4), [RoNoRobots](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_norobots), [RoOrca](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_orca), [RoCamel](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_camel), [RoOpenAssistant](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_oasst), [RoUltraChat](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_ultrachat), [RoMagpiePro](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_magpie_mt), [RoMagpieReasoning](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_magpie_reasoning)

### Model Sources

<!-- Provide the basic links for the model. -->

- **Repository:** https://github.com/OpenLLM-Ro/LLaMA-Factory
- **Paper:** https://arxiv.org/abs/2406.18266

## Intended Use

### Intended Use Cases

RoGemma2 is intended for research use in Romanian. Base models can be adapted for a variety of natural language tasks, while instruction- and chat-tuned models are intended for assistant-like chat.

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

Use in any manner that violates the license or any applicable laws or regulations, and use in languages other than Romanian.



## How to Get Started with the Model

Use the code below to get started with the model.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("OpenLLM-Ro/RoGemma2-9b-Instruct")
model = AutoModelForCausalLM.from_pretrained("OpenLLM-Ro/RoGemma2-9b-Instruct")

instruction = "Ce jocuri de societate pot juca cu prietenii mei?"
chat = [
    {"role": "user", "content": instruction},
]
prompt = tokenizer.apply_chat_template(chat, tokenize=False, system_message="")

inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
outputs = model.generate(input_ids=inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```
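
For single-GPU inference, the 9B weights are usually loaded in half precision to fit in memory. A minimal sketch using standard `transformers` options (`torch_dtype` and `device_map` are generic library arguments, not settings prescribed by this card):

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("OpenLLM-Ro/RoGemma2-9b-Instruct")
model = AutoModelForCausalLM.from_pretrained(
    "OpenLLM-Ro/RoGemma2-9b-Instruct",
    torch_dtype=torch.bfloat16,  # half precision: roughly 18 GB for 9B params vs. ~36 GB in float32
    device_map="auto",           # place layers automatically on the available GPU(s)/CPU
)
```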

## Academic Benchmarks

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>ARC</center></strong></td>
<td><strong><center>MMLU</center></strong></td>
<td><strong><center>Winogrande</center></strong></td>
<td><strong><center>Hellaswag</center></strong></td>
<td><strong><center>GSM8k</center></strong></td>
<td><strong><center>TruthfulQA</center></strong></td>
</tr>
<tr>
<td>gemma-2-9b-it</td><td><center>56.22</center></td><td><center>50.33</center></td><td><center><strong>64.01</strong></center></td><td><center>64.88</center></td><td><center>63.11</center></td><td><center>41.95</center></td><td><center>53.03</center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-2024-10-09</td><td><center>57.06</center></td><td><center><strong>56.20</strong></center></td><td><center>62.98</center></td><td><center>71.00</center></td><td><center>60.52</center></td><td><center>37.86</center></td><td><center>53.77</center></td>
</tr>
<tr>
<td><em>RoGemma2-9b-Instruct-2025-04-23</em></td><td><center><em>54.39</em></center></td><td><center><em>50.24</em></center></td><td><center><em>62.00</em></center></td><td><center><em>70.38</em></center></td><td><center><em>52.25</em></center></td><td><center><em>40.51</em></center></td><td><center><em>50.97</em></center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-DPO-2024-10-09</td><td><center>59.08</center></td><td><center>54.10</center></td><td><center>63.41</center></td><td><center>70.02</center></td><td><center>59.35</center></td><td><center><strong>57.24</strong></center></td><td><center>50.39</center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-DPO-2025-04-23</td><td><center><strong>59.79</strong></center></td><td><center>55.66</center></td><td><center>64.00</center></td><td><center><strong>73.16</strong></center></td><td><center><strong>64.26</strong></center></td><td><center>37.80</center></td><td><center><strong>63.86</strong></center></td>
</tr>
</tbody>
</table>
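
The per-benchmark figures above (and in the metadata at the top of this card) are averages over the shot settings listed in the model-index. A quick sanity check against the ARC numbers for RoGemma2-9b-Instruct-2025-04-23, using values copied from the model-index:

```python
# ARC accuracy per shot setting, from the model-index above
arc_by_shot = {0: 47.47, 1: 50.56, 3: 50.73, 5: 50.39, 10: 50.99, 25: 51.33}

average = sum(arc_by_shot.values()) / len(arc_by_shot)
print(f"{average:.3f}")  # 50.245 -> reported as 50.24 in the table above
```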

## Downstream tasks

<table>
<tbody>
<tr>
<td></td>
<td colspan="4"><center><strong>LaRoSeDa</strong></center></td>
<td colspan="4"><center><strong>WMT</strong></center></td>
</tr>
<tr>
<td></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
</tr>
<tr>
<td><strong>Model</strong></td>
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
<td><center><strong>EN-RO<br>(Bleu)</strong></center></td>
<td><center><strong>RO-EN<br>(Bleu)</strong></center></td>
<td><center><strong>EN-RO<br>(Bleu)</strong></center></td>
<td><center><strong>RO-EN<br>(Bleu)</strong></center></td>
</tr>
<tr>
<td>gemma-2-9b-it</td><td><center>90.82</center></td><td><center>52.51</center></td><td><center><strong>98.97</strong></center></td><td><center>86.02</center></td><td><center>19.97</center></td><td><center><strong>28.94</strong></center></td><td><center>27.94</center></td><td><center><strong>41.61</strong></center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-2024-10-09</td><td><center>96.19</center></td><td><center>62.49</center></td><td><center>98.93</center></td><td><center><strong>88.33</strong></center></td><td><center>25.74</center></td><td><center>23.16</center></td><td><center><strong>28.43</strong></center></td><td><center>40.94</center></td>
</tr>
<tr>
<td><em>RoGemma2-9b-Instruct-2025-04-23</em></td><td><center><em>84.23</em></center></td><td><center><em>60.14</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em>17.78</em></center></td><td><center><em>18.24</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-DPO-2024-10-09</td><td><center><strong>97.74</strong></center></td><td><center><strong>67.40</strong></center></td><td><center>-</center></td><td><center>-</center></td><td><center>27.32</center></td><td><center>15.96</center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-DPO-2025-04-23</td><td><center>82.84</center></td><td><center>65.95</center></td><td><center>-</center></td><td><center>-</center></td><td><center><strong>28.16</strong></center></td><td><center>19.34</center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
</tbody>
</table>
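
In the few-shot columns, each "shot" is an in-context example prepended to the test instance. For illustration, a hypothetical sketch of how a 1-shot EN-RO translation prompt might be assembled (the exact prompt wording used by the evaluation harness is not specified in this card):

```python
def build_translation_prompt(examples, sentence):
    """Assemble a few-shot EN->RO prompt; the wording here is illustrative only."""
    blocks = [f"English: {en}\nRomanian: {ro}" for en, ro in examples]
    blocks.append(f"English: {sentence}\nRomanian:")
    return "\n\n".join(blocks)

# 1-shot: a single worked example precedes the sentence to translate
prompt = build_translation_prompt(
    [("Good morning!", "Bună dimineața!")],
    "How are you today?",
)
```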

<table>
<tbody>
<tr>
<td></td>
<td colspan="4"><center><strong>XQuAD</strong></center></td>
<td colspan="4"><center><strong>STS</strong></center></td>
</tr>
<tr>
<td></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
</tr>
<tr>
<td><strong>Model</strong></td>
<td><center><strong>(EM)</strong></center></td>
<td><center><strong>(F1)</strong></center></td>
<td><center><strong>(EM)</strong></center></td>
<td><center><strong>(F1)</strong></center></td>
<td><center><strong>(Spearman)</strong></center></td>
<td><center><strong>(Pearson)</strong></center></td>
<td><center><strong>(Spearman)</strong></center></td>
<td><center><strong>(Pearson)</strong></center></td>
</tr>
<tr>
<td>gemma-2-9b-it</td><td><center>37.56</center></td><td><center>57.48</center></td><td><center><strong>71.09</strong></center></td><td><center><strong>84.78</strong></center></td><td><center>71.39</center></td><td><center>71.73</center></td><td><center>89.07</center></td><td><center>89.29</center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-2024-10-09</td><td><center><strong>51.37</strong></center></td><td><center><strong>70.74</strong></center></td><td><center>50.00</center></td><td><center>64.10</center></td><td><center>77.15</center></td><td><center>77.10</center></td><td><center><strong>89.45</strong></center></td><td><center><strong>89.89</strong></center></td>
</tr>
<tr>
<td><em>RoGemma2-9b-Instruct-2025-04-23</em></td><td><center><em>49.22</em></center></td><td><center><em>66.33</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em>70.17</em></center></td><td><center><em>70.80</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-DPO-2024-10-09</td><td><center>32.42</center></td><td><center>58.68</center></td><td><center>-</center></td><td><center>-</center></td><td><center><strong>80.82</strong></center></td><td><center><strong>81.50</strong></center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-DPO-2025-04-23</td><td><center>30.82</center></td><td><center>48.53</center></td><td><center>-</center></td><td><center>-</center></td><td><center>73.24</center></td><td><center>73.13</center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
</tbody>
</table>
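
For STS, the model's predicted similarity scores are compared against gold annotations using rank (Spearman) and linear (Pearson) correlation. A minimal sketch with `scipy`, where the score lists are placeholders:

```python
from scipy.stats import pearsonr, spearmanr

predicted = [4.2, 1.0, 3.5, 2.8]  # placeholder model-predicted similarities
gold = [4.0, 0.5, 3.8, 2.5]       # placeholder gold annotations

rho, _ = spearmanr(predicted, gold)  # rank correlation ("Spearman" column)
r, _ = pearsonr(predicted, gold)     # linear correlation ("Pearson" column)
print(round(rho * 100, 2), round(r * 100, 2))  # the card reports correlations scaled to 0-100
```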

## MT-Bench

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>1st turn</center></strong></td>
<td><strong><center>2nd turn</center></strong></td>
<td><strong><center>Answers in Ro</center></strong></td>
</tr>
<tr>
<td>gemma-2-9b-it</td><td><center><strong>7.50</strong></center></td><td><center><strong>7.91</strong></center></td><td><center><strong>7.09</strong></center></td><td><center>159/160</center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-2024-10-09</td><td><center>6.08</center></td><td><center>6.78</center></td><td><center>5.39</center></td><td><center><strong>160/160</strong></center></td>
</tr>
<tr>
<td><em>RoGemma2-9b-Instruct-2025-04-23</em></td><td><center><em>6.78</em></center></td><td><center><em>7.00</em></center></td><td><center><em>6.55</em></center></td><td><center><em><strong>160/160</strong></em></center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-DPO-2024-10-09</td><td><center>6.77</center></td><td><center>7.24</center></td><td><center>6.30</center></td><td><center><strong>160/160</strong></center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-DPO-2025-04-23</td><td><center>7.26</center></td><td><center>7.65</center></td><td><center>6.86</center></td><td><center><strong>160/160</strong></center></td>
</tr>
</tbody>
</table>

## RoCulturaBench

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>Answers in Ro</center></strong></td>
</tr>
<tr>
<td>gemma-2-9b-it</td><td><center><strong>5.68</strong></center></td><td><center><strong>100/100</strong></center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-2024-10-09</td><td><center>4.20</center></td><td><center><strong>100/100</strong></center></td>
</tr>
<tr>
<td><em>RoGemma2-9b-Instruct-2025-04-23</em></td><td><center><em>4.89</em></center></td><td><center><em><strong>100/100</strong></em></center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-DPO-2024-10-09</td><td><center>4.83</center></td><td><center><strong>100/100</strong></center></td>
</tr>
<tr>
<td>RoGemma2-9b-Instruct-DPO-2025-04-23</td><td><center>5.36</center></td><td><center><strong>100/100</strong></center></td>
</tr>
</tbody>
</table>

## RoGemma2 Model Family

| Model | Link |
|--------------------|:--------:|
|RoGemma2-9b-Instruct-2024-10-09| [link](https://huggingface.co/OpenLLM-Ro/RoGemma2-9b-Instruct-2024-10-09) |
|*RoGemma2-9b-Instruct-2025-04-23*| [link](https://huggingface.co/OpenLLM-Ro/RoGemma2-9b-Instruct-2025-04-23) |
|RoGemma2-9b-Instruct-DPO-2024-10-09| [link](https://huggingface.co/OpenLLM-Ro/RoGemma2-9b-Instruct-DPO-2024-10-09) |
|RoGemma2-9b-Instruct-DPO-2025-04-23| [link](https://huggingface.co/OpenLLM-Ro/RoGemma2-9b-Instruct-DPO-2025-04-23) |

## Citation

```
@misc{masala2024vorbecstiromanecsterecipetrain,
      title={"Vorbe\c{s}ti Rom\^ane\c{s}te?" A Recipe to Train Powerful Romanian LLMs with English Instructions},
      author={Mihai Masala and Denis C. Ilie-Ablachim and Alexandru Dima and Dragos Corlatescu and Miruna Zavelca and Ovio Olaru and Simina Terian-Dan and Andrei Terian-Dan and Marius Leordeanu and Horia Velicu and Marius Popescu and Mihai Dascalu and Traian Rebedea},
      year={2024},
      eprint={2406.18266},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2406.18266},
}
```
<!-- **APA:** [More Information Needed] -->