eduagarcia committed on
Commit
95e25c3
·
verified ·
1 Parent(s): 88481fa

Retry 36 FAILED models

Browse files
Files changed (36) hide show
  1. AALF/gemma-2-27b-it-SimPO-37K_eval_request_False_bfloat16_Original.json +2 -4
  2. AALF/gemma-2-27b-it-SimPO-37K_eval_request_False_float16_Original.json +2 -4
  3. BAAI/Gemma2-9B-IT-Simpo-Infinity-Preference_eval_request_False_bfloat16_Original.json +2 -4
  4. EpistemeAI2/Fireball-Alpaca-Llama3.1.06-8B-Philos_eval_request_False_float16_Original.json +2 -4
  5. J-LAB/Nemo_Think2_eval_request_False_float16_Original.json +2 -4
  6. J-LAB/Nemo_Think3_eval_request_False_float16_Original.json +2 -4
  7. J-LAB/Nemo_Think_eval_request_False_float16_Original.json +2 -4
  8. Magpie-Align/MagpieLM-8B-Chat-v0.1_eval_request_False_bfloat16_Original.json +2 -4
  9. Magpie-Align/MagpieLM-8B-SFT-v0.1_eval_request_False_bfloat16_Original.json +2 -4
  10. MaziyarPanahi/calme-2.1-qwen2-7b_eval_request_False_bfloat16_Original.json +2 -4
  11. MaziyarPanahi/calme-2.2-qwen2-7b_eval_request_False_bfloat16_Original.json +2 -4
  12. MaziyarPanahi/calme-2.3-qwen2-7b_eval_request_False_bfloat16_Original.json +2 -4
  13. MaziyarPanahi/calme-2.4-qwen2-7b_eval_request_False_bfloat16_Original.json +2 -4
  14. Qwen/Qwen2.5-14B_eval_request_False_bfloat16_Original.json +2 -4
  15. Qwen/Qwen2.5-7B-Instruct_eval_request_False_bfloat16_Original.json +2 -4
  16. Qwen/Qwen2.5-7B_eval_request_False_bfloat16_Original.json +2 -4
  17. SicariusSicariiStuff/Qwen2.5-14B_Uncencored_eval_request_False_float16_Original.json +2 -4
  18. TheDrummer/Big-Tiger-Gemma-27B-v1_eval_request_False_bfloat16_Original.json +2 -4
  19. TheDrummer/Gemmasutra-9B-v1_eval_request_False_bfloat16_Original.json +2 -4
  20. VAGOsolutions/SauerkrautLM-Phi-3-medium_eval_request_False_bfloat16_Original.json +2 -4
  21. abacusai/Slerp-CM-mist-dpo_eval_request_False_float16_Original.json +2 -4
  22. abacusai/bigstral-12b-32k_eval_request_False_float16_Original.json +2 -4
  23. abacusai/bigyi-15b_eval_request_False_float16_Original.json +2 -4
  24. anthracite-org/magnum-v2.5-12b-kto_eval_request_False_float16_Original.json +2 -4
  25. bunnycore/HyperLlama-3.1-8B_eval_request_False_bfloat16_Original.json +2 -4
  26. byroneverson/gemma-2-27b-it-abliterated_eval_request_False_bfloat16_Original.json +2 -4
  27. gaverfraxz/Meta-Llama-3.1-8B-Instruct-HalfAbliterated-TIES_eval_request_False_float16_Original.json +2 -4
  28. jpacifico/Chocolatine-14B-Instruct-DPO-v1.2_eval_request_False_float16_Original.json +2 -4
  29. migtissera/Tess-3-Mistral-Nemo-12B_eval_request_False_bfloat16_Original.json +2 -4
  30. migtissera/Tess-v2.5-Gemma-2-27B-alpha_eval_request_False_bfloat16_Original.json +2 -4
  31. monsterapi/Llama-3_1-8B-Instruct-orca-ORPO_eval_request_False_bfloat16_Adapter.json +2 -4
  32. nbeerbower/mistral-nemo-wissenschaft-12B_eval_request_False_bfloat16_Original.json +2 -4
  33. princeton-nlp/gemma-2-9b-it-DPO_eval_request_False_bfloat16_Original.json +2 -4
  34. recoilme/recoilme-gemma-2-9B-v0.3_eval_request_False_bfloat16_Original.json +2 -4
  35. recoilme/recoilme-gemma-2-9B-v0.4_eval_request_False_bfloat16_Original.json +2 -4
  36. v000000/Qwen2.5-Lumen-14B_eval_request_False_bfloat16_Original.json +2 -4
AALF/gemma-2-27b-it-SimPO-37K_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-08-29T19:24:31Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1061,
16
- "job_start_time": "2024-09-09T02-16-20.601975",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3903, in from_pretrained\n ) = 
cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4351, in _load_pretrained_model\n resolved_archive_file = logging.tqdm(resolved_archive_file, desc=\"Loading checkpoint shards\")\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-08-29T19:24:31Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1061,
16
+ "job_start_time": "2024-09-09T02-16-20.601975"
 
 
17
  }
AALF/gemma-2-27b-it-SimPO-37K_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-05T20:33:39Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1058,
16
- "job_start_time": "2024-09-09T02-07-11.545200",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3903, in from_pretrained\n ) = 
cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4351, in _load_pretrained_model\n resolved_archive_file = logging.tqdm(resolved_archive_file, desc=\"Loading checkpoint shards\")\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-05T20:33:39Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1058,
16
+ "job_start_time": "2024-09-09T02-07-11.545200"
 
 
17
  }
BAAI/Gemma2-9B-IT-Simpo-Infinity-Preference_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-02T16:32:57Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1050,
16
- "job_start_time": "2024-09-09T01-43-17.349330",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-02T16:32:57Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1050,
16
+ "job_start_time": "2024-09-09T01-43-17.349330"
 
 
17
  }
EpistemeAI2/Fireball-Alpaca-Llama3.1.06-8B-Philos_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-09T17:52:08Z",
13
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1063,
16
- "job_start_time": "2024-09-10T01-40-21.109448",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-09T17:52:08Z",
13
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1063,
16
+ "job_start_time": "2024-09-10T01-40-21.109448"
 
 
17
  }
J-LAB/Nemo_Think2_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-19T20:13:29Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1120,
16
- "job_start_time": "2024-09-23T09-36-36.092477",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-19T20:13:29Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1120,
16
+ "job_start_time": "2024-09-23T09-36-36.092477"
 
 
17
  }
J-LAB/Nemo_Think3_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-20T19:24:41Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1123,
16
- "job_start_time": "2024-09-23T09-46-58.753067",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-20T19:24:41Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1123,
16
+ "job_start_time": "2024-09-23T09-46-58.753067"
 
 
17
  }
J-LAB/Nemo_Think_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-19T13:42:42Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1113,
16
- "job_start_time": "2024-09-23T09-11-05.359972",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-19T13:42:42Z",
13
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1113,
16
+ "job_start_time": "2024-09-23T09-11-05.359972"
 
 
17
  }
Magpie-Align/MagpieLM-8B-Chat-v0.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-19T04:37:59Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1112,
16
- "job_start_time": "2024-09-23T09-07-25.125234",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-19T04:37:59Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1112,
16
+ "job_start_time": "2024-09-23T09-07-25.125234"
 
 
17
  }
Magpie-Align/MagpieLM-8B-SFT-v0.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-19T04:36:41Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1111,
16
- "job_start_time": "2024-09-23T09-03-42.325574",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-19T04:36:41Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1111,
16
+ "job_start_time": "2024-09-23T09-03-42.325574"
 
 
17
  }
MaziyarPanahi/calme-2.1-qwen2-7b_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-18T20:25:05Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1103,
16
- "job_start_time": "2024-09-23T05-22-36.556078",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1553, in generate_until\n pbar.update(1)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1243, in update\n self.refresh(lock_args=self.lock_args)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1348, in refresh\n self.display()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1496, in display\n self.sp(self.__str__() if msg is None else msg)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", 
line 462, in print_status\n fp_write('\\r' + s + (' ' * max(last_len[0] - len_s, 0)))\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 455, in fp_write\n fp.write(str(s))\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/utils.py\", line 195, in inner\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-18T20:25:05Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1103,
16
+ "job_start_time": "2024-09-23T05-22-36.556078"
 
 
17
  }
MaziyarPanahi/calme-2.2-qwen2-7b_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-18T20:25:13Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1104,
16
- "job_start_time": "2024-09-23T08-38-27.841505",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-18T20:25:13Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1104,
16
+ "job_start_time": "2024-09-23T08-38-27.841505"
 
 
17
  }
MaziyarPanahi/calme-2.3-qwen2-7b_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-18T20:25:21Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1105,
16
- "job_start_time": "2024-09-23T08-42-03.286008",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-18T20:25:21Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1105,
16
+ "job_start_time": "2024-09-23T08-42-03.286008"
 
 
17
  }
MaziyarPanahi/calme-2.4-qwen2-7b_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-18T20:25:28Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1106,
16
- "job_start_time": "2024-09-23T08-45-39.179643",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-18T20:25:28Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1106,
16
+ "job_start_time": "2024-09-23T08-45-39.179643"
 
 
17
  }
Qwen/Qwen2.5-14B_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-18T22:09:13Z",
13
  "model_type": "๐ŸŸข : pretrained",
14
  "source": "leaderboard",
15
  "job_id": 1109,
16
- "job_start_time": "2024-09-23T08-56-25.014044",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-18T22:09:13Z",
13
  "model_type": "๐ŸŸข : pretrained",
14
  "source": "leaderboard",
15
  "job_id": 1109,
16
+ "job_start_time": "2024-09-23T08-56-25.014044"
 
 
17
  }
Qwen/Qwen2.5-7B-Instruct_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-18T22:08:38Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1108,
16
- "job_start_time": "2024-09-23T08-52-49.584127",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-18T22:08:38Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1108,
16
+ "job_start_time": "2024-09-23T08-52-49.584127"
 
 
17
  }
Qwen/Qwen2.5-7B_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-18T22:09:22Z",
13
  "model_type": "๐ŸŸข : pretrained",
14
  "source": "leaderboard",
15
  "job_id": 1110,
16
- "job_start_time": "2024-09-23T09-00-01.154915",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-18T22:09:22Z",
13
  "model_type": "๐ŸŸข : pretrained",
14
  "source": "leaderboard",
15
  "job_id": 1110,
16
+ "job_start_time": "2024-09-23T09-00-01.154915"
 
 
17
  }
SicariusSicariiStuff/Qwen2.5-14B_Uncencored_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-20T17:08:18Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1121,
16
- "job_start_time": "2024-09-23T09-40-08.401951",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-20T17:08:18Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1121,
16
+ "job_start_time": "2024-09-23T09-40-08.401951"
 
 
17
  }
TheDrummer/Big-Tiger-Gemma-27B-v1_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-05T20:25:36Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1056,
16
- "job_start_time": "2024-09-09T02-01-14.321515",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-05T20:25:36Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1056,
16
+ "job_start_time": "2024-09-09T02-01-14.321515"
 
 
17
  }
TheDrummer/Gemmasutra-9B-v1_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-19T15:28:20Z",
13
  "model_type": "๐Ÿค : base merges and moerges",
14
  "source": "leaderboard",
15
  "job_id": 1115,
16
- "job_start_time": "2024-09-23T09-18-21.723039",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-19T15:28:20Z",
13
  "model_type": "๐Ÿค : base merges and moerges",
14
  "source": "leaderboard",
15
  "job_id": 1115,
16
+ "job_start_time": "2024-09-23T09-18-21.723039"
 
 
17
  }
VAGOsolutions/SauerkrautLM-Phi-3-medium_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-19T16:16:35Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1119,
16
- "job_start_time": "2024-09-23T09-32-59.618920",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-19T16:16:35Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1119,
16
+ "job_start_time": "2024-09-23T09-32-59.618920"
 
 
17
  }
abacusai/Slerp-CM-mist-dpo_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-05T03:45:24Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1053,
16
- "job_start_time": "2024-09-09T01-52-14.100568",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-05T03:45:24Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1053,
16
+ "job_start_time": "2024-09-09T01-52-14.100568"
 
 
17
  }
abacusai/bigstral-12b-32k_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-04T16:49:34Z",
13
  "model_type": "๐Ÿค : base merges and moerges",
14
  "source": "leaderboard",
15
  "job_id": 1051,
16
- "job_start_time": "2024-09-09T01-46-16.045949",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-04T16:49:34Z",
13
  "model_type": "๐Ÿค : base merges and moerges",
14
  "source": "leaderboard",
15
  "job_id": 1051,
16
+ "job_start_time": "2024-09-09T01-46-16.045949"
 
 
17
  }
abacusai/bigyi-15b_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-04T16:50:31Z",
13
  "model_type": "๐Ÿค : base merges and moerges",
14
  "source": "leaderboard",
15
  "job_id": 1052,
16
- "job_start_time": "2024-09-09T01-49-14.056504",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-04T16:50:31Z",
13
  "model_type": "๐Ÿค : base merges and moerges",
14
  "source": "leaderboard",
15
  "job_id": 1052,
16
+ "job_start_time": "2024-09-09T01-49-14.056504"
 
 
17
  }
anthracite-org/magnum-v2.5-12b-kto_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-08-22T18:39:50Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1060,
16
- "job_start_time": "2024-09-09T02-13-17.352675",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-08-22T18:39:50Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1060,
16
+ "job_start_time": "2024-09-09T02-13-17.352675"
 
 
17
  }
bunnycore/HyperLlama-3.1-8B_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-05T20:28:57Z",
13
  "model_type": "๐Ÿค : base merges and moerges",
14
  "source": "leaderboard",
15
  "job_id": 1057,
16
- "job_start_time": "2024-09-09T02-04-13.523740",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-05T20:28:57Z",
13
  "model_type": "๐Ÿค : base merges and moerges",
14
  "source": "leaderboard",
15
  "job_id": 1057,
16
+ "job_start_time": "2024-09-09T02-04-13.523740"
 
 
17
  }
byroneverson/gemma-2-27b-it-abliterated_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-05T20:20:45Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1054,
16
- "job_start_time": "2024-09-09T01-55-12.781709",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-05T20:20:45Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1054,
16
+ "job_start_time": "2024-09-09T01-55-12.781709"
 
 
17
  }
gaverfraxz/Meta-Llama-3.1-8B-Instruct-HalfAbliterated-TIES_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-19T16:00:14Z",
13
  "model_type": "๐Ÿค : base merges and moerges",
14
  "source": "leaderboard",
15
  "job_id": 1118,
16
- "job_start_time": "2024-09-23T09-29-21.605427",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-19T16:00:14Z",
13
  "model_type": "๐Ÿค : base merges and moerges",
14
  "source": "leaderboard",
15
  "job_id": 1118,
16
+ "job_start_time": "2024-09-23T09-29-21.605427"
 
 
17
  }
jpacifico/Chocolatine-14B-Instruct-DPO-v1.2_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Phi3ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-07T01:34:44Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1059,
16
- "job_start_time": "2024-09-09T02-10-17.527294",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 559, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Phi3ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-07T01:34:44Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1059,
16
+ "job_start_time": "2024-09-09T02-10-17.527294"
 
 
17
  }
migtissera/Tess-3-Mistral-Nemo-12B_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-08-30T03:06:38Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1049,
16
- "job_start_time": "2024-09-09T01-40-19.519026",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-08-30T03:06:38Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1049,
16
+ "job_start_time": "2024-09-09T01-40-19.519026"
 
 
17
  }
migtissera/Tess-v2.5-Gemma-2-27B-alpha_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-05T20:23:26Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1055,
16
- "job_start_time": "2024-09-09T01-58-12.573627",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-05T20:23:26Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1055,
16
+ "job_start_time": "2024-09-09T01-58-12.573627"
 
 
17
  }
monsterapi/Llama-3_1-8B-Instruct-orca-ORPO_eval_request_False_bfloat16_Adapter.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "?",
9
  "weight_type": "Adapter",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-08-30T04:21:37Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1062,
16
- "job_start_time": "2024-09-09T02-19-18.425282",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3903, in from_pretrained\n ) = 
cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4351, in _load_pretrained_model\n resolved_archive_file = logging.tqdm(resolved_archive_file, desc=\"Loading checkpoint shards\")\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "?",
9
  "weight_type": "Adapter",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-08-30T04:21:37Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1062,
16
+ "job_start_time": "2024-09-09T02-19-18.425282"
 
 
17
  }
nbeerbower/mistral-nemo-wissenschaft-12B_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-08-30T03:00:12Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1048,
16
- "job_start_time": "2024-09-09T01-37-18.215572",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "MistralForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-08-30T03:00:12Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1048,
16
+ "job_start_time": "2024-09-09T01-37-18.215572"
 
 
17
  }
princeton-nlp/gemma-2-9b-it-DPO_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-19T15:24:11Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1114,
16
- "job_start_time": "2024-09-23T09-14-45.604278",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-19T15:24:11Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1114,
16
+ "job_start_time": "2024-09-23T09-14-45.604278"
 
 
17
  }
recoilme/recoilme-gemma-2-9B-v0.3_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-19T15:29:39Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1116,
16
- "job_start_time": "2024-09-23T09-22-04.155178",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-19T15:29:39Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1116,
16
+ "job_start_time": "2024-09-23T09-22-04.155178"
 
 
17
  }
recoilme/recoilme-gemma-2-9B-v0.4_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-19T15:29:45Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1117,
16
- "job_start_time": "2024-09-23T09-25-45.091565",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Gemma2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-19T15:29:45Z",
13
  "model_type": "๐Ÿ”ถ : fine-tuned/fp on domain-specific datasets",
14
  "source": "leaderboard",
15
  "job_id": 1117,
16
+ "job_start_time": "2024-09-23T09-25-45.091565"
 
 
17
  }
v000000/Qwen2.5-Lumen-14B_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
- "status": "FAILED",
12
  "submitted_time": "2024-09-20T18:11:48Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1122,
16
- "job_start_time": "2024-09-23T09-43-32.674090",
17
- "error_msg": "[Errno 28] No space left on device",
18
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3658, in from_pretrained\n 
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1076, in get_checkpoint_shard_files\n for shard_filename in tqdm(shard_filenames, desc=\"Downloading shards\", disable=not show_progress_bar):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/logging.py\", line 361, in __call__\n return tqdm_lib.tqdm(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/asyncio.py\", line 24, in __init__\n super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1097, in __init__\n self.sp = self.status_printer(self.fp)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 451, in status_printer\n getattr(sys.stderr, 'flush', lambda: None)()\nOSError: [Errno 28] No space left on device\n"
19
  }
 
8
  "architectures": "Qwen2ForCausalLM",
9
  "weight_type": "Original",
10
  "main_language": "English",
11
+ "status": "RERUN",
12
  "submitted_time": "2024-09-20T18:11:48Z",
13
  "model_type": "๐Ÿ’ฌ : chat (RLHF, DPO, IFT, ...)",
14
  "source": "leaderboard",
15
  "job_id": 1122,
16
+ "job_start_time": "2024-09-23T09-43-32.674090"
 
 
17
  }