eduagarcia committed (verified)
Commit 60f0efb · 1 Parent(s): 486dfcf

Retry 53 FAILED models

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. 22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json +2 -4
  2. BAAI/Aquila-7B_eval_request_False_float16_Original.json +2 -4
  3. BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json +2 -4
  4. BAAI/Aquila2-7B_eval_request_False_float16_Original.json +2 -4
  5. Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json +2 -4
  6. EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json +2 -4
  7. EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json +2 -4
  8. EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json +2 -4
  9. EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json +1 -1
  10. EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json +2 -4
  11. EleutherAI/pythia-14m_eval_request_False_float16_Original.json +2 -4
  12. EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json +2 -4
  13. EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json +2 -4
  14. EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json +2 -4
  15. EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json +2 -4
  16. EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json +2 -4
  17. EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json +2 -4
  18. NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json +2 -4
  19. OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json +2 -4
  20. Qwen/Qwen-14B_eval_request_False_bfloat16_Original.json +2 -4
  21. Qwen/Qwen-72B_eval_request_False_bfloat16_Original.json +2 -4
  22. Qwen/Qwen-7B_eval_request_False_bfloat16_Original.json +2 -4
  23. Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json +2 -4
  24. THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json +2 -4
  25. THUDM/glm-10b_eval_request_False_float16_Original.json +2 -4
  26. THUDM/glm-2b_eval_request_False_float16_Original.json +2 -4
  27. baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json +2 -4
  28. baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json +2 -4
  29. baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json +2 -4
  30. deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json +2 -4
  31. deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json +2 -4
  32. deepseek-ai/deepseek-moe-16b-base_eval_request_False_bfloat16_Original.json +2 -4
  33. facebook/opt-66b_eval_request_False_float16_Original.json +2 -4
  34. google/umt5-base_eval_request_False_bfloat16_Original.json +1 -1
  35. google/umt5-small_eval_request_False_bfloat16_Original.json +1 -1
  36. gpt2_eval_request_False_float16_Original.json +2 -4
  37. huggyllama/llama-30b_eval_request_False_float16_Original.json +2 -4
  38. huggyllama/llama-65b_eval_request_False_float16_Original.json +2 -4
  39. matsuo-lab/weblab-10b_eval_request_False_float16_Original.json +1 -1
  40. meta-llama/Llama-2-70b-hf_eval_request_False_float16_Original.json +2 -4
  41. mistralai/Mixtral-8x7B-v0.1_eval_request_False_bfloat16_Original.json +2 -4
  42. nicolasdec/cabra13b_eval_request_False_float16_Original.json +1 -1
  43. recogna-nlp/bode-13b-alpaca-pt-br_eval_request_False_float16_Adapter.json +1 -1
  44. t5-base_eval_request_False_bfloat16_Original.json +2 -4
  45. t5-large_eval_request_False_bfloat16_Original.json +2 -4
  46. t5-small_eval_request_False_bfloat16_Original.json +2 -4
  47. tiiuae/falcon-40b_eval_request_False_bfloat16_Original.json +2 -4
  48. tiiuae/falcon-7b_eval_request_False_bfloat16_Original.json +2 -4
  49. xverse/XVERSE-13B-256K_eval_request_False_bfloat16_Original.json +2 -4
  50. xverse/XVERSE-13B_eval_request_False_bfloat16_Original.json +2 -4
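
Every file in this commit follows the same pattern: the request's "status" is reset to "RERUN" and the stale "error_msg"/"traceback" fields left over from the failed run are dropped, so the eval bot will pick the model up again. As a rough illustration only (the requests/ directory name and the reset_failed_request helper are assumptions, not the script actually used for this commit), such a bulk reset could be done with:

    import json
    from pathlib import Path

    # Assumed layout: the eval request JSONs live under a local checkout of the
    # requests dataset; the directory name here is illustrative.
    REQUESTS_DIR = Path("requests")

    def reset_failed_request(path: Path) -> bool:
        """Flip a FAILED eval request back to RERUN and drop stale error fields."""
        data = json.loads(path.read_text(encoding="utf-8"))
        if data.get("status") != "FAILED":
            return False
        data["status"] = "RERUN"
        data.pop("error_msg", None)   # stale error message from the failed run
        data.pop("traceback", None)   # stale stack trace from the failed run
        path.write_text(json.dumps(data, indent=2, ensure_ascii=False) + "\n",
                        encoding="utf-8")
        return True

    if __name__ == "__main__":
        reset = sum(reset_failed_request(p)
                    for p in sorted(REQUESTS_DIR.rglob("*_eval_request_*.json")))
        print(f"Reset {reset} requests to RERUN")
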
22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0,
  "architectures": "?",
  "weight_type": "Adapter",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:03:11Z",
  "model_type": "πŸ”Ά : fine-tuned",
  "source": "script",
  "job_id": 14,
- "job_start_time": "2024-02-06T16-32-12.465705",
- "error_msg": "LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 183, in wait_download_and_run_request\n del MODELS_DOWNLOADED[f\"{request['model']}_{request['revision']}\"]\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 54, in run_eval_on_model\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 633, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n config = PEFT_TYPE_TO_CONFIG_MAPPING[\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n return cls.from_peft_type(**kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\n return config_cls(**kwargs)\n ^^^^^^^^^^^^^^^^^^^^\nTypeError: LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'\n"
+ "job_start_time": "2024-02-06T16-32-12.465705"
  }
BAAI/Aquila-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 7.0,
  "architectures": "AquilaModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:09:00Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 55,
- "job_start_time": "2024-02-07T11-57-53.658761",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-07T11-57-53.658761"
  }
BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 34.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:10:17Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 64,
- "job_start_time": "2024-02-07T14-43-05.091904",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-07T14-43-05.091904"
  }
BAAI/Aquila2-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 7.0,
  "architectures": "AquilaModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:09:07Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 56,
- "job_start_time": "2024-02-07T11-57-55.232433",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-07T11-57-55.232433"
  }
Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 5.717,
  "architectures": "DeciLMForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:06:24Z",
  "model_type": "πŸ”Ά : fine-tuned",
  "source": "script",
  "job_id": 38,
- "job_start_time": "2024-02-07T06-27-10.296449",
- "error_msg": "'DeciLMModel' object has no attribute '_use_flash_attention_2'",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1420, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 809, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 140, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 804, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1046, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1181, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1027, in forward\n if 
self._use_flash_attention_2:\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1695, in __getattr__\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\nAttributeError: 'DeciLMModel' object has no attribute '_use_flash_attention_2'\n"
+ "job_start_time": "2024-02-07T06-27-10.296449"
  }
EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 1.366,
  "architectures": "GPTNeoForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:12:06Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.15,
  "architectures": "GPTNeoForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:59Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 2.718,
  "architectures": "GPTNeoForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:12:14Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 13.061,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "RUNNING",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:15:01Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 12.0,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:53Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 77,
- "job_start_time": "2024-02-07T15-18-15.926306",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-07T15-18-15.926306"
  }
EleutherAI/pythia-14m_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.039,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:12Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.213,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:23Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 1.079,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:36Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 2.909,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:43Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.506,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:30Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 6.9,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:48Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 76,
- "job_start_time": "2024-02-07T15-03-11.086465",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-07T15-03-11.086465"
  }
EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.096,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:17Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 21.828,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:04Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 69,
- "job_start_time": "2024-02-07T14-44-07.880943",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-07T14-44-07.880943"
  }
OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 14.0,
  "architectures": "OrionForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:08:40Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 52,
- "job_start_time": "2024-02-07T11-39-50.350493",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-07T11-39-50.350493"
  }
Qwen/Qwen-14B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 14.167,
  "architectures": "QWenLMHeadModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:07:31Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 44,
- "job_start_time": "2024-02-07T11-35-01.755186",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1420, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 809, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 140, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 804, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1046, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 1043, in forward\n transformer_outputs = self.transformer(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 891, in forward\n outputs = block(\n ^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 610, in forward\n attn_outputs = self.attn(\n ^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 433, in forward\n key = apply_rotary_pos_emb(key, k_pos_emb)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 1342, in apply_rotary_pos_emb\n return apply_rotary_emb_func(t_float, cos, sin).type_as(t)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-07T11-35-01.755186"
  }
Qwen/Qwen-72B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 72.288,
  "architectures": "QWenLMHeadModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:09:47Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 60,
- "job_start_time": "2024-02-07T11-59-01.419020",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 72.288,
8
  "architectures": "QWenLMHeadModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:09:47Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 60,
15
+ "job_start_time": "2024-02-07T11-59-01.419020"
 
 
16
  }
Qwen/Qwen-7B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 7.721,
8
  "architectures": "QWenLMHeadModel",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:07:22Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 43,
15
- "job_start_time": "2024-02-07T11-28-04.623288",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1497, in generate_until\n context_enc = context_enc.to(self.device)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 7.721,
8
  "architectures": "QWenLMHeadModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:07:22Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 43,
15
+ "job_start_time": "2024-02-07T11-28-04.623288"
 
 
16
  }
Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 13.0,
8
  "architectures": "SkyworkForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:06Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 48,
15
- "job_start_time": "2024-02-07T11-38-46.017323",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 13.0,
8
  "architectures": "SkyworkForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:06Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 48,
15
+ "job_start_time": "2024-02-07T11-38-46.017323"
 
 
16
  }
THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 6.0,
8
  "architectures": "ChatGLMModel",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:09:16Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 57,
15
- "job_start_time": "2024-02-07T11-57-55.923805",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 6.0,
8
  "architectures": "ChatGLMModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:09:16Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 57,
15
+ "job_start_time": "2024-02-07T11-57-55.923805"
 
 
16
  }
THUDM/glm-10b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 10.0,
8
  "architectures": "GLMModel",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:09:31Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": -1,
15
- "job_start_time": null,
16
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-10b.696788d4f82ac96b90823555f547d1e754839ff4.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, FalconConfig, StableLMEpochConfig, StableLMEpochConfig, InternLMConfig, InternLMConfig, DeciLMConfig, DeciLMConfig, XverseConfig, XverseConfig, QWenConfig, XverseConfig, QWenConfig, QWenConfig, DeepseekConfig.",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-10b.696788d4f82ac96b90823555f547d1e754839ff4.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, FalconConfig, StableLMEpochConfig, StableLMEpochConfig, InternLMConfig, InternLMConfig, DeciLMConfig, DeciLMConfig, XverseConfig, XverseConfig, QWenConfig, XverseConfig, QWenConfig, QWenConfig, DeepseekConfig.\n"
18
  }
 
7
  "params": 10.0,
8
  "architectures": "GLMModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:09:31Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": -1,
15
+ "job_start_time": null
 
 
16
  }
THUDM/glm-2b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 2.0,
8
  "architectures": "GLMModel",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:09:22Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": -1,
15
- "job_start_time": null,
16
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-2b.774fda883d7ad028b8effc3c65afec510fce9634.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, FalconConfig, StableLMEpochConfig, StableLMEpochConfig, InternLMConfig, InternLMConfig, DeciLMConfig, DeciLMConfig, XverseConfig, XverseConfig, QWenConfig, XverseConfig, QWenConfig, QWenConfig, DeepseekConfig.",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-2b.774fda883d7ad028b8effc3c65afec510fce9634.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, FalconConfig, StableLMEpochConfig, StableLMEpochConfig, InternLMConfig, InternLMConfig, DeciLMConfig, DeciLMConfig, XverseConfig, XverseConfig, QWenConfig, XverseConfig, QWenConfig, QWenConfig, DeepseekConfig.\n"
18
  }
 
7
  "params": 2.0,
8
  "architectures": "GLMModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:09:22Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": -1,
15
+ "job_start_time": null
 
 
16
  }
baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 7.0,
8
  "architectures": "BaiChuanForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:13Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 49,
15
- "job_start_time": "2024-02-07T11-38-47.434170",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 7.0,
8
  "architectures": "BaiChuanForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:13Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 49,
15
+ "job_start_time": "2024-02-07T11-38-47.434170"
 
 
16
  }
baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 13.0,
8
  "architectures": "BaichuanForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:33Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 51,
15
- "job_start_time": "2024-02-07T11-39-52.799043",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 13.0,
8
  "architectures": "BaichuanForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:33Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 51,
15
+ "job_start_time": "2024-02-07T11-39-52.799043"
 
 
16
  }
baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 7.0,
8
  "architectures": "BaichuanForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 50,
15
- "job_start_time": "2024-02-07T11-39-48.928240",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 7.0,
8
  "architectures": "BaichuanForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 50,
15
+ "job_start_time": "2024-02-07T11-39-48.928240"
 
 
16
  }
deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 67.0,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:10:09Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 63,
15
- "job_start_time": "2024-02-07T14-43-01.265695",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 67.0,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:10:09Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 63,
15
+ "job_start_time": "2024-02-07T14-43-01.265695"
 
 
16
  }
deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 7.0,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:46Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 53,
15
- "job_start_time": "2024-02-07T11-45-51.756387",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 7.0,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:46Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 53,
15
+ "job_start_time": "2024-02-07T11-45-51.756387"
 
 
16
  }
deepseek-ai/deepseek-moe-16b-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 16.376,
8
  "architectures": "DeepseekForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:52Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 54,
15
- "job_start_time": "2024-02-07T11-57-54.215866",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 16.376,
8
  "architectures": "DeepseekForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:52Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 54,
15
+ "job_start_time": "2024-02-07T11-57-54.215866"
 
 
16
  }
facebook/opt-66b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 66.0,
8
  "architectures": "OPTForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:13:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 86,
15
- "job_start_time": "2024-02-07T21-05-08.970477",
16
- "error_msg": "CUDA out of memory. Tried to allocate 162.00 MiB. GPU 1 has a total capacty of 79.35 GiB of which 78.19 MiB is free. Process 1605417 has 79.27 GiB memory in use. Of the allocated memory 78.75 GiB is allocated by PyTorch, and 14.44 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 566, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3706, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4116, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 778, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. GPU 1 has a total capacty of 79.35 GiB of which 78.19 MiB is free. Process 1605417 has 79.27 GiB memory in use. Of the allocated memory 78.75 GiB is allocated by PyTorch, and 14.44 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
18
  }
 
7
  "params": 66.0,
8
  "architectures": "OPTForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:13:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 86,
15
+ "job_start_time": "2024-02-07T21-05-08.970477"
 
 
16
  }
google/umt5-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
7
  "params": 0,
8
  "architectures": "UMT5ForConditionalGeneration",
9
  "weight_type": "Original",
10
- "status": "RUNNING",
11
  "submitted_time": "2024-02-05T23:18:55Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
 
7
  "params": 0,
8
  "architectures": "UMT5ForConditionalGeneration",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:18:55Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
google/umt5-small_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
7
  "params": 0,
8
  "architectures": "UMT5ForConditionalGeneration",
9
  "weight_type": "Original",
10
- "status": "RUNNING",
11
  "submitted_time": "2024-02-05T23:18:45Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
 
7
  "params": 0,
8
  "architectures": "UMT5ForConditionalGeneration",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:18:45Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
gpt2_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 0.137,
8
  "architectures": "GPT2LMHeadModel",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:10:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 65,
15
- "job_start_time": "2024-02-07T14-43-02.993316",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 0.137,
8
  "architectures": "GPT2LMHeadModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:10:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 65,
15
+ "job_start_time": "2024-02-07T14-43-02.993316"
 
 
16
  }
huggyllama/llama-30b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 32.529,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:05:30Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 32,
15
- "job_start_time": "2024-02-07T02-57-00.029985",
16
- "error_msg": "CUDA out of memory. Tried to allocate 72.00 MiB. GPU 2 has a total capacty of 79.35 GiB of which 34.19 MiB is free. Process 146155 has 79.31 GiB memory in use. Of the allocated memory 74.94 GiB is allocated by PyTorch, and 3.87 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1718, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2579, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1181, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1068, in forward\n layer_outputs = decoder_layer(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return 
self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 796, in forward\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n ^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 708, in forward\n key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/cache_utils.py\", line 128, in update\n self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 MiB. GPU 2 has a total capacty of 79.35 GiB of which 34.19 MiB is free. Process 146155 has 79.31 GiB memory in use. Of the allocated memory 74.94 GiB is allocated by PyTorch, and 3.87 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
18
  }
 
7
  "params": 32.529,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:05:30Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 32,
15
+ "job_start_time": "2024-02-07T02-57-00.029985"
 
 
16
  }
huggyllama/llama-65b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 65.286,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:05:56Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 35,
15
- "job_start_time": "2024-02-07T05-18-08.631185",
16
- "error_msg": "CUDA out of memory. Tried to allocate 128.00 MiB. GPU 2 has a total capacty of 79.35 GiB of which 96.19 MiB is free. Process 146155 has 79.25 GiB memory in use. Of the allocated memory 78.65 GiB is allocated by PyTorch, and 92.25 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 566, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3706, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4116, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 778, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 128.00 MiB. GPU 2 has a total capacty of 79.35 GiB of which 96.19 MiB is free. Process 146155 has 79.25 GiB memory in use. Of the allocated memory 78.65 GiB is allocated by PyTorch, and 92.25 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
18
  }
 
7
  "params": 65.286,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:05:56Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 35,
15
+ "job_start_time": "2024-02-07T05-18-08.631185"
 
 
16
  }
matsuo-lab/weblab-10b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
7
  "params": 10.0,
8
  "architectures": "GPTNeoXForCausalLM",
9
  "weight_type": "Original",
10
- "status": "RUNNING",
11
  "submitted_time": "2024-02-05T23:15:09Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
 
7
  "params": 10.0,
8
  "architectures": "GPTNeoXForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:15:09Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
meta-llama/Llama-2-70b-hf_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 68.977,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:05:49Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 34,
15
- "job_start_time": "2024-02-07T05-16-55.598041",
16
- "error_msg": "CUDA out of memory. Tried to allocate 448.00 MiB. GPU 2 has a total capacty of 79.35 GiB of which 248.19 MiB is free. Process 146155 has 79.10 GiB memory in use. Of the allocated memory 78.59 GiB is allocated by PyTorch, and 4.33 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 566, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3706, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4116, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 778, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 448.00 MiB. GPU 2 has a total capacty of 79.35 GiB of which 248.19 MiB is free. Process 146155 has 79.10 GiB memory in use. Of the allocated memory 78.59 GiB is allocated by PyTorch, and 4.33 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
  }
 
  "params": 68.977,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:05:49Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 34,
+ "job_start_time": "2024-02-07T05-16-55.598041"
  }
mistralai/Mixtral-8x7B-v0.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 46.703,
  "architectures": "MixtralForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
  "submitted_time": "2024-02-05T23:05:20Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 31,
- "job_start_time": "2024-02-07T02-56-00.588725",
- "error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 2 has a total capacty of 79.35 GiB of which 90.19 MiB is free. Process 146155 has 79.26 GiB memory in use. Of the allocated memory 78.64 GiB is allocated by PyTorch, and 107.61 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 566, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3706, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4116, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 778, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 2 has a total capacty of 79.35 GiB of which 90.19 MiB is free. Process 146155 has 79.26 GiB memory in use. Of the allocated memory 78.64 GiB is allocated by PyTorch, and 107.61 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
  }
 
  "params": 46.703,
  "architectures": "MixtralForCausalLM",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:05:20Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 31,
+ "job_start_time": "2024-02-07T02-56-00.588725"
  }
nicolasdec/cabra13b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 13.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "RUNNING",
  "submitted_time": "2024-02-05T23:03:22Z",
  "model_type": "πŸ’¬ : chat models (RLHF, DPO, IFT, ...)",
  "source": "script",
 
  "params": 13.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:03:22Z",
  "model_type": "πŸ’¬ : chat models (RLHF, DPO, IFT, ...)",
  "source": "script",
recogna-nlp/bode-13b-alpaca-pt-br_eval_request_False_float16_Adapter.json CHANGED
@@ -7,7 +7,7 @@
  "params": 13.0,
  "architectures": "?",
  "weight_type": "Adapter",
- "status": "RUNNING",
  "submitted_time": "2024-02-05T23:03:03Z",
  "model_type": "πŸ’¬ : chat models (RLHF, DPO, IFT, ...)",
  "source": "script",
 
  "params": 13.0,
  "architectures": "?",
  "weight_type": "Adapter",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:03:03Z",
  "model_type": "πŸ’¬ : chat models (RLHF, DPO, IFT, ...)",
  "source": "script",
t5-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.223,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
- "status": "FAILED",
  "submitted_time": "2024-02-05T23:10:42Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 67,
- "job_start_time": "2024-02-07T14-43-06.548205",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
  }
 
  "params": 0.223,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:10:42Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 67,
+ "job_start_time": "2024-02-07T14-43-06.548205"
  }
t5-large_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.738,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
- "status": "FAILED",
  "submitted_time": "2024-02-05T23:10:49Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 68,
- "job_start_time": "2024-02-07T14-44-06.548553",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
  }
 
  "params": 0.738,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:10:49Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 68,
+ "job_start_time": "2024-02-07T14-44-06.548553"
  }
t5-small_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.061,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
- "status": "FAILED",
  "submitted_time": "2024-02-05T23:10:36Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 66,
- "job_start_time": "2024-02-07T14-43-04.579928",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
  }
 
  "params": 0.061,
  "architectures": "T5ForConditionalGeneration",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:10:36Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 66,
+ "job_start_time": "2024-02-07T14-43-04.579928"
  }
tiiuae/falcon-40b_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 40.0,
  "architectures": "FalconForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
  "submitted_time": "2024-02-05T23:13:31Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 87,
- "job_start_time": "2024-02-07T21-06-25.854633",
- "error_msg": "CUDA out of memory. Tried to allocate 20.00 MiB. GPU 1 has a total capacty of 79.35 GiB of which 16.19 MiB is free. Process 1605417 has 79.33 GiB memory in use. Of the allocated memory 78.37 GiB is allocated by PyTorch, and 457.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1718, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2579, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py\", line 900, in forward\n transformer_outputs = self.transformer(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py\", line 797, in forward\n outputs = block(\n ^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", 
line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py\", line 453, in forward\n attn_outputs = self.self_attention(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py\", line 315, in forward\n value_layer = torch.cat((past_value, value_layer), dim=1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 1 has a total capacty of 79.35 GiB of which 16.19 MiB is free. Process 1605417 has 79.33 GiB memory in use. Of the allocated memory 78.37 GiB is allocated by PyTorch, and 457.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
  }
 
  "params": 40.0,
  "architectures": "FalconForCausalLM",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:13:31Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 87,
+ "job_start_time": "2024-02-07T21-06-25.854633"
  }
tiiuae/falcon-7b_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 7.0,
  "architectures": "FalconForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
  "submitted_time": "2024-02-05T23:04:18Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 24,
- "job_start_time": "2024-02-07T00-17-18.893818",
- "error_msg": "value cannot be converted to type at::Half without overflow",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1718, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2579, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-7b/898df1396f35e447d5fe44e0a3ccaaaa69f30d36/modeling_falcon.py\", line 900, in forward\n transformer_outputs = self.transformer(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-7b/898df1396f35e447d5fe44e0a3ccaaaa69f30d36/modeling_falcon.py\", line 797, in forward\n outputs = block(\n ^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", 
line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-7b/898df1396f35e447d5fe44e0a3ccaaaa69f30d36/modeling_falcon.py\", line 453, in forward\n attn_outputs = self.self_attention(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-7b/898df1396f35e447d5fe44e0a3ccaaaa69f30d36/modeling_falcon.py\", line 323, in forward\n attention_mask_float = (attention_mask * 1.0).masked_fill(attention_mask, float(\"-1e9\")).to(query_layer.dtype)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: value cannot be converted to type at::Half without overflow\n"
  }
 
  "params": 7.0,
  "architectures": "FalconForCausalLM",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:04:18Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 24,
+ "job_start_time": "2024-02-07T00-17-18.893818"
  }
xverse/XVERSE-13B-256K_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 13.0,
  "architectures": "XverseForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
  "submitted_time": "2024-02-05T23:07:58Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 47,
- "job_start_time": "2024-02-07T11-38-46.017067",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
  }
 
  "params": 13.0,
  "architectures": "XverseForCausalLM",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:07:58Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 47,
+ "job_start_time": "2024-02-07T11-38-46.017067"
  }
xverse/XVERSE-13B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 13.0,
  "architectures": "XverseForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
  "submitted_time": "2024-02-05T23:07:51Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 46,
- "job_start_time": "2024-02-07T11-38-44.448848",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
  }
 
  "params": 13.0,
  "architectures": "XverseForCausalLM",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:07:51Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 46,
+ "job_start_time": "2024-02-07T11-38-44.448848"
  }