Retry 18 FAILED models
- 01-ai/Yi-34B_eval_request_False_bfloat16_Original.json +2 -4
- 22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json +2 -4
- Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json +2 -4
- Qwen/Qwen-14B_eval_request_False_bfloat16_Original.json +2 -4
- Qwen/Qwen1.5-72B-Chat_eval_request_False_bfloat16_Original.json +2 -4
- Qwen/Qwen1.5-72B_eval_request_False_bfloat16_Original.json +2 -4
- THUDM/glm-10b_eval_request_False_float16_Original.json +2 -4
- THUDM/glm-2b_eval_request_False_float16_Original.json +2 -4
- bigscience/bloom-560m_eval_request_False_float16_Original.json +1 -1
- fernandosola/bluearara-7B_eval_request_False_bfloat16_Original.json +2 -4
- huggyllama/llama-30b_eval_request_False_float16_Original.json +2 -4
- huggyllama/llama-65b_eval_request_False_float16_Original.json +2 -4
- meta-llama/Llama-2-70b-chat-hf_eval_request_False_float16_Original.json +2 -4
- meta-llama/Llama-2-70b-hf_eval_request_False_float16_Original.json +2 -4
- mistralai/Mistral-7B-v0.1_eval_request_False_bfloat16_Original.json +2 -4
- tiiuae/falcon-7b_eval_request_False_bfloat16_Original.json +2 -4
- xverse/XVERSE-13B-256K_eval_request_False_bfloat16_Original.json +2 -4
- xverse/XVERSE-65B-2_eval_request_False_bfloat16_Original.json +1 -1
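Every file below receives the same fix-up: the status left by the evaluation bot is switched back to "RERUN" so the model is queued for another attempt, and the stale "error_msg"/"traceback" fields from the failed run are dropped (files that already carry partial result metrics keep them). As a rough sketch of what a request file looks like after the change — using only fields visible in the diffs below, with illustrative values rather than any one model's real ones:

{
  "params": 7.0,
  "architectures": "ExampleForCausalLM",
  "weight_type": "Original",
  "status": "RERUN",
  "submitted_time": "2024-02-05T00:00:00Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
  "job_start_time": null
}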
01-ai/Yi-34B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -7,7 +7,7 @@
  "params": 34.389,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:05:39Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
@@ -24,7 +24,5 @@
  "sparrow_pt": 0.3916234220734354
  },
  "result_metrics_average": 0.6416817888200871,
- "result_metrics_npm": 0.4958265468665359,
+ "result_metrics_npm": 0.4958265468665359
-
"error_msg": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1524, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2361, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1148, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 964, in forward\n 
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1055, in _update_causal_mask\n padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n"
}
22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json
CHANGED
@@ -7,12 +7,10 @@
  "params": 0,
  "architectures": "?",
  "weight_type": "Adapter",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:03:11Z",
  "model_type": "🔶 : fine-tuned",
  "source": "script",
  "job_id": 176,
- "job_start_time": "2024-02-10T03-25-43.141586",
+ "job_start_time": "2024-02-10T03-25-43.141586"
-
"error_msg": "LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 633, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n config = PEFT_TYPE_TO_CONFIG_MAPPING[\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n return cls.from_peft_type(**kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\n return config_cls(**kwargs)\n ^^^^^^^^^^^^^^^^^^^^\nTypeError: LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'\n"
}
Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json
CHANGED
@@ -7,12 +7,10 @@
  "params": 5.717,
  "architectures": "DeciLMForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:06:24Z",
  "model_type": "🔶 : fine-tuned",
  "source": "script",
  "job_id": 176,
- "job_start_time": "2024-02-10T17-20-44.462615",
+ "job_start_time": "2024-02-10T17-20-44.462615"
-
"error_msg": "'DeciLMModel' object has no attribute '_use_flash_attention_2'",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1420, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 809, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 140, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 804, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1046, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1181, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return 
forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1027, in forward\n if self._use_flash_attention_2:\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1695, in __getattr__\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\nAttributeError: 'DeciLMModel' object has no attribute '_use_flash_attention_2'\n"
}
Qwen/Qwen-14B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -7,12 +7,10 @@
  "params": 14.167,
  "architectures": "QWenLMHeadModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:07:31Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 176,
- "job_start_time": "2024-02-10T17-24-57.661379",
+ "job_start_time": "2024-02-10T17-24-57.661379"
-
"error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1420, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 809, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 140, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 804, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1046, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 1043, in forward\n transformer_outputs = self.transformer(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 891, in forward\n outputs = block(\n ^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 610, in forward\n attn_outputs = self.attn(\n ^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 499, in forward\n attn_output = self.core_attention_flash(q, k, v, attention_mask=attention_mask)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 227, in forward\n output = flash_attn_unpadded_func(\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 1059, in flash_attn_varlen_func\n return FlashAttnVarlenFunc.apply(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/autograd/function.py\", line 539, in apply\n return super().apply(*args, **kwargs) # type: ignore[misc]\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 576, in forward\n out, q, k, v, out_padded, softmax_lse, S_dmask, rng_state = _flash_attn_varlen_forward(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/flash_attn_interface.py\", line 85, in _flash_attn_varlen_forward\n out, q, k, v, 
out_padded, softmax_lse, S_dmask, rng_state = flash_attn_cuda.varlen_fwd(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
}
Qwen/Qwen1.5-72B-Chat_eval_request_False_bfloat16_Original.json
CHANGED
@@ -7,12 +7,10 @@
  "params": 72.0,
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-18T11:55:28Z",
  "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 253,
- "job_start_time": "2024-02-18T15-43-20.059302",
+ "job_start_time": "2024-02-18T15-43-20.059302"
-
"error_msg": "No executable batch and length size found, max_length reached less than 512 at batch_size 1.",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1420, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 809, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 137, in decorator\n raise RuntimeError(f\"No executable batch and length size found, max_length reached less than {minumum_max_length} at batch_size {batch_size}.\")\nRuntimeError: No executable batch and length size found, max_length reached less than 512 at batch_size 1.\n"
}
Qwen/Qwen1.5-72B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -7,12 +7,10 @@
  "params": 72.288,
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-17T03:45:34Z",
  "model_type": "🟢 : pretrained",
  "source": "leaderboard",
  "job_id": 253,
- "job_start_time": "2024-02-18T13-56-06.348098",
+ "job_start_time": "2024-02-18T13-56-06.348098"
-
"error_msg": "No executable batch and length size found, max_length reached less than 512 at batch_size 1.",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1420, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 809, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 137, in decorator\n raise RuntimeError(f\"No executable batch and length size found, max_length reached less than {minumum_max_length} at batch_size {batch_size}.\")\nRuntimeError: No executable batch and length size found, max_length reached less than 512 at batch_size 1.\n"
}
THUDM/glm-10b_eval_request_False_float16_Original.json
CHANGED
@@ -7,12 +7,10 @@
  "params": 10.0,
  "architectures": "GLMModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:09:31Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
+ "job_start_time": null
-
"error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-10b.696788d4f82ac96b90823555f547d1e754839ff4.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, BaichuanConfig, BaichuanConfig, OrionConfig, DeepseekConfig.",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 184, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-10b.696788d4f82ac96b90823555f547d1e754839ff4.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, BaichuanConfig, BaichuanConfig, OrionConfig, DeepseekConfig.\n"
}
THUDM/glm-2b_eval_request_False_float16_Original.json
CHANGED
@@ -7,12 +7,10 @@
  "params": 2.0,
  "architectures": "GLMModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:09:22Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
+ "job_start_time": null
-
"error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-2b.774fda883d7ad028b8effc3c65afec510fce9634.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, BaichuanConfig, BaichuanConfig, OrionConfig.",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 184, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-2b.774fda883d7ad028b8effc3c65afec510fce9634.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, BaichuanConfig, BaichuanConfig, OrionConfig.\n"
}
bigscience/bloom-560m_eval_request_False_float16_Original.json
CHANGED
@@ -7,7 +7,7 @@
  "params": 0.559,
  "architectures": "BloomForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:04:24Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
fernandosola/bluearara-7B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -7,12 +7,10 @@
  "params": 7.0,
  "architectures": "MistralForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-17T01:32:05Z",
  "model_type": "🆎 : language adapted models (FP, FT, ...)",
  "source": "leaderboard",
  "job_id": -1,
- "job_start_time": null,
+ "job_start_time": null
-
"error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Can't load the configuration of 'fernandosola/bluearara-7B'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'fernandosola/bluearara-7B' is the correct path to a directory containing a config.json file",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 215, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Can't load the configuration of 'fernandosola/bluearara-7B'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'fernandosola/bluearara-7B' is the correct path to a directory containing a config.json file\n"
}
huggyllama/llama-30b_eval_request_False_float16_Original.json
CHANGED
@@ -7,7 +7,7 @@
  "params": 32.529,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:05:30Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
@@ -24,7 +24,5 @@
  "sparrow_pt": 0.32914824721309877
  },
  "result_metrics_average": 0.514878494009653,
- "result_metrics_npm": 0.3114125756635208,
+ "result_metrics_npm": 0.3114125756635208
-
"error_msg": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1524, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2361, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1148, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 964, in forward\n 
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1055, in _update_causal_mask\n padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n"
}
huggyllama/llama-65b_eval_request_False_float16_Original.json
CHANGED
@@ -7,7 +7,7 @@
  "params": 65.286,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:05:56Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
@@ -24,7 +24,5 @@
  "sparrow_pt": 0.3347628863885618
  },
  "result_metrics_average": 0.49213626018208545,
- "result_metrics_npm": 0.29059391754452213,
+ "result_metrics_npm": 0.29059391754452213
-
"error_msg": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1524, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2361, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1148, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 964, in forward\n 
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1055, in _update_causal_mask\n padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n"
}
meta-llama/Llama-2-70b-chat-hf_eval_request_False_float16_Original.json
CHANGED
@@ -7,12 +7,10 @@
  "params": 68.977,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-18T11:56:04Z",
  "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 253,
- "job_start_time": "2024-02-18T17-35-14.472711",
+ "job_start_time": "2024-02-18T17-35-14.472711"
-
"error_msg": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1524, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2361, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1148, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 964, in forward\n 
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1055, in _update_causal_mask\n padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n"
}
meta-llama/Llama-2-70b-hf_eval_request_False_float16_Original.json
CHANGED
@@ -7,7 +7,7 @@
  "params": 68.977,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:05:49Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
@@ -24,7 +24,5 @@
  "sparrow_pt": 0.3906386817294493
  },
  "result_metrics_average": 0.56302986080404,
- "result_metrics_npm": 0.3629311363102707,
+ "result_metrics_npm": 0.3629311363102707
-
"error_msg": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!",
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1524, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2361, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1148, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 964, in forward\n 
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1055, in _update_causal_mask\n padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n"
|
30 |
}
|
|
|
7 |
"params": 68.977,
|
8 |
"architectures": "LlamaForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
+
"status": "RERUN",
|
11 |
"submitted_time": "2024-02-05T23:05:49Z",
|
12 |
"model_type": "π’ : pretrained",
|
13 |
"source": "script",
|
|
|
24 |
"sparrow_pt": 0.3906386817294493
|
25 |
},
|
26 |
"result_metrics_average": 0.56302986080404,
|
27 |
+
"result_metrics_npm": 0.3629311363102707
|
28 |
}
|
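The traceback dropped from this request ends in `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!`, the usual sign that an input or attention-mask tensor stayed on the CPU while the sharded model weights sit on the GPU. A minimal sketch of the failure mode and the common mitigation, assuming plain PyTorch on a CUDA machine; the `model`/`batch` names in the comments are illustrative, not taken from the harness:

```python
import torch

# Mixing a CUDA tensor with a CPU tensor reproduces the removed error
# ("Expected all tensors to be on the same device, but found at least two
# devices, cuda:0 and cpu!").  Guarded so the sketch still runs on CPU-only hosts.
if torch.cuda.is_available():
    causal_mask = torch.ones(1, 4, device="cuda").eq(0.0)
    attention_mask = torch.ones(1, 4).eq(0.0)  # accidentally left on the CPU
    try:
        _ = causal_mask * attention_mask
    except RuntimeError as err:
        print(err)

    # Common mitigation before calling generate(): move every input tensor to
    # the device the (possibly accelerate-sharded) model expects, e.g.
    #   inputs = {k: v.to(model.device) for k, v in batch.items()}
```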
mistralai/Mistral-7B-v0.1_eval_request_False_bfloat16_Original.json
CHANGED
@@ -7,7 +7,7 @@
|
|
7 |
"params": 7.242,
|
8 |
"architectures": "MistralForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
-
"status": "
|
11 |
"submitted_time": "2024-02-05T22:59:49Z",
|
12 |
"model_type": "π’ : pretrained",
|
13 |
"source": "script",
|
@@ -23,7 +23,5 @@
|
|
23 |
"faquad_nli": 0.43917169974115616,
|
24 |
"sparrow_pt": 0.28879981784595526
|
25 |
},
|
26 |
-
"result_metrics_average": 0.4910895361200423
|
27 |
-
"error_msg": "value cannot be converted to type at::Half without overflow",
|
28 |
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1718, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2579, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mistral/modeling_mistral.py\", line 1053, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mistral/modeling_mistral.py\", line 908, in forward\n attention_mask = _prepare_4d_causal_attention_mask(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_attn_mask_utils.py\", line 306, in _prepare_4d_causal_attention_mask\n attention_mask = attn_mask_converter.to_4d(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_attn_mask_utils.py\", line 121, in to_4d\n causal_4d_mask = self._make_causal_mask(\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_attn_mask_utils.py\", line 155, in _make_causal_mask\n mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: value cannot be converted to type at::Half without overflow\n"
|
29 |
}
|
|
|
7 |
"params": 7.242,
|
8 |
"architectures": "MistralForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
+
"status": "RERUN",
|
11 |
"submitted_time": "2024-02-05T22:59:49Z",
|
12 |
"model_type": "π’ : pretrained",
|
13 |
"source": "script",
|
|
|
23 |
"faquad_nli": 0.43917169974115616,
|
24 |
"sparrow_pt": 0.28879981784595526
|
25 |
},
|
26 |
+
"result_metrics_average": 0.4910895361200423
|
27 |
}
|
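Mistral-7B-v0.1's removed failure ends in `RuntimeError: value cannot be converted to type at::Half without overflow`, raised while the causal mask is built with `torch.full(..., torch.finfo(dtype).min, ...)`. float16 only reaches about ±65504, so filling a half-precision mask with float32's minimum overflows. A minimal sketch in plain PyTorch, outside the harness; on recent builds the first call raises exactly this error:

```python
import torch

# float32's minimum (~ -3.4e38) does not fit into float16 (~ -65504),
# so filling a half tensor with it trips the scalar overflow check.
try:
    torch.full((4, 4), torch.finfo(torch.float32).min, dtype=torch.float16)
except RuntimeError as err:
    print(err)  # value cannot be converted to type at::Half without overflow

# Using the finfo of the dtype the mask will actually have keeps the fill value in range.
mask = torch.full((4, 4), torch.finfo(torch.float16).min, dtype=torch.float16)
```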
tiiuae/falcon-7b_eval_request_False_bfloat16_Original.json
CHANGED
@@ -7,7 +7,7 @@
|
|
7 |
"params": 7.0,
|
8 |
"architectures": "FalconForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
-
"status": "
|
11 |
"submitted_time": "2024-02-05T23:04:18Z",
|
12 |
"model_type": "π’ : pretrained",
|
13 |
"source": "script",
|
@@ -24,7 +24,5 @@
|
|
24 |
"sparrow_pt": 0.2808893907981011
|
25 |
},
|
26 |
"result_metrics_average": 0.25656239278954934,
|
27 |
-
"result_metrics_npm": -0.045028894002826225
|
28 |
-
"error_msg": "value cannot be converted to type at::Half without overflow",
|
29 |
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 216, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1718, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2579, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-7b/898df1396f35e447d5fe44e0a3ccaaaa69f30d36/modeling_falcon.py\", line 900, in forward\n transformer_outputs = self.transformer(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-7b/898df1396f35e447d5fe44e0a3ccaaaa69f30d36/modeling_falcon.py\", line 797, in forward\n outputs = block(\n ^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", 
line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-7b/898df1396f35e447d5fe44e0a3ccaaaa69f30d36/modeling_falcon.py\", line 453, in forward\n attn_outputs = self.self_attention(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-7b/898df1396f35e447d5fe44e0a3ccaaaa69f30d36/modeling_falcon.py\", line 323, in forward\n attention_mask_float = (attention_mask * 1.0).masked_fill(attention_mask, float(\"-1e9\")).to(query_layer.dtype)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: value cannot be converted to type at::Half without overflow\n"
|
30 |
}
|
|
|
7 |
"params": 7.0,
|
8 |
"architectures": "FalconForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
+
"status": "RERUN",
|
11 |
"submitted_time": "2024-02-05T23:04:18Z",
|
12 |
"model_type": "π’ : pretrained",
|
13 |
"source": "script",
|
|
|
24 |
"sparrow_pt": 0.2808893907981011
|
25 |
},
|
26 |
"result_metrics_average": 0.25656239278954934,
|
27 |
+
"result_metrics_npm": -0.045028894002826225
|
28 |
}
|
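falcon-7b fails with the same `at::Half` overflow, but from the repository's remote modeling code, which fills masked positions with the constant `-1e9`; that constant fits bfloat16 (same exponent range as float32) but not float16. Since this request asks for bfloat16 anyway, one hedged workaround is to force that dtype at load time. A sketch using the standard transformers API; the parameters shown are assumptions, not the harness's actual loader configuration:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load falcon-7b explicitly in bfloat16 so the -1e9 mask fill value used by the
# model's custom attention code stays representable.
model = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-7b",
    torch_dtype=torch.bfloat16,   # a float16 mask is what triggers the overflow
    device_map="auto",
    trust_remote_code=True,       # the traceback shows the repo's modeling_falcon.py in use
)
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b")
```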
xverse/XVERSE-13B-256K_eval_request_False_bfloat16_Original.json
CHANGED
@@ -7,12 +7,10 @@
|
|
7 |
"params": 13.0,
|
8 |
"architectures": "XverseForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
-
"status": "
|
11 |
"submitted_time": "2024-02-05T23:07:58Z",
|
12 |
"model_type": "π’ : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 176,
|
15 |
-
"job_start_time": "2024-02-13T13-07-18.548492"
|
16 |
-
"error_msg": "use_cache is not supported",
|
17 |
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 185, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1718, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2579, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 715, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 603, in forward\n layer_outputs = decoder_layer(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 311, in forward\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n ^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 249, in forward\n assert not use_cache, \"use_cache is not supported\"\nAssertionError: use_cache is not supported\n"
|
18 |
}
|
|
|
7 |
"params": 13.0,
|
8 |
"architectures": "XverseForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
+
"status": "RERUN",
|
11 |
"submitted_time": "2024-02-05T23:07:58Z",
|
12 |
"model_type": "π’ : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 176,
|
15 |
+
"job_start_time": "2024-02-13T13-07-18.548492"
|
16 |
}
|
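XVERSE-13B-256K aborts on `AssertionError: use_cache is not supported`, raised by the repository's own modeling_xverse.py, while `generate()` enables the KV cache by default. A hedged workaround sketch is to switch the cache off explicitly (slower generation, but it no longer trips the assertion); the prompt, dtype, and generation parameters below are illustrative only:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "xverse/XVERSE-13B-256K",
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)
model.config.use_cache = False   # the remote code asserts `not use_cache`
tokenizer = AutoTokenizer.from_pretrained("xverse/XVERSE-13B-256K", trust_remote_code=True)

inputs = tokenizer("Olá, mundo", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=16, use_cache=False)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```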
xverse/XVERSE-65B-2_eval_request_False_bfloat16_Original.json
CHANGED
@@ -7,7 +7,7 @@
|
|
7 |
"params": 65.0,
|
8 |
"architectures": "XverseForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
-
"status": "
|
11 |
"submitted_time": "2024-02-05T23:10:02Z",
|
12 |
"model_type": "π’ : pretrained",
|
13 |
"source": "script",
|
|
|
7 |
"params": 65.0,
|
8 |
"architectures": "XverseForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
+
"status": "RERUN",
|
11 |
"submitted_time": "2024-02-05T23:10:02Z",
|
12 |
"model_type": "π’ : pretrained",
|
13 |
"source": "script",
|