Update status of cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_628c376_False_bfloat16_Original to FAILED
cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_628c376_False_bfloat16_Original.json
CHANGED
@@ -8,10 +8,12 @@
     "architectures": "MixtralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "
+    "status": "FAILED",
     "submitted_time": "2024-05-31T08:44:12Z",
     "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
     "source": "leaderboard",
     "job_id": 788,
-    "job_start_time": "2024-06-12T15-32-24.307081"
+    "job_start_time": "2024-06-12T15-32-24.307081",
+    "error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacity of 31.75 GiB of which 37.75 MiB is free. Process 69470 has 31.71 GiB memory in use. Of the allocated memory 30.72 GiB is allocated by PyTorch, and 62.94 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)",
+    "traceback": "Traceback (most recent call last):\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n    run_request(\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n    results = run_eval_on_model(\n              ^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n    result = evaluate(\n             ^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n    results = evaluator.simple_evaluate(\n              ^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n    return fn(*args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n    lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n    return cls(**args, **args2)\n           ^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n    self._create_model(\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n    self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/auto/auto_factory.py\", line 563, in from_pretrained\n    model_class = _get_model_class(config, cls._model_mapping)\n                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/modeling_utils.py\", line 3679, in from_pretrained\n    if gguf_path is None and (low_cpu_mem_usage or (use_keep_in_fp32_modules and is_accelerate_available())):\n       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/modeling_utils.py\", line 4106, in _load_pretrained_model\n    # This will only initialize submodules that are not marked as initialized by the line above.\n    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/modeling_utils.py\", line 887, in _load_state_dict_into_meta_model\n    or (not hf_quantizer.requires_parameters_quantization)\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 399, in set_module_tensor_to_device\n    new_value = value.to(device)\n                ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacity of 31.75 GiB of which 37.75 MiB is free. Process 69470 has 31.71 GiB memory in use. Of the allocated memory 30.72 GiB is allocated by PyTorch, and 62.94 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n"
 }
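
Note on the failure: dolphin-2.7-mixtral-8x7b is a Mixtral 8x7B fine-tune with roughly 46.7B parameters, so its bfloat16 weights alone need on the order of 90 GB, far more than the 31.75 GiB reported for GPU 0. The OOM during weight loading is therefore expected on a single device, regardless of allocator fragmentation. Below is a minimal sketch of a load that could fit; this is not the leaderboard bot's actual code, only the model id is taken from this request, and the multi-GPU/CPU-offload setup is an assumption about the environment.

import os

# Allocator hint from the error message above; must be set before the
# process makes its first CUDA allocation to have any effect.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import torch
from transformers import AutoModelForCausalLM

# device_map="auto" lets accelerate place layers across all visible GPUs
# and spill the remainder to CPU RAM instead of OOM-ing on GPU 0.
model = AutoModelForCausalLM.from_pretrained(
    "cognitivecomputations/dolphin-2.7-mixtral-8x7b",
    torch_dtype=torch.bfloat16,
    device_map="auto",
    low_cpu_mem_usage=True,
)

Whether the bot's worker actually has additional GPUs or enough host RAM for offloading is not visible from this log; without more total memory, the request would fail the same way under any allocator setting.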