eduagarcia committed
Commit bb04638 · verified · 1 Parent(s): 7203037

Update status of 01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original to FAILED

01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 34.389,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "RUNNING",
+ "status": "FAILED",
  "submitted_time": "2024-02-05T23:18:19Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
@@ -24,5 +24,7 @@
  "sparrow_pt": 0.37142261482383704
  },
  "result_metrics_average": 0.6356464211312362,
- "result_metrics_npm": 0.4967380342980293
+ "result_metrics_npm": 0.4967380342980293,
+ "error_msg": "CUDA out of memory. Tried to allocate 298.02 GiB. GPU 0 has a total capacty of 79.35 GiB of which 14.77 GiB is free. Process 580799 has 64.57 GiB memory in use. Of the allocated memory 64.06 GiB is allocated by PyTorch, and 7.19 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
+ "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3558, in from_pretrained\n dispatch_model(model, **device_map_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/big_modeling.py\", line 445, in dispatch_model\n model.to(device)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 2556, in to\n return super().to(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1160, in to\n return self._apply(convert)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 810, in _apply\n module._apply(fn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 857, in _apply\n self._buffers[key] = fn(buf)\n ^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1158, in convert\n return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 298.02 GiB. GPU 0 has a total capacty of 79.35 GiB of which 14.77 GiB is free. Process 580799 has 64.57 GiB memory in use. Of the allocated memory 64.06 GiB is allocated by PyTorch, and 7.19 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
  }
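
For context, the allocator hint at the end of the recorded error_msg refers to PyTorch's CUDA caching-allocator configuration, which is read from the PYTORCH_CUDA_ALLOC_CONF environment variable. A minimal sketch of applying it, assuming it is set before the process makes its first CUDA allocation; the 512 MiB split threshold is an illustrative value, not one taken from this commit:

import os

# The CUDA caching allocator reads PYTORCH_CUDA_ALLOC_CONF at
# initialization, so set it before the first CUDA allocation.
# max_split_size_mb=512 is an illustrative assumption.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"

import torch

# Subsequent CUDA allocations use the configured allocator.
x = torch.zeros(1, device="cuda")

Note this only reduces fragmentation of already-cached blocks; it cannot satisfy a single 298.02 GiB request on an 80 GiB device.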
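The traceback shows the failure inside accelerate's dispatch_model while the whole model was moved to a single device via model.to(). A minimal sketch of the equivalent transformers loading call with automatic placement, so weights that do not fit on one GPU spill to other devices; the model ID comes from this request file, but the device_map value is an assumption, not necessarily what the eval bot passed:

import torch
from transformers import AutoModelForCausalLM

# Sketch only: load the bfloat16 checkpoint named in this request
# with automatic device placement instead of a single-device move.
model = AutoModelForCausalLM.from_pretrained(
    "01-ai/Yi-34B-200K",
    torch_dtype=torch.bfloat16,
    device_map="auto",  # assumption: not confirmed from the bot's config
)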