|
{ |
|
"output_dir": "/gypsum/work1/zamani/hzeng/llm_as_retriever/checkpoints/llama3-1b-marco-mntp-sparse-nce-kldiv-lora-1e-4_qreg_05_dreg_04_bs_28_epochs_7_nnegs_16",
|
"overwrite_output_dir": false, |
|
"do_train": true, |
|
"do_eval": false, |
|
"do_predict": false, |
|
"eval_strategy": "no", |
|
"prediction_loss_only": false, |
|
"per_device_train_batch_size": 28, |
|
"per_device_eval_batch_size": 8, |
|
"per_gpu_train_batch_size": null, |
|
"per_gpu_eval_batch_size": null, |
|
"gradient_accumulation_steps": 1, |
|
"eval_accumulation_steps": null, |
|
"eval_delay": 0, |
|
"torch_empty_cache_steps": null, |
|
"learning_rate": 0.0001, |
|
"weight_decay": 0.0, |
|
"adam_beta1": 0.9, |
|
"adam_beta2": 0.999, |
|
"adam_epsilon": 1e-8, |
|
"max_grad_norm": 1.0, |
|
"num_train_epochs": 3.0, |
|
"max_steps": 33250, |
|
"lr_scheduler_type": "linear", |
|
"lr_scheduler_kwargs": {}, |
|
"warmup_ratio": 0.04, |
|
"warmup_steps": 0, |
|
"log_level": "passive", |
|
"log_level_replica": "warning", |
|
"log_on_each_node": true, |
|
"logging_dir": "/gypsum/work1/zamani/hzeng/llm_as_retriever/checkpoints/llama3-1b-marco-mntp-sparse-nce-kldiv-lora-1e-4_qreg_05_dreg_04_bs_28_epochs_7_nnegs_16/runs/Jan31_00-36-49_gpu016",
|
"logging_strategy": "steps", |
|
"logging_first_step": false, |
|
"logging_steps": 50, |
|
"logging_nan_inf_filter": true, |
|
"save_strategy": "steps", |
|
"save_steps": 6650, |
|
"save_total_limit": 1, |
|
"save_safetensors": true, |
|
"save_on_each_node": false, |
|
"save_only_model": false, |
|
"restore_callback_states_from_checkpoint": false, |
|
"no_cuda": false, |
|
"use_cpu": false, |
|
"use_mps_device": false, |
|
"seed": 45, |
|
"data_seed": null, |
|
"jit_mode_eval": false, |
|
"use_ipex": false, |
|
"bf16": true, |
|
"fp16": false, |
|
"fp16_opt_level": "O1", |
|
"half_precision_backend": "auto", |
|
"bf16_full_eval": false, |
|
"fp16_full_eval": false, |
|
"tf32": null, |
|
"local_rank": 0, |
|
"ddp_backend": null, |
|
"tpu_num_cores": null, |
|
"tpu_metrics_debug": false, |
|
"debug": [], |
|
"dataloader_drop_last": false, |
|
"eval_steps": null, |
|
"dataloader_num_workers": 0, |
|
"dataloader_prefetch_factor": null, |
|
"past_index": -1, |
|
"run_name": "llama3-1b-marco-mntp-sparse-nce-kldiv-lora-1e-4_qreg_05_dreg_04_bs_28_epochs_7_nnegs_16", |
|
"disable_tqdm": false, |
|
"remove_unused_columns": false, |
|
"label_names": null, |
|
"load_best_model_at_end": false, |
|
"metric_for_best_model": null, |
|
"greater_is_better": null, |
|
"ignore_data_skip": false, |
|
"fsdp": [ |
|
"full_shard", |
|
"auto_wrap" |
|
], |
|
"fsdp_min_num_params": 0, |
|
"fsdp_config": { |
|
"fsdp": { |
|
"activation_checkpointing": true, |
|
"cpu_ram_efficient_loading": false, |
|
"forward_prefetch": false, |
|
"sync_module_states": false, |
|
"use_orig_params": true |
|
}, |
|
"min_num_params": 0, |
|
"xla": false, |
|
"xla_fsdp_v2": false, |
|
"xla_fsdp_grad_ckpt": false |
|
}, |
|
"fsdp_transformer_layer_cls_to_wrap": null, |
|
"accelerator_config": { |
|
"split_batches": false, |
|
"dispatch_batches": null, |
|
"even_batches": true, |
|
"use_seedable_sampler": true, |
|
"non_blocking": false, |
|
"gradient_accumulation_kwargs": null, |
|
"use_configured_state": false |
|
}, |
|
"deepspeed": null, |
|
"label_smoothing_factor": 0.0, |
|
"optim": "adamw_torch", |
|
"optim_args": null, |
|
"adafactor": false, |
|
"group_by_length": false, |
|
"length_column_name": "length", |
|
"report_to": [ |
|
"wandb" |
|
], |
|
"ddp_find_unused_parameters": null, |
|
"ddp_bucket_cap_mb": null, |
|
"ddp_broadcast_buffers": null, |
|
"dataloader_pin_memory": false, |
|
"dataloader_persistent_workers": false, |
|
"skip_memory_metrics": true, |
|
"use_legacy_prediction_loop": false, |
|
"push_to_hub": false, |
|
"resume_from_checkpoint": null, |
|
"hub_model_id": null, |
|
"hub_strategy": "every_save", |
|
"hub_token": null, |
|
"hub_private_repo": false, |
|
"hub_always_push": false, |
|
"gradient_checkpointing": true, |
|
"gradient_checkpointing_kwargs": null, |
|
"include_inputs_for_metrics": false, |
|
"eval_do_concat_batches": true, |
|
"fp16_backend": "auto", |
|
"evaluation_strategy": null, |
|
"push_to_hub_model_id": null, |
|
"push_to_hub_organization": null, |
|
"push_to_hub_token": null, |
|
"_n_gpu": 1, |
|
"mp_parameters": "", |
|
"auto_find_batch_size": false, |
|
"full_determinism": false, |
|
"torchdynamo": null, |
|
"ray_scope": "last", |
|
"ddp_timeout": 1800, |
|
"torch_compile": false, |
|
"torch_compile_backend": null, |
|
"torch_compile_mode": null, |
|
"dispatch_batches": null, |
|
"split_batches": null, |
|
"include_tokens_per_second": false, |
|
"include_num_input_tokens_seen": false, |
|
"neftune_noise_alpha": null, |
|
"optim_target_modules": null, |
|
"batch_eval_metrics": false, |
|
"eval_on_start": false, |
|
"eval_use_gather_object": false, |
|
"task_names": [ |
|
"rank", |
|
"query_reg", |
|
"doc_reg" |
|
], |
|
"task_weights": [ |
|
1.0, |
|
0.05, |
|
0.04 |
|
], |
|
"ln_to_weight": { |
|
"rank": 1.0, |
|
"query_reg": 0.05, |
|
"doc_reg": 0.04 |
|
}, |
|
"num_tasks": 3, |
|
"wandb_project_name": "llm_as_retriever", |
|
"model_name_or_path": "/gypsum/work1/zamani/hzeng/llm_as_retriever/checkpoints/mntp/llama3-1b-msmarco",
|
"model_type": "llama", |
|
"train_path": "/work/hzeng_umass_edu/ir-research/llm_as_retriever_data/data/msmarco_train_teacher_scores.jsonl",
|
"eval_path": null, |
|
"corpus_path": "/work/hzeng_umass_edu/ir-research/data/msmarco-full/full_collection/raw.tsv",
|
"cache_dir": "", |
|
"loss_type": "nce_kldiv", |
|
"query_max_length": 64, |
|
"doc_max_length": 128, |
|
"n_negs": 16, |
|
"world_size": 1, |
|
"lora": true, |
|
"lora_r": 16, |
|
"lora_alpha": 32, |
|
"lora_dropout": 0.1, |
|
"lora_modules_to_save": null, |
|
"train_config": "/work/hzeng_umass_edu/ir-research/llm_as_retriever/train_configs/llama_config.json"
|
} |