# Masked token modeling (MTM) pretraining config for the VLM recipe in
# fairseq's examples/MMPT (the projects/mtm/vlm.yaml layout).
dataset:
  video_processor: ShardedVideoProcessor    # loads sharded S3D video features
  bert_name: bert-base-uncased              # HuggingFace tokenizer/text encoder
  meta_processor: ShardedHow2MetaProcessor  # per-shard metadata for the how2 data
  train_path: data/how2/how2_s3d_train.lst
  val_path: data/how2/how2_s3d_val.lst
  vfeat_dir: data/feat/feat_how2_s3d_shard_small
  text_processor: ShardedTextProcessor
  # The trailing dot is intentional: the value is a filename prefix that the
  # text processor completes with the per-shard suffix.
  tfeat_dir: data/feat/feat_how2_s3d_shard_small/raw_caption_dedup.bert-base-uncased.
  aligner: MFMMLMAligner    # builds masked-frame (MFM) + masked-LM (MLM) targets
  subsampling: 32
  sampled_min_len: 8        # min/max length of a sampled text clip, in tokens
  sampled_max_len: 64
  max_video_len: 32         # video tokens per example
  max_len: 96               # total sequence length (video + text)
  lazy_vfeat_mask: true
  mfm_probability: 0.15     # masking rate for video features (MFM)
  mlm_probability: 0.15     # masking rate for text tokens (MLM)
  mm_prob: 0.5              # likely the masked-modality probability from the VLM paper
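# Sequence-budget arithmetic implied above: each example packs max_video_len
# (32) video tokens plus a text clip capped at sampled_max_len (64) tokens,
# and 32 + 64 = 96 matches max_len; 15% of each stream is masked for the
# MFM/MLM objectives.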
fairseq:    # forwarded to the standard fairseq-train argument namespace (sketch below)
  common:
    tensorboard_logdir: run
    log_interval: 1000           # log every 1000 updates
    fp16: true                   # mixed-precision training
  dataset:
    num_workers: 4
    batch_size: 256
  optimization:
    lr:
    - 5.0e-05
    clip_norm: 2.0               # gradient-norm clipping
    optimizer: adam
    adam_betas: (0.9, 0.98)
    lr_scheduler: polynomial_decay
    total_num_update: 1000000    # horizon for the polynomial-decay schedule
    warmup_updates: 1000         # linear warmup steps
    weight_decay: 0.0
    ddp_backend: no_c10d
    max_epoch: 15
  checkpoint:
    save_dir: runs/mtm/vlm
    save_interval_updates: 1024  # also checkpoint every 1024 updates
    keep_interval_updates: 2     # keep only the 2 newest update-based checkpoints
    keep_last_epochs: 30
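# As a rough sketch, the fairseq: block above corresponds to standard
# fairseq-train CLI flags (the actual invocation is assembled by MMPT;
# the <data> argument is a placeholder here):
#
#   fairseq-train <data> --fp16 --log-interval 1000 \
#     --num-workers 4 --batch-size 256 \
#     --lr 5e-05 --clip-norm 2.0 --optimizer adam --adam-betas '(0.9, 0.98)' \
#     --lr-scheduler polynomial_decay --total-num-update 1000000 \
#     --warmup-updates 1000 --weight-decay 0.0 \
#     --ddp-backend no_c10d --max-epoch 15 \
#     --save-dir runs/mtm/vlm --save-interval-updates 1024 \
#     --keep-interval-updates 2 --keep-last-epochs 30 \
#     --tensorboard-logdir run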
task_type: sweep_big    # MMPT job preset
slurm_config: big       # SLURM preset used when submitting to a cluster
eval:
  save_path: runs/mtm/vlm
model:
  model_cls: MMFusionMTM           # multimodal fusion model for MTM pretraining
  mm_encoder_cls: MMBertForMFMMLM  # BERT-style encoder with MFM + MLM heads
  use_seg_emb: true                # segment embeddings separating video and text spans
loss:
  loss_cls: MTM    # masked token modeling loss
task: VLMTask
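# Minimal launch sketch, assuming this file sits at projects/mtm/vlm.yaml
# inside fairseq's examples/MMPT tree; locallaunch.py and its --jobtype flag
# belong to that toolkit and are not defined by this file:
#
#   python locallaunch.py projects/mtm/vlm.yaml --jobtype local_single
#
# To inspect the config programmatically, any YAML loader works, e.g.:
#
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.load("projects/mtm/vlm.yaml")
#   print(cfg.model.model_cls)  # MMFusionMTM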