Tiger-J committed
Commit 0cdbc6f · verified · 1 Parent(s): 80cc1df

Update app_util.py

Files changed (1)
  app_util.py  +4 -4
app_util.py CHANGED
@@ -34,14 +34,14 @@ def parse_args() -> argparse.Namespace:
                         help="Type of positional embedding to use on top of the image features")
     parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float,
                         help="position / size * scale")
-    parser.add_argument('--num_feature_levels', default=5, type=int, help='number of feature levels')
+    parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')
 
     # * Transformer
     parser.add_argument('--enc_layers', default=6, type=int,
                         help="Number of encoding layers in the transformer")
     parser.add_argument('--dec_layers', default=6, type=int,
                         help="Number of decoding layers in the transformer")
-    parser.add_argument('--dim_feedforward', default=2048, type=int,
+    parser.add_argument('--dim_feedforward', default=1024, type=int,
                         help="Intermediate size of the feedforward layers in the transformer blocks")
     parser.add_argument('--hidden_dim', default=256, type=int,
                         help="Size of the embeddings (dimension of the transformer)")
@@ -49,7 +49,7 @@ def parse_args() -> argparse.Namespace:
                         help="Dropout applied in the transformer")
     parser.add_argument('--nheads', default=8, type=int,
                         help="Number of attention heads inside the transformer's attentions")
-    parser.add_argument('--num_queries', default=900, type=int,
+    parser.add_argument('--num_queries', default=300, type=int,
                         help="Number of query slots")
     parser.add_argument('--dec_n_points', default=4, type=int)
     parser.add_argument('--enc_n_points', default=4, type=int)
@@ -118,7 +118,7 @@ class ContextDetDemo():
         args.resume = resume
 
         args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
-        num_classes = 2
+        num_classes = 1
         device = torch.device(args.device)
 
         backbone = build_backbone(args)
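For context on what the changed defaults control, the sketch below is a minimal, self-contained illustration and not the repo's actual app_util.py or model code: in a Deformable-DETR-style detector, --num_queries sizes the learned query embeddings, --hidden_dim sets the transformer width, --dim_feedforward sets the feed-forward layer inside each transformer block, and num_classes sizes the classification head. TinyDetrHead and all of its fields are hypothetical names used only for this illustration.

import argparse
import torch
import torch.nn as nn

def parse_args() -> argparse.Namespace:
    # Mirrors only the defaults touched by this commit; all other flags are omitted.
    parser = argparse.ArgumentParser('illustration only', add_help=False)
    parser.add_argument('--num_feature_levels', default=4, type=int)
    parser.add_argument('--dim_feedforward', default=1024, type=int)
    parser.add_argument('--hidden_dim', default=256, type=int)
    parser.add_argument('--num_queries', default=300, type=int)
    return parser.parse_args([])

class TinyDetrHead(nn.Module):
    """Hypothetical stand-in showing how the flags size a DETR-style head."""
    def __init__(self, args: argparse.Namespace, num_classes: int):
        super().__init__()
        # One learned embedding per query slot (300 after this commit, down from 900).
        self.query_embed = nn.Embedding(args.num_queries, args.hidden_dim)
        # Feed-forward block of a transformer layer (1024-wide after this commit).
        self.ffn = nn.Sequential(
            nn.Linear(args.hidden_dim, args.dim_feedforward),
            nn.ReLU(),
            nn.Linear(args.dim_feedforward, args.hidden_dim),
        )
        # Classification head: num_classes foreground classes plus one "no object" slot.
        self.class_embed = nn.Linear(args.hidden_dim, num_classes + 1)
        self.bbox_embed = nn.Linear(args.hidden_dim, 4)

    def forward(self, decoder_out: torch.Tensor):
        # decoder_out is a dummy stand-in for decoder output: (batch, queries, hidden_dim).
        x = self.ffn(decoder_out)
        return self.class_embed(x), self.bbox_embed(x).sigmoid()

if __name__ == '__main__':
    args = parse_args()
    num_classes = 1  # matches the value set in this commit
    head = TinyDetrHead(args, num_classes)
    dummy = torch.zeros(1, args.num_queries, args.hidden_dim)
    logits, boxes = head(dummy)
    print(logits.shape, boxes.shape)  # torch.Size([1, 300, 2]) torch.Size([1, 300, 4])

Under these assumptions, the commit halves the feed-forward width, drops one feature level, and shrinks both the query budget and the label space, which reduces memory and compute for the demo without touching the encoder/decoder depth.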