import torch
import evaluate
from tqdm import tqdm
from transformers import AutoModelForObjectDetection

# test_ds_coco_format, collate_fn and im_processor come from the previous sections
model = AutoModelForObjectDetection.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5")
module = evaluate.load("ybelkada/cocoevaluate", coco=test_ds_coco_format.coco)
val_dataloader = torch.utils.data.DataLoader(
    test_ds_coco_format, batch_size=8, shuffle=False, num_workers=4, collate_fn=collate_fn
)

with torch.no_grad():
    for idx, batch in enumerate(tqdm(val_dataloader)):
        pixel_values = batch["pixel_values"]
        pixel_mask = batch["pixel_mask"]

        labels = [
            {k: v for k, v in t.items()} for t in batch["labels"]
        ]  # these are in DETR format, resized + normalized

        # forward pass
        outputs = model(pixel_values=pixel_values, pixel_mask=pixel_mask)

        # convert model outputs to Pascal VOC format (xmin, ymin, xmax, ymax) at the original image sizes
        orig_target_sizes = torch.stack([target["orig_size"] for target in labels], dim=0)
        results = im_processor.post_process(outputs, orig_target_sizes)

        module.add(prediction=results, reference=labels)
        del batch

results = module.compute()
print(results)
Accumulating evaluation results
DONE (t=0.08s).
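If you only want predictions for a single image rather than full COCO metrics, the same post-processing idea applies. Below is a minimal sketch, assuming the `model` and `im_processor` loaded above and a placeholder image file `sample.png`; it uses the image processor's `post_process_object_detection` method to filter boxes by confidence and rescale them to the original image size.

from PIL import Image
import torch

# placeholder path; substitute any image from the test split
image = Image.open("sample.png").convert("RGB")

with torch.no_grad():
    inputs = im_processor(images=image, return_tensors="pt")
    outputs = model(**inputs)

    # keep only detections above the confidence threshold, rescaled to (height, width) of the input image
    target_sizes = torch.tensor([image.size[::-1]])
    detections = im_processor.post_process_object_detection(
        outputs, threshold=0.5, target_sizes=target_sizes
    )[0]

for score, label, box in zip(detections["scores"], detections["labels"], detections["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score.item():.2f} at {box.tolist()}")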