```python
input_points = [[[2592, 1728]]]  # point location of the bee
inputs = processor(image, input_points=input_points, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)
masks = processor.image_processor.post_process_masks(
    outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()
)
```
We can visualize the three masks in the `masks` output.
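For example, a minimal visualization sketch with matplotlib (assuming `masks[0]` holds the predicted masks for the image as a tensor of shape `(num_prompts, 3, height, width)`, which is the typical output for a single point prompt):

```python
import matplotlib.pyplot as plt

# Drop the prompt dimension so we are left with (3, H, W) -- one channel per predicted mask.
image_masks = masks[0].squeeze(0)

fig, axes = plt.subplots(1, image_masks.shape[0], figsize=(15, 5))
for i, ax in enumerate(axes):
    ax.imshow(image)                              # original image
    ax.imshow(image_masks[i].numpy(), alpha=0.5)  # overlay the i-th mask
    ax.set_title(f"Mask {i}")
    ax.axis("off")
plt.show()
```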