How to use detectron2 instance segmentation for model-assisted labeling #2538
Unanswered
Nobuyuki-Enzan
asked this question in
Q&A
Replies: 1 comment 1 reply
-
Hi @Nobuyuki-Enzan , I unfortunately don't know the answer but did you ever resolve this and figure out how to parse the odd json file dumped by the evaluator? |
Beta Was this translation helpful? Give feedback.
1 reply
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Uh oh!
There was an error while loading. Please reload this page.
-
I'm trying to convert Detectron2 inference results to COCO annotation format so that the annotation tool can import the Detectron2 results.
However, the json file dumped by COCOEvaluator is in RLE format, so the annotation tool can't import it.
Is there any function to convert Detectron2 inference results to COCO format (polygon)?
Below is the program I used.
import os
import numpy as np
import json
import cv2
import random
from detectron2 import model_zoo
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.structures import BoxMode
from detectron2.data import DatasetCatalog, MetadataCatalog
import tensorboard
from matplotlib import pyplot as plt
def get_balloon_dicts(img_dir):
    """Load the VIA region annotations for the balloon dataset.

    Parameters
    ----------
    img_dir : str
        Directory containing the images and a ``via_region_data.json`` file.

    Returns
    -------
    dict
        The parsed VIA annotation dictionary.

    NOTE(review): the pasted original appears truncated here — the balloon
    tutorial normally converts ``imgs_anns`` into a list of detectron2
    dataset dicts. Returning the raw annotations so the function is not a
    silent no-op; confirm against the full script.
    """
    json_file = os.path.join(img_dir, "via_region_data.json")
    with open(json_file) as f:
        imgs_anns = json.load(f)
    return imgs_anns
def verify_dataset(dists_name):
    """Visualize three random samples from a dataset for a sanity check.

    Parameters
    ----------
    dists_name : str
        Path to the dataset directory, forwarded to ``get_balloon_dicts``.

    NOTE(review): the original body referenced the global ``dicts_name``
    instead of its own parameter, so the argument was silently ignored;
    fixed to use the parameter. ``balloon_metadata`` is assumed to be
    defined at module level — confirm against the full script.
    """
    dataset_dicts = get_balloon_dicts(dists_name)
    for d in random.sample(dataset_dicts, 3):
        img = cv2.imread(d["file_name"])
        visualizer = Visualizer(img[:, :, ::-1], metadata=balloon_metadata, scale=0.5)
        vis = visualizer.draw_dataset_dict(d)
        # Visualizer works in RGB; OpenCV expects BGR, hence the channel flip.
        cv2.imshow(dists_name, vis.get_image()[:, :, ::-1])
        cv2.waitKey(2000)
    cv2.destroyAllWindows()
def train_dataset(dataset_name):
    """Build a Mask R-CNN fine-tuning config for ``dataset_name``.

    Parameters
    ----------
    dataset_name : str
        Name of a dataset registered with ``DatasetCatalog``.

    Returns
    -------
    CfgNode
        The fully populated detectron2 config.

    NOTE(review): the pasted original appears truncated before the
    ``DefaultTrainer`` construction; returning the config so callers can
    finish the training setup themselves.
    """
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
    )
    cfg.DATASETS.TRAIN = (dataset_name,)
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    # Initialize from the COCO-pretrained checkpoint for fine-tuning.
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 300
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # balloon is the only class
    cfg.MODEL.DEVICE = "cpu"
    return cfg
# NOTE(review): the paste read ``if name == 'main':`` — the dunder
# underscores were lost in formatting; restored.
if __name__ == '__main__':
    dataset_name = "balloon_train"
    dicts_name = "/workspaces///PYTHON/Docker/detectron2_manual2/IMAGE/balloon_dataset/balloon/train"
Validation
import cv2
import json
import numpy as np
import os
import random
from detectron2 import model_zoo
from detectron2.utils.visualizer import ColorMode
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.engine import DefaultTrainer
from detectron2.structures import BoxMode
from detectron2.utils.visualizer import Visualizer
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from matplotlib import pyplot as plt
def get_balloon_dicts(img_dir):
    """Load the VIA region annotations for the balloon dataset.

    Parameters
    ----------
    img_dir : str
        Directory containing the images and a ``via_region_data.json`` file.

    Returns
    -------
    dict
        The parsed VIA annotation dictionary.

    NOTE(review): the pasted original appears truncated — the balloon
    tutorial normally converts ``imgs_anns`` into detectron2 dataset
    dicts. Returning the raw annotations so the function is not a silent
    no-op; confirm against the full script.
    """
    json_file = os.path.join(img_dir, "via_region_data.json")
    with open(json_file) as f:
        imgs_anns = json.load(f)
    return imgs_anns
# NOTE(review): the paste read ``if name == 'main':`` — the dunder
# underscores were lost in formatting; restored.
if __name__ == '__main__':
    train_dataset_name = "balloon_train"
    val_dataset_name = "balloon_val"
    val_dicts_name = "/workspaces///PYTHON/Docker/detectron2_manual2/IMAGE/balloon_dataset/balloon/val"
Below is the json file dumped by COCOEvaluator.
[{"image_id": 0, "category_id": 0, "bbox": [579.28076171875, 324.41278076171875, 328.86798095703125, 431.18328857421875], "score": 0.9565192461013794, "segmentation": {"size": [2048, 1323], "counts": "P_\T1]1Vn1k0ZOa0B:H7H8I7H8H8I6K6I6K5K5K5K4M3L5K4L5K4L5K4L5L3L5L3M2N4K3N3L4L4L4M2N3L3N3M2N3M3M2N2N3L3N2N3M2N2O1N2N3M2O1N3M2N2O1N2N2N2N2N2N2N2N2N2O1N2N2O1N2N101N2O001N2O1N2N2O1N2O1N2N2O1N2N101N2O1N101N2O0O2O1O1N2O1O001N2O1O1O0O2O1O001N101O1O0O2O1O1O0O2O1O1O1N2O1O0O2O1N2O001N2O1O001N2O1O001N2O1O1O1O0O2O001O001O001O1O00001O001O001O001O001O1O001O0O20O0001O000000001O0000001O01O0000000010O000001O00001O001O001O00001O00001O00001O0000001O00001O0000001O00001O00001O00001N101O1O001N101O1N2O001N2O0O2N101N1O2N1O2N1O2O0O2N1O2N2N1N3N2N2M3N2N2N2N2N1O2N2M3N2N1N3N2M3N2N2N2N2N2N2N3L3N2N2M3M3N2N2M3N3L3N3L3M4K4M3M4L3N2M3M3M4L3M4L3L5L4K4M4K4M4L3N3K5L4K5K6I6K6I7J5K6K4K7I8E>@d0WO_aoi0"}}]
The json file which I want is like this.
{"licenses": [{"name": "", "id": 0, "url": ""}], "info": {"contributor": "", "date_created": "", "description": "", "url": "", "version": "", "year": ""}, "categories": [{"id": 1, "name": "mitochondria", "supercategory": ""}], "images": [{"id": 1, "width": 2048, "height": 2048, "file_name": "P19-00069-1_SA-MAG_X5000_8660.jpg", "license": 0, "flickr_url": "", "coco_url": "", "date_captured": 0}, {"id": 2, "width": 2048, "height": 2048, "file_name": "P19-00069-1_SA-MAG_X5000_8655.jpg", "license": 0, "flickr_url": "", "coco_url": "", "date_captured": 0}], "annotations": [{"id": 1, "image_id": 1, "category_id": 1, "segmentation": [[1627.0, 668.0, 1626.0, 670.0, 1624.0, 674.0, 1621.0, 679.0, 1620.0, 682.0, 1619.0, 686.0, 1619.0, 694.0, 1616.0, 697.0, 1616.0, 700.0, 1612.0, 708.0, 1612.0, 714.0, 1610.0, 717.0, 1610.0, 723.0, 1609.0, 728.0, 1606.0, 737.0, 1603.0, 742.0, 1602.0, 745.0, 1602.0, 750.0, 1601.0, 754.0, 1600.0, 758.0, 1598.0, 760.0, 1598.0, 766.0, 1596.0, 770.0, 1596.0, 777.0, 1596.0, 779.0, 1598.0, 781.0, 1598.0, 783.0, 1600.0, 784.0, 1603.0, 785.0, 1603.0, 789.0, 1602.0, 799.0, 1600.0, 807.0, 1601.0, 830.0, 1602.0, 835.0, 1603.0, 838.0, 1604.0, 841.0, 1606.0, 844.0, 1607.0, 847.0, 1608.0, 851.0, 1609.0, 855.0, 1607.0, 859.0, 1607.0, 862.0, 1608.0, 866.0, 1609.0, 870.0, 1612.0, 874.0, 1613.0, 875.0, 1615.0, 876.0, 1617.0, 877.0, 1620.0, 878.0, 1627.0, 878.0, 1633.0, 880.0, 1676.0, 880.0, 1682.0, 877.0, 1687.0, 878.0, 1689.0, 876.0, 1692.0, 875.0, 1695.0, 875.0, 1700.0, 873.0, 1712.0, 867.0, 1715.0, 864.0, 1720.0, 862.0, 1726.0, 856.0, 1728.0, 855.0, 1732.0, 854.0, 1735.0, 853.0, 1739.0, 850.0, 1743.0, 845.0, 1744.0, 841.0, 1744.0, 832.0, 1743.0, 804.0, 1741.0, 795.0, 1741.0, 790.0, 1742.0, 788.0, 1743.0, 786.0, 1747.0, 782.0, 1748.0, 780.0, 1750.0, 775.0, 1751.0, 771.0, 1751.0, 764.0, 1749.0, 757.0, 1749.0, 747.0, 1748.0, 740.0, 1747.0, 735.0, 1744.0, 728.0, 1744.0, 723.0, 1743.0, 720.0, 1741.0, 717.0, 1741.0, 714.0, 1740.0, 712.0, 1737.0, 709.0, 1736.0, 
705.0, 1735.0, 703.0, 1724.0, 692.0, 1724.0, 688.0, 1719.0, 683.0, 1716.0, 683.0, 1709.0, 679.0, 1706.0, 678.0, 1697.0, 678.0, 1689.0, 675.0, 1681.0, 675.0, 1665.0, 672.0, 1659.0, 671.0, 1655.0, 669.0, 1651.0, 668.0, 1639.0, 667.0]], "area": 26978.0, "bbox": [1596.0, 667.0, 155.0, 213.0], "iscrowd": 0, "attributes": {"membrane": "fission", "occluded": false}}]}
Any suggestion would be appreciated.
Beta Was this translation helpful? Give feedback.
All reactions