Detectronソースの解読-roidbデータ構造


roidbデータ構造
roidbのタイプはlistであり、各要素のデータタイプはdictであり、roidbリストの長さはデータセットの数であり、roidbの各要素の詳細は以下の表の通りである.for entry in roidbデータの種類
詳細に説明するentry['id']要点
現在のイメージを代表しました.identry['file_name']ストリングス
現在のピクチャを表すファイル名(有.jpg拡張子)entry['dataset']ストリングス
所属データセットを指定しますか?entry['image']ストリングス
現在のイメージのファイルパスentry['flipped']ブック
現在の画像を反転しますか?entry['height']要点
現在の画像の高さentry['width']要点
現在の画像の幅entry['has_visible_keypoints']ブック
ポイントが含まれていますか?entry['boxes']float32, numpy配列 (numObjs, 4)
numObjsは現在の画像の目標物体の個数で、4はbboxの座標を表します.entry['segms']二次元リスト[[],[],…]
リストの各要素はいずれもリストです.ここには、各オブジェクトのpolygonのインスタンスタグが格納されています.entry['gt_classes']int32, numpy配列 (numObjs)
現在の画像の各objの本当のカテゴリを指定します.entry['seg_areas']float32, numpy配列 (numObjs)
現在の画像の各objのマスク面積を表します.entry['gt_overlaps']float32, scipy.sparse.csr_matrixデータ (numObjs, 81)
各objと81の異なるカテゴリのoverlapを表します.entry['is_crowd']bool, numpy配列 (numObjs)
現在のマスクがcrowd(群衆領域)であるかどうかを表します.
entry['box_to_gt_ind_map']
int32, numpy配列 (numObjs)
このリストには、boxの順序の下付き値が格納されています.同じ1次元配列で、直接つなぎ合わせて、各roiをindexにマッピングします.indexは、entry['gt_classes']>0のroisリストの下付きです.combined_roidb_for_training()方法
ターゲット検出クラスタスクには、基本的なデータ構造としてデータキューに存在し、DetectronのデータロードクラスRoIDataLoaderも、このデータ構造をメンバー変数として使用しているので、このデータ構造を分析する必要がある.
まず、トレーニングスクリプトを実行するとdetectron/utils/train.pytrain()関数が呼び出され、train()の関数の内部では現在のファイルのadd_model_training_inputs()関数が呼び出され、この関数の内部ではdetectron/datasets/roidbファイルのcombined_roidb_for_training()関数が呼び出されます.したがって、この関数を分析します.この関数コードの解析は以下の通りです.
# detectron/datasets/roidb.py

# Load and concatenate roidbs for one or more datasets, along with optional
# object proposals. Each roidb entry is prepped so it can be used directly
# for training.
def combined_roidb_for_training(dataset_names, proposal_files):
    def get_roidb(dataset_name, proposal_file):
        # Note: singular names here — this inner helper loads ONE dataset;
        # the outer function receives the plural tuples.

        # from detectron.datasets.json_dataset import JsonDataset
        # The roidb itself is produced by JsonDataset.get_roidb();
        # gt=True means ground-truth annotations are loaded into each entry.
        ds = JsonDataset(dataset_name)
        roidb = ds.get_roidb(
            gt=True,
            proposal_file=proposal_file,
            crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH
        )
        if cfg.TRAIN.USE_FLIPPED:
            logger.info("Appending horizontally-flipped training examples...")
            extend_with_flipped_entries(roidb, ds)
        logger.info("Loaded dataset: {:s}".format(ds.name))
        return roidb
    if isinstance(dataset_names, basestring):
        #...
    #...
roidb方法
上記の関数では、combined_roidb_for_training()関数の内部に、別の関数get_roidb()が定義されていますが、この関数は主にdetectron/datasets/json_dataset.pyのJsonDatasetクラスおよびこのクラスのメンバーメソッドget_roidb()に基づいて実現されています.したがって、detectron/datasets/json_dataset.pyファイルにジャンプして、このクラスの内部実装がどのようなものかを見てみます.
# detectron/datasets/json_dataset.py

class JsonDataset(object):
    """A dataset backed by a COCO-style annotation json file (excerpt).

    Parses the annotation json once at construction time and exposes
    get_roidb() to build the roidb list used for training/testing.
    """
    def __init__(self, name):
        # The dataset must be registered in the dataset catalog.
        assert dataset_catalog.contains(name), \
            "Unknown dataset name: {}".format(name)
        assert...
        #...

        # Category bookkeeping.
        category_ids = self.COCO.getCatIds()  # the 80 ids actually used by COCO
        # COCO.loadCats() returns, for each category id, a dict with keys such
        # as 'name', 'id' and 'supercategory'; keep only the human-readable
        # class names (person, bicycle, bus, ...), aligned with category_ids.
        # BUG FIX: was 'sefl.COCO.loadCats' (typo for 'self').
        categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
        # Map class name -> json category id ('__background__' NOT included).
        self.category_to_id_map = dict(zip(categories, category_ids))
        # Prepend '__background__' so class index 0 is the background class.
        self.classes = ['__background__'] + categories
        self.num_classes = len(self.classes)
        # COCO json category ids range up to 90 but only 80 are used, so remap
        # the non-contiguous json ids onto the contiguous range 1..80.
        self.json_category_id_to_contiguous_id = {
            v: i + 1  # key: original COCO json id, value: contiguous id 1~80
            for i, v in enumerate(self.COCO.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            v: k  # inverse map: contiguous id 1~80 -> original COCO json id
            for k, v in self.json_category_id_to_contiguous_id.items()
        }
        self._init_keypoints()  # set up keypoint metadata if the dataset has it

    def get_roidb(
        self,
        gt=False,
        proposal_file=None,
        min_proposal_size=2,
        proposal_limit=-1,
        crowd_filter_thresh=0
    ):
        """Return a roidb (list of dicts, one per image) for this dataset.

        Optionally:
        - include ground-truth boxes in the roidb
        - add proposals specified in a proposal file
        - filter proposals based on a minimum side length
        - filter proposals that intersect with crowd regions
        """

        assert gt is True or crowd_filter_thresh == 0, \
            "Crowd filter threshold must be 0 if gt " \
            "annotations are not included."
        # Use the COCO API to enumerate every image id in the dataset.
        image_ids = self.COCO.getImgIds()
        image_ids.sort()  # deterministic ordering across runs
        # Seed the roidb with a deep copy of the per-image metadata dicts
        # (coco_url, license, width, file_name, height, flickr_url, id,
        # date_captured).
        roidb = copy.deepcopy(self.COCO.loadImgs(image_ids))
        for entry in roidb:
            # Initialize the empty per-entry fields (boxes, segms,
            # gt_overlaps, ...) that the loaders below append to.
            self._prep_roidb_entry(entry)
        if gt:
            # Training path: load the ground-truth annotations per image.
            self.debug_timer.tic()
            for entry in roidb:
                # Each entry already carries its image id, which is used to
                # look up the matching annotations.
                self._add_gt_annotations(entry)
            logger.debug(
                '_add_gt_annotations took {:.3f}s'.
                format(self.debug_timer.toc(average=False))
            )
        if proposal_file is not None:
            self.debug_timer.tic()
            # Merge pre-computed object proposals from the file into the roidb.
            self._add_proposals_from_file(
                roidb, proposal_file, min_proposal_size, proposal_limit,
                crowd_filter_thresh
            )
            logger.debug(
                '_add_proposals_from_file took {:.3f}s'.
                format(self.debug_timer.toc(average=False))
            )
        # Finally, assign a class to every box in the roidb.
        _add_class_assignments(roidb)
        return roidb
json_dataset.py方法
データ準備関数_prep_roidb_entry()の実現解析
# detectron/datasets/json_dataset.py

class JsonDataset(object):
    def __init__(...):
        #...
    def get_roidb(...):
        #...
    # Initialize all the standard, empty fields of a fresh roidb entry so that
    # the later passes (gt-annotation / proposal loading) can append to them
    # uniformly.
    def _prep_roidb_entry(self, entry):
        # Back-reference from the entry to its owning JsonDataset instance.
        entry['dataset'] = self
        im_path = os.path.join(self.image_directory, self.image_prefix+entry['file_name'])
        assert os.path.exists(im_path), "Image \"{} \" not found".format(im_path)
        # Absolute path of the image on disk.
        entry['image'] = im_path
        entry['flipped'] = False  # set to True later for mirrored copies
        entry['has_visible_keypoints'] = False

        # Empty placeholders that ground-truth / proposal loading will fill.

        # n x 4 box coordinates; starts with n == 0 rows, appended to later.
        entry['boxes'] = np.empty((0,4), dtype=np.float32)
        entry['segms'] = []  # per-object segmentations (polygon or RLE)
        # One class label per box; starts empty.
        entry['gt_classes'] = np.empty((0), dtype=np.int32)
        # One mask area per box; starts empty.
        entry['seg_areas'] = np.empty((0), dtype=np.float32)
        # n x num_classes overlap matrix; mostly zeros, hence the sparse
        # csr_matrix representation.
        entry['gt_overlaps'] = scipy.sparse.csr_matrix(
            np.empty((0, self.num_classes), dtype=np.float32)
        )
        # One flag per box: is this annotation a crowd region?
        # NOTE(review): np.bool is removed in modern NumPy (use np.bool_ or
        # bool); this excerpt targets the py2-era Detectron codebase.
        entry['is_crowd'] = np.empty((0), dtype=np.bool)
        # Maps each roi to the index of the gt box it corresponds to; the
        # index refers to the rois with entry['gt_classes'] > 0.
        entry['box_to_gt_ind_map'] = np.empty((0), dtype=np.int32)
        # Keypoint placeholder, only when the dataset defines keypoints.
        if self.keypoints is not None:
            entry['gt_keypoints'] = np.empty(
                (0, 3, self.num_keypoints), dtype=np.int32
            )
        # Drop json metadata fields that are useless after this point.
        for k in ['date_captured', 'url', 'license', 'file_name']:
            if k in entry:
                del entry[k]
_prep_roidb_entry()方法
ロードラベルファイルの関数_add_gt_annotations()の解析
# detectron/datasets/json_dataset.py

class JsonDataset(object):
    def __init__(self, name):
        # ... (shown in a previous excerpt)
        pass

    def get_roidb(self, gt=False, proposal_file=None, min_proposal_size=2,
                  proposal_limit=-1, crowd_filter_thresh=0):
        # ... (shown in a previous excerpt)
        pass

    def _prep_roidb_entry(self, entry):
        # ... (shown in a previous excerpt)
        pass

    # Load the COCO ground-truth annotations for one image and append them to
    # the (already prepped) roidb entry.
    def _add_gt_annotations(self, entry):
        # Fetch all annotation ids for this image id (one annotation per box).
        ann_ids = self.COCO.getAnnIds(imgIds=entry['id'], iscrowd=None)
        # Resolve the ids into annotation dicts ('bbox', 'segmentation', ...).
        objs = self.COCO.loadAnns(ann_ids)
        # Sanitize: clip boxes to the image and drop degenerate/ignored objects.
        valid_objs = []   # surviving annotation dicts
        valid_segms = []  # their segmentations (RLE or polygon), kept in step
        width = entry['width']    # image width, used for clipping below
        height = entry['height']  # image height, used for clipping below
        for obj in objs:
            # Crowd regions use RLE; segm_utils.is_poly() distinguishes
            # polygon (True) from RLE (False) encodings.
            if segm_utils.is_poly(obj['segmentation']):
                # A valid polygon needs >= 3 points, i.e. >= 6 coordinates;
                # shorter fragments are discarded.
                obj['segmentation'] = [
                    p for p in obj['segmentation'] if len(p) >= 6
                ]
            if obj['area'] < cfg.TRAIN.GT_MIN_AREA:
                continue  # too small to be a useful training target
            if 'ignore' in obj and obj['ignore'] == 1:
                continue
            # Convert [x1, y1, w, h] to [x1, y1, x2, y2].
            x1, y1, x2, y2 = box_utils.xywh_to_xyxy(obj['bbox'])
            # Clip the box so it lies inside the image bounds.
            x1, y1, x2, y2 = box_utils.clip_xyxy_to_image(
                x1, y1, x2, y2, height, width
            )

            # Keep only non-degenerate boxes with positive area.
            if obj['area'] > 0 and x2 > x1 and y2 > y1:
                obj['clean_bbox'] = [x1, y1, x2, y2]
                valid_objs.append(obj)
                valid_segms.append(obj['segmentation'])
        num_valid_objs = len(valid_objs)

        # Pre-allocate the per-object arrays; filled in the loop below.
        boxes = np.zeros((num_valid_objs, 4), dtype=entry['seg_areas'].dtype)
        gt_classes = np.zeros((num_valid_objs), dtype=entry['gt_classes'].dtype)
        gt_overlaps = np.zeros(  # num_valid_objs x num_classes overlap matrix
            (num_valid_objs, self.num_classes),
            dtype=entry['gt_overlaps'].dtype
        )
        seg_areas = np.zeros((num_valid_objs), dtype=entry['seg_areas'].dtype)
        is_crowd = np.zeros((num_valid_objs), dtype=entry['is_crowd'].dtype)
        box_to_gt_ind_map = np.zeros(
            (num_valid_objs), dtype=entry['box_to_gt_ind_map'].dtype
        )
        if self.keypoints is not None:
            gt_keypoints = np.zeros(
                (num_valid_objs, 3, self.num_keypoints),
                dtype=entry['gt_keypoints'].dtype
            )

        im_has_visible_keypoints = False
        for ix, obj in enumerate(valid_objs):
            # Map the (non-contiguous) COCO json category id to the contiguous
            # 1..80 training id.
            cls = self.json_category_id_to_contiguous_id[obj['category_id']]
            # BUG FIX: the key written above is 'clean_bbox', not 'clean_box'.
            boxes[ix, :] = obj['clean_bbox']
            gt_classes[ix] = cls
            seg_areas[ix] = obj['area']
            is_crowd[ix] = obj['iscrowd']
            box_to_gt_ind_map[ix] = ix  # gt boxes trivially map to themselves
            if self.keypoints is not None:
                pass  # keypoint extraction elided in this excerpt
            if obj['iscrowd']:
                # Crowd boxes get overlap -1 for every class, so they can be
                # excluded from training by later filtering.
                gt_overlaps[ix, :] = -1.0
            else:
                gt_overlaps[ix, cls] = 1.0  # overlap 1 with its own class
        # Append the new gt data onto the (initially empty) entry arrays.
        entry['boxes'] = np.append(entry['boxes'], boxes, axis=0)
        # 'segms' is a plain list, so extend() rather than np.append().
        entry['segms'].extend(valid_segms)
        entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)
        entry['seg_areas'] = np.append(entry['seg_areas'], seg_areas)
        # entry['gt_overlaps'] is a scipy csr_matrix; densify it, append the
        # new num_valid_objs x num_classes rows, then re-sparsify below.
        entry['gt_overlaps'] = np.append(
            entry['gt_overlaps'].toarray(), gt_overlaps, axis=0
        )
        entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])
        entry['is_crowd'] = np.append(entry['is_crowd'], is_crowd)
        entry['box_to_gt_ind_map'] = np.append(
            entry['box_to_gt_ind_map'], box_to_gt_ind_map
        )
        if self.keypoints is not None:
            entry['gt_keypoints'] = np.append(
                entry['gt_keypoints'], gt_keypoints, axis=0
            )
            entry['has_visible_keypoints'] = im_has_visible_keypoints

_add_gt_annotations()
# detectron/datasets/json_dataset.py

class JsonDataset(object):
    def __init__(...):
        #...
    def get_roidb(...):
        #...
    def _prep_roidb_entry(self, entry):
        #...
    def _add_gt_annotations(self, entry):
        #...
    # Load pre-computed object proposals from a file and add them to the
    # roidb, filtering by minimum size, top-k limit and crowd threshold.
    # (The body is omitted in this excerpt.)
    def _add_proposals_from_file(
        self, roidb, proposal_file, min_proposal_size, top_k, crowd_thresh
    ):


連続解_add_proposals_from_file()方法
次に、detectron/datasets/roidb.pyファイルのcombined_roidb_for_training()関数に戻り、下を見続ける.
# detectron/datasets/roidb.py

def combined_roidb_for_training(dataset_names, proposal_files):
    """Load and concatenate roidbs for one or more datasets, along with
    optional object proposals, and prepare them for training.

    Flipped copies are appended (if enabled), unusable entries are filtered
    out, and bounding-box regression targets are pre-computed.
    """
    def _load_one(name, prop_file):
        # Build the roidb for a single dataset via JsonDataset.get_roidb();
        # gt=True so ground-truth annotations are included in every entry.
        dataset = JsonDataset(name)
        entries = dataset.get_roidb(
            gt=True,
            proposal_file=prop_file,
            crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH
        )
        # Optionally double the training set with horizontally mirrored copies.
        if cfg.TRAIN.USE_FLIPPED:
            logger.info("Appending horizontally-flipped training examples...")
            extend_with_flipped_entries(entries, dataset)
        logger.info("Loaded dataset: {:s}".format(dataset.name))
        return entries

    # Normalize the arguments so both are equal-length tuples.
    if isinstance(dataset_names, basestring):
        dataset_names = (dataset_names, )
    if isinstance(proposal_files, basestring):
        proposal_files = (proposal_files, )
    if len(proposal_files) == 0:
        proposal_files = (None, ) * len(dataset_names)
    assert len(dataset_names) == len(proposal_files)

    # Load every dataset in turn and splice the per-dataset roidbs together.
    roidb = []
    for name, prop_file in zip(dataset_names, proposal_files):
        roidb.extend(_load_one(name, prop_file))

    # Drop entries that are unusable for training (no valid boxes, etc.).
    roidb = filter_for_training(roidb)

    logger.info("Computing bounding-box regression targets...")
    # Attach bounding-box regression target information to each entry.
    add_bbox_regression_targets(roidb)
    logger.info("done")
    _compute_and_log_stats(roidb)

    return roidb