Eat detectron2 in 60 Minutes


This example demonstrates how to use detectron2, the well-known object detection framework 🤗🤗

to train a MaskRCNN instance segmentation model on a custom dataset (the balloon dataset).

The design of the detectron2 framework has several strengths:

  • 1. Powerful: it provides a model zoo covering a very wide range of vision tasks, including object detection, instance segmentation, and panoptic segmentation.
  • 2. Flexible: custom modules and model structures can be plugged in through its registry mechanism, making it easy to extend and improve.
  • 3. Easy to use: you define your own dataset as a plain list of dicts, which is simple and convenient (see the sketch after this list).
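
For reference, a single record in that list of dicts looks roughly like the sketch below. The field names follow detectron2's standard dataset-dict format, but the concrete values and the file path are made up; section 1 builds the real thing for the balloon dataset.

from detectron2.structures import BoxMode

# one record per image, using detectron2's standard dataset-dict fields
record = {
    "file_name": "images/0001.jpg",   # hypothetical image path
    "image_id": 0,
    "height": 480,
    "width": 640,
    "annotations": [{
        "bbox": [100, 120, 300, 360],                      # box in XYXY pixel coordinates
        "bbox_mode": BoxMode.XYXY_ABS,
        "segmentation": [[100, 120, 300, 120, 300, 360]],  # polygons as flattened x,y pairs
        "category_id": 0,
    }],
}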

Reply with the keyword torchkeras to the 算法美食屋 WeChat official account to get this article's source code and a download link for the balloon dataset.

First, we need to install and import the detectron2 library ~

!pip install 'git+https://github.com/facebookresearch/detectron2.git'  
!pip install torchkeras   

import numpy as np  
import os, json, cv2, random  
from PIL import Image   
  
import torch   
  
import detectron2  
from detectron2.config import get_cfg  
from detectron2.utils.visualizer import Visualizer  
from detectron2.data import MetadataCatalog, DatasetCatalog  
from detectron2 import model_zoo  
from detectron2.engine import DefaultPredictor  
  
#from detectron2.utils.logger import setup_logger
#setup_logger()

def cv2_show(arr):
    # convert a BGR OpenCV array into an RGB PIL image for inline display
    img = Image.fromarray(cv2.cvtColor(arr, cv2.COLOR_BGR2RGB))
    return img
  
0. Pretrained Model

from torchkeras import data   
#download a test image
img = data.get_example_image('park.jpg')  
img.save('park.jpg')  

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
im = cv2.imread("park.jpg")
outputs = predictor(im)
  
print(outputs["instances"].pred_classes)  
print(outputs["instances"].pred_boxes)  
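
Besides classes and boxes, the returned Instances object also carries per-instance confidence scores and, for Mask R-CNN, binary masks. A quick way to inspect them, assuming the same outputs as above:

print(outputs["instances"].scores)            # confidence score per detected instance
print(outputs["instances"].pred_masks.shape)  # boolean masks of shape (N, H, W)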

v = Visualizer(im[:, :, ::-1],   
            MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)  
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))  
cv2_show(out.get_image()[:, :, ::-1])   
  


[Figure: Mask R-CNN predictions visualized on park.jpg]

1. Preparing the Data

To prepare a dataset for detectron2, it first needs to be registered.

If the data is in COCO format, it can be registered quickly as follows:


from detectron2.data.datasets import register_coco_instances
register_coco_instances("my_dataset_train", {}, "json_annotation_train.json", "path/to/image/dir")
register_coco_instances("my_dataset_val", {}, "json_annotation_val.json", "path/to/image/dir")


Data that is not in COCO format can be registered with the following steps:

  • 1. First organize the dataset into a list of dicts.
  • 2. Then register the dataset with DatasetCatalog.

from detectron2.structures import BoxMode

def get_balloon_dicts(img_dir):
    json_file = os.path.join(img_dir, "via_region_data.json")
    with open(json_file) as f:
        imgs_anns = json.load(f)

    dataset_dicts = []
    for idx, v in enumerate(imgs_anns.values()):
        record = {}

        filename = os.path.join(img_dir, v["filename"])
        height, width = cv2.imread(filename).shape[:2]

        record["file_name"] = filename
        record["image_id"] = idx
        record["height"] = height
        record["width"] = width

        annos = v["regions"]
        objs = []
        for _, anno in annos.items():
            assert not anno["region_attributes"]
            anno = anno["shape_attributes"]
            px = anno["all_points_x"]
            py = anno["all_points_y"]
            poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
            poly = [p for x in poly for p in x]  # flatten to [x1, y1, x2, y2, ...]

            obj = {
                "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts
  
  
  
try:
    #DatasetCatalog.remove('balloon_train')
    #DatasetCatalog.remove('balloon_val')

    DatasetCatalog.register("balloon_train", lambda: get_balloon_dicts("./data/balloon/train"))
    MetadataCatalog.get("balloon_train").set(thing_classes=["balloon"])

    DatasetCatalog.register("balloon_val", lambda: get_balloon_dicts("./data/balloon/val"))
    MetadataCatalog.get("balloon_val").set(thing_classes=["balloon"])

except Exception as err:
    pass  # the datasets may already be registered

balloon_metadata = MetadataCatalog.get("balloon_train")
  
  


Let's visualize one sample to check that the data is correct.


dicts_train = DatasetCatalog.get('balloon_train')  #get_balloon_dicts("./data/balloon/train")
dicts_val = DatasetCatalog.get('balloon_val')  #get_balloon_dicts("./data/balloon/val")
  

dic = dicts_train[3]  
img = cv2.imread(dic["file_name"])
visualizer = Visualizer(img[:, :, ::-1], metadata=balloon_metadata, scale=0.5)  
out = visualizer.draw_dataset_dict(dic)  
cv2_show(out.get_image()[:, :, ::-1])  
  


[Figure: one annotated training sample from the balloon dataset]

2. Defining the Model

detectron2 defines models through configuration files. The configs directory of the detectron2 repository contains ready-made config files for a wide variety of models,

covering Detection, InstanceSegmentation, Keypoints, Panoptic segmentation, and more.
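
For example, configs for other task families can be pulled from the model zoo in exactly the same way. The paths below are standard model zoo config files, listed purely for illustration:

# a few other config files shipped in the detectron2 model zoo
model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")          # object detection
model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")        # keypoint detection
model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml")  # panoptic segmentation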


cfg = get_cfg()

cfg.merge_from_file(model_zoo.get_config_file(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
   "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")


cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # the "RoIHead batch size"; 128 is faster and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only one class (balloon)


#model = detectron2.modeling.build_model(cfg)  # building the model this way does not load the pretrained weights

predictor = DefaultPredictor(cfg)
model = predictor.model
  


3. Training the Model

The code below trains with detectron2's native DefaultTrainer, which is quite simple.

However, DefaultTrainer is not very flexible: adding your own functionality to the training loop is cumbersome, and the log output is not very readable.

It also has no early stopping, so it cannot save the weights that perform best on the validation set.
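
For completeness: the intended way to extend DefaultTrainer is through hooks. Below is a minimal sketch of a custom hook that just prints the latest total loss every 100 iterations; it is illustrative only and not part of this tutorial's pipeline.

from detectron2.engine import HookBase

class PrintLossHook(HookBase):
    # a toy hook: report the running total_loss every 100 iterations
    def after_step(self):
        if self.trainer.iter % 100 == 0:
            loss = self.trainer.storage.history("total_loss").latest()
            print(f"iter={self.trainer.iter}, total_loss={loss:.4f}")

# hooks are attached to a trainer like this:
# trainer.register_hooks([PrintLossHook()])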


from detectron2.engine import DefaultTrainer

cfg.DATASETS.TRAIN = ("balloon_train",)
cfg.DATASETS.TEST = ("balloon_val",)
cfg.DATALOADER.NUM_WORKERS = 2
cfg.SOLVER.IMS_PER_BATCH = 2  # this is the real "batch size" in the usual deep learning sense
cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
cfg.SOLVER.MAX_ITER = 600
cfg.SOLVER.STEPS = []         # do not decay the learning rate

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()


[Figure: training log output of DefaultTrainer]

Next, let's use our beloved "dream furnace" ~ torchkeras ~ to implement a most elegant training loop ~ 😋😋


ds_train = detectron2.data.DatasetFromList(dicts_train)
ds_val = detectron2.data.DatasetFromList(dicts_val)

# DatasetMapper turns a dataset dict into the per-image format the model consumes
mp = detectron2.data.DatasetMapper(cfg, is_train=True)
  

batch_size = 16
dl_train = detectron2.data.build_detection_train_loader(ds_train,
        mapper=mp, total_batch_size=batch_size, num_workers=2)
# the detectron2 train loader is infinite, so attach a size attribute
# that the training loop below can use as the number of steps per epoch
dl_train.size = len(ds_train)//batch_size

dl_val = detectron2.data.build_detection_train_loader(ds_val,
        mapper=mp, total_batch_size=1, num_workers=2)
dl_val.size = len(dicts_val)
  
  

# grab one batch to check the data pipeline
for batch in dl_val:
    break
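
Each batch yielded by the loader is a list of per-image dicts produced by DatasetMapper. A quick inspection (the field names follow detectron2's convention; the exact keys can vary with the mapper's settings):

print(type(batch), len(batch))  # a list with one dict per image
print(batch[0].keys())          # typically includes 'file_name', 'image', 'instances', ...
print(batch[0]["image"].shape)  # the image as a CHW tensor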

from torchkeras import KerasModel
from tqdm import tqdm
from detectron2.utils.events import EventStorage
import sys

class StepRunner:
    def __init__(self, net, loss_fn, accelerator,
                 stage = "train", metrics_dict = None,
                 optimizer = None, lr_scheduler = None
                 ):
        self.net,self.loss_fn,self.metrics_dict,self.stage = net,loss_fn,metrics_dict,stage
        self.optimizer,self.lr_scheduler = optimizer,lr_scheduler
        self.accelerator = accelerator

        # detectron2 models only return the loss dict in training mode,
        # so the net stays in train() mode even during the validation stage
        if self.stage=='train':
            self.net.train()
        else:
            self.net.train()

    def __call__(self, batch):

        #loss: detectron2 models expect an EventStorage context when computing losses
        with EventStorage() as event_storage:
            loss_dict = self.net(batch)

        loss = sum(loss_dict.values())

        #backward()
        if self.optimizer is not None and self.stage=="train":
            self.accelerator.backward(loss)
            self.optimizer.step()
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()
            self.optimizer.zero_grad()

        all_loss = self.accelerator.gather(loss).sum()

        #losses
        step_losses = {self.stage+"_loss":all_loss.item()}

        #metrics
        step_metrics = {}

        if self.stage=="train":
            if self.optimizer is not None:
                step_metrics['lr'] = self.optimizer.state_dict()['param_groups'][0]['lr']
            else:
                step_metrics['lr'] = 0.0
        return step_losses,step_metrics

class EpochRunner:
    def __init__(self,steprunner,quiet=False):
        self.steprunner = steprunner
        self.stage = steprunner.stage
        self.accelerator = self.steprunner.accelerator
        self.quiet = quiet

    def __call__(self,dataloader):

        # detectron2's train loader has no __len__, so fall back to the size attribute set above
        try:
            n = len(dataloader)
        except Exception as err:
            n = dataloader.size
        loop = tqdm(enumerate(dataloader,start=1),
                    total =n,
                    file=sys.stdout,
                    disable=not self.accelerator.is_local_main_process or self.quiet,
                    ncols = 100
                   )
        epoch_losses = {}
        for step, batch in loop:
            step_losses,step_metrics = self.steprunner(batch)
            step_log = dict(step_losses,**step_metrics)
            for k,v in step_losses.items():
                epoch_losses[k] = epoch_losses.get(k,0.0)+v
            if step<n:
                loop.set_postfix(**step_log)
            elif step==n:
                epoch_metrics = step_metrics
                epoch_metrics.update({self.stage+"_"+name:metric_fn.compute().item()
                                 for name,metric_fn in self.steprunner.metrics_dict.items()})
                epoch_losses = {k:v/step for k,v in epoch_losses.items()}
                epoch_log = dict(epoch_losses,**epoch_metrics)
                loop.set_postfix(**epoch_log)
                for name,metric_fn in self.steprunner.metrics_dict.items():
                    metric_fn.reset()
            else:
                # the infinite loader never raises StopIteration, so exit the loop manually
                break
        return epoch_log


KerasModel.StepRunner = StepRunner
KerasModel.EpochRunner = EpochRunner
  

params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.AdamW(params, lr=1e-4)

# loss_fn is None because the detectron2 model returns its own loss dict
keras_model = KerasModel(model,
                         loss_fn = None,
                         metrics_dict=None,
                         optimizer= optimizer
                        )
  

ckpt_path = 'checkpoint.pt'  
keras_model.fit(train_data=dl_train,val_data=dl_val,  
    epochs=30,patience=10,  
    monitor='val_loss',
    mode='min',  
    ckpt_path =ckpt_path,  
    plot=True  
)  
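
During fit, the best validation weights are written to ckpt_path. torchkeras saves them as a plain state_dict, so (assuming that format) they can be restored into the in-memory model like this:

# reload the best weights saved by torchkeras (a plain state_dict)
model.load_state_dict(torch.load(ckpt_path))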


[Figure: torchkeras training progress and loss curves]

4. Evaluating the Model


from detectron2.evaluation import COCOEvaluator, inference_on_dataset  
from detectron2.data import build_detection_test_loader  
  
  
evaluator = COCOEvaluator("balloon_val", output_dir="./output")
dl_val = build_detection_test_loader(cfg, "balloon_val")
print(inference_on_dataset(model, dl_val, evaluator))
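
inference_on_dataset returns a nested dict of COCO metrics. Assuming the usual COCOEvaluator output format, with 'bbox' and 'segm' sub-dicts, the box and mask AP can be read out like this:

# keys follow COCOEvaluator's output format
results = inference_on_dataset(model, dl_val, evaluator)
print(results["bbox"]["AP"])   # box mAP
print(results["segm"]["AP"])   # mask mAP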
  


5. Using the Model


from detectron2.engine import DefaultPredictor  
  
#cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.WEIGHTS = ckpt_path  
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7   # set a custom testing threshold  
predictor = DefaultPredictor(cfg)  
  

from detectron2.utils.visualizer import ColorMode  
  
im = cv2.imread(dicts_val[10]['file_name'])

outputs = predictor(im)  # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format  
vis = Visualizer(im[:, :, ::-1],  
               metadata=balloon_metadata,   
               scale=0.5,   
               instance_mode=ColorMode.IMAGE_BW   # remove the colors of unsegmented pixels. This option is only available for segmentation models  
)  

out = vis.draw_instance_predictions(outputs["instances"].to("cpu"))  
cv2_show(out.get_image()[:, :, ::-1])  
  


[Figure: model predictions visualized on a validation image]

Reply with the keyword torchkeras to the 算法美食屋 WeChat official account to get this article's source code and a download link for the balloon dataset.

Across ten thousand rivers and a thousand mountains, how about showing some love with a like? 😋😋
