Converting a Detectron2 model to TorchScript


I want to convert the Detectron2 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml' model to TorchScript using torch.jit.trace. My code is as follows.

import cv2
import numpy as np
import torch
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.modeling import build_model
from detectron2.export.flatten import TracingAdapter
import os

ModelPath='/home/jayasanka/working_files/create_torchsript/model.pt'
with open('savepic.npy', 'rb') as f:
    image = np.load(f)

#-------------------------------------------------------------------------------------

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))

cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # your number of classes + 1

cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, ModelPath)

cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.60  # set the testing threshold for this model

predictor = DefaultPredictor(cfg)


I used TracingAdapter and the trace function, but I don't fully understand the concepts behind them.

# im = cv2.imread(image)
im = torch.tensor(image)

def inference_func(model, image):
    inputs= [{"image": image}]
    return model.inference(inputs, do_postprocess=False)[0]

wrapper= TracingAdapter(predictor, im, inference_func)
wrapper.eval()
traced_script_module= torch.jit.trace(wrapper, (im,))
traced_script_module.save("torchscript.pt")

It gives the error shown below.

Traceback (most recent call last):
  File "script.py", line 49, in <module>
    traced_script_module= torch.jit.trace(wrapper, (im,))
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/torch/jit/_trace.py", line 744, in trace
    _module_class,
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/torch/jit/_trace.py", line 959, in trace_module
    argument_names,
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1039, in _slow_forward
    result = self.forward(*input, **kwargs)
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/detectron2/export/flatten.py", line 294, in forward
    outputs = self.inference_func(self.model, *inputs_orig_format)
  File "script.py", line 44, in inference_func
    return model.inference(inputs, do_postprocess=False)[0]
  File "/home/jayasanka/anaconda3/envs/vha/lib/python3.7/site-packages/yacs/config.py", line 141, in __getattr__
    raise AttributeError(name)
AttributeError: inference

Can you help me solve this problem? Is there another way to do this easily?

Tags: python, machine-learning, detectron, torchscript
2 Answers
0 votes

Change your code to:

def inference(model, inputs):
    # use do_postprocess=False so it returns the raw Instances with ROI masks
    inst = model.inference(inputs, do_postprocess=False)[0]
    return [{"instances": inst}]

# image is an H x W x C np.ndarray; convert it to a C x H x W float tensor
assert isinstance(image, np.ndarray)
image_tensor = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
# pass the underlying nn.Module (predictor.model), not the DefaultPredictor itself
wrapper = TracingAdapter(predictor.model, inputs=[{"image": image_tensor}], inference_func=inference)
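
To finish the export, the wrapper can then be traced and saved exactly as in the question. A minimal sketch, reusing the image_tensor defined above:

wrapper.eval()
# trace with the same example input that was given to the TracingAdapter
traced = torch.jit.trace(wrapper, (image_tensor,))
traced.save("torchscript.pt")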

0 votes

I faced a similar problem, so I'm posting my solution here for future readers. If you look closely at your inference_func()

# im = cv2.imread(image)
im = torch.tensor(image)

def inference_func(model, image):
    inputs= [{"image": image}]
    return model.inference(inputs, do_postprocess=False)[0]

wrapper= TracingAdapter(predictor, im, inference_func)
wrapper.eval()
traced_script_module= torch.jit.trace(wrapper, (im,))
traced_script_module.save("torchscript.pt")

you'll see that it calls model.inference(), yet in TracingAdapter(predictor, im, inference_func) you are passing the predictor rather than the model, which is why you get AttributeError: inference. So make the following changes (a shorter alternative is sketched after the list):

  1. Import DetectionCheckpointer:

     from detectron2.checkpoint import DetectionCheckpointer

  2. Build the model with build_model and load the trained weights with DetectionCheckpointer.

  3. Finally, pass the model variable instead of predictor to TracingAdapter.
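
If you would rather keep DefaultPredictor (it loads the weights and applies the config for you), a shorter alternative, assuming DefaultPredictor's model attribute, is to hand its underlying nn.Module to the adapter:

predictor = DefaultPredictor(cfg)
# DefaultPredictor keeps the actual nn.Module in its .model attribute
wrapper = TracingAdapter(predictor.model, im, inference_func)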

The complete code is given below:

import cv2
import numpy as np
import torch
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.modeling import build_model
from detectron2.export.flatten import TracingAdapter
import os
from detectron2.checkpoint import DetectionCheckpointer

ModelPath='/home/jayasanka/working_files/create_torchsript/model.pt'
with open('savepic.npy', 'rb') as f:
    image = np.load(f)

#-------------------------------------------------------------------------------------

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))

cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # your number of classes + 1

cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, ModelPath)

cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.60  # set the testing threshold for this model

# predictor = DefaultPredictor(cfg)
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)  # load the trained weights into the model

im = torch.tensor(image)

def inference_func(model, image):
    inputs= [{"image": image}]
    return model.inference(inputs, do_postprocess=False)[0]

wrapper= TracingAdapter(model, im, inference_func)
wrapper.eval()
traced_script_module= torch.jit.trace(wrapper, (im,))
traced_script_module.save("torchscript.pt")
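
Once torchscript.pt is saved, it can be loaded back and run with torch.jit.load. A minimal sketch; note that a TracingAdapter-traced module takes and returns flattened tensors (a tuple of tensors) rather than a dict of Instances:

loaded = torch.jit.load("torchscript.pt")
# run the traced model on an image tensor in the same format used for tracing
outputs = loaded(im)
print([o.shape for o in outputs])  # one tensor per flattened output field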