I'm 无聊冬瓜, a blogger at 靠谱客. This post, part five of my YOLOv7 series, is a learning log on converting pth/pt weights to ONNX; I hope it can serve as a reference.

Input command

python export.py --weights /kaxier01/projects/FAS/yolov7/weights/yolov7.pt --grid --end2end --simplify --topk-all 100 --iou-thres 0.65 --conf-thres 0.35 --img-size 640 640 --max-wh 640
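According to export.py's own argument help (shown in the walkthrough below), --grid exports the Detect() layer grid, --end2end appends NMS to the graph, and --max-wh chooses between TensorRT NMS (left unset) and ONNX-Runtime NMS (an integer value). A quick way to see what a given flag combination produced is to inspect the exported graph's inputs and outputs with the onnx package. This is a minimal sketch, not part of export.py; it assumes the command above has already written yolov7.onnx next to the weights:

import onnx

# Hypothetical path for illustration; point this at your own exported file.
f = '/kaxier01/projects/FAS/yolov7/weights/yolov7.onnx'

onnx_model = onnx.load(f)


def dims(value_info):
    # Each dimension is either a fixed size (dim_value) or a symbolic name (dim_param).
    return [d.dim_value if d.dim_value else d.dim_param
            for d in value_info.type.tensor_type.shape.dim]


for inp in onnx_model.graph.input:
    print('input :', inp.name, dims(inp))
for out in onnx_model.graph.output:
    print('output:', out.name, dims(out))

With --end2end and --max-wh 640 there should be a single 'output' head; with --end2end and no --max-wh, the TensorRT path instead yields num_dets, det_boxes, det_scores and det_classes (see the output_names logic in the code below).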

export.py code walkthrough

import argparse
import sys
import time
import warnings

sys.path.append('./')  # to run '$ python *.py' files in subdirectories

import torch
import torch.nn as nn
from torch.utils.mobile_optimizer import optimize_for_mobile

import models
from models.experimental import attempt_load, End2End
from utils.activations import Hardswish, SiLU
from utils.general import set_logging, check_img_size
from utils.torch_utils import select_device
from utils.add_nms import RegisterNMS

warnings.filterwarnings('ignore')

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='/kaxier01/projects/FAS/yolov7/weights/yolov7.pt', help='weights path')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')  # height, width
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes')
    parser.add_argument('--dynamic-batch', action='store_true', help='dynamic batch onnx for tensorrt and onnx-runtime')
    parser.add_argument('--grid', action='store_true', help='export Detect() layer grid')
    parser.add_argument('--end2end', action='store_true', help='export end2end onnx')
    parser.add_argument('--max-wh', type=int, default=None, help='None for tensorrt nms, int value for onnx-runtime nms')
    parser.add_argument('--topk-all', type=int, default=100, help='topk objects for every images')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='iou threshold for NMS')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='conf threshold for NMS')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--simplify', action='store_true', help='simplify onnx model')
    parser.add_argument('--include-nms', action='store_true', help='export end2end onnx')
    parser.add_argument('--fp16', action='store_true', help='CoreML FP16 half-precision export')
    parser.add_argument('--int8', action='store_true', help='CoreML INT8 quantization')
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # opt.img_size=[640, 640]
    opt.dynamic = opt.dynamic and not opt.end2end  # False
    opt.dynamic = False if opt.dynamic_batch else opt.dynamic  # False
    print(opt)
    set_logging()
    t = time.time()

    # Load PyTorch model
    device = select_device(opt.device)  # device='cpu'
    model = attempt_load(opt.weights, map_location=device)  # load FP32 model
    labels = model.names
    '''
    labels = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
              'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
              'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
              'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
              'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
              'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
              'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
              'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
              'scissors', 'teddy bear', 'hair drier', 'toothbrush']
    '''

    # Checks
    gs = int(max(model.stride))  # grid size (max stride), gs=32
    opt.img_size = [check_img_size(x, gs) for x in opt.img_size]  # verify img_size are gs-multiples, opt.img_size=[640, 640]

    # Input
    img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device)  # image size(1,3,320,192) iDetection, img.shape=torch.Size([1, 3, 640, 640])

    # Update model
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        if isinstance(m, models.common.Conv):  # assign export-friendly activations
            if isinstance(m.act, nn.Hardswish):
                m.act = Hardswish()
            elif isinstance(m.act, nn.SiLU):
                m.act = SiLU()
    model.model[-1].export = not opt.grid  # set Detect() layer grid export, model.model[-1].export=False
    y = model(img)  # dry run
    if opt.include_nms:
        model.model[-1].include_nms = True
        y = None

    # TorchScript export
    try:
        print('\nStarting TorchScript export with torch %s...' % torch.__version__)
        f = opt.weights.replace('.pt', '.torchscript.pt')  # f='/kaxier01/projects/FAS/yolov7/weights/yolov7.torchscript.pt'
        ts = torch.jit.trace(model, img, strict=False)
        ts.save(f)  # the .torchscript.pt model can run directly in C++ etc. without depending on Python
        print('TorchScript export success, saved as %s' % f)
    except Exception as e:
        print('TorchScript export failure: %s' % e)

    # CoreML export
    try:
        import coremltools as ct

        print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
        # convert model from torchscript and apply pixel scaling as per detect.py
        ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
        bits, mode = (8, 'kmeans_lut') if opt.int8 else (16, 'linear') if opt.fp16 else (32, None)
        if bits < 32:
            if sys.platform.lower() == 'darwin':  # quantization only supported on macOS
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", category=DeprecationWarning)  # suppress numpy==1.20 float warning
                    ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
            else:
                print('quantization only supported on macOS, skipping...')
        f = opt.weights.replace('.pt', '.mlmodel')  # f='/kaxier01/projects/FAS/yolov7/weights/yolov7.mlmodel'
        ct_model.save(f)  # the .mlmodel file can be deployed on iOS
        print('CoreML export success, saved as %s' % f)
    except Exception as e:
        print('CoreML export failure: %s' % e)

    # TorchScript-Lite export
    try:
        print('\nStarting TorchScript-Lite export with torch %s...' % torch.__version__)
        f = opt.weights.replace('.pt', '.torchscript.ptl')  # f='/kaxier01/projects/FAS/yolov7/weights/yolov7.torchscript.ptl'
        tsl = torch.jit.trace(model, img, strict=False)
        tsl = optimize_for_mobile(tsl)
        tsl._save_for_lite_interpreter(f)  # the .torchscript.ptl model can be deployed on Android
        print('TorchScript-Lite export success, saved as %s' % f)
    except Exception as e:
        print('TorchScript-Lite export failure: %s' % e)

    # ONNX export
    try:
        import onnx

        print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
        f = opt.weights.replace('.pt', '.onnx')  # f='/kaxier01/projects/FAS/yolov7/weights/yolov7.onnx'
        model.eval()
        output_names = ['classes', 'boxes'] if y is None else ['output']  # output_names=['output']
        dynamic_axes = None
        if opt.dynamic:
            dynamic_axes = {'images': {0: 'batch', 2: 'height', 3: 'width'},  # size(1,3,640,640)
                            'output': {0: 'batch', 2: 'y', 3: 'x'}}
        if opt.dynamic_batch:
            opt.batch_size = 'batch'
            dynamic_axes = {
                'images': {
                    0: 'batch',
                },
            }
            if opt.end2end and opt.max_wh is None:
                output_axes = {
                    'num_dets': {0: 'batch'},
                    'det_boxes': {0: 'batch'},
                    'det_scores': {0: 'batch'},
                    'det_classes': {0: 'batch'},
                }
            else:
                output_axes = {
                    'output': {0: 'batch'},
                }
            dynamic_axes.update(output_axes)
        if opt.grid:
            if opt.end2end:
                print('\nStarting export end2end onnx model for %s...' % 'TensorRT' if opt.max_wh is None else 'onnxruntime')
                model = End2End(model, opt.topk_all, opt.iou_thres, opt.conf_thres, opt.max_wh, device, len(labels))
                if opt.end2end and opt.max_wh is None:
                    output_names = ['num_dets', 'det_boxes', 'det_scores', 'det_classes']
                    shapes = [opt.batch_size, 1, opt.batch_size, opt.topk_all, 4,
                              opt.batch_size, opt.topk_all, opt.batch_size, opt.topk_all]
                else:
                    output_names = ['output']
            else:
                model.model[-1].concat = True

        torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                          output_names=output_names,
                          dynamic_axes=dynamic_axes)

        # Checks
        onnx_model = onnx.load(f)  # load onnx model
        onnx.checker.check_model(onnx_model)  # check onnx model

        if opt.end2end and opt.max_wh is None:
            for i in onnx_model.graph.output:
                for j in i.type.tensor_type.shape.dim:
                    j.dim_param = str(shapes.pop(0))

        # print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model

        if opt.simplify:
            try:
                import onnxsim

                print('\nStarting to simplify ONNX...')
                onnx_model, check = onnxsim.simplify(onnx_model)  # simplify the model
                assert check, 'assert check failed'
            except Exception as e:
                print(f'Simplifier failure: {e}')

        # print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
        onnx.save(onnx_model, f)
        print('ONNX export success, saved as %s' % f)

        if opt.include_nms:
            print('Registering NMS plugin for ONNX...')
            mo = RegisterNMS(f)
            mo.register_nms()
            mo.save(f)

    except Exception as e:
        print('ONNX export failure: %s' % e)

    # Finish
    print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
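After the script reports "ONNX export success", a quick smoke test with onnxruntime confirms that the graph loads and produces outputs of the expected shapes. This is a minimal sketch, not part of export.py: it assumes onnxruntime is installed and feeds an all-zero tensor in place of a real, letterboxed 640x640 image.

import numpy as np
import onnxruntime as ort

# Reuse the file written by export.py (path shown here for illustration).
f = '/kaxier01/projects/FAS/yolov7/weights/yolov7.onnx'

session = ort.InferenceSession(f, providers=['CPUExecutionProvider'])
input_name = session.get_inputs()[0].name  # 'images'

# Dummy input matching --img-size 640 640; a real pipeline would letterbox an
# image to 640x640, convert BGR->RGB, scale to [0, 1] and transpose to NCHW.
img = np.zeros((1, 3, 640, 640), dtype=np.float32)

outputs = session.run(None, {input_name: img})
for meta, out in zip(session.get_outputs(), outputs):
    print(meta.name, out.shape)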

If you encounter the following error:

CoreML export failure: Core ML only supports tensors with rank <= 5. Layer "model.105.anchor_grid", with type "const", outputs a rank 6 tensor.

then change the command to the following (the only difference is that --grid is dropped):

python export.py --weights /kaxier01/projects/FAS/yolov7/weights/yolov7.pt --end2end --simplify --topk-all 100 --iou-thres 0.65 --conf-thres 0.35 --img-size 640 640 --max-wh 640
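With an integer --max-wh, End2End (defined in models/experimental.py) takes its ONNX-Runtime branch and the exported graph ends in a single 'output' tensor. To my understanding each row of that tensor is one kept detection laid out as [batch_id, x0, y0, x1, y1, class_id, score], but this layout is an assumption here and should be verified against your own export (Netron makes that easy) before relying on it. A small decoding sketch under that assumption, where labels is the class-name list loaded in export.py:

def decode_ort_end2end(pred, names):
    """Split the assumed (num_dets, 7) end2end output into detection dicts.

    Columns are assumed to be [batch_id, x0, y0, x1, y1, class_id, score];
    check this against your own export before trusting it.
    """
    results = []
    for batch_id, x0, y0, x1, y1, cls_id, score in pred:
        results.append({
            'batch': int(batch_id),
            'box_xyxy': (float(x0), float(y0), float(x1), float(y1)),
            'class': names[int(cls_id)],
            'score': float(score),
        })
    return results

# Usage with the onnxruntime smoke test above:
# detections = decode_ort_end2end(outputs[0], labels)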

yolov7.onnx network structure diagram (image not reproduced here)
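export.py's final log line suggests visualizing the graph with Netron (https://github.com/lutzroeder/netron). Besides the web app, the netron pip package can render the file locally; a one-line sketch, assuming netron has been pip-installed:

import netron

# Serves a local web UI that renders the exported graph.
netron.start('/kaxier01/projects/FAS/yolov7/weights/yolov7.onnx')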
