What is TensorRT?
Other vendors: Qualcomm, Hailo, Google TPU
TensorRT's strengths and weaknesses
The pipeline for using TensorRT
Common problems when using TensorRT, and their solutions
Application scenarios for TensorRT
Issues to watch when deploying autonomous-driving models:
Limited compute resources on edge hardware
Heat dissipation (water cooling is not an option)
Real-time constraints: 15/30 fps; raise resolution only where needed; run CPU and GPU asynchronously
Power consumption: under 100 W
TensorRT's modules
Optimization strategy: layer fusion
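A classic case is folding BatchNorm into the preceding convolution: both are linear operations, so BN's affine transform can be absorbed into the conv weights and bias offline. A sketch of the standard folding math (general background, not TensorRT-specific), where $\mu$, $\sigma^2$, $\gamma$, $\beta$ are BN's running mean, variance, scale, and shift:

$$W' = \frac{\gamma}{\sqrt{\sigma^2 + \epsilon}}\, W, \qquad b' = \frac{\gamma\,(b - \mu)}{\sqrt{\sigma^2 + \epsilon}} + \beta$$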
Optimization strategy: kernel auto-tuning
Profile and optimize on the same hardware you will deploy on.
Optimization strategy: quantization
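At its core, linear quantization maps a floating-point range onto an integer grid. A standard formulation (general background; TensorRT's INT8 path uses the symmetric case with zero-point $z = 0$):

$$x_q = \mathrm{clip}\!\left(\mathrm{round}\!\left(\frac{x}{s}\right) + z,\; q_{\min},\; q_{\max}\right), \qquad s = \frac{x_{\max} - x_{\min}}{q_{\max} - q_{\min}}$$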
Floating-point representation in computers:
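For reference, an IEEE 754 FP32 value consists of 1 sign bit $s$, 8 exponent bits $e$, and 23 mantissa bits $m$, decoded (for normalized numbers) as

$$x = (-1)^s \times 1.m \times 2^{\,e - 127}$$

FP16 uses 1/5/10 bits with an exponent bias of 15, which is why its dynamic range and precision are so much smaller.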
Mixed precision training: quantization tricks are also applied during training to speed it up. The method, known as mixed precision training, was published by Baidu and NVIDIA at ICLR 2018.
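A minimal sketch of mixed precision training with PyTorch's native AMP; the tiny model and synthetic data are mine, added only so the loop is runnable (assumes a CUDA device):

```python
import torch
import torch.nn as nn

# tiny synthetic setup so the loop below actually runs
model     = nn.Linear(64, 10).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
criterion = nn.CrossEntropyLoss()
scaler    = torch.cuda.amp.GradScaler()  # scales the loss to avoid FP16 gradient underflow

for _ in range(10):
    data   = torch.randn(32, 64, device="cuda")
    target = torch.randint(0, 10, (32,), device="cuda")

    optimizer.zero_grad()
    with torch.cuda.amp.autocast():   # forward pass runs in mixed FP16/FP32
        loss = criterion(model(data), target)
    scaler.scale(loss).backward()     # backward on the scaled loss
    scaler.step(optimizer)            # unscale gradients, then step the optimizer
    scaler.update()                   # adapt the loss scale for the next iteration
```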
Exporting and analyzing ONNX
How PyTorch exports ONNX, and how to use onnx-simplifier
Viewing ONNX
netron xxx.onnx --host 0.0.0.0 --port 22
Exporting ONNX
Mainly by example
Required packages
torch.onnx.export
```python
import torch
import torch.nn as nn
import torch.onnx

class Model(torch.nn.Module):
    def __init__(self, in_features, out_features, weights1, weights2, bias=False):
        super().__init__()
        self.linear1 = nn.Linear(in_features, out_features, bias)
        self.linear2 = nn.Linear(in_features, out_features, bias)
        with torch.no_grad():
            self.linear1.weight.copy_(weights1)
            self.linear2.weight.copy_(weights2)

    def forward(self, x):
        x1 = self.linear1(x)
        x2 = self.linear2(x)
        return x1, x2

def infer():
    in_features = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
    weights1 = torch.tensor([[1, 2, 3, 4],
                             [2, 3, 4, 5],
                             [3, 4, 5, 6]], dtype=torch.float32)
    weights2 = torch.tensor([[2, 3, 4, 5],
                             [3, 4, 5, 6],
                             [4, 5, 6, 7]], dtype=torch.float32)

    model = Model(4, 3, weights1, weights2)
    x1, x2 = model(in_features)
    print("result is: \n")
    print(x1)
    print(x2)

def export_onnx():
    input = torch.zeros(1, 1, 1, 4)
    weights1 = torch.tensor([[1, 2, 3, 4],
                             [2, 3, 4, 5],
                             [3, 4, 5, 6]], dtype=torch.float32)
    weights2 = torch.tensor([[2, 3, 4, 5],
                             [3, 4, 5, 6],
                             [4, 5, 6, 7]], dtype=torch.float32)

    model = Model(4, 3, weights1, weights2)
    model.eval()  # switch to eval mode before exporting

    # torch.onnx.export takes many more parameters and also supports dynamic shapes
    torch.onnx.export(
        model         = model,
        args          = (input,),
        f             = "../models/example_two_head.onnx",
        input_names   = ["input0"],
        output_names  = ["output0", "output1"],
        opset_version = 12)
    print("Finished onnx export")

if __name__ == "__main__":
    infer()
    export_onnx()
```
Netron visualization of the exported ONNX:
Dynamic shape
```python
import torch
import torch.nn as nn
import torch.onnx

class Model(torch.nn.Module):
    def __init__(self, in_features, out_features, weights, bias=False):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features, bias)
        with torch.no_grad():
            self.linear.weight.copy_(weights)

    def forward(self, x):
        x = self.linear(x)
        return x

def infer():
    in_features = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
    weights = torch.tensor([[1, 2, 3, 4],
                            [2, 3, 4, 5],
                            [3, 4, 5, 6]], dtype=torch.float32)

    model = Model(4, 3, weights)
    x = model(in_features)
    print("result of {1, 1, 1, 4} is ", x.data)

def export_onnx():
    input = torch.zeros(1, 1, 1, 4)
    weights = torch.tensor([[1, 2, 3, 4],
                            [2, 3, 4, 5],
                            [3, 4, 5, 6]], dtype=torch.float32)

    model = Model(4, 3, weights)
    model.eval()  # switch to eval mode before exporting

    torch.onnx.export(
        model         = model,
        args          = (input,),
        f             = "../models/example_dynamic_shape.onnx",
        input_names   = ["input0"],
        output_names  = ["output0"],
        # allow variable input/output shapes; "batch" can be any name
        dynamic_axes  = {
            'input0':  {0: 'batch'},
            'output0': {0: 'batch'},
        },
        opset_version = 12)
    print("Finished onnx export")

if __name__ == "__main__":
    infer()
    export_onnx()
```
Netron visualization:
onnxsim
```python
import torch
import torch.nn as nn
import torch.onnx
import onnxsim
import onnx

class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1   = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, padding=1)
        self.bn1     = nn.BatchNorm2d(num_features=16)
        self.act1    = nn.ReLU()
        self.conv2   = nn.Conv2d(in_channels=16, out_channels=64, kernel_size=5, padding=2)
        self.bn2     = nn.BatchNorm2d(num_features=64)
        self.act2    = nn.ReLU()
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.head    = nn.Linear(in_features=64, out_features=10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.act2(x)
        # B, C, H, W -> B, C, L (this step produces a shape->slice->concat->reshape
        # chain of nodes in the exported graph; think about why)
        x = torch.flatten(x, 2, 3)
        # b, c, w, h = x.shape
        # x = x.reshape(b, c, w * h)
        # x = x.view(b, c, -1)
        x = self.avgpool(x)       # B, C, L -> B, C, 1
        x = torch.flatten(x, 1)   # B, C, 1 -> B, C
        x = self.head(x)          # B, C -> B, 10
        return x

def export_norm_onnx():
    input = torch.rand(1, 3, 64, 64)
    model = Model()
    file  = "../models/sample-reshape.onnx"
    torch.onnx.export(
        model         = model,
        args          = (input,),
        f             = file,
        input_names   = ["input0"],
        output_names  = ["output0"],
        opset_version = 15)
    print("Finished normal onnx export")

    model_onnx = onnx.load(file)

    # check the exported onnx model
    onnx.checker.check_model(model_onnx)

    # simplify the model with onnx-simplifier.
    # try commenting out the simplification and compare the flatten op before/after.
    # the graph contains constant values and nodes the traced graph does not really
    # need; inspect them in netron to see what they do
    print(f"Simplifying with onnx-simplifier {onnxsim.__version__}...")
    model_onnx, check = onnxsim.simplify(model_onnx)
    assert check, "assert check failed"
    onnx.save(model_onnx, file)

if __name__ == "__main__":
    export_norm_onnx()
```
Before simplification:
The PyTorch model only calls the flatten operator, yet the exported ONNX is far more complex. The reason: the values of B, H, and W are unknown until execution reaches that point, so the exporter traces every step of the computation and emits all of the intermediate nodes (shape -> slice -> concat -> reshape) into the graph.
After simplification:
torchvision
It ships many ready-made models that can be called directly.
```python
import torch
import torchvision
import onnxsim
import onnx
import argparse

def get_model(type, dir):
    if type == "resnet":
        model = torchvision.models.resnet50()
        file  = dir + "resnet50.onnx"
    elif type == "vgg":
        model = torchvision.models.vgg11()
        file  = dir + "vgg11.onnx"
    elif type == "mobilenet":
        model = torchvision.models.mobilenet_v3_small()
        file  = dir + "mobilenetV3.onnx"
    elif type == "efficientnet":
        model = torchvision.models.efficientnet_b0()
        file  = dir + "efficientnetb0.onnx"
    elif type == "efficientnetv2":
        model = torchvision.models.efficientnet_v2_s()
        file  = dir + "efficientnetV2.onnx"
    elif type == "regnet":
        model = torchvision.models.regnet_x_1_6gf()
        file  = dir + "regnet1.6gf.onnx"
    return model, file

def export_norm_onnx(model, file, input):
    model.cuda()
    torch.onnx.export(
        model         = model,
        args          = (input,),
        f             = file,
        input_names   = ["input0"],
        output_names  = ["output0"],
        opset_version = 15)
    print("Finished normal onnx export")

    model_onnx = onnx.load(file)

    # check the exported onnx model
    onnx.checker.check_model(model_onnx)

    # simplify with onnx-simplifier
    print(f"Simplifying with onnx-simplifier {onnxsim.__version__}...")
    model_onnx, check = onnxsim.simplify(model_onnx)
    assert check, "assert check failed"
    onnx.save(model_onnx, file)

def main(args):
    type  = args.type
    dir   = args.dir
    input = torch.rand(1, 3, 224, 224, device='cuda')
    model, file = get_model(type, dir)
    export_norm_onnx(model, file, input)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--type", type=str, default="resnet")
    parser.add_argument("-d", "--dir", type=str, default="../models/")
    opt = parser.parse_args()
    main(opt)
```
Dissecting the ONNX structure and understanding Protobuf
The ONNX Proto hierarchy; creating and modifying ONNX with onnx.helper
What is ONNX?
Opset-to-ONNX-version mapping: see the official link
In practice, which opset versions can be targeted at export time also depends on the torch version.
Creating ONNX with onnx.helper
```python
import onnx
from onnx import helper
from onnx import TensorProto

# The structure of an onnx model:
# - ModelProto (describes the whole model)
# --- GraphProto (describes the whole network)
# ------ NodeProto (describes each compute node, e.g. conv, linear)
# ------ TensorProto (describes tensors, mainly the weights)
# ------ ValueInfoProto (describes input/output info)
# ------ AttributeProto (describes the attributes of a node)

def create_onnx():
    # create ValueInfoProto
    a = helper.make_tensor_value_info('a', TensorProto.FLOAT, [10, 10])
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 10])
    b = helper.make_tensor_value_info('b', TensorProto.FLOAT, [10, 10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10, 10])

    # create NodeProto
    mul = helper.make_node('Mul', ['a', 'x'], ['c'], "multiply")
    add = helper.make_node('Add', ['c', 'b'], ['y'], "add")

    # build GraphProto
    graph = helper.make_graph([mul, add], 'sample-linear', [a, x, b], [y])

    # build ModelProto
    model = helper.make_model(graph)

    # check the model for errors
    onnx.checker.check_model(model)
    # print(model)

    # save the model
    onnx.save(model, "../models/sample-linear.onnx")
    return model

if __name__ == "__main__":
    model = create_onnx()
```
Parsing ONNX with onnx.helper
```python
import onnx
import numpy as np

# note: weights are stored as raw bytes, so to read them
# they must be reinterpreted as float
def read_weight(initializer: onnx.TensorProto):
    shape = initializer.dims
    data  = np.frombuffer(initializer.raw_data, dtype=np.float32).reshape(shape)
    print("\n**************parse weight data******************")
    print("initializer info: \n\tname: {} \n\tdata: \n{}".format(initializer.name, data))

def parse_onnx(model: onnx.ModelProto):
    graph        = model.graph
    initializers = graph.initializer
    nodes        = graph.node
    inputs       = graph.input
    outputs      = graph.output

    print("\n**************parse input/output*****************")
    for input in inputs:
        input_shape = []
        for d in input.type.tensor_type.shape.dim:
            if d.dim_value == 0:
                input_shape.append(None)
            else:
                input_shape.append(d.dim_value)
        print("Input info: \n\tname: {} \n\tdata Type: {} \n\tshape: {}".format(
            input.name, input.type.tensor_type.elem_type, input_shape))

    for output in outputs:
        output_shape = []
        for d in output.type.tensor_type.shape.dim:
            if d.dim_value == 0:
                output_shape.append(None)
            else:
                output_shape.append(d.dim_value)
        print("Output info: \n\tname: {} \n\tdata Type: {} \n\tshape: {}".format(
            output.name, output.type.tensor_type.elem_type, output_shape))

    print("\n**************parse node************************")
    for node in nodes:
        print("node info: \n\tname: {} \n\top_type: {} \n\tinputs: {} \n\toutputs: {}".format(
            node.name, node.op_type, node.input, node.output))

    print("\n**************parse initializer*****************")
    for initializer in initializers:
        print("initializer info: \n\tname: {} \n\tdata_type: {} \n\tshape: {}".format(
            initializer.name, initializer.data_type, initializer.dims))
```
Registering ONNX operators
How to fix a failing PyTorch-to-ONNX export (the without-plugin case)
Troubleshooting a failing ONNX export
Below we work through the third item: examples of registering certain ONNX operators in PyTorch
Problem 1: the operator is already in the ONNX opset, yet the export reports it as unsupported
Check which operators are currently registered:
Reference path: /home/user/miniconda3/envs/trt-starter/lib/python3.9/site-packages/torch/onnx
Environments differ, but a similar path should exist in yours.
ONNX's operators.md shows that asinh is already supported; the bridge from pytorch to onnx simply has not been built.
How to register the operator
Method 1:
```python
import torch
import torch.onnx
import onnxruntime
from torch.onnx import register_custom_op_symbolic

# create a symbolic function for the asinh operator, used for registration.
# inside the symbolic function we call g.op to add an Asinh node to the onnx graph
# g: the graph (the computational graph)
# in other words, this adds the onnx operator into the computational graph.
# since Asinh is already implemented in onnx, calling the op by name in g.op is enough.
# the symbolic's parameters must match PyTorch's asinh interface:
# def asinh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def asinh_symbolic(g, input, *, out=None):
    return g.op("Asinh", input)

# bind the symbolic function asinh_symbolic to PyTorch's asinh operator.
# this is what "registering the operator" means.
# asinh is implemented in a C++ namespace called aten.
# aten ("a Tensor Library") is a C++ library implementing tensor operations.
register_custom_op_symbolic('aten::asinh', asinh_symbolic, 12)

# easy to confuse here:
# 1. the first argument of register_custom_op_symbolic is PyTorch's op name: aten::asinh
# 2. the first argument of g.op is the onnx op name: Asinh

class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        x = torch.asinh(x)
        return x

def validate_onnx():
    input = torch.rand(1, 5)

    # PyTorch inference
    model = Model()
    x = model(input)
    print("result from Pytorch is :", x)

    # onnxruntime inference
    sess = onnxruntime.InferenceSession('../models/sample-asinh.onnx')
    x = sess.run(None, {'input0': input.numpy()})
    print("result from onnx is: ", x)

def export_norm_onnx():
    input = torch.rand(1, 5)
    model = Model()
    model.eval()

    file = "../models/sample-asinh.onnx"
    torch.onnx.export(
        model         = model,
        args          = (input,),
        f             = file,
        input_names   = ["input0"],
        output_names  = ["output0"],
        opset_version = 12)
    print("Finished normal onnx export")

if __name__ == "__main__":
    export_norm_onnx()
    # after defining a custom export, always validate it
    validate_onnx()
```
Method 2:
```python
import torch
import torch.onnx
import onnxruntime
import functools
from torch.onnx import register_custom_op_symbolic
from torch.onnx._internal import registration

_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=9)

# an alternative style, matching what torch/onnx/symbolic_opset*.py does:
# register the operator through torch.onnx._internal.registration so that it
# binds to the C++ aten::asinh implementation.
# written this way, the symbolic could be added directly to torch/onnx/symbolic_opset*.py
@_onnx_symbolic('aten::asinh')
def asinh_symbolic(g, input, *, out=None):
    return g.op("Asinh", input)

class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        x = torch.asinh(x)
        return x

def validate_onnx():
    input = torch.rand(1, 5)

    # PyTorch inference
    model = Model()
    x = model(input)
    print("result from Pytorch is :", x)

    # onnxruntime inference
    sess = onnxruntime.InferenceSession('../models/sample-asinh2.onnx')
    x = sess.run(None, {'input0': input.numpy()})
    print("result from onnx is: ", x)

def export_norm_onnx():
    input = torch.rand(1, 5)
    model = Model()
    model.eval()

    file = "../models/sample-asinh2.onnx"
    torch.onnx.export(
        model         = model,
        args          = (input,),
        f             = file,
        input_names   = ["input0"],
        output_names  = ["output0"],
        opset_version = 12)
    print("Finished normal onnx export")

if __name__ == "__main__":
    export_norm_onnx()
    # after defining a custom export, always validate it
    validate_onnx()
```
Problem 2: ONNX does not support the operator at all
Taking torchvision.ops.DeformConv2d as an example
Operator registration
```python
import torch
import torch.nn as nn
import torchvision
import torch.onnx
import onnxruntime
from torch.onnx import register_custom_op_symbolic
from torch.onnx.symbolic_helper import parse_args

# note:
# the type of every parameter in args must be specified here.
# there is no underlying implementation of deform_conv2d yet;
# the actual DCN kernel is written in C++, and we will come back to this case
# in the TensorRT plugin section.
# for now it is enough to know how to export an unsupported op to onnx.
@parse_args("v", "v", "v", "v", "v", "i", "i", "i", "i", "i", "i", "i", "i", "none")
def dcn_symbolic(g,
                 input,
                 weight,
                 offset,
                 mask,
                 bias,
                 stride_h, stride_w,
                 pad_h, pad_w,
                 dil_h, dil_w,
                 n_weight_grps,
                 n_offset_grps,
                 use_mask):
    return g.op("custom::deform_conv2d", input, offset)

register_custom_op_symbolic("torchvision::deform_conv2d", dcn_symbolic, 12)

class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 18, 3)
        self.conv2 = torchvision.ops.DeformConv2d(3, 3, 3)

    def forward(self, x):
        x = self.conv2(x, self.conv1(x))
        return x

def validate_onnx():
    input = torch.rand(1, 3, 5, 5)

    # PyTorch inference
    model = Model()
    x = model(input)
    print("result from Pytorch is :", x)

    # onnxruntime inference
    sess = onnxruntime.InferenceSession('../models/sample-deformable-conv.onnx')
    x = sess.run(None, {'input0': input.numpy()})
    print("result from onnx is: ", x)

def infer():
    input = torch.rand(1, 3, 5, 5)
    model = Model()
    x = model(input)
    print("input is: ", input.data)
    print("result is: ", x.data)

def export_norm_onnx():
    input = torch.rand(1, 3, 5, 5)
    model = Model()
    model.eval()

    file = "../models/sample-deformable-conv.onnx"
    torch.onnx.export(
        model         = model,
        args          = (input,),
        f             = file,
        input_names   = ["input0"],
        output_names  = ["output0"],
        opset_version = 12)
    print("Finished normal onnx export")

if __name__ == "__main__":
    # infer()
    export_norm_onnx()
    validate_onnx()
```
The code above only registers the operator; it provides no C++ implementation. That is enough to export the ONNX cleanly, but not to run it with onnxruntime.
C++ implementation of the operator (to be completed)
onnx-graphsurgeon
Using onnx-graphsurgeon: how it differs from onnx.helper, and how to quickly modify ONNX and replace/create operators
Installation
python3 -m pip install onnx_graphsurgeon --index-url https://pypi.ngc.nvidia.com
Differences between onnx-graphsurgeon and onnx.helper
Differences when creating ONNX
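A sketch of the difference, rebuilding the earlier Mul + Add graph with onnx-graphsurgeon (the output file name is mine): gs wires nodes together through Variable objects, instead of the string-name plumbing onnx.helper requires.

```python
import onnx_graphsurgeon as gs
import numpy as np
import onnx

# variables carry dtype/shape and are passed as objects,
# so the graph wiring is explicit instead of by string name
a = gs.Variable(name="a", dtype=np.float32, shape=(10, 10))
x = gs.Variable(name="x", dtype=np.float32, shape=(10, 10))
b = gs.Variable(name="b", dtype=np.float32, shape=(10, 10))
c = gs.Variable(name="c", dtype=np.float32, shape=(10, 10))
y = gs.Variable(name="y", dtype=np.float32, shape=(10, 10))

mul = gs.Node(op="Mul", inputs=[a, x], outputs=[c])
add = gs.Node(op="Add", inputs=[c, b], outputs=[y])

graph = gs.Graph(nodes=[mul, add], inputs=[a, x, b], outputs=[y], opset=12)
onnx.save(gs.export_onnx(graph), "../models/sample-linear-gs.onnx")
```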
Using it with polygraphy
Saving a subgraph
```python
import onnx_graphsurgeon as gs
import numpy as np
import onnx

def load_model(model: onnx.ModelProto):
    graph = gs.import_onnx(model)
    print(graph.inputs)
    print(graph.outputs)

def main() -> None:
    model = onnx.load("../models/swin-tiny.onnx")
    graph = gs.import_onnx(model)
    tensors = graph.tensors()

    # LayerNorm part: we want to pull this piece of swin's LayerNorm out on its own
    print(tensors["374"])    # LN input1: 1 x 3136 x 128
    print(tensors["375"])    # LN input2: 1 x 3136 x 1
    print(tensors["383"])    # LN output: 1 x 3136 x 128
    graph.inputs  = [tensors["374"].to_variable(dtype=np.float32, shape=(1, 3136, 128))]
    graph.outputs = [tensors["383"].to_variable(dtype=np.float32, shape=(1, 3136, 128))]
    graph.cleanup()
    onnx.save(gs.export_onnx(graph), "../models/swin-subgraph-LN.onnx")

    # MHSA part
    graph = gs.import_onnx(model)
    tensors = graph.tensors()
    print(tensors["457"])    # MHSA input matmul: 64 x 49 x 128
    print(tensors["5509"])   # weight of the MHSA input matmul: 128 x 384
    print(tensors["5518"])   # weight of the MHSA output matmul: 128 x 128
    print(tensors["512"])    # MHSA output: 64 x 49 x 128
    graph.inputs  = [tensors["457"].to_variable(dtype=np.float32, shape=(64, 49, 128))]
    graph.outputs = [tensors["512"].to_variable(dtype=np.float32, shape=(64, 49, 128))]
    graph.cleanup()
    onnx.save(gs.export_onnx(graph), "../models/swin-subgraph-MSHA.onnx")

if __name__ == "__main__":
    main()
```
Analysis with polygraphy (to be completed)
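As a starting point (a minimal sketch, not the full analysis this section intends to cover), polygraphy can run the same ONNX under both ONNX Runtime and TensorRT and compare the outputs:

```bash
# compare ONNX Runtime and TensorRT results on the same model
polygraphy run ../models/swin-tiny.onnx --onnxrt --trt
```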
Using it with TensorRT plugins
ONNX operator fusion/replacement
```python
import onnx_graphsurgeon as gs
import numpy as np
import onnx
import onnxruntime
import torch

##################### functions registered on the graph ########################
@gs.Graph.register()
def min(self, *args):
    return self.layer(op="Min", inputs=args, outputs=["min_output"])

@gs.Graph.register()
def max(self, *args):
    return self.layer(op="Max", inputs=args, outputs=["max_output"])

@gs.Graph.register()
def identity(self, a):
    return self.layer(op="Identity", inputs=[a], outputs=["identity_output"])

@gs.Graph.register()
def clip(self, inputs, outputs):
    return self.layer(op="Clip", inputs=inputs, outputs=outputs)

##################### build the network with the registered functions ##########
# input (5, 5)
#    |
# identity
#    |
#   min
#    |
#   max
#    |
# identity
#    |
# output (5, 5)
def create_onnx_graph():
    # initialize the graph's opset
    graph = gs.Graph(opset=12)

    # initialize the parameters the network needs
    min_val = np.array(0, dtype=np.float32)
    max_val = np.array(1, dtype=np.float32)
    input0  = gs.Variable(name="input0", dtype=np.float32, shape=(5, 5))

    # design the network architecture
    identity0 = graph.identity(input0)
    min0      = graph.min(*identity0, max_val)
    max0      = graph.max(*min0, min_val)
    output0   = graph.identity(*max0)

    # set the graph's inputs and outputs
    graph.inputs  = [input0]
    graph.outputs = output0

    # set the dtype of the graph's outputs
    for out in graph.outputs:
        out.dtype = np.float32

    # save the model
    onnx.save(gs.export_onnx(graph), "../models/sample-minmax.onnx")

##################### replace nodes with the registered clip op ################
# input (5, 5)
#    |
# identity
#    |
#   clip
#    |
# identity
#    |
# output (5, 5)
def change_onnx_graph():
    graph = gs.import_onnx(onnx.load_model('../models/sample-minmax.onnx'))
    tensors = graph.tensors()

    inputs = [tensors["identity_output_0"],
              tensors["onnx_graphsurgeon_constant_5"],
              tensors["onnx_graphsurgeon_constant_2"]]
    outputs = [tensors["max_output_6"]]

    # since we are replacing a subgraph, disconnect it from all surrounding nodes
    for item in inputs:
        # print(item.outputs)
        item.outputs.clear()
    for item in outputs:
        # print(item.inputs)
        item.inputs.clear()

    # reconnect the severed ends through the registered clip
    graph.clip(inputs, outputs)

    # remove all dangling nodes
    graph.cleanup()
    onnx.save(gs.export_onnx(graph), "../models/sample-minmax-to-clip.onnx")

##################### validate the models ######################################
def validate_onnx_graph(input, path):
    sess   = onnxruntime.InferenceSession(path)
    output = sess.run(None, {'input0': input.numpy()})
    print("input is \n", input)
    print("output is \n", output)

def main() -> None:
    input = torch.Tensor(5, 5).uniform_(-1, 1)

    # create the minmax network
    create_onnx_graph()

    # confirm via onnxruntime that the exported onnx was generated correctly
    print("\nBefore modification:")
    validate_onnx_graph(input, "../models/sample-minmax.onnx")

    # rewrite the minmax network into a clip network
    change_onnx_graph()

    # confirm the modified structure is correct
    print("\nAfter modification:")
    validate_onnx_graph(input, "../models/sample-minmax-to-clip.onnx")

if __name__ == "__main__":
    main()
```
Writing a TensorRT plugin (to be completed)
Quickly analyzing open-source code and exporting ONNX
Using Swin Transformer as an example: quickly exporting ONNX and analyzing it
The standard export pipeline
Download the Swin Transformer source from GitHub, set up the environment per the README, and find where the model is built.
Write a basic ONNX export script (a sketch follows):
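A minimal sketch of such a script. How the model object is constructed is repo-specific (the README's model-build path); the export half is standard and assumes a 1 x 3 x 224 x 224 input for swin-tiny:

```python
import torch

def export_onnx(model: torch.nn.Module, file: str):
    model.eval()
    dummy = torch.rand(1, 3, 224, 224)  # swin-tiny input size
    torch.onnx.export(
        model         = model,
        args          = (dummy,),
        f             = file,
        input_names   = ["input0"],
        output_names  = ["output0"],
        opset_version = 9)  # the starting point; raised step by step below
    print("Finished onnx export")

# assumption: `model` is built the way the repo's README describes
# (e.g. via its build_model(config)); that part is omitted here
# export_onnx(model, "../models/swin-tiny.onnx")
```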
Problem 1: the roll operator is not compatible with opset 9
The script fails because roll is unsupported; upgrade to opset 12 and retry.
Problem 2: roll is still not compatible with opset 12
The script again fails on roll. Two options: rewrite torch's roll in terms of other operators that implement the same behavior, or register an ONNX operator for it. Here we try the latter:
Modify symbolic_opset12.py to add a roll implementation (see the sketch below).
This directory also reveals which opset versions the current torch installation supports.
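A sketch of such a roll symbolic, built purely from Slice + Concat; it mirrors the implementation newer torch versions ship in symbolic_opset9.py (the _slice_helper import path varies across torch versions). Registering it as a custom symbolic is an alternative to editing symbolic_opset12.py directly:

```python
import sys
import torch.onnx
from torch.onnx import register_custom_op_symbolic
from torch.onnx.symbolic_helper import parse_args, _slice_helper

# roll along one axis == concat(tail, head) along that axis,
# so the symbolic needs only onnx Slice and Concat nodes
@parse_args("v", "is", "is")
def roll(g, self, shifts, dims):
    assert len(shifts) == len(dims)
    result = self
    for i in range(len(shifts)):
        shapes = []
        # tail: the last shifts[i] elements along dims[i]
        shapes.append(_slice_helper(g, result, axes=[dims[i]],
                                    starts=[-shifts[i]], ends=[sys.maxsize]))
        # head: everything before the tail
        shapes.append(_slice_helper(g, result, axes=[dims[i]],
                                    starts=[0], ends=[-shifts[i]]))
        result = g.op("Concat", *shapes, axis_i=dims[i])
    return result

# either paste the function into torch/onnx/symbolic_opset12.py,
# or register it from the export script:
register_custom_op_symbolic('aten::roll', roll, 12)
```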
Problem 3: onnxsim simplifies the LayerNorm pattern in the ONNX poorly
Register and implement the LN operator:
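A sketch of mapping aten::layer_norm onto the LayerNormalization op that ONNX introduced in opset 17; the parameter list follows aten::layer_norm's signature, and the details should be treated as an assumption:

```python
import torch.onnx
from torch.onnx import register_custom_op_symbolic
from torch.onnx.symbolic_helper import parse_args

# aten::layer_norm(input, normalized_shape, weight, bias, eps, cudnn_enable)
@parse_args("v", "is", "v", "v", "f", "b")
def layer_norm(g, input, normalized_shape, weight, bias, eps, cudnn_enable):
    # LayerNormalization normalizes over the last len(normalized_shape) axes
    return g.op("LayerNormalization", input, weight, bias,
                axis_i=-len(normalized_shape), epsilon_f=eps)

register_custom_op_symbolic('aten::layer_norm', layer_norm, 17)
```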
Check how newer opsets and TensorRT support LayerNorm:
Running the export script shows the installed torch does not support opset 17.
Problem 4: opset 17 is incompatible with the torch version
Upgrade torch, the CUDA toolkit, and the other environment dependencies so that torch supports opset 17.
Exporting to TensorRT
trtexec
What is trtexec: the command-line tool that ships with TensorRT for building engines (e.g. from ONNX) and benchmarking/profiling them without writing code.
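A minimal illustrative invocation (the file names are placeholders): build an FP16 engine from an ONNX file, then time it:

```bash
trtexec --onnx=model.onnx --saveEngine=model.engine --fp16
```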
build
Official trtexec documentation
Argument reference: Commonly Used Command-Line Flags
```bash
#!/bin/bash
# how to use:
#   bash tools/build.sh ${input.onnx} ${tag}

IFS=. file=(${1})
IFS=/ file=(${file})
IFS=
PREFIX=${file[1]}

if [[ ${2} != "" ]]
then
    PREFIX=${PREFIX}-${2}
fi

MODE="build"
ONNX_PATH="models"
BUILD_PATH="build"
ENGINE_PATH=$BUILD_PATH/engines
LOG_PATH=${BUILD_PATH}"/log/"${PREFIX}"/"${MODE}

mkdir -p ${ENGINE_PATH}
mkdir -p $LOG_PATH

trtexec --onnx=${1} \
        --memPoolSize=workspace:2048 \
        --saveEngine=${ENGINE_PATH}/${PREFIX}.engine \
        --profilingVerbosity=detailed \
        --dumpOutput \
        --dumpProfile \
        --dumpLayerInfo \
        --exportOutput=${LOG_PATH}/build_output.log \
        --exportProfile=${LOG_PATH}/build_profile.log \
        --exportLayerInfo=${LOG_PATH}/build_layer_info.log \
        --warmUp=200 \
        --iterations=50 \
        --verbose \
        --fp16 \
        > ${LOG_PATH}/build.log
```
inference
```bash
#!/bin/bash
# how to use:
#   bash tools/infer.sh ${input.engine}

IFS=. file=(${1})
IFS=/ file=(${file})
IFS=
PREFIX=${file[2]}

if [[ ${2} != "" ]]
then
    PREFIX=${PREFIX}-${2}
fi

MODE="infer"
ONNX_PATH="models"
BUILD_PATH="build"
ENGINE_PATH=$BUILD_PATH/engines
LOG_PATH=${BUILD_PATH}"/log/"${PREFIX}"/"${MODE}

mkdir -p ${ENGINE_PATH}
mkdir -p $LOG_PATH

trtexec --loadEngine=${ENGINE_PATH}/${PREFIX}.engine \
        --dumpOutput \
        --dumpProfile \
        --dumpLayerInfo \
        --exportOutput=${LOG_PATH}/infer_output.log \
        --exportProfile=${LOG_PATH}/infer_profile.log \
        --exportLayerInfo=${LOG_PATH}/infer_layer_info.log \
        --warmUp=200 \
        --iterations=50 \
        > ${LOG_PATH}/infer.log
```
profile
```bash
#!/bin/bash
# how to use:
#   bash tools/profile.sh ${input.engine}

IFS=. file=(${1})
IFS=/ file=(${file})
IFS=
PREFIX=${file[2]}

if [[ ${2} != "" ]]
then
    PREFIX=${PREFIX}-${2}
fi

MODE="profile"
ONNX_PATH="models"
BUILD_PATH="build"
ENGINE_PATH=$BUILD_PATH/engines
LOG_PATH=${BUILD_PATH}"/log/"${PREFIX}"/"${MODE}

mkdir -p ${ENGINE_PATH}
mkdir -p $LOG_PATH

nsys profile \
    --output=${LOG_PATH}/${PREFIX} \
    --force-overwrite true \
    trtexec --loadEngine=${ENGINE_PATH}/${PREFIX}.engine \
            --warmUp=0 \
            --duration=0 \
            --iterations=20 \
            --noDataTransfers \
            > ${LOG_PATH}/profile.log
```
Open the generated *-rep file in Nsight Systems to view the profiling results.
Analyzing trtexec logs
--verbose: print detailed logs