根據下面的內容寫一篇技術博客,要求增加更多的解釋,讓普通讀者也能了解為什么這樣做,具體怎么做
移植driver_monitoring_system里的MobileNet到RK3588
- 一、背景
- 二、操作步驟
- 2.1 下載源碼
- 2.2 Tensorflow轉成ONNX
- 2.2.1 在x86上創建容器,安裝依賴
- 2.2.2 保存為saved-model
- 2.2.3 saved-model轉ONNX
- 2.2.4 ONNX推理
- 2.3 ONNX轉RKNN
- 2.4 RKNN推理
一、背景
driver_monitoring_system 是一個旨在監控駕駛員狀態和行為的項目,例如檢測打哈欠、打電話等行為。其中的 MobileNet 模型用來預測駕駛員的行為(打電話、發短信)。該模型基于 TensorFlow 實現。本文介紹如何將該模型移植到 RK3588。
- 手機檢測模型參考:在RK3588上實現YOLOv8n高效推理
二、操作步驟
2.1 下載源碼
# Fetch the driver_monitoring project; it contains the trained weights
# (models/model_split.h5) used by the conversion script below.
git clone https://github.com/jhan15/driver_monitoring.git
cd driver_monitoring
2.2 Tensorflow轉成ONNX
2.2.1 在x86上創建容器,安裝依賴
# Start an x86 container with a ready-made Python/CUDA environment.
# --net=host shares the host network (faster pip downloads),
# -v mounts the current source directory at /home inside the container.
docker run -it --privileged --net=host \
    -v $PWD:/home -w /home --rm \
    nvcr.io/nvidia/pytorch:22.02-py3 /bin/bash

# Inside the container, install the conversion toolchain.
# tf-estimator-nightly is pinned to the build that matches tensorflow 2.8.0.
pip install tf-estimator-nightly==2.8.0.dev2021122109
pip install tensorflow==2.8.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install tf2onnx onnx -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install onnxruntime -i https://pypi.tuna.tsinghua.edu.cn/simple
2.2.2 保存為saved-model
# Write the conversion script: rebuild the Keras model architecture, load the
# trained weights, run one sanity-check inference, then export a SavedModel
# (the input format tf2onnx consumes).
cat > cvt.py <<-'EOF'
import numpy as np
import tensorflow as tf
from PIL import Image
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adam


class MobileNet(Sequential):
    """MobileNetV2 backbone + small classification head.

    Mirrors the model definition in driver_monitoring's net.py so that
    models/model_split.h5 can be loaded by weight order.
    """

    def __init__(self, input_shape=(224, 224, 3), num_classes=2, dropout=0.25,
                 lr=1e-3, augmentation=False, train_base=False, add_layer=False):
        super().__init__()
        self.base_model = tf.keras.applications.MobileNetV2(
            weights='imagenet',
            input_shape=input_shape,
            include_top=False)
        self.base_model.trainable = train_base
        self.add(self.base_model)
        self.add(GlobalAveragePooling2D())
        self.add(Dropout(dropout))
        if add_layer:
            self.add(Dense(256, activation='relu'))
            self.add(Dropout(dropout))
        self.add(Dense(num_classes, activation='softmax'))
        self.compile(optimizer=Adam(learning_rate=lr),
                     loss='sparse_categorical_crossentropy',
                     metrics=["accuracy"])


model = MobileNet()
model.load_weights('models/model_split.h5')

# Sanity check: run one image through the model before exporting.
rgb_image = Image.open("0.jpg")
rgb_image = np.array(rgb_image.resize((224, 224))).astype(np.float32)
# Normalize to [-1, 1], the input range MobileNetV2 was trained with.
rgb_image = (tf.expand_dims(rgb_image, 0) - 127.5) / 127.5
print(rgb_image.shape)
y = model.predict(rgb_image)
print(model.input)
print(model.output)
print(y.shape, y.reshape(-1)[:8])
result = np.argmax(y, axis=1)
print(result.shape)

# Export as a TensorFlow SavedModel for the tf2onnx step.
model.save("keras_model")
EOF
rm -rf keras_model
python3 cvt.py
2.2.3 saved-model轉ONNX
# Convert the SavedModel to ONNX.
# --inputs pins the batch dimension to 1 (static shapes convert best to RKNN);
# --inputs-as-nchw inserts a transpose so the exported ONNX model takes NCHW
# input, the layout ONNX Runtime examples and the RKNN toolchain expect.
python -m tf2onnx.convert \
    --inputs-as-nchw mobilenetv2_1_00_224_input:0 \
    --inputs mobilenetv2_1_00_224_input:0[1,224,224,3] \
    --saved-model keras_model \
    --output model.onnx
rm keras_model -rf
2.2.4 ONNX推理
# Verify the ONNX model on x86 with ONNX Runtime before touching the board:
# its output should match the Keras sanity check from cvt.py.
cat > onnx_forward.py <<-'EOF'
import numpy as np
import onnxruntime as ort
from PIL import Image

model_path = "model.onnx"
session = ort.InferenceSession(model_path)
inputs_info = session.get_inputs()
outputs_info = session.get_outputs()

rgb_image = Image.open("0.jpg")
rgb_image = np.array(rgb_image.resize((224, 224)))
# Add batch dim and transpose NHWC -> NCHW (the layout the exported model takes).
rgb_image = rgb_image[np.newaxis, :].transpose(0, 3, 1, 2)
# Same [-1, 1] normalization as in cvt.py.
rgb_image = (rgb_image.astype(np.float32) - 127.5) / 127.5

input_data = {}
input_data['mobilenetv2_1_00_224_input:0'] = rgb_image
outputs = session.run(None, input_data)
for i, output_info in enumerate(outputs_info):
    output_name = output_info.name
    print(outputs[i])
EOF
python3 onnx_forward.py
2.3 ONNX轉RKNN
# Convert ONNX -> RKNN with the RKNN-Toolkit2 (runs on the x86 host).
cat > onnx2rknn.py <<-'EOF'
from rknn.api import RKNN

ONNX_MODEL = 'model.onnx'
RKNN_MODEL = 'model.rknn'
is_quant = 1  # 1 = int8 quantization (needs a calibration dataset)

rknn = RKNN(verbose=True)
# mean/std of 127.5 folds the (x - 127.5) / 127.5 normalization into the
# model, so the board-side script can feed raw uint8 pixels.
rknn.config(mean_values=[[127.5, 127.5, 127.5]],
            std_values=[[127.5, 127.5, 127.5]],
            target_platform='rk3588')

ret = rknn.load_onnx(model=ONNX_MODEL)
if ret != 0:
    print('Load model failed!')
    exit(ret)

# dataset.txt lists calibration images for quantization; auto_hybrid keeps
# accuracy-sensitive layers in higher precision automatically.
ret = rknn.build(do_quantization=is_quant, dataset='./dataset.txt',
                 auto_hybrid=True)
if ret != 0:
    print('Build model failed!')
    exit(ret)

ret = rknn.export_rknn(RKNN_MODEL)
if ret != 0:
    print('Export rknn model failed!')
    exit(ret)

rknn.release()
EOF
# Minimal calibration set: the same sample image.
echo "0.jpg" > ./dataset.txt
python3 onnx2rknn.py
2.4 RKNN推理
# Run the converted model on the RK3588 board with rknnlite.
cat > rknn_forward.py <<-'EOF'
import numpy as np
from PIL import Image
from rknnlite.api import RKNNLite

rgb_image = Image.open("0.jpg")
rgb_image = np.array(rgb_image.resize((224, 224)))
# Raw uint8 NHWC with a batch dim; normalization was folded into the
# RKNN model via mean/std in onnx2rknn.py.
rgb_image = rgb_image[np.newaxis, :]

rknn_lite = RKNNLite()
ret = rknn_lite.load_rknn('model.rknn')
if ret != 0:
    print('Load RKNN model failed')
    exit(ret)

# Let the runtime pick which NPU core(s) to use.
ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_AUTO)
if ret != 0:
    print('Init runtime environment failed')
    exit(ret)

outputs = rknn_lite.inference(inputs=[rgb_image], data_format=['nhwc'])
print(outputs)
EOF
python3 rknn_forward.py