訓練參考:《K230 借助 AI Cube 部署 AI 視覺模型(YOLO 等)教程》(嘉楠 AI Cube 多標簽分類,CSDN 博客);報錯排查:《K230 模型訓練 AI Cube 報錯「生成部署文件異常」》(CSDN 博客)。
部署:
# 導入必要的庫和模塊
import os
import ujson # 超快的JSON解析庫
import aicube # AI加速庫
from media.sensor import * # 攝像頭傳感器相關
from media.display import * # 顯示相關
from media.media import * # 媒體處理基礎庫
from time import * # 時間相關
import nncase_runtime as nn # 神經網絡運行時庫
import ulab.numpy as np # 嵌入式優化的numpy替代庫
import time
import utime # 更精確的時間庫
import image # 圖像處理
import random
import gc # 垃圾回收
import utime

# Display mode configuration: "lcd" (7-inch panel) or anything else for HDMI.
display_mode = "lcd"
if display_mode == "lcd":
    # FIX: in the original paste DISPLAY_HEIGHT = 480 was fused into a comment
    # and therefore never executed; restored as real code here.
    DISPLAY_WIDTH = ALIGN_UP(800, 16)   # align to a multiple of 16 (hardware requirement)
    DISPLAY_HEIGHT = 480
else:
    DISPLAY_WIDTH = ALIGN_UP(1920, 16)
    DISPLAY_HEIGHT = 1080

# RGB888 output resolution fed to the AI pipeline.
OUT_RGB888P_WIDTH = ALIGN_UP(1080, 16)
# NOTE(review): name is likely a typo for "HEIGHT"; kept as-is because the
# rest of the file references OUT_RGB888P_HEIGH.
OUT_RGB888P_HEIGH = 720

# Color palette: 80 predefined ARGB colors, one per detection class.
color_four = [
    (255, 220, 20, 60), (255, 119, 11, 32), (255, 0, 0, 142), (255, 0, 0, 230),
    (255, 106, 0, 228), (255, 0, 60, 100), (255, 0, 80, 100), (255, 0, 0, 70),
    (255, 0, 0, 192), (255, 250, 170, 30), (255, 100, 170, 30), (255, 220, 220, 0),
    (255, 175, 116, 175), (255, 250, 0, 30), (255, 165, 42, 42), (255, 255, 77, 255),
    (255, 0, 226, 252), (255, 182, 182, 255), (255, 0, 82, 0), (255, 120, 166, 157),
    (255, 110, 76, 0), (255, 174, 57, 255), (255, 199, 100, 0), (255, 72, 0, 118),
    (255, 255, 179, 240), (255, 0, 125, 92), (255, 209, 0, 151), (255, 188, 208, 182),
    (255, 0, 220, 176), (255, 255, 99, 164), (255, 92, 0, 73), (255, 133, 129, 255),
    (255, 78, 180, 255), (255, 0, 228, 0), (255, 174, 255, 243), (255, 45, 89, 255),
    (255, 134, 134, 103), (255, 145, 148, 174), (255, 255, 208, 186),
    (255, 197, 226, 255), (255, 171, 134, 1), (255, 109, 63, 54), (255, 207, 138, 255),
    (255, 151, 0, 95), (255, 9, 80, 61), (255, 84, 105, 51), (255, 74, 65, 105),
    (255, 166, 196, 102), (255, 208, 195, 210), (255, 255, 109, 65), (255, 0, 143, 149),
    (255, 179, 0, 194), (255, 209, 99, 106), (255, 5, 121, 0), (255, 227, 255, 205),
    (255, 147, 186, 208), (255, 153, 69, 1), (255, 3, 95, 161), (255, 163, 255, 0),
    (255, 119, 0, 170), (255, 0, 182, 199), (255, 0, 165, 120), (255, 183, 130, 88),
    (255, 95, 32, 0), (255, 130, 114, 135), (255, 110, 129, 133), (255, 166, 74, 118),
    (255, 219, 142, 185), (255, 79, 210, 114), (255, 178, 90, 62), (255, 65, 70, 15),
    (255, 127, 167, 115), (255, 59, 105, 106), (255, 142, 108, 45), (255, 196, 172, 0),
    (255, 95, 54, 80), (255, 128, 76, 255), (255, 201, 57, 1), (255, 246, 0, 122),
    (255, 191, 162, 208),
]

# Deployment file locations on the SD card.
root_path = "/sdcard/mp_deployment_source/"
config_path = root_path + "deploy_config.json"
deploy_conf = {}
debug_mode = 1  # >0 enables per-frame timing output via ScopedTiming
class ScopedTiming:
    """Context manager that prints the wall-clock time of its body in ms.

    Usage: ``with ScopedTiming("label", debug_mode > 0): ...``
    When *enable_profile* is falsy the block runs with no timing overhead.
    """

    def __init__(self, info="", enable_profile=True):
        self.info = info                      # label printed with the timing
        self.enable_profile = enable_profile  # profiling on/off switch

    def __enter__(self):
        if self.enable_profile:
            self.start_time = time.time_ns()  # nanosecond-resolution start stamp
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.enable_profile:
            elapsed_time = time.time_ns() - self.start_time
            print(f"{self.info} took {elapsed_time / 1000000:.2f} ms")
def read_deploy_config(config_path):
    """Read and parse the JSON deployment config at *config_path*.

    Returns the parsed dict, or None when the file does not contain
    valid JSON (the parse error is printed).
    """
    # FIX: original left `config` unbound when ujson raised ValueError,
    # so the `return` itself crashed; initialize it first.
    config = None
    with open(config_path, 'r') as json_file:
        try:
            config = ujson.load(json_file)  # fast JSON parse (MicroPython ujson)
        except ValueError as e:
            print("JSON 解析錯誤:", e)
    return config
def detection():
    """Main K230 detection loop.

    Loads a kmodel plus parameters from deploy_config.json, captures camera
    frames, letterbox-preprocesses them with AI2D, runs KPU inference,
    post-processes with aicube and draws detection boxes on an OSD overlay.
    Loops until an exception/interrupt, then releases all hardware resources.
    Returns 0.
    """
    print("det_infer start")

    # ---- load deployment configuration ----
    deploy_conf = read_deploy_config(config_path)
    kmodel_name = deploy_conf["kmodel_path"]                    # model file path
    labels = deploy_conf["categories"]                          # class labels (used by the full sample for text overlay)
    confidence_threshold = deploy_conf["confidence_threshold"]  # detection score threshold
    nms_threshold = deploy_conf["nms_threshold"]                # NMS IoU threshold
    img_size = deploy_conf["img_size"]                          # model input size [width, height]
    num_classes = deploy_conf["num_classes"]                    # number of classes
    nms_option = deploy_conf["nms_option"]                      # NMS option flag
    model_type = deploy_conf["model_type"]                      # "AnchorBaseDet", "GFLDet", or anchor-free
    # anchor configuration (anchor-based detectors only)
    if model_type == "AnchorBaseDet":
        anchors = deploy_conf["anchors"][0] + deploy_conf["anchors"][1] + deploy_conf["anchors"][2]

    # ---- preprocessing geometry (letterbox: scale keeping aspect ratio, pad rest) ----
    kmodel_frame_size = img_size
    frame_size = [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH]  # actual camera frame size
    strides = [8, 16, 32]                                # feature-map strides
    ori_w = OUT_RGB888P_WIDTH
    ori_h = OUT_RGB888P_HEIGH
    width, height = kmodel_frame_size
    ratiow = width / ori_w
    ratioh = height / ori_h
    ratio = min(ratiow, ratioh)          # smaller ratio preserves aspect ratio
    new_w = int(ratio * ori_w)
    new_h = int(ratio * ori_h)
    dw = (width - new_w) / 2             # horizontal padding, split left/right
    dh = (height - new_h) / 2            # vertical padding, split top/bottom
    top = int(round(dh - 0.1))
    bottom = int(round(dh + 0.1))
    left = int(round(dw - 0.1))
    # FIX: original used dw - 0.1 here, making the right pad asymmetric with
    # `bottom`; the standard letterbox rounding is dw + 0.1.
    right = int(round(dw + 0.1))

    # ---- inference components ----
    kpu = nn.kpu()     # KPU: neural-network accelerator
    ai2d = nn.ai2d()   # AI2D: image-preprocessing accelerator
    kpu.load_kmodel(root_path + kmodel_name)
    ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)
    ai2d.set_pad_param(True, [0, 0, 0, 0, top, bottom, left, right], 0, [114, 114, 114])  # grey letterbox padding
    ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
    ai2d_builder = ai2d.build([1, 3, OUT_RGB888P_HEIGH, OUT_RGB888P_WIDTH], [1, 3, height, width])

    # ---- camera ----
    sensor = Sensor(id=1)
    sensor.reset()
    sensor.set_hmirror(False)  # horizontal mirror off
    sensor.set_vflip(False)    # vertical flip off
    # display channel (YUV420 semi-planar)
    sensor.set_framesize(DISPLAY_WIDTH, DISPLAY_HEIGHT)
    sensor.set_pixformat(PIXEL_FORMAT_YUV_SEMIPLANAR_420)
    # AI channel (planar RGB888)
    sensor.set_framesize(OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH, chn=CAM_CHN_ID_2)
    sensor.set_pixformat(PIXEL_FORMAT_RGB_888_PLANAR, chn=CAM_CHN_ID_2)
    # bind camera channel 0 straight to the video display layer
    sensor_bind_info = sensor.bind_info(x=0, y=0, chn=CAM_CHN_ID_0)
    Display.bind_layer(**sensor_bind_info, layer=Display.LAYER_VIDEO1)
    # ---- display device ----
    if display_mode == "lcd":
        Display.init(Display.ST7701, to_ide=True)   # 7-inch LCD panel
    else:
        Display.init(Display.LT9611, to_ide=True)   # HDMI output
    # OSD layer used to draw detection results on top of the video
    osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888)

    # FIX: initialize both tensors before the try block so the `del` statements
    # in `finally` never hit an unbound name when setup fails early.
    ai2d_input_tensor = None
    ai2d_output_tensor = None
    try:
        MediaManager.init()  # bring up the media subsystem
        sensor.run()         # start the camera
        # FIX: buffer shape must match ai2d_builder's output [1,3,height,width];
        # the original (1,3,width,height) only worked for square model inputs.
        ai2d_output_tensor = nn.from_numpy(np.ones((1, 3, height, width), dtype=np.uint8))
        while True:
            with ScopedTiming("total", debug_mode > 0):
                rgb888p_img = sensor.snapshot(chn=CAM_CHN_ID_2)
                if rgb888p_img.format() == image.RGBP888:
                    # preprocessing: resize + pad into the model input tensor
                    ai2d_input = rgb888p_img.to_numpy_ref()
                    ai2d_input_tensor = nn.from_numpy(ai2d_input)
                    ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor)
                    # inference
                    kpu.set_input_tensor(0, ai2d_output_tensor)
                    kpu.run()
                    # collect flattened output tensors
                    results = []
                    for i in range(kpu.outputs_size()):
                        out_data = kpu.get_output_tensor(i)
                        result = out_data.to_numpy().flatten()
                        results.append(result)
                        del out_data  # release tensor promptly
                        gc.collect()
                    # post-processing; the blog elided the argument lists —
                    # TODO: fill in per the aicube post-process API before running
                    if model_type == "AnchorBaseDet":
                        det_boxes = aicube.anchorbasedet_post_process(...)
                    elif model_type == "GFLDet":
                        det_boxes = aicube.gfldet_post_process(...)
                    else:
                        det_boxes = aicube.anchorfreedet_post_process(...)
                    # draw results onto the OSD layer
                    osd_img.clear()
                    if det_boxes:
                        for box in det_boxes:
                            # map box from AI-frame coordinates to display coordinates
                            x1, y1, x2, y2 = box[2], box[3], box[4], box[5]
                            w = (x2 - x1) * DISPLAY_WIDTH / OUT_RGB888P_WIDTH
                            h = (y2 - y1) * DISPLAY_HEIGHT / OUT_RGB888P_HEIGH
                            osd_img.draw_rectangle(
                                int(x1 * DISPLAY_WIDTH / OUT_RGB888P_WIDTH),
                                int(y1 * DISPLAY_HEIGHT / OUT_RGB888P_HEIGH),
                                int(w), int(h),
                                color=color_four[box[0]][1:],  # per-class RGB (drop alpha byte)
                            )
                    Display.show_image(osd_img, 0, 0, Display.LAYER_OSD3)
                    gc.collect()
                rgb888p_img = None  # drop the frame reference
    except Exception as e:
        print(f"運行異常: {e}")
    finally:
        # ---- resource cleanup ----
        os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
        del ai2d_input_tensor
        del ai2d_output_tensor
        sensor.stop()          # stop the camera
        Display.deinit()       # shut down the display
        MediaManager.deinit()  # release media resources
        gc.collect()
        time.sleep(1)
        nn.shrink_memory_pool()  # return NN memory to the system
    print("det_infer end")
    return 0


if __name__ == "__main__":
    detection()