1. Create the environment
conda create -n yolov5 python=3.8
Go to pytorch.org and get the install command for version 1.8.2 (the LTS release):
pip --default-timeout=1688 install torch==1.8.2 torchvision==0.9.2 torchaudio==0.8.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cu111
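To confirm the install picked up CUDA, a quick check in Python (assumes the environment above):
import torch
print(torch.__version__)           # should print 1.8.2
print(torch.cuda.is_available())   # True if the cu111 build can see the GPU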
Download the yolov5 zip from GitHub, unzip it, and install the dependencies from inside the repo:
pip --default-timeout=1688 install -r requirements.txt
Alternatively, the files are shared via Baidu Netdisk: yolov5
Link: https://pan.baidu.com/s/1oigw2mPo9uVoX4hWXmiGWA?pwd=npue  Extraction code: npue
Install Jupyter:
pip install pywinpty==2.0.5
pip --default-timeout=1688 install jupyterlab
Install the annotation tool:
pip install labelimg
pip install pyqt5 sip
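Once installed, start it from the terminal (the console command installed by the package):
labelImg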
2. Model detection
weights
The trained model weights to use, e.g.:
python detect.py --weights yolov5x.pt
source
What to run detection on: a single image, a folder, the screen, or a camera stream.
conf-thres
Confidence threshold; detections scoring below it are discarded (default 0.25).
iou-thres
IoU threshold used by NMS to merge overlapping boxes (default 0.45).
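Putting these flags together, a typical run looks like this (the source path is the repo's sample-image folder):
python detect.py --weights yolov5x.pt --source data/images --conf-thres 0.25 --iou-thres 0.45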
Detection via torch.hub
import torch

# Load yolov5s from the local repo clone (run from the repo root)
model = torch.hub.load("./", "yolov5s", source="local")
img = "./data/images/zidane.jpg"
results = model(img)
results.show()   # display the rendered detections
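If you need the raw detections rather than a rendered image, the same results object exposes them as a table (a feature of YOLOv5 hub models):
print(results.pandas().xyxy[0])   # one row per detection: box corners, confidence, class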
3. Dataset construction
Use labelimg and switch the save format to YOLO.
Hotkeys:
a: previous image
d: next image
w: create a bounding box
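For reference, a YOLO-format label file holds one object per line as "class_id x_center y_center width height", with all coordinates normalized to [0, 1]; for example:
0 0.481 0.634 0.690 0.713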
4. Model training
Create images and labels folders to hold the raw image files and the annotation files, respectively.
Move the classes file out of labels into the same directory as images and labels, as in the sketch below.
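One layout consistent with the paths used later in this walkthrough (demo_images as the dataset folder and the train subfolders are assumptions):
demo_images/
├── classes.txt
├── images/
│   └── train/    # the images
└── labels/
    └── train/    # one .txt label file per image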
Look at the weights and data parameters in detect.py to see how the paths of the relevant files are specified.
Edit the contents of the .yaml accordingly; to train, point the data argument of train.py at the modified .yaml (a sketch follows).
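A minimal sketch of such a dataset .yaml, assuming the demo_images layout above and a single class (the class name is a placeholder):
path: ./demo_images
train: images/train
val: images/train   # reusing train as val for a quick demo
nc: 1
names: ["target"]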
If training fails because the paging file is too small, increase the virtual memory:
Settings → System → About → Advanced system settings
Then enlarge the paging file for the drive that holds the project.
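With the .yaml in place, launch training from the repo root; a typical invocation (the .yaml name follows the sketch above, the hyperparameters are illustrative):
python train.py --data demo_images.yaml --weights yolov5s.pt --img 640 --epochs 100 --batch-size 16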
After training, you can inspect the training curves with:
tensorboard --logdir runs
If no data shows up, install this first:
pip install protobuf==3.20.0
Detect on a video:
python detect.py --weights runs/train/exp11/weights/best.pt --source BVN.mp4 --view-img
5. PySide6 GUI
pip install PySide6
After laying out the front end in the UI designer, install the PySide6 extension in VS Code and convert the .ui file into a Python module.
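A minimal sketch of that conversion, assuming the designer file is named main_window.ui (the output name must match the main_window_ui import below):
pyside6-uic main_window.ui -o main_window_ui.py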
import cv2
import sys
import torch
from PySide6.QtWidgets import QMainWindow, QApplication, QFileDialog
from PySide6.QtGui import QPixmap, QImage
from PySide6.QtCore import QTimer

from main_window_ui import Ui_MainWindow   # module generated from the .ui file


def convert2QImage(img):
    # Wrap an RGB numpy array in a QImage without copying the pixel data
    height, width, channel = img.shape
    return QImage(img, width, height, width * channel, QImage.Format_RGB888)


class MainWindow(QMainWindow, Ui_MainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.model = torch.hub.load(
            "./",            # points to the local clone of the repo
            "custom",
            path="runs/train/exp11/weights/best.pt",
            source="local",
        )
        self.setupUi(self)
        self.timer = QTimer()
        self.timer.setInterval(100)   # grab one video frame every 100 ms
        self.video = None
        self.bind_slots()

    def image_pred(self, file_path):
        results = self.model(file_path)
        image = results.render()[0]
        return convert2QImage(image)

    def open_image(self):
        print("Detect-image button clicked")
        self.timer.stop()
        file_path, _ = QFileDialog.getOpenFileName(
            self, dir="./demo_images/images/train", filter="*.jpg;*.png;*.jpeg"
        )
        if file_path:
            pixmap = QPixmap(file_path)
            qimage = self.image_pred(file_path)
            self.input.setPixmap(pixmap)
            self.output.setPixmap(QPixmap.fromImage(qimage))

    def video_pred(self):
        ret, frame = self.video.read()
        if not ret:
            self.timer.stop()   # video finished
        else:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.input.setPixmap(QPixmap.fromImage(convert2QImage(frame)))
            results = self.model(frame)
            image = results.render()[0]
            self.output.setPixmap(QPixmap.fromImage(convert2QImage(image)))

    def open_video(self):
        print("Detect-video button clicked")
        file_path, _ = QFileDialog.getOpenFileName(self, dir="./", filter="*.mp4")
        if file_path:
            self.video = cv2.VideoCapture(file_path)
            self.timer.start()

    def bind_slots(self):
        self.detpic.clicked.connect(self.open_image)
        self.detvid.clicked.connect(self.open_video)
        self.timer.timeout.connect(self.video_pred)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    app.exec()
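Note the design: the QTimer polls the opened video every 100 ms (roughly 10 frames per second), and video_pred stops the timer once cv2.VideoCapture runs out of frames, so image and video detection share the same input/output widgets.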
6. Gradio Web GUI
pip install gradio
import torch
import gradio as gr

# Load the trained weights from the local YOLOv5 clone
model = torch.hub.load("./", "custom", path="runs/train/exp11/weights/best.pt", source="local")

title = "YOLOv5 demo built with Gradio"
desc = "A YOLOv5 demo project built with Gradio. Simple and convenient!"

base_conf, base_iou = 0.25, 0.45


def det_image(img, conf, iou):
    # Apply the slider values as the confidence / NMS-IoU thresholds
    model.conf = conf
    model.iou = iou
    return model(img).render()[0]


gr.Interface(
    inputs=[
        "image",
        gr.Slider(minimum=0, maximum=1, value=base_conf, interactive=True),
        gr.Slider(minimum=0, maximum=1, value=base_iou, interactive=True),
    ],
    outputs=["image"],
    fn=det_image,
    title=title,
    description=desc,
    live=True,
    examples=[
        ["./demo_images/images/train/demo_images30.jpg", base_conf, base_iou],
        ["./demo_images/images/train/demo_images120.jpg", base_conf, base_iou],
    ],
).launch()
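By default launch() serves the interface locally at http://127.0.0.1:7860; pass share=True to launch() if you need a temporary public link.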