前端圖像視頻實時檢測

需求:在目標檢測時,我們要求前端能夠將后端檢測的結果實時渲染在圖片或者視頻上。圖片是靜態的,只需要渲染一次;而視頻是動態的,播放時需要根據幀數來實時渲染標注框,可以想象視頻就是由一張張圖片播放的效果。

1.前端技術:sse、canvas

2.模型:yolov11


效果圖

圖片檢測:

視頻檢測:


步驟1:預覽圖片和視頻,canvas繪制標注框

<div class="image-list">
  <!-- Previous-media button -->
  <div v-if="currentMedia" class="btn" @click="prevPage">
    <kp-icon name="LeftOutlined" style="font-size: 30px"></kp-icon>
  </div>
  <div v-if="currentMedia" class="media">
    <div class="workspace">
      <!-- Image preview: the canvas overlays the <img> for annotation boxes -->
      <div v-if="isImage" class="media-container">
        <img ref="imgPreviewRef" style="height: 100%; overflow: scroll" alt="" />
        <canvas id="imgCanvasOverlay"></canvas>
      </div>
      <!-- Video preview: the canvas overlays the <video> for per-frame boxes -->
      <div v-else class="media-container">
        <video id="videoElement" controls height="90%"></video>
        <canvas id="canvasOverlay"></canvas>
      </div>
    </div>
  </div>
  <empty v-else title="暫無圖像或視頻" />
  <!-- Next-media button -->
  <div v-if="currentMedia" class="btn" @click="nextPage">
    <kp-icon name="RightOutlined" style="font-size: 30px"></kp-icon>
  </div>
</div>

步驟2:前端選擇sse,在頁面加載時與后端建立連接

onMounted(() => {
  // Generate a random client id and open the SSE channel on which the backend
  // pushes detection results.
  clientId.value = Math.random().toString(36).substring(2, 15)
  const sseUrl = `${shipDetectUrl}/sse/${clientId.value}`
  sseConnection.value = new EventSource(sseUrl)

  sseConnection.value.addEventListener('message', event => {
    try {
      const data = JSON.parse(event.data)
      if (data && data.filename === currentMedia.value?.fileName) {
        if (!data['is_video']) {
          // Case 1: image. Convert backend pixel coordinates to the displayed
          // size; det_res is [x1, y1, x2, y2, ...] so width/height are deltas.
          const scale = data.height / imgPreviewRef.value.getBoundingClientRect().height
          // forEach, not map: we only push side effects, the return value was unused.
          data.data.forEach((item, index) => {
            imgAnnotation.value.push([
              item.det_res[0] / scale,
              item.det_res[1] / scale,
              (item.det_res[2] - item.det_res[0]) / scale,
              (item.det_res[3] - item.det_res[1]) / scale,
              item.det_res[4],
              item.det_res[5],
              item.cls_res,
              index,
            ])
          })
          drawImgAnnotations()
        } else {
          // Case 2: video. Annotations arrive one message per frame, keyed by
          // frame number. (The original `else if (data['is_video'])` was
          // redundant after the `!data['is_video']` branch.)
          if (videoCheckState.value === 0) {
            videoCheckState.value = 1
          } else if (data['video_end']) {
            videoCheckState.value = 2
          }
          frameRate.value = data.fps
          const scale = data.height / video.value.getBoundingClientRect().height // display scale
          const annotationData = data.data.map((item, index) => [
            item.det_res[0] / scale,
            item.det_res[1] / scale,
            (item.det_res[2] - item.det_res[0]) / scale,
            (item.det_res[3] - item.det_res[1]) / scale,
            item.det_res[4],
            item.det_res[5],
            item.cls_res,
            index,
          ])
          annotations.set(data.frame, annotationData)
        }
      } else if (data['batch_end']) {
        // Case 3: batch detection finished for the whole dataset.
        batchLoading.value = false
        checkState.value = 2
        message.success('已完成批量檢測!')
        if (!isImage.value && video.value.paused) video.value.play()
        handleDetection()
      }
    } catch (error) {
      console.error('Error parsing SSE data:', error)
    }
  })
  sseConnection.value.addEventListener('error', error => {
    console.error('SSE connection error:', error)
  })
})

步驟3:圖片或者視頻加載完成,立即調用檢測接口,檢測結果在sse連接中返回

// Preview the current image and trigger detection once it has loaded.
function previewImg() {
  nextTick(() => {
    imageCanvas.value = document.getElementById('imgCanvasOverlay')
    if (imageCanvas.value) {
      imageCanvasCtx.value = imageCanvas.value.getContext('2d')
    }
    imgPreviewRef.value.src = `/_api/detectionDataset/preview?filePath=${currentMedia.value.filePath.replaceAll('\\', '/')}`
    imgPreviewRef.value.onload = () => {
      // Reset previous results and size the overlay canvas to the rendered image.
      imgAnnotation.value = []
      clickedBoxId.value = null
      imageCanvas.value.width = imgPreviewRef.value.width
      imageCanvas.value.height = imgPreviewRef.value.height
      handleDetection()
    }
    // previewImg runs on every navigation and the <img> element is reused, so
    // drop any previous listener first — the original stacked a duplicate
    // click handler each time.
    imgPreviewRef.value.removeEventListener('click', handleCanvasClick)
    imgPreviewRef.value.addEventListener('click', handleCanvasClick)
  })
}
const isFirst = ref(false) // whether the first auto-play/detection has been triggered
const isRePlaying = ref(false) // whether playback resumed after a pause
// Preview the current video, wire up playback events, and start the draw loop.
function previewVideo() {
  annotations.clear()
  nextTick(() => {
    video.value = document.getElementById('videoElement')
    videoCanvas.value = document.getElementById('canvasOverlay')
    if (videoCanvas.value) {
      videoCanvasCtx.value = videoCanvas.value.getContext('2d')
    }
    if (video.value && currentMedia.value) {
      video.value.src = currentMedia.value.src
      // previewVideo runs on every navigation and the <video> element is
      // reused, so assign on* handler properties (which replace the previous
      // handler) instead of addEventListener with anonymous arrows — the
      // original stacked a fresh set of listeners on every call.
      video.value.oncanplay = () => {
        if (!isFirst.value && video.value.currentTime === 0) {
          handleDetection()
          video.value.playbackRate = 0.5
          video.value.play()
          animationLoop() // start the per-frame drawing loop
          isFirst.value = true
        }
      }
      video.value.onplay = () => {
        isFirst.value = false
        clickedBoxId.value = null
      }
      video.value.onpause = () => {
        // A user pause (not a resume transition) also pauses backend detection.
        if (!isRePlaying.value) {
          stopVideoDetection()
        }
        isRePlaying.value = true
      }
      video.value.onplaying = () => {
        // Resuming after a pause restarts detection from the current frame.
        if (video.value.currentTime !== 0 && isRePlaying.value) {
          handleDetection()
        }
        isRePlaying.value = false
      }
      video.value.onloadedmetadata = resizeCanvas
      // Named handler: remove before add so window listeners do not accumulate.
      window.removeEventListener('resize', resizeCanvas)
      window.addEventListener('resize', resizeCanvas)
      video.value.onclick = handleCanvasClick
    }
  })
}
// Call the detection API
// Call the detection API; results come back asynchronously over the SSE channel.
function handleDetection() {
  const param = {
    source: currentMedia.value.filePath,
    model: modelType.value, // selected model type
    client_id: clientId.value,
  }
  if (!isImage.value) {
    // For videos, detection resumes from the frame currently on screen.
    param.frame = Math.floor(video.value.currentTime * frameRate.value)
  }
  localImageVideo
    .startDetect(param)
    .then(res => {
      taskId.value = res.data.task_id
    })
    .catch(err => {
      // The original left this promise floating; surface failures instead.
      console.error('startDetect failed:', err)
    })
}

步驟4:圖片繪制一次標注框;視頻則按幀繪制標注框,因為返回結果是以幀數為單位(幀數 = 當前時間 * 幀率)

// Draw the image annotation boxes onto the overlay canvas.
function drawImgAnnotations() {
  const ctx = imageCanvasCtx.value
  // Early return: the original only guarded clearRect and would throw on the
  // later ctx calls when the canvas was not ready.
  if (!ctx) return
  ctx.clearRect(0, 0, imageCanvas.value.width, imageCanvas.value.height)
  detectionData.value = []
  imgAnnotation.value.forEach(item => {
    // Box layout: [x, y, w, h, level, type, cls_res, id] (see SSE handler).
    const [x1, y1, width, height, level, type, cls_res, id] = item
    detectionData.value.push({ type, level, children: cls_res, id })
    // Box outline; the clicked box is highlighted in red.
    ctx.beginPath()
    ctx.strokeStyle = clickedBoxId.value === id ? 'red' : 'yellow'
    ctx.lineWidth = 2
    ctx.strokeRect(x1, y1, width, height)
    // Type label just above the box.
    ctx.font = '20px Arial'
    ctx.fillStyle = clickedBoxId.value === id ? 'red' : 'yellow'
    ctx.fillText(type.toString(), x1, y1 - 5)
  })
  // Push results to the result table once, not once per box as the original did.
  if (imgAnnotation.value.length > 0) {
    detectionResultRef.value.getData(detectionData.value)
  }
}
// Draw the video annotation boxes
// Draw the annotation boxes for the video frame currently on screen.
function drawAnnotations() {
  if (!video.value) return
  const ctx = videoCanvasCtx.value
  if (!ctx) return // canvas not ready — the original would throw further down
  ctx.clearRect(0, 0, videoCanvas.value.width, videoCanvas.value.height)
  if (video.value.paused) return
  const currentFrame = Math.round(video.value.currentTime * frameRate.value)
  // Look up boxes for the current frame. NOTE(review): the fallbacks compare
  // the frame number against annotations.size (the number of stored frames),
  // not the largest frame key — confirm this matches how the backend numbers
  // frames; behavior kept as in the original.
  let boxes = annotations.get(currentFrame)
  if (currentFrame === 0) {
    boxes = annotations.get(1)
  } else if (currentFrame > annotations.size) {
    boxes = annotations.get(annotations.size)
  }
  detectionData.value = []
  // Canvas is already cleared above; the original cleared it a second time here.
  if (!boxes || boxes.length === 0) return
  boxes.forEach(box => {
    // Box layout: [x, y, w, h, level, type, cls_res, id] (see SSE handler).
    const [x1, y1, width, height, level, type, cls_res, id] = box
    detectionData.value.push({ type, level, children: cls_res, id })
    // Box outline; the clicked box is highlighted in red.
    ctx.beginPath()
    ctx.strokeStyle = clickedBoxId.value === id ? 'red' : 'yellow'
    ctx.lineWidth = 2
    ctx.strokeRect(x1, y1, width, height)
    // Type label just above the box.
    ctx.font = '20px Arial'
    ctx.fillStyle = clickedBoxId.value === id ? 'red' : 'yellow'
    ctx.fillText(type.toString(), x1, y1 - 5)
  })
  // Push results to the result table once, not once per box as the original did.
  detectionResultRef.value.getData(detectionData.value)
}
// High-precision frame listener for the video
// Redraw the video annotations on every animation frame.
// NOTE(review): once started this loop is never cancelled — confirm only one
// instance is ever launched per page lifetime.
function animationLoop() {
  drawAnnotations()
  requestAnimationFrame(animationLoop)
}

步驟5:點擊標注框可查看目標細節

<!-- Container showing the zoomed-in detail of the clicked target -->
<div class="detail">
  <div class="detail-title">細節</div>
  <div class="magnifier-glass"><img id="croppedPreview" /></div>
</div>

<style lang="scss" scoped>
.detail {
  width: 300px;
  background: #fff;
  padding: 10px;

  .magnifier-glass {
    padding: 10px 0;
    width: calc(100% - 20px);
    overflow: scroll;

    #croppedPreview {
      object-fit: contain;
      border: 2px solid #fff;
    }
  }
}
</style>
// Handle a click on the preview: hit-test the annotation boxes and, on a hit,
// highlight the box and show its cropped detail.
function handleCanvasClick(event) {
  const rect = isImage.value
    ? imageCanvas.value.getBoundingClientRect()
    : videoCanvas.value.getBoundingClientRect()
  const clickX = event.clientX - rect.left
  const clickY = event.clientY - rect.top
  // Shared hit test: box layout is [x, y, w, h, ...].
  const isHit = box => {
    const [x1, y1, width, height] = box
    return clickX >= x1 && clickX <= x1 + width && clickY >= y1 && clickY <= y1 + height
  }
  let clickedBox = null
  if (isImage.value) {
    clickedBox = imgAnnotation.value.find(isHit)
  } else {
    // Video: hit-test against the boxes of the frame currently on screen.
    const currentFrame = Math.floor(video.value.currentTime * frameRate.value)
    let boxes = annotations.get(currentFrame)
    if (currentFrame === 0) {
      boxes = annotations.get(1)
    } else if (currentFrame > annotations.size) {
      boxes = annotations.get(annotations.size)
    }
    if (!boxes) return
    clickedBox = boxes.find(isHit)
  }
  if (clickedBox) {
    event.preventDefault()
    detectionResultRef.value.selectResult(clickedBox)
    captureBoxArea(clickedBox)
    clickedBoxId.value = clickedBox[clickedBox.length - 1] // last element is the id
    // Redraw so the selected box is highlighted.
    if (isImage.value) {
      drawImgAnnotations()
    } else {
      drawAnnotations()
    }
  }
}
// Render the clicked target
// Snapshot the current media at its displayed size, crop out the clicked box,
// and show the crop in the detail panel.
function captureBoxArea(box) {
  const source = isImage.value ? imgPreviewRef.value : video.value
  const { width: srcW, height: srcH } = source.getBoundingClientRect()
  // Temporary canvas holding the full displayed frame/image.
  const snapshot = document.createElement('canvas')
  snapshot.width = srcW
  snapshot.height = srcH
  const snapshotCtx = snapshot.getContext('2d')
  snapshotCtx.drawImage(source, 0, 0, srcW, srcH)
  // Box layout: [x, y, w, h, ...] in displayed-pixel coordinates.
  const [x1, y1, width, height] = box
  const imageData = snapshotCtx.getImageData(x1, y1, width, height)
  // Second canvas holding only the cropped region.
  const cropped = document.createElement('canvas')
  cropped.width = width
  cropped.height = height
  cropped.getContext('2d').putImageData(imageData, 0, 0)
  // Display the crop in the detail preview image.
  const croppedPreview = document.getElementById('croppedPreview')
  croppedPreview.src = cropped.toDataURL()
  croppedPreview.style.display = 'block'
}

完整代碼

<template><div class="analysis"><div class="analysis-top"><div class="preview-wrap"><div class="top-btns"><div v-if="checkState !== 1" class="left-btn"><a-button type="primary" @click="handleDataset">選擇數據集</a-button><a-button type="primary" style="margin-left: 10px" @click="handleUpload">本地上傳</a-button></div><div v-if="currentMedia" class="name">{{ currentMedia.fileName }}</div><div v-if="currentMedia" class="right-btn"><!--數量--><span class="num">{{ activeMediaIndex + 1 }} / {{ mediaList.length }}</span><span class="refresh" @click="changeLoad(datasetId)"><kp-icon name="icon_shuaxin"></kp-icon></span><span>模型類型:<a-selectv-model:value="modelType":options="modelTypeOptions"style="width: 80px"@change="reset"></a-select></span><!--檢測按鈕--><!-- <a-button class="btn" @click="handleDetection">檢測</a-button> --><a-buttonv-if="!isImage && videoCheckState === 1"class="btn"@click="stopVideoDetection">暫停檢測</a-button><a-button :loading="batchLoading" class="btn" @click="handleBatchDetection">批量檢測</a-button></div></div><div class="image-list"><div v-if="currentMedia" class="btn" @click="prevPage"><kp-icon name="LeftOutlined" style="font-size: 30px"></kp-icon></div><div v-if="currentMedia" class="media"><div class="workspace"><div v-if="isImage" class="media-container"><img ref="imgPreviewRef" style="height: 100%; overflow: scroll" alt="" /><canvas id="imgCanvasOverlay"></canvas></div><!--視頻預覽--><div v-else class="media-container"><video id="videoElement" controls height="90%"></video><canvas id="canvasOverlay"></canvas></div></div></div><empty v-else title="暫無圖像或視頻" /><div v-if="currentMedia" class="btn" @click="nextPage"><kp-icon name="RightOutlined" style="font-size: 30px"></kp-icon></div></div></div></div><div class="analysis-bottom"><div class="result"><detection-resultref="detectionResultRef":current-media="currentMedia"@select-row="selectRow"></detection-result></div><div class="detail"><div class="detail-title">細節</div><div class="magnifier-glass"><img id="croppedPreview" 
/></div></div></div></div><select-dataset ref="selectDatasetRef" @handle-img-and-video="handleImgAndVideo"></select-dataset><local-upload ref="localUploadRef" @change-load="changeLoad"></local-upload>
</template><script setup lang="ts">
import '@kunpeng/layout/default/vgg.css'
import '@kunpeng/layout/default/vgg3.css'
import DetectionResult from './components/detectionResult.vue'
import SelectDataset from './components/selectDataset.vue'
import LocalUpload from './components/localUpload.vue'
import empty from '@/components/empty/index.vue'
import annotated from '@kunpeng/api/dataset-annotation/annotated'
import localImageVideo from '@/api/localImageVideo'
import { message } from 'ant-design-vue'
import { ref } from 'vue'const $store = inject('$store')
const baseApi = $store.$appStore.baseApi
const userInfo = $store.$userStore.userInfoconst resultZoomInImg = ref<string>({}) // 結果放大影像數據
const sourceCanvas = ref<HTMLCanvasElement | null>(null)
const isRunning = ref(false) // 視頻是否正在播放
const mediaList = ref([]) // 圖像或視頻列表
const currentMedia = ref(null) // 當前圖像或者視頻
const selectDatasetRef = ref()
const localUploadRef = ref()
const openImage = ref(() => {})
const openVideo = ref(() => {})
const handleAnnotated = ref(() => {})
const handleImgDetection = ref(() => {}) // 檢測單個圖片
const activeMediaIndex = ref(0)
const currentStreamJson = ref('') // 當前sse接收到的標注信息
const imgAnnotation = ref([]) //圖片檢測結果
const detectionData = ref([]) // 檢測結果數據
const batchLoading = ref(false) //批量檢測加載狀態
const checkState = ref(0) // 批量檢測狀態 0 未檢測 1 檢測中 2 檢測完成
const videoCheckState = ref(0) // 視頻檢測狀態 0 未檢測 1 檢測中 2 檢測完成
//屬性json
const attributes_json = ref({region: {},
})
const labelJson = ref([])
const fileList = ref([])
const allRegionList = ref([])
const videoInterval = ref(null)
const isImage = computed(() => {return ['png', 'jpg', 'bmp', 'jpeg', 'webp'].includes(currentMedia.value?.ext)
})
const imgPreviewRef = ref(null) // 圖片
const imageCanvas = ref(null) // 圖片的標注畫布
const imageCanvasCtx = ref(null)
const video = ref(null) // 視頻
const videoCanvas = ref(null) // 視頻的標注畫布
const videoCanvasCtx = ref(null)
let annotations = new Map() // 存儲視頻的標注數據,按時間戳索引
const frameRate = ref(25) // 視頻的幀率
const detectionResultRef = ref(null)
const isCustomButton = ref(false)
const clickedBoxId = ref(null)
const modelType = ref(0)
const modelTypeOptions = window.config.modelTypeOptions
const datasetId = ref('')
const loading = ref(false) // 檢測按鈕loading狀態
const taskId = ref('') // 圖片視頻檢測任務id
const clientId = ref('') // 客戶端id
const sseConnection = ref(null)
const shipDetectUrl = window.config.shipDetectUrlonMounted(() => {clientId.value = Math.random().toString(36).substring(2, 15)console.log(clientId.value)let sseUrl = `${shipDetectUrl}/sse/${clientId.value}`sseConnection.value = new EventSource(sseUrl)// Removed the nextTick block since we're now using @loadedmetadata directlysseConnection.value.addEventListener('message', event => {try {let data = JSON.parse(event.data)if (data && data.filename === currentMedia.value?.fileName) {// 情況一:圖片if (!data['is_video']) {let scale = data.height / imgPreviewRef.value.getBoundingClientRect().height // 縮放比例data.data.map((item, index) => {imgAnnotation.value.push([item.det_res[0] / scale,item.det_res[1] / scale,(item.det_res[2] - item.det_res[0]) / scale,(item.det_res[3] - item.det_res[1]) / scale,item.det_res[4],item.det_res[5],item.cls_res,index,])})drawImgAnnotations()} else if (data['is_video']) {console.log('sse消息', data)// 情況二:視頻// 收到消息開始播放視頻if (videoCheckState.value === 0 && video.value.paused) {videoCheckState.value = 1} else if (data['video_end']) {videoCheckState.value = 2}frameRate.value = data.fpslet scale = data.height / video.value.getBoundingClientRect().height // 縮放比例let annotationData = data.data.map((item, index) => {return [item.det_res[0] / scale,item.det_res[1] / scale,(item.det_res[2] - item.det_res[0]) / scale,(item.det_res[3] - item.det_res[1]) / scale,item.det_res[4],item.det_res[5],item.cls_res,index,]})annotations.set(data.frame, annotationData)console.log('frame', data.frame)console.log('annotationData', annotationData)console.log('annotations', annotations)}} else if (data['batch_end']) {batchLoading.value = false// 情況三:批量檢測checkState.value = 2message.success('已完成批量檢測!')if (!isImage.value && video.value.paused) video.value.play()handleDetection()}} catch (error) {console.error('Error parsing SSE data:', error)}})sseConnection.value.addEventListener('error', error => {console.error('SSE connection error:', error)})
})
onUnmounted(() => {
  clearInterval(videoInterval.value)
  // The SSE connection opened in onMounted and the window resize listener
  // added in previewVideo were never released — close/remove them here to
  // avoid leaking across page navigations.
  sseConnection.value?.close()
  window.removeEventListener('resize', resizeCanvas)
})
// Detection button click handler
async function handleDetection() {let param = {source: currentMedia.value.filePath,model: modelType.value, // 模型client_id: clientId.value,}if (!isImage.value) {param.frame = Math.floor(video.value.currentTime * frameRate.value)}await localImageVideo.startDetect(param).then(res => {taskId.value = res.data.task_id})
}// 批量檢測按鈕點擊事件
function handleBatchDetection() {checkState.value = 1message.info('批量檢測中')batchLoading.value = truelet lastIndex = currentMedia.value.filePath.lastIndexOf('/')let param = {source: currentMedia.value.filePath.substring(0, lastIndex),model: modelType.value, // 模型client_id: clientId.value,}localImageVideo.startDetect(param)
}// 預覽圖像
function previewImg() {// imageCanvasCtx.value.clearRect(0, 0, imageCanvas.value.width, imageCanvas.value.height)nextTick(() => {imageCanvas.value = document.getElementById('imgCanvasOverlay')if (imageCanvas.value) {imageCanvasCtx.value = imageCanvas.value.getContext('2d')}imgPreviewRef.value.src = `/_api/detectionDataset/preview?filePath=${currentMedia.value.filePath.replaceAll('\\', '/')}`imgPreviewRef.value.onload = () => {imgAnnotation.value = []clickedBoxId.value = nullimageCanvas.value.width = imgPreviewRef.value.widthimageCanvas.value.height = imgPreviewRef.value.height// 如果批量檢測結束,則調檢測接口handleDetection()}imgPreviewRef.value.addEventListener('click', handleCanvasClick)})
}const isFirst = ref(false)
const isSeeking = ref(false) // 是否正在拖動進度條
const debounceTimer = ref(null) // 防抖定時器
const isRePlaying = ref(false) // 是否暫停后重新開始播放// 預覽視頻
function previewVideo() {annotations.clear()nextTick(() => {video.value = document.getElementById('videoElement')videoCanvas.value = document.getElementById('canvasOverlay')if (videoCanvas.value) {videoCanvasCtx.value = videoCanvas.value.getContext('2d')}if (video.value && currentMedia.value) {video.value.src = currentMedia.value.srcvideo.value.addEventListener('canplay', () => {if (!isFirst.value && video.value.currentTime <= 0.1) {handleDetection()video.value.playbackRate = 0.5video.value.play()animationLoop() // 開始繪制循環console.log('觸發檢測1111111111')isFirst.value = true}})video.value.addEventListener('play', () => {console.log('play開始播放')isFirst.value = falseclickedBoxId.value = null// animationLoop() // 開始繪制循環})video.value.addEventListener('pause', () => {if (!isRePlaying.value) {console.log('pause暫停播放')stopVideoDetection()}isRePlaying.value = true})video.value.addEventListener('playing', () => {console.log('暫停后重新開始播放')if (video.value.currentTime !== 0 && isRePlaying.value) {console.log('暫停后重新開始播放==只調用一次')console.log('觸發檢測222222222222222222')handleDetection()}isRePlaying.value = false})video.value.addEventListener('loadedmetadata', resizeCanvas)window.addEventListener('resize', resizeCanvas)video.value.addEventListener('click', handleCanvasClick)}})
}function handleCanvasClick(event) {// 獲取點擊坐標let rect = isImage.value? imageCanvas.value.getBoundingClientRect(): videoCanvas.value.getBoundingClientRect()let clickedBox = nullconst clickX = event.clientX - rect.leftconst clickY = event.clientY - rect.top// 圖片if (isImage.value) {// 查找被點擊的框clickedBox = imgAnnotation.value.find(box => {const [x1, y1, width, height, level, type, cls_res] = boxreturn clickX >= x1 && clickX <= x1 + width && clickY >= y1 && clickY <= y1 + height})} else {// 視頻// 獲取當前幀數據const currentFrame = Math.floor(video.value.currentTime * frameRate.value)let boxes = []// 查找當前時間對應的標注boxes = annotations.get(currentFrame)if (currentFrame === 0) {boxes = annotations.get(1)} else if (currentFrame > annotations.size) {boxes = annotations.get(annotations.size)}if (!boxes) return// 查找被點擊的框clickedBox = boxes.find(box => {const [x1, y1, width, height, level, type, cls_res] = boxreturn clickX >= x1 && clickX <= x1 + width && clickY >= y1 && clickY <= y1 + height})}if (clickedBox) {event.preventDefault()detectionResultRef.value.selectResult(clickedBox)captureBoxArea(clickedBox)clickedBoxId.value = clickedBox[clickedBox.length - 1]// 重新繪制標注框if (isImage.value) {drawImgAnnotations()} else {drawAnnotations()}}
}function selectRow(data) {let boxes = []if (isImage.value) {boxes = imgAnnotation.value} else {// 獲取當前幀數據// video.value.currentTime   這個是視頻當前播放的時間 okconst currentFrame = Math.floor(video.value.currentTime * frameRate.value)// 查找當前時間對應的標注boxes = annotations.get(currentFrame)if (currentFrame === 0) {boxes = annotations.get(1)} else if (currentFrame > annotations.size) {boxes = annotations.get(annotations.size)}if (!boxes) return}// 查找被點擊的框const clickedBox = boxes.find(box => {return data.id === box[box.length - 1]})clickedBoxId.value = clickedBox[clickedBox.length - 1]if (isImage.value) {drawImgAnnotations()} else {drawAnnotations()}captureBoxArea(clickedBox)
}function captureBoxArea(box) {// 創建臨時canvasconst tempCanvas = document.createElement('canvas')const tempCtx = tempCanvas.getContext('2d')// 設置臨時canvas尺寸為實際視頻尺寸let dom = isImage.value ? imgPreviewRef.value : video.valueconst domWidth = dom.getBoundingClientRect().widthconst domHeight = dom.getBoundingClientRect().heighttempCanvas.width = domWidthtempCanvas.height = domHeight// 繪制當前視頻幀tempCtx.drawImage(dom, 0, 0, domWidth, domHeight)// 計算實際像素坐標const [x1, y1, width, height, level, type, cls_res] = box// 截取區域const imageData = tempCtx.getImageData(x1, y1, width, height)// 創建新canvas處理圖像const croppedCanvas = document.createElement('canvas')croppedCanvas.width = widthcroppedCanvas.height = heightcroppedCanvas.getContext('2d').putImageData(imageData, 0, 0)// 顯示預覽const croppedPreview = document.getElementById('croppedPreview')croppedPreview.src = croppedCanvas.toDataURL()croppedPreview.style.display = 'block'
}// 高精度幀監聽
function animationLoop() {drawAnnotations()requestAnimationFrame(animationLoop)
}// 調整Canvas尺寸以匹配視頻
// Resize the overlay canvas to match the rendered video size.
function resizeCanvas() {
  // Guard: this also runs from the window 'resize' listener, which can fire
  // after switching to an image (no video/canvas mounted) — the original
  // would throw on the null refs.
  if (!videoCanvas.value || !video.value) return
  videoCanvas.value.width = video.value.offsetWidth
  videoCanvas.value.height = video.value.offsetHeight
}
// Pause video detection
// Ask the backend to pause detection for the current task, then pause playback.
function stopVideoDetection() {
  const data = {
    task_id: taskId.value,
  }
  localImageVideo
    .pauseDetect(data)
    .then(res => {
      if (res.status === 200) {
        videoCheckState.value = 0
        video.value?.pause()
      }
    })
    .catch(err => {
      // The original left this promise floating; surface failures instead.
      console.error('pauseDetect failed:', err)
    })
}
// Draw the image annotation boxes
function drawImgAnnotations() {if (imageCanvasCtx.value)imageCanvasCtx.value.clearRect(0, 0, imageCanvas.value.width, imageCanvas.value.height)detectionData.value = []imgAnnotation.value.forEach(item => {const [x1, y1, width, height, level, type, cls_res, id] = itemdetectionData.value.push({type: type,level: level,children: cls_res,id,})detectionResultRef.value.getData(detectionData.value)console.log(clickedBoxId.value, id)// 繪制矩形imageCanvasCtx.value.beginPath()imageCanvasCtx.value.strokeStyle = clickedBoxId.value === id ? 'red' : 'yellow'imageCanvasCtx.value.lineWidth = 2imageCanvasCtx.value.strokeRect(x1, y1, width, height)// 繪制 level 標簽imageCanvasCtx.value.font = '20px Arial'imageCanvasCtx.value.fillStyle = clickedBoxId.value === id ? 'red' : 'yellow'const labelText = type.toString()const labelX = x1const labelY = y1 - 5 // 調整標簽的垂直位置,使其位于矩形上方imageCanvasCtx.value.fillText(labelText, labelX, labelY)})
}// 繪制視頻的標注框
function drawAnnotations() {if (video.value) {videoCanvasCtx.value?.clearRect(0, 0, videoCanvas.value.width, videoCanvas.value.height)if (video.value.paused) returnconst currentFrame = Math.round(video.value.currentTime * frameRate.value)console.log('currentTime', video.value.currentTime)console.log('currentFrame', currentFrame)let boxes = []// 查找當前時間對應的標注boxes = annotations.get(currentFrame)if (currentFrame === 0) {boxes = annotations.get(1)} else if (currentFrame > annotations.size) {boxes = annotations.get(annotations.size)}detectionData.value = []if (boxes?.length === 0) {videoCanvasCtx.value.clearRect(0, 0, videoCanvas.value.width, videoCanvas.value.height)return}console.log('boxes', boxes)// 繪制每個框boxes?.forEach(box => {// 解構坐標(歸一化值)const [x1, y1, width, height, level, type, cls_res, id] = boxdetectionData.value.push({type: type,level: level,children: cls_res,id,})detectionResultRef.value.getData(detectionData.value)// 繪制矩形videoCanvasCtx.value.beginPath()videoCanvasCtx.value.strokeStyle = clickedBoxId.value === id ? 'red' : 'yellow'videoCanvasCtx.value.lineWidth = 2videoCanvasCtx.value.strokeRect(x1, y1, width, height)// 繪制 level 標簽videoCanvasCtx.value.font = '20px Arial'videoCanvasCtx.value.fillStyle = clickedBoxId.value === id ? 'red' : 'yellow'const labelText = type.toString()const labelX = x1const labelY = y1 - 5 // 調整標簽的垂直位置,使其位于矩形上方videoCanvasCtx.value.fillText(labelText, labelX, labelY)})}
}async function getMediaList(datasetId, type) {let param = {page: {limit: -1,page: -1,total: 0,},querys: [{group: 'advance',operation: 'EQUAL',property: 'dataset_id',relation: 'AND',value: datasetId,},],}let res = await localImageVideo.getImgList(param)mediaList.value = res.data.listif (mediaList.value && mediaList.value.length > 0) {mediaList.value.forEach(item => {let random = Math.ceil(Math.random() * 100000)item.src = `/_api/detectionDataset/preview?time=${random}&&filePath=${item.filePath.replaceAll('\\', '/')}`})}if (type == 'localupload') {activeMediaIndex.value = 0currentMedia.value = mediaList.value[0]console.log('currentMedia', currentMedia.value)} else {activeMediaIndex.value = mediaList.value.findIndex(item => item.id === currentMedia.value.id)}if (isImage.value) {previewImg()} else {previewVideo()}reset()
}
// 選擇數據集
async function handleImgAndVideo(datasetId: string, checkedList: string[]) {checkState.value = 0currentMedia.value = checkedList[0]getMediaList(datasetId, 'selectDataset')
}
// 本地上傳的數據集
function changeLoad(id) {datasetId.value = idcheckState.value = 0getMediaList(id, 'localupload')
}// 重置數據
function reset() {detectionData.value = []detectionResultRef.value.getData(detectionData.value)// 清除細節畫布const croppedPreview = document.getElementById('croppedPreview')croppedPreview.src = ''if (isImage.value) {imgAnnotation.value = []drawImgAnnotations()} else {annotations.clear()drawAnnotations()}
}function handleDataset() {if (!isImage.value) {stopVideoDetection()annotations.clear()}selectDatasetRef.value.open()
}// 本地上傳
function handleUpload() {if (!isImage.value) {stopVideoDetection()annotations.clear()}localUploadRef.value.open()
}// 上一個
async function prevPage() {if (!isImage.value) {console.log('sp')stopVideoDetection()annotations.clear()}if (activeMediaIndex.value > 0) {activeMediaIndex.value--currentMedia.value = mediaList.value[activeMediaIndex.value]// 上一個為圖片if (isImage.value) {previewImg()} else {// 上一個為視頻previewVideo()}reset()} else {message.warning('此為第一個影像')}
}// 下一個
async function nextPage() {if (!isImage.value) {console.log('sp')stopVideoDetection()annotations.clear()}if (activeMediaIndex.value < mediaList.value.length - 1) {activeMediaIndex.value++currentMedia.value = mediaList.value[activeMediaIndex.value]reset()// 下一個為圖片if (isImage.value) {previewImg()} else {// 下一個為視頻previewVideo()}} else {message.warning('此為最后一個影像')}
}
</script><style scoped lang="scss">
.analysis {width: 100%;height: 100%;background: #f3f5fb;.analysis-top {height: 60%;display: flex;.preview-wrap {//width: calc(100% - 300px);width: 100%;background: #fff;//margin-right: 10px;.top-btns {width: 100%;height: 50px;padding: 10px 20px;position: relative;display: flex;justify-content: space-between;align-items: center;.right-btn {.refresh {margin: 0 20px 0 10px;}.btn {margin-left: 10px;}}.name {font-weight: 600;font-size: 16px;}}.image-list {width: 100%;height: calc(100% - 52px);display: flex;justify-content: space-between;align-items: center;.media {width: calc(100% - 60px);height: 100%;position: relative;}.workspace {width: 100%;height: 100%;display: flex;.media-container {width: 100%;height: 100%;position: relative;display: inline-block;img,video {position: absolute;left: 50%;transform: translateX(-50%);}#imgCanvasOverlay,#canvasOverlay {position: absolute;top: 0;// left: 0;left: 50%;transform: translateX(-50%);pointer-events: none; /* 確保canvas不阻擋視頻操作 */z-index: 99;}}}.btn {height: 100%;line-height: calc(100% - 50px);width: 60px;display: flex;justify-content: center;align-items: center;// background: #ccc;}}}}.analysis-bottom {height: calc(40% - 10px);display: flex;margin-top: 10px;.result {width: calc(100% - 300px);height: 100%;background: #fff;margin-right: 10px;}.detail {width: 300px;background: #fff;padding: 10px;.magnifier-glass {padding: 10px 0;width: calc(100% - 20px);overflow: scroll;#croppedPreview {object-fit: contain;border: 2px solid #fff;}}}}
}
video::-webkit-media-controls-fullscreen-button {display: none;
}
</style>

本文來自互聯網用戶投稿,該文觀點僅代表作者本人,不代表本站立場。本站僅提供信息存儲空間服務,不擁有所有權,不承擔相關法律責任。
如若轉載,請注明出處:http://www.pswp.cn/pingmian/89808.shtml
繁體地址,請注明出處:http://hk.pswp.cn/pingmian/89808.shtml
英文地址,請注明出處:http://en.pswp.cn/pingmian/89808.shtml

如若內容造成侵權/違法違規/事實不符,請聯系多彩編程網進行投訴反饋email:809451989@qq.com,一經查實,立即刪除!

相關文章

如何解決pip安裝報錯ModuleNotFoundError: No module named ‘sqlalchemy’問題

【Python系列Bug修復PyCharm控制臺pip install報錯】如何解決pip安裝報錯ModuleNotFoundError: No module named ‘sqlalchemy’問題 摘要 在使用 PyCharm 控制臺執行 pip install sqlalchemy 后,仍然在代碼中提示 ModuleNotFoundError: No module named sqlalche…

第4.3節 iOS App生成追溯關系

iOS生成追溯關系的邏輯和Android端從用戶角度來說是一致的&#xff0c;就是需要通過開始和結束關聯用例&#xff0c;將用例信息與覆蓋率信息建立關系&#xff0c;然后再解析覆蓋率數據。 4.3.1 添加關聯用例彈層 關聯用例彈層和Android類似&#xff0c;只要你能設計出相應的樣…

STM32 USB鍵盤實現指南

概述 在STM32上實現鍵盤功能可以通過USB HID(人機接口設備)協議來實現,使STM32設備能被計算機識別為標準鍵盤。以下是完整的實現方案: 硬件準備 STM32開發板(支持USB,如STM32F103、STM32F4系列) USB接口(Micro USB或Type-C) 按鍵矩陣或單個按鍵 必要的電阻和連接…

繼電器基礎知識

繼電器是一種電控制器件,它具有隔離功能的自動開關元件,廣泛應用于遙控、遙測、通訊、自動控制、機電一體化及電力電子設備中,是最重要的控制元件之一。 繼繼電器的核心功能是通過小電流來控制大電流的通斷。它通常包含一個線圈和一組或多組觸點。當給繼電器的線圈施加一定…

MYSQL:庫的操作

文章目錄MYSQL&#xff1a;庫的操作1. 本文簡述2. 查看數據庫2.1 語法3. 創建數據庫3.1 語法3.2 示例3.2.1 創建一個簡單的數據庫3.2.2 使用 IF NOT EXISTS 防止重復創建3.2.3 再次運行&#xff0c;觀察現象3.2.4 查看這個警告到底是什么3.2.5 創建數據庫允許使用關鍵字4. 字符…

Xilinx FPGA XCKU115?2FLVA1517I AMD KintexUltraScale

XCKU115?2FLVA1517I 隸屬于 Xilinx &#xff08;AMD&#xff09;Kintex UltraScale 系列&#xff0c;基于領先的 20?nm FinFET 制程打造。該器件采用 1517?ball FCBGA&#xff08;FLVA1517&#xff09;封裝&#xff0c;速度等級 ?2&#xff0c;可實現高達 725?MHz 的核心邏…

Linux Ubuntu安裝教程|附安裝文件?安裝教程

[軟件名稱]: Linux Ubuntu18.0 [軟件大小]: 1.8GB [安裝環境]: VMware [夸克網盤接] 鏈接&#xff1a;https://pan.quark.cn/s/971f685256ef &#xff08;建議用手機保存到網盤后&#xff0c;再用電腦下載&#xff09;更多免費軟件見https://docs.qq.com/sheet/DRkdWVFFCWm9UeG…

深入解析Hadoop YARN:三層調度模型與資源管理機制

Hadoop YARN概述與產生背景從MapReduce到YARN的演進之路在Hadoop早期版本中&#xff0c;MapReduce框架采用JobTracker/TaskTracker架構&#xff0c;這種設計逐漸暴露出嚴重局限性。JobTracker需要同時處理資源管理和作業控制兩大核心功能&#xff0c;隨著集群規模擴大&#xff…

Pycaita二次開發基礎代碼解析:邊線提取、路徑追蹤與曲線固定

本文將深入剖析CATIA二次開發中三個核心類方法&#xff1a;邊線提取特征創建、元素結構路徑查找和草圖曲線固定技術。通過逐行解讀代碼實現&#xff0c;揭示其在工業設計中的專業應用價值和技術原理。一、邊線提取技術&#xff1a;幾何特征的精確捕獲與復用1.1 方法功能全景ext…

Linux 任務調度在進程管理中的關系和運行機制

&#x1f4d6; 推薦閱讀&#xff1a;《Yocto項目實戰教程:高效定制嵌入式Linux系統》 &#x1f3a5; 更多學習視頻請關注 B 站&#xff1a;嵌入式Jerry Linux 任務調度在進程管理中的關系和運行機制 Linux 內核中的“任務調度”是進程管理系統的核心部分&#xff0c;相互關聯而…

JAVA后端開發—— JWT(JSON Web Token)實踐

1. 什么是HTTP請求頭 (Request Headers)&#xff1f;當你的瀏覽器或手機App向服務器發起一個HTTP請求時&#xff0c;這個請求并不僅僅包含你要訪問的URL&#xff08;比如 /logout&#xff09;和可能的數據&#xff08;請求體&#xff09;&#xff0c;它還附帶了一堆“元數據&am…

【SVM smote】MAP - Charting Student Math Misunderstandings

針對數據不平衡問題,用調整類別權重的方式來處理數據不平衡問題,同時使用支持向量機(SVM)模型進行訓練。 我們通過使用 SMOTE(Synthetic Minority Over-sampling Technique)進行過采樣,增加少數…

repmgr+pgbouncer實現對業務透明的高可用切換

本方案說明 PostgreSQL repmgr&#xff1a;實現主從自動故障檢測與切換&#xff08;Failover&#xff09;。PgBouncer&#xff1a;作為連接池&#xff0c;屏蔽后端數據庫變動&#xff0c;提供透明連接。動態配置更新&#xff1a;通過repmgr組件的promote_command階段觸發腳本…

查找服務器上存在線程泄露的進程

以下是一個改進的命令&#xff0c;可以列出所有線程數大于200的進程及其PID和線程數&#xff1a; find /proc -maxdepth 1 -type d -regex /proc/[0-9] -exec sh -c for pid_dir dopid$(basename "$pid_dir")if [ -f "$pid_dir/status" ]; thenthreads$(aw…

Facebook 開源多季節性時間序列數據預測工具:Prophet 飽和預測 Saturating Forecasts

文中內容僅限技術學習與代碼實踐參考&#xff0c;市場存在不確定性&#xff0c;技術分析需謹慎驗證&#xff0c;不構成任何投資建議。 Prophet 是一種基于加法模型的時間序列數據預測程序&#xff0c;在該模型中&#xff0c;非線性趨勢與年、周、日季節性以及節假日效應相匹配。…

從單線程到云原生:Redis 二十年演進全景與內在機理深剖

——從 1.0 到 7.2&#xff0c;一窺數據結構、網絡模型、持久化、復制、高可用與生態協同的底層脈絡&#xff08;一&#xff09;序章&#xff1a;為什么是 Redis 1999 年&#xff0c;Salvatore Sanfilippo 在開發一個實時訪客分析系統時&#xff0c;發現傳統磁盤型數據庫無法在…

得了甲亢軍隊文職體檢能過嗎

根據軍隊文職體檢現行標準&#xff0c;甲亢患者能否通過體檢需分情況判定&#xff0c;核心取決于病情控制狀態、治療結果及穩定時長。結合《軍隊選拔軍官和文職人員體檢通用標準》及補充規定&#xff0c;具體分析如下&#xff1a;?? 一、可直接通過體檢的情況臨床治愈滿1年且…

【編程語言】C、C++、C#深度對比:三種語言的演進歷程與應用場景

一、語言概述與歷史背景 &#xff08;一&#xff09;C語言&#xff1a;系統編程的基石誕生背景 1972年由Dennis Ritchie在貝爾實驗室開發為了重寫UNIX操作系統而創造從B語言演化而來&#xff0c;增加了數據類型設計目標&#xff1a;簡潔、高效、可移植設計哲學 “相信程序員”&…

《計算機網絡》實驗報告五 DNS協議分析與測量

目 錄 1、實驗目的 2、實驗環境 3、實驗內容 3.1 查看和配置本機的DNS系統 3.2 DNS信息測量 3.3 DNS協議分析 4、實驗結果與分析 4.1 查看和配置本機的DNS系統 4.2 DNS信息測量 4.3 DNS協議分析 5、實驗小結 5.1 問題與解決辦法&#xff1a; 5.2 心得體會&#x…

Python工廠方法模式詳解:從理論到實戰

一、工廠方法模式核心概念 工廠方法模式&#xff08;Factory Method Pattern&#xff09;是一種創建型設計模式&#xff0c;屬于經典23種設計模式之一。其核心思想是&#xff1a;定義一個創建對象的接口&#xff0c;但將具體對象的實例化過程延遲到子類中實現。這種模式通過引入…