Goal

- Press the space bar → grab the current camera frame;
- Upload the image to the large-model API and have it "describe what it sees";
- Once the description comes back, overlay it on the video as subtitles;
- Keep showing the result until the next space-bar press.
🧠 What you need

- A Python environment (3.8+)
- OpenCV (cv2)
- An API key for Alibaba's Qwen (DashScope)
- Install the dependencies (the code below also uses ultralytics and the openai SDK):

pip install opencv-python requests pillow ultralytics openai
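With the environment installed, a quick sanity check confirms that OpenCV can actually open your camera before wiring everything together (a minimal sketch; camera index 0 is an assumption and may differ on your machine):

```python
import cv2

# Try to open the default camera (index 0 may differ on your machine)
cap = cv2.VideoCapture(0)
print("Camera opened:", cap.isOpened())

ret, frame = cap.read()
if ret:
    print("Frame size:", frame.shape)  # e.g. (720, 1280, 3)
cap.release()
```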
We also want to keep YOLOv8's real-time object detection, and on a space-bar press capture the current frame, upload it to the large-model API for a "look and describe" call, then overlay the returned Chinese description on the video as subtitles.
Overall feature integration:

| Feature | Description |
|---|---|
| YOLOv8 real-time detection | Kept as-is; detects objects and draws labels |
| Space bar triggers a Qwen description | Press space to capture the frame and upload it |
| Chinese subtitle display | Overlay the text returned by Qwen on the video frame |
| Chinese font support | Use PIL to draw Chinese text |
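Before the full listing, here is the "look and describe" call in isolation: encode a JPEG as base64 and send it through DashScope's OpenAI-compatible endpoint. This is a minimal, non-streaming sketch; the model name, endpoint, and prompt follow the code below, while `describe_jpeg`, the `DASHSCOPE_API_KEY` environment variable, and the `test.jpg` file name are assumptions for illustration:

```python
import base64
import os
from openai import OpenAI

def describe_jpeg(path: str) -> str:
    """Send one local JPEG to Qwen-VL and return its one-sentence description."""
    with open(path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode("utf-8")

    client = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),  # assumed env var; or paste your key
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    resp = client.chat.completions.create(
        model="qwen-vl-plus-2025-01-25",
        messages=[{
            "role": "user",
            "content": [
                {"type": "text", "text": "這張圖片是什么,一句話來描述"},
                {"type": "image_url",
                 "image_url": {"url": f"data:image/jpeg;base64,{b64}"}},
            ],
        }],
    )
    return resp.choices[0].message.content

# print(describe_jpeg("test.jpg"))  # hypothetical file name
```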
```python
import cv2
import numpy as np
from ultralytics import YOLO
from PIL import ImageFont, ImageDraw, Image
import requests
from io import BytesIO
import os
from openai import OpenAI
import base64
from typing import Optional, Union
import logging
import json
import time
from collections import deque
import threading

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Latest caption text
current_caption = ""
# Caption queue: (text, timestamp)
captions = deque()


def img2text(base64_image: str, prompt: str = "這張圖片是什么,一句話來描述") -> Optional[Union[str, dict]]:
    """Turn an image into a text description via Qwen-VL.

    Args:
        base64_image (str): Base64-encoded JPEG image.
        prompt (str): Prompt text.

    Returns:
        Optional[Union[str, dict]]: API result, or None on failure.
    """
    global current_caption  # updated by the streaming loop

    if not base64_image:
        return None

    client = OpenAI(
        api_key="sk-02128251fc324da9800c2553d67fa2ca",
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    try:
        completion = client.chat.completions.create(
            model="qwen-vl-plus-2025-01-25",
            messages=[{
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {"type": "image_url",
                     "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}},
                ],
            }],
            stream=True,
            stream_options={"include_usage": False},
        )

        # Accumulate streamed tokens; whenever a sentence ends, push it as a caption
        current_line = ""
        for chunk in completion:
            result = json.loads(chunk.model_dump_json())
            word = result["choices"][0]["delta"]["content"]
            current_line += word
            print("Response:", word)
            if "。" in word or "?" in word or "!" in word:
                captions.append((current_line.strip(), time.time()))
                current_line = ""
        if current_line.strip():  # keep the final, unterminated fragment too
            captions.append((current_line.strip(), time.time()))
    except Exception as e:
        logger.error(f"API call error: {e}")
        return None


# ========== Chinese font ==========
font_path = "font/AlimamaDaoLiTi-Regular.ttf"  # macOS example
font = ImageFont.truetype(font_path, 28)

# ========== Load the YOLO model ==========
model = YOLO("/Users/lianying/Desktop/yolo/yolov8n.pt")  # swap in yolov8n.pt / yolov8s.pt, etc.

# ========== Camera ==========
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # YOLOv8 real-time detection
    results = model(frame, verbose=False)[0]
    annotated_frame = results.plot()  # annotated frame with boxes and labels

    # Overlay the captions returned by Qwen (only those from the last 3 seconds)
    now = time.time()
    frame_pil = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(frame_pil)
    y_offset = 30
    for caption, ts in list(captions):
        if now - ts <= 3:
            draw.rectangle([20, y_offset - 10, 1200, y_offset + 35], fill=(0, 0, 0, 128))
            draw.text((30, y_offset), caption, font=font, fill="yellow")
            print(caption)
            y_offset += 50
        else:
            captions.popleft()  # drop expired captions

    frame_final = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)
    cv2.imshow("YOLOv8 detection + image caption (press space)", frame_final)

    key = cv2.waitKey(1)
    if key == 27:  # ESC to quit
        break
    elif key == 32:  # space to trigger
        print("Sending the current frame to Qwen, please wait...")
        _, buffer = cv2.imencode(".jpg", frame)
        base64_str = base64.b64encode(buffer).decode("utf-8")
        print("Base64 (JPEG):", base64_str[:50] + "...")
        img2text(base64_str)

cap.release()
cv2.destroyAllWindows()
```
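A practical note on the font: `ImageFont.truetype` raises an error if `font/AlimamaDaoLiTi-Regular.ttf` is not present. Any font file covering CJK glyphs works as a substitute; here is a hedged fallback sketch (the paths below are common defaults, verify they actually exist on your machine):

```python
import platform
from PIL import ImageFont

# Pick a CJK-capable font depending on the OS (example paths; adjust as needed)
if platform.system() == "Darwin":
    font_path = "/System/Library/Fonts/STHeiti Medium.ttc"   # macOS
elif platform.system() == "Windows":
    font_path = "C:/Windows/Fonts/msyh.ttc"                   # Microsoft YaHei
else:
    font_path = "/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc"  # Noto CJK on Linux

font = ImageFont.truetype(font_path, 28)
```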
Why does the video freeze when you press space?

In the code above, pressing space calls img2text(base64_str) synchronously. That one call covers the network request, the model's processing, the streamed output, and the screenshot handling, shuttling data back and forth between your machine and the remote server. While it runs (you can measure the stall with the small timing sketch after this list):

- YOLO video frames stop refreshing;
- cv2.imshow() is blocked;
- the window appears "stuck" until the API responds.
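If you want to confirm that the API call is what stalls the loop, a small diagnostic (not part of the original code) is to time it inside the space-key branch:

```python
import time

# Drop this around the synchronous call in the space-key branch (diagnostic only)
start = time.time()
img2text(base64_str)  # network round trip + streamed response
print(f"Display loop was blocked for {time.time() - start:.1f} s")
```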
How do we fix the stutter?

Run the image description in a separate thread so the main loop is never blocked.

Add the threading module to your main program:

import threading

Then change the code that triggers the image description to:

```python
elif key == 32:  # space to trigger
    print("Sending the current frame to Qwen, please wait...")

    # Upload the image and generate captions asynchronously
    def async_caption():
        _, buffer = cv2.imencode(".jpg", frame)
        base64_str = base64.b64encode(buffer).decode("utf-8")
        img2text(base64_str)  # the function defined earlier

    threading.Thread(target=async_caption, daemon=True).start()
```
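One caveat with the closure above: `frame` is looked up when the worker thread actually runs, so it may encode a slightly later frame than the one that was on screen when space was pressed. If that matters, snapshot the frame and pass it in explicitly; a hedged variant of the same trigger (not the original code):

```python
elif key == 32:  # space: caption the exact frame that was on screen
    print("Sending the current frame to Qwen, please wait...")
    snapshot = frame.copy()  # freeze this frame; the main loop keeps reading new ones

    def async_caption(img):
        _, buffer = cv2.imencode(".jpg", img)
        base64_str = base64.b64encode(buffer).decode("utf-8")
        img2text(base64_str)

    threading.Thread(target=async_caption, args=(snapshot,), daemon=True).start()
```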
Overall flow after the change

- The main thread handles frame capture + YOLO detection + subtitle rendering.
- The image-description call runs in its own thread; the upload and the response are handled in the background and never touch the main loop (a thread-safety note follows this list).
- Net effect: after pressing space the camera feed no longer stutters, and the subtitles appear on their own a few seconds later.
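A note on thread safety: the worker thread appends to `captions` while the main thread pops expired entries. CPython's `deque.append` and `popleft` are individually atomic, so this particular pattern works without a lock; if you later combine several operations that must stay consistent, guard them with a `threading.Lock`. A minimal sketch, illustrative only (`push_caption` is not part of the original code):

```python
import threading
import time
from collections import deque

captions = deque()
captions_lock = threading.Lock()

def push_caption(text: str) -> None:
    # Group related operations under one lock when they must appear atomic together
    with captions_lock:
        while len(captions) > 10:          # e.g. cap the backlog of captions
            captions.popleft()
        captions.append((text, time.time()))
```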
Complete code
```python
import cv2
import numpy as np
from ultralytics import YOLO
from PIL import ImageFont, ImageDraw, Image
import requests
from io import BytesIO
import os
from openai import OpenAI
import base64
from typing import Optional, Union
import logging
import json
import time
from collections import deque
import threading

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Latest caption text
current_caption = ""
# Caption queue: (text, timestamp)
captions = deque()


def img2text(base64_image: str, prompt: str = "這張圖片是什么,一句話來描述") -> Optional[Union[str, dict]]:
    """Turn an image into a text description via Qwen-VL.

    Args:
        base64_image (str): Base64-encoded JPEG image.
        prompt (str): Prompt text.

    Returns:
        Optional[Union[str, dict]]: API result, or None on failure.
    """
    global current_caption  # updated by the streaming loop

    if not base64_image:
        return None

    client = OpenAI(
        api_key="sk-02128251fc324da9800c2553d67fa2ca",
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    try:
        completion = client.chat.completions.create(
            model="qwen-vl-plus-2025-01-25",
            messages=[{
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {"type": "image_url",
                     "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}},
                ],
            }],
            stream=True,
            stream_options={"include_usage": False},
        )

        # Accumulate streamed tokens; whenever a sentence ends, push it as a caption
        current_line = ""
        for chunk in completion:
            result = json.loads(chunk.model_dump_json())
            word = result["choices"][0]["delta"]["content"]
            current_line += word
            print("Response:", word)
            if "。" in word or "?" in word or "!" in word:
                captions.append((current_line.strip(), time.time()))
                current_line = ""
        if current_line.strip():  # keep the final, unterminated fragment too
            captions.append((current_line.strip(), time.time()))
    except Exception as e:
        logger.error(f"API call error: {e}")
        return None


# ========== Chinese font ==========
font_path = "font/AlimamaDaoLiTi-Regular.ttf"  # macOS example
font = ImageFont.truetype(font_path, 28)

# ========== Load the YOLO model ==========
model = YOLO("/Users/lianying/Desktop/yolo/yolov8n.pt")  # swap in yolov8n.pt / yolov8s.pt, etc.

# ========== Camera ==========
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # YOLOv8 real-time detection
    results = model(frame, verbose=False)[0]
    annotated_frame = results.plot()  # annotated frame with boxes and labels

    # Overlay the captions returned by Qwen (only those from the last 3 seconds)
    now = time.time()
    frame_pil = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(frame_pil)
    y_offset = 30
    for caption, ts in list(captions):
        if now - ts <= 3:
            draw.rectangle([20, y_offset - 10, 1200, y_offset + 35], fill=(0, 0, 0, 128))
            draw.text((30, y_offset), caption, font=font, fill="yellow")
            print(caption)
            y_offset += 50
        else:
            captions.popleft()  # drop expired captions

    frame_final = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)
    cv2.imshow("YOLOv8 detection + image caption (press space)", frame_final)

    key = cv2.waitKey(1)
    if key == 27:  # ESC to quit
        break
    elif key == 32:  # space to trigger
        print("Sending the current frame to Qwen, please wait...")

        # Upload the image and generate captions asynchronously
        def async_caption():
            _, buffer = cv2.imencode(".jpg", frame)
            base64_str = base64.b64encode(buffer).decode("utf-8")
            img2text(base64_str)  # the function defined above

        threading.Thread(target=async_caption, daemon=True).start()

cap.release()
cv2.destroyAllWindows()
```
Result screenshots:
Only a few screenshots are shown; the rest are not convenient to share.
Video:
yolo8實現小藝看世界 (YOLOv8: Xiaoyi sees the world)