這段代碼實現了基于運動補償的前景檢測算法。
主要功能包括:
- 運動補償模塊:使用基于網格的 KLT 特征跟蹤算法計算兩幀之間的運動,然后通過單應性變換實現幀間運動補償。
- 前景檢測模塊:結合兩幀運動補償結果,通過幀間差分計算前景掩碼。
- 異常處理:添加了圖像加載檢查和異常捕獲,提高了代碼的健壯性。
- 路徑處理:自動創建保存目錄,避免因目錄不存在導致的錯誤。
使用時需要提供三幀連續圖像:兩個參考幀和當前幀。代碼會計算出前景掩碼并保存為圖像文件。本文件同時包含 Python 與 C++ 兩個版本的實現,二者邏輯一致。
import cv2
import numpy as np
import os
import sys


def motion_compensate(frame1, frame2):
    """Estimate inter-frame motion with grid-based KLT tracking and warp
    frame1 towards frame2 via a homography.

    Args:
        frame1: previous frame (grayscale here; any format accepted by
            calcOpticalFlowPyrLK).
        frame2: current frame.

    Returns:
        compensated: frame1 warped by the estimated homography.
        mask: uint8 image, 255 outside the warped image footprint, 0 inside.
        avg_dst: mean tracked-point displacement in pixels (outliers removed).
        motion_x: mean x displacement.
        motion_y: mean y displacement.
        homography_matrix: 3x3 homography estimated between the frames.
    """
    # Lucas-Kanade optical-flow parameters.
    lk_params = dict(winSize=(15, 15), maxLevel=3,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.003))

    width = frame2.shape[1]
    height = frame2.shape[0]
    scale = 2  # upscale the frames to get more precise tracking

    # NOTE(review): the fixed 960x540 base resolution assumes that source
    # aspect ratio — other inputs are distorted here; confirm with callers.
    frame1_grid = cv2.resize(frame1, (960 * scale, 540 * scale), dst=None,
                             interpolation=cv2.INTER_CUBIC)
    frame2_grid = cv2.resize(frame2, (960 * scale, 540 * scale), dst=None,
                             interpolation=cv2.INTER_CUBIC)
    width_grid = frame2_grid.shape[1]
    height_grid = frame2_grid.shape[0]

    gridSizeW = 32 * 2  # grid cell width
    gridSizeH = 24 * 2  # grid cell height

    # Seed one feature point at the centre of each grid cell.
    p1 = []
    grid_numW = int(width_grid / gridSizeW - 1)
    grid_numH = int(height_grid / gridSizeH - 1)
    for i in range(grid_numW):
        for j in range(grid_numH):
            point = (np.float32(i * gridSizeW + gridSizeW / 2.0),
                     np.float32(j * gridSizeH + gridSizeH / 2.0))
            p1.append(point)
    pts_num = grid_numW * grid_numH
    pts_prev = np.array(p1).reshape(pts_num, 1, 2)

    # Track the grid points from frame1 to frame2.
    pts_cur, st, err = cv2.calcOpticalFlowPyrLK(frame1_grid, frame2_grid,
                                                pts_prev, None, **lk_params)

    # Keep only successfully tracked points.
    good_new = pts_cur[st == 1]   # tracked locations in the current frame
    good_old = pts_prev[st == 1]  # corresponding locations in the previous frame

    # Per-point displacement, dropping implausibly large motions (> 50 px).
    motion_distance = []
    translate_x = []
    translate_y = []
    for new, old in zip(good_new, good_old):
        a, b = new.ravel()
        c, d = old.ravel()
        dist = np.sqrt((a - c) * (a - c) + (b - d) * (b - d))
        if dist > 50:
            continue
        motion_distance.append(dist)
        translate_x.append(a - c)
        translate_y.append(b - d)

    motion_x = np.mean(translate_x) if translate_x else 0
    motion_y = np.mean(translate_y) if translate_y else 0
    avg_dst = np.mean(motion_distance) if motion_distance else 0

    # Near-identity fallback used whenever a reliable homography is unavailable.
    fallback = np.array([[0.999, 0, 0], [0, 0.999, 0], [0, 0, 1]])
    if len(good_old) < 15:
        # Too few tracked points for a stable RANSAC estimate.
        homography_matrix = fallback
    else:
        homography_matrix, status = cv2.findHomography(good_new, good_old,
                                                       cv2.RANSAC, 3.0)
        if homography_matrix is None:
            # BUGFIX: findHomography returns None when RANSAC fails to find a
            # model; warpPerspective / np.linalg.inv would crash on it.
            homography_matrix = fallback

    # Warp frame1 with the (inverse) homography for motion compensation.
    compensated = cv2.warpPerspective(frame1, homography_matrix, (width, height),
                                      flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)

    # Build a mask that is 255 outside the warped image footprint.
    vertex = np.array([[0, 0], [width, 0], [width, height], [0, height]],
                      dtype=np.float32).reshape(-1, 1, 2)
    homo_inv = np.linalg.inv(homography_matrix)
    vertex_trans = cv2.perspectiveTransform(vertex, homo_inv)
    vertex_transformed = np.array(vertex_trans, dtype=np.int32).reshape(1, 4, 2)
    im = np.zeros(frame1.shape[:2], dtype='uint8')
    cv2.polylines(im, vertex_transformed, 1, 255)
    cv2.fillPoly(im, vertex_transformed, 255)
    mask = 255 - im

    return compensated, mask, avg_dst, motion_x, motion_y, homography_matrix


def FD_mask(lastFrame1, lastFrame2, currentFrame, save_path='data/mask.jpg'):
    """Compute a foreground mask from three frames via doubly
    motion-compensated frame differencing and save it as an image.

    Args:
        lastFrame1: first reference frame (BGR).
        lastFrame2: second reference frame (BGR).
        currentFrame: current frame (BGR).
        save_path: output path for the mask image.

    Returns:
        The fused frame-difference image (uint8).
    """
    # Preprocessing: Gaussian blur then grayscale conversion.
    lastFrame1 = cv2.GaussianBlur(lastFrame1, (11, 11), 0)
    lastFrame1 = cv2.cvtColor(lastFrame1, cv2.COLOR_BGR2GRAY)
    lastFrame2 = cv2.GaussianBlur(lastFrame2, (11, 11), 0)
    lastFrame2 = cv2.cvtColor(lastFrame2, cv2.COLOR_BGR2GRAY)
    currentFrame = cv2.GaussianBlur(currentFrame, (11, 11), 0)
    currentFrame = cv2.cvtColor(currentFrame, cv2.COLOR_BGR2GRAY)

    # Difference between lastFrame2 and motion-compensated lastFrame1.
    img_compensate1, mask1, avg_dist1, motion_x1, motion_y1, homo_matrix = \
        motion_compensate(lastFrame1, lastFrame2)
    frameDiff1 = cv2.absdiff(lastFrame2, img_compensate1)

    # Difference between lastFrame2 and motion-compensated currentFrame.
    img_compensate2, mask2, avg_dist2, motion_x2, motion_y2, homo_matrix2 = \
        motion_compensate(currentFrame, lastFrame2)
    frameDiff2 = cv2.absdiff(lastFrame2, img_compensate2)

    # BUGFIX: `(frameDiff1 + frameDiff2) / 2` adds two uint8 arrays, and
    # numpy uint8 addition wraps on overflow, corrupting bright regions.
    # addWeighted averages in a wider type (matching the C++ version's
    # CV_32F accumulation) and saturates back to uint8.
    frameDiff = cv2.addWeighted(frameDiff1, 0.5, frameDiff2, 0.5, 0)

    # BUGFIX: os.makedirs('') raises when save_path has no directory part.
    save_dir = os.path.dirname(save_path)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    cv2.imwrite(save_path, frameDiff)
    print(f'前景掩碼已保存至: {save_path}')
    return frameDiff


if __name__ == "__main__":
    # Example: load three consecutive frames and compute the foreground mask.
    # Make sure these images exist, or point the paths at your own frames.
    try:
        lastFrame1 = cv2.imread('data/Test_images/images/phantom05_0600.jpg')
        lastFrame3 = cv2.imread('data/Test_images/images/phantom05_0602.jpg')
        currentFrame = cv2.imread('data/Test_images/images/phantom05_0604.jpg')
        if lastFrame1 is None or lastFrame3 is None or currentFrame is None:
            print("錯誤: 無法加載圖像,請檢查文件路徑!")
        else:
            FD_mask(lastFrame1, lastFrame3, currentFrame)
    except Exception as e:
        print(f"程序執行出錯: {e}")
#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>
#include <numeric>
#include <string>
#include <cmath>

using namespace cv;
using namespace std;

// Outputs of motion_compensate().
struct MotionCompensationResult {
    Mat compensated;        // frame1 warped towards frame2
    Mat mask;               // 255 outside the warped image footprint, 0 inside
    float avg_dst;          // mean tracked-point displacement in pixels
    float motion_x;         // mean x displacement
    float motion_y;         // mean y displacement
    Mat homography_matrix;  // 3x3 homography estimated between the frames
};

// Estimate inter-frame motion with grid-based KLT tracking and warp frame1
// towards frame2 via a homography.
MotionCompensationResult motion_compensate(const Mat& frame1, const Mat& frame2) {
    // KLT tracking parameters.
    TermCriteria term_criteria(TermCriteria::EPS | TermCriteria::COUNT, 30, 0.003);
    Size win_size(15, 15);
    int max_level = 3;

    // Upscale the frames to get more precise tracking.
    // NOTE(review): the fixed 960x540 base resolution assumes that source
    // aspect ratio — confirm for other inputs.
    int scale = 2;
    Mat frame1_grid, frame2_grid;
    resize(frame1, frame1_grid, Size(960 * scale, 540 * scale), 0, 0, INTER_CUBIC);
    resize(frame2, frame2_grid, Size(960 * scale, 540 * scale), 0, 0, INTER_CUBIC);

    // Seed one feature point at the centre of each grid cell.
    int gridSizeW = 32 * 2;
    int gridSizeH = 24 * 2;
    int grid_numW = frame2_grid.cols / gridSizeW - 1;
    int grid_numH = frame2_grid.rows / gridSizeH - 1;
    vector<Point2f> p1;
    for (int i = 0; i < grid_numW; i++) {
        for (int j = 0; j < grid_numH; j++) {
            p1.push_back(Point2f(i * gridSizeW + gridSizeW / 2.0f,
                                 j * gridSizeH + gridSizeH / 2.0f));
        }
    }

    // Track the grid points from frame1 to frame2.
    vector<Point2f> pts_cur;
    vector<uchar> status;
    vector<float> err;
    calcOpticalFlowPyrLK(frame1_grid, frame2_grid, p1, pts_cur, status, err,
                         win_size, max_level, term_criteria);

    // Keep only successfully tracked points.
    vector<Point2f> good_new, good_old;
    for (size_t i = 0; i < status.size(); i++) {
        if (status[i]) {
            good_new.push_back(pts_cur[i]);
            good_old.push_back(p1[i]);
        }
    }

    // Per-point displacement, dropping implausibly large motions (> 50 px).
    vector<float> motion_distance, translate_x, translate_y;
    for (size_t i = 0; i < good_new.size(); i++) {
        float dx = good_new[i].x - good_old[i].x;
        float dy = good_new[i].y - good_old[i].y;
        float dist = sqrt(dx * dx + dy * dy);
        if (dist > 50) continue;
        motion_distance.push_back(dist);
        translate_x.push_back(dx);
        translate_y.push_back(dy);
    }

    float avg_dst = 0, motion_x = 0, motion_y = 0;
    if (!motion_distance.empty()) {
        avg_dst = accumulate(motion_distance.begin(), motion_distance.end(), 0.0f)
                  / motion_distance.size();
    }
    if (!translate_x.empty()) {
        motion_x = accumulate(translate_x.begin(), translate_x.end(), 0.0f)
                   / translate_x.size();
        motion_y = accumulate(translate_y.begin(), translate_y.end(), 0.0f)
                   / translate_y.size();
    }

    // Estimate the homography; fall back to a near-identity transform when a
    // reliable estimate is unavailable.
    const Mat fallback = (Mat_<double>(3, 3) << 0.999, 0, 0, 0, 0.999, 0, 0, 0, 1);
    Mat homography_matrix;
    if (good_old.size() < 15) {
        // Too few tracked points for a stable RANSAC estimate.
        homography_matrix = fallback;
    } else {
        homography_matrix = findHomography(good_new, good_old, RANSAC, 3.0);
        // BUGFIX: findHomography returns an empty Mat when RANSAC fails to
        // find a model; inv() / warpPerspective on it would throw.
        if (homography_matrix.empty()) {
            homography_matrix = fallback;
        }
    }

    // Warp frame1 with the (inverse) homography for motion compensation.
    Mat compensated;
    warpPerspective(frame1, compensated, homography_matrix,
                    Size(frame1.cols, frame1.rows), INTER_LINEAR + WARP_INVERSE_MAP);

    // Build a mask that is 255 outside the warped image footprint.
    vector<Point2f> vertex = {
        Point2f(0.0f, 0.0f),
        Point2f(static_cast<float>(frame1.cols), 0.0f),
        Point2f(static_cast<float>(frame1.cols), static_cast<float>(frame1.rows)),
        Point2f(0.0f, static_cast<float>(frame1.rows)),
    };
    vector<Point2f> vertex_trans;
    perspectiveTransform(vertex, vertex_trans, homography_matrix.inv());
    vector<Point> vertex_transformed;
    for (const auto& pt : vertex_trans) {
        vertex_transformed.push_back(Point(static_cast<int>(pt.x),
                                           static_cast<int>(pt.y)));
    }
    Mat mask = Mat::zeros(frame1.size(), CV_8UC1);
    vector<vector<Point>> contours{vertex_transformed};
    polylines(mask, contours, true, Scalar(255), 1);
    fillPoly(mask, contours, Scalar(255));
    mask = 255 - mask;

    return {compensated, mask, avg_dst, motion_x, motion_y, homography_matrix};
}

// Compute a foreground mask from three frames via doubly motion-compensated
// frame differencing, then write it to save_path.
void FD_mask(const Mat& lastFrame1, const Mat& lastFrame2, const Mat& currentFrame,
             const string& save_path = "mask.jpg") {
    // Preprocessing: Gaussian blur then grayscale conversion.
    Mat lastFrame1_gray, lastFrame2_gray, currentFrame_gray;
    GaussianBlur(lastFrame1, lastFrame1_gray, Size(11, 11), 0);
    GaussianBlur(lastFrame2, lastFrame2_gray, Size(11, 11), 0);
    GaussianBlur(currentFrame, currentFrame_gray, Size(11, 11), 0);
    cvtColor(lastFrame1_gray, lastFrame1_gray, COLOR_BGR2GRAY);
    cvtColor(lastFrame2_gray, lastFrame2_gray, COLOR_BGR2GRAY);
    cvtColor(currentFrame_gray, currentFrame_gray, COLOR_BGR2GRAY);

    // Difference between lastFrame2 and motion-compensated lastFrame1.
    auto result1 = motion_compensate(lastFrame1_gray, lastFrame2_gray);
    Mat frameDiff1;
    absdiff(lastFrame2_gray, result1.compensated, frameDiff1);

    // Difference between lastFrame2 and motion-compensated currentFrame.
    auto result2 = motion_compensate(currentFrame_gray, lastFrame2_gray);
    Mat frameDiff2;
    absdiff(lastFrame2_gray, result2.compensated, frameDiff2);

    // Average the two diffs in float to avoid uint8 overflow, then convert
    // back to 8-bit for writing.
    Mat frameDiff;
    frameDiff1.convertTo(frameDiff1, CV_32F);
    frameDiff2.convertTo(frameDiff2, CV_32F);
    frameDiff = (frameDiff1 + frameDiff2) / 2;
    frameDiff.convertTo(frameDiff, CV_8U);

    imwrite(save_path, frameDiff);
    cout << "done!" << endl;
}

int main() {
    // Load three consecutive frames.
    Mat lastFrame1 = imread("data/Test_images/images/phantom05_0600.jpg");
    Mat lastFrame3 = imread("data/Test_images/images/phantom05_0602.jpg");
    Mat currentFrame = imread("data/Test_images/images/phantom05_0604.jpg");

    // Bail out if any image failed to load.
    if (lastFrame1.empty() || lastFrame3.empty() || currentFrame.empty()) {
        cout << "無法加載圖像!" << endl;
        return -1;
    }

    // Run motion-compensated frame differencing.
    FD_mask(lastFrame1, lastFrame3, currentFrame);
    return 0;
}