Contents
- Linear, Position-Invariant Degradations
- Estimating the Degradation Function
- Estimating the Degradation Function by Image Observation
- Estimating the Degradation Function by Experimentation
- Estimating the Degradation Function by Modeling
- Motion Blur Function
- OpenCV Motion Blur
Some of the results in this section are not very good. I need to find more material and work through this again; if anyone can recommend relevant references, I would be very grateful.
Linear, Position-Invariant Degradations
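For reference, the linear, position-invariant degradation model assumed throughout this section is
$$g(x, y) = h(x, y) \star f(x, y) + \eta(x, y)$$
or, in the frequency domain,
$$G(u, v) = H(u, v)F(u, v) + N(u, v)$$
so "estimating the degradation" below means estimating $H(u, v)$.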
# Butterworth notch reject filter (BNRF)
img_temp = np.zeros([512, 512])
BNF_1 = butterworth_notch_resistant_filter(img_temp, radius=20, uk=-80, vk=60)
BNF_2 = butterworth_notch_resistant_filter(img_temp, radius=10, uk=30, vk=80)
BNF_3 = butterworth_notch_resistant_filter(img_temp, radius=10, uk=-30, vk=80)

BNF_dst = BNF_1 * BNF_2 * BNF_3

plt.figure(figsize=(16, 16))
plt.subplot(221), plt.imshow(BNF_1, 'gray'), plt.title('BNF_1')
plt.subplot(222), plt.imshow(BNF_2, 'gray'), plt.title('BNF_2')
plt.subplot(223), plt.imshow(BNF_3, 'gray'), plt.title('BNF_3')
plt.subplot(224), plt.imshow(BNF_dst, 'gray'), plt.title('BNF_dst')
plt.tight_layout()
plt.show()
Estimating the Degradation Function
In this section, I think there are still some problems I need to sort out once I have more time or have done more reading.
Estimating the Degradation Function by Image Observation
Select a subimage with strong signal content (for example, a high-contrast region) and denote it $g_s(x, y)$; let $\hat{f}_s(x, y)$ denote the processed (estimated) subimage. Then:
$$H_s(u, v) = \frac{G_s(u, v)}{\hat{F}_s(u, v)} \tag{5.66}$$
From the assumption of position invariance, the complete degradation function $H(u, v)$ can then be inferred.
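As a rough illustration (my own sketch, not from the original notebook), one way to code this idea: crop a strong-signal subimage $g_s$, build an estimate $\hat{f}_s$ of what it should look like (here simply by sharpening with an unsharp mask, an assumption on my part), and take the ratio of their spectra as $H_s(u, v)$:

import numpy as np
import cv2

def estimate_H_by_observation(img, box):
    """Sketch of Eq. (5.66); box = (row, col, height, width) of a high-contrast subimage."""
    r, c, h, w = box
    g_s = img[r:r+h, c:c+w].astype(np.float64)
    # crude stand-in for the "processed" subimage: unsharp masking (illustrative choice)
    smoothed = cv2.GaussianBlur(g_s, (5, 5), 0)
    f_s_hat = np.clip(g_s + 1.5 * (g_s - smoothed), 0, 255)
    G_s = np.fft.fft2(g_s)
    F_s_hat = np.fft.fft2(f_s_hat)
    return G_s / (F_s_hat + 1e-8)   # H_s(u, v)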
Estimating the Degradation Function by Experimentation
An impulse is simulated by a bright dot of light, which should be bright enough to reduce the effect of noise to negligible values. The Fourier transform of an impulse is a constant, so:
$$H(u, v) = \frac{G(u, v)}{A} \tag{5.67}$$
# Estimating the degradation function by experimentation
img_impulse = cv2.imread("DIP_Figures/DIP3E_Original_Images_CH05/Fig0524(a)(impulse).tif", 0)
img_blurred = cv2.imread("DIP_Figures/DIP3E_Original_Images_CH05/Fig0524(b)(blurred-impulse).tif", 0)

fig = plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1), plt.imshow(img_impulse, cmap='gray'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 2, 2), plt.imshow(img_blurred, cmap='gray'), plt.xticks([]), plt.yticks([])
plt.tight_layout()
plt.show()
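Before dividing by the impulse's full spectrum (as done in the next block), Eq. (5.67) can also be read literally: treat the observed impulse as (approximately) a single bright dot, so the constant $A$ is just its peak amplitude. A minimal sketch, reusing the pad_image / centralized_2d helpers from earlier posts:

A = float(img_impulse.max())                              # impulse strength A (assumption: single bright dot)
G = np.fft.fft2(centralized_2d(pad_image(img_blurred)))   # G(u, v) of the degraded impulse
H_approx = G / A                                          # Eq. (5.67)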
In the two examples below you will see the beauty of the spectrum image produced by the Fourier transform. Once the spectrum is rendered in color, it is even more beautiful.
# Fourier transforms
fp_impulse = pad_image(img_impulse)
impluse_cen = centralized_2d(fp_impulse)
fft_impulse = np.fft.fft2(impluse_cen)
impulse_spectrume = np.log(1 + spectrum_fft(fft_impulse))

fp_blurred = pad_image(img_blurred)
blurred_cen = centralized_2d(fp_blurred)
fft_blurred = np.fft.fft2(blurred_cen)
blurred_spectrum = np.log(1 + spectrum_fft(fft_blurred))

# H(u, v) = G(u, v) / G_impulse(u, v)
H = fft_blurred / fft_impulse
h_spectrum = np.log(1 + spectrum_fft(H))
h_spectrum = h_spectrum / h_spectrum.max()

fig = plt.figure(figsize=(15, 5))
plt.subplot(1, 3, 1), plt.imshow(impulse_spectrume, cmap='gray'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 2), plt.imshow(blurred_spectrum, cmap='gray'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 3), plt.imshow(h_spectrum, cmap='gray'), plt.xticks([]), plt.yticks([])
plt.tight_layout()
plt.show()
# Some Fourier transforms
img_temp = np.zeros([256, 256])
# H = butterworth_low_pass_filter(img_temp, 10, 500)
H = 1 - butterworth_band_resistant_filter(img_temp, img_temp.shape, radius=50, w=5, n=5)
fp_blurred = pad_image(H)
blurred_cen = centralized_2d(fp_blurred)
fft_blurred = np.fft.fft2(blurred_cen)
blurred_spectrum = np.log(1 + spectrum_fft(fft_blurred))

fig = plt.figure(figsize=(15, 15))
plt.imshow(blurred_spectrum, cmap='PiYG'), plt.xticks([]), plt.yticks([])
# plt.savefig("bbrf_4.png", dpi=300, quality=100)
# plt.subplot(1, 3, 1), plt.imshow(impulse_spectrume, cmap='gray'), plt.xticks([]), plt.yticks([])
# plt.subplot(1, 3, 2), plt.imshow(blurred_spectrum, cmap='gray'), plt.xticks([]), plt.yticks([])
# # plt.subplot(1, 3, 3), plt.imshow(h_spectrum, cmap='gray'), plt.xticks([]), plt.yticks([])
plt.tight_layout()
plt.show()
Estimating the Degradation Function by Modeling
$$H(u, v) = e^{-k(u^2 + v^2)^{\frac{5}{6}}} \tag{5.68}$$
Centered on the frequency rectangle, the following function can be used:
$$H(u, v) = e^{-k\left[(u - P/2)^2 + (v - Q/2)^2\right]^{\frac{5}{6}}}$$
See p. 247 of the book: in the degradation process for image blur caused by motion, was the function applied incorrectly? This question has since been resolved; the solution is as follows.
def modeling_degrade(img, k=1):
    r"""Modeling degradation function, math: $$H(u,v) = e^{-k(u^2 +v^2)^{\frac{5}{6}}}$$
    param: img: input img
    param: k: constant controlling the severity of the blur (larger k, stronger turbulence)
    """
    N, M = img.shape[:2]
    u = np.arange(M)
    v = np.arange(N)
    u, v = np.meshgrid(u, v)
    temp = (u - M//2)**2 + (v - N//2)**2
    kernel = np.exp(-k * np.power(temp, 5/6))
    return kernel
# Without padding, the result matches the book
def get_degenerate_image(img, img_deg):
    """Take the FFT of the image without padding, multiply by the degradation function, then inverse FFT."""
    # FFT--------------------------------------------
    fft = np.fft.fft2(img)
    # FFT * H(u, v)----------------------------------
    fft_huv = fft * img_deg
    # IFFT-------------------------------------------
    ifft = np.fft.ifft2(fft_huv)
    return ifft

img_ori = cv2.imread('DIP_Figures/DIP3E_Original_Images_CH05/Fig0525(a)(aerial_view_no_turb).tif', 0)

# k = [1, 0.1, 0.01, 0.001, 0.0025, 0.00025]
k = [0.0025, 0.001, 0.00025]
fp_cen = centralized_2d(img_ori)

fig = plt.figure(figsize=(12, 12))
for i in range(len(k) + 1):
    ax = fig.add_subplot(2, 2, i+1, xticks=[], yticks=[])
    if i == 0:
        ax.imshow(img_ori, 'gray'), ax.set_title("Original")
    else:
        img_deg = modeling_degrade(fp_cen, k=k[i-1])
        ifft = get_degenerate_image(fp_cen, img_deg)
        img_new = centralized_2d(ifft.real)
        img_new = np.clip(img_new, 0, img_new.max())
        img_new = np.uint8(normalize(img_new) * 255)
        ax.imshow(img_new, 'gray')
        ax.set_title(f"k = {k[i-1]}")
plt.tight_layout()
plt.show()
Motion Blur Function
$$H(u, v) = \frac{T}{\pi(ua + vb)} \sin[\pi(ua + vb)]\, e^{-j\pi(ua + vb)}$$
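For context, this expression comes from assuming uniform linear motion over the exposure time $T$, with trajectories $x_0(t) = at/T$ and $y_0(t) = bt/T$, substituted into the general motion-blur integral:
$$H(u, v) = \int_0^T e^{-j 2\pi [u x_0(t) + v y_0(t)]} \, dt$$
Evaluating the integral with these trajectories yields the sinc-type form above.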
The code below may be a bit messy: it came out of experimentation, the results are not very good, and it has not been tidied up yet. I will clean it up after further study.
img_ori = cv2.imread('DIP_Figures/DIP3E_Original_Images_CH05/Fig0526(a)(original_DIP).tif', 0)
def motion_huv(img, a, b, T):
    """Motion-blur transfer function H(u, v) for uniform linear motion (equation above)."""
    eps = 1e-8
    M, N = img.shape[1], img.shape[0]
    u = np.arange(1, M+1)
    v = np.arange(1, N+1)
    u, v = np.meshgrid(u, v)
    temp = np.pi * (u * a + v * b)
    kernel = (T * np.sin(temp) * np.exp(-temp*1j) / (temp + eps))
    return kernel

# Apply motion blur to an image
def make_blurred(img, PSF, eps):
    """Pad, center, FFT, multiply by the transfer function PSF, inverse FFT, crop back."""
    #=====================
    # fft = np.fft.fft2(img)
    # # fft_shift = np.fft.fftshift(fft)
    # fft_psf = fft * PSF
    # ifft = np.fft.ifft2(fft_psf)
    # # ifft_shift = np.fft.ifftshift(ifft)
    # blurred = abs(ifft.real)
    #=========================
    M, N = img.shape[:2]
    fp = pad_image(img, mode='constant')
    fp_cen = centralized_2d(fp)
    img_fft = np.fft.fft2(fp_cen)
    img_fft_psf = img_fft * PSF
    ifft = np.fft.ifft2(img_fft_psf)
    blurred = centralized_2d(ifft.real)[:M, :N]   # crop back to the original size
    # # blurred = ifft.real[:N, :M]
    return blurred

def get_motion_dsf(image_size, motion_angle, motion_dis):
    PSF = np.zeros(image_size)  # point spread function
    x_center = (image_size[0] - 1) / 2
    y_center = (image_size[1] - 1) / 2
    sin_val = np.sin(motion_angle * np.pi / 180)
    cos_val = np.cos(motion_angle * np.pi / 180)
    # set motion_dis points along the given angle to 1
    for i in range(motion_dis):
        x_offset = round(sin_val * i)
        y_offset = round(cos_val * i)
        PSF[int(x_center - x_offset), int(y_center + y_offset)] = 1
    return PSF / PSF.sum()  # normalize

img_motion = get_motion_dsf((480, 480), 70, 200)

plt.figure(figsize=(10, 8))
plt.subplot(121), plt.imshow(img_motion,'gray'), plt.title('img_motion')
plt.show()
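A minimal usage sketch (my own addition, not from the original experiments): build the frequency-domain kernel from motion_huv at the padded size that make_blurred uses internally, then blur the DIP test image with it. The parameter choice a = b = 0.1, T = 1 follows the book's example; treat this only as a starting point, since the results in this section still need more work.

fp_shape = pad_image(img_ori).shape                      # make_blurred pads before the FFT
Huv = motion_huv(np.zeros(fp_shape), a=0.1, b=0.1, T=1)  # transfer function at the padded size
img_motion_blurred = make_blurred(img_ori, Huv, eps=1e-8)

plt.figure(figsize=(10, 5))
plt.subplot(121), plt.imshow(img_ori, 'gray'), plt.title('img_ori')
plt.subplot(122), plt.imshow(img_motion_blurred, 'gray'), plt.title('motion blurred')
plt.show()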
OpenCV Motion Blur
def motion_blur(image, degree=12, angle=45):
    """create motion blur using opencv
    param: image: input image
    param: degree: size of the blur kernel; the larger it is, the stronger the blur
    param: angle: blur angle
    return: uint8 image
    """
    image = np.array(image)
    # build a motion-blur kernel at an arbitrary angle by rotating a diagonal line kernel
    M = cv2.getRotationMatrix2D((degree / 2, degree / 2), angle, 1)
    motion_blur_kernel = np.diag(np.ones(degree))
    motion_blur_kernel = cv2.warpAffine(motion_blur_kernel, M, (degree, degree))
    motion_blur_kernel = motion_blur_kernel / degree
    blurred = cv2.filter2D(image, -1, motion_blur_kernel)
    # convert to uint8
    cv2.normalize(blurred, blurred, 0, 255, cv2.NORM_MINMAX)
    blurred = np.array(blurred, dtype=np.uint8)
    return blurred
# Motion-blur the image
img_ori = cv2.imread('DIP_Figures/DIP3E_Original_Images_CH05/Fig0526(a)(original_DIP).tif', 0)
img_blur = motion_blur(img_ori, degree=75, angle=15)

plt.figure(figsize=(12, 8))
plt.subplot(121), plt.imshow(img_ori, 'gray'), plt.title('img_ori')
plt.subplot(122), plt.imshow(img_blur, 'gray'), plt.title('img_blur')
plt.show()