Computer Vision: Image Matching with Harris Corner Features

Published: 2023-01-04

# skimage.feature: Harris corner detection
from matplotlib import pylab
import numpy as np
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import corner_harris, corner_subpix, corner_peaks, plot_matches
from skimage.transform import warp, AffineTransform
from skimage.util import img_as_float
from skimage.exposure import rescale_intensity
from skimage.measure import ransac
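# corner_harris computes the Harris response from the structure tensor M of the
# local image gradients: R = det(M) - k * trace(M)**2 (skimage's default 'k'
# method). Large positive responses mark corners; smaller k values (e.g. the
# k=0.001 used below) flag more, weaker corners.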
# First demo (commented out): mark and plot Harris corners on the astronaut image
# image = data.astronaut()
# image_gray = rgb2gray(image)
# coordinates = corner_harris(image_gray, k=0.001)
# image[coordinates > 0.04*coordinates.max()] = [255, 0, 0]  # threshold for an optimal value, depends on the image
# corner_coordinates = corner_peaks(coordinates)
# coordinates_subpix = corner_subpix(image_gray, corner_coordinates, window_size=11)
# pylab.figure(figsize=(20, 20))
# pylab.subplot(211), pylab.imshow(image, cmap='inferno')
# pylab.plot(coordinates_subpix[:, 1], coordinates_subpix[:, 0], 'r.',
#            markersize=5, label='subpixel')
# pylab.legend(prop={'size': 20}), pylab.axis('off')
# pylab.subplot(212), pylab.imshow(image, interpolation='nearest')
# pylab.plot(corner_coordinates[:, 1], corner_coordinates[:, 0], 'bo', markersize=5)
# pylab.plot(coordinates_subpix[:, 1], coordinates_subpix[:, 0], 'r+',
#            markersize=10), pylab.axis('off')
# pylab.tight_layout(), pylab.show()
# Robust image matching based on the RANSAC algorithm and Harris corner features
temple = rgb2gray(img_as_float(data.astronaut()))
image_original = np.zeros(shape=(temple.shape[0], temple.shape[1], 3), dtype="float32")
image_original[..., 0] = temple
gradient_row, gradient_col = (np.mgrid[0:image_original.shape[0], 0:image_original.shape[1]]
                              / float(image_original.shape[0]))
image_original[..., 1] = gradient_row
image_original[..., 2] = gradient_col
image_original = rescale_intensity(image_original)
image_original_gray = rgb2gray(image_original)
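# The red channel holds the astronaut image while the green and blue channels
# hold smooth row/column gradients, so windows around different corners differ
# even where the gray texture looks similar; this makes the SSD matching below
# more discriminative.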
# Affine transform
affine_trans = AffineTransform(scale=(0.8, 0.9), rotation=0.1, translation=(120, -20))
image_warped = warp(image_original, affine_trans.inverse, output_shape=image_original.shape)
image_warped_gray = rgb2gray(image_warped)
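# warp() expects an inverse mapping (output coordinates -> input coordinates),
# hence the forward transform is passed via affine_trans.inverse.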
# Corner detection
coordinates = corner_harris(image_original_gray)
coordinates[coordinates > 0.01*coordinates.max()] = 1
coordinates_original = corner_peaks(coordinates, threshold_rel=0.0001, min_distance=5)
coordinates = corner_harris(image_warped_gray)
coordinates[coordinates > 0.01*coordinates.max()] = 1
coordinates_warped = corner_peaks(coordinates, threshold_rel=0.0001, min_distance=5)
# Determine subpixel corner positions
coordinates_original_subpix = corner_subpix(image_original_gray, coordinates_original, window_size=9)
coordinates_warped_subpix = corner_subpix(image_warped_gray, coordinates_warped, window_size=9)
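# corner_peaks keeps response maxima at least min_distance pixels apart;
# corner_subpix refines each peak inside a window_size x window_size window and
# returns NaN for corners it cannot classify, which are filtered out below.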
# Gaussian weighting kernel for the window comparisons below
def gaussian_weights(window_ext, sigma=1):
    y, x = np.mgrid[-window_ext:window_ext+1, -window_ext:window_ext+1]
    g_w = np.zeros(y.shape, dtype=np.double)
    g_w[:] = np.exp(-0.5 * (x**2 / sigma**2 + y**2 / sigma**2))
    g_w /= 2 * np.pi * sigma * sigma
    return g_w
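# Illustrative check: the kernel is (2*window_ext + 1) square, symmetric, and
# peaked at the center, e.g.
# >>> gaussian_weights(3).shape
# (7, 7)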
def match_corner(coordinates, window_ext=3):
    row, col = np.round(coordinates).astype(np.intp)
    window_original = image_original[row-window_ext:row+window_ext+1,
                                     col-window_ext:col+window_ext+1, :]
    weights = gaussian_weights(window_ext, 3)
    weights = np.dstack((weights, weights, weights))
    # Gaussian-weighted sum of squared differences against every warped corner
    SSDs, candidate_idxs = [], []
    for idx, (coord_row, coord_col) in enumerate(coordinates_warped):
        window_warped = image_warped[coord_row-window_ext:coord_row+window_ext+1,
                                     coord_col-window_ext:coord_col+window_ext+1, :]
        if window_original.shape == window_warped.shape:  # skip windows clipped at the border
            SSDs.append(np.sum(weights * (window_original - window_warped)**2))
            candidate_idxs.append(idx)
    if not SSDs:
        return None
    # index through candidate_idxs so skipped border windows do not shift the match
    return coordinates_warped_subpix[candidate_idxs[np.argmin(SSDs)]]
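# Brute-force matching: every original corner is compared against every warped
# corner, so the cost grows as O(n_original * n_warped) window comparisons.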
source, destination = [], []
for coordinates in coordinates_original_subpix:
    if np.any(np.isnan(coordinates)):  # corner_subpix could not refine this corner
        continue
    coordinates1 = match_corner(coordinates)
    if coordinates1 is not None and not np.any(np.isnan(coordinates1)):
        source.append(coordinates)
        destination.append(coordinates1)
source1 = np.array(source)
destination1 = np.array(destination)
model = AffineTransform()
model.estimate(source1, destination1)
model_robust, inliers = ransac((source1, destination1), AffineTransform,
                               min_samples=3, residual_threshold=2, max_trials=100)
outliers = ~inliers
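# ransac() repeatedly fits an AffineTransform to min_samples=3 random
# correspondences, counts pairs whose residual is below residual_threshold=2
# pixels as inliers, and after max_trials=100 rounds returns the model with the
# largest consensus set, so mismatched pairs no longer bias the estimate.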
print(affine_trans.scale, affine_trans.translation, affine_trans.rotation)
# (0.8, 0.9) [ 120. -20.] 0.09999999999999999
print(model.scale, model.translation, model.rotation)
# (0.8982412101241938, 0.8072777593937368) [ -20.45123966  114.92297156] -0.10225420334222493
print(model_robust.scale, model_robust.translation, model_robust.rotation)
# (0.9001524425730119, 0.8000362790749188) [ -19.87491292  119.83016533] -0.09990858564132575
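# Note: the correspondences are in (row, col) order while AffineTransform
# reports its parameters in (x, y) = (col, row) order, so the recovered scale
# and translation appear swapped and the rotation negated relative to
# affine_trans. Up to that coordinate swap, the robust RANSAC estimate matches
# the ground truth far more closely than the plain least-squares fit, which is
# skewed by the faulty correspondences.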
# Visualize correspondences
fig, axes = pylab.subplots(nrows=2, ncols=1, figsize=(20, 15))
pylab.gray()
inlier_idxs = np.nonzero(inliers)[0]
plot_matches(axes[0], image_original_gray, image_warped_gray, source1, destination1,
             np.column_stack((inlier_idxs, inlier_idxs)), matches_color='b')
axes[0].axis('off'), axes[0].set_title('Correct correspondences', size=20)
outlier_idxs = np.nonzero(outliers)[0]
plot_matches(axes[1], image_original_gray, image_warped_gray, source1, destination1,
             np.column_stack((outlier_idxs, outlier_idxs)), matches_color='r')
axes[1].axis('off'), axes[1].set_title('Faulty correspondences', size=20)
fig.tight_layout(), pylab.show()
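
As a final check, the per-point residuals of the robust model on its inlier correspondences can be compared against the RANSAC threshold; a minimal sketch using skimage's GeometricTransform.residuals:

# Residuals are the Euclidean distances between the mapped source points and
# their destinations; on the inlier set they should stay below the
# residual_threshold of 2 used above.
residuals = model_robust.residuals(source1[inliers], destination1[inliers])
print(residuals.max())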