import cv2
import numpy as np

SOURCE_IMAGE1 = '../lisbon1.jpg'
SOURCE_IMAGE2 = '../lisbon2.jpg'
OUTPUT_IMAGE1 = '../keypoints_lisbon1.jpg'
OUTPUT_IMAGE2 = '../keypoints_lisbon2.jpg'
MATCHING_IMAGE = 'flann_matching_lisbon1_lisbon2.jpg'

## read the images
img1 = cv2.imread(SOURCE_IMAGE1)
img2 = cv2.imread(SOURCE_IMAGE2)

h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]

border_top_bottom = int(h1 / 2)
print(border_top_bottom)
border_left_right = int(w1 / 2)

#img1_pad = cv2.copyMakeBorder(img1, border_top_bottom, border_top_bottom, border_left_right, border_left_right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
#img2_pad = cv2.copyMakeBorder(img2, border_top_bottom, border_top_bottom, border_left_right, border_left_right, cv2.BORDER_CONSTANT, value=[0, 0, 0])

## convert the images to grayscale
gray_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

## detect the feature points
surf = cv2.xfeatures2d.SURF_create()
keypoints1 = surf.detect(gray_img1, None)
keypoints2 = surf.detect(gray_img2, None)

## compute the keypoint descriptors
keypoints1, descriptors1 = surf.compute(gray_img1, keypoints1)
keypoints2, descriptors2 = surf.compute(gray_img2, keypoints2)

## find point correspondences
# FLANN parameters (1 selects the kd-tree index in FLANN's enum)
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)   # or pass an empty dictionary

flann = cv2.FlannBasedMatcher(index_params, search_params)

## kNN-based matching: every descriptor gets its two nearest candidate matches
matches = flann.knnMatch(descriptors1, descriptors2, k=2)

# keep only the good matches, i.e. the ones that pass Lowe's ratio test
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

points1 = []
points2 = []

#goodKeypoints1 = np.float32([keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
#goodKeypoints2 = np.float32([keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

for m in good:
    points1.append(keypoints1[m.queryIdx].pt)
    points2.append(keypoints2[m.trainIdx].pt)

points1, points2 = np.float32((points1, points2))

#points1 = cv2.KeyPoint_convert(goodKeypoints1)
#points2 = cv2.KeyPoint_convert(goodKeypoints2)

H, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
matchesMask = mask.ravel().tolist()  # marks the point pairs kept as RANSAC inliers

img2_size = (img2.shape[1], img2.shape[0])
img1_size = (img1.shape[1], img1.shape[0])

# warp img1 into img2's coordinate frame (H maps points1 to points2);
# the output canvas matches img2 so the two images can be blended pixel by pixel
deformedImg2 = cv2.warpPerspective(img1, H, img2_size)

img2_copy = img2.copy()

# where the warp left a black (unmapped) pixel keep img2, otherwise take the warped pixel
for x in range(0, img2.shape[1]):
    for y in range(0, img2.shape[0]):
        if np.array_equal(deformedImg2[y, x], np.array([0, 0, 0])):
            img2_copy[y, x] = img2[y, x]
        else:
            img2_copy[y, x] = deformedImg2[y, x]

cv2.imwrite("deformed2.png", img2_copy)
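
## --- Optional sketches below are not part of the original pipeline; left commented out. ---

## A minimal sketch, assuming an OpenCV build without the non-free xfeatures2d module:
## ORB can stand in for SURF. Note that ORB descriptors are binary, so the kd-tree FLANN
## index above would have to be replaced by an LSH index, or by a brute-force matcher
## with cv2.NORM_HAMMING.
#orb = cv2.ORB_create(nfeatures=5000)
#keypoints1, descriptors1 = orb.detectAndCompute(gray_img1, None)
#keypoints2, descriptors2 = orb.detectAndCompute(gray_img2, None)

## A minimal sketch of a vectorized replacement for the per-pixel blending loop above,
## assuming deformedImg2 and img2 have the same shape: pixels the warp left black keep
## img2's value, all other pixels take the warped value.
#warp_mask = np.any(deformedImg2 != 0, axis=2)   # True where the warp produced a pixel
#blended = img2.copy()
#blended[warp_mask] = deformedImg2[warp_mask]
#cv2.imwrite("deformed2_vectorized.png", blended)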