Add Java and Python code for the following features2d tutorials: Harris corner detector, Shi-Tomasi corner detector, Creating your own corner detector, Detecting corners location in subpixels, Feature Detection, Feature Description, Feature Matching with FLANN, Features2D + Homography to find a known object. Use Lowe's ratio test to filter the matches.
This commit is contained in:
@@ -0,0 +1,70 @@
|
||||
from __future__ import print_function
|
||||
import cv2 as cv
|
||||
import numpy as np
|
||||
import argparse
|
||||
import random as rng
|
||||
|
||||
source_window = 'Image'
|
||||
maxTrackbar = 25
|
||||
rng.seed(12345)
|
||||
|
||||
def goodFeaturesToTrack_Demo(val):
    """Trackbar callback: detect up to `val` Shi-Tomasi corners in the global
    grayscale image `src_gray`, draw them on a copy of the color image `src`,
    then refine the corner locations to sub-pixel accuracy with
    cv.cornerSubPix and print the refined coordinates.
    """
    maxCorners = max(val, 1)  # goodFeaturesToTrack requires maxCorners >= 1

    # Parameters for Shi-Tomasi algorithm
    qualityLevel = 0.01
    minDistance = 10
    blockSize = 3
    gradientSize = 3
    useHarrisDetector = False
    k = 0.04

    # Copy the source image
    copy = np.copy(src)

    # Apply corner detection
    corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None, \
        blockSize=blockSize, gradientSize=gradientSize, useHarrisDetector=useHarrisDetector, k=k)

    # goodFeaturesToTrack returns None when no corner passes the quality test;
    # guard before touching .shape (cornerSubPix would also reject None).
    if corners is None:
        print('** Number of corners detected:', 0)
        return

    # Draw corners detected
    print('** Number of corners detected:', corners.shape[0])
    radius = 4
    for i in range(corners.shape[0]):
        # Corner coordinates are float32 -- cv.circle needs integer pixel coords.
        cv.circle(copy, (int(corners[i,0,0]), int(corners[i,0,1])), radius, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)

    # Show what you got
    cv.namedWindow(source_window)
    cv.imshow(source_window, copy)

    # Set the needed parameters to find the refined corners
    winSize = (5, 5)
    zeroZone = (-1, -1)  # (-1, -1): no dead zone inside the search window
    # Stop after 40 iterations or once the corner moves less than 0.001.
    # Use the canonical TERM_CRITERIA_COUNT constant; the original mixed the
    # TERM_CRITERIA_EPS spelling with the TermCriteria_COUNT alias.
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 40, 0.001)

    # Calculate the refined corner locations
    corners = cv.cornerSubPix(src_gray, corners, winSize, zeroZone, criteria)

    # Write them down
    for i in range(corners.shape[0]):
        print(" -- Refined Corner [", i, "] (", corners[i,0,0], ",", corners[i,0,1], ")")
|
||||
|
||||
# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/pic3.png')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

# The callback goodFeaturesToTrack_Demo reads the globals src and src_gray
# defined here, so they must be set up before the trackbar is created.
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Create a window and a trackbar
cv.namedWindow(source_window)
maxCorners = 10 # initial threshold
cv.createTrackbar('Threshold: ', source_window, maxCorners, maxTrackbar, goodFeaturesToTrack_Demo)
cv.imshow(source_window, src)
# Run the detection once with the initial value; afterwards the trackbar
# re-invokes the callback on every change.
goodFeaturesToTrack_Demo(maxCorners)

cv.waitKey()
|
||||
+80
@@ -0,0 +1,80 @@
|
||||
from __future__ import print_function
|
||||
import cv2 as cv
|
||||
import numpy as np
|
||||
import argparse
|
||||
import random as rng
|
||||
|
||||
myHarris_window = 'My Harris corner detector'
|
||||
myShiTomasi_window = 'My Shi Tomasi corner detector'
|
||||
myHarris_qualityLevel = 50
|
||||
myShiTomasi_qualityLevel = 50
|
||||
max_qualityLevel = 100
|
||||
rng.seed(12345)
|
||||
|
||||
def myHarris_function(val):
    """Trackbar callback: threshold the precomputed Harris response `Mc` at
    val/max_qualityLevel of the [minVal, maxVal] range and draw a
    random-colored filled dot on every accepted pixel of a copy of `src`.
    """
    myHarris_copy = np.copy(src)
    myHarris_qualityLevel = max(val, 1)  # avoid a zero quality level

    threshold = myHarris_minVal + ( myHarris_maxVal - myHarris_minVal )*myHarris_qualityLevel/max_qualityLevel
    # Vectorized replacement for the original per-pixel double loop.
    # np.where returns indices in row-major order, so pixels are visited (and
    # random colors consumed) in exactly the same order as before.
    for i, j in zip(*np.where(Mc > threshold)):
        cv.circle(myHarris_copy, (int(j), int(i)), 4, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)

    cv.imshow(myHarris_window, myHarris_copy)
|
||||
|
||||
def myShiTomasi_function(val):
    """Trackbar callback: threshold the precomputed minimal-eigenvalue map
    `myShiTomasi_dst` at val/max_qualityLevel of the [minVal, maxVal] range
    and draw a random-colored filled dot on every accepted pixel.
    """
    myShiTomasi_copy = np.copy(src)
    myShiTomasi_qualityLevel = max(val, 1)  # avoid a zero quality level

    threshold = myShiTomasi_minVal + ( myShiTomasi_maxVal - myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel
    # Vectorized replacement for the original per-pixel double loop.
    # np.where returns indices in row-major order, so pixels are visited (and
    # random colors consumed) in exactly the same order as before.
    for i, j in zip(*np.where(myShiTomasi_dst > threshold)):
        cv.circle(myShiTomasi_copy, (int(j), int(i)), 4, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)

    cv.imshow(myShiTomasi_window, myShiTomasi_copy)
|
||||
|
||||
# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Creating your own corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/building.jpg')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Set some parameters
blockSize = 3
apertureSize = 3

# My Harris matrix -- Using cornerEigenValsAndVecs
# Each pixel holds (lambda_1, lambda_2, x1, y1, x2, y2); only the two
# eigenvalues are needed for the Harris response below.
myHarris_dst = cv.cornerEigenValsAndVecs(src_gray, blockSize, apertureSize)

# Calculate Mc = l1*l2 - 0.04*(l1 + l2)^2 for every pixel at once.
# Vectorized: the original computed this with a Python double loop over
# every pixel, which is pure interpreter overhead for identical results.
lambda_1 = myHarris_dst[:, :, 0]
lambda_2 = myHarris_dst[:, :, 1]
Mc = lambda_1 * lambda_2 - 0.04 * np.square(lambda_1 + lambda_2)

myHarris_minVal, myHarris_maxVal, _, _ = cv.minMaxLoc(Mc)

# Create Window and Trackbar
cv.namedWindow(myHarris_window)
cv.createTrackbar('Quality Level:', myHarris_window, myHarris_qualityLevel, max_qualityLevel, myHarris_function)
myHarris_function(myHarris_qualityLevel)

# My Shi-Tomasi -- Using cornerMinEigenVal
myShiTomasi_dst = cv.cornerMinEigenVal(src_gray, blockSize, apertureSize)

myShiTomasi_minVal, myShiTomasi_maxVal, _, _ = cv.minMaxLoc(myShiTomasi_dst)

# Create Window and Trackbar
cv.namedWindow(myShiTomasi_window)
cv.createTrackbar('Quality Level:', myShiTomasi_window, myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function)
myShiTomasi_function(myShiTomasi_qualityLevel)

cv.waitKey()
|
||||
+58
@@ -0,0 +1,58 @@
|
||||
from __future__ import print_function
|
||||
import cv2 as cv
|
||||
import numpy as np
|
||||
import argparse
|
||||
import random as rng
|
||||
|
||||
source_window = 'Image'
|
||||
maxTrackbar = 100
|
||||
rng.seed(12345)
|
||||
|
||||
def goodFeaturesToTrack_Demo(val):
    """Trackbar callback: detect up to `val` Shi-Tomasi corners in the global
    grayscale image `src_gray` and show them as random-colored filled dots on
    a copy of the color image `src`.
    """
    maxCorners = max(val, 1)  # goodFeaturesToTrack requires maxCorners >= 1

    # Parameters for Shi-Tomasi algorithm
    qualityLevel = 0.01
    minDistance = 10
    blockSize = 3
    gradientSize = 3
    useHarrisDetector = False
    k = 0.04

    # Copy the source image
    copy = np.copy(src)

    # Apply corner detection
    corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None, \
        blockSize=blockSize, gradientSize=gradientSize, useHarrisDetector=useHarrisDetector, k=k)

    # goodFeaturesToTrack returns None when no corner passes the quality test;
    # guard before touching .shape.
    if corners is None:
        print('** Number of corners detected:', 0)
        return

    # Draw corners detected
    print('** Number of corners detected:', corners.shape[0])
    radius = 4
    for i in range(corners.shape[0]):
        # Corner coordinates are float32 -- cv.circle needs integer pixel coords.
        cv.circle(copy, (int(corners[i,0,0]), int(corners[i,0,1])), radius, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)

    # Show what you got
    cv.namedWindow(source_window)
    cv.imshow(source_window, copy)
|
||||
|
||||
# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/pic3.png')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

# The callback goodFeaturesToTrack_Demo reads the globals src and src_gray
# defined here, so they must be set up before the trackbar is created.
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Create a window and a trackbar
cv.namedWindow(source_window)
maxCorners = 23 # initial threshold
cv.createTrackbar('Threshold: ', source_window, maxCorners, maxTrackbar, goodFeaturesToTrack_Demo)
cv.imshow(source_window, src)
# Run the detection once with the initial value; afterwards the trackbar
# re-invokes the callback on every change.
goodFeaturesToTrack_Demo(maxCorners)

cv.waitKey()
|
||||
@@ -0,0 +1,55 @@
|
||||
from __future__ import print_function
|
||||
import cv2 as cv
|
||||
import numpy as np
|
||||
import argparse
|
||||
|
||||
source_window = 'Source image'
|
||||
corners_window = 'Corners detected'
|
||||
max_thresh = 255
|
||||
|
||||
def cornerHarris_demo(val):
    """Trackbar callback: run the Harris corner detector on the global
    grayscale image `src_gray`, normalize the response to [0, 255] and draw a
    circle around every pixel whose truncated response exceeds `val`.
    """
    thresh = val

    # Detector parameters
    blockSize = 2
    apertureSize = 3
    k = 0.04  # Harris free parameter

    # Detecting corners
    dst = cv.cornerHarris(src_gray, blockSize, apertureSize, k)

    # Normalizing
    dst_norm = np.empty(dst.shape, dtype=np.float32)
    cv.normalize(dst, dst_norm, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
    dst_norm_scaled = cv.convertScaleAbs(dst_norm)

    # Drawing a circle around corners.  Vectorized replacement for the
    # original per-pixel double loop: truncating with astype(int32) matches
    # the old int(dst_norm[i,j]) > thresh test (dst_norm is non-negative after
    # normalization), and np.where visits pixels in the same row-major order.
    for i, j in zip(*np.where(dst_norm.astype(np.int32) > thresh)):
        cv.circle(dst_norm_scaled, (int(j), int(i)), 5, (0), 2)

    # Showing the result
    cv.namedWindow(corners_window)
    cv.imshow(corners_window, dst_norm_scaled)
|
||||
|
||||
# Load source image and convert it to gray
parser = argparse.ArgumentParser(description='Code for Harris corner detector tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/building.jpg')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

# The callback cornerHarris_demo reads the global src_gray defined here,
# so it must be set before the trackbar is created.
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Create a window and a trackbar
cv.namedWindow(source_window)
thresh = 200 # initial threshold
cv.createTrackbar('Threshold: ', source_window, thresh, max_thresh, cornerHarris_demo)
cv.imshow(source_window, src)
# Run the detection once with the initial value; afterwards the trackbar
# re-invokes the callback on every change.
cornerHarris_demo(thresh)

cv.waitKey()
|
||||
@@ -0,0 +1,35 @@
|
||||
from __future__ import print_function
|
||||
import cv2 as cv
|
||||
import numpy as np
|
||||
import argparse
|
||||
|
||||
# Parse two image paths: the object (input1) and the scene (input2).
parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png')
args = parser.parse_args()

img1 = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
img2 = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
# NOTE: requires the opencv_contrib xfeatures2d module (SURF is patented).
minHessian = 400  # Hessian threshold: larger -> fewer, stronger keypoints
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
keypoints2, descriptors2 = detector.detectAndCompute(img2, None)

#-- Step 2: Matching descriptor vectors with a brute force matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE)
matches = matcher.match(descriptors1, descriptors2)

#-- Draw matches
# Side-by-side canvas: tallest image's height, widths summed, 3 channels.
img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches)

#-- Show detected matches
cv.imshow('Matches', img_matches)

cv.waitKey()
|
||||
@@ -0,0 +1,27 @@
|
||||
from __future__ import print_function
|
||||
import cv2 as cv
|
||||
import numpy as np
|
||||
import argparse
|
||||
|
||||
# Parse the command line: a single --input image path.
parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.')
parser.add_argument('--input', help='Path to input image.', default='../data/box.png')
args = parser.parse_args()

# Load directly as grayscale and bail out early if the file cannot be read.
src = cv.imread(args.input, cv.IMREAD_GRAYSCALE)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector
minHessian = 400  # Hessian threshold: larger -> fewer, stronger keypoints
surf = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
kps = surf.detect(src)

#-- Draw keypoints into a 3-channel canvas of the same height and width
canvas = np.empty(src.shape + (3,), dtype=np.uint8)
cv.drawKeypoints(src, kps, canvas)

#-- Show detected (drawn) keypoints
cv.imshow('SURF Keypoints', canvas)

cv.waitKey()
|
||||
+43
@@ -0,0 +1,43 @@
|
||||
from __future__ import print_function
|
||||
import cv2 as cv
|
||||
import numpy as np
|
||||
import argparse
|
||||
|
||||
# Parse two image paths: the object (input1) and the scene (input2).
parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png')
args = parser.parse_args()

img1 = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
img2 = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400  # Hessian threshold: larger -> fewer, stronger keypoints
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
keypoints2, descriptors2 = detector.detectAndCompute(img2, None)

#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors1, descriptors2, 2)

#-- Filter matches using the Lowe's ratio test
ratio_thresh = 0.7
good_matches = []
for m_n in knn_matches:
    # knnMatch may return fewer than 2 matches for a query descriptor
    if len(m_n) < 2:
        continue
    m, n = m_n[0], m_n[1]
    # Canonical Lowe formulation: multiply instead of divide so that a
    # zero-distance second-best match cannot raise ZeroDivisionError
    # (the original computed m.distance / n.distance <= ratio_thresh).
    if m.distance < ratio_thresh * n.distance:
        good_matches.append(m)

#-- Draw matches
# Side-by-side canvas: tallest image's height, widths summed, 3 channels.
img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

#-- Show detected matches
cv.imshow('Good Matches', img_matches)

cv.waitKey()
|
||||
+78
@@ -0,0 +1,78 @@
|
||||
from __future__ import print_function
|
||||
import cv2 as cv
|
||||
import numpy as np
|
||||
import argparse
|
||||
|
||||
# Parse two image paths: the object to find (input1) and the scene (input2).
parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png')
args = parser.parse_args()

img_object = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
img_scene = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
if img_object is None or img_scene is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400  # Hessian threshold: larger -> fewer, stronger keypoints
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)

#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)

#-- Filter matches using the Lowe's ratio test
ratio_thresh = 0.75
good_matches = []
for m_n in knn_matches:
    # knnMatch may return fewer than 2 matches for a query descriptor
    if len(m_n) < 2:
        continue
    m, n = m_n[0], m_n[1]
    # Canonical Lowe formulation: multiply instead of divide so that a
    # zero-distance second-best match cannot raise ZeroDivisionError
    # (the original computed m.distance / n.distance <= ratio_thresh).
    if m.distance < ratio_thresh * n.distance:
        good_matches.append(m)

#-- Draw matches
# Side-by-side canvas: tallest image's height, widths summed, 3 channels.
img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1]+img_scene.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

#-- Localize the object
obj = np.empty((len(good_matches),2), dtype=np.float32)
scene = np.empty((len(good_matches),2), dtype=np.float32)
for i in range(len(good_matches)):
    #-- Get the keypoints from the good matches
    obj[i,0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
    obj[i,1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
    scene[i,0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
    scene[i,1] = keypoints_scene[good_matches[i].trainIdx].pt[1]

#-- A homography needs at least 4 point correspondences; with fewer,
# findHomography errors out rather than returning a result.
if len(good_matches) < 4:
    print('Not enough good matches to estimate a homography!')
    cv.imshow('Good Matches & Object detection', img_matches)
    cv.waitKey()
    exit(0)

H, _ = cv.findHomography(obj, scene, cv.RANSAC)
# findHomography can still return None when RANSAC fails to find a model
# (e.g. degenerate point configurations); perspectiveTransform would then
# crash, so show the plain matches and stop instead.
if H is None:
    print('Could not find a homography!')
    cv.imshow('Good Matches & Object detection', img_matches)
    cv.waitKey()
    exit(0)

#-- Get the corners from the image_1 ( the object to be "detected" )
obj_corners = np.empty((4,1,2), dtype=np.float32)
obj_corners[0,0,0] = 0
obj_corners[0,0,1] = 0
obj_corners[1,0,0] = img_object.shape[1]
obj_corners[1,0,1] = 0
obj_corners[2,0,0] = img_object.shape[1]
obj_corners[2,0,1] = img_object.shape[0]
obj_corners[3,0,0] = 0
obj_corners[3,0,1] = img_object.shape[0]

scene_corners = cv.perspectiveTransform(obj_corners, H)

#-- Draw lines between the corners (the mapped object in the scene - image_2 )
# The scene occupies the right half of img_matches, hence the +shape[1] shift.
cv.line(img_matches, (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])),\
    (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])),\
    (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])),\
    (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])),\
    (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])), (0,255,0), 4)

#-- Show detected matches
cv.imshow('Good Matches & Object detection', img_matches)

cv.waitKey()
|
||||
Reference in New Issue
Block a user