aboutsummaryrefslogtreecommitdiff
path: root/autoalign.py
blob: 661c860cc146efa1aa7f8b30e83f9ccc095b6c69 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 10 17:13:35 2019

@author: franklin
"""

import cv2
import numpy as np

MAX_FEATURES = 1000

# percent of matches to use
GOOD_MATCH_PERCENT = .5

#def manualAlign(im1, im2):

# returns unwarped image, homography, matches
# im1 is input image, im2 is reference
def alignImages(im1, im2, mask1=None, mask2=None):
  """Align im1 to the reference image im2 using ORB feature matching.

  Parameters
  ----------
  im1 : BGR image (numpy array) to be warped onto im2's frame.
  im2 : BGR reference image.
  mask1, mask2 : optional 8-bit masks restricting feature detection
      in im1 / im2 respectively (passed to ORB detectAndCompute).

  Returns
  -------
  tuple
      (im1Reg, h, imMatches): the warped image, the 3x3 homography, and
      a visualization of the matches used.
      (None, None, None) when no descriptors are found or fewer than 4
      matches survive filtering (too few for a homography).
      (None, None, imMatches) when findHomography fails to estimate h.
  """
  # ORB operates on single-channel images.
  im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
  im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

  # Detect ORB features and compute descriptors.
  orb = cv2.ORB_create(MAX_FEATURES)
  keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, mask1)
  keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, mask2)

  # detectAndCompute returns None descriptors when nothing was detected;
  # bail out before matching. (This check must precede any use of
  # keypoints1/keypoints2 — indexing an empty keypoint list would raise.)
  if descriptors1 is None or descriptors2 is None:
    return None, None, None

  # Hamming distance is the correct metric for ORB's binary descriptors;
  # crossCheck keeps only mutually-best matches.
  matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
  matches = list(matcher.match(descriptors1, descriptors2))

  # Keep the best GOOD_MATCH_PERCENT of matches (smallest distance first).
  matches.sort(key=lambda m: m.distance)
  numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
  matches = matches[:numGoodMatches]

  # findHomography with RANSAC needs at least 4 point pairs; fewer would
  # raise cv2.error, so give up early.
  if len(matches) < 4:
    return None, None, None

  # Draw the surviving matches for inspection/debugging.
  imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)

  # Extract matched point coordinates (im1 = query, im2 = train).
  points1 = np.zeros((len(matches), 2), dtype=np.float32)
  points2 = np.zeros((len(matches), 2), dtype=np.float32)
  for i, match in enumerate(matches):
    points1[i, :] = keypoints1[match.queryIdx].pt
    points2[i, :] = keypoints2[match.trainIdx].pt

  # Estimate the homography mapping im1 points onto im2 points.
  h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

  # Warp im1 into im2's frame when a homography was found.
  height, width = im2.shape[:2]
  if h is not None:
    im1Reg = cv2.warpPerspective(im1, h, (width, height))
    return im1Reg, h, imMatches
  return None, h, imMatches