author     Franklin Wei <franklin@rockbox.org>  2019-12-09 01:20:26 -0500
committer  Franklin Wei <franklin@rockbox.org>  2019-12-09 01:20:26 -0500
commit     98224b1be453c99fa07457848e9c6b3adce5f314 (patch)
tree       8792e60e64071c6fd31d61941f767ae2d70e0d44
parent     929bacc2867b42690d28a18cee57a285d9987a51 (diff)
download   sloreg-98224b1be453c99fa07457848e9c6b3adce5f314.zip
           sloreg-98224b1be453c99fa07457848e9c6b3adce5f314.tar.gz
           sloreg-98224b1be453c99fa07457848e9c6b3adce5f314.tar.bz2
           sloreg-98224b1be453c99fa07457848e9c6b3adce5f314.tar.xz
Add old Python code
-rw-r--r--  autoalign.py                          71
-rw-r--r--  corneralign.py                        20
-rwxr-xr-x  eccalign.py                           45
-rw-r--r--  filtered_screenshot_08.12.2019.png    bin 0 -> 218914 bytes
-rw-r--r--  filtered_screenshot_08.12.2019_5.png  bin 0 -> 186176 bytes
-rwxr-xr-x  gabor.py                              30
-rwxr-xr-x  register.py                           71
-rw-r--r--  util.py                               81
8 files changed, 318 insertions, 0 deletions
diff --git a/autoalign.py b/autoalign.py
new file mode 100644
index 0000000..661c860
--- /dev/null
+++ b/autoalign.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Oct 10 17:13:35 2019
+
+@author: franklin
+"""
+
+import cv2
+import numpy as np
+
+MAX_FEATURES = 1000
+
+# fraction of matches to keep (the best 50%)
+GOOD_MATCH_PERCENT = .5
+
+#def manualAlign(im1, im2):
+
+# returns unwarped image, homography, matches
+# im1 is input image, im2 is reference
+def alignImages(im1, im2, mask1=None, mask2=None):
+ # Convert images to grayscale
+ im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
+ im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
+
+ # Detect ORB features and compute descriptors.
+ orb = cv2.ORB_create(MAX_FEATURES)
+ keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, mask1)
+ keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, mask2)
+
+    # Bail out early if either image yielded no features.
+    if descriptors1 is None or descriptors2 is None:
+        return None, None, None
+
+    # Match features using brute-force Hamming distance (ORB descriptors are binary).
+    #matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
+    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
+    matches = matcher.match(descriptors1, descriptors2, None)
+
+    # Sort matches by distance, best (lowest) first; newer OpenCV returns a
+    # tuple here, so use sorted() rather than list.sort()
+    matches = sorted(matches, key=lambda x: x.distance)
+
+ # Remove not so good matches
+ numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
+ matches = matches[:numGoodMatches]
+
+ # Draw top matches
+ imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
+ #cv2.imwrite("matches.jpg", imMatches)
+
+ # Extract location of good matches
+ points1 = np.zeros((len(matches), 2), dtype=np.float32)
+ points2 = np.zeros((len(matches), 2), dtype=np.float32)
+
+ for i, match in enumerate(matches):
+ points1[i, :] = keypoints1[match.queryIdx].pt
+ points2[i, :] = keypoints2[match.trainIdx].pt
+
+    # Find homography (RANSAC needs at least 4 point correspondences)
+    if len(points1) < 4:
+        return None, None, None
+    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
+
+ # Use homography
+ height, width, channels = im2.shape
+ #print(h, points1, points2)
+ if h is not None:
+ im1Reg = cv2.warpPerspective(im1, h, (width, height))
+
+ return im1Reg, h, imMatches
+ return None, h, imMatches
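A minimal usage sketch for the ORB-based aligner above (not part of the patch; the image paths are hypothetical):

    import cv2
    from autoalign import alignImages

    im1 = cv2.imread("frame.png")      # input image to be warped
    im2 = cv2.imread("reference.png")  # reference image
    aligned, h, matches = alignImages(im1, im2)
    if aligned is not None:
        cv2.imwrite("aligned.png", aligned)
        print("Estimated homography:\n", h)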
diff --git a/corneralign.py b/corneralign.py
new file mode 100644
index 0000000..c295a9f
--- /dev/null
+++ b/corneralign.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+import cv2
+import numpy as np
+from util import *
+
+def alignImages(im1, im2, mask1=None, mask2=None):
+ # Convert images to grayscale
+ im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
+ im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
+    corners = cv2.goodFeaturesToTrack(im1Gray, 100, .01, 10)
+    if corners is None:
+        return im1, None, None
+    corners = corners.astype(int)  # np.int0 was removed in NumPy 2.0
+    img = im1
+    for corner in corners:
+        x, y = corner.ravel()
+        cv2.circle(img, (x, y), 3, 255, -1)
+
+ showResized('corners', img)
+
+ return img, None, None
diff --git a/eccalign.py b/eccalign.py
new file mode 100755
index 0000000..0213cdf
--- /dev/null
+++ b/eccalign.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+import cv2
+import numpy as np
+from util import *
+
+# Taken from https://www.learnopencv.com/image-alignment-ecc-in-opencv-c-python/
+
+# aligns im2 (the input frame) onto im1 (the reference); note the argument order
+def alignImages(im2, im1):
+ # Convert images to grayscale
+ im1_gray = cv2.cvtColor(im1,cv2.COLOR_BGR2GRAY)
+ im2_gray = cv2.cvtColor(im2,cv2.COLOR_BGR2GRAY)
+
+ # Find size of image1
+ sz = im1.shape
+
+ # Define the motion model
+ warp_mode = cv2.MOTION_AFFINE
+
+    # Define 2x3 or 3x3 matrices and initialize the matrix to identity
+    if warp_mode == cv2.MOTION_HOMOGRAPHY:
+        warp_matrix = np.eye(3, 3, dtype=np.float32)
+    else:
+        warp_matrix = np.eye(2, 3, dtype=np.float32)
+
+ # Specify the number of iterations.
+ number_of_iterations = 200
+
+ # Specify the threshold of the increment
+ # in the correlation coefficient between two iterations
+ termination_eps = 1e-10
+
+ # Define termination criteria
+ criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
+
+    # Run the ECC algorithm; the result is stored in warp_matrix.
+    # (last two arguments: no input mask, Gaussian pre-smoothing with filter size 5)
+    (cc, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria, None, 5)
+
+    if warp_mode == cv2.MOTION_HOMOGRAPHY:
+        # Use warpPerspective for Homography
+        im2_aligned = cv2.warpPerspective(im2, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
+    else:
+        # Use warpAffine for Translation, Euclidean and Affine
+        im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
+ return im2_aligned
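For reference, a sketch of calling the ECC aligner (hypothetical paths; the frame to be warped comes first):

    import cv2
    from eccalign import alignImages

    reference = cv2.imread("reference.png")
    frame = cv2.imread("frame.png")
    aligned = alignImages(frame, reference)  # warps frame onto reference
    cv2.imwrite("ecc_aligned.png", aligned)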
diff --git a/filtered_screenshot_08.12.2019.png b/filtered_screenshot_08.12.2019.png
new file mode 100644
index 0000000..dd27324
--- /dev/null
+++ b/filtered_screenshot_08.12.2019.png
Binary files differ
diff --git a/filtered_screenshot_08.12.2019_5.png b/filtered_screenshot_08.12.2019_5.png
new file mode 100644
index 0000000..da3d5fe
--- /dev/null
+++ b/filtered_screenshot_08.12.2019_5.png
Binary files differ
diff --git a/gabor.py b/gabor.py
new file mode 100755
index 0000000..07d1e0e
--- /dev/null
+++ b/gabor.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+import cv2
+import numpy as np
+from util import *
+
+KSIZE = 21  # Gabor kernel size in pixels
+
+for i in range(1, 22 + 1):
+    path = "SLO Data for registration/SLO001/SLO_subject001_frame" + str(i) + ".png"
+    img = cv2.imread(path)
+    img = ~img # invert
+
+    # double the brightness, saturating instead of wrapping around in uint8
+    img = cv2.convertScaleAbs(img, alpha=2.0)
+
+ # cv2.getGaborKernel(ksize, sigma, theta, lambda, gamma, psi, ktype)
+ # ksize - size of gabor filter (n, n)
+ # sigma - standard deviation of the gaussian function
+ # theta - orientation of the normal to the parallel stripes
+    # lambda - wavelength of the sinusoidal factor
+ # gamma - spatial aspect ratio
+ # psi - phase offset
+ # ktype - type and range of values that each pixel in the gabor kernel can hold
+ kern = cv2.getGaborKernel((KSIZE, KSIZE), 8.0, np.pi/4, 10.0, 0.5, 0, ktype=cv2.CV_32F)
+ showResized("frame", img)
+ showResized("kern", kern)
+
+ filt = cv2.filter2D(img, cv2.CV_8U, kern)
+ showResized("filtered", filt)
+ cv2.waitKey(0)
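The script above fixes theta at pi/4; a hedged sketch of sweeping the orientation to build a small Gabor filter bank, keeping the other parameters from the call above:

    import numpy as np
    import cv2

    kernels = []
    for theta in np.arange(0, np.pi, np.pi / 4):  # 0, 45, 90, 135 degrees
        kernels.append(cv2.getGaborKernel((21, 21), 8.0, theta, 10.0, 0.5, 0,
                                          ktype=cv2.CV_32F))
    # each entry is a 21x21 float32 kernel usable with cv2.filter2D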
diff --git a/register.py b/register.py
new file mode 100755
index 0000000..7489cb8
--- /dev/null
+++ b/register.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+import cv2
+import numpy as np
+#from autoalign import alignImages
+#from corneralign import alignImages
+from eccalign import alignImages
+from util import *
+
+sat_region=None
+
+# inverted mask to ignore roi
+def maskFromRegion(shape, roi):
+ x,y,w,h = roi
+ mask = np.zeros(shape, dtype=np.uint8)
+ mask[:] = 0xff
+ cv2.rectangle(mask, (x,y), (x+w, y+h), (0x00), thickness = -1)
+ return mask
+
+def combine(composite, frame):
+# mask1 = mask2 = maskFromRegion(frame.shape[:2], sat_region)
+# mask1 &= maskFromRegion(frame.shape[:2], cv2.selectROI(frame))
+
+# showResized('mask1', mask1)
+ aligned = alignImages(frame, composite)
+# mask1, mask2)
+ #return clipAndConvert((composite + aligned) * .5)
+ showResized("aligned", aligned)
+ showResized("frame", frame)
+ #showResized("matches", matches)
+ #print(np.linalg.det(hm))
+
+ showResized('diff', abs(aligned.astype(np.float32) - composite.astype(np.float32)))
+    # Overlay: put the aligned frame (as grayscale) into the green channel
+    overlaid = np.zeros((aligned.shape[0], aligned.shape[1], 3), dtype=np.uint8)
+    overlaid[:, :, 1] = cv2.cvtColor(aligned, cv2.COLOR_BGR2GRAY)
+    showResized('overlay', overlaid)
+
+ return composite, aligned
+
+def gammaCorrect(image, gamma=1.0):
+ # build a lookup table mapping the pixel values [0, 255] to
+ # their adjusted gamma values
+ invGamma = 1.0 / gamma
+ table = np.array([((i / 255.0) ** invGamma) * 255
+ for i in np.arange(0, 256)]).astype("uint8")
+
+ # apply gamma correction using the lookup table
+ return cv2.LUT(image, table)
+
+composite=None
+
+while True:
+ for i in range(1, 22 + 1):
+        path = "SLO Data for registration/IR_frame_best/IR_frame_best" + str(i) + ".png"
+ img = cv2.imread(path)
+ print(path)
+ squareImg = cv2.resize(img, (500, 500))
+ squareImg = squareImg[50:-50, 50:-50]
+
+        squareImg = gammaCorrect(squareImg, 1) # gamma = 1.0 is the identity mapping
+
+ if composite is None:
+# print("Select saturation region")
+# sat_region = cv2.selectROI(squareImg)
+ composite = squareImg
+ else:
+ composite, aligned = combine(composite, squareImg)
+
+ showResized("reference", composite)
+        # advance to the next frame when the spacebar (ASCII 32) is pressed
+        while cv2.waitKey(50) != 32:
+            pass
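A quick numeric check of gammaCorrect (a standalone sketch, recomputing the lookup table inline since importing register.py would run its main loop): with gamma = 2.0, invGamma is 0.5, so input level 64 maps to 255 * (64/255) ** 0.5, about 127.

    import numpy as np

    gamma = 2.0
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    print(table[64])   # 127: dark pixels are brightened
    print(table[255])  # 255: white is unchanged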
diff --git a/util.py b/util.py
new file mode 100644
index 0000000..e50d342
--- /dev/null
+++ b/util.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Oct 10 17:39:12 2019
+
+@author: franklin
+"""
+
+import numpy as np
+import cv2
+
+def clipAndConvert(img):
+ img = np.clip(img, 0, 255)
+ img = img.astype('uint8')
+ return img
+
+def normalizeAndConvert(img):
+ return cv2.normalize(img, None, alpha = 0, beta = 255, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_8U)
+
+def showResized(window, img):
+ if img is not None:
+ scaled = cv2.resize(clipAndConvert(img), (512, 512))
+ cv2.imshow(window, scaled)
+
+# scale colors of an image so lights are lighter and darks are darker
+def increaseContrast(img, s = 1.1):
+ img = img.astype('float32')
+ img = ((img - 128) * s + 128)
+ #print(img)
+ return img
+
+def averageColorOfRegion(im, x1, x2, y1, y2):
+ # Find the average color of a rectangular region
+ region = im[y1:y2,x1:x2]
+ return np.average(np.average(region, axis=0), axis = 0)
+
+# OpenCV stores colors as BGR
+RED_COMP = 2
+GREEN_COMP = 1
+BLUE_COMP = 0
+
+# Color space mapping information
+#
+# These variables describe how the camera and projector colors map to
+# each other.
+#
+# More specifically, we have six variables which describe the mapping:
+#
+# camera_Rmin, camera_Rmax
+# camera_Gmin, camera_Gmax
+# camera_Bmin, camera_Bmax
+#
+# camera_Rmin, camera_Gmin, and camera_Bmin are obtained by projecting a
+# black image (R,G,B=0). The three maximums are obtained by projecting
+# pure red, green, and blue. camera_Xmin and camera_Xmax describe the
+# camera's color response when shown a zero X component or a 255 X
+# component, respectively.
+#
+# We can then model the projector -> camera mapping as
+#
+#    camera_R = camera_Rmin + (camera_Rmax - camera_Rmin) / 255 * projector_R
+#
+# The reverse mapping is
+#
+# projector_R = (camera_R - camera_Rmin) / (camera_Rmax - camera_Rmin) * 255
+
+# image is a 3D numpy array [y, x, color]
+# black is the minimum B, G, R components
+# maxColors is the maximum B, G, R components
+
+# this will return an unnormalized float32 matrix
+# map camera colors to projector color space
+def cameraToProjectorColor(image, black, maxColors):
+ return (image - black).astype('float32') * 255 / (maxColors - black)
+ #print (maxColors - black)
+ #return image
+
+# inverse of the above
+# projector color to camera color
+def projectorToCameraColor(image, black, maxColors):
+ return black + (maxColors - black) / 255 * image.astype('float32')
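A round-trip sketch of the two color mappings above (the calibration values are made up for illustration):

    import numpy as np
    from util import cameraToProjectorColor, projectorToCameraColor

    black = np.array([10.0, 12.0, 15.0])         # camera response to projected black
    maxColors = np.array([200.0, 210.0, 220.0])  # responses to pure B, G, R

    camera_px = np.array([[[105.0, 111.0, 117.5]]])  # a 1x1 "camera image"
    proj = cameraToProjectorColor(camera_px, black, maxColors)
    back = projectorToCameraColor(proj, black, maxColors)
    print(proj)  # -> [[[127.5 127.5 127.5]]]
    print(back)  # recovers the original camera pixel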