aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFranklin Wei <franklin@rockbox.org>2019-11-08 13:56:31 -0500
committerFranklin Wei <franklin@rockbox.org>2019-11-08 13:56:31 -0500
commit409ddec8c53a147a6c6e80e6d5a33f714f38b215 (patch)
tree07b5ee9ebdb0079c592fba8f5259622d92aea10e
parent36ad236d538eb5f7d4a4cff4df2e3d9763fef8d9 (diff)
downloadregentester-409ddec8c53a147a6c6e80e6d5a33f714f38b215.zip
regentester-409ddec8c53a147a6c6e80e6d5a33f714f38b215.tar.gz
regentester-409ddec8c53a147a6c6e80e6d5a33f714f38b215.tar.bz2
regentester-409ddec8c53a147a6c6e80e6d5a33f714f38b215.tar.xz
Rename main.ino and add video tachometer code
-rw-r--r--main/main.ino (renamed from main.ino)1
-rwxr-xr-xtachometer.py155
-rw-r--r--util.py81
3 files changed, 237 insertions, 0 deletions
diff --git a/main.ino b/main/main.ino
index f9cde91..57ba737 100644
--- a/main.ino
+++ b/main/main.ino
@@ -6,6 +6,7 @@ void setup() {
pinMode(2, OUTPUT);
digitalWrite(2, LOW);
+
pinMode(3, OUTPUT);
digitalWrite(3, LOW);
diff --git a/tachometer.py b/tachometer.py
new file mode 100755
index 0000000..ae0b11e
--- /dev/null
+++ b/tachometer.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+"""
+Simply display the contents of the webcam with optional mirroring using OpenCV
+via the new Pythonic cv2 interface. Press <esc> to quit.
+"""
+
+import cv2
+from util import *
+import matplotlib.pyplot as plt
+import time
+from scipy.interpolate import interp1d
+
+CAMERA_IDX=2
+
+# samples to keep
+WINDOW=100
+
+WIDTH=320
+HEIGHT=180
+
+# number of stripes on wheel
+TICS_PER_REV = 6
+
def show_webcam(mirror=False):
    """Open the capture device and configure its resolution.

    Despite the name, this function does not display anything: it only
    creates and configures the camera (``main`` currently duplicates this
    setup inline and never calls it).

    :param mirror: accepted for API compatibility; currently unused.
    :return: the configured ``cv2.VideoCapture``.  Previously the camera
        object was created and silently leaked; returning it lets callers
        actually use -- and eventually release -- the device.
    """
    cam = cv2.VideoCapture(CAMERA_IDX)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
    return cam
+
def getROI(cam):
    """Grab one frame from *cam* and let the user select a region of interest.

    :param cam: an opened ``cv2.VideoCapture``.
    :return: the ``(x, y, w, h)`` rectangle chosen via ``cv2.selectROI``.
    :raises RuntimeError: if no frame could be read.  Previously the
        return status was ignored and a failed read handed ``None`` to
        ``selectROI``, producing an opaque OpenCV error.
    """
    ok, img = cam.read()
    if not ok:
        raise RuntimeError("could not read frame from camera")
    return cv2.selectROI(img)
+
def get_first_higher(lst, target):
    """Return the index of the first element of *lst* strictly greater
    than *target*.

    Falls back to the last index (after printing a warning) when no
    element qualifies -- the caller uses this to band-pass an FFT, so a
    degenerate band collapses to the final bin rather than crashing.
    """
    for idx, value in enumerate(lst):
        if value > target:
            return idx
    print("WARNING: window size is too small to get desired RPM range")
    return len(lst) - 1  # nothing exceeded target; use the last bin
+
def getFFT(data, rate):
    """Given some data and rate, returns FFTfreq and FFT (half).

    The data is Hamming-windowed before transforming; only the positive
    half of the (magnitude) spectrum and its frequency axis are returned.
    """
    windowed = data * np.hamming(len(data))
    spectrum = np.abs(np.fft.fft(windowed))
    freqs = np.fft.fftfreq(len(spectrum), 1.0 / rate)
    half = len(spectrum) // 2
    return freqs[:half], spectrum[:half]
+
def getRPM(x_data, y_data, rpm_low=5, rpm_high=180):
    """Estimate rotation speed, in RPM, from a brightness time series.

    The (possibly unevenly spaced) samples are resampled onto a uniform
    grid with cubic interpolation, windowed and FFT'd, band-passed to the
    plausible RPM range, and the spectral peak location is converted from
    Hz to RPM using the TICS_PER_REV stripe count.

    :param x_data: sample timestamps in seconds, monotonically increasing.
    :param y_data: brightness samples, same length as ``x_data``.
    :param rpm_low: lower bound of the RPM search band (previously the
        hard-coded local constant RPM_LOW = 5).
    :param rpm_high: upper bound of the RPM search band (previously the
        hard-coded local constant RPM_HIGH = 180).
    :return: the RPM at which the band-passed, interpolated spectrum peaks.
    """
    delta_t = x_data[-1] - x_data[0]
    nsamples = len(x_data)
    supersample = 2  # oversample the uniform grid to smooth the cubic fit

    # Resample onto a uniform time grid starting at t = 0.
    t0 = x_data[0]
    f_t = interp1d([x - t0 for x in x_data], y_data, kind='cubic')
    eval_x = np.linspace(0, delta_t, nsamples * supersample)
    eval_y = f_t(eval_x)
    rate = nsamples * supersample / delta_t

    freq, fft = getFFT(eval_y, rate)

    # Band-pass filter: stripe frequency in Hz = RPM / 60 * TICS_PER_REV.
    lo = get_first_higher(freq, rpm_low / 60 * TICS_PER_REV)
    hi = get_first_higher(freq, rpm_high / 60 * TICS_PER_REV)
    freq = freq[lo:hi]
    fft = fft[lo:hi]

    # Convert bin frequencies from Hz to RPM.
    freq = 60 * freq / TICS_PER_REV

    # Interpolate the band-passed spectrum for sub-bin peak resolution.
    freq_interp = interp1d(freq, fft, kind='cubic')
    interp_res = 10
    freqs = np.linspace(freq[0], freq[-1], len(freq) * interp_res)
    interpolated = freq_interp(freqs)

    # np.argmax replaces the previous np.where(x == np.amax(x)) dance;
    # the debug print is gone -- main() already prints the return value.
    return freqs[np.argmax(interpolated)]
+
def main():
    """Video tachometer entry point.

    Tracks the average brightness of a user-selected region of the webcam
    image over a sliding window of WINDOW samples and prints the RPM
    estimated from its dominant frequency.  Press <esc> to quit.
    """
    cam = cv2.VideoCapture(CAMERA_IDX)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
    # Throw away the first frames; many cameras need a warm-up period
    # before exposure settles.
    cam.read()
    cam.read()

    x, y, w, h = getROI(cam)

    # Interactive plot set up for live display (currently only used for
    # the final brightness-vs-time plot after the loop exits).
    plt.ion()
    fig = plt.figure()
    ax = plt.subplot(1, 1, 1)
    ax.set_xlabel('RPM')
    ax.set_ylabel('Intensity')

    x_data, y_data = [], []
    ax.plot(x_data, y_data, 'ko-', markersize=10)  # empty line, filled later
    fig.show()

    while True:
        ret_val, img = cam.read()
        cv2.imshow('my webcam', img)

        extracted_region = img[y:y + h, x:x + w]
        cv2.imshow('extracted', extracted_region)

        # Collapse the ROI to a single brightness sample (mean over
        # pixels, then over the three color channels).  The previous
        # per-frame grayscale conversion was unused and has been removed.
        avg = averageColorOfRegion(extracted_region, 0, h, 0, w)
        avg = np.average(avg, axis=0)

        x_data.append(time.time())
        y_data.append(avg)
        # Keep a sliding window of the most recent WINDOW samples.
        if len(x_data) > WINDOW:
            x_data.pop(0)
            y_data.pop(0)

        # Only estimate once the window is full, so the FFT resolution
        # is consistent from reading to reading.
        if len(x_data) >= WINDOW:
            print(getRPM(x_data, y_data))

        if cv2.waitKey(1) == 27:
            break  # esc to quit
    cv2.destroyAllWindows()

    plt.plot(x_data, y_data)
    plt.show()
+
+if __name__ == '__main__':
+ main()
diff --git a/util.py b/util.py
new file mode 100644
index 0000000..d7343c8
--- /dev/null
+++ b/util.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Oct 10 17:39:12 2019
+
+@author: franklin
+"""
+
+import numpy as np
+import cv2
+
def clipAndConvert(img):
    """Clamp *img* to the displayable 0-255 range and return it as uint8."""
    clamped = np.clip(img, 0, 255)
    return clamped.astype('uint8')
+
def normalizeAndConvert(img):
    """Stretch *img* so its values span the full 0-255 range and return
    the result as an 8-bit image."""
    return cv2.normalize(
        img,
        None,
        alpha=0,
        beta=255,
        norm_type=cv2.NORM_MINMAX,
        dtype=cv2.CV_8U,
    )
+
def showResized(window, img):
    """Display *img* in *window* scaled to 640x480 (no-op when img is None)."""
    if img is None:
        return
    scaled = cv2.resize(clipAndConvert(img), (640, 480))
    cv2.imshow(window, scaled)
+
def increaseContrast(img, s=1.1):
    """Stretch pixel values away from mid-gray by factor *s* so lights
    get lighter and darks get darker.

    Returns a float32 array; values may fall outside [0, 255] and are
    NOT clamped here (see clipAndConvert).
    """
    centered = img.astype('float32') - 128
    return centered * s + 128
+
def averageColorOfRegion(im, x1, x2, y1, y2):
    """Return the mean color of the rectangle x1:x2 (columns) by
    y1:y2 (rows) of *im* -- one averaged value per color channel."""
    region = im[y1:y2, x1:x2]
    per_row = np.average(region, axis=0)
    return np.average(per_row, axis=0)
+
+# OpenCV stores colors as BGR
+RED_COMP = 2
+GREEN_COMP = 1
+BLUE_COMP = 0
+
+# Color space mapping information
+#
+# These variables describe how the camera and projector colors map to
+# each other.
+#
+# More specifically, we have six variables which describe the mapping:
+#
+# camera_Rmin, camera_Rmax
+# camera_Gmin, camera_Gmax
+# camera_Bmin, camera_Bmax
+#
+# R_min, G_min, and B_min are obtained by projecting a black image
+# (R,G,B=0). The three maximums are obtained by projecting pure red,
+# green, and blue. X_min and X_max describe the camera's color
+# response when shown a zero X component or a 255 X component,
+# respectively.
+#
+# We can then model the projector -> camera mapping as
+#
+# camera_R = camera_Rmin + (camera_Rmax - camera_Rmin) / 255 * projector_R
+#
+# The reverse mapping is
+#
+# projector_R = (camera_R - camera_Rmin) / (camera_Rmax - camera_Rmin) * 255
+
+# image is a 3D numpy array [y, x, color]
+# black is the minimum B, G, R components
+# maxColors is the maximum B, G, R components
+
# map camera colors to projector color space
def cameraToProjectorColor(image, black, maxColors):
    """Invert the projector->camera color model described above: map a
    camera-space *image* into projector color space using the calibration
    extremes *black* (camera response to R,G,B = 0) and *maxColors*
    (camera response to the full primaries).

    Returns an unnormalized float32 matrix -- values are not clamped.
    """
    shifted = (image - black).astype('float32')
    return shifted * 255 / (maxColors - black)
+
# projector color to camera color
def projectorToCameraColor(image, black, maxColors):
    """Apply the projector->camera color model: predict what the camera
    sees when the projector emits *image* (inverse of
    cameraToProjectorColor)."""
    scale = (maxColors - black) / 255
    return black + scale * image.astype('float32')