From 6aa165edb8e8c30b8632292f842f38afe0c354e6 Mon Sep 17 00:00:00 2001
From: Bart Moyaers
Date: Wed, 6 Nov 2019 14:41:57 +0100
Subject: [PATCH] first commit

---
 background_subtraction_heat.py | 66 ++++++++++++++++++++++++++++++++++
 bgsub_test.py                  | 22 ++++++++++++
 live_heatmap_diff_test.py      | 63 ++++++++++++++++++++++++++++++++
 optical_flow_test.py           | 59 ++++++++++++++++++++++++++++++
 video_test.py                  | 21 +++++++++++
 5 files changed, 231 insertions(+)
 create mode 100644 background_subtraction_heat.py
 create mode 100644 bgsub_test.py
 create mode 100644 live_heatmap_diff_test.py
 create mode 100644 optical_flow_test.py
 create mode 100644 video_test.py

diff --git a/background_subtraction_heat.py b/background_subtraction_heat.py
new file mode 100644
index 0000000..7f99b95
--- /dev/null
+++ b/background_subtraction_heat.py
@@ -0,0 +1,66 @@
+import numpy as np
+from typing import List
+import cv2
+
+class BackgroundHeatmap:
+    def __init__(self, capture):
+        self.heatmap = np.array([])
+        self.cap = capture
+        self.backsub = cv2.createBackgroundSubtractorMOG2()
+        # self.backsub = cv2.bgsegm_BackgroundSubtractorGSOC()
+        # self.backsub = cv2.back()
+
+        # Fill up with first frame
+        ret, frame = self.cap.read()
+        self.lastframe = self.backsub.apply(frame)
+        self.lastsum = self.to_floats(self.lastframe)
+
+    @staticmethod
+    def to_grayscale(frame):
+        return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+    @staticmethod
+    def to_floats(frame):
+        return cv2.normalize(frame, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
+
+    @staticmethod
+    def float_to_gray(frame):
+        return cv2.normalize(frame, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
+
+    @staticmethod
+    def gray_to_heat(frame):
+        return cv2.applyColorMap(frame, cv2.COLORMAP_JET)
+
+    def update(self):
+        ret, frame = self.cap.read()
+        self.lastframe = self.backsub.apply(frame)
+        self.lastsum += self.to_floats(self.lastframe)
+        self.heatmap = self.gray_to_heat(
+            self.float_to_gray(
+                self.to_floats(self.lastsum)
+            )
+        )
+
+cap = cv2.VideoCapture(0)
+diffsum = BackgroundHeatmap(cap)
+
+# Load diffsum up with first
+first = True
+while(True):
+    # Update heatmap
+    diffsum.update()
+
+    # Display the resulting frame
+    cv2.imshow('Heatmap', diffsum.heatmap)
+    cv2.imshow('Backsub', diffsum.lastframe)
+
+    if first:
+        cv2.moveWindow("Backsub", 1000, 100)
+        first = False
+
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+# When everything done, release the capture
+diffsum.cap.release()
+cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/bgsub_test.py b/bgsub_test.py
new file mode 100644
index 0000000..b8e19af
--- /dev/null
+++ b/bgsub_test.py
@@ -0,0 +1,22 @@
+from __future__ import print_function
+import cv2 as cv
+
+
+# backSub = cv.createBackgroundSubtractorMOG2()
+backSub = cv.createBackgroundSubtractorKNN()
+
+capture = cv.VideoCapture(0)
+
+while True:
+    ret, frame = capture.read()
+    if frame is None:
+        break
+
+    fgMask = backSub.apply(frame)
+
+    cv.imshow('Frame', frame)
+    cv.imshow('FG Mask', fgMask)
+
+    keyboard = cv.waitKey(30)
+    if keyboard == ord('q') or keyboard == 27:
+        break
\ No newline at end of file
diff --git a/live_heatmap_diff_test.py b/live_heatmap_diff_test.py
new file mode 100644
index 0000000..ae2de36
--- /dev/null
+++ b/live_heatmap_diff_test.py
@@ -0,0 +1,63 @@
+import numpy as np
+from typing import List
+import cv2
+
+class DiffSumHeatmap:
+    def __init__(self, capture):
+        self.diffs: List[np.ndarray] = []
+        self.heatmap = np.array([])
+        self.cap = capture
+
+        # Fill up with first frame
+        ret, frame = self.cap.read()
+        self.lastframe = frame
+        self.lastsum = self.to_floats(self.to_grayscale(frame))
+
+    @staticmethod
+    def to_grayscale(frame):
+        return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+    @staticmethod
+    def to_floats(frame):
+        return cv2.normalize(frame, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
+
+    @staticmethod
+    def float_to_gray(frame):
+        return cv2.normalize(frame, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
+
+    @staticmethod
+    def gray_to_heat(frame):
+        return cv2.applyColorMap(frame, cv2.COLORMAP_JET)
+
+    def update(self):
+        ret, frame = self.cap.read()
+        self.diffs.append(
+            self.to_grayscale(
+                cv2.absdiff(self.lastframe, frame)
+            )
+        )
+        self.lastsum += self.diffs[-1]
+        self.heatmap = self.gray_to_heat(
+            self.float_to_gray(
+                self.to_floats(self.lastsum)
+            )
+        )
+        self.lastframe = frame
+
+cap = cv2.VideoCapture(0)
+diffsum = DiffSumHeatmap(cap)
+
+# Load diffsum up with first
+
+while(True):
+    # Update heatmap
+    diffsum.update()
+
+    # Display the resulting frame
+    cv2.imshow('frame', diffsum.heatmap)
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+# When everything done, release the capture
+diffsum.cap.release()
+cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/optical_flow_test.py b/optical_flow_test.py
new file mode 100644
index 0000000..2401a2c
--- /dev/null
+++ b/optical_flow_test.py
@@ -0,0 +1,59 @@
+import numpy as np
+import cv2
+
+cap = cv2.VideoCapture(0)
+
+# params for ShiTomasi corner detection
+feature_params = dict( maxCorners = 100,
+                       qualityLevel = 0.3,
+                       minDistance = 7,
+                       blockSize = 7 )
+
+# Parameters for lucas kanade optical flow
+lk_params = dict( winSize = (15,15),
+                  maxLevel = 2,
+                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
+
+# Create some random colors
+color = np.random.randint(0,255,(100,3))
+
+# Take first frame and find corners in it
+ret, old_frame = cap.read()
+old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
+p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
+
+# Create a mask image for drawing purposes
+mask = np.zeros_like(old_frame)
+
+counter = 0
+while(1):
+    ret,frame = cap.read()
+    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+    # calculate optical flow
+    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
+
+    # Select good points
+    good_new = p1[st==1]
+    good_old = p0[st==1]
+
+    # draw the tracks (cast point coords to int for cv2.line / cv2.circle)
+    for i,(new,old) in enumerate(zip(good_new,good_old)):
+        a, b = new.ravel().astype(int)
+        c, d = old.ravel().astype(int)
+        mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)
+        frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1)
+    img = cv2.add(frame,mask)
+
+    cv2.imshow('frame',img)
+    k = cv2.waitKey(30) & 0xff
+    if k == 27:
+        break
+
+    # Now update the previous frame and previous points
+    old_gray = frame_gray.copy()
+    p0 = good_new.reshape(-1,1,2)
+    counter += 1
+
+cv2.destroyAllWindows()
+cap.release()
\ No newline at end of file
diff --git a/video_test.py b/video_test.py
new file mode 100644
index 0000000..95c159b
--- /dev/null
+++ b/video_test.py
@@ -0,0 +1,21 @@
+import numpy as np
+import cv2
+
+cap = cv2.VideoCapture(0)
+
+while(True):
+    # Capture frame-by-frame
+    ret, frame = cap.read()
+
+    # Our operations on the frame come here
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    color = cv2.applyColorMap(gray, cv2.COLORMAP_JET)
+
+    # Display the resulting frame
+    cv2.imshow('frame',color)
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+# When everything done, release the capture
+cap.release()
+cv2.destroyAllWindows()
\ No newline at end of file