How to detect whether an object is stationary with OpenCV
Question:
I am trying to print a message when a detected object has stopped moving for 5 seconds.
I can detect faces and whether motion occurs between frames. What method can I use to combine these and print a message when a face has been detected, but not motion, for 5 seconds?
I’ve been trying to use something similar to the time.time()
method used that prints a message once per second when a face is detected, but I can’t quite figure out the correct logic to check that the motion detection code hasn’t triggered.
import cv2 as cv
import time

# Open the default webcam (device 0).
cap = cv.VideoCapture(0)

# Debug helpers (left commented out): inspect the raw frame size.
# ret, size = cap.read()
# rows, cols, _ = size.shape
# print('Rows', rows)
# print('Cols', cols)

# Haar cascade classifier used for face detection.
# NOTE(review): assumes 'haar_face.xml' is in the working directory — verify.
haar_cascade = cv.CascadeClassifier('haar_face.xml')

# Fail fast if the webcam could not be opened.
if not cap.isOpened():
    raise IOError("Cannot open webcam")

# Start time for the once-per-second "face seen" messages.
t1 = time.time()

# Main capture loop: face detection runs on `frame`, motion detection on
# the difference between the two consecutive frames `frame1` and `frame2`.
while True:
    ret, frame = cap.read()
    ret1, frame1 = cap.read()
    ret2, frame2 = cap.read()
    if not (ret and ret1 and ret2):
        break  # camera disconnected or stream ended

    # fx=1/fy=1 keeps the original size; this call is effectively a no-op
    # placeholder for future scaling.
    frame = cv.resize(frame, None, fx=1, fy=1, interpolation=cv.INTER_AREA)

    # Divide the frame into four quadrant regions of interest (ROIs).
    # NOTE(review): slice bounds assume a 640x360 frame — confirm camera size.
    ROI1 = frame[0:180, 0:320]
    ROI2 = frame[0:180, 320:640]
    ROI3 = frame[180:360, 0:320]
    ROI4 = frame[180:360, 320:640]

    # Detect faces independently in each quadrant.
    faces_rect1 = haar_cascade.detectMultiScale(ROI1, scaleFactor=1.1, minNeighbors=5)
    faces_rect2 = haar_cascade.detectMultiScale(ROI2, scaleFactor=1.1, minNeighbors=5)
    faces_rect3 = haar_cascade.detectMultiScale(ROI3, scaleFactor=1.1, minNeighbors=5)
    faces_rect4 = haar_cascade.detectMultiScale(ROI4, scaleFactor=1.1, minNeighbors=5)

    # Draw a box around each detected face and report at most once per second.
    for (x, y, w, h) in faces_rect1:
        cv.rectangle(ROI1, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        t2 = time.time()
        if (t2 - t1) > 1:
            print('I SEE YOU IN 1')
            t1 = time.time()  # reset start time
    for (x, y, w, h) in faces_rect2:
        cv.rectangle(ROI2, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        t2 = time.time()
        if (t2 - t1) > 1:
            print('I SEE YOU IN 2')
            t1 = time.time()  # reset start time
    for (x, y, w, h) in faces_rect3:
        cv.rectangle(ROI3, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        t2 = time.time()
        if (t2 - t1) > 1:
            print('I SEE YOU IN 3')
            t1 = time.time()  # reset start time
    for (x, y, w, h) in faces_rect4:
        cv.rectangle(ROI4, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        t2 = time.time()
        if (t2 - t1) > 1:
            print('I SEE YOU IN 4')
            t1 = time.time()  # reset start time

    # Show all video feeds.
    cv.imshow('ROI1', ROI1)
    cv.imshow('ROI2', ROI2)
    cv.imshow('ROI3', ROI3)
    cv.imshow('ROI4', ROI4)

    # Motion detection pipeline: frame differencing -> grayscale -> blur ->
    # binary threshold -> dilation -> contour extraction.
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)

    # Box any sufficiently large moving region and flag movement on screen.
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 1000:
            continue  # ignore small contours (noise)
        cv.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        cv.putText(frame1, 'Status: {}'.format('Movement'), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), thickness=3)
    # cv.drawContours(frame1, contours, -1, (0,255,0), thickness=2)

    # Show the motion-detected feed.
    cv.imshow('Motion Feed', frame1)

    # Press ESC to quit.
    c = cv.waitKey(1)
    if c == 27:
        break

cap.release()
cv.destroyAllWindows()
Answers:
This is how I accomplished my goal:
# Draw rectangles around detected contours.
for contour in contours:
    (x, y, w, h) = cv.boundingRect(contour)
    if cv.contourArea(contour) < 1000:
        continue  # ignore small contours (noise)
    cv.rectangle(frame1, (x, y), (x+w, y+h), (0,255,0), thickness=2)
    cv.putText(frame1, 'Status: {}'.format('Movement'), (10,20), cv.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), thickness=3)
    # Motion observed: restart the "no motion" reference timer used by the
    # stationary-face check later in the loop.
    # NOTE(review): t3 should also be initialized before the main loop,
    # otherwise it is unbound until the first motion event — confirm.
    t3 = time.time()
In the motion detection code, I added `t3` so that every motion event resets the timer that is used further on.
# Draw rectangles around detected faces.
for (x, y, w, h) in faces_rect1:
    cv.rectangle(ROI1, (x, y), (x+w,y+h), (0,255,0), thickness=2)
    t2 = time.time()
    t4 = time.time()
    # Print the "seen" message only once: msgcnt1 acts as a latch.
    # NOTE(review): msgcnt1 must be initialized to [] before the loop — verify.
    if msgcnt1 == []:
        print('I SEE YOU IN 1')
        msgcnt1.append('x')
    # t3 restarts whenever motion is detected, so a face that has been
    # visible with no motion for more than 5 seconds is reported stationary.
    if (t4 - t3) > 5:
        print('No motion detected')
        t3 = time.time()  # reset start time
The `t4` timer starts when a face is detected. Since `t3` restarts whenever motion is detected, the detected face is considered stationary once `t4 - t3` reaches 5 seconds, and the message is printed.
# Initialize a list to store the current centroids.
centroids = []

# Loop over each detected bounding box.
for i, bbox in enumerate(bboxes):
    x1, y1, x2, y2 = bbox
    score = scores[i]

    # Centroid of the bbox.
    centroid_x = (x1 + x2) / 2
    centroid_y = (y1 + y2) / 2
    centroids.append((centroid_x, centroid_y))

    # Compare against the previous frame's centroid at the same index.
    # NOTE(review): indexing prev_centroids[i] assumes detection count and
    # ordering are stable between frames; otherwise this can raise IndexError
    # or compare mismatched objects — confirm with a proper tracker.
    if prev_centroids is not None:
        dx = centroid_x - prev_centroids[i][0]
        dy = centroid_y - prev_centroids[i][1]
        distance = np.sqrt(dx ** 2 + dy ** 2)
        if distance > threshold:
            # The centroid is moving
            # (do something here, e.g. draw a green bounding box)
            pass
        else:
            # The centroid is stationary
            # (do something here, e.g. draw a red bounding box)
            pass
    else:
        # This is the first frame, so all centroids are stationary
        # (do something here, e.g. draw a red bounding box)
        pass

# Update the list of previous centroids for the next frame.
prev_centroids = centroids
I am trying to print a message when a detected object has stopped moving for 5 seconds.
I can detect faces and whether motion occurs between frames. What method can I use to combine these and print a message when a face has been detected, but not motion, for 5 seconds?
I’ve been trying to use something similar to the time.time()
method used that prints a message once per second when a face is detected, but I can’t quite figure out the correct logic to check that the motion detection code hasn’t triggered.
import cv2 as cv
import time

# Open the default webcam (device 0).
cap = cv.VideoCapture(0)

# Debug helpers (left commented out): inspect the raw frame size.
# ret, size = cap.read()
# rows, cols, _ = size.shape
# print('Rows', rows)
# print('Cols', cols)

# Haar cascade classifier used for face detection.
# NOTE(review): assumes 'haar_face.xml' is in the working directory — verify.
haar_cascade = cv.CascadeClassifier('haar_face.xml')

# Fail fast if the webcam could not be opened.
if not cap.isOpened():
    raise IOError("Cannot open webcam")

# Start time for the once-per-second "face seen" messages.
t1 = time.time()

# Main capture loop: face detection runs on `frame`, motion detection on
# the difference between the two consecutive frames `frame1` and `frame2`.
while True:
    ret, frame = cap.read()
    ret1, frame1 = cap.read()
    ret2, frame2 = cap.read()
    if not (ret and ret1 and ret2):
        break  # camera disconnected or stream ended

    # fx=1/fy=1 keeps the original size; this call is effectively a no-op
    # placeholder for future scaling.
    frame = cv.resize(frame, None, fx=1, fy=1, interpolation=cv.INTER_AREA)

    # Divide the frame into four quadrant regions of interest (ROIs).
    # NOTE(review): slice bounds assume a 640x360 frame — confirm camera size.
    ROI1 = frame[0:180, 0:320]
    ROI2 = frame[0:180, 320:640]
    ROI3 = frame[180:360, 0:320]
    ROI4 = frame[180:360, 320:640]

    # Detect faces independently in each quadrant.
    faces_rect1 = haar_cascade.detectMultiScale(ROI1, scaleFactor=1.1, minNeighbors=5)
    faces_rect2 = haar_cascade.detectMultiScale(ROI2, scaleFactor=1.1, minNeighbors=5)
    faces_rect3 = haar_cascade.detectMultiScale(ROI3, scaleFactor=1.1, minNeighbors=5)
    faces_rect4 = haar_cascade.detectMultiScale(ROI4, scaleFactor=1.1, minNeighbors=5)

    # Draw a box around each detected face and report at most once per second.
    for (x, y, w, h) in faces_rect1:
        cv.rectangle(ROI1, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        t2 = time.time()
        if (t2 - t1) > 1:
            print('I SEE YOU IN 1')
            t1 = time.time()  # reset start time
    for (x, y, w, h) in faces_rect2:
        cv.rectangle(ROI2, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        t2 = time.time()
        if (t2 - t1) > 1:
            print('I SEE YOU IN 2')
            t1 = time.time()  # reset start time
    for (x, y, w, h) in faces_rect3:
        cv.rectangle(ROI3, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        t2 = time.time()
        if (t2 - t1) > 1:
            print('I SEE YOU IN 3')
            t1 = time.time()  # reset start time
    for (x, y, w, h) in faces_rect4:
        cv.rectangle(ROI4, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        t2 = time.time()
        if (t2 - t1) > 1:
            print('I SEE YOU IN 4')
            t1 = time.time()  # reset start time

    # Show all video feeds.
    cv.imshow('ROI1', ROI1)
    cv.imshow('ROI2', ROI2)
    cv.imshow('ROI3', ROI3)
    cv.imshow('ROI4', ROI4)

    # Motion detection pipeline: frame differencing -> grayscale -> blur ->
    # binary threshold -> dilation -> contour extraction.
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)

    # Box any sufficiently large moving region and flag movement on screen.
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 1000:
            continue  # ignore small contours (noise)
        cv.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        cv.putText(frame1, 'Status: {}'.format('Movement'), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), thickness=3)
    # cv.drawContours(frame1, contours, -1, (0,255,0), thickness=2)

    # Show the motion-detected feed.
    cv.imshow('Motion Feed', frame1)

    # Press ESC to quit.
    c = cv.waitKey(1)
    if c == 27:
        break

cap.release()
cv.destroyAllWindows()
This is how I accomplished my goal:
# Draw rectangles around detected contours.
for contour in contours:
    (x, y, w, h) = cv.boundingRect(contour)
    if cv.contourArea(contour) < 1000:
        continue  # ignore small contours (noise)
    cv.rectangle(frame1, (x, y), (x+w, y+h), (0,255,0), thickness=2)
    cv.putText(frame1, 'Status: {}'.format('Movement'), (10,20), cv.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), thickness=3)
    # Motion observed: restart the "no motion" reference timer used by the
    # stationary-face check later in the loop.
    # NOTE(review): t3 should also be initialized before the main loop,
    # otherwise it is unbound until the first motion event — confirm.
    t3 = time.time()
In the motion detection code, I added `t3` so that every motion event resets the timer that is used further on.
# Draw rectangles around detected faces.
for (x, y, w, h) in faces_rect1:
    cv.rectangle(ROI1, (x, y), (x+w,y+h), (0,255,0), thickness=2)
    t2 = time.time()
    t4 = time.time()
    # Print the "seen" message only once: msgcnt1 acts as a latch.
    # NOTE(review): msgcnt1 must be initialized to [] before the loop — verify.
    if msgcnt1 == []:
        print('I SEE YOU IN 1')
        msgcnt1.append('x')
    # t3 restarts whenever motion is detected, so a face that has been
    # visible with no motion for more than 5 seconds is reported stationary.
    if (t4 - t3) > 5:
        print('No motion detected')
        t3 = time.time()  # reset start time
The `t4` timer starts when a face is detected. Since `t3` restarts whenever motion is detected, the detected face is considered stationary once `t4 - t3` reaches 5 seconds, and the message is printed.
# Initialize a list to store the current centroids.
centroids = []

# Loop over each detected bounding box.
for i, bbox in enumerate(bboxes):
    x1, y1, x2, y2 = bbox
    score = scores[i]

    # Centroid of the bbox.
    centroid_x = (x1 + x2) / 2
    centroid_y = (y1 + y2) / 2
    centroids.append((centroid_x, centroid_y))

    # Compare against the previous frame's centroid at the same index.
    # NOTE(review): indexing prev_centroids[i] assumes detection count and
    # ordering are stable between frames; otherwise this can raise IndexError
    # or compare mismatched objects — confirm with a proper tracker.
    if prev_centroids is not None:
        dx = centroid_x - prev_centroids[i][0]
        dy = centroid_y - prev_centroids[i][1]
        distance = np.sqrt(dx ** 2 + dy ** 2)
        if distance > threshold:
            # The centroid is moving
            # (do something here, e.g. draw a green bounding box)
            pass
        else:
            # The centroid is stationary
            # (do something here, e.g. draw a red bounding box)
            pass
    else:
        # This is the first frame, so all centroids are stationary
        # (do something here, e.g. draw a red bounding box)
        pass

# Update the list of previous centroids for the next frame.
prev_centroids = centroids