Raspberry Pi smart car: object tracking with a camera and OpenCV
I have been working on this Raspberry Pi smart car for over a year now; it started out with simple remote control and obstacle avoidance.
I had already tried installing OpenCV several times and always failed at the cmake build step, but a problem exists to be solved. After digging through quite a few posts I finally found a reliable guide, and after one more afternoon the install finally succeeded. The install walkthrough is long and Toutiao would likely flag a repost as plagiarism, so I'll put it in the comments instead. Then came the next question: OpenCV is installed, but how do I actually implement object tracking? I started searching GitHub for examples.
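Before moving on, it is worth a one-line sanity check that the build actually produced a usable cv2 module. A minimal check (assuming the interpreter you built against is the default python on the Pi):

python -c "import cv2; print(cv2.__version__)"

If that prints a version string instead of an ImportError, the install is fine.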
That brings us to detect.py, the source for the car's object tracking. How does it work? First it captures a frame and locks onto an object to track: whatever sits inside a small window at the centre of the first frame becomes the target, and OpenCV's meanShift then follows that target from frame to frame via histogram back-projection. The script also defines forward, backward, left, right, and stop actions, so once the locked-on object moves, the car responds to its position and drives after it.
The detect.py source follows:
# import the required packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import serial
import syslog
import time
import numpy as np
import RPi.GPIO as GPIO

# size of the captured frame and of the central tracking window
width = 320
height = 240
tracking_width = 40
tracking_height = 40
auto_mode = 0
# motion helpers for the car: pins 11/12 drive one motor, 15/16 the other
# (BOARD numbering; the pins are configured in check_for_direction below)
def t_stop():
    GPIO.output(11, False)
    GPIO.output(12, False)
    GPIO.output(15, False)
    GPIO.output(16, False)

def t_up():
    # pulse both motors forward for 50 ms, then coast for 300 ms
    GPIO.output(11, True)
    GPIO.output(12, False)
    GPIO.output(15, True)
    GPIO.output(16, False)
    time.sleep(0.05)
    t_stop()
    time.sleep(0.3)

def t_down():
    # reverse both motors (left running until the next command)
    GPIO.output(11, False)
    GPIO.output(12, True)
    GPIO.output(15, False)
    GPIO.output(16, True)

def t_left():
    # pulse the motors in opposite directions to turn, then coast
    GPIO.output(11, False)
    GPIO.output(12, True)
    GPIO.output(15, True)
    GPIO.output(16, False)
    time.sleep(0.05)
    t_stop()
    time.sleep(0.3)

def t_right():
    GPIO.output(11, True)
    GPIO.output(12, False)
    GPIO.output(15, False)
    GPIO.output(16, True)
    time.sleep(0.05)
    t_stop()
    time.sleep(0.3)

def t_open():
    # drive pin 22 low (helper not used by the tracking loop)
    GPIO.setup(22, GPIO.OUT)
    GPIO.output(22, GPIO.LOW)

def t_close():
    # release pin 22 by turning it back into an input
    GPIO.setup(22, GPIO.IN)

def check_for_direction(position_x):
    # (re)configure the pins; setwarnings(False) silences the
    # "channel already in use" warning from repeated setup calls
    GPIO.setmode(GPIO.BOARD)
    GPIO.setwarnings(False)
    GPIO.setup(11, GPIO.OUT)
    GPIO.setup(12, GPIO.OUT)
    GPIO.setup(15, GPIO.OUT)
    GPIO.setup(16, GPIO.OUT)
    GPIO.setup(38, GPIO.OUT)
    if position_x == 0 or position_x == width:
        print('out of bound')
        t_stop()
    if position_x < (width - tracking_width) // 2:
        # target is left of the central dead band
        print('move right!')
        t_right()
    elif position_x >= (width - tracking_width) // 2 + tracking_width:
        # target is right of the central dead band
        print('move left!')
        t_left()
    else:
        # target is roughly centred: drive forward
        t_up()
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (width, height)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(width, height))
rawCapture2 = PiRGBArray(camera, size=(width, height))

# allow the camera to warm up
time.sleep(0.1)

# set the ROI (Region of Interest): a tracking_width x tracking_height
# box in the centre of the frame (integer division keeps the indices ints)
c, r, w, h = (width // 2 - tracking_width // 2), (height // 2 - tracking_height // 2), tracking_width, tracking_height
track_window = (c, r, w, h)

# capture a single frame: whatever is inside the centre box at startup
# becomes the tracking target
camera.capture(rawCapture2, format='bgr')

# build a mask and a normalized hue histogram of the target region
roi = rawCapture2.array[r:r + h, c:c + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array([0, 30, 32]), np.array([180, 255, 255]))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# stop meanShift after 80 iterations or once it moves by less than 1 px
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
# capture frames from the camera continuously
for frame in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True):
    # grab the raw NumPy array representing the image
    image = frame.array

    # back-project the target histogram onto the new frame, then let
    # meanShift slide the tracking window toward the densest region
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    ret, track_window = cv2.meanShift(dst, track_window, term_crit)

    # draw the tracked window on the frame
    x, y, w, h = track_window
    cv2.rectangle(image, (x, y), (x + w, y + h), 255, 2)
    cv2.putText(image, 'Tracked', (x - 25, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

    # show the frame
    cv2.imshow("Raspberry Pi RC Car", image)
    key = cv2.waitKey(1) & 0xFF

    # steer the car toward the window's new x position
    check_for_direction(x)
    time.sleep(0.01)

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
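That is the whole script. If you want to try the tracking half without the car attached, the same meanShift loop runs against an ordinary webcam through cv2.VideoCapture. Below is a cut-down sketch of my own for that (a hypothetical test script, not part of detect.py; it assumes camera index 0 and drops all the GPIO code):

# meanShift tracking demo without any GPIO (hypothetical test script)
import cv2
import numpy as np

cap = cv2.VideoCapture(0)  # default webcam; adjust the index if needed
ok, frame = cap.read()
if not ok:
    raise RuntimeError('cannot read from the camera')

# same centred tracking box as detect.py
h0, w0 = frame.shape[:2]
tw, th = 40, 40
c, r = w0 // 2 - tw // 2, h0 // 2 - th // 2
track_window = (c, r, tw, th)

# hue histogram of whatever sits in the centre box on the first frame
roi = frame[r:r + th, c:c + tw]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array([0, 30, 32]), np.array([180, 255, 255]))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    ret, track_window = cv2.meanShift(dst, track_window, term_crit)
    x, y, w, h = track_window
    cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
    cv2.imshow('meanShift demo', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break

cap.release()
cv2.destroyAllWindows()

One design note: meanShift keeps the tracking window at a fixed size, which is exactly what check_for_direction relies on, since its left/forward/right bands are computed from tracking_width. OpenCV's cv2.CamShift would adapt the window size and rotation, at the cost of complicating that simple steering logic.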