The following shares a Python implementation of image outer-boundary tracing, with supplementary OpenCV target-tracking examples.
#! /usr/bin/env python
# coding=utf-8
"""Trace the outer boundary of a dark shape in a binary image.

Reads ``trace_border2.bmp``, binarizes its green channel, finds the first
black (0) pixel as the starting point, then walks the contour clockwise
using a Moore-neighborhood search, drawing the traced path in red.
"""
import cv2

img = cv2.imread("trace_border2.bmp")
img_h, img_w, img_channel = img.shape
trace = []
start_x = 0
start_y = 0

# Binarize the green channel: pixels above the threshold become white (255),
# the rest become black (0).  NOTE(review): the scraped source lost the
# comparison operator ("if(gray[h,w]128)"); "> 128" is the conventional
# mid-scale threshold — confirm against the original article.
gray = img[:, :, 1]
for h in range(img_h):
    for w in range(img_w):
        if gray[h, w] > 128:
            gray[h, w] = 255
        else:
            gray[h, w] = 0


# Python jumps out of multiple loops via an exception:
# https://www.cnblogs.com/xiaojiayu/p/5195316.html
class getoutofloop(Exception):
    pass


# Scan row-major for the first black pixel; that is the trace start point.
try:
    for h in range(img_h - 2):
        for w in range(img_w - 2):
            if gray[h, w] == 0:
                start_x = w
                start_y = h
                raise getoutofloop
except getoutofloop:
    pass

print("Start Point (%d %d)" % (start_x, start_y))
trace.append([start_x, start_y])

# 8-neighborhood offsets, enumerated clockwise starting from the upper-left.
neighbor = [[-1, -1], [0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0]]
neighbor_len = len(neighbor)

# Start searching from the upper-left of the current point.
# If that neighbor is also a black dot (boundary point):
#     rotate the search direction 90 degrees counterclockwise (i -= 2)
# otherwise:
#     rotate the search direction 45 degrees clockwise (i += 1)
i = 0
cur_x = start_x + neighbor[i][0]
cur_y = start_y + neighbor[i][1]
is_contour_point = 0
try:
    # Walk the contour until we arrive back at the starting point.
    while not ((cur_x == start_x) and (cur_y == start_y)):
        is_contour_point = 0
        while is_contour_point == 0:
            if gray[cur_y, cur_x] == 0:
                is_contour_point = 1
                trace.append([cur_x, cur_y])
                i -= 2
                if i < 0:
                    i += neighbor_len
            else:
                i += 1
                # Original scraped text read "if i = neighbor_len" (a syntax
                # error); ">=" keeps the index wrapped into range.
                if i >= neighbor_len:
                    i -= neighbor_len
            cur_x = cur_x + neighbor[i][0]
            cur_y = cur_y + neighbor[i][1]
except IndexError:
    # The walk can step off the image edge for shapes touching the border;
    # report it instead of silently swallowing every exception.
    print("throw error")

# Draw the traced boundary in red, then mark the start with a blue box.
for i in range(len(trace) - 1):
    cv2.line(img, (trace[i][0], trace[i][1]),
             (trace[i + 1][0], trace[i + 1][1]), (0, 0, 255), 3)
    cv2.imshow("img", img)
    cv2.waitKey(10)

cv2.rectangle(img, (start_x, start_y), (start_x + 20, start_y + 20),
              (255, 0, 0), 2)
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyWindow("img")
The search process, with the traced boundary marked by the red line, looks as follows:
Supplementary knowledge: Python achieves target tracking (opencv)
1. Single target tracking
import cv2
import sys

"""Single-object tracking with OpenCV's MIL tracker.

Reads ./data/1.mp4, lets the user select an ROI on the first frame, then
tracks that region frame by frame, drawing the bounding box and FPS.
Press Esc to quit.
"""

(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
print(major_ver, minor_ver, subminor_ver)

if __name__ == '__main__':
    # Create tracker
    tracker_type = 'MIL'
    tracker = cv2.TrackerMIL_create()

    # Read in video
    video = cv2.VideoCapture("./data/1.mp4")

    # Read in the first frame
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Define a bounding box; the hard-coded default is immediately
    # replaced by an interactive ROI selection.
    bbox = (287, 23, 86, 320)
    bbox = cv2.selectROI(frame, False)

    # Initialize the tracker with the first frame and the selected box.
    ok = tracker.init(frame, bbox)

    while True:
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate FPS from the ticks spent in update()
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box on success; report failure otherwise.
        if ok:
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            cv2.putText(frame, "Tracking failed detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type
        cv2.putText(frame, tracker_type + "Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Show FPS
        cv2.putText(frame, "FPS:" + str(fps), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Result
        cv2.imshow("Tracking", frame)

        # Exit on Esc
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
2. Multi-target tracking
When using GOTURN as a tracker, you must place goturn.caffemodel and goturn.prototxt in the working directory for it to run. A solution to this problem is described at https://stackoverflow.com/questions/48802603/getting-deep-learning-tracker-goturn-to-run-opencv-python
import cv2
import sys

"""Multi-object tracking with OpenCV's MultiTracker and three MIL trackers.

Reads ./data/1.mp4, lets the user select three ROIs on the first frame,
then tracks all three regions frame by frame, drawing each bounding box
and the FPS. Press Esc to quit.
"""

(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
print(major_ver, minor_ver, subminor_ver)

if __name__ == '__main__':
    # Create tracker.
    # Available types: 'BOOSTING','MIL','KCF','TLD','MEDIANFLOW','GOTURN','MOSSE'
    tracker_type = 'MIL'
    tracker = cv2.MultiTracker_create()

    # Create window
    cv2.namedWindow("Tracking")

    # Read in video
    video = cv2.VideoCapture("./data/1.mp4")

    # Read in the first frame
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Select one bounding box per object to track.
    box1 = cv2.selectROI("Tracking", frame)
    box2 = cv2.selectROI("Tracking", frame)
    box3 = cv2.selectROI("Tracking", frame)

    # Register each box with its own MIL tracker on the first frame.
    ok = tracker.add(cv2.TrackerMIL_create(), frame, box1)
    ok1 = tracker.add(cv2.TrackerMIL_create(), frame, box2)
    ok2 = tracker.add(cv2.TrackerMIL_create(), frame, box3)

    while True:
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update all trackers at once; `boxes` holds one box per object.
        ok, boxes = tracker.update(frame)
        print(ok, boxes)

        # Calculate FPS from the ticks spent in update()
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        for box in boxes:
            # Draw bounding box.  NOTE(review): `ok` is the aggregate
            # update status, not per-box — every box shares the same flag.
            if ok:
                p1 = (int(box[0]), int(box[1]))
                p2 = (int(box[0] + box[2]), int(box[1] + box[3]))
                cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
            else:
                cv2.putText(frame, "Tracking failed detected", (100, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

            # Display tracker type
            cv2.putText(frame, tracker_type + "Tracker", (100, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            # Show FPS
            cv2.putText(frame, "FPS:" + str(fps), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Result
        cv2.imshow("Tracking", frame)

        # Exit on Esc
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
The above Python implementation of image outer-boundary tracing is all the content shared here; I hope it serves as a useful reference.
Recommended Posts