Python implementation of single-target, multi-target, multi-scale, and custom-feature KCF tracking (example code)
Single-target tracking:
Simply call the tracker implementations bundled with OpenCV (opencv-contrib).
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:50:47 2020
Chapter 4: KCF tracking
@author: youxinlin
"""
import cv2
from items import MessageItem
import time
import numpy as np
'''
Watcher module, responsible for intrusion detection and target tracking
'''
class WatchDog(object):
    # Intrusion detector module, used for motion-based intrusion detection
    def __init__(self, frame=None):
        # Motion detector constructor
        self._background = None
        if frame is not None:
            self._background = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
        self.es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))

    def isWorking(self):
        # Whether the motion detector is currently running
        return self._background is not None

    def startWorking(self, frame):
        # Start the motion detector
        if frame is not None:
            self._background = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)

    def stopWorking(self):
        # Stop the motion detector
        self._background = None

    def analyze(self, frame):
        # Motion detection
        if frame is None or self._background is None:
            return
        sample_frame = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
        diff = cv2.absdiff(self._background, sample_frame)
        diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]
        diff = cv2.dilate(diff, self.es, iterations=2)
        # OpenCV 3.x findContours returns (image, contours, hierarchy); 4.x returns (contours, hierarchy)
        image, cnts, hierarchy = cv2.findContours(diff.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        coordinate = []
        bigC = None
        bigMulti = 0
        for c in cnts:
            if cv2.contourArea(c) < 1500:
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            if w * h > bigMulti:
                bigMulti = w * h
                bigC = ((x, y), (x + w, y + h))
        if bigC:
            coordinate.append(bigC)
        message = {"coord": coordinate}
        message['msg'] = None
        return MessageItem(frame, message)
class Tracker(object):
    '''
    Tracker module, used to follow a specified target
    '''
    def __init__(self, tracker_type="BOOSTING", draw_coord=True):
        '''
        Initialize the tracker type
        '''
        # Get the OpenCV version
        (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
        self.isWorking = False
        self.draw_coord = draw_coord
        self.tracker = None
        # Build the tracker (cv2.Tracker_create only exists in OpenCV 3.0-3.2)
        if int(major_ver) == 3 and int(minor_ver) < 3:
            self.tracker = cv2.Tracker_create(tracker_type)
        else:
            if tracker_type == 'BOOSTING':
                self.tracker = cv2.TrackerBoosting_create()
            if tracker_type == 'MIL':
                self.tracker = cv2.TrackerMIL_create()
            if tracker_type == 'KCF':
                self.tracker = cv2.TrackerKCF_create()
            if tracker_type == 'TLD':
                self.tracker = cv2.TrackerTLD_create()
            if tracker_type == 'MEDIANFLOW':
                self.tracker = cv2.TrackerMedianFlow_create()
            if tracker_type == 'GOTURN':
                self.tracker = cv2.TrackerGOTURN_create()

    def initWorking(self, frame, box):
        '''
        Initialize tracking
        frame: the frame used to initialize the tracker
        box: the region to track
        '''
        if not self.tracker:
            raise Exception("Tracker not initialized")
        status = self.tracker.init(frame, box)
        if not status:
            raise Exception("Tracker initialization failed")
        self.isWorking = True

    def track(self, frame):
        '''
        Track the target in one frame
        '''
        message = None
        if self.isWorking:
            status, coord = self.tracker.update(frame)
            if status:
                message = {"coord": [((int(coord[0]), int(coord[1])),
                                      (int(coord[0] + coord[2]), int(coord[1] + coord[3])))]}
                if self.draw_coord:
                    p1 = (int(coord[0]), int(coord[1]))
                    p2 = (int(coord[0] + coord[2]), int(coord[1] + coord[3]))
                    cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
                    message['msg'] = "is tracking"
        return MessageItem(frame, message)
class ObjectTracker(object):
    # Haar-cascade based detector (e.g. face detection)
    def __init__(self, dataSet):
        self.cascade = cv2.CascadeClassifier(dataSet)

    def track(self, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.cascade.detectMultiScale(gray, 1.03, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        return frame
if __name__ == '__main__':
    # tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker = Tracker(tracker_type="KCF")
    # video = cv2.VideoCapture(0)
    # video = cv2.VideoCapture("v")
    video = cv2.VideoCapture(r"/Users/youxinlin/Desktop/video_data/complex1.MOV")
    ok, frame = video.read()
    bbox = cv2.selectROI(frame, False)
    tracker.initWorking(frame, bbox)
    while True:
        ok, frame = video.read()
        if ok:
            item = tracker.track(frame)
            cv2.imshow("track", item.getFrame())
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                break
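Note that the factory names above (cv2.TrackerBoosting_create and friends) follow the OpenCV 3.x contrib API, and Tracker.init returns a bool only in 3.x (in 4.x it returns None, so the status check should be skipped there). If your installed version differs, a small compatibility helper along these lines can be used instead; the helper name create_tracker is my own illustration, not part of the original code:

import cv2

def create_tracker(tracker_type="KCF"):
    # Best-effort factory lookup across OpenCV 3.x / 4.x builds of opencv-contrib.
    # From roughly 4.5.1 onward, BOOSTING/TLD/MEDIANFLOW (and MultiTracker) live in cv2.legacy.
    name = {
        "BOOSTING": "TrackerBoosting_create",
        "MIL": "TrackerMIL_create",
        "KCF": "TrackerKCF_create",
        "TLD": "TrackerTLD_create",
        "MEDIANFLOW": "TrackerMedianFlow_create",
        "GOTURN": "TrackerGOTURN_create",
    }[tracker_type]
    for module in (cv2, getattr(cv2, "legacy", None)):
        if module is not None and hasattr(module, name):
            return getattr(module, name)()
    raise RuntimeError("tracker type %s not available in this OpenCV build" % tracker_type)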
items.py is attached below; put it in the same folder:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:51:04 2020
@author: youxinlin
"""
import json
from utils import IOUtil
'''
Message wrapper class
'''
class MessageItem(object):
    # Wraps a message consisting of an image frame plus additional information
    def __init__(self, frame, message):
        self._frame = frame
        self._message = message

    def getFrame(self):
        # Image data
        return self._frame

    def getMessage(self):
        # Text information, JSON-serializable
        return self._message

    def getBase64Frame(self):
        # Return the frame as a base64-encoded JPEG, converting BGR to RGB first
        jepg = IOUtil.array_to_bytes(self._frame[..., ::-1])
        return IOUtil.bytes_to_base64(jepg)

    def getBase64FrameByte(self):
        # Return the base64-encoded frame as bytes
        return bytes(self.getBase64Frame())

    def getJson(self):
        # Return the data as a JSON string
        dicdata = {"frame": self.getBase64Frame().decode(), "message": self.getMessage()}
        return json.dumps(dicdata)

    def getBinaryFrame(self):
        return IOUtil.array_to_bytes(self._frame[..., ::-1])
utils.py: put this in the same folder as well.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:51:40 2020
@author: youxinlin
"""
import time
import numpy
import base64
import os
import logging
import sys
import struct
from PIL import Image
from io import BytesIO

# Utility class
class IOUtil(object):
    # Stream helper utilities
    @staticmethod
    def array_to_bytes(pic, formatter="jpeg", quality=70):
        '''
        Static method: convert a numpy array to a binary (JPEG) stream
        :param pic: numpy array
        :param formatter: image format
        :param quality: compression quality; stronger compression yields shorter binary data
        :return:
        '''
        stream = BytesIO()
        picture = Image.fromarray(pic)
        picture.save(stream, format=formatter, quality=quality)
        jepg = stream.getvalue()
        stream.close()
        return jepg

    @staticmethod
    def bytes_to_base64(byte):
        '''
        Static method: encode bytes as base64
        :param byte:
        :return:
        '''
        return base64.b64encode(byte)

    @staticmethod
    def transport_rgb(frame):
        '''
        Convert a BGR image to RGB, or an RGB image to BGR
        '''
        return frame[..., ::-1]

    @staticmethod
    def byte_to_package(byte, cmd, ver=1):
        '''
        Pack the binary data of one frame into a length-prefixed packet
        :param byte: binary payload
        :param cmd: command code
        :return:
        '''
        head = [ver, len(byte), cmd]
        headPack = struct.pack("!3I", *head)
        senddata = headPack + byte
        return senddata

    @staticmethod
    def mkdir(filePath):
        '''
        Create a directory if it does not exist
        '''
        if not os.path.exists(filePath):
            os.mkdir(filePath)

    @staticmethod
    def countCenter(box):
        '''
        Compute the center of a rectangle given as ((x1, y1), (x2, y2))
        '''
        return (int(abs(box[0][0] - box[1][0]) * 0.5) + box[0][0],
                int(abs(box[0][1] - box[1][1]) * 0.5) + box[0][1])

    @staticmethod
    def countBox(center):
        '''
        From two corner points compute (x, y, w, h)
        '''
        return (center[0][0], center[0][1],
                center[1][0] - center[0][0], center[1][1] - center[0][1])

    @staticmethod
    def getImageFileName():
        return time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime()) + '.png'
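A quick sanity check of how the two helper files fit together (the dummy frame and message below are fabricated purely for illustration):

import numpy as np
from items import MessageItem

# Fabricated 100x100 black BGR frame and a fake tracking result, just to exercise the helpers
frame = np.zeros((100, 100, 3), dtype=np.uint8)
item = MessageItem(frame, {"coord": [((10, 10), (50, 50))], "msg": "is tracking"})
payload = item.getJson()  # JSON string carrying a base64-encoded JPEG plus the message dict
print(payload[:80], "...")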
Multi-target tracking:
Much the same as single-target tracking, except that MultiTracker_create() is used.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 18:02:33 2020
Multi-target tracking
@author: youxinlin
"""
import numpy as np
import cv2
import sys

'''
if len(sys.argv) != 2:
    print('Input video name is missing')
    exit()
'''

print('Select multiple tracking targets')
cv2.namedWindow("tracking")
camera = cv2.VideoCapture(r"/Users/youxinlin/Desktop/video_data/complex6.MOV")
# camera = cv2.VideoCapture(0)
tracker = cv2.MultiTracker_create()  # multi-target tracker
init_once = False

ok, image = camera.read()
if not ok:
    print('Failed to read video')
    exit()

bbox1 = cv2.selectROI('tracking', image)
bbox2 = cv2.selectROI('tracking', image)
bbox3 = cv2.selectROI('tracking', image)

while camera.isOpened():
    ok, image = camera.read()
    if not ok:
        print('no image to read')
        break
    if not init_once:
        ok = tracker.add(cv2.TrackerKCF_create(), image, bbox1)
        ok = tracker.add(cv2.TrackerKCF_create(), image, bbox2)
        ok = tracker.add(cv2.TrackerKCF_create(), image, bbox3)
        init_once = True
    ok, boxes = tracker.update(image)
    for newbox in boxes:
        p1 = (int(newbox[0]), int(newbox[1]))
        p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv2.rectangle(image, p1, p2, (200, 0, 0))
    cv2.imshow('tracking', image)
    k = cv2.waitKey(1)
    if k == 27:
        break  # esc pressed
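One caveat on versions: cv2.MultiTracker_create() exists in OpenCV 3.x and early 4.x builds of opencv-contrib; from roughly 4.5.1 onward it was moved to the legacy submodule, and the trackers you add to it must come from there as well. A hedged fallback, assuming an opencv-contrib-python install:

import cv2

# Pick whichever multi-tracker API this OpenCV build exposes
if hasattr(cv2, "MultiTracker_create"):
    tracker = cv2.MultiTracker_create()
    make_kcf = cv2.TrackerKCF_create
else:
    tracker = cv2.legacy.MultiTracker_create()
    make_kcf = cv2.legacy.TrackerKCF_create  # the legacy MultiTracker needs legacy trackers

# tracker.add(make_kcf(), image, bbox1)  # the rest of the loop above is unchanged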
Multi-scale KCF, and KCF with custom features
In some scenarios you may not want to track with the default HOG features, or you may want to compare how different features perform. The packaged OpenCV tracker is not flexible enough for that, so you need to write the KCF code yourself and plug in the features of your choice.
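To get started on that, below is a compact, NumPy-only sketch of the core KCF machinery: kernel ridge regression with a Gaussian kernel, trained and evaluated in the Fourier domain, with a pluggable features() hook and a crude multi-scale search over a few window scales. It follows the formulas from the KCF paper (Henriques et al.), but the names (SimpleKCF, features, gaussian_kernel_correlation), the parameter values, and the scale handling are my own illustration rather than code from the original post, and it omits refinements such as HOG cells, multi-channel features, and sub-pixel peak interpolation.

import numpy as np
import cv2

def features(patch):
    # Feature hook: mean-centered grayscale pixels; replace this to experiment with other features
    gray = cv2.cvtColor(patch, cv2.COLOR_BGR2GRAY).astype(np.float32) / 255.0
    return gray - gray.mean()

def gaussian_kernel_correlation(x, z, sigma=0.5):
    # Gaussian kernel k(x, z) evaluated for every cyclic shift, computed in the Fourier domain
    c = np.fft.ifft2(np.conj(np.fft.fft2(x)) * np.fft.fft2(z)).real
    d = (x ** 2).sum() + (z ** 2).sum() - 2.0 * c
    return np.exp(-np.maximum(d, 0) / (sigma ** 2 * x.size))

class SimpleKCF(object):
    def __init__(self, lam=1e-4, interp=0.02, padding=1.5, output_sigma_factor=0.1):
        self.lam, self.interp, self.padding = lam, interp, padding
        self.output_sigma_factor = output_sigma_factor

    def _window(self, frame, scale=1.0):
        # Crop the (scaled) search window around the current center and resize to a fixed size
        w = max(int(self.search_size[0] * scale), 2)
        h = max(int(self.search_size[1] * scale), 2)
        patch = cv2.getRectSubPix(frame, (w, h), self.center)
        return cv2.resize(patch, self.window_size)

    def init(self, frame, box):
        x, y, w, h = box
        self.center = (x + w / 2.0, y + h / 2.0)
        self.target_size = (float(w), float(h))
        self.search_size = (w * (1 + self.padding), h * (1 + self.padding))
        self.window_size = (int(self.search_size[0]) // 2 * 2, int(self.search_size[1]) // 2 * 2)
        fw, fh = self.window_size
        self.hann = np.outer(np.hanning(fh), np.hanning(fw)).astype(np.float32)
        # Gaussian regression target with its peak wrapped to the origin, as in the KCF paper
        sigma = np.sqrt(w * h) * self.output_sigma_factor
        ys, xs = np.mgrid[0:fh, 0:fw]
        ys = (ys + fh // 2) % fh - fh // 2
        xs = (xs + fw // 2) % fw - fw // 2
        self.yf = np.fft.fft2(np.exp(-0.5 * (xs ** 2 + ys ** 2) / sigma ** 2))
        # Train the correlation filter on the first frame
        self.x = self.hann * features(self._window(frame))
        k = gaussian_kernel_correlation(self.x, self.x)
        self.alphaf = self.yf / (np.fft.fft2(k) + self.lam)

    def update(self, frame, scales=(0.95, 1.0, 1.05)):
        # Detection: evaluate the filter response at a few scales and keep the best peak
        best = (-np.inf, None, 1.0)
        for s in scales:
            z = self.hann * features(self._window(frame, s))
            k = gaussian_kernel_correlation(self.x, z)
            resp = np.fft.ifft2(self.alphaf * np.fft.fft2(k)).real
            if resp.max() > best[0]:
                best = (resp.max(), resp, s)
        _, resp, s = best
        fh, fw = resp.shape
        dy, dx = np.unravel_index(resp.argmax(), resp.shape)
        dy = (dy + fh // 2) % fh - fh // 2  # wrap peak coordinates to signed displacements
        dx = (dx + fw // 2) % fw - fw // 2
        self.center = (self.center[0] + dx * s * self.search_size[0] / fw,
                       self.center[1] + dy * s * self.search_size[1] / fh)
        self.target_size = (self.target_size[0] * s, self.target_size[1] * s)
        self.search_size = (self.search_size[0] * s, self.search_size[1] * s)
        # Online model update by linear interpolation of the template and the filter
        xnew = self.hann * features(self._window(frame))
        knew = gaussian_kernel_correlation(xnew, xnew)
        self.alphaf = (1 - self.interp) * self.alphaf + \
                      self.interp * self.yf / (np.fft.fft2(knew) + self.lam)
        self.x = (1 - self.interp) * self.x + self.interp * xnew
        return (int(self.center[0] - self.target_size[0] / 2),
                int(self.center[1] - self.target_size[1] / 2),
                int(self.target_size[0]), int(self.target_size[1]))

Usage mirrors the OpenCV trackers: call kcf.init(frame, (x, y, w, h)) once, then box = kcf.update(frame) per frame. To use multi-channel features such as HOG, features() would need to return an H x W x C array and gaussian_kernel_correlation() would need to sum the correlation term over channels; that is the only change the kernel formula requires.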
Summary
That covers the Python implementations of single-target, multi-target, multi-scale, and custom-feature KCF tracking described above. I hope it helps; if you have any questions, please leave a comment and I will reply as soon as possible. Thank you for supporting this site, and if you found the article useful you are welcome to repost it, citing the source.
