OpenCV—python 视频分析背景提取与前景提取
⽂章⽬录
OpenCV中⽀持的两种背景提取算法都是 基于模型密度评估,然后在 像素级对图像进⾏前景与背景分类的⽅法,它们具有相同的假设前提– 各个像素之间是没有相关性的,跟它们算法思想不同的⽅法主要是基于马尔可夫随机场理论,认为每个像素跟周围的像素是有相关性关系,但是基于马尔可夫随机场的⽅法速度与执⾏效率都堪忧!所以OpenCV中没有实现。基于像素分类的背景分析⽅法⾃适应的背景提取(⽆参数化/ KNN)基于GMM的背景提取
基于模糊积分的背景提取这些背景建模的⽅法⼀般都可以分为如下三步完成背景初始化阶段(背景建模提取)前景检测阶段(视频分析,前景对象检测)
背景维护与更新(视频分析过程中)
⼀、算法
实现对前景与背景像素级别的建模,最常见的是RGB像素的概率密度分布,当对象没有变化的时候,通过连续的帧进⾏建模⽣成背景模型
⾼斯混合模型(GMM)⽅式正好满⾜这种⽅式,对⾼斯混合模型中的每个 component 进⾏建模,计算表达如下:
像素的背景/前景判别基于贝叶斯判决比值:

$$\frac{p(BG\mid x^{(t)})}{p(FG\mid x^{(t)})}=\frac{p(x^{(t)}\mid BG)\,p(BG)}{p(x^{(t)}\mid FG)\,p(FG)}$$

当 $p(x^{(t)}\mid BG) > c_{thr}$(其中 $c_{thr}$ 与 $p(FG)/p(BG)$ 成比例)时,该像素被判为背景。混合高斯背景模型表示为:

$$\hat p\left(x^{(t)}\mid X_T,\, BG{+}FG\right)=\sum_{m=1}^{M}\hat\pi_m\,\mathcal N\!\left(x;\,\hat\mu_m,\,\hat\sigma_m^2 I\right)$$
在更新的时候,会考虑分布之间的相似程度,当马⽒距离相似度⼩于 3 的时候,可能 GMM 的主成分维持不变,当⼤于 3 以后就会当成⼀个新的 component,丢弃前⾯权重最⼩的,维持模型。参数 $\alpha$ ⽤来控制更新速率。
基于 GMM 的核密度估算需要考虑初始输⼊ component 数⽬参数。OpenCV 中实现的另外⼀种⽅法是基于简单的核密度估算⽅法,然后通过 KNN 对输出的每个像素进⾏前景与背景分类,实现了更加快速的背景分析(⾮参数化的模型更新)。上述两种⽅法都是基于像素分类,采⽤⾮此即彼的⽅法,没有考虑到像素之间相似度的关联性,在实际应⽤场景中有些情况会带来问题。所以还有⼀种基于相似度进⾏模糊积分决策的⽅法,它的算法流程如下:
其中颜⾊相似性度量如下:
其中颜⾊相似性度量定义为:

$$S_C(x,y)=\begin{cases}I_C(x,y)/I_B(x,y) & \text{if } I_C(x,y)<I_B(x,y)\\[2pt] 1 & \text{if } I_C(x,y)=I_B(x,y)\\[2pt] I_B(x,y)/I_C(x,y) & \text{if } I_C(x,y)>I_B(x,y)\end{cases}$$

附:GMM 各分量参数的递推更新公式(Zivkovic)为:

$$\hat\pi_m \leftarrow \hat\pi_m + \alpha\left(o_m^{(t)}-\hat\pi_m\right)$$
$$\hat\mu_m \leftarrow \hat\mu_m + o_m^{(t)}\left(\alpha/\hat\pi_m\right)\delta_m$$
$$\hat\sigma_m^2 \leftarrow \hat\sigma_m^2 + o_m^{(t)}\left(\alpha/\hat\pi_m\right)\left(\delta_m^{T}\delta_m-\hat\sigma_m^2\right)$$

其中 $\delta_m = x^{(t)}-\hat\mu_m$,$o_m^{(t)}$ 为样本归属指⽰变量。⾮参数核密度估计为:

$$\hat p_{non\text{-}parametric}\left(x^{(t)}\mid X_T, BG\right)\approx \frac{1}{TV}\sum_{m=t-T}^{t} b^{(m)}\,K\!\left(\frac{\left\|x^{(m)}-x^{(t)}\right\|}{D}\right)$$
纹理相似度度量(纹理特征LBP特征)
OpenCV在release模块中相关API
Ptr<BackgroundSubtractorMOG2> cv::createBackgroundSubtractorMOG2(
    int    history       = 500,
    double varThreshold  = 16,
    bool   detectShadows = true
)
参数解释
History 表⽰的是历史帧数多少,这个跟作者论⽂提到的采样有关计算模型建⽴有关系
varThreshold 表⽰马⽒距离的阈值
detectShadows 是否检测阴影
⼆、代码
MOG2(Mixture of Gaussian)
import cv2

# MOG2 (Mixture of Gaussians) background subtraction demo:
# per frame, compute the foreground mask, threshold away shadow pixels,
# denoise with a morphological opening, and show the learned background.
capture = cv2.VideoCapture(r"C:\Users\xxx\Videos\mouse.mp4")
mog = cv2.createBackgroundSubtractorMOG2()
# 3x3 rectangular structuring element used by the opening to remove specks
se = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
while True:
    ret, image = capture.read()
    if ret is True:
        fgmask = mog.apply(image)
        # MOG2 marks shadows as 127; thresholding at 220 keeps only
        # confident foreground pixels (value 255).
        ret, binary = cv2.threshold(fgmask, 220, 255, cv2.THRESH_BINARY)
        binary = cv2.morphologyEx(binary, cv2.MORPH_OPEN, se)
        backgimage = mog.getBackgroundImage()
        cv2.imshow("backgimage", backgimage)
        cv2.imshow("frame", image)
        cv2.imshow("binary", binary)
        c = cv2.waitKey(50)
        if c == 27:  # ESC quits
            break
    else:
        break
cv2.destroyAllWindows()
MOG2(Mixture of Gaussian ) 与 KNN 对⽐
纹理相似性度量(基于 LBP 纹理特征)定义为:

$$S_T(x,y)=\begin{cases}L_C(x,y)/L_B(x,y) & \text{if } L_C(x,y)<L_B(x,y)\\[2pt] 1 & \text{if } L_C(x,y)=L_B(x,y)\\[2pt] L_B(x,y)/L_C(x,y) & \text{if } L_C(x,y)>L_B(x,y)\end{cases}$$
import cv2

# Side-by-side comparison of the MOG2 and KNN background subtractors
# on the same video stream.
cap = cv2.VideoCapture('./data/mouse.mp4')
knn_sub = cv2.createBackgroundSubtractorKNN()
mog2_sub = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    if not ret:
        break
    mog_sub_mask = mog2_sub.apply(frame)
    knn_sub_mask = knn_sub.apply(frame)
    cv2.imshow('original', frame)
    cv2.imshow('MOG2', mog_sub_mask)
    cv2.imshow('KNN', knn_sub_mask)
    key = cv2.waitKey(30) & 0xff
    if key == 27 or key == ord('q'):  # ESC or 'q' quits
        break
cv2.destroyAllWindows()
Kmeans
检测的物体需要⾊彩相近,才能有好效果
'''
Extract panel :kmeans聚类
'''
import cv2
import numpy as np
import math
def panelAbstract(srcImage):
    """Extract the dominant foreground "panel" region from a BGR image.

    Pipeline: k-means (k=2) clusters the pixels into foreground/background,
    the clustered image is binarized and the largest external contour is
    taken as the panel; if the panel's min-area rectangle is rotated, the
    image is deskewed with warpAffine before cropping.

    NOTE(review): works best when the target object is roughly uniform in
    color, as stated in the surrounding text.

    :param srcImage: input BGR image (H x W x 3, uint8)
    :return: cropped BGR image containing the panel
    """
    imgHeight, imgWidth = srcImage.shape[:2]
    imgHeight = int(imgHeight)
    imgWidth = int(imgWidth)
    # k-means foreground extraction: flatten the image to an (N, 3)
    # float32 sample matrix as required by cv2.kmeans.
    imgVec = np.float32(srcImage.reshape((-1, 3)))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    flags = cv2.KMEANS_RANDOM_CENTERS
    ret, label, clusCenter = cv2.kmeans(imgVec, 2, None, criteria, 10, flags)
    clusCenter = np.uint8(clusCenter)
    clusResult = clusCenter[label.flatten()]
    imgres = clusResult.reshape(srcImage.shape)
    imgres = cv2.cvtColor(imgres, cv2.COLOR_BGR2GRAY)
    # Binarize at the midpoint between the two cluster gray levels.
    bwThresh = int((int(np.max(imgres)) + int(np.min(imgres))) / 2)
    _, thresh = cv2.threshold(imgres, bwThresh, 255, cv2.THRESH_BINARY_INV)
    # 3-channel copy of the mask so it can be warped like the color image.
    threshRotate = cv2.merge([thresh, thresh, thresh])
    # Find the largest contour (assumed to be the panel).
    # findContours returns 3 values in OpenCV 3.x and 2 in 4.x; [-2]
    # selects the contour list in both versions.
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    minvalx = np.max([imgHeight, imgWidth])
    maxvalx = 0
    minvaly = np.max([imgHeight, imgWidth])
    maxvaly = 0
    maxconArea = 0
    maxAreaPos = -1
    for i in range(len(contours)):
        area = cv2.contourArea(contours[i])
        if maxconArea < area:
            maxconArea = area
            maxAreaPos = i
    objCont = contours[maxAreaPos]
    # Rotation correction: min-area rectangle gives the panel's angle.
    rect = cv2.minAreaRect(objCont)
    # Track the axis-aligned extremes of the contour (x: rows, y: cols).
    for j in range(len(objCont)):
        minvaly = np.min([minvaly, objCont[j][0][0]])
        maxvaly = np.max([maxvaly, objCont[j][0][0]])
        minvalx = np.min([minvalx, objCont[j][0][1]])
        maxvalx = np.max([maxvalx, objCont[j][0][1]])
    if rect[2] <= -45:
        rotAgl = 90 + rect[2]
    else:
        rotAgl = rect[2]
    if rotAgl == 0:
        # Already axis-aligned: crop directly from the source image.
        panelImg = srcImage[minvalx:maxvalx, minvaly:maxvaly, :]
    else:
        rotCtr = rect[0]
        rotCtr = (int(rotCtr[0]), int(rotCtr[1]))
        rotMdl = cv2.getRotationMatrix2D(rotCtr, rotAgl, 1)
        imgHeight, imgWidth = srcImage.shape[:2]
        # Rotate onto a diagonal-sized canvas so nothing is clipped.
        dstHeight = math.sqrt(imgWidth * imgWidth + imgHeight * imgHeight)
        dstRotimg = cv2.warpAffine(threshRotate, rotMdl,
                                   (int(dstHeight), int(dstHeight)))
        dstImage = cv2.warpAffine(srcImage, rotMdl,
                                  (int(dstHeight), int(dstHeight)))
        dstRotimg = cv2.cvtColor(dstRotimg, cv2.COLOR_BGR2GRAY)
        _, dstRotBW = cv2.threshold(dstRotimg, 127, 255, 0)
        contours = cv2.findContours(dstRotBW, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
        maxcntArea = 0
        maxAreaPos = -1
        for i in range(len(contours)):
            area = cv2.contourArea(contours[i])
            if maxcntArea < area:
                maxcntArea = area
                maxAreaPos = i
        x, y, w, h = cv2.boundingRect(contours[maxAreaPos])
        # Crop the panel from the deskewed color image.
        panelImg = dstImage[int(y):int(y + h), int(x):int(x + w), :]
    return panelImg
if __name__ == "__main__":
    # Demo: extract the panel from a test image and display it.
    srcImage = cv2.imread('mouse.png')
    a = panelAbstract(srcImage)
    cv2.imshow('figa', a)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
⾏⼈检测代码

版权声明:本站内容均来自互联网,仅供演示用,请勿用于商业和其他非法用途。如果侵犯了您的权益请与我们联系QQ:729038198,我们将在24小时内删除。