Python + OpenCV HOGによる歩行者検出

2672 ワード

一、HOGDescriptor()関数
OpenCVのHOGDescriptor()を呼び出す
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

二、コード
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time    : 2017/11/20 11:23
# @File    : faceDetect.py
# @Software: PyCharm

import sys


from PIL import Image, ImageTk
from imutils.object_detection import non_max_suppression
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2

# Centre of the most recently detected pedestrian, written by faceDetect().
# NOTE(review): the original module-level ``global`` statement is a no-op in
# Python (it only has meaning inside a function), and the names x, y, w, h,
# ex_, ey_ it declared are never assigned anywhere in this file, so they were
# dropped.  The two names actually used are initialised explicitly instead.
x_CB = None
y_CB = None

# Alternative Haar cascade face models (unused by the HOG pedestrian path
# below; kept for reference):
# face_model = "lbpcascade_frontalface.xml"
# face_model = "haarcascade_profileface.xml"
# face_model = "haarcascade_frontalface_alt2.xml"
face_model = "haarcascade_frontalface_default.xml"
eyes_model = "haarcascade_eye.xml"
out_file = "output.avi"

# HOG descriptor primed with OpenCV's built-in pre-trained people detector
# (a linear SVM trained on the INRIA pedestrian dataset).
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())




def faceDetect(img):
    """Detect pedestrians in *img* and draw their bounding boxes.

    Runs the module-level HOG + linear-SVM sliding-window detector, merges
    overlapping hits with non-maximum suppression, draws a green rectangle
    around each surviving detection, and records the centre of the last
    detection in the module globals ``x_CB``/``y_CB``.

    NOTE(review): despite the name, this detects full-body pedestrians —
    the Haar face-model constants defined above are not used here.

    :param img: BGR image (numpy array) as returned by ``cv2.VideoCapture``.
    :return: the same image, annotated in place.
    """
    # Only x_CB / y_CB are assigned here; the other names the original
    # declared (x, y, w, h, ex_, ey_) were never function-scope assignments.
    global x_CB, y_CB

    # Sliding-window HOG detection; scale=1.05 is a fine pyramid step that
    # trades speed for recall.
    (rects, weights) = hog.detectMultiScale(
        img, winStride=(4, 4), padding=(8, 8), scale=1.05)

    # Convert (x, y, w, h) boxes to (x1, y1, x2, y2) corners, then apply
    # non-maximum suppression to collapse overlapping detections of the
    # same person into one box.
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    for (xA, yA, xB, yB) in pick:
        cv2.rectangle(img, (xA, yA), (xB, yB), (0, 255, 0), 2)
        # Centre of the detection box in integer pixel coordinates.
        x_CB = int(0.5 * (xA + xB))
        y_CB = int(0.5 * (yA + yB))
        # Original message was mojibake-corrupted; restored in English.
        print('pedestrian centre:', x_CB, y_CB)

    return img


def main():
    """Capture webcam frames, run pedestrian detection, display until 'q'.

    Opens camera 0, annotates each frame via :func:`faceDetect`, and shows
    the result in a window.  Exits when the user presses 'q' or when the
    camera stops delivering frames.
    """
    cap = cv2.VideoCapture(0)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                # Bug fix: the original fell through to imshow() with an
                # undefined/stale frame when the stream ended; stop instead.
                break
            frame = faceDetect(frame)
            # Original window title was mojibake-corrupted; restored.
            cv2.imshow("Pedestrian Detection", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Release the camera and GUI resources even if detection raises.
        cap.release()
        cv2.destroyAllWindows()


# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()