Kinect SDK 1.5 Face Tracking -> A Super-Simplified Version Displayed with OpenCV


[2012-10-10] The multithreading used in this article does not work well. Please use the kernel-event approach with the WaitForMultipleObjects method from the Microsoft sample instead.
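For reference, that event-driven approach could look like the following minimal sketch. This is an assumption based on the handles used in this article, not the Microsoft sample verbatim: EventLoopFunc is a hypothetical replacement for the three worker threads below, h1/h3/h5 are the frame-ready events and h2/h4 the stream handles created in main().

// Minimal sketch of the recommended kernel-event loop: one loop waits on all
// three frame-ready events instead of polling in one thread per stream.
DWORD WINAPI EventLoopFunc(LPVOID pParam)
{
	HANDLE events[3] = { h1, h3, h5 };	// color, depth and skeleton frame-ready events
	while (true)
	{
		switch (WaitForMultipleObjects(3, events, FALSE, INFINITE))
		{
		case WAIT_OBJECT_0:     drawColor(h2);  break;	// new color frame
		case WAIT_OBJECT_0 + 1: drawDepth(h4);  break;	// new depth frame
		case WAIT_OBJECT_0 + 2: drawSkeleton(); break;	// new skeleton frame
		default:                return 1;				// wait failed
		}
	}
	return 0;
}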
Over the past two days I have been reading the code of the new SDK 1.5. Since face recognition questions come up together with it, I had to study its face tracking code. After many setbacks, I finally got as far as estimating the face mask. For how to use it and for more detailed questions, you will need to read Microsoft's own articles on the subject (see the link to the Microsoft site).
Here is the code.
VS2010 + OpenCV 2.3.1 + Kinect SDK 1.5
Install the drivers and so on yourselves; you should have the basics down first. The code builds on my earlier SDK 1.5 basics article, so if anything is unclear, read that article first~
// win32_KinectFaceTracking.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
//----------------------------------------------------
#define  _WINDOWS
#include <windows.h>
#include <FaceTrackLib.h>	// Face Tracking SDK (the include targets on this page were garbled; restored to plausible headers)

HRESULT VisualizeFaceModel(IFTImage* pColorImg, IFTModel* pModel, FT_CAMERA_CONFIG const* pCameraConfig, FLOAT const* pSUCoef, 
	FLOAT zoomFactor, POINT viewOffset, IFTResult* pAAMRlt, UINT32 color);
//----------------------------------------------------
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <cmath>
#include "opencv2\opencv.hpp"

using namespace std;
using namespace cv;
#include <ole2.h>
#include <mmsystem.h>
#include <assert.h>
#include <strsafe.h>
#include "NuiApi.h"
#define COLOR_WIDTH		640
#define COLOR_HIGHT		480
#define DEPTH_WIDTH		320
#define DEPTH_HIGHT		240
#define SKELETON_WIDTH 640
#define SKELETON_HIGHT 480
#define CHANNEL			3
BYTE buf[DEPTH_WIDTH*DEPTH_HIGHT*CHANNEL];
int drawColor(HANDLE h);
int drawDepth(HANDLE h);
int drawSkeleton();
//---face tracking------------------------------------------
BYTE *colorBuffer,*depthBuffer;
IFTImage* pColorFrame;		// color frame handed to the face tracker
IFTImage* pDepthFrame;		// depth frame handed to the face tracker
FT_VECTOR3D m_hint3D[2];	// tracker hints: [0] = shoulder center, [1] = head
//-----------------------------------------------------------------------------------
HANDLE h1;	// color frame-ready event
HANDLE h3;	// depth frame-ready event
HANDLE h5;	// skeleton frame-ready event
HANDLE h2;	// color stream handle
HANDLE h4;	// depth stream handle
DWORD WINAPI VideoFunc(LPVOID pParam)
{
//	cout<<"video"<<endl;
	while(true)
	{
		WaitForSingleObject(h1, INFINITE);	// block until a new color frame is ready
		drawColor(h2);
	}
	return 0;
}

DWORD WINAPI DepthFunc(LPVOID pParam)
{
	while(true)
	{
		WaitForSingleObject(h3, INFINITE);	// block until a new depth frame is ready
		drawDepth(h4);
	}
	return 0;
}

DWORD WINAPI SkeletonFunc(LPVOID pParam)
{
	while(true)
	{
		WaitForSingleObject(h5, INFINITE);	// block until a new skeleton frame is ready
		drawSkeleton();
	}
	return 0;
}

int drawColor(HANDLE h)
{
	const NUI_IMAGE_FRAME * pImageFrame = NULL;
	HRESULT hr = NuiImageStreamGetNextFrame( h, 0, &pImageFrame );
	if( FAILED( hr ) )
	{
		cout<<"Get Color Image Frame Failed"<<endl;
		return -1;
	}
	INuiFrameTexture * pTexture = pImageFrame->pFrameTexture;
	NUI_LOCKED_RECT LockedRect;
	pTexture->LockRect( 0, &LockedRect, NULL, 0 );
	if( LockedRect.Pitch != 0 )
	{
		BYTE * pBuffer = (BYTE*) LockedRect.pBits;
		colorBuffer	=	pBuffer;
		memcpy(pColorFrame->GetBuffer(), PBYTE(LockedRect.pBits), min(pColorFrame->GetBufferSize(), UINT(pTexture->BufferLen())));

		Mat temp(COLOR_HIGHT,COLOR_WIDTH,CV_8UC4,pBuffer);
		imshow("b",temp);
		waitKey(1);
	}
	NuiImageStreamReleaseFrame( h, pImageFrame );
	return 0;
}

int drawDepth(HANDLE h)
{
	const NUI_IMAGE_FRAME * pImageFrame = NULL;
	HRESULT hr = NuiImageStreamGetNextFrame( h, 0, &pImageFrame );
	if( FAILED( hr ) )
	{
		cout<<"Get Depth Image Frame Failed"<<endl;
		return -1;
	}
	INuiFrameTexture * pTexture = pImageFrame->pFrameTexture;
	NUI_LOCKED_RECT LockedRect;
	pTexture->LockRect( 0, &LockedRect, NULL, 0 );
	if( LockedRect.Pitch != 0 )
	{
		USHORT * pBuff = (USHORT*) LockedRect.pBits;
//		depthBuffer = pBuff;
		memcpy(pDepthFrame->GetBuffer(), PBYTE(LockedRect.pBits), min(pDepthFrame->GetBufferSize(), UINT(pTexture->BufferLen())));

		for(int i=0;i<DEPTH_WIDTH*DEPTH_HIGHT;i++)
		{
			BYTE index = pBuff[i]&0x07;					// player index in the low 3 bits
			USHORT realDepth = (pBuff[i]&0xfff8)>>3;	// depth in millimeters in the high 13 bits
			BYTE scale = 255 - (BYTE)(256*realDepth/0x0fff);
			buf[CHANNEL*i] = buf[CHANNEL*i+1] = buf[CHANNEL*i+2] = 0;
			switch( index )
			{
			case 0:
				buf[CHANNEL*i]=scale/2;
				buf[CHANNEL*i+1]=scale/2;
				buf[CHANNEL*i+2]=scale/2;
				break;
			case 1:
				buf[CHANNEL*i]=scale;
				break;
			case 2:
				buf[CHANNEL*i+1]=scale;
				break;
			case 3:
				buf[CHANNEL*i+2]=scale;
				break;
			case 4:
				buf[CHANNEL*i]=scale;
				buf[CHANNEL*i+1]=scale;
				break;
			case 5:
				buf[CHANNEL*i]=scale;
				buf[CHANNEL*i+2]=scale;
				break;
			case 6:
				buf[CHANNEL*i+1]=scale;
				buf[CHANNEL*i+2]=scale;
				break;
			case 7:
				buf[CHANNEL*i]=255-scale/2;
				buf[CHANNEL*i+1]=255-scale/2;
				buf[CHANNEL*i+2]=255-scale/2;
				break;
			}
		}
		Mat b(DEPTH_HIGHT,DEPTH_WIDTH,CV_8UC3,buf);
		imshow("depth",b);
		waitKey(1);
	}
	NuiImageStreamReleaseFrame( h, pImageFrame );
	return 0;
}

int drawSkeleton()
{
	NUI_SKELETON_FRAME SkeletonFrame;
	cv::Point pt[20];
	Mat skeletonMat=Mat(SKELETON_HIGHT,SKELETON_WIDTH,CV_8UC3,Scalar(0,0,0));
	HRESULT hr = NuiSkeletonGetNextFrame( 0, &SkeletonFrame );
	if( FAILED( hr ) )
	{
		cout<<"Get Skeleton Frame Failed"<<endl;
		return -1;
	}
	// smooth the skeleton data a little before using it
	NuiTransformSmooth(&SkeletonFrame,NULL);
	for( int i = 0 ; i < NUI_SKELETON_COUNT ; i++ )
	{
		if( SkeletonFrame.SkeletonData[i].eTrackingState == NUI_SKELETON_TRACKED )
		{
			// draw the 20 joints into the skeleton window
			for( int j = 0 ; j < NUI_SKELETON_POSITION_COUNT ; j++ )
			{
				float fx = 0, fy = 0;
				NuiTransformSkeletonToDepthImage( SkeletonFrame.SkeletonData[i].SkeletonPositions[j], &fx, &fy );
				pt[j].x = (int)fx * 2;	// 320x240 depth space -> 640x480 view
				pt[j].y = (int)fy * 2;
				circle( skeletonMat, pt[j], 5, Scalar(0,0,255), -1 );
			}
			// feed the face tracker hints: [0] = shoulder center, [1] = head
			m_hint3D[0].x = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_SHOULDER_CENTER].x;
			m_hint3D[0].y = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_SHOULDER_CENTER].y;
			m_hint3D[0].z = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_SHOULDER_CENTER].z;
			m_hint3D[1].x = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_HEAD].x;
			m_hint3D[1].y = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_HEAD].y;
			m_hint3D[1].z = SkeletonFrame.SkeletonData[i].SkeletonPositions[NUI_SKELETON_POSITION_HEAD].z;
		}
	}
	imshow( "skeleton", skeletonMat );
	waitKey(1);
	return 0;
}

int _tmain(int argc, _TCHAR* argv[])
{
	// initialize NUI for color, depth (with player index) and skeleton tracking
	HRESULT hr = NuiInitialize( NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX | NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_SKELETON );
	if( FAILED( hr ) )
	{
		cout<<"NuiInitialize Failed"<<endl;
		return hr;
	}
	// create the frame-ready events and open the streams
	h1 = CreateEvent( NULL, TRUE, FALSE, NULL );
	hr = NuiImageStreamOpen( NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, 0, 2, h1, &h2 );
	if( FAILED( hr ) )
	{
		cout<<"Open the color Stream Failed"<<endl;
		NuiShutdown();
		return hr;
	}
	h3 = CreateEvent( NULL, TRUE, FALSE, NULL );
	hr = NuiImageStreamOpen( NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240, 0, 2, h3, &h4 );
	if( FAILED( hr ) )
	{
		cout<<"Open the depth Stream Failed"<<endl;
		NuiShutdown();
		return hr;
	}
	h5 = CreateEvent( NULL, TRUE, FALSE, NULL );
	hr = NuiSkeletonTrackingEnable( h5, 0 );
	if( FAILED( hr ) )
	{
		cout<<"Enable skeleton tracking Failed"<<endl;
		NuiShutdown();
		return hr;
	}

	m_hint3D[0].x = m_hint3D[0].y = m_hint3D[0].z = 0.0f;
	m_hint3D[1].x = m_hint3D[1].y = m_hint3D[1].z = 0.0f;

	// one worker thread per stream (see the 2012-10-10 note at the top about this design)
	HANDLE hThread1 = CreateThread( NULL, 0, VideoFunc, NULL, 0, NULL );
	HANDLE hThread2 = CreateThread( NULL, 0, DepthFunc, NULL, 0, NULL );
	HANDLE hThread3 = CreateThread( NULL, 0, SkeletonFunc, NULL, 0, NULL );

	// Create an instance of a face tracker
	IFTFaceTracker* pFT = FTCreateFaceTracker();
	if(!pFT)
	{
		return -1;// Handle errors
	}
	// Video and depth camera configs with width, height, focal length in pixels
	FT_CAMERA_CONFIG myCameraConfig = {COLOR_WIDTH, COLOR_HIGHT, NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS};
	FT_CAMERA_CONFIG depthConfig = {DEPTH_WIDTH, DEPTH_HIGHT, NUI_CAMERA_DEPTH_NOMINAL_FOCAL_LENGTH_IN_PIXELS};
	// Initialize the face tracker
	hr = pFT->Initialize(&myCameraConfig, &depthConfig, NULL, NULL);
	if( FAILED(hr) )
	{
		return -2;// Handle errors
	}
	// Create IFTResult to hold a face tracking result
	IFTResult* pFTResult = NULL;
	hr = pFT->CreateFTResult(&pFTResult);
	if(FAILED(hr))
	{
		return -11;
	}
	// prepare Image and SensorData for 640x480 RGB images
	pColorFrame = FTCreateImage();
	pDepthFrame = FTCreateImage();
	if(!pColorFrame || !pDepthFrame)
	{
		return -12;// Handle errors
	}
	// Attach assumes that the camera code provided by the application
	// is filling the buffer cameraFrameBuffer
//	pColorFrame->Attach(640, 480, colorBuffer, FTIMAGEFORMAT_UINT8_B8G8R8X8, 640*3);

	hr = pColorFrame->Allocate(640, 480, FTIMAGEFORMAT_UINT8_B8G8R8X8);
	if (FAILED(hr))
	{
		return hr;
	}
	hr = pDepthFrame->Allocate(320, 240, FTIMAGEFORMAT_UINT16_D13P3);
	if (FAILED(hr))
	{
		return hr;
	}
	FT_SENSOR_DATA sensorData;
	sensorData.pVideoFrame = pColorFrame;
	sensorData.pDepthFrame = pDepthFrame;
	sensorData.ZoomFactor = 1.0f;
	POINT point = {0, 0};
	sensorData.ViewOffset = point;

	bool isTracked = false;
	int iFaceTrackTimeCount=0;
	// Track a face
	while ( true )
	{
		// Call your camera method to process IO and fill the camera buffer
	//	cameraObj.ProcessIO(cameraFrameBuffer); // replace with your method
		if(!isTracked)
		{
			hr = pFT->StartTracking(&sensorData, NULL, m_hint3D, pFTResult);
			if(SUCCEEDED(hr) && SUCCEEDED(pFTResult->GetStatus()))
			{
				isTracked = true;
			}
			else
			{
				// Handle errors
				isTracked = false;
			}
		}
		else
		{
			// Continue tracking. It uses a previously known face position,
			// so it is an inexpensive call.
			hr = pFT->ContinueTracking(&sensorData, m_hint3D, pFTResult);
			if(FAILED(hr) || FAILED (pFTResult->GetStatus()))
			{
				// Handle errors
				isTracked = false;
			}
		}
		if(isTracked)
		{
			printf("Face tracking succeeded!!!!!!!!!!!!!!!\n");
			IFTModel* ftModel;
			HRESULT hr = pFT->GetFaceModel(&ftModel);
			FLOAT* pSU = NULL;
			UINT numSU;
			BOOL suConverged;
			pFT->GetShapeUnits(NULL, &pSU, &numSU, &suConverged);
			POINT viewOffset = {0, 0};
			hr = VisualizeFaceModel(pColorFrame, ftModel, &myCameraConfig, pSU, 1.0, viewOffset, pFTResult, 0x00FFFF00);
			if(FAILED(hr))
				printf("Visualizing the face model failed!!\n");
			ftModel->Release();	// release the model obtained from GetFaceModel above
			Mat tempMat(COLOR_HIGHT,COLOR_WIDTH,CV_8UC4,pColorFrame->GetBuffer());
			imshow("faceTracking",tempMat);
			waitKey(1);
		}
		//printf("%d\n",pFTResult->GetStatus());
		// Do something with pFTResult.
		Sleep(16);
		iFaceTrackTimeCount++;
		if(iFaceTrackTimeCount>16*1000)
			break; // Terminate on some criteria.
	}
	// Clean up.
	pFTResult->Release();
	pColorFrame->Release();
	pFT->Release();
	CloseHandle(hThread1);
	CloseHandle(hThread2);
	CloseHandle(hThread3);
	Sleep(60000);
	NuiShutdown();
	return 0;
}

HRESULT VisualizeFaceModel(IFTImage* pColorImg, IFTModel* pModel, FT_CAMERA_CONFIG const* pCameraConfig, FLOAT const* pSUCoef,
	FLOAT zoomFactor, POINT viewOffset, IFTResult* pAAMRlt, UINT32 color)
{
	if (!pColorImg || !pModel || !pCameraConfig || !pSUCoef || !pAAMRlt)
	{
		return E_POINTER;
	}
	HRESULT hr = S_OK;
	UINT vertexCount = pModel->GetVertexCount();
	FT_VECTOR2D* pPts2D = reinterpret_cast<FT_VECTOR2D*>(_malloca(sizeof(FT_VECTOR2D) * vertexCount));
	if (pPts2D)
	{
		FLOAT *pAUs;
		UINT auCount;
		hr = pAAMRlt->GetAUCoefficients(&pAUs, &auCount);
		if (SUCCEEDED(hr))
		{
			FLOAT scale, rotationXYZ[3], translationXYZ[3];
			hr = pAAMRlt->Get3DPose(&scale, rotationXYZ, translationXYZ);
			if (SUCCEEDED(hr))
			{
				// project the fitted 3D model into image space
				hr = pModel->GetProjectedShape(pCameraConfig, zoomFactor, viewOffset, pSUCoef, pModel->GetSUCount(), pAUs, auCount,
					scale, rotationXYZ, translationXYZ, pPts2D, vertexCount);
				if (SUCCEEDED(hr))
				{
					POINT* p3DMdl = reinterpret_cast<POINT*>(_malloca(sizeof(POINT) * vertexCount));
					if (p3DMdl)
					{
						for (UINT i = 0; i < vertexCount; ++i)
						{
							p3DMdl[i].x = LONG(pPts2D[i].x + 0.5f);
							p3DMdl[i].y = LONG(pPts2D[i].y + 0.5f);
						}
						FT_TRIANGLE* pTriangles;
						UINT triangleCount;
						hr = pModel->GetTriangles(&pTriangles, &triangleCount);
						if (SUCCEEDED(hr))
						{
							// hash table that deduplicates triangle edges before drawing the wireframe
							struct EdgeHashTable
							{
								UINT32* pEdges;
								UINT edgesAlloc;

								void Insert(int a, int b)
								{
									UINT32 v = (min(a, b) << 16) | max(a, b);
									UINT32 index = (v + (v << 8)) * 49157, i;
									for (i = 0; i < edgesAlloc - 1 && pEdges[(index + i) & (edgesAlloc - 1)] && v != pEdges[(index + i) & (edgesAlloc - 1)]; ++i)
									{
									}
									pEdges[(index + i) & (edgesAlloc - 1)] = v;
								}
							} eht;

							eht.edgesAlloc = 1 << UINT(log(2.f * (1 + vertexCount + triangleCount)) / log(2.f));
							eht.pEdges = reinterpret_cast<UINT32*>(_malloca(sizeof(UINT32) * eht.edgesAlloc));
							if (eht.pEdges)
							{
								ZeroMemory(eht.pEdges, sizeof(UINT32) * eht.edgesAlloc);
								for (UINT i = 0; i < triangleCount; ++i)
								{
									eht.Insert(pTriangles[i].i, pTriangles[i].j);
									eht.Insert(pTriangles[i].j, pTriangles[i].k);
									eht.Insert(pTriangles[i].k, pTriangles[i].i);
								}
								for (UINT i = 0; i < eht.edgesAlloc; ++i)
								{
									if (eht.pEdges[i] != 0)
									{
										pColorImg->DrawLine(p3DMdl[eht.pEdges[i] >> 16], p3DMdl[eht.pEdges[i] & 0xFFFF], color, 1);
									}
								}
								_freea(eht.pEdges);
							}

							// Render the face rect in magenta
							RECT rectFace;
							hr = pAAMRlt->GetFaceRect(&rectFace);
							if (SUCCEEDED(hr))
							{
								POINT leftTop = {rectFace.left, rectFace.top};
								POINT rightTop = {rectFace.right - 1, rectFace.top};
								POINT leftBottom = {rectFace.left, rectFace.bottom - 1};
								POINT rightBottom = {rectFace.right - 1, rectFace.bottom - 1};
								UINT32 nColor = 0xff00ff;
								SUCCEEDED(hr = pColorImg->DrawLine(leftTop, rightTop, nColor, 1)) &&
									SUCCEEDED(hr = pColorImg->DrawLine(rightTop, rightBottom, nColor, 1)) &&
									SUCCEEDED(hr = pColorImg->DrawLine(rightBottom, leftBottom, nColor, 1)) &&
									SUCCEEDED(hr = pColorImg->DrawLine(leftBottom, leftTop, nColor, 1));
							}
						}
						_freea(p3DMdl);
					}
					else
					{
						hr = E_OUTOFMEMORY;
					}
				}
			}
		}
		_freea(pPts2D);
	}
	else
	{
		hr = E_OUTOFMEMORY;
	}
	return hr;
}

Finally, some screenshots (see the original post for the images).
The VS2010 project source can be downloaded for free (no points required)~