
kivantium活動日記

I do various things with programs

Recognizing the Position of a Blue Ball with Kinect

OpenCV

I used the labeling class by Imura-san of Osaka University. The program extracts pixels where the blue channel dominates, labels the connected regions, and prints the 3D coordinates of the largest region's center taken from the Kinect point cloud.

#include <iostream>
#include <opencv2/opencv.hpp>
#include <cstdio>
#include "Labeling.h"
using namespace cv;
using namespace std;
int main(){
    try {
        VideoCapture capture(0); // Kinect via OpenNI; depending on the OpenCV build, VideoCapture capture(CV_CAP_OPENNI) may be needed
        Mat RGBMap,Cloud,valid;
        IplImage ipl_RGB;
        IplImage *imgR,*imgG=0,*imgB;
        IplImage *imgThreshold_R, *imgThreshold_G, *imgThreshold_B,*imgResult,*imgTmp,*RGB_image;
        int x,y;
        short *dst;
        LabelingBS labeling;
        RegionInfoBS    *ri;
        dst = new short[640 * 480];

        imgThreshold_R = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
        imgThreshold_G = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
        imgThreshold_B = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
        imgResult = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
        imgTmp = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
        imgR = cvCreateImage(cvSize(640,480),IPL_DEPTH_8U,1); //Red
        imgG = cvCreateImage(cvSize(640,480),IPL_DEPTH_8U,1); //Green
        imgB = cvCreateImage(cvSize(640,480),IPL_DEPTH_8U,1); //Blue
        while ( 1 ) {
            // wait for the data to be updated
            capture.grab();
            // retrieve the RGB image
            capture.retrieve( RGBMap, CV_CAP_OPENNI_BGR_IMAGE);
            ipl_RGB = RGBMap;
            RGB_image = &ipl_RGB;
            cvSplit(RGB_image,imgB,imgG,imgR,NULL);
            // extract pixels whose blue component is at least 100 and at least 1.8 times the green and red components
            cvThreshold(imgB, imgThreshold_B, 100, 255, CV_THRESH_BINARY);
            cvDiv(imgB, imgG, imgTmp, 10); // B/G ratio scaled by 10
            cvThreshold(imgTmp, imgThreshold_G, 18, 255, CV_THRESH_BINARY);
            cvDiv(imgB, imgR, imgTmp, 10);
            cvThreshold(imgTmp, imgThreshold_R, 18, 255, CV_THRESH_BINARY);
            cvAnd(imgThreshold_G, imgThreshold_R, imgTmp, NULL);
            cvAnd(imgTmp, imgThreshold_B, imgResult, NULL);

            // labeling (regions sorted by size; regions smaller than 30 pixels are discarded)
            labeling.Exec((uchar *)imgResult->imageData, dst, imgResult->width, imgResult->height, true, 30);
            if(labeling.GetNumOfResultRegions()!=0){
                ri = labeling.GetResultRegionInfo( 0 ); // the largest region
                // draw the bounding rectangle
                int ltop_x,ltop_y,rbottom_x,rbottom_y;
                ri->GetMin(ltop_x,ltop_y);
                ri->GetMax(rbottom_x, rbottom_y);
                cvLine(RGB_image,cvPoint(ltop_x, ltop_y), cvPoint(rbottom_x, ltop_y), CV_RGB(255, 255, 255));
                cvLine(RGB_image,cvPoint(rbottom_x, ltop_y), cvPoint(rbottom_x, rbottom_y), CV_RGB(255, 255, 255));
                cvLine(RGB_image,cvPoint(rbottom_x, rbottom_y), cvPoint(ltop_x, rbottom_y), CV_RGB(255, 255, 255));
                cvLine(RGB_image,cvPoint(ltop_x, rbottom_y), cvPoint(ltop_x, ltop_y), CV_RGB(255, 255, 255));
                float f_x, f_y;
                ri->GetCenter(f_x, f_y);
                x = (int)f_x;
                y = (int)f_y;
                capture.retrieve( Cloud, CV_CAP_OPENNI_POINT_CLOUD_MAP);  // XYZ point cloud in meters
                capture.retrieve( valid, CV_CAP_OPENNI_VALID_DEPTH_MASK); // mask of pixels with valid depth
                if(valid.at<unsigned char>(y,x)==0xff){
                    Vec3f s = Cloud.at<Vec3f>(y, x);
                    printf("%f %f %f\n",s[0],s[1],s[2]);  //座標を表示
                }else{
                    printf("invalid!\n");
                }
            }

            cvShowImage("Window", RGB_image );
            if ( waitKey( 10 ) >= 0 ) {
                break;
            }
        }
        destroyAllWindows();
    }
    catch ( ... ) {
        cout << "exception!!" << endl;
    }
}
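
As an aside, the blue-dominant pixel extraction above could also be written with the C++ Mat API instead of the legacy IplImage/cv* functions. The following is only a minimal sketch of that single step (the function name extractBlueMask and the variable names are my own, not part of the original program):

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

// Returns a binary mask of pixels whose blue channel is at least 100
// and at least 1.8 times both the green and the red channels.
Mat extractBlueMask(const Mat& bgr) {
    std::vector<Mat> ch;
    split(bgr, ch);                      // ch[0]=B, ch[1]=G, ch[2]=R
    Mat maskB, maskBG, maskBR, ratio, result;
    threshold(ch[0], maskB, 100, 255, THRESH_BINARY);
    divide(ch[0], ch[1], ratio, 10.0);   // 10*B/G
    threshold(ratio, maskBG, 18, 255, THRESH_BINARY);
    divide(ch[0], ch[2], ratio, 10.0);   // 10*B/R
    threshold(ratio, maskBR, 18, 255, THRESH_BINARY);
    bitwise_and(maskB, maskBG, result);
    bitwise_and(result, maskBR, result);
    return result;
}

The labeling and the point-cloud lookup would stay the same as in the program above.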
Execution result