
Face Detection under ROS with Output of the Face Coordinate Position


Package download link: https://download.csdn.net/download/qq_42145185/12265062

[Screenshots: face detection output running under ROS]

Launch command: roslaunch face_tracker_pkg start_tracking.launch

Core detection code (sketches of the supporting centroid message, parameter file, and launch file follow the listing):

/*
 * This node detects faces with OpenCV under ROS and publishes the face centroid
 */
 
 
 
//ROS headers
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
 
//OpenCV headers
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/objdetect.hpp"
 
//Centroid message headers
#include <face_tracker_pkg/centroid.h>
 
//OpenCV window names (the same names are used when the windows are created and destroyed)
static const std::string OPENCV_WINDOW = "Original Image";
static const std::string OPENCV_WINDOW_1 = "Face tracker";
 
 
using namespace std;
using namespace cv;
 
class Face_Detector
{
  ros::NodeHandle nh_;
  image_transport::ImageTransport it_;
  image_transport::Subscriber image_sub_;
  image_transport::Publisher image_pub_;
 
  // Publisher for the detected face centroid on the /face_centroid topic
  ros::Publisher face_centroid_pub;

  // Custom message (face_tracker_pkg/centroid) holding the detected face center
  face_tracker_pkg::centroid face_centroid;
 
  string input_image_topic, output_image_topic, haar_file_face;
  int face_tracking, display_original_image, display_tracking_image, center_offset, screenmaxx;
 
  
public:
  Face_Detector()
    : it_(nh_)
  {
 
 
  //Loading default values
 
 
  input_image_topic = "/usb_cam/image_raw";
  output_image_topic = "/face_detector/raw_image";
  haar_file_face = "/home/dobot/face_detect_ws/src/face_tracker_pkg/data/face.xml";
  face_tracking = 1;
  display_original_image = 1;
  display_tracking_image = 1;
  screenmaxx = 640;
  center_offset = 100;
 
  //Accessing parameters from track.yaml.
  //getParam() returns false instead of throwing when a parameter is missing,
  //so the defaults set above are kept in that case.

  bool params_loaded = true;
  params_loaded &= nh_.getParam("image_input_topic", input_image_topic);
  params_loaded &= nh_.getParam("face_detected_image_topic", output_image_topic);
  params_loaded &= nh_.getParam("haar_file_face", haar_file_face);
  params_loaded &= nh_.getParam("face_tracking", face_tracking);
  params_loaded &= nh_.getParam("display_original_image", display_original_image);
  params_loaded &= nh_.getParam("display_tracking_image", display_tracking_image);
  params_loaded &= nh_.getParam("center_offset", center_offset);
  params_loaded &= nh_.getParam("screenmaxx", screenmaxx);

  if (params_loaded)
      ROS_INFO("Successfully loaded tracking parameters");
  else
      ROS_WARN("Parameters are not properly loaded from file, loading defaults");
 
    // Subscribe to the input video feed and publish the output video feed
    image_sub_ = it_.subscribe(input_image_topic, 1, 
      &Face_Detector::imageCb, this);
    image_pub_ = it_.advertise(output_image_topic, 1);
   
    face_centroid_pub = nh_.advertise<face_tracker_pkg::centroid>("/face_centroid",10);
 
 
  }
 
  ~Face_Detector()
  {
    // Close any display windows that were opened during tracking
    if( display_original_image == 1 )
      cv::destroyWindow(OPENCV_WINDOW);
    if( display_tracking_image == 1 )
      cv::destroyWindow(OPENCV_WINDOW_1);
  }
 
  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {
 
    cv_bridge::CvImagePtr cv_ptr;
    namespace enc = sensor_msgs::image_encodings;
 
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }
 
 
    // Load the Haar cascade used for face detection.
    // Note: the cascade is reloaded on every frame here; loading it once in the
    // constructor would be more efficient.
    string cascadeName = haar_file_face;
    CascadeClassifier cascade;
    if( !cascade.load( cascadeName ) )
    {
        cerr << "ERROR: Could not load classifier cascade" << endl;
        return;
    }

    if (display_original_image == 1){
        imshow(OPENCV_WINDOW, cv_ptr->image);
    }

    detectAndDraw( cv_ptr->image, cascade );

    image_pub_.publish(cv_ptr->toImageMsg());

    waitKey(30);
  }
 
void detectAndDraw( Mat& img, CascadeClassifier& cascade)
{
    double t = 0;
    double scale = 1;
    vector<Rect> faces;
    const static Scalar colors[] =
    {
        Scalar(255,0,0),
        Scalar(255,128,0),
        Scalar(255,255,0),
        Scalar(0,255,0),
        Scalar(0,128,255),
        Scalar(0,255,255),
        Scalar(0,0,255),
        Scalar(255,0,255)
    };
    Mat gray, smallImg;
 
    cvtColor( img, gray, COLOR_BGR2GRAY );
    double fx = 1 / scale;
    // Resize the grayscale image by 1/scale (scale = 1 leaves it unchanged)
    resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR );
    // Histogram equalization to improve contrast before detection
    equalizeHist( smallImg, smallImg );

    // Measure the detection time
    t = (double)cvGetTickCount();
    // Detect faces: scale factor 1.1, at least 15 neighbors per candidate,
    // minimum face size 30x30 pixels
    cascade.detectMultiScale( smallImg, faces,
        1.1, 15, 0
        |CASCADE_SCALE_IMAGE,
        Size(30, 30) );

    t = (double)cvGetTickCount() - t;
 
    for ( size_t i = 0; i < faces.size(); i++ )
    {
        Rect r = faces[i];
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;
        Scalar color = colors[i%8];
        int radius;
 
        double aspect_ratio = (double)r.width/r.height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
            // cvRound rounds a double to the nearest integer
            center.x = cvRound((r.x + r.width*0.5)*scale);
            center.y = cvRound((r.y + r.height*0.5)*scale);
            radius = cvRound((r.width + r.height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
 
            face_centroid.x = center.x;
            face_centroid.y = center.y;

            //Publishing centroid of detected face
            face_centroid_pub.publish(face_centroid);
 
        }
        else
            rectangle( img, cvPoint(cvRound(r.x*scale), cvRound(r.y*scale)),
                       cvPoint(cvRound((r.x + r.width-1)*scale), cvRound((r.y + r.height-1)*scale)),
                       color, 3, 8, 0);
 
    }
 
    //Adding the center line and the left / right section boundaries
 
    Point pt1, pt2,pt3,pt4,pt5,pt6;
 
    //Center line
    pt1.x = screenmaxx / 2;
    pt1.y = 0;
 
    pt2.x = screenmaxx / 2;
    pt2.y = 480;
 
 
    //Left center threshold
    pt3.x = (screenmaxx / 2) - center_offset;
    pt3.y = 0;
 
    pt4.x = (screenmaxx / 2) - center_offset;
    pt4.y = 480;
 
    //Right center threshold
    pt5.x = (screenmaxx / 2) + center_offset;
    pt5.y = 0;
 
    pt6.x = (screenmaxx / 2) + center_offset;
    pt6.y = 480;
 
 
    // Draw the center line (red) and the left/right threshold lines (green);
    // the thickness argument must be a positive integer
    line(img,  pt1,  pt2, Scalar(0, 0, 255), 1);
    line(img,  pt3,  pt4, Scalar(0, 255, 0), 1);
    line(img,  pt5,  pt6, Scalar(0, 255, 0), 1);
 
 
    putText(img, "Left", cvPoint(50,240), FONT_HERSHEY_SIMPLEX, 1, cvScalar(255,0,0), 2, CV_AA);
    putText(img, "Center", cvPoint(280,240), FONT_HERSHEY_SIMPLEX, 1, cvScalar(0,0,255), 2, CV_AA);
    putText(img, "Right", cvPoint(480,240), FONT_HERSHEY_SIMPLEX, 1, cvScalar(255,0,0), 2, CV_AA);
 
    if (display_tracking_image == 1){
        imshow( OPENCV_WINDOW_1, img );
    }
 
}
 
 
 
};
 
int main(int argc, char** argv)
{
  ros::init(argc, argv, "face_tracker");
  Face_Detector ic;
  ros::spin();
  return 0;
}
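
The code above depends on a custom centroid message (face_tracker_pkg/centroid.h) and on parameters read from track.yaml. Neither file is shown in this post, so the following are only sketches inferred from how they are used in the code; the actual files in the downloadable package may differ.

A minimal centroid.msg, assuming the message carries only the two fields assigned in detectAndDraw():

# face_tracker_pkg/msg/centroid.msg (sketch)
int32 x
int32 y

A matching track.yaml, with the parameter names taken from the getParam() calls and the values mirroring the in-code defaults:

# track.yaml (sketch)
image_input_topic: "/usb_cam/image_raw"
face_detected_image_topic: "/face_detector/raw_image"
haar_file_face: "/home/dobot/face_detect_ws/src/face_tracker_pkg/data/face.xml"
face_tracking: 1
display_original_image: 1
display_tracking_image: 1
center_offset: 100
screenmaxx: 640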

 


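The start_tracking.launch file used by the launch command is also not shown. A plausible minimal sketch, assuming the camera is driven by the usb_cam package, that track.yaml sits in the package's config/ directory, and that the tracker executable is named face_tracker_node (all three are assumptions, not confirmed by the post):

<launch>
  <!-- Load track.yaml in the global namespace so that the plain NodeHandle
       getParam() calls in the code above can find the parameters -->
  <rosparam file="$(find face_tracker_pkg)/config/track.yaml" command="load"/>

  <!-- Camera driver publishing /usb_cam/image_raw (assumed to be usb_cam) -->
  <node name="usb_cam" pkg="usb_cam" type="usb_cam_node" output="screen">
    <param name="video_device" value="/dev/video0"/>
  </node>

  <!-- Face detection node; the executable name is an assumption -->
  <node name="face_tracker" pkg="face_tracker_pkg" type="face_tracker_node" output="screen"/>
</launch>

Once everything is running, the published face position can be checked with:

rostopic echo /face_centroid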