@import url(http://www.shnenglu.com/CuteSoft_Client/CuteEditor/Load.ashx?type=style&file=SyntaxHighlighter.css);@import url(/css/cuteeditor.css); 買了這個(gè)板子有好久了,去年放著沒心思搞,今天覺得該干點(diǎn)事了。
我的目標(biāo)是要使用tx1上的攝像頭來抓取視頻幀并做識(shí)別。
首先,jetson tx-1的板載攝像頭是不提供默認(rèn)v4l2的驅(qū)動(dòng)的,所以我自己給它找了驅(qū)動(dòng),但是這個(gè)驅(qū)動(dòng)是基于gstreamer的。
所以,我整合了一套gstreamer的管道命令,如下:

# Address that will receive the UDP/RTP stream (loopback = view on the same board).
export CLIENT_IP=127.0.0.1

# Sender pipeline: grab 960x540 @ 30 fps from the onboard camera (nvcamerasrc),
# flip it, H.264-encode at 4 Mbit/s on the Tegra hardware encoder, immediately
# hardware-decode it back, convert to UYVY, re-encode each frame as JPEG
# (quality 30), wrap in RTP/JPEG and push over UDP to $CLIENT_IP:5000.
# sync=false async=false keeps the sink from blocking on clock/preroll.
gst-launch-1.0 nvcamerasrc fpsRange="30 30" intent=3 ! nvvidconv flip-method=6 ! 'video/x-raw(memory:NVMM), width=(int)960, height=(int)540, format=(string)I420, framerate=(fraction)30/1' ! omxh264enc control-rate=2 bitrate=4000000 ! 'video/x-h264, stream-format=(string)byte-stream' ! h264parse ! queue ! omxh264dec ! nvvidconv ! 'video/x-raw, format=(string)UYVY' ! videoconvert ! jpegenc quality=30 ! rtpjpegpay ! udpsink host=$CLIENT_IP port=5000 sync=false async=false


這條管道把攝像頭采集的內(nèi)容縮放為960x540(即540p)、每秒30幀,先經(jīng)板載硬件做h264編碼再解碼,然后逐幀重新壓縮為JPEG,最后以RTP over UDP的方式發(fā)送到板卡的5000號(hào)端口上。
以上是gstreamer的服務(wù)器端,接下來,可以將內(nèi)容抓取下來看看。

客戶端的命令我就不寫了,直接使用代碼。這段代碼是從youtube上的jetson的openCV教程組裝來的,自己找了一下opencv處理gstreamer的API,搭上就可以用。

#include <iostream>
#include <vector>

#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>

using namespace cv;

int main(intchar**)
{
  VideoCapture input("./stream.sdp");
  if(!input.isOpened()){  // check if we succeeded                                                                                                                                                                     
    std::cout<< "open failed" << std::endl;
    return -1;
  }

  Mat img, img_gray;
  OrbFeatureDetector detector(7000);
  vector<KeyPoint> img_keypoints, car_keypoints;
  Mat img_descriptors, car_descriptors;

  input.read(img);
  Mat car;
  img(Rect(400, 320, 150, 100)).copyTo(car);

  detector(car, Mat(), car_keypoints, car_descriptors);
  drawKeypoints(car, car_keypoints, car);
  for(;;)
    {
      if(!input.read(img))
        break;
      detector(img, Mat(), img_keypoints, img_descriptors);
      drawKeypoints(img, img_keypoints, img);


      BFMatcher matcher;
      vector<DMatch> matches;
      matcher.match(car_descriptors, img_descriptors, matches);

      vector<Point2f> car_points, img_points;
      for(int i=0; i < matches.size(); ++i){
        car_points.push_back(car_keypoints[matches[i].queryIdx].pt);
        img_points.push_back(img_keypoints[matches[i].queryIdx].pt);
      }
      std::cout<<"car points count = " << car_points.size() << std::endl;

      if(car_points.size() >= 4){
        Matx33f H = findHomography(car_points, img_points, CV_RANSAC);

        vector<Point> car_border, img_border;
        car_border.push_back(Point(0, 0));
        car_border.push_back(Point(0, car.rows));
        car_border.push_back(Point(car.cols, car.rows));
        car_border.push_back(Point(car.cols, 0));
        for (size_t i = 0; i < car_border.size(); ++i){
          Vec3f p = H * Vec3f(car_border[i].x, car_border[i].y, 1);
          img_border.push_back(Point(p[0]/p[2], p[1]/p[2]));
        }
        polylines(img, img_border, true, CV_RGB(255, 255, 0));
        Mat img_matches;
        drawMatches(car, car_keypoints, img, img_keypoints, matches, img_matches);
        imshow("img_matches", img_matches);
      }
      // imshow("car", car);                                                                                                                                                                                           
      
// imshow("img", img);                                                                                                                                                                                           
      if(waitKey(27) >= 0) break;
    }
  // the camera will be deinitialized automatically in VideoCapture destructor                                                                                                                                         
  return 0;
}
編譯的makefile會(huì)麻煩一點(diǎn):先寫一個(gè)cmake的配置文件CMakeLists.txt
# Minimal build script: compiles hello.cpp into the cv_hello executable,
# linked against whatever OpenCV installation CMake can locate.
cmake_minimum_required(VERSION 2.8)

project(hello)

# REQUIRED makes the configure step fail loudly if OpenCV is not installed.
find_package(OpenCV REQUIRED)

include_directories(${OpenCV_INCLUDE_DIRS})

add_executable(cv_hello hello.cpp)

target_link_libraries(cv_hello ${OpenCV_LIBS})

執(zhí)行命令:cmake  ./ && make
然后生成cv_hello,可以執(zhí)行。
關(guān)鍵配置文件是stream.sdp文件,這個(gè)文件幫我減少了命令行的麻煩。內(nèi)容如下:
[stream.sdp]
c=IN IP4 127.0.0.1
m=video 5000 RTP/AVP 96
a=rtpmap:96 JPEG/90000

全部?jī)?nèi)容結(jié)束。可以看到攝像頭的視頻內(nèi)容了