
I was testing something out and ended up looking up quite a lot of things along the way, so here are some notes.

The plan

I want to use OpenCV inside Unity, but since I probably only want to try out one particular approach, buying a plugin straight from the Unity Asset Store doesn't seem worth it. After looking at a few projects, I'm considering having Unity talk to an Android plugin, and then having the Android side talk to OpenCV. So I've spent the past two days working on the OpenCV code I want to embed and on porting it to Android.
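
Roughly, the idea is that Unity calls into a small Java class exposed by the Android plugin, and that class is where the OpenCV code lives. Below is a minimal sketch of what that plugin boundary could look like; the class name OpenCVBridge and its processFrame method are made up for illustration (on the Unity side one would reach it through AndroidJavaClass / AndroidJavaObject):

// Hypothetical Android-side entry point for Unity (names are illustrative only).
package com.example.opencvbridge;

public class OpenCVBridge {
    // Unity hands over a raw RGBA frame; the plugin would run the OpenCV
    // processing (feature detection / matching) and return a simple result.
    public static String processFrame(byte[] rgba, int width, int height) {
        // ... build a Mat from the buffer and run the OpenCV code here ...
        return "dx=0,dy=0"; // placeholder result
    }
}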

Original C++ code

The goal is to track how far, and in which direction, the camera moved while a video was being shot. Features are extracted from consecutive frames, matched against each other, and the resulting flow is drawn.

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/nonfree/features2d.hpp>

#include <iostream>
#include <cassert>
#include <algorithm>

using namespace cv;
using namespace std;

void drawFlow(Mat& img, 
    const vector<DMatch>& matches,
    const vector<KeyPoint>& kp1,
    const vector<KeyPoint>& kp2){

    if(!matches.size()) return;

    double cut_dist = 100;
    // cut_dist might use the acc data from phone

    for(auto dm: matches){
        Point pt1 = kp1[dm.queryIdx].pt,
              pt2 = kp2[dm.trainIdx].pt;
        if(norm(pt1-pt2) >= cut_dist) continue;
        circle(img, pt1, 1, Scalar(255, 0, 0));
        circle(img, pt2, 1, Scalar(0, 255, 0));
        line(img, pt1, pt2, Scalar(255, 0, 0));
    }
    return;
}


int main(){
    VideoCapture cap("v1.3gp");
    SurfFeatureDetector detector(1000);
    FlannBasedMatcher matcher;

    vector<KeyPoint> kp0, kp1;
    Mat frame[10];
    int counter = 0;

    // frame[0] acts as the reference frame: every other iteration it is
    // overwritten with a fresh frame and its keypoints are re-detected
    while(cap.read(frame[counter])){
        if(counter == 0)
            detector.detect(frame[counter], kp0);
        detector.detect(frame[counter], kp1);

        Mat dis1, dis2;
        detector.compute(frame[0]      , kp0, dis1);
        detector.compute(frame[counter], kp1, dis2);
        
        vector<DMatch> matches;
        matcher.match(dis1, dis2, matches);

        // quick cap: keep only the first ~24 matches
        // (note: matches are not sorted by distance here)
        while(matches.size() >= 25)
            matches.pop_back();
        
        Mat output(frame[counter]);

        drawFlow(output, matches, kp0, kp1);

        imshow("surf match", output);
        waitKey(0);
        counter = (counter+1)%2;  // alternate between the two frame slots
    }
    return 0;
}

Porting to Android

Getting OpenCV into Eclipse followed the Android Development with OpenCV guide.

Then I ran into a few problems:

SURF belongs to the nonfree module

I switched to BRISK instead (and, since BRISK descriptors are binary, the FLANN matcher becomes BRUTEFORCE_HAMMING)

The detector, extractor, and matcher can only be initialized after the OpenCV module has finished loading

They have to go inside the LoaderCallback

private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
  @Override
  public void onManagerConnected(int status) {
    switch (status) {
      case LoaderCallbackInterface.SUCCESS:{
            Log.i(TAG, "OpenCV loaded successfully");
            mOpenCvCameraView.enableView();
            detector = FeatureDetector.create(FeatureDetector.BRISK);
            extractor = DescriptorExtractor.create(DescriptorExtractor.BRISK);
            matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
      } break;
      default:{
        super.onManagerConnected(status);
      } break;
    }
  }
};

Finally, the ported code looks like this

private Mat[] frame = new Mat[10];
private int counter = 0;
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
  // clone() so the stored frame keeps its own pixel data; the Mat returned
  // by rgba() is reused by the camera view on later frames
  frame[counter] = inputFrame.rgba().clone();
  
  MatOfKeyPoint kp0 = new MatOfKeyPoint();
  MatOfKeyPoint kp1 = new MatOfKeyPoint();
  detector.detect(frame[0], kp0);
  detector.detect(frame[counter], kp1);
  
  Mat[] descriptors = new Mat[2];
  descriptors[0] = new Mat();
  descriptors[1] = new Mat();
  extractor.compute(frame[0], kp0, descriptors[0]);
  extractor.compute(frame[counter], kp1, descriptors[1]);  
  
  MatOfDMatch matches = new MatOfDMatch();
  matcher.match(descriptors[0], descriptors[1], matches);
  
  DMatch[] arrdm = matches.toArray();
  KeyPoint[] arrkp0 = kp0.toArray();
  KeyPoint[] arrkp1 = kp1.toArray();
  
  Mat toshow = inputFrame.rgba();
  
  for(int lx = 0;lx < arrdm.length;lx++){
    DMatch dm  = arrdm[lx];
    org.opencv.core.Point p0 = arrkp0[dm.queryIdx].pt,
                          p1 = arrkp1[dm.trainIdx].pt;
    double dx = p0.x-p1.x, dy = p0.y-p1.y;
    double dist2 = dx*dx+dy*dy;
    if(dist2 > 1000) continue;  // squared-distance cutoff (~31 px)
    Core.circle(toshow, p0, 1, new Scalar(255, 0, 0));
    Core.circle(toshow, p1, 1, new Scalar(0, 255, 0));
    Core.line(toshow, p0, p1, new Scalar(255, 0, 0));
  }
  
  counter = (counter+1)%5;  // frame[0] (the reference) is refreshed every 5 frames
  return toshow;
}

Results on Android

Because of motion blur, the results aren't great. I wonder whether correcting with data from other sensors (such as orientation or the accelerometer) could improve things.
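
As a rough idea of how that sensor data might be grabbed on the Android side, here is a minimal sketch using the standard SensorManager API; how the readings would actually feed back into the matching (e.g. adjusting cut_dist) is an assumption, not something I've wired up:

// Sketch: keep the latest gyroscope reading around so onCameraFrame could use
// it to estimate how far features are expected to move between frames.
import android.hardware.Sensor;
import android.hardware.SensorEvent;
import android.hardware.SensorEventListener;
import android.hardware.SensorManager;

public class GyroTracker implements SensorEventListener {
    private final float[] lastRotationRate = new float[3]; // rad/s around x, y, z

    public void start(SensorManager sensorManager) {
        Sensor gyro = sensorManager.getDefaultSensor(Sensor.TYPE_GYROSCOPE);
        sensorManager.registerListener(this, gyro, SensorManager.SENSOR_DELAY_GAME);
    }

    @Override
    public void onSensorChanged(SensorEvent event) {
        System.arraycopy(event.values, 0, lastRotationRate, 0, 3);
    }

    @Override
    public void onAccuracyChanged(Sensor sensor, int accuracy) { /* not needed */ }

    public float[] getRotationRate() {
        return lastRotationRate;
    }
}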

To do

  • Grab data from other sensors for correction
  • I came across this paper, a visual odometry framework robust to motion blur, but I'm setting it aside for now.
  • Vuforia seems to provide an algorithm for getting the camera position.