/*------------------------------------------------------------------------------------------*\
   This file contains material supporting chapter 10 of the cookbook:
   Computer Vision Programming using the OpenCV Library.
   by Robert Laganiere, Packt Publishing, 2011.

   This program is free software; permission is hereby granted to use, copy, modify,
   and distribute this source code, or portions thereof, for any purpose, without fee,
   subject to the restriction that the copyright notice may not be removed
   or altered from any source or altered source distribution.
   The software is released on an as-is basis and without any warranties of any kind.
   In particular, the software is not guaranteed to be fault-tolerant or free from failure.
   The author disclaims all warranties with regard to this software; any use,
   and any consequent failure, is purely the responsibility of the user.

   Copyright (C) 2010-2011 Robert Laganiere, www.laganiere.name
\*------------------------------------------------------------------------------------------*/
#if !defined FTRACKER
#define FTRACKER

#include <string>
#include <vector>
#include <cmath>        // std::fabs, used in acceptTrackedPoint()
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/video/tracking.hpp>

#include "videoprocessor.h"
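// Frame processor that tracks feature points across a video sequence:
// new corners are detected with cv::goodFeaturesToTrack whenever too few
// points remain, existing points are followed from frame to frame with
// cv::calcOpticalFlowPyrLK (pyramidal Lucas-Kanade sparse optical flow),
// and the surviving tracks are drawn onto the output frame.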
class FeatureTracker : public FrameProcessor {

    cv::Mat gray;                        // current gray-level image
    cv::Mat gray_prev;                   // previous gray-level image
    std::vector<cv::Point2f> points[2];  // tracked features from 0->1
    std::vector<cv::Point2f> initial;    // initial position of tracked points
    std::vector<cv::Point2f> features;   // detected features
    int max_count;                       // maximum number of features to detect
    double qlevel;                       // quality level for feature detection
    double minDist;                      // minimum distance between two feature points
    std::vector<uchar> status;           // status of tracked features
    std::vector<float> err;              // error in tracking

public:

    FeatureTracker() : max_count(500), qlevel(0.01), minDist(10.) {}

    // processing method
    void process(cv::Mat &frame, cv::Mat &output) {
        // convert to gray-level image
        cv::cvtColor(frame, gray, CV_BGR2GRAY);
        frame.copyTo(output);

        // 1. if new feature points must be added
        if (addNewPoints()) {

            // detect feature points
            detectFeaturePoints();
            // add the detected features to the currently tracked features
            points[0].insert(points[0].end(), features.begin(), features.end());
            initial.insert(initial.end(), features.begin(), features.end());
        }

        // for first image of the sequence
        if (gray_prev.empty())
            gray.copyTo(gray_prev);

        // 2. track features
        cv::calcOpticalFlowPyrLK(gray_prev, gray,  // 2 consecutive images
                                 points[0],        // input point positions in first image
                                 points[1],        // output point positions in the second image
                                 status,           // tracking success
                                 err);             // tracking error
        // 3. loop over the tracked points to reject the undesirables
        int k = 0;
        for (int i = 0; i < points[1].size(); i++) {

            // do we keep this point?
            if (acceptTrackedPoint(i)) {

                // keep this point in vector
                initial[k] = initial[i];
                points[1][k++] = points[1][i];
            }
        }

        // eliminate unsuccessful points
        points[1].resize(k);
        initial.resize(k);

        // 4. handle the accepted tracked points
        handleTrackedPoints(frame, output);

        // 5. current points and image become previous ones
        std::swap(points[1], points[0]);
        cv::swap(gray_prev, gray);
    }
    // feature point detection
    void detectFeaturePoints() {

        // detect the features
        cv::goodFeaturesToTrack(gray,       // the image
                                features,   // the output detected features
                                max_count,  // the maximum number of features
                                qlevel,     // quality level
                                minDist);   // min distance between two features
    }

    // determine if new points should be added
    bool addNewPoints() {

        // if too few points
        return points[0].size() <= 10;
    }
    // determine which tracked point should be accepted
    bool acceptTrackedPoint(int i) {

        return status[i] &&
               // if point has moved
               (std::fabs(points[0][i].x - points[1][i].x) +
                std::fabs(points[0][i].y - points[1][i].y) > 2);
    }
    // handle the currently tracked points
    void handleTrackedPoints(cv::Mat &frame, cv::Mat &output) {

        // for all tracked points
        for (int i = 0; i < points[1].size(); i++) {

            // draw line and circle
            cv::line(output, initial[i], points[1][i], cv::Scalar(255, 255, 255));
            cv::circle(output, points[1][i], 3, cv::Scalar(255, 255, 255), -1);
        }
    }
};
#endif
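/*
   Usage sketch (not part of the original header). Assuming this header is saved as
   featuretracker.h and that the VideoProcessor class declared in videoprocessor.h
   follows the cookbook's interface (setInput(), setFrameProcessor(), setDelay(),
   getFrameRate(), displayOutput(), run()), the tracker could be driven like this;
   "bike.avi" is a placeholder input file name:

       #include "featuretracker.h"

       int main() {
           VideoProcessor processor;                // video read/process/display loop
           FeatureTracker tracker;                  // the frame processor defined above

           processor.setInput("bike.avi");          // open the input video
           processor.setFrameProcessor(&tracker);   // route each frame through process()
           processor.setDelay(1000. / processor.getFrameRate());  // play at source rate
           processor.displayOutput("Tracked Features");           // show the drawn output
           processor.run();                         // process the whole sequence

           return 0;
       }
*/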