#include <opencv2/highgui.hpp>
#include <opencv2/objdetect/aruco_detector.hpp>
#include <iostream>
#include "aruco_samples_utility.hpp"
using namespace std;
using namespace cv;
namespace {
const char* about = "Basic marker detection";

//! [aruco_detect_markers_keys]
const char* keys =
        "{d | 0 | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
        "DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
        "DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
        "DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL=16,"
        "DICT_APRILTAG_16h5=17, DICT_APRILTAG_25h9=18, DICT_APRILTAG_36h10=19, DICT_APRILTAG_36h11=20}"
        "{cd | | Input file with custom dictionary }"
        "{v | | Input from video or image file, if omitted, input comes from camera }"
        "{ci | 0 | Camera id if input doesn't come from video (-v) }"
        "{c | | Camera intrinsic parameters. Needed for camera pose }"
        "{l | 0.1 | Marker side length (in meters). Needed for correct scale in camera pose }"
        "{dp | | File of marker detector parameters }"
        "{r | | show rejected candidates too }"
        "{refine | | Corner refinement: CORNER_REFINE_NONE=0, CORNER_REFINE_SUBPIX=1,"
        "CORNER_REFINE_CONTOUR=2, CORNER_REFINE_APRILTAG=3}";
//! [aruco_detect_markers_keys]

const string refineMethods[4] = {
    "None",
    "Subpixel",
    "Contour",
    "AprilTag"
};
}
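
// Example invocations (the executable name depends on how the sample was built and may differ):
//   detect_markers -d=10 -v=path/to/video.avi
//   detect_markers -d=10 -ci=0 -c=tutorial_camera_params.yml -l=0.05 -r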
int main(int argc, char *argv[]) {
    CommandLineParser parser(argc, argv, keys);
    parser.about(about);

    bool showRejected = parser.has("r");
    bool estimatePose = parser.has("c");
    float markerLength = parser.get<float>("l");

    aruco::DetectorParameters detectorParams = readDetectorParamsFromCommandLine(parser);
    aruco::Dictionary dictionary = readDictionatyFromCommandLine(parser);

    if (parser.has("refine")) {
        // override cornerRefinementMethod read from config file
        int user_method = parser.get<aruco::CornerRefineMethod>("refine");
        if (user_method < 0 || user_method >= 4)
        {
            std::cout << "Corner refinement method should be in range 0..3" << std::endl;
            return 0;
        }
        detectorParams.cornerRefinementMethod = user_method;
    }
    std::cout << "Corner refinement method: " << refineMethods[detectorParams.cornerRefinementMethod] << std::endl;

    int camId = parser.get<int>("ci");

    String video;
    if(parser.has("v")) {
        video = parser.get<String>("v");
    }

    if(!parser.check()) {
        parser.printErrors();
        return 0;
    }
    //! [aruco_pose_estimation1]
    Mat camMatrix, distCoeffs;
    if(estimatePose) {
        // You can read camera parameters from tutorial_camera_params.yml
        readCameraParamsFromCommandLine(parser, camMatrix, distCoeffs);
    }
    //! [aruco_pose_estimation1]
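    // Note: the file given with -c is read with cv::FileStorage; tutorial_camera_params.yml from the
    // samples stores the intrinsics as "camera_matrix" and "distortion_coefficients" nodes
    // (node names assumed from the sample utility header, not shown here).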

    //! [aruco_detect_markers]
    cv::aruco::ArucoDetector detector(dictionary, detectorParams);
    cv::VideoCapture inputVideo;

    int waitTime;
    if(!video.empty()) {
        // file input: block on waitKey(0) so each frame can be inspected
        inputVideo.open(video);
        waitTime = 0;
    } else {
        // live camera: poll the keyboard for 10 ms between frames
        inputVideo.open(camId);
        waitTime = 10;
    }

    double totalTime = 0;
    int totalIterations = 0;

    //! [aruco_pose_estimation2]
    // set coordinate system: the four marker corners in the marker's local frame (Z = 0 plane)
    cv::Mat objPoints(4, 1, CV_32FC3);
    objPoints.ptr<Vec3f>(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0);
    objPoints.ptr<Vec3f>(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0);
    objPoints.ptr<Vec3f>(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0);
    objPoints.ptr<Vec3f>(0)[3] = Vec3f(-markerLength/2.f, -markerLength/2.f, 0);
    //! [aruco_pose_estimation2]
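
    // objPoints lists the corners in the same order that detectMarkers reports image corners
    // (top-left, top-right, bottom-right, bottom-left), so each objPoints/corners pair can be
    // passed directly to solvePnP below.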

    while(inputVideo.grab()) {
        cv::Mat image, imageCopy;
        inputVideo.retrieve(image);

        double tick = (double)getTickCount();

        //! [aruco_pose_estimation3]
        vector<int> ids;
        vector<vector<Point2f> > corners, rejected;

        // detect markers and estimate pose
        detector.detectMarkers(image, corners, ids, rejected);

        size_t nMarkers = corners.size();
        vector<Vec3d> rvecs(nMarkers), tvecs(nMarkers);

        if(estimatePose && !ids.empty()) {
            // Calculate pose for each marker
            for (size_t i = 0; i < nMarkers; i++) {
                solvePnP(objPoints, corners.at(i), camMatrix, distCoeffs, rvecs.at(i), tvecs.at(i));
            }
        }
        //! [aruco_pose_estimation3]
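
        // solvePnP returns, for each detected marker, the rotation (rvec, a Rodrigues vector) and
        // translation (tvec) that map points from the marker's local frame into the camera frame.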

        double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
        totalTime += currentTime;
        totalIterations++;
        if(totalIterations % 30 == 0) {
            cout << "Detection Time = " << currentTime * 1000 << " ms "
                 << "(Mean = " << 1000 * totalTime / double(totalIterations) << " ms)" << endl;
        }

        //! [aruco_draw_pose_estimation]
        // draw results
        image.copyTo(imageCopy);
        if(!ids.empty()) {
            cv::aruco::drawDetectedMarkers(imageCopy, corners, ids);

            if(estimatePose) {
                for(unsigned int i = 0; i < ids.size(); i++)
                    cv::drawFrameAxes(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i], markerLength * 1.5f, 2);
            }
        }
        //! [aruco_draw_pose_estimation]

        if(showRejected && !rejected.empty())
            cv::aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255));

        imshow("out", imageCopy);
        char key = (char)waitKey(waitTime);
        if(key == 27) break;   // exit on ESC
    }
    //! [aruco_detect_markers]

    return 0;
}