File: oak_basic_infer.cpp

package info (click to toggle)
opencv 4.10.0%2Bdfsg-5
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid, trixie
  • size: 282,092 kB
  • sloc: cpp: 1,178,079; xml: 682,621; python: 49,092; lisp: 31,150; java: 25,469; ansic: 11,039; javascript: 6,085; sh: 1,214; cs: 601; perl: 494; objc: 210; makefile: 173
file content (122 lines) | stat: -rw-r--r-- 4,173 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
#include <algorithm>
#include <iostream>
#include <sstream>

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/core.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/infer/parsers.hpp>
#include <opencv2/gapi/render.hpp>
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/highgui.hpp>

#include <opencv2/gapi/oak/oak.hpp>
#include <opencv2/gapi/oak/infer.hpp>

// Command-line interface spec consumed by cv::CommandLineParser in main():
//   -h / --help  : print usage and exit
//   --detector   : path to a .blob model compiled for the OAK device (required)
//   --duration   : number of frames to pull before stopping (default 100)
const std::string keys =
    "{ h help              |             | Print this help message }"
    "{ detector            |             | Path to compiled .blob face detector model }"
    "{ duration            | 100         | Number of frames to pull from camera and run inference on }";

namespace custom {

// Face-detection network: consumes a media frame, yields a raw SSD blob.
G_API_NET(FaceDetector, <cv::GMat(cv::GFrame)>, "sample.custom.face-detector");

using GDetections = cv::GArray<cv::Rect>;
using GSize       = cv::GOpaque<cv::Size>;
using GPrims      = cv::GArray<cv::gapi::wip::draw::Prim>;

// Graph operation: map a list of detection rectangles to render primitives.
G_API_OP(BBoxes, <GPrims(GDetections)>, "sample.custom.b-boxes") {
    static cv::GArrayDesc outMeta(const cv::GArrayDesc &) {
        return cv::empty_array_desc();
    }
};

// CPU implementation of BBoxes: every detected face becomes a
// 2px-thick green rectangle primitive.
GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) {
    static void run(const std::vector<cv::Rect> &in_face_rcs,
                          std::vector<cv::gapi::wip::draw::Prim> &out_prims) {
        out_prims.clear();
        const cv::Scalar green = CV_RGB(0,255,0);
        for (const auto &face_rc : in_face_rcs) {
            out_prims.emplace_back(cv::gapi::wip::draw::Rect(face_rc, green, 2));
        }
    }
};

} // namespace custom

int main(int argc, char *argv[]) {
    cv::CommandLineParser cmd(argc, argv, keys);
    if (cmd.has("help")) {
        cmd.printMessage();
        return 0;
    }

    const auto det_name = cmd.get<std::string>("detector");
    const auto duration = cmd.get<int>("duration");

    if (det_name.empty()) {
        std::cerr << "FATAL: path to detection model is not provided for the sample."
                  << "Please specify it with --detector options."
                  << std::endl;
        return 1;
    }

    // Prepare G-API kernels and networks packages:
    auto detector = cv::gapi::oak::Params<custom::FaceDetector>(det_name);
    auto networks = cv::gapi::networks(detector);

    auto kernels = cv::gapi::combine(
        cv::gapi::kernels<custom::OCVBBoxes>(),
        cv::gapi::oak::kernels());

    auto args = cv::compile_args(kernels, networks);

    // Initialize graph structure
    cv::GFrame in;
    cv::GFrame copy = cv::gapi::oak::copy(in); // NV12 transfered to host + passthrough copy for infer
    cv::GOpaque<cv::Size> sz = cv::gapi::streaming::size(copy);

    // infer is not affected by the actual copy here
    cv::GMat blob = cv::gapi::infer<custom::FaceDetector>(copy);
    // FIXME: OAK infer detects faces slightly out of frame bounds
    cv::GArray<cv::Rect> rcs = cv::gapi::parseSSD(blob, sz, 0.5f, true, false);
    auto rendered = cv::gapi::wip::draw::renderFrame(copy, custom::BBoxes::on(rcs));
    // on-the-fly conversion NV12->BGR
    cv::GMat out = cv::gapi::streaming::BGR(rendered);

    auto pipeline  = cv::GComputation(cv::GIn(in), cv::GOut(out, rcs))
        .compileStreaming(std::move(args));

    // Graph execution
    pipeline.setSource(cv::gapi::wip::make_src<cv::gapi::oak::ColorCamera>());
    pipeline.start();

    cv::Mat out_mat;
    std::vector<cv::Rect> out_dets;
    int frames = 0;
    while (pipeline.pull(cv::gout(out_mat, out_dets))) {
        std::string name = "oak_infer_frame_" + std::to_string(frames) + ".png";

        cv::imwrite(name, out_mat);

        if (!out_dets.empty()) {
            std::cout << "Got " << out_dets.size() << " detections on frame #" << frames << std::endl;
        }

        ++frames;
        if (frames == duration) {
            pipeline.stop();
            break;
        }
    }
    std::cout << "Pipeline finished. Processed " << frames << " frames" << std::endl;
    return 0;
}