File: Example_opencv.cpp

/**
 * @file
 * @brief Source file for Example Executable (example app for libopenshot)
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @ref License
 */

// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include <fstream>
#include <iostream>
#include <memory>
#include "CVTracker.h"
#include "CVStabilization.h"
#include "CVObjectDetection.h"

#include "Clip.h"
#include "EffectBase.h"
#include "EffectInfo.h"
#include "Frame.h"
#include "CrashHandler.h"

using namespace openshot;
using namespace std;

/*
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
The following methods build the JSON strings passed to the pre-processing effects
*/

string jsonFormat(string key, string value, string type="string"); // Format variables to the needed JSON format
string trackerJson(cv::Rect2d r, bool onlyProtoPath); // Set variable values for tracker effect
string stabilizerJson(bool onlyProtoPath); // Set variable values for stabilizer effect
string objectDetectionJson(bool onlyProtoPath); // Set variable values for object detector effect

/*
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
*/

// Show the pre-processed clip on the screen
void displayClip(openshot::Clip &r9){

    // Opencv display window
    cv::namedWindow("Display Image", cv::WINDOW_NORMAL );

    // Get video length
    int videoLength = r9.Reader()->info.video_length;

    // Loop through the clip and show it with the effects, if any
    for (long int frame = 0; frame < videoLength; frame++)
    {
        int frame_number = frame;
        // Get the frame
        std::shared_ptr<openshot::Frame> f = r9.GetFrame(frame_number);
        // Grab OpenCV::Mat image
        cv::Mat cvimage = f->GetImageCV();

        // Display the frame
        cv::imshow("Display Image", cvimage);

        // Press ESC on keyboard to exit
        char c = (char)cv::waitKey(25);
        if(c==27)
            break;
    }
    // Destroy all remaining windows
    cv::destroyAllWindows();
}

int main(int argc, char* argv[]) {

    // Set pre-processing effects
    bool TRACK_DATA = true;
    bool SMOOTH_VIDEO = false;
    bool OBJECT_DETECTION_DATA = false;

    // Get media path
    std::stringstream path;
    path << TEST_MEDIA_PATH << ((OBJECT_DETECTION_DATA) ? "run.mp4" : "test.avi");
    //  run.mp4 --> Used for object detector
    // test.avi --> Used for tracker and stabilizer
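    // NOTE (assumption): TEST_MEDIA_PATH is expected to be supplied at compile
    // time by the build system, e.g. via -DTEST_MEDIA_PATH="\"/path/to/media/\"".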

    // Thread controller required by the pre-processing constructors; it won't actually be used here
    ProcessingController processingController;

    // Open clip
    openshot::Clip r9(path.str());
    r9.Open();

    // Apply tracking effect on the clip
    if(TRACK_DATA){

        // Let the user select the bounding box (ROI) on the first frame
        cv::Mat roi = r9.GetFrame(0)->GetImageCV();
        cv::Rect2d r = cv::selectROI(roi);
        cv::destroyAllWindows();

        // Create a tracker object by passing a JSON string and a thread controller (the controller won't be used here)
        // JSON info: path to save the tracked data, type of tracker and bbox coordinates
        CVTracker tracker(trackerJson(r, false), processingController);

        // Start the tracking
        tracker.trackClip(r9, 0, 0, true);
        // Save the tracked data
        tracker.SaveTrackedData();

        // Create a tracker effect
        EffectBase* e = EffectInfo().CreateEffect("Tracker");

        // Pass a JSON string with the saved tracked data
        // The effect will read the tracked data and store it in a std::map<frame, data_struct>
        e->SetJson(trackerJson(r, true));
        // Add the effect to the clip
        r9.AddEffect(e);
    }

    // Apply stabilizer effect on the clip
    if(SMOOTH_VIDEO){

        // Create a stabilizer object by passing a JSON string and a thread controller (the controller won't be used here)
        // JSON info: path to save the stabilized data and smoothing window value
        CVStabilization stabilizer(stabilizerJson(false), processingController);

        // Start the stabilization
        stabilizer.stabilizeClip(r9, 0, 100, true);
        // Save the stabilization data
        stabilizer.SaveStabilizedData();

        // Create a stabilizer effect
        EffectBase* e = EffectInfo().CreateEffect("Stabilizer");

        // Pass a JSON string with the saved stabilized data
        // The effect will read the stabilization data and store it in a std::map<frame, data_struct>
        e->SetJson(stabilizerJson(true));
        // Add the effect to the clip
        r9.AddEffect(e);
    }

    // Apply object detection effect on the clip
    if(OBJECT_DETECTION_DATA){

        // Create an object detection object by passing a JSON string and a thread controller (the controller won't be used here)
        // JSON info: path to save the detection data, processing device, model weights, model configuration and class names
        CVObjectDetection objectDetection(objectDetectionJson(false), processingController);

        // Start the object detection
        objectDetection.detectObjectsClip(r9, 0, 100, true);
        // Save the object detection data
        objectDetection.SaveObjDetectedData();

        // Create an object detector effect
        EffectBase* e = EffectInfo().CreateEffect("ObjectDetection");

        // Pass a JSON string with the saved detection data
        // The effect will read the detections and store them in a std::map<frame, data_struct>
        e->SetJson(objectDetectionJson(true));
        // Add the effect to the clip
        r9.AddEffect(e);
    }

    // Show the pre-processed clip on the screen
    displayClip(r9);

    // Close the clip
    r9.Close();

    std::cout << "Completed successfully!" << std::endl;

    return 0;
}



/*
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||

The following methods build the JSON strings passed to the pre-processing effects

||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
*/



string jsonFormat(string key, string value, string type){
    stringstream jsonFormatMessage;
    jsonFormatMessage << ( "\"" + key + "\": " );

    if(type == "string")
        jsonFormatMessage << ( "\"" + value + "\"" );
    if(type == "rstring")
        jsonFormatMessage <<  value;
    if(type == "int")
        jsonFormatMessage << stoi(value);
    if(type == "float")
        jsonFormatMessage << (float)stof(value);
    if(type == "double")
        jsonFormatMessage << (double)stof(value);
    if (type == "bool")
        jsonFormatMessage << ((value == "true" || value == "1") ? "true" : "false");

    return jsonFormatMessage.str();
}
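
// Illustrative examples of what jsonFormat() produces (quotes shown as they
// appear inside the returned string):
//   jsonFormat("tracker-type", "KCF")         ->  "tracker-type": "KCF"
//   jsonFormat("x", "10", "int")              ->  "x": 10
//   jsonFormat("region", "{...}", "rstring")  ->  "region": {...}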

// Return JSON string for the tracker effect
string trackerJson(cv::Rect2d r, bool onlyProtoPath){

    // Define path to save tracked data
    string protobufDataPath = "kcf_tracker.data";
    // Set the tracker
    string tracker = "KCF";

    // Build each component of the JSON string
    string protobuf_data_path = jsonFormat("protobuf_data_path", protobufDataPath);
    string trackerType = jsonFormat("tracker-type", tracker);
    string bboxCoords = jsonFormat(
                                    "region",
                                            "{" + jsonFormat("x", to_string(r.x), "int") +
                                            "," + jsonFormat("y", to_string(r.y), "int") +
                                            "," + jsonFormat("width", to_string(r.width), "int") +
                                            "," + jsonFormat("height", to_string(r.height), "int") +
                                            "," + jsonFormat("first-frame", to_string(0), "int") +
                                            "}",
                                    "rstring");

    // Return only the protobuf path in JSON format
    if(onlyProtoPath)
        return "{" + protobuf_data_path + "}";
    // Return all the parameters for the pre-processing effect
    else
        return "{" + protobuf_data_path + "," + trackerType + "," + bboxCoords + "}";
}
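
// With onlyProtoPath == false, trackerJson() returns a string like the
// following (region values are illustrative; whitespace added for readability):
//   {"protobuf_data_path": "kcf_tracker.data", "tracker-type": "KCF",
//    "region": {"x": 10, "y": 20, "width": 100, "height": 80, "first-frame": 0}}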

// Return JSON string for the stabilizer effect
string stabilizerJson(bool onlyProtoPath){

    // Define path to save stabilized data
    string protobufDataPath = "example_stabilizer.data";
    // Set smoothing window value
    string smoothingWindow = "30";

    // Build each component of the JSON string
    string protobuf_data_path = jsonFormat("protobuf_data_path", protobufDataPath);
    string smoothing_window = jsonFormat("smoothing_window", smoothingWindow, "int");

    // Return only the protobuf path in JSON format
    if(onlyProtoPath)
        return "{" + protobuf_data_path + "}";
    // Return all the parameters for the pre-processing effect
    else
        return "{" + protobuf_data_path + "," + smoothing_window + "}";
}
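
// With onlyProtoPath == false, stabilizerJson() returns
// (whitespace added for readability):
//   {"protobuf_data_path": "example_stabilizer.data", "smoothing_window": 30}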

// Return JSON string for the object detector effect
string objectDetectionJson(bool onlyProtoPath){

    // Define path to save object detection data
    string protobufDataPath = "example_object_detection.data";
    // Define processing device
    string processingDevice = "GPU";
    // Set path to model configuration file
    string modelConfiguration = "yolov3.cfg";
    // Set path to model weights
    string modelWeights = "yolov3.weights";
    // Set path to class names file
    string classesFile = "obj.names";

    // Build each component of the JSON string
    string protobuf_data_path = jsonFormat("protobuf_data_path", protobufDataPath);
    string processing_device = jsonFormat("processing_device", processingDevice);
    string model_configuration = jsonFormat("model_configuration", modelConfiguration);
    string model_weights = jsonFormat("model_weights", modelWeights);
    string classes_file = jsonFormat("classes_file", classesFile);

    // Return only the protobuf path in JSON format
    if(onlyProtoPath)
        return "{" + protobuf_data_path + "}";
    else
        return "{" + protobuf_data_path + "," + processing_device + "," + model_configuration + ","
                + model_weights + "," + classes_file + "}";
}