Using the Facemark API {#tutorial_facemark_usage}
==========================================================
Goals
----
This tutorial will help you to:
- Create a Facemark object.
- Set a user-defined face detector for the facemark algorithm.
- Train the algorithm.
- Use the trained model to detect the facial landmarks from a given image.
Preparation
---------
Before you continue with this tutorial, you should download a facial landmark detection dataset.
We suggest the Helen dataset, which can be retrieved at <http://www.ifp.illinois.edu/~vuongle2/helen/> (caution: the algorithm requires around 9GB of RAM to train on this dataset).
Make sure that the annotation format is supported by the API; the contents of an annotation file should look like the following snippet:
@code
version: 1
n_points: 68
{
212.716603 499.771793
230.232816 566.290071
...
}
@endcode
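If you want to sanity-check that an annotation file in this format parses correctly, the API provides the helper `cv::face::loadFacePoints`. A minimal sketch (the file path is illustrative, and header layout may vary slightly between OpenCV versions):
@code
#include <opencv2/face.hpp> // Facemark API from opencv_contrib
#include <iostream>
using namespace cv;
using namespace cv::face;

int main(){
    std::vector<Point2f> points;
    // loadFacePoints parses the "version / n_points / { ... }" format shown above
    if (loadFacePoints("trainset/100032540_1.pts", points))
        std::cout << "loaded " << points.size() << " landmark points" << std::endl;
    return 0;
}
@endcode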
The next thing to do is to create two text files containing the lists of image files and annotation files, respectively. Make sure that the order of images and annotations in both files matches. Furthermore, it is advised to use absolute paths instead of relative paths.
Example of creating the file lists on a Linux machine:
@code
ls $PWD/trainset/*.jpg > images_train.txt
ls $PWD/trainset/*.pts > annotation_train.txt
@endcode
Example content of `images_train.txt`:
@code
/home/user/helen/trainset/100032540_1.jpg
/home/user/helen/trainset/100040721_1.jpg
/home/user/helen/trainset/100040721_2.jpg
/home/user/helen/trainset/1002681492_1.jpg
@endcode
Example content of `annotation_train.txt`:
@code
/home/user/helen/trainset/100032540_1.pts
/home/user/helen/trainset/100040721_1.pts
/home/user/helen/trainset/100040721_2.pts
/home/user/helen/trainset/1002681492_1.pts
@endcode
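You can verify that the two lists pair up correctly with `cv::face::loadDatasetList`, the same helper used later in the training section. A quick sketch:
@code
#include <opencv2/face.hpp>
#include <iostream>
using namespace cv;
using namespace cv::face;

int main(){
    std::vector<String> images, annotations;
    // returns false if either list cannot be loaded
    if (!loadDatasetList("images_train.txt", "annotation_train.txt", images, annotations))
        return -1;
    // both lists should have the same length, with matching order
    std::cout << images.size() << " images, " << annotations.size() << " annotations" << std::endl;
    return 0;
}
@endcode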
Creating the facemark object
---------
@code
/*create the facemark instance*/
FacemarkLBF::Params params;
params.model_filename = "helen.model"; // the trained model will be saved using this filename
Ptr<Facemark> facemark = FacemarkLBF::create(params);
@endcode
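For reference, here is the same step as a minimal compile-ready sketch, together with the headers the snippets in this tutorial assume (header names follow the standard OpenCV/opencv_contrib layout and may vary slightly between versions):
@code
#include <opencv2/face.hpp> // Facemark API from opencv_contrib
#include <opencv2/imgproc.hpp> // cvtColor, equalizeHist, rectangle
#include <opencv2/imgcodecs.hpp> // imread
#include <opencv2/highgui.hpp> // imshow, waitKey
#include <opencv2/objdetect.hpp> // CascadeClassifier
using namespace cv;
using namespace cv::face;

int main(){
    FacemarkLBF::Params params;
    params.model_filename = "helen.model"; // the trained model will be saved using this filename
    Ptr<FacemarkLBF> facemark = FacemarkLBF::create(params);
    return 0;
}
@endcode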
Set a custom face detector function
---------
First, you need to create your own face detector function. You might also need to create a `struct` to hold custom parameters; alternatively, you can hard-code these parameters within the `myDetector` function.
@code
struct Conf {
    cv::String model_path;
    double scaleFactor;
    Conf(cv::String s, double d){
        model_path = s;
        scaleFactor = d;
        face_detector.load(model_path);
    };
    CascadeClassifier face_detector;
};

bool myDetector(InputArray image, OutputArray faces, Conf *conf){
    Mat gray;
    if (image.channels() > 1)
        cvtColor(image, gray, COLOR_BGR2GRAY);
    else
        gray = image.getMat().clone();
    equalizeHist(gray, gray);
    std::vector<Rect> faces_;
    conf->face_detector.detectMultiScale(gray, faces_, conf->scaleFactor, 2, CASCADE_SCALE_IMAGE, Size(30, 30));
    Mat(faces_).copyTo(faces);
    return true;
}
@endcode
The following snippet demonstrates how to set the custom detector on the facemark object and use it to detect faces. Keep in mind that some facemark algorithms use the face detector during the training process. Since `setFaceDetector` takes a generic function pointer with a `void*` user-data argument, `myDetector` is cast to `FN_FaceDetector` below.
@code
Conf config("../data/lbpcascade_frontalface.xml", 1.4);
facemark->setFaceDetector((FN_FaceDetector)myDetector, &config); // we must guarantee proper lifetime of "config" object
@endcode
Here is the snippet for detecting faces using the user-defined face detector function.
@code
Mat img = imread("../data/himym3.jpg");
std::vector<cv::Rect> faces;
facemark->getFaces(img, faces); // uses the detector and user data set above
for (size_t j = 0; j < faces.size(); j++) {
    cv::rectangle(img, faces[j], cv::Scalar(255, 0, 255));
}
imshow("result", img);
waitKey(0);
@endcode
Training a facemark object
----
- First of all, you need to set the training parameters:
@code
params.n_landmarks = 68; // number of landmark points
params.initShape_n = 10; // multiplier for data augmentation
params.stages_n = 5; // number of refinement stages
params.tree_n = 6; // number of trees in the model for each landmark point
params.tree_depth = 5; // the depth of each decision tree
facemark = FacemarkLBF::create(params);
@endcode
- Then, load the file lists from the dataset that you prepared.
@code
std::vector<String> images_train;
std::vector<String> landmarks_train;
loadDatasetList("images_train.txt", "annotation_train.txt", images_train, landmarks_train);
@endcode
- The next step is to add training samples into the facemark object.
@code
Mat image;
std::vector<Point2f> facial_points;
for (size_t i = 0; i < images_train.size(); i++) {
    image = imread(images_train[i]);
    loadFacePoints(landmarks_train[i], facial_points);
    facemark->addTrainingSample(image, facial_points);
}
@endcode
- Execute the training process. When training finishes, the model is saved to the file set in `params.model_filename` ("helen.model" in this example). A consolidated sketch of the whole training pipeline follows this list.
@code
/* train the algorithm */
facemark->training();
@endcode
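Putting it together, a rough end-to-end training program might look like the sketch below (`Conf` and `myDetector` are the definitions from the previous section; paths and file names are the ones used earlier):
@code
#include <opencv2/face.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
using namespace cv;
using namespace cv::face;

// Conf and myDetector as defined in the previous section

int main(){
    FacemarkLBF::Params params;
    params.model_filename = "helen.model";
    params.n_landmarks = 68;
    params.initShape_n = 10;
    params.stages_n = 5;
    params.tree_n = 6;
    params.tree_depth = 5;
    Ptr<FacemarkLBF> facemark = FacemarkLBF::create(params);

    Conf config("../data/lbpcascade_frontalface.xml", 1.4);
    facemark->setFaceDetector((FN_FaceDetector)myDetector, &config);

    std::vector<String> images_train, landmarks_train;
    loadDatasetList("images_train.txt", "annotation_train.txt", images_train, landmarks_train);

    std::vector<Point2f> facial_points;
    for (size_t i = 0; i < images_train.size(); i++) {
        Mat image = imread(images_train[i]);
        if (image.empty() || !loadFacePoints(landmarks_train[i], facial_points))
            continue; // skip samples that fail to load
        facemark->addTrainingSample(image, facial_points);
    }

    facemark->training(); // writes the model to params.model_filename
    return 0;
}
@endcode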
Use the trained model to detect facial landmarks in a given image
-----
- First of all, load the trained model. Alternatively, you can download a pre-trained model from this link: <https://raw.githubusercontent.com/kurnianggoro/GSOC2017/master/data/lbfmodel.yaml>
@code
facemark->loadModel(params.model_filename);
@endcode
- Detect the faces
@code
facemark->getFaces(img, faces);
@endcode
- Perform the fitting process
@code
std::vector<std::vector<Point2f> > landmarks;
facemark->fit(img, faces, landmarks);
@endcode
- Display the result
@code
for (size_t j = 0; j < faces.size(); j++) {
    face::drawFacemarks(img, landmarks[j], Scalar(0, 0, 255));
}
imshow("result", img);
waitKey(0);
@endcode
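Finally, here is a rough compile-ready sketch that combines loading the pre-trained model, face detection, fitting, and drawing into one program (file paths are illustrative; adjust them to your setup):
@code
#include <opencv2/face.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/objdetect.hpp>
using namespace cv;
using namespace cv::face;

int main(){
    // load the pre-trained model (e.g. the lbfmodel.yaml linked above)
    Ptr<FacemarkLBF> facemark = FacemarkLBF::create();
    facemark->loadModel("lbfmodel.yaml");

    // detect faces with a plain cascade classifier
    CascadeClassifier face_cascade("../data/lbpcascade_frontalface.xml");
    Mat img = imread("../data/himym3.jpg");
    Mat gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    equalizeHist(gray, gray);
    std::vector<Rect> faces;
    face_cascade.detectMultiScale(gray, faces);

    // fit the model to each detected face and draw the landmarks
    std::vector<std::vector<Point2f> > landmarks;
    if (facemark->fit(img, faces, landmarks)) {
        for (size_t j = 0; j < faces.size(); j++)
            drawFacemarks(img, landmarks[j], Scalar(0, 0, 255));
    }
    imshow("result", img);
    waitKey(0);
    return 0;
}
@endcode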