//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../InferenceTestImage.hpp"
#include <armnn/TypesUtils.hpp>
#include <armnnUtils/TContainer.hpp>
#include <armnnUtils/Permute.hpp>
#include <algorithm>
#include <fstream>
#include <iterator>
#include <string>

// Parameters used in normalizing images
struct NormalizationParameters
{
    float scale{ 1.0 };
    std::array<float, 3> mean{ { 0.0, 0.0, 0.0 } };
    std::array<float, 3> stddev{ { 1.0, 1.0, 1.0 } };
};
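
// Illustrative sketch only, not used by the tool: an assumption about how
// InferenceTestImage applies these parameters, consistent with the values chosen in
// GetNormalizationParameters below. With the TFLite Float32 parameters
// (scale = 127.5, mean = 1.0) this maps the input range [0, 255] onto [-1, 1].
inline float IllustrativeNormalize(float pixel, const NormalizationParameters& params, unsigned int channel)
{
    // Assumed per-channel transform: (pixel / scale - mean) / stddev
    return (pixel / params.scale - params.mean[channel]) / params.stddev[channel];
}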

enum class SupportedFrontend
{
    TFLite = 0,
};

/** Get normalization parameters.
 * Note that different flavours of models and different model data types have different normalization methods.
 * This tool currently only supports TFLite models.
 *
 * @param[in] modelFormat One of the supported frontends
 * @param[in] outputType  Output type of the image tensor, which is also the data type of the intended model
 * @return Normalization parameters for the given frontend and output type
 */
NormalizationParameters GetNormalizationParameters(const SupportedFrontend& modelFormat,
                                                   const armnn::DataType& outputType)
{
    NormalizationParameters normParams;
    // Explicitly set default parameters
    normParams.scale  = 1.0;
    normParams.mean   = { 0.0, 0.0, 0.0 };
    normParams.stddev = { 1.0, 1.0, 1.0 };
    switch (modelFormat)
    {
        case SupportedFrontend::TFLite:
        default:
            switch (outputType)
            {
                case armnn::DataType::Float32:
                    normParams.scale = 127.5;
                    normParams.mean  = { 1.0, 1.0, 1.0 };
                    break;
                case armnn::DataType::Signed32:
                    normParams.mean = { 128.0, 128.0, 128.0 };
                    break;
                case armnn::DataType::QAsymmU8:
                    break;
                case armnn::DataType::QAsymmS8:
                    normParams.mean = { 128.0, 128.0, 128.0 };
                    break;
                default:
                    break;
            }
            break;
    }
    return normParams;
}
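
// Example usage (illustrative): pick the parameters for a signed 8-bit quantized
// TFLite model. Per the switch above this yields scale = 1.0, mean = { 128, 128, 128 }
// and stddev = { 1, 1, 1 }:
//
//     const NormalizationParameters normParams =
//         GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::QAsymmS8);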

/** Prepare raw image tensor data by loading the image from imagePath and preprocessing it.
 *
 * @param[in] imagePath    Path to the image file
 * @param[in] newWidth     Width of the output image tensor; a value of 0 keeps the original image width
 * @param[in] newHeight    Height of the output image tensor; a value of 0 keeps the original image height
 * @param[in] normParams   Parameters used to normalize the image
 * @param[in] batchSize    Batch size
 * @param[in] outputLayout Data layout of the output image tensor
 */
template <typename ElemType>
std::vector<ElemType> PrepareImageTensor(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize = 1,
                                         const armnn::DataLayout& outputLayout = armnn::DataLayout::NHWC);

// Prepare float32 image tensor
template <>
std::vector<float> PrepareImageTensor<float>(const std::string& imagePath,
                                             unsigned int newWidth,
                                             unsigned int newHeight,
                                             const NormalizationParameters& normParams,
                                             unsigned int batchSize,
                                             const armnn::DataLayout& outputLayout)
{
    // Generate image tensor
    std::vector<float> imageData;
    InferenceTestImage testImage(imagePath.c_str());
    if (newWidth == 0)
    {
        newWidth = testImage.GetWidth();
    }
    if (newHeight == 0)
    {
        newHeight = testImage.GetHeight();
    }
    // Resize the image to the new width and height (a value of 0 keeps the original
    // dimension, as handled above), then centre/normalise it
    imageData = testImage.Resize(newWidth, newHeight, CHECK_LOCATION(),
                                 InferenceTestImage::ResizingMethods::BilinearAndNormalized, normParams.mean,
                                 normParams.stddev, normParams.scale);
    if (outputLayout == armnn::DataLayout::NCHW)
    {
        // Convert to NCHW format: source dimension i of the NHWC tensor moves to
        // destination dimension NHWCToArmNN[i], i.e. N->0, H->2, W->3, C->1
        const armnn::PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
        armnn::TensorShape dstShape({ batchSize, 3, newHeight, newWidth });
        std::vector<float> tempImage(imageData.size());
        armnnUtils::Permute(dstShape, NHWCToArmNN, imageData.data(), tempImage.data(), sizeof(float));
        imageData.swap(tempImage);
    }
    return imageData;
}

// Prepare int32 image tensor
template <>
std::vector<int> PrepareImageTensor<int>(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize,
                                         const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    // Convert to int32 image tensor with static cast
    std::vector<int> imageDataInt;
    imageDataInt.reserve(imageDataFloat.size());
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataInt),
                   [](float val) { return static_cast<int>(val); });
    return imageDataInt;
}

// Prepare qasymmu8 image tensor
template <>
std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
                                                 unsigned int newWidth,
                                                 unsigned int newHeight,
                                                 const NormalizationParameters& normParams,
                                                 unsigned int batchSize,
                                                 const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    std::vector<uint8_t> imageDataQasymm8;
    imageDataQasymm8.reserve(imageDataFloat.size());
    // Convert to uint8 image tensor with static cast
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymm8),
                   [](float val) { return static_cast<uint8_t>(val); });
    return imageDataQasymm8;
}

// Prepare qasymms8 image tensor
template <>
std::vector<int8_t> PrepareImageTensor<int8_t>(const std::string& imagePath,
                                               unsigned int newWidth,
                                               unsigned int newHeight,
                                               const NormalizationParameters& normParams,
                                               unsigned int batchSize,
                                               const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    std::vector<int8_t> imageDataQasymms8;
    imageDataQasymms8.reserve(imageDataFloat.size());
    // Convert to int8 image tensor with static cast; the cast target must be int8_t
    // (not uint8_t) because the { 128, 128, 128 } mean shift produces negative values
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymms8),
                   [](float val) { return static_cast<int8_t>(val); });
    return imageDataQasymms8;
}
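
// Example usage (illustrative; "image.jpg" is a hypothetical input file): prepare a
// 224x224 float32 tensor in NCHW layout using the parameters selected for a float32
// TFLite model:
//
//     const NormalizationParameters normParams =
//         GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::Float32);
//     std::vector<float> imageData = PrepareImageTensor<float>(
//         "image.jpg", 224, 224, normParams, 1, armnn::DataLayout::NCHW);
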
/** Write image tensor to ofstream
*
* @param[in] imageData Image tensor data
* @param[in] imageTensorFile Output filestream (ofstream) to which the image tensor data is written
*/
template <typename ElemType>
void WriteImageTensorImpl(const std::vector<ElemType>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<ElemType>(imageTensorFile, " "));
}

// For uint8_t image tensors, cast the values to int before writing to prevent the data
// being written as characters instead of numerical values
template <>
void WriteImageTensorImpl<uint8_t>(const std::vector<uint8_t>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
}

// For int8_t image tensors, cast the values to int before writing to prevent the data
// being written as characters instead of numerical values
template <>
void WriteImageTensorImpl<int8_t>(const std::vector<int8_t>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
}
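
// Example usage (illustrative; "tensor.txt" is a hypothetical output file, and imageData
// is the tensor prepared in the sketch above): write the tensor as space-separated
// numerical values:
//
//     std::ofstream imageTensorFile("tensor.txt");
//     WriteImageTensorImpl(imageData, imageTensorFile);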