/*
* Copyright (c) 2016-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef ARM_COMPUTE_TENSORSHAPE_H
#define ARM_COMPUTE_TENSORSHAPE_H
#include "arm_compute/core/Dimensions.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/utils/misc/Utility.h"
#include <algorithm>
#include <array>
#include <functional>
#include <numeric>
namespace arm_compute
{
/** Shape of a tensor */
class TensorShape : public Dimensions<size_t>
{
public:
/** Constructor to initialize the tensor shape.
*
* @param[in] dims Values to initialize the dimensions.
*/
template <typename... Ts>
TensorShape(Ts... dims) : Dimensions{dims...}
{
// Initialize unspecified dimensions to 1
if (_num_dimensions > 0)
{
std::fill(_id.begin() + _num_dimensions, _id.end(), 1);
}
// Correct the number of dimensions to ignore trailing dimensions of size 1
apply_dimension_correction();
}
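// Illustrative usage sketch (not part of the upstream header); values are hypothetical
// and assume the inherited Dimensions<size_t>::num_max_dimensions limit (typically 6):
//
//   TensorShape a(3U, 4U);     // values {3, 4, 1, 1, ...}, a.num_dimensions() == 2
//   TensorShape b(3U, 4U, 1U); // trailing unit dimension is corrected away:
//                              // b.num_dimensions() == 2, b.total_size() == 12
//   TensorShape c;             // empty shape: c.num_dimensions() == 0, c.total_size() == 0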
/** Allow instances of this class to be copy constructed */
TensorShape(const TensorShape &) = default;
/** Allow instances of this class to be copied */
TensorShape &operator=(const TensorShape &) = default;
/** Allow instances of this class to be move constructed */
TensorShape(TensorShape &&) = default;
/** Allow instances of this class to be moved */
TensorShape &operator=(TensorShape &&) = default;
/** Default destructor */
~TensorShape() = default;
/** Accessor to set the value of one of the dimensions.
*
* @param[in] dimension Dimension for which the value is set.
* @param[in] value Value to be set for the dimension.
* @param[in] apply_dim_correction (Optional) Flag to state whether to apply dimension correction after setting one dimension. E.g. when permuting NCHW -> NHWC, 1x1x2 would become 2x1x1, but _num_dimensions should stay 3 rather than collapse to 1.
* @param[in] increase_dim_unit (Optional) Set to true if new unit dimensions increase the number of dimensions of the shape.
*
* @return *this.
*/
TensorShape &set(size_t dimension, size_t value, bool apply_dim_correction = true, bool increase_dim_unit = true)
{
// Clear entire shape if one dimension is zero
if (value == 0)
{
_num_dimensions = 0;
std::fill(_id.begin(), _id.end(), 0);
}
else
{
// Make sure all empty dimensions are filled with 1
std::fill(_id.begin() + _num_dimensions, _id.end(), 1);
// Set the specified dimension and increase the number of dimensions if
// necessary
Dimensions::set(dimension, value, increase_dim_unit);
// Correct the number of dimensions to ignore trailing dimensions of size 1
if (apply_dim_correction)
{
apply_dimension_correction();
}
}
return *this;
}
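// Illustrative sketch of set() behaviour (hypothetical values, not from the upstream header):
//
//   TensorShape s(3U, 4U);  // 3x4
//   s.set(3, 2);            // gaps are filled with 1 -> 3x4x1x2, s.num_dimensions() == 4
//   s.set(3, 1, false);     // without correction the trailing 1 keeps the rank at 4
//   s.set(0, 0);            // a zero extent clears the whole shape:
//                           // s.num_dimensions() == 0, s.total_size() == 0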
/** Accessor to remove the dimension n from the tensor shape.
*
* @note The upper dimensions of the tensor shape will be shifted down by 1
*
* @param[in] n Dimension to remove
* @param[in] apply_dim_correction (Optional) Flag to state whether to apply dimension correction (removing trailing dimensions of size 1) after removing a dimension.
*/
void remove_dimension(size_t n, bool apply_dim_correction = true)
{
ARM_COMPUTE_ERROR_ON(_num_dimensions < 1);
ARM_COMPUTE_ERROR_ON(n >= _num_dimensions);
std::copy(_id.begin() + n + 1, _id.end(), _id.begin() + n);
// Reduce number of dimensions
_num_dimensions--;
// Make sure all empty dimensions are filled with 1
std::fill(_id.begin() + _num_dimensions, _id.end(), 1);
// Correct the number of dimensions to ignore trailing dimensions of size 1
if (apply_dim_correction)
{
apply_dimension_correction();
}
}
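// Illustrative sketch of remove_dimension() (hypothetical values):
//
//   TensorShape s(2U, 3U, 4U); // 2x3x4
//   s.remove_dimension(1);     // upper dimensions shift down: 2x4, s.num_dimensions() == 2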
/** Collapse the first n dimensions.
*
* @param[in] n Number of dimensions to collapse into @p first
* @param[in] first Dimension into which the following @p n dimensions are collapsed.
*/
void collapse(size_t n, size_t first = 0)
{
Dimensions::collapse(n, first);
// Make sure all empty dimensions are filled with 1
std::fill(_id.begin() + _num_dimensions, _id.end(), 1);
}
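// Illustrative sketch of collapse() (hypothetical values):
//
//   TensorShape s(2U, 3U, 4U, 5U); // 2x3x4x5
//   s.collapse(2, 1);              // dimensions 1 and 2 merge: 2x12x5, s.num_dimensions() == 3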
/** Shifts the tensor shape right, increasing its number of dimensions.
*
* @param[in] step Rotation step
*/
void shift_right(size_t step)
{
ARM_COMPUTE_ERROR_ON(step > TensorShape::num_max_dimensions - num_dimensions());
std::rotate(begin(), begin() + TensorShape::num_max_dimensions - step, end());
_num_dimensions += step;
// Correct the number of dimensions to ignore trailing dimensions of size 1
apply_dimension_correction();
}
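// Illustrative sketch of shift_right(), assuming num_max_dimensions == 6 (hypothetical values):
//
//   TensorShape s(2U, 3U); // values {2, 3, 1, 1, 1, 1}, s.num_dimensions() == 2
//   s.shift_right(1);      // rotated right by one: {1, 2, 3, 1, 1, 1}, s.num_dimensions() == 3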
/** Return a copy with collapsed dimensions starting from a given point.
*
* @param[in] start Starting point of collapsing dimensions.
*
* @return A copy with dimensions collapsed starting from @p start.
*/
TensorShape collapsed_from(size_t start) const
{
TensorShape copy(*this);
copy.collapse(num_dimensions() - start, start);
return copy;
}
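// Illustrative sketch of collapsed_from() (hypothetical values):
//
//   TensorShape s(2U, 3U, 4U, 5U);
//   TensorShape c = s.collapsed_from(1); // 2x60, c.num_dimensions() == 2; s is unchanged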
/** Computes the total linear size over all dimensions.
*
* @return The total tensor size in terms of elements.
*/
size_t total_size() const
{
return std::accumulate(_id.begin(), _id.end(), 1, std::multiplies<size_t>());
}
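// Illustrative sketch of total_size() (hypothetical values):
//
//   TensorShape s(2U, 3U, 4U);
//   size_t n = s.total_size(); // 2 * 3 * 4 = 24 (unused dimensions hold 1)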
/** Computes the linear size of the given dimension and all dimensions above it.
*
* @param[in] dimension Lower boundary: first dimension included in the product.
*
* @return The linear size of the collapsed dimensions.
*/
size_t total_size_upper(size_t dimension) const
{
ARM_COMPUTE_ERROR_ON(dimension >= TensorShape::num_max_dimensions);
return std::accumulate(_id.begin() + dimension, _id.end(), 1, std::multiplies<size_t>());
}
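// Illustrative sketch of total_size_upper() (hypothetical values):
//
//   TensorShape s(2U, 3U, 4U);
//   size_t n = s.total_size_upper(1); // 3 * 4 = 12 (product of dimension 1 and above)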
/** Compute size of dimensions lower than the given one.
*
* @param[in] dimension Upper boundary (exclusive).
*
* @return The linear size of the collapsed dimensions.
*/
size_t total_size_lower(size_t dimension) const
{
ARM_COMPUTE_ERROR_ON(dimension > TensorShape::num_max_dimensions);
return std::accumulate(_id.begin(), _id.begin() + dimension, 1, std::multiplies<size_t>());
}
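// Illustrative sketch of total_size_lower() (hypothetical values):
//
//   TensorShape s(2U, 3U, 4U);
//   size_t n = s.total_size_lower(2); // 2 * 3 = 6 (product of dimensions below index 2)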
/** If shapes are broadcast compatible, return the broadcasted shape.
*
* Two tensor shapes are broadcast compatible if for each dimension, they're equal or one of them is 1.
*
* If two shapes are compatible, each dimension in the broadcasted shape is the max of the original dimensions.
*
* @param[in] shapes Tensor shapes.
*
* @return The broadcasted shape or an empty shape if the shapes are not broadcast compatible.
*/
template <typename... Shapes>
static TensorShape broadcast_shape(const Shapes &...shapes)
{
TensorShape bc_shape;
auto broadcast = [&bc_shape](const TensorShape &other)
{
if (bc_shape.num_dimensions() == 0)
{
bc_shape = other;
}
else if (other.num_dimensions() != 0)
{
for (size_t d = 0; d < TensorShape::num_max_dimensions; ++d)
{
const size_t dim_min = std::min(bc_shape[d], other[d]);
const size_t dim_max = std::max(bc_shape[d], other[d]);
if ((dim_min != 1) && (dim_min != dim_max))
{
bc_shape = TensorShape{0U};
break;
}
bc_shape.set(d, dim_max);
}
}
};
utility::for_each(broadcast, shapes...);
return bc_shape;
}
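// Illustrative sketch of broadcast_shape() (hypothetical values):
//
//   TensorShape a(1U, 3U);
//   TensorShape b(4U, 3U);
//   TensorShape c(4U, 2U);
//   TensorShape ab = TensorShape::broadcast_shape(a, b); // compatible: 4x3
//   TensorShape ac = TensorShape::broadcast_shape(a, c); // incompatible: ac.total_size() == 0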
private:
/** Remove trailing dimensions of size 1 from the reported number of dimensions. */
void apply_dimension_correction()
{
for (int i = static_cast<int>(_num_dimensions) - 1; i > 0; --i)
{
if (_id[i] == 1)
{
--_num_dimensions;
}
else
{
break;
}
}
}
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_TENSORSHAPE_H*/