//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <armnn/Tensor.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/IMemoryManager.hpp>
#include <armnn/backends/Workload.hpp>
#include <armnn/backends/WorkloadInfo.hpp>
namespace armnn
{
class ITensorHandle;
} // namespace armnn
namespace
{
// Appends one input to a workload: the handle goes into the descriptor and the
// matching TensorInfo into the WorkloadInfo, keeping the two lists in lock-step.
template <typename QueueDescriptor>
void AddInputToWorkload(QueueDescriptor& descriptor,
                        armnn::WorkloadInfo& info,
                        const armnn::TensorInfo& tensorInfo,
                        armnn::ITensorHandle* tensorHandle)
{
    info.m_InputTensorInfos.push_back(tensorInfo);
    descriptor.m_Inputs.push_back(tensorHandle);
}
// Appends one output to a workload: the handle goes into the descriptor and the
// matching TensorInfo into the WorkloadInfo, keeping the two lists in lock-step.
template <typename QueueDescriptor>
void AddOutputToWorkload(QueueDescriptor& descriptor,
                         armnn::WorkloadInfo& info,
                         const armnn::TensorInfo& tensorInfo,
                         armnn::ITensorHandle* tensorHandle)
{
    info.m_OutputTensorInfos.push_back(tensorInfo);
    descriptor.m_Outputs.push_back(tensorHandle);
}
// Overwrites the input slot at `index` with a new handle/info pair.
// NOTE(review): no bounds check — assumes `index` was already populated via
// AddInputToWorkload; out-of-range access is undefined behaviour.
template <typename QueueDescriptor>
void SetWorkloadInput(QueueDescriptor& descriptor,
                      armnn::WorkloadInfo& info,
                      unsigned int index,
                      const armnn::TensorInfo& tensorInfo,
                      armnn::ITensorHandle* tensorHandle)
{
    info.m_InputTensorInfos[index] = tensorInfo;
    descriptor.m_Inputs[index]     = tensorHandle;
}
// Overwrites the output slot at `index` with a new handle/info pair.
// NOTE(review): no bounds check — assumes `index` was already populated via
// AddOutputToWorkload; out-of-range access is undefined behaviour.
template <typename QueueDescriptor>
void SetWorkloadOutput(QueueDescriptor& descriptor,
                       armnn::WorkloadInfo& info,
                       unsigned int index,
                       const armnn::TensorInfo& tensorInfo,
                       armnn::ITensorHandle* tensorHandle)
{
    info.m_OutputTensorInfos[index] = tensorInfo;
    descriptor.m_Outputs[index]     = tensorHandle;
}
// Runs a workload, bracketing it with memory-manager Acquire/Release when a
// memory manager is supplied and management is requested.
//
// @param workload                  Workload to configure and execute.
// @param memoryManager             Backend memory manager; may be null, in which
//                                  case no Acquire/Release is performed.
// @param memoryManagementRequested Allows callers to opt out of memory
//                                  management even when a manager exists.
inline void ExecuteWorkload(armnn::IWorkload& workload,
                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                            bool memoryManagementRequested = true)
{
    const bool manageMemory = memoryManager && memoryManagementRequested;

    // RAII guard: guarantees Release() runs even if PostAllocationConfigure()
    // or Execute() throws (the previous version leaked working memory in that
    // case because the trailing Release() call was skipped during unwinding).
    struct ScopedMemory
    {
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& m_Manager;
        const bool m_Active;

        ScopedMemory(const armnn::IBackendInternal::IMemoryManagerSharedPtr& manager, bool active)
            : m_Manager(manager)
            , m_Active(active)
        {
            if (m_Active)
            {
                m_Manager->Acquire();
            }
        }
        ~ScopedMemory()
        {
            if (m_Active)
            {
                m_Manager->Release();
            }
        }
        ScopedMemory(const ScopedMemory&) = delete;
        ScopedMemory& operator=(const ScopedMemory&) = delete;
    };

    ScopedMemory scopedMemory(memoryManager, manageMemory);

    // Perform post-allocation configuration before executing.
    workload.PostAllocationConfigure();

    // Execute the workload.
    workload.Execute();
}
// Maps a weights data type to the data type its bias tensor must use:
// floating-point weights keep the same type, quantized weights require
// Signed32 bias. An empty optional passes through unchanged; any other
// type asserts and yields an empty optional.
inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Optional<armnn::DataType> weightsType)
{
    if (!weightsType)
    {
        return weightsType;
    }

    const armnn::DataType dataType = weightsType.value();

    // Float weights: bias matches the weights' precision.
    if (dataType == armnn::DataType::BFloat16 ||
        dataType == armnn::DataType::Float16  ||
        dataType == armnn::DataType::Float32)
    {
        return weightsType;
    }

    // Quantized weights: bias is always a 32-bit signed integer.
    if (dataType == armnn::DataType::QAsymmS8 ||
        dataType == armnn::DataType::QAsymmU8 ||
        dataType == armnn::DataType::QSymmS8  ||
        dataType == armnn::DataType::QSymmS16)
    {
        return armnn::DataType::Signed32;
    }

    ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
    return armnn::EmptyOptional();
}
} // anonymous namespace