LayerTestResult<float, 2> FakeQuantizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    IgnoreUnused(memoryManager);
    constexpr unsigned int width  = 2;
    constexpr unsigned int height = 3;
    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        /* ... input values elided in the excerpted listing ... */ }));
    LayerTestResult<float, 2> ret(tensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(tensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
    data.m_Parameters.m_Min = -10.0f; // quantization range bounds; the literal
    data.m_Parameters.m_Max =  10.0f; // values are not shown in the excerpt
    // A reference copy of the workload writes straight into ret.outputExpected.
    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
    workload->PostAllocationConfigure();
    workload->Execute();
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        /* ... expected values elided in the excerpted listing ... */ }));
    return ret;
}
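The test compares the backend's output against a reference result computed over the same [m_Min, m_Max] range. Conceptually, fake quantization snaps each float to one of 256 levels spanning that range. A minimal sketch of that mapping follows; the helper name fakeQuantizeRef is ours, not ArmNN's, and the exact rounding and zero-point handling are backend details, so treat it as illustrative:

#include <algorithm>
#include <cmath>

// Illustrative only: snap a float to the nearest of 256 levels covering
// [min, max], returning the level index as a float (0.0f .. 255.0f).
float fakeQuantizeRef(float value, float min, float max)
{
    const float scale = (max - min) / 255.0f;        // width of one level
    const float level = std::round((value - min) / scale);
    return std::min(255.0f, std::max(0.0f, level));  // clamp to the 8-bit range
}

With m_Min = -10 and m_Max = 10 this gives scale = 20/255, so for example fakeQuantizeRef(0.0f, -10.0f, 10.0f) rounds 127.5 up and lands mid-range at level 128; inputs at or beyond the range bounds saturate at 0 or 255.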
Referenced symbols:

- LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
- virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
- virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo, const bool IsMemoryManaged = true) const = 0
- std::shared_ptr<IMemoryManager> IMemoryManagerSharedPtr
- void IgnoreUnused(Ts&&...)
- void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
- void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle)
- LayerDescriptor m_Parameters
- float m_Min: Minimum value.
- float m_Max: Maximum value.
- boost::multi_array<T, n> output
- boost::multi_array<T, n> outputExpected
- WorkloadInfo: Contains information about inputs and outputs to a layer.
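For context, layer tests like this are normally driven through a concrete workload factory. A minimal sketch using the CPU reference backend, assuming the relevant test and backend headers are on the include path; the pass/fail check is illustrative, the real harness uses its own comparison helpers:

// Run the test against ArmNN's CPU reference backend.
armnn::RefWorkloadFactory workloadFactory;
LayerTestResult<float, 2> result = FakeQuantizationTest(workloadFactory, nullptr);
// boost::multi_array's operator== compares shape and elements.
bool passed = (result.output == result.outputExpected);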