ArmNN
 21.02
FloorTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FloorTestImpl.hpp"

// Shared layer-test utilities (paths follow the in-tree backendsCommon test layout).
#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> SimpleFloorTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);

    // 1x3x2x3 input; the quantization scale is only meaningful for the QSymmS16 instantiation.
    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);

    armnn::TensorInfo outputTensorInfo(inputTensorInfo);
    outputTensorInfo.SetQuantizationScale(0.1f);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
          1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f },
        inputTensorInfo));

    // Expected results: element-wise floor of the input values.
    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, ConvertToDataType<ArmnnType>(
        { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
          1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f },
        outputTensorInfo));

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Floor workload's inputs and outputs.
    armnn::FloorQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

//
// Explicit template instantiations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
SimpleFloorTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
SimpleFloorTest<armnn::DataType::Float16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
SimpleFloorTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);