From 6940dd720ebb6b3d1df8ca203ab696daefe58189 Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Fri, 20 Mar 2020 12:25:56 +0000
Subject: renamed Documentation folder 20.02 and added .nojekyll file

Signed-off-by: Jim Flynn
---
 .../_elementwise_unary_test_impl_8hpp_source.xhtml | 143 +++++++++++++++++++++
 1 file changed, 143 insertions(+)
 create mode 100644 20.02/_elementwise_unary_test_impl_8hpp_source.xhtml

(limited to '20.02/_elementwise_unary_test_impl_8hpp_source.xhtml')

diff --git a/20.02/_elementwise_unary_test_impl_8hpp_source.xhtml b/20.02/_elementwise_unary_test_impl_8hpp_source.xhtml
new file mode 100644
index 0000000000..7ca60fff12
--- /dev/null
+++ b/20.02/_elementwise_unary_test_impl_8hpp_source.xhtml
@@ -0,0 +1,143 @@

ArmNN: src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp Source File
ElementwiseUnaryTestImpl.hpp
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "LayerTestResult.hpp"

#include <armnn/ArmNN.hpp>

#include <ResolveType.hpp>

// Backend workload interfaces used by the helper below.
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

// Shared test utilities: data-type conversion, tensor copies and workload execution.
#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <memory>

// Creates the backend workload that executes a single elementwise unary operation.
std::unique_ptr<armnn::IWorkload> CreateWorkload(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::ElementwiseUnaryQueueDescriptor& descriptor);

// Runs one elementwise unary workload on the given backend and returns both the
// actual output and the expected output so the caller can compare them.
template <std::size_t NumDims,
          armnn::DataType ArmnnType,
          typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::UnaryOperation op,
    const unsigned int shape[NumDims],
    std::vector<float> values,
    float quantScale,
    int quantOffset,
    const unsigned int outShape[NumDims],
    std::vector<float> outValues,
    float outQuantScale,
    int outQuantOffset)
{
    armnn::TensorInfo inputTensorInfo{NumDims, shape, ArmnnType};
    armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnType};

    inputTensorInfo.SetQuantizationScale(quantScale);
    inputTensorInfo.SetQuantizationOffset(quantOffset);

    outputTensorInfo.SetQuantizationScale(outQuantScale);
    outputTensorInfo.SetQuantizationOffset(outQuantOffset);

    auto input = MakeTensor<T, NumDims>(inputTensorInfo, ConvertToDataType<ArmnnType>(values, inputTensorInfo));

    LayerTestResult<T, NumDims> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the unary operation and wire the input/output tensors into the workload.
    armnn::ElementwiseUnaryDescriptor desc(op);

    armnn::ElementwiseUnaryQueueDescriptor qDesc;
    qDesc.m_Parameters = desc;
    armnn::WorkloadInfo info;
    AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
    auto workload = CreateWorkload(workloadFactory, info, qDesc);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());

    ret.outputExpected = MakeTensor<T, NumDims>(outputTensorInfo, ConvertToDataType<ArmnnType>(outValues,
                                                                                               inputTensorInfo));
    return ret;
}

// Convenience overload that applies the same quantisation parameters to the
// input and output tensors.
template <std::size_t NumDims,
          armnn::DataType ArmnnType,
          typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::UnaryOperation op,
    const unsigned int shape[NumDims],
    std::vector<float> values,
    const unsigned int outShape[NumDims],
    std::vector<float> outValues,
    float quantScale = 1.0f,
    int quantOffset = 0)
{
    return ElementwiseUnaryTestHelper<NumDims, ArmnnType>(
        workloadFactory,
        memoryManager,
        op,
        shape,
        values,
        quantScale,
        quantOffset,
        outShape,
        outValues,
        quantScale,
        quantOffset);
}
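For context, the sketch below (not part of the header above) shows how a backend unit test might drive the convenience overload for a 2-D Abs operation. The function name Abs2dSketchTest, the sample values and the quantisation parameters are illustrative assumptions rather than code from the ArmNN test suite; the include assumes the sketch sits alongside this header.

// Minimal usage sketch (illustrative, not ArmNN test-suite code).
// Assumes the backend under test supplies workloadFactory and memoryManager,
// as the real layer tests do.
#include "ElementwiseUnaryTestImpl.hpp"

template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Abs2dSketchTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // A 2x2 input and its expected element-wise absolute values.
    const unsigned int shape[] = { 2, 2 };
    std::vector<float> inputValues  { -1.0f,  2.0f, -3.0f,  4.0f };
    std::vector<float> outputValues {  1.0f,  2.0f,  3.0f,  4.0f };

    // The short overload reuses the same quantisation scale/offset for the input
    // and output tensors; these values only matter for quantised data types.
    return ElementwiseUnaryTestHelper<2, ArmnnType>(
        workloadFactory,
        memoryManager,
        armnn::UnaryOperation::Abs,
        shape,
        inputValues,
        shape,
        outputValues,
        0.25f,
        0);
}

In the in-tree layer tests this pattern is instantiated once per data type via the ArmnnType template argument (for example Float32 or a quantised type), and the returned LayerTestResult is then checked by the test framework.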
Referenced declarations:

  boost::multi_array< T, n > outputExpected
  boost::multi_array< T, n > output
  typename ResolveTypeImpl< DT >::Type ResolveType (Definition: ResolveType.hpp:73)
  DataType (Definition: Types.hpp:32)
  UnaryOperation (Definition: Types.hpp:87)
  std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
  void SetQuantizationScale(float scale) (Definition: Tensor.cpp:259)
  std::unique_ptr< armnn::IWorkload > CreateWorkload(const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const armnn::ElementwiseUnaryQueueDescriptor &descriptor)
  virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
  void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
  void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
  An ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer. (Definition: Descriptors.hpp:82)
  WorkloadInfo: contains information about inputs and outputs to a layer.
  LayerTestResult< T, NumDims > ElementwiseUnaryTestHelper(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::UnaryOperation op, const unsigned int shape[NumDims], std::vector< float > values, float quantScale, int quantOffset, const unsigned int outShape[NumDims], std::vector< float > outValues, float outQuantScale, int outQuantOffset)
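The output and outputExpected members referenced above are plain boost::multi_array buffers, so a result can be checked without any ArmNN-specific machinery (the in-tree tests typically compare them with helpers from TensorHelpers.hpp). Below is a minimal sketch of such a check, assuming only the Boost.MultiArray API and a LayerTestResult produced by one of the helpers above; ResultsMatch and the tolerance are illustrative names.

// Illustrative result check (not ArmNN code). Assumes LayerTestResult.hpp is
// already included; boost::multi_array provides num_elements() and data().
#include <cmath>
#include <cstddef>

template <typename T, std::size_t NumDims>
bool ResultsMatch(const LayerTestResult<T, NumDims>& result, float tolerance = 1e-6f)
{
    if (result.output.num_elements() != result.outputExpected.num_elements())
    {
        return false;
    }

    const T* actual   = result.output.data();
    const T* expected = result.outputExpected.data();

    for (std::size_t i = 0; i < result.output.num_elements(); ++i)
    {
        // Compare element-wise within a small tolerance.
        if (std::fabs(static_cast<float>(actual[i]) - static_cast<float>(expected[i])) > tolerance)
        {
            return false;
        }
    }
    return true;
}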
-- cgit v1.2.1