//
// Copyright (c) 2021 ARM Limited and Contributors.
//

// Headers assumed from the ArmNN types used below (BaseWorkload,
// ElementwiseUnaryQueueDescriptor, ScopedCpuTensorHandle, WorkingMemDescriptor);
// exact include paths vary between ArmNN releases.
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>

#include <armnn/backends/WorkingMemDescriptor.hpp>

#include <boost/test/unit_test.hpp>

#include <thread>

using namespace armnn;
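// The stub workloads below test IWorkload's synchronous Execute() path against
// the ExecuteAsync() path, which runs on caller-supplied working memory.
// Suite and test-case names, and the tensor/handle setup lines the extraction
// dropped, are reconstructed from context rather than preserved verbatim.
BOOST_AUTO_TEST_SUITE(WorkloadAsyncExecuteTests)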
struct Workload0 : BaseWorkload<ElementwiseUnaryQueueDescriptor>
{
    // Constructor reconstructed to match the make_unique call in CreateWorkload below.
    Workload0(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
        : BaseWorkload(descriptor, info)
    {
    }

    // Execute() works on the tensor handles the workload itself owns (m_Data).
    void Execute() const override
    {
        int* inVals = static_cast<int*>(m_Data.m_Inputs[0][0].Map());
        int* outVals = static_cast<int*>(m_Data.m_Outputs[0][0].Map());

        for (unsigned int i = 0; i < m_Data.m_Inputs[0][0].GetShape().GetNumElements(); ++i)
        {
            outVals[i] = inVals[i] * outVals[i];
            inVals[i] = outVals[i];
        }
    }
    // ExecuteAsync() works on the handles in the caller-supplied working memory
    // descriptor instead.
    void ExecuteAsync(WorkingMemDescriptor& desc) override
    {
        int* inVals = static_cast<int*>(desc.m_Inputs[0][0].Map());
        int* outVals = static_cast<int*>(desc.m_Outputs[0][0].Map());

        for (unsigned int i = 0; i < desc.m_Inputs[0][0].GetShape().GetNumElements(); ++i)
        {
            outVals[i] = inVals[i] + outVals[i];
            inVals[i] = outVals[i];
        }
    }

    // Accessor reconstructed from its use in the first test case below, where the
    // workload's own tensors are validated after Execute().
    QueueDescriptor* GetQueueDescriptor()
    {
        return &m_Data;
    }
};
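// Workload0 deliberately gives the two paths different arithmetic: Execute()
// multiplies in the workload's own tensors, while ExecuteAsync() adds in the
// caller-supplied working memory. The tests below rely on this to check that
// each call touched only the tensors it was meant to.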
struct Workload1 : BaseWorkload<ElementwiseUnaryQueueDescriptor>
{
    Workload1(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
        : BaseWorkload(descriptor, info)
    {
    }

    void Execute() const override
    {
        int* inVals = static_cast<int*>(m_Data.m_Inputs[0][0].Map());
        int* outVals = static_cast<int*>(m_Data.m_Outputs[0][0].Map());

        for (unsigned int i = 0; i < m_Data.m_Inputs[0][0].GetShape().GetNumElements(); ++i)
        {
            outVals[i] = inVals[i] * outVals[i];
            inVals[i] = outVals[i];
        }
    }
};
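// Unlike Workload0, Workload1 does not override ExecuteAsync(), so calling
// ExecuteAsync() on it exercises the default implementation inherited from
// BaseWorkload. That this default ends up running the Execute() arithmetic on
// the supplied working memory is inferred from the expected values in the
// tests below (multiplication results, not addition).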
void ValidateTensor(ITensorHandle* tensorHandle, int expectedValue)
{
    int* actualOutput = static_cast<int*>(tensorHandle->Map());

    bool allValuesCorrect = true;
    for (unsigned int i = 0; i < tensorHandle->GetShape().GetNumElements(); ++i)
    {
        if (actualOutput[i] != expectedValue)
        {
            allValuesCorrect = false;
        }
    }

    BOOST_CHECK(allValuesCorrect);
}
template<typename Workload>
std::unique_ptr<Workload> CreateWorkload(TensorInfo info, ITensorHandle* inputTensor, ITensorHandle* outputTensor)
{
    // WorkloadInfo setup reconstructed: the call sites pass a single TensorInfo
    // describing both the input and the output tensor.
    WorkloadInfo workloadInfo;
    workloadInfo.m_InputTensorInfos = std::vector<TensorInfo>{info};
    workloadInfo.m_OutputTensorInfos = std::vector<TensorInfo>{info};

    ElementwiseUnaryQueueDescriptor elementwiseUnaryQueueDescriptor;
    elementwiseUnaryQueueDescriptor.m_Inputs = std::vector<ITensorHandle*>{inputTensor};
    elementwiseUnaryQueueDescriptor.m_Outputs = std::vector<ITensorHandle*>{outputTensor};

    return std::make_unique<Workload>(elementwiseUnaryQueueDescriptor, workloadInfo);
}
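// Note the queue descriptor stores raw ITensorHandle pointers: the caller keeps
// ownership of the handles, which must outlive the workload.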
BOOST_AUTO_TEST_CASE(TestAsyncExecute)
{
    TensorInfo info({5}, DataType::Signed32);

    int inVals[5]{2, 2, 2, 2, 2};
    int outVals[5]{1, 1, 1, 1, 1};

    int expectedExecuteval = 2;
    int expectedExecuteAsyncval = 3;
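    // Expected values, worked through: Execute() multiplies, so out = 2 * 1 = 2
    // and the input is overwritten with 2. ExecuteAsync() adds on a fresh copy
    // of the same data, so out = 2 + 1 = 3 and its input is overwritten with 3.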
    ConstTensor constInputTensor(info, inVals);
    ConstTensor constOutputTensor(info, outVals);

    // Separate handle pairs for the sync and async paths, so the two calls
    // cannot interfere with each other's data.
    ScopedCpuTensorHandle syncInput0(constInputTensor);
    ScopedCpuTensorHandle syncOutput0(constOutputTensor);

    std::unique_ptr<Workload0> workload0 = CreateWorkload<Workload0>(info, &syncInput0, &syncOutput0);

    workload0->Execute();

    ScopedCpuTensorHandle asyncInput0(constInputTensor);
    ScopedCpuTensorHandle asyncOutput0(constOutputTensor);

    WorkingMemDescriptor workingMemDescriptor0;
    workingMemDescriptor0.m_Inputs = std::vector<ITensorHandle*>{&asyncInput0};
    workingMemDescriptor0.m_Outputs = std::vector<ITensorHandle*>{&asyncOutput0};

    workload0->ExecuteAsync(workingMemDescriptor0);

    // The async call must only have touched the working-memory tensors...
    ValidateTensor(workingMemDescriptor0.m_Outputs[0], expectedExecuteAsyncval);
    ValidateTensor(workingMemDescriptor0.m_Inputs[0], expectedExecuteAsyncval);

    // ...and the sync call only the workload's own tensors.
    ValidateTensor(&workload0->GetQueueDescriptor()->m_Outputs[0][0], expectedExecuteval);
    ValidateTensor(&workload0->GetQueueDescriptor()->m_Inputs[0][0], expectedExecuteval);
}
BOOST_AUTO_TEST_CASE(TestDefaultAsyncExecute)
{
    TensorInfo info({5}, DataType::Signed32);

    std::vector<int> inVals{2, 2, 2, 2, 2};
    std::vector<int> outVals{1, 1, 1, 1, 1};
    std::vector<int> defaultVals{0, 0, 0, 0, 0};

    int expectedExecuteval = 2;
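    // The workload is built around all-zero default tensors; ExecuteAsync() is
    // then handed the real data. With the default path running the Execute()
    // arithmetic, the expected result is out = 2 * 1 = 2, input overwritten to 2,
    // and the default tensors untouched.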
    ConstTensor constInputTensor(info, inVals.data());
    ConstTensor constOutputTensor(info, outVals.data());
    ConstTensor defaultTensor(info, defaultVals.data());

    ScopedCpuTensorHandle defaultInput(defaultTensor);
    ScopedCpuTensorHandle defaultOutput(defaultTensor);

    std::unique_ptr<Workload1> workload1 = CreateWorkload<Workload1>(info, &defaultInput, &defaultOutput);

    ScopedCpuTensorHandle asyncInput(constInputTensor);
    ScopedCpuTensorHandle asyncOutput(constOutputTensor);

    WorkingMemDescriptor workingMemDescriptor;
    workingMemDescriptor.m_Inputs = std::vector<ITensorHandle*>{&asyncInput};
    workingMemDescriptor.m_Outputs = std::vector<ITensorHandle*>{&asyncOutput};

    workload1->ExecuteAsync(workingMemDescriptor);

    ValidateTensor(workingMemDescriptor.m_Outputs[0], expectedExecuteval);
    ValidateTensor(workingMemDescriptor.m_Inputs[0], expectedExecuteval);
}
BOOST_AUTO_TEST_CASE(TestDefaultAsyncExecuteWithThreads)
{
    // A large tensor gives the two threads a realistic chance to overlap.
    unsigned int vecSize = 1000;
    TensorInfo info({vecSize}, DataType::Signed32);

    std::vector<int> inVals1(vecSize, 2);
    std::vector<int> outVals1(vecSize, 1);
    std::vector<int> inVals2(vecSize, 5);
    std::vector<int> outVals2(vecSize, -1);

    std::vector<int> defaultVals(vecSize, 0);

    int expectedExecuteval1 = 4;
    int expectedExecuteval2 = 25;
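    // Each thread runs ExecuteAsync() twice on its own descriptor, so
    // out = in * out is applied twice: descriptor 1 goes (2, 1) -> 2 -> 4;
    // descriptor 2 goes (5, -1) -> -5 -> 25. Each input ends up overwritten
    // with the same final value as its output.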
    ConstTensor constInputTensor1(info, inVals1.data());
    ConstTensor constOutputTensor1(info, outVals1.data());
    ConstTensor constInputTensor2(info, inVals2.data());
    ConstTensor constOutputTensor2(info, outVals2.data());
    ConstTensor defaultTensor(info, defaultVals.data());

    ScopedCpuTensorHandle defaultInput(defaultTensor);
    ScopedCpuTensorHandle defaultOutput(defaultTensor);

    std::unique_ptr<Workload1> workload = CreateWorkload<Workload1>(info, &defaultInput, &defaultOutput);

    ScopedCpuTensorHandle asyncInput1(constInputTensor1);
    ScopedCpuTensorHandle asyncOutput1(constOutputTensor1);

    WorkingMemDescriptor workingMemDescriptor1;
    workingMemDescriptor1.m_Inputs = std::vector<ITensorHandle*>{&asyncInput1};
    workingMemDescriptor1.m_Outputs = std::vector<ITensorHandle*>{&asyncOutput1};

    ScopedCpuTensorHandle asyncInput2(constInputTensor2);
    ScopedCpuTensorHandle asyncOutput2(constOutputTensor2);

    WorkingMemDescriptor workingMemDescriptor2;
    workingMemDescriptor2.m_Inputs = std::vector<ITensorHandle*>{&asyncInput2};
    workingMemDescriptor2.m_Outputs = std::vector<ITensorHandle*>{&asyncOutput2};

    std::thread thread1([&]()
    {
        workload->ExecuteAsync(workingMemDescriptor1);
        workload->ExecuteAsync(workingMemDescriptor1);
    });

    std::thread thread2([&]()
    {
        workload->ExecuteAsync(workingMemDescriptor2);
        workload->ExecuteAsync(workingMemDescriptor2);
    });

    thread1.join();
    thread2.join();

    ValidateTensor(workingMemDescriptor1.m_Outputs[0], expectedExecuteval1);
    ValidateTensor(workingMemDescriptor1.m_Inputs[0], expectedExecuteval1);

    ValidateTensor(workingMemDescriptor2.m_Outputs[0], expectedExecuteval2);
    ValidateTensor(workingMemDescriptor2.m_Inputs[0], expectedExecuteval2);
}

BOOST_AUTO_TEST_SUITE_END()