29 #include <boost/test/unit_test.hpp> 30 #include <boost/test/execution_monitor.hpp> 39 std::vector<float> expectedOutput =
41 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
42 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
45 ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(
defaultBackends,
53 std::vector<float> expectedOutput =
55 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
56 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
59 ElementwiseUnarySimpleEndToEnd<armnn::DataType::QAsymmU8>(
defaultBackends,
67 std::vector<float> expectedOutput =
69 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
70 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
73 ElementwiseUnarySimpleEndToEnd<armnn::DataType::QSymmS16>(
defaultBackends,
91 using namespace armnn;
123 auto error = runtime->LoadNetwork(netId, std::move(optNet));
127 std::vector<uint8_t> inputData
131 std::vector<uint8_t> outputData(5);
139 {0,
armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
143 runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
146 BOOST_TEST(outputData[0] == 0);
147 BOOST_TEST(outputData[1] == 0);
148 BOOST_TEST(outputData[2] == 0);
149 BOOST_TEST(outputData[3] == 255);
150 BOOST_TEST(outputData[4] == 0);
157 using namespace armnn;
186 runtime->LoadNetwork(netId, std::move(optNet));
189 std::vector<float> input1Data
191 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
193 std::vector<float> input2Data
195 100.f, 200.f, 300.f, 400.f, 500.f, 600.f, 700.f, 800.f, 900.f, 1000.f, 1100.f, 1200.f
197 std::vector<float> outputData(12);
206 {0,
armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
210 runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
// Each expected value is the elementwise sum of the two visible inputs
// ({1..12} and {100..1200}), i.e. 1+100=101, 2+200=202, ..., 12+1200=1212.
213 BOOST_TEST(outputData[0] == 101);
214 BOOST_TEST(outputData[1] == 202);
215 BOOST_TEST(outputData[2] == 303);
216 BOOST_TEST(outputData[3] == 404);
217 BOOST_TEST(outputData[4] == 505);
218 BOOST_TEST(outputData[5] == 606);
219 BOOST_TEST(outputData[6] == 707);
220 BOOST_TEST(outputData[7] == 808);
221 BOOST_TEST(outputData[8] == 909);
222 BOOST_TEST(outputData[9] == 1010);
223 BOOST_TEST(outputData[10] == 1111);
224 BOOST_TEST(outputData[11] == 1212);
229 using namespace armnn;
243 activation1Descriptor.
m_A = 1.f;
244 activation1Descriptor.
m_B = -1.f;
250 activation2Descriptor.
m_A = 6.0f;
256 activation3Descriptor.
m_A = 5.0f;
257 activation3Descriptor.
m_B = 2.0f;
284 runtime->LoadNetwork(netId, std::move(optNet));
287 const std::vector<float> inputData{ 3.f, 5.f, 2.f, 3.f, 7.f, 0.f, -2.f, -1.f, 3.f, 3.f };
289 std::vector<float> output1Data(inputData.size());
290 std::vector<float> output2Data(inputData.size());
291 std::vector<float> output3Data(inputData.size());
299 {0,
armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), output1Data.data())},
300 {1,
armnn::Tensor(runtime->GetOutputTensorInfo(netId, 1), output2Data.data())},
301 {2,
armnn::Tensor(runtime->GetOutputTensorInfo(netId, 2), output3Data.data())}
305 runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
// Each output is the input { 3, 5, 2, 3, 7, 0, -2, -1, 3, 3 } clamped to the
// corresponding activation's bounds:
//   act1: [m_B=-1, m_A=1]; act3: [m_B=2, m_A=5] (descriptors visible above).
//   act2: upper bound m_A=6; its lower bound is not visible here -- presumably 0
//   (BoundedReLu / ReLu6 style) since negative inputs map to 0 -- TODO confirm.
308 BOOST_TEST(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f }));
309 BOOST_TEST(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f }));
310 BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f }));
315 using namespace armnn;
344 runtime->LoadNetwork(netId, std::move(optNet));
347 std::vector<float> input1Data
349 1.0f, 2.0f, 3.0f, 4.0f
351 std::vector<float> input2Data
353 2.0f, 1.0f, 5.0f, 2.0f
355 std::vector<float> outputData(4);
364 {0,
armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
368 runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
// Expected values are the elementwise minimum of the two visible inputs:
// min(1,2)=1, min(2,1)=1, min(3,5)=3, min(4,2)=2.
371 BOOST_TEST(outputData[0] == 1);
372 BOOST_TEST(outputData[1] == 1);
373 BOOST_TEST(outputData[2] == 3);
374 BOOST_TEST(outputData[3] == 2);
379 const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
380 0, 0, 0, 0, 1, 1, 1, 1 });
389 const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
390 0, 0, 0, 0, 0, 0, 0, 0 });
399 const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
400 0, 0, 0, 0, 1, 1, 1, 1 });
409 const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
410 0, 0, 0, 0, 0, 0, 0, 0 });
419 const std::vector<uint8_t> expectedOutput({ 1, 0, 1, 1, 0, 0,
429 const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
439 const std::vector<uint8_t > expectedOutput({ 1, 0, 1, 1, 0, 0,
442 ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(
defaultBackends,
449 const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
452 ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(
defaultBackends,
697 0.0f, 0.0f, 0.0f, 0.0f,
698 0.0f, 1.0f, 0.0f, 0.0f,
699 0.0f, -1.0f, 0.0f, 0.0f,
700 0.0f, 0.0f, 0.0f, 0.0f,
701 0.0f, 1.0f, 0.0f, 0.0f,
702 0.0f, 0.0f, 0.0f, 0.0f
704 std::vector<float>
scores({
713 0.5f, 0.5f, 1.0f, 1.0f,
714 0.5f, 0.5f, 1.0f, 1.0f,
715 0.5f, 0.5f, 1.0f, 1.0f,
716 0.5f, 10.5f, 1.0f, 1.0f,
717 0.5f, 10.5f, 1.0f, 1.0f,
718 0.5f, 100.5f, 1.0f, 1.0f
738 boxEncodingsInfo.SetQuantizationOffset(1);
745 0.0f, 0.0f, 0.0f, 0.0f,
746 0.0f, 1.0f, 0.0f, 0.0f,
747 0.0f, -1.0f, 0.0f, 0.0f,
748 0.0f, 0.0f, 0.0f, 0.0f,
749 0.0f, 1.0f, 0.0f, 0.0f,
750 0.0f, 0.0f, 0.0f, 0.0f
752 std::vector<float>
scores({
761 0.5f, 0.5f, 1.0f, 1.0f,
762 0.5f, 0.5f, 1.0f, 1.0f,
763 0.5f, 0.5f, 1.0f, 1.0f,
764 0.5f, 10.5f, 1.0f, 1.0f,
765 0.5f, 10.5f, 1.0f, 1.0f,
766 0.5f, 100.5f, 1.0f, 1.0f
777 1.0f, 1, 0.01f, 0, 0.5f, 0);
783 0.0f, 0.0f, 0.0f, 0.0f,
784 0.0f, 1.0f, 0.0f, 0.0f,
785 0.0f, -1.0f, 0.0f, 0.0f,
786 0.0f, 0.0f, 0.0f, 0.0f,
787 0.0f, 1.0f, 0.0f, 0.0f,
788 0.0f, 0.0f, 0.0f, 0.0f
790 std::vector<float>
scores({
799 0.5f, 0.5f, 1.0f, 1.0f,
800 0.5f, 0.5f, 1.0f, 1.0f,
801 0.5f, 0.5f, 1.0f, 1.0f,
802 0.5f, 10.5f, 1.0f, 1.0f,
803 0.5f, 10.5f, 1.0f, 1.0f,
804 0.5f, 100.5f, 1.0f, 1.0f
816 boxEncodingsInfo.SetQuantizationOffset(1);
823 0.0f, 0.0f, 0.0f, 0.0f,
824 0.0f, 1.0f, 0.0f, 0.0f,
825 0.0f, -1.0f, 0.0f, 0.0f,
826 0.0f, 0.0f, 0.0f, 0.0f,
827 0.0f, 1.0f, 0.0f, 0.0f,
828 0.0f, 0.0f, 0.0f, 0.0f
830 std::vector<float>
scores({
839 0.5f, 0.5f, 1.0f, 1.0f,
840 0.5f, 0.5f, 1.0f, 1.0f,
841 0.5f, 0.5f, 1.0f, 1.0f,
842 0.5f, 10.5f, 1.0f, 1.0f,
843 0.5f, 10.5f, 1.0f, 1.0f,
844 0.5f, 100.5f, 1.0f, 1.0f
855 1.0f, 1, 0.01f, 0, 0.5f, 0);
1034 TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
1040 TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
1046 TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
1052 TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
1058 TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
1064 TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
1291 #if !defined(__ANDROID__) BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
void SpaceToDepthNchwEndToEndTest1(const std::vector< armnn::BackendId > &defaultBackends)
static IRuntimePtr Create(const CreationOptions &options)
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
CPU Execution: Reference C++ kernels.
void InstanceNormalizationNhwcEndToEndTest1(const std::vector< armnn::BackendId > &defaultBackends)
std::vector< armnn::BackendId > defaultBackends
void LogSoftmaxEndToEndTest(const std::vector< armnn::BackendId > &defaultBackends)
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32)
BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestFloat32)
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
void InstanceNormalizationNchwEndToEndTest1(const std::vector< armnn::BackendId > &defaultBackends)
std::vector< float > boxEncodings({ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f })
Copyright (c) 2021 ARM Limited and Contributors.
void QLstmEndToEnd(const std::vector< armnn::BackendId > &backends)
void SpaceToDepthNhwcEndToEndTest2(const std::vector< armnn::BackendId > &defaultBackends)
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
void SpaceToDepthNhwcEndToEndTest1(const std::vector< armnn::BackendId > &defaultBackends)
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
void SpaceToDepthNchwEndToEndTest2(const std::vector< armnn::BackendId > &defaultBackends)
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
std::vector< uint8_t > qBoxEncodings(boxEncodings.size(), 0)
int32_t GetQuantizationOffset() const
float GetQuantizationScale() const
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
void SetQuantizationScale(float scale)
std::vector< uint8_t > qAnchors(anchors.size(), 0)
BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
An ActivationDescriptor for the ActivationLayer.
min(a, max(b, input)) ReLu1 & ReLu6.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
BOOST_AUTO_TEST_SUITE_END()
std::vector< float > scores({ 0.0f, 0.9f, 0.8f, 0.0f, 0.75f, 0.72f, 0.0f, 0.6f, 0.5f, 0.0f, 0.93f, 0.95f, 0.0f, 0.5f, 0.4f, 0.0f, 0.3f, 0.2f })
void QuantizeData(uint8_t *quant, const float *dequant, const TensorInfo &info)
std::vector< uint8_t > qScores(scores.size(), 0)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
void InstanceNormalizationNhwcEndToEndTest2(const std::vector< armnn::BackendId > &defaultBackends)
void SetQuantizationOffset(int32_t offset)
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
virtual int Connect(IInputSlot &destination)=0
static INetworkPtr Create(NetworkOptions networkOptions={})
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
A SoftmaxDescriptor for the SoftmaxLayer.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
unsigned int GetNumElements() const
void InstanceNormalizationNchwEndToEndTest2(const std::vector< armnn::BackendId > &defaultBackends)
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })