path: root/test/Concat.cpp
author      Jim Flynn <jim.flynn@arm.com>              2019-05-22 18:00:04 +0100
committer   Matteo Martincigh <matteo.martincigh@arm.com>  2019-05-29 09:27:40 +0100
commit      7b1e41f454609e6066d0e23f5f74de2f93fe87b4 (patch)
tree        3626ac87f9bd8bdd90d43f7835850ffc9ec51a58 /test/Concat.cpp
parent      9972b4012765ba659a4a693d3e918c89f71eda7c (diff)
download    android-nn-driver-7b1e41f454609e6066d0e23f5f74de2f93fe87b4.tar.gz
IVGCVSW-3119 Rename Merger to Concat

!armnn:1209
Change-Id: Ic493e5cdfe479e459342d7c7c9d77c63f859fa30
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Diffstat (limited to 'test/Concat.cpp')
-rw-r--r--   test/Concat.cpp   490
1 file changed, 490 insertions, 0 deletions
diff --git a/test/Concat.cpp b/test/Concat.cpp
new file mode 100644
index 00000000..b5ea689e
--- /dev/null
+++ b/test/Concat.cpp
@@ -0,0 +1,490 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "DriverTestHelpers.hpp"
+#include "TestTensor.hpp"
+#include <boost/array.hpp>
+#include <boost/test/unit_test.hpp>
+#include <boost/test/data/test_case.hpp>
+#include <log/log.h>
+
+
+BOOST_AUTO_TEST_SUITE(ConcatTests)
+
+using namespace android::hardware;
+using namespace driverTestHelpers;
+using namespace armnn_driver;
+
+namespace
+{
+
+#ifndef ARMCOMPUTECL_ENABLED
+ static const boost::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
+#else
+ static const boost::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
+#endif
+
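+// Helper used by all of the test cases below: builds a V1_0 model containing a
+// single CONCATENATION operation (one input operand per tensor plus an int
+// operand carrying the axis), prepares it on the requested compute device,
+// executes it, and compares the output against the expected tensor. The
+// expected prepare/execution statuses default to NONE and can be overridden
+// by the failure-path tests.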
+void
+ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
+ int32_t concatAxis,
+ const TestTensor & expectedOutputTensor,
+ armnn::Compute computeDevice,
+ ErrorStatus expectedPrepareStatus=ErrorStatus::NONE,
+ ErrorStatus expectedExecStatus=ErrorStatus::NONE)
+{
+ std::unique_ptr<ArmnnDriver> driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice));
+ V1_0::Model model{};
+
+ hidl_vec<uint32_t> modelInputIds;
+ modelInputIds.resize(inputs.size()+1);
+ for (uint32_t i = 0; i<inputs.size(); ++i)
+ {
+ modelInputIds[i] = i;
+ AddInputOperand(model, inputs[i]->GetDimensions());
+ }
+ modelInputIds[inputs.size()] = inputs.size(); // add an id for the axis too
+ AddIntOperand(model, concatAxis);
+ AddOutputOperand(model, expectedOutputTensor.GetDimensions());
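+    // operand ids are assigned in the order the operands were added: the inputs
+    // take ids 0..N-1, the axis operand takes id N, so the output operand added
+    // above gets id N+1 (used below when wiring up the operation's outputs)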
+
+ // make the concat operation
+ model.operations.resize(1);
+ model.operations[0].type = V1_0::OperationType::CONCATENATION;
+ model.operations[0].inputs = modelInputIds;
+ model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};
+
+ // make the prepared model
+ ErrorStatus prepareStatus=ErrorStatus::NONE;
+ android::sp<V1_0::IPreparedModel> preparedModel = PrepareModelWithStatus(model,
+ *driver,
+ prepareStatus,
+ expectedPrepareStatus);
+ BOOST_TEST(prepareStatus == expectedPrepareStatus);
+ if (prepareStatus != ErrorStatus::NONE)
+ {
+ // prepare failed, we cannot continue
+ return;
+ }
+
+ BOOST_TEST(preparedModel.get() != nullptr);
+ if (preparedModel.get() == nullptr)
+ {
+ // don't spoil other tests if prepare failed
+ return;
+ }
+
+ // construct the request
+ hidl_vec<RequestArgument> inputArguments;
+ hidl_vec<RequestArgument> outputArguments;
+ inputArguments.resize(inputs.size());
+ outputArguments.resize(1);
+
+ // the request's memory pools will follow the same order as
+ // the inputs
+ for (uint32_t i = 0; i<inputs.size(); ++i)
+ {
+ DataLocation inloc = {};
+ inloc.poolIndex = i;
+ inloc.offset = 0;
+ inloc.length = inputs[i]->GetNumElements() * sizeof(float);
+ RequestArgument input = {};
+ input.location = inloc;
+ input.dimensions = inputs[i]->GetDimensions();
+ inputArguments[i] = input;
+ }
+
+ // and an additional memory pool is needed for the output
+ {
+ DataLocation outloc = {};
+ outloc.poolIndex = inputs.size();
+ outloc.offset = 0;
+ outloc.length = expectedOutputTensor.GetNumElements() * sizeof(float);
+ RequestArgument output = {};
+ output.location = outloc;
+ output.dimensions = expectedOutputTensor.GetDimensions();
+ outputArguments[0] = output;
+ }
+
+ // make the request based on the arguments
+ Request request = {};
+ request.inputs = inputArguments;
+ request.outputs = outputArguments;
+
+ // set the input data
+ for (uint32_t i = 0; i<inputs.size(); ++i)
+ {
+ AddPoolAndSetData(inputs[i]->GetNumElements(),
+ request,
+ inputs[i]->GetData());
+ }
+
+ // add memory for the output
+ android::sp<IMemory> outMemory = AddPoolAndGetData(expectedOutputTensor.GetNumElements(), request);
+ float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
+
+ // run the execution
+ auto execStatus = Execute(preparedModel, request, expectedExecStatus);
+ BOOST_TEST(execStatus == expectedExecStatus);
+
+ if (execStatus == ErrorStatus::NONE)
+ {
+ // check the result if there was no error
+ const float * expectedOutput = expectedOutputTensor.GetData();
+ for (unsigned int i=0; i<expectedOutputTensor.GetNumElements();++i)
+ {
+ BOOST_TEST(outdata[i] == expectedOutput[i]);
+ }
+ }
+}
+
+} // namespace <anonymous>
+
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxis0, COMPUTE_DEVICES)
+{
+ int32_t axis = 0;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{3,1,1,1},{0,1,2}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(ConcatAxis0_NoInterleave, COMPUTE_DEVICES)
+{
+ int32_t axis = 0;
+ TestTensor aIn{armnn::TensorShape{2,1,2,1},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{3,1,2,1},{4, 5,
+ 6, 7,
+ 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10, 11}};
+
+ TestTensor expected{armnn::TensorShape{6,1,2,1},{0, 1,
+ 2, 3,
+ 4, 5,
+ 6, 7,
+ 8, 9,
+ 10, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxis1, COMPUTE_DEVICES)
+{
+ int32_t axis = 1;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{1,3,1,1},{0,1,2}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(ConcatAxis1_NoInterleave, COMPUTE_DEVICES)
+{
+ int32_t axis = 1;
+ TestTensor aIn{armnn::TensorShape{1,2,2,1},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,3,2,1},{4, 5,
+ 6, 7,
+ 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10, 11}};
+
+ TestTensor expected{armnn::TensorShape{1,6,2,1},{0, 1,
+ 2, 3,
+ 4, 5,
+ 6, 7,
+ 8, 9,
+ 10, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxis1_DoInterleave, COMPUTE_DEVICES)
+{
+ int32_t axis = 1;
+ TestTensor aIn{armnn::TensorShape{2,2,1,1},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{2,3,1,1},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{2,1,1,1},{10,
+ 11}};
+
+ TestTensor expected{armnn::TensorShape{2,6,1,1},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxis2, COMPUTE_DEVICES)
+{
+ int32_t axis = 2;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{1,1,3,1},{0,1,2}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(ConcatAxis2_NoInterleave, COMPUTE_DEVICES)
+{
+ int32_t axis = 2;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,3,2},{4, 5,
+ 6, 7,
+ 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,1,2},{10, 11}};
+
+ TestTensor expected{armnn::TensorShape{1,1,6,2},{0, 1,
+ 2, 3,
+ 4, 5,
+ 6, 7,
+ 8, 9,
+ 10, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxis2_DoInterleave, COMPUTE_DEVICES)
+{
+ int32_t axis = 2;
+ TestTensor aIn{armnn::TensorShape{1,2,2,1},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,2,3,1},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,2,1,1},{10,
+ 11}};
+
+ TestTensor expected{armnn::TensorShape{1,2,6,1},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxis3, COMPUTE_DEVICES)
+{
+ int32_t axis = 3;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{1,1,1,3},{0,1,2}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxis3_DoInterleave, COMPUTE_DEVICES)
+{
+ int32_t axis = 3;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
+ 11}};
+
+ TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(AxisTooBig, COMPUTE_DEVICES)
+{
+ int32_t axis = 4;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{0}};
+
+ // The axis must be within the range of [-rank(values), rank(values))
+ // see: https://www.tensorflow.org/api_docs/python/tf/concat
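+    // (for these rank 4 tensors the valid range is [-4, 4), so axis=4 is one
+    // past the end and should be rejected when the model is prepared)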
+ TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
+}
+
+BOOST_DATA_TEST_CASE(AxisTooSmall, COMPUTE_DEVICES)
+{
+ int32_t axis = -5;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1,1},{0}};
+
+ // The axis must be within the range of [-rank(values), rank(values))
+ // see: https://www.tensorflow.org/api_docs/python/tf/concat
+ TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
+}
+
+BOOST_DATA_TEST_CASE(TooFewInputs, COMPUTE_DEVICES)
+{
+ int32_t axis = 0;
+ TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+
+ // We need at least two tensors to concatenate
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ ConcatTestImpl({&aIn}, axis, aIn, sample, expectedParserStatus);
+}
+
+BOOST_DATA_TEST_CASE(MismatchedInputDimensions, COMPUTE_DEVICES)
+{
+ int32_t axis = 3;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor mismatched{armnn::TensorShape{1,1,1,1},{10}};
+
+ TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ // The input dimensions must be compatible
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ ConcatTestImpl({&aIn, &bIn, &mismatched}, axis, expected, sample, expectedParserStatus);
+}
+
+BOOST_DATA_TEST_CASE(MismatchedInputRanks, COMPUTE_DEVICES)
+{
+ int32_t axis = 2;
+ TestTensor aIn{armnn::TensorShape{1,1,2},{0,1}};
+ TestTensor bIn{armnn::TensorShape{1,1},{4}};
+ TestTensor expected{armnn::TensorShape{1,1,3},{0,1,4}};
+
+ // The input dimensions must be compatible
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ ConcatTestImpl({&aIn, &bIn}, axis, expected, sample, expectedParserStatus);
+}
+
+BOOST_DATA_TEST_CASE(MismatchedOutputDimensions, COMPUTE_DEVICES)
+{
+ int32_t axis = 3;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
+ 11}};
+
+ TestTensor mismatched{armnn::TensorShape{1,1,6,2},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ // The input and output dimensions must be compatible
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
+}
+
+BOOST_DATA_TEST_CASE(MismatchedOutputRank, COMPUTE_DEVICES)
+{
+ int32_t axis = 3;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
+ 11}};
+
+ TestTensor mismatched{armnn::TensorShape{6,2},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ // The input and output ranks must match
+ ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
+}
+
+BOOST_DATA_TEST_CASE(ValidNegativeAxis, COMPUTE_DEVICES)
+{
+ // this is the same as 3
+ // see: https://www.tensorflow.org/api_docs/python/tf/concat
+ int32_t axis = -1;
+ TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
+ 11}};
+
+ TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxisZero3D, COMPUTE_DEVICES)
+{
+ int32_t axis = 0;
+ TestTensor aIn{armnn::TensorShape{1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{3,1,1},{0,1,2}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxisOne3D, COMPUTE_DEVICES)
+{
+ int32_t axis = 1;
+ TestTensor aIn{armnn::TensorShape{1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{1,3,1},{0,1,2}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxisTwo3D, COMPUTE_DEVICES)
+{
+ int32_t axis = 2;
+ TestTensor aIn{armnn::TensorShape{1,1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{1,1,3},{0,1,2}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxisZero2D, COMPUTE_DEVICES)
+{
+ int32_t axis = 0;
+ TestTensor aIn{armnn::TensorShape{1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{3,1},{0,1,2}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxisOne2D, COMPUTE_DEVICES)
+{
+ int32_t axis = 1;
+ TestTensor aIn{armnn::TensorShape{1,1},{0}};
+ TestTensor bIn{armnn::TensorShape{1,1},{1}};
+ TestTensor cIn{armnn::TensorShape{1,1},{2}};
+
+ TestTensor expected{armnn::TensorShape{1,3},{0,1,2}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_DATA_TEST_CASE(SimpleConcatAxisZero1D, COMPUTE_DEVICES)
+{
+ int32_t axis = 0;
+ TestTensor aIn{armnn::TensorShape{1},{0}};
+ TestTensor bIn{armnn::TensorShape{1},{1}};
+ TestTensor cIn{armnn::TensorShape{1},{2}};
+
+ TestTensor expected{armnn::TensorShape{3},{0,1,2}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+}
+
+BOOST_AUTO_TEST_SUITE_END()