Diffstat (limited to 'test')
 test/1.0/Convolution2D.cpp              |   16
 test/1.0/FullyConnectedReshape.cpp      |   37
 test/1.0/Lstm.cpp                       |   64
 test/1.1/Convolution2D.cpp              |   21
 test/1.1/Lstm.cpp                       |   64
 test/1.1/Mean.cpp                       |  207
 test/1.1/Transpose.cpp                  |  116
 test/1.2/Capabilities.cpp               |   35
 test/1.2/Dilation.cpp                   |   27
 test/1.2/Lstm.cpp                       |   83
 test/1.2/Mean.cpp                       |  204
 test/1.2/UnidirectionalSequenceLstm.cpp |   40
 test/1.3/QLstm.cpp                      |   85
 test/1.3/QosTests.cpp                   |   33
 test/Android.mk                         |  115
 test/Concat.cpp                         |  687
 test/Concurrent.cpp                     |   26
 test/Convolution2D.hpp                  |   75
 test/Dilation.hpp                       |   71
 test/DriverTestHelpers.cpp              |   27
 test/DriverTestHelpers.hpp              |   18
 test/FullyConnected.cpp                 |   71
 test/GenericLayerTests.cpp              |   60
 test/Lstm.hpp                           |   53
 test/SystemProperties.cpp               |   27
 test/TestHalfTensor.cpp                 |   33
 test/TestHalfTensor.hpp                 |   38
 test/TestTensor.cpp                     |    5
 test/TestTensor.hpp                     |   10
 test/Tests.cpp                          |   34
 test/UnidirectionalSequenceLstm.hpp     | 1419
 test/UtilsTests.cpp                     |   73
 32 files changed, 3032 insertions(+), 842 deletions(-)
diff --git a/test/1.0/Convolution2D.cpp b/test/1.0/Convolution2D.cpp
index 9a5d2393..2af09157 100644
--- a/test/1.0/Convolution2D.cpp
+++ b/test/1.0/Convolution2D.cpp
@@ -1,19 +1,14 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "../DriverTestHelpers.hpp"
#include "../Convolution2D.hpp"
-#include "../../1.0/HalPolicy.hpp"
-#include <boost/test/unit_test.hpp>
#include <log/log.h>
#include <OperationsUtils.h>
-BOOST_AUTO_TEST_SUITE(Convolution2DTests)
-
using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
@@ -29,14 +24,17 @@ void SetModelFp16Flag(V1_0::Model&, bool)
} // namespace driverTestHelpers
-BOOST_AUTO_TEST_CASE(ConvValidPadding_Hal_1_0)
+DOCTEST_TEST_SUITE("Convolution2DTests_1.0")
+{
+
+DOCTEST_TEST_CASE("ConvValidPadding_Hal_1_0")
{
PaddingTestImpl<hal_1_0::HalPolicy>(android::nn::kPaddingValid);
}
-BOOST_AUTO_TEST_CASE(ConvSamePadding_Hal_1_0)
+DOCTEST_TEST_CASE("ConvSamePadding_Hal_1_0")
{
PaddingTestImpl<hal_1_0::HalPolicy>(android::nn::kPaddingSame);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
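
The hunk above is the mechanical pattern repeated throughout this patch: a `BOOST_AUTO_TEST_SUITE(Name)` / `BOOST_AUTO_TEST_SUITE_END()` pair becomes a braced `DOCTEST_TEST_SUITE("Name") { ... }` block, each `BOOST_AUTO_TEST_CASE` becomes a `DOCTEST_TEST_CASE` with a string name, and the Boost unit-test header drops out. A minimal, self-contained sketch of the target shape (illustrative only, not code from this patch):

```cpp
// Sketch of the doctest shape this patch converges on. The prefixed
// DOCTEST_* macros are always available, so the tests do not depend on
// doctest's short macro names (TEST_CASE, CHECK, ...).
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

DOCTEST_TEST_SUITE("Convolution2DTests_Sketch")
{

DOCTEST_TEST_CASE("TrivialCheck")
{
    DOCTEST_CHECK(1 + 1 == 2);   // BOOST_TEST(expr) maps to DOCTEST_CHECK(expr)
}

}   // closing brace replaces BOOST_AUTO_TEST_SUITE_END()
```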
diff --git a/test/1.0/FullyConnectedReshape.cpp b/test/1.0/FullyConnectedReshape.cpp
index 72c90ca5..e481f2d2 100644
--- a/test/1.0/FullyConnectedReshape.cpp
+++ b/test/1.0/FullyConnectedReshape.cpp
@@ -1,42 +1,39 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../DriverTestHelpers.hpp"
-#include "../../1.0/FullyConnected.hpp"
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(FullyConnectedReshapeTests)
-
-BOOST_AUTO_TEST_CASE(TestFlattenFullyConnectedInput)
+DOCTEST_TEST_SUITE("FullyConnectedReshapeTests")
+{
+DOCTEST_TEST_CASE("TestFlattenFullyConnectedInput")
{
using armnn::TensorShape;
// Pass through 2d input
- BOOST_TEST(FlattenFullyConnectedInput(TensorShape({2,2048}), TensorShape({512, 2048})) ==
- TensorShape({2, 2048}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({2,2048}),
+ TensorShape({512, 2048})) == TensorShape({2, 2048}));
// Trivial flattening of batched channels
- BOOST_TEST(FlattenFullyConnectedInput(TensorShape({97,1,1,2048}), TensorShape({512, 2048})) ==
- TensorShape({97, 2048}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({97,1,1,2048}),
+ TensorShape({512, 2048})) == TensorShape({97, 2048}));
// Flatten single batch of rows
- BOOST_TEST(FlattenFullyConnectedInput(TensorShape({1,97,1,2048}), TensorShape({512, 2048})) ==
- TensorShape({97, 2048}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({1,97,1,2048}),
+ TensorShape({512, 2048})) == TensorShape({97, 2048}));
// Flatten single batch of columns
- BOOST_TEST(FlattenFullyConnectedInput(TensorShape({1,1,97,2048}), TensorShape({512, 2048})) ==
- TensorShape({97, 2048}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({1,1,97,2048}),
+ TensorShape({512, 2048})) == TensorShape({97, 2048}));
// Move batches into input dimension
- BOOST_TEST(FlattenFullyConnectedInput(TensorShape({50,1,1,10}), TensorShape({512, 20})) ==
- TensorShape({25, 20}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({50,1,1,10}),
+ TensorShape({512, 20})) == TensorShape({25, 20}));
// Flatten single batch of 3D data (e.g. convolution output)
- BOOST_TEST(FlattenFullyConnectedInput(TensorShape({1,16,16,10}), TensorShape({512, 2560})) ==
- TensorShape({1, 2560}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({1,16,16,10}),
+ TensorShape({512, 2560})) == TensorShape({1, 2560}));
}
-BOOST_AUTO_TEST_SUITE_END()
+}
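
The expectations above pin down the flattening rule for FULLY_CONNECTED inputs: whatever the input rank, its elements are regrouped into `{ totalElements / weightsInputSize, weightsInputSize }`. A standalone re-derivation of that rule (the production helper lives in `1.0/FullyConnected.hpp`; this sketch only mirrors its arithmetic):

```cpp
#include <cassert>
#include <cstdint>
#include <utility>

// Mirrors the rule the DOCTEST_CHECKs above encode; not the driver's code.
std::pair<uint32_t, uint32_t> FlattenFcInputSketch(const uint32_t* dims,
                                                   int numDims,
                                                   uint32_t weightsInputSize)
{
    uint32_t total = 1;
    for (int i = 0; i < numDims; ++i)
    {
        total *= dims[i];
    }
    assert(total % weightsInputSize == 0);   // otherwise flattening is invalid
    return { total / weightsInputSize, weightsInputSize };  // { batch, inputSize }
}

int main()
{
    // "Move batches into input dimension": {50,1,1,10} against weights {512, 20}.
    const uint32_t dims[] = { 50, 1, 1, 10 };          // 500 elements in total
    auto shape = FlattenFcInputSketch(dims, 4, 20);
    assert(shape.first == 25 && shape.second == 20);   // matches {25, 20} above
    return 0;
}
```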
diff --git a/test/1.0/Lstm.cpp b/test/1.0/Lstm.cpp
index 5f0a209d..6b3e7042 100644
--- a/test/1.0/Lstm.cpp
+++ b/test/1.0/Lstm.cpp
@@ -1,34 +1,60 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../Lstm.hpp"
-#include <boost/test/data/test_case.hpp>
-
-BOOST_AUTO_TEST_SUITE(LstmTests)
-
using namespace armnn_driver;
-BOOST_DATA_TEST_CASE(LstmNoCifgNoPeepholeNoProjectionTest, COMPUTE_DEVICES)
+DOCTEST_TEST_SUITE("LstmTests_1.0_CpuRef")
{
- LstmNoCifgNoPeepholeNoProjection<hal_1_0::HalPolicy>(sample);
-}
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionTest, COMPUTE_DEVICES)
-{
- LstmCifgPeepholeNoProjection<hal_1_0::HalPolicy>(sample);
-}
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.0_armnn::Compute::CpuRef")
+ {
+ LstmNoCifgNoPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.0_CpuRef")
+ {
+ LstmCifgPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.0_CpuRef")
+ {
+ LstmNoCifgPeepholeProjection<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.0_CpuRef")
+ {
+ LstmCifgPeepholeNoProjectionBatch2<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
+ }
-BOOST_DATA_TEST_CASE(LstmNoCifgPeepholeProjectionTest, COMPUTE_DEVICES)
-{
- LstmNoCifgPeepholeProjection<hal_1_0::HalPolicy>(sample);
}
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionBatch2Test, COMPUTE_DEVICES)
+#if defined(ARMCOMPUTECL_ENABLED)
+DOCTEST_TEST_SUITE("LstmTests_1.0_GpuAcc")
{
- LstmCifgPeepholeNoProjectionBatch2<hal_1_0::HalPolicy>(sample);
-}
-BOOST_AUTO_TEST_SUITE_END()
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.0_GpuAcc")
+ {
+ LstmNoCifgNoPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.0_GpuAcc")
+ {
+ LstmCifgPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.0_GpuAcc")
+ {
+ LstmNoCifgPeepholeProjection<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.0_GpuAcc")
+ {
+ LstmCifgPeepholeNoProjectionBatch2<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+}
+#endif
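
This rewrite also shows why the file roughly doubles in size: `BOOST_DATA_TEST_CASE(..., COMPUTE_DEVICES)` ran one body per device through the implicit `sample` parameter, while doctest has no drop-in dataset macro, so each device becomes its own named case and the GpuAcc copies can sit behind `#if defined(ARMCOMPUTECL_ENABLED)`. doctest's closest built-in analogue is `SUBCASE`; a sketch of that alternative, assuming a shared test body:

```cpp
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

enum class Compute { CpuRef, GpuAcc };   // stand-in for armnn::Compute

static void LstmTestBody(Compute /*device*/)
{
    // shared body, elided in this sketch
}

// One case, one subcase per device: doctest re-enters the case once per
// subcase. The patch instead unrolls devices into separately named cases,
// which keeps per-device names in reports and allows per-backend #if guards.
DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjection_Sketch")
{
    DOCTEST_SUBCASE("CpuRef") { LstmTestBody(Compute::CpuRef); }
    DOCTEST_SUBCASE("GpuAcc") { LstmTestBody(Compute::GpuAcc); }
}
```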
diff --git a/test/1.1/Convolution2D.cpp b/test/1.1/Convolution2D.cpp
index 32d5018c..4601f760 100644
--- a/test/1.1/Convolution2D.cpp
+++ b/test/1.1/Convolution2D.cpp
@@ -1,19 +1,14 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "../DriverTestHelpers.hpp"
#include "../Convolution2D.hpp"
-#include "../../1.1/HalPolicy.hpp"
-#include <boost/test/unit_test.hpp>
#include <log/log.h>
#include <OperationsUtils.h>
-BOOST_AUTO_TEST_SUITE(Convolution2DTests)
-
using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
@@ -29,24 +24,28 @@ void SetModelFp16Flag(V1_1::Model& model, bool fp16Enabled)
} // namespace driverTestHelpers
-BOOST_AUTO_TEST_CASE(ConvValidPadding_Hal_1_1)
+
+DOCTEST_TEST_SUITE("Convolution2DTests_1.1")
+{
+
+DOCTEST_TEST_CASE("ConvValidPadding_Hal_1_1")
{
PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingValid);
}
-BOOST_AUTO_TEST_CASE(ConvSamePadding_Hal_1_1)
+DOCTEST_TEST_CASE("ConvSamePadding_Hal_1_1")
{
PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingSame);
}
-BOOST_AUTO_TEST_CASE(ConvValidPaddingFp16Flag_Hal_1_1)
+DOCTEST_TEST_CASE("ConvValidPaddingFp16Flag_Hal_1_1")
{
PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingValid, true);
}
-BOOST_AUTO_TEST_CASE(ConvSamePaddingFp16Flag_Hal_1_1)
+DOCTEST_TEST_CASE("ConvSamePaddingFp16Flag_Hal_1_1")
{
PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingSame, true);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/1.1/Lstm.cpp b/test/1.1/Lstm.cpp
index 703597e5..cbdf6b14 100644
--- a/test/1.1/Lstm.cpp
+++ b/test/1.1/Lstm.cpp
@@ -1,34 +1,60 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../Lstm.hpp"
-#include <boost/test/data/test_case.hpp>
-
-BOOST_AUTO_TEST_SUITE(LstmTests)
-
using namespace armnn_driver;
-BOOST_DATA_TEST_CASE(LstmNoCifgNoPeepholeNoProjectionTest, COMPUTE_DEVICES)
+DOCTEST_TEST_SUITE("LstmTests_1.1_CpuRef")
{
- LstmNoCifgNoPeepholeNoProjection<hal_1_1::HalPolicy>(sample);
-}
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionTest, COMPUTE_DEVICES)
-{
- LstmCifgPeepholeNoProjection<hal_1_1::HalPolicy>(sample);
-}
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.1_armnn::Compute::CpuRef")
+ {
+ LstmNoCifgNoPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.1_CpuRef")
+ {
+ LstmCifgPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.1_CpuRef")
+ {
+ LstmNoCifgPeepholeProjection<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.1_CpuRef")
+ {
+ LstmCifgPeepholeNoProjectionBatch2<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
+ }
-BOOST_DATA_TEST_CASE(LstmNoCifgPeepholeProjectionTest, COMPUTE_DEVICES)
-{
- LstmNoCifgPeepholeProjection<hal_1_1::HalPolicy>(sample);
}
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionBatch2Test, COMPUTE_DEVICES)
+#if defined(ARMCOMPUTECL_ENABLED)
+DOCTEST_TEST_SUITE("LstmTests_1.1_GpuAcc")
{
- LstmCifgPeepholeNoProjectionBatch2<hal_1_1::HalPolicy>(sample);
-}
-BOOST_AUTO_TEST_SUITE_END()
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.1_GpuAcc")
+ {
+ LstmNoCifgNoPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.1_GpuAcc")
+ {
+ LstmCifgPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.1_GpuAcc")
+ {
+ LstmNoCifgPeepholeProjection<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.1_GpuAcc")
+ {
+ LstmCifgPeepholeNoProjectionBatch2<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+}
+#endif
diff --git a/test/1.1/Mean.cpp b/test/1.1/Mean.cpp
index c9a5a6d3..70bdc3d3 100644
--- a/test/1.1/Mean.cpp
+++ b/test/1.1/Mean.cpp
@@ -1,19 +1,15 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../DriverTestHelpers.hpp"
#include "../TestTensor.hpp"
-#include "../1.1/HalPolicy.hpp"
-
-#include <boost/test/data/test_case.hpp>
+#include <1.1/HalPolicy.hpp>
#include <array>
-BOOST_AUTO_TEST_SUITE(MeanTests)
-
using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
@@ -24,12 +20,6 @@ using RequestArgument = V1_0::RequestArgument;
namespace
{
-#ifndef ARMCOMPUTECL_ENABLED
- static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
- static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
-#endif
-
void MeanTestImpl(const TestTensor& input,
const hidl_vec<uint32_t>& axisDimensions,
const int32_t* axisValues,
@@ -94,64 +84,177 @@ void MeanTestImpl(const TestTensor& input,
if (preparedModel.get() != nullptr)
{
V1_0::ErrorStatus execStatus = Execute(preparedModel, request);
- BOOST_TEST(execStatus == V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK((int)execStatus == (int)V1_0::ErrorStatus::NONE);
}
const float* expectedOutputData = expectedOutput.GetData();
for (unsigned int i = 0; i < expectedOutput.GetNumElements(); i++)
{
- BOOST_TEST(outputData[i] == expectedOutputData[i]);
+ DOCTEST_CHECK(outputData[i] == expectedOutputData[i]);
}
}
} // anonymous namespace
-BOOST_DATA_TEST_CASE(MeanNoKeepDimsTest, COMPUTE_DEVICES)
+DOCTEST_TEST_SUITE("MeanTests_CpuRef")
{
- TestTensor input{ armnn::TensorShape{ 4, 3, 2 }, { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
- 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
- 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
- hidl_vec<uint32_t> axisDimensions = { 2 };
- int32_t axisValues[] = { 0, 1 };
- int32_t keepDims = 0;
- TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
-
- MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, sample);
-}
-BOOST_DATA_TEST_CASE(MeanKeepDimsTest, COMPUTE_DEVICES)
-{
- TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
- hidl_vec<uint32_t> axisDimensions = { 1 };
- int32_t axisValues[] = { 2 };
- int32_t keepDims = 1;
- TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0f, 2.0f } };
+ DOCTEST_TEST_CASE("MeanNoKeepDimsTest_CpuRef")
+ {
+ TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+ { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+ 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+ 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 2 };
+ int32_t axisValues[] = { 0, 1 };
+ int32_t keepDims = 0;
+ TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("MeanKeepDimsTest_CpuRef")
+ {
+ TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 1 };
+ int32_t axisValues[] = { 2 };
+ int32_t keepDims = 1;
+ TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0f, 2.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("MeanFp16EnabledNoKeepDimsTest_CpuRef")
+ {
+ TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+ { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+ 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+ 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 2 };
+ int32_t axisValues[] = { 0, 1 };
+ int32_t keepDims = 0;
+ TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("MeanFp16EnabledKeepDimsTest_CpuRef")
+ {
+ TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 1 };
+ int32_t axisValues[] = { 2 };
+ int32_t keepDims = 1;
+ TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0f, 2.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuRef);
+ }
- MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, sample);
}
-BOOST_DATA_TEST_CASE(MeanFp16NoKeepDimsTest, COMPUTE_DEVICES)
+#ifdef ARMCOMPUTECL_ENABLED
+DOCTEST_TEST_SUITE("MeanTests_CpuAcc")
{
- TestTensor input{ armnn::TensorShape{ 4, 3, 2 }, { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
- 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
- 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
- hidl_vec<uint32_t> axisDimensions = { 2 };
- int32_t axisValues[] = { 0, 1 };
- int32_t keepDims = 0;
- TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
-
- MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, sample);
+ DOCTEST_TEST_CASE("MeanNoKeepDimsTest_CpuAcc")
+ {
+ TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+ { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+ 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+ 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 2 };
+ int32_t axisValues[] = { 0, 1 };
+ int32_t keepDims = 0;
+ TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("MeanKeepDimsTest_CpuAcc")
+ {
+ TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 1 };
+ int32_t axisValues[] = { 2 };
+ int32_t keepDims = 1;
+ TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0f, 2.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("MeanFp16EnabledNoKeepDimsTest_CpuAcc")
+ {
+ TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+ { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+ 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+ 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 2 };
+ int32_t axisValues[] = { 0, 1 };
+ int32_t keepDims = 0;
+ TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("MeanFp16EnabledKeepDimsTest_CpuAcc")
+ {
+ TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 1 };
+ int32_t axisValues[] = { 2 };
+ int32_t keepDims = 1;
+ TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0f, 2.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuAcc);
+ }
}
-BOOST_DATA_TEST_CASE(MeanFp16KeepDimsTest, COMPUTE_DEVICES)
+DOCTEST_TEST_SUITE("MeanTests_GpuAcc")
{
- TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
- hidl_vec<uint32_t> axisDimensions = { 1 };
- int32_t axisValues[] = { 2 };
- int32_t keepDims = 1;
- TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0f, 2.0f } };
+ DOCTEST_TEST_CASE("MeanNoKeepDimsTest_GpuAcc")
+ {
+ TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+ { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+ 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+ 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 2 };
+ int32_t axisValues[] = { 0, 1 };
+ int32_t keepDims = 0;
+ TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::GpuAcc);
+ }
- MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, sample);
-}
+ DOCTEST_TEST_CASE("MeanKeepDimsTest_GpuAcc")
+ {
+ TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 1 };
+ int32_t axisValues[] = { 2 };
+ int32_t keepDims = 1;
+ TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0f, 2.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("MeanFp16EnabledNoKeepDimsTest_GpuAcc")
+ {
+ TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+ { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+ 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+ 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 2 };
+ int32_t axisValues[] = { 0, 1 };
+ int32_t keepDims = 0;
+ TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::GpuAcc);
+ }
-BOOST_AUTO_TEST_SUITE_END()
+ DOCTEST_TEST_CASE("MeanFp16EnabledKeepDimsTest_GpuAcc")
+ {
+ TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+ hidl_vec<uint32_t> axisDimensions = { 1 };
+ int32_t axisValues[] = { 2 };
+ int32_t keepDims = 1;
+ TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0f, 2.0f } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::GpuAcc);
+ }
+}
+#endif
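
For reference, the arithmetic these Mean cases pin down is small enough to restate directly: reducing a `{4,3,2}` tensor holding 1..24 over axes `{0,1}` averages the twelve values that share each position of the surviving last axis, and `keepDims == 0` drops the reduced axes from the output shape. A plain C++ check of the expected `{12, 13}`:

```cpp
#include <cassert>

int main()
{
    // Input {4,3,2} holding 1..24, as in MeanNoKeepDimsTest above.
    float in[4][3][2];
    float next = 1.0f;
    for (int a = 0; a < 4; ++a)
        for (int b = 0; b < 3; ++b)
            for (int c = 0; c < 2; ++c)
                in[a][b][c] = next++;

    // Reduce over axes {0, 1}: average the 12 values sharing each index of
    // the surviving last dimension.
    float out[2] = { 0.0f, 0.0f };
    for (int a = 0; a < 4; ++a)
        for (int b = 0; b < 3; ++b)
            for (int c = 0; c < 2; ++c)
                out[c] += in[a][b][c];
    out[0] /= 12.0f;
    out[1] /= 12.0f;

    assert(out[0] == 12.0f && out[1] == 13.0f);   // the expected {12, 13}
    return 0;
}
```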
diff --git a/test/1.1/Transpose.cpp b/test/1.1/Transpose.cpp
index 206f9b98..5499e0d6 100644
--- a/test/1.1/Transpose.cpp
+++ b/test/1.1/Transpose.cpp
@@ -1,24 +1,18 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "OperationsUtils.h"
#include "../DriverTestHelpers.hpp"
#include "../TestTensor.hpp"
-
-#include "../1.1/HalPolicy.hpp"
-
-#include <boost/test/unit_test.hpp>
-#include <boost/test/data/test_case.hpp>
+#include <1.1/HalPolicy.hpp>
#include <log/log.h>
+#include <OperationsUtils.h>
#include <array>
#include <cmath>
-BOOST_AUTO_TEST_SUITE(TransposeTests)
-
using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
@@ -29,12 +23,6 @@ using RequestArgument = V1_0::RequestArgument;
namespace
{
-#ifndef ARMCOMPUTECL_ENABLED
- static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
- static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
-#endif
-
void TransposeTestImpl(const TestTensor & inputs, int32_t perm[],
const TestTensor & expectedOutputTensor, armnn::Compute computeDevice)
{
@@ -98,38 +86,100 @@ void TransposeTestImpl(const TestTensor & inputs, int32_t perm[],
const float * expectedOutput = expectedOutputTensor.GetData();
for (unsigned int i = 0; i < expectedOutputTensor.GetNumElements(); ++i)
{
- BOOST_TEST(outdata[i] == expectedOutput[i]);
+ DOCTEST_CHECK(outdata[i] == expectedOutput[i]);
}
}
} // namespace
-BOOST_DATA_TEST_CASE(Transpose , COMPUTE_DEVICES)
+DOCTEST_TEST_SUITE("TransposeTests_CpuRef")
{
- int32_t perm[] = {2, 3, 1, 0};
- TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
- TestTensor expected{armnn::TensorShape{2, 2, 2, 1},{1, 5, 2, 6, 3, 7, 4, 8}};
+ DOCTEST_TEST_CASE("Transpose_CpuRef")
+ {
+ int32_t perm[] = {2, 3, 1, 0};
+ TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+ TestTensor expected{armnn::TensorShape{2, 2, 2, 1},{1, 5, 2, 6, 3, 7, 4, 8}};
+
+ TransposeTestImpl(input, perm, expected, armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("TransposeNHWCToArmNN_CpuRef")
+ {
+ int32_t perm[] = {0, 3, 1, 2};
+ TestTensor input{armnn::TensorShape{1, 2, 2, 3},{1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33}};
+ TestTensor expected{armnn::TensorShape{1, 3, 2, 2},{1, 11, 21, 31, 2, 12, 22, 32, 3, 13, 23, 33}};
- TransposeTestImpl(input, perm, expected, sample);
+ TransposeTestImpl(input, perm, expected, armnn::Compute::CpuRef);
+ }
+ DOCTEST_TEST_CASE("TransposeArmNNToNHWC_CpuRef")
+ {
+ int32_t perm[] = {0, 2, 3, 1};
+ TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+ TestTensor expected{armnn::TensorShape{1, 2, 2, 2},{1, 5, 2, 6, 3, 7, 4, 8}};
+
+ TransposeTestImpl(input, perm, expected, armnn::Compute::CpuRef);
+ }
}
-BOOST_DATA_TEST_CASE(TransposeNHWCToArmNN , COMPUTE_DEVICES)
+#ifdef ARMCOMPUTECL_ENABLED
+DOCTEST_TEST_SUITE("TransposeTests_CpuAcc")
{
- int32_t perm[] = {0, 3, 1, 2};
- TestTensor input{armnn::TensorShape{1, 2, 2, 3},{1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33}};
- TestTensor expected{armnn::TensorShape{1, 3, 2, 2},{1, 11, 21, 31, 2, 12, 22, 32, 3, 13, 23, 33}};
+ DOCTEST_TEST_CASE("Transpose_CpuAcc")
+ {
+ int32_t perm[] = {2, 3, 1, 0};
+ TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+ TestTensor expected{armnn::TensorShape{2, 2, 2, 1},{1, 5, 2, 6, 3, 7, 4, 8}};
+
+ TransposeTestImpl(input, perm, expected, armnn::Compute::CpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("TransposeNHWCToArmNN_CpuAcc")
+ {
+ int32_t perm[] = {0, 3, 1, 2};
+ TestTensor input{armnn::TensorShape{1, 2, 2, 3},{1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33}};
+ TestTensor expected{armnn::TensorShape{1, 3, 2, 2},{1, 11, 21, 31, 2, 12, 22, 32, 3, 13, 23, 33}};
+
+ TransposeTestImpl(input, perm, expected, armnn::Compute::CpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("TransposeArmNNToNHWC_CpuAcc")
+ {
+ int32_t perm[] = {0, 2, 3, 1};
+ TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+ TestTensor expected{armnn::TensorShape{1, 2, 2, 2},{1, 5, 2, 6, 3, 7, 4, 8}};
- TransposeTestImpl(input, perm, expected, sample);
+ TransposeTestImpl(input, perm, expected, armnn::Compute::CpuAcc);
+ }
}
-BOOST_DATA_TEST_CASE(TransposeArmNNToNHWC , COMPUTE_DEVICES)
+DOCTEST_TEST_SUITE("TransposeTests_GpuAcc")
{
- int32_t perm[] = {0, 2, 3, 1};
- TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
- TestTensor expected{armnn::TensorShape{1, 2, 2, 2},{1, 5, 2, 6, 3, 7, 4, 8}};
+ DOCTEST_TEST_CASE("Transpose_GpuAcc")
+ {
+ int32_t perm[] = {2, 3, 1, 0};
+ TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+ TestTensor expected{armnn::TensorShape{2, 2, 2, 1},{1, 5, 2, 6, 3, 7, 4, 8}};
- TransposeTestImpl(input, perm, expected, sample);
-}
+ TransposeTestImpl(input, perm, expected, armnn::Compute::GpuAcc);
+ }
-BOOST_AUTO_TEST_SUITE_END()
+ DOCTEST_TEST_CASE("TransposeNHWCToArmNN_GpuAcc")
+ {
+ int32_t perm[] = {0, 3, 1, 2};
+ TestTensor input{armnn::TensorShape{1, 2, 2, 3},{1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33}};
+ TestTensor expected{armnn::TensorShape{1, 3, 2, 2},{1, 11, 21, 31, 2, 12, 22, 32, 3, 13, 23, 33}};
+
+ TransposeTestImpl(input, perm, expected, armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("TransposeArmNNToNHWC_GpuAcc")
+ {
+ int32_t perm[] = {0, 2, 3, 1};
+ TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+ TestTensor expected{armnn::TensorShape{1, 2, 2, 2},{1, 5, 2, 6, 3, 7, 4, 8}};
+
+ TransposeTestImpl(input, perm, expected, armnn::Compute::GpuAcc);
+ }
+}
+#endif
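
The permutation convention behind these expectations is the numpy/NNAPI one: `out.shape[k] = in.shape[perm[k]]`, and writing `out[i0..i3]` reads `in[j0..j3]` where `j[perm[k]] = i[k]`. A standalone check of the first case (`perm = {2, 3, 1, 0}`):

```cpp
#include <cassert>
#include <vector>

int main()
{
    const int perm[4]    = { 2, 3, 1, 0 };
    const int inShape[4] = { 1, 2, 2, 2 };
    std::vector<float> in = { 1, 2, 3, 4, 5, 6, 7, 8 };

    int outShape[4];
    for (int k = 0; k < 4; ++k) outShape[k] = inShape[perm[k]];   // {2, 2, 2, 1}

    std::vector<float> out(in.size());
    int i[4];
    for (i[0] = 0; i[0] < outShape[0]; ++i[0])
    for (i[1] = 0; i[1] < outShape[1]; ++i[1])
    for (i[2] = 0; i[2] < outShape[2]; ++i[2])
    for (i[3] = 0; i[3] < outShape[3]; ++i[3])
    {
        int j[4];
        for (int k = 0; k < 4; ++k) j[perm[k]] = i[k];   // scatter output index
        int inIdx  = ((j[0] * inShape[1]  + j[1]) * inShape[2]  + j[2]) * inShape[3]  + j[3];
        int outIdx = ((i[0] * outShape[1] + i[1]) * outShape[2] + i[2]) * outShape[3] + i[3];
        out[outIdx] = in[inIdx];
    }

    const std::vector<float> expected = { 1, 5, 2, 6, 3, 7, 4, 8 };  // as in Transpose_CpuRef
    assert(out == expected);
    return 0;
}
```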
diff --git a/test/1.2/Capabilities.cpp b/test/1.2/Capabilities.cpp
index 15ecf968..41d5ee53 100644
--- a/test/1.2/Capabilities.cpp
+++ b/test/1.2/Capabilities.cpp
@@ -1,15 +1,12 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "../../1.2/ArmnnDriverImpl.hpp"
-
+#include "../DriverTestHelpers.hpp"
#include "Utils.h"
-#include <armnn/utility/Assert.hpp>
-
-#include <boost/test/unit_test.hpp>
+#include <1.2/ArmnnDriverImpl.hpp>
#include <sys/system_properties.h>
@@ -62,15 +59,14 @@ void CheckOperandType(const V1_2::Capabilities& capabilities, V1_2::OperandType
{
using namespace armnn_driver::hal_1_2;
V1_0::PerformanceInfo perfInfo = android::nn::lookup(capabilities.operandPerformance, type);
- ARMNN_ASSERT(perfInfo.execTime == execTime);
- ARMNN_ASSERT(perfInfo.powerUsage == powerUsage);
+ DOCTEST_CHECK(perfInfo.execTime == execTime);
+ DOCTEST_CHECK(perfInfo.powerUsage == powerUsage);
}
-BOOST_FIXTURE_TEST_SUITE(CapabilitiesTests, CapabilitiesFixture)
-
-BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesWithRuntime)
+DOCTEST_TEST_SUITE("CapabilitiesTests")
+{
+DOCTEST_TEST_CASE_FIXTURE(CapabilitiesFixture, "PerformanceCapabilitiesWithRuntime")
{
- using namespace armnn_driver::hal_1_2;
using namespace android::nn;
auto getCapabilitiesFn = [&](V1_0::ErrorStatus error, const V1_2::Capabilities& capabilities)
@@ -94,7 +90,8 @@ BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesWithRuntime)
CheckOperandType(capabilities, V1_2::OperandType::OEM, FLT_MAX, FLT_MAX);
CheckOperandType(capabilities, V1_2::OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
- ARMNN_ASSERT(error == V1_0::ErrorStatus::NONE);
+ bool result = (error == V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(result);
};
__system_property_set("Armnn.operandTypeTensorFloat32Performance.execTime", "2.0f");
@@ -121,12 +118,11 @@ BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesWithRuntime)
armnn::IRuntime::CreationOptions options;
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
- ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
+ armnn_driver::hal_1_2::ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
}
-BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesUndefined)
+DOCTEST_TEST_CASE_FIXTURE(CapabilitiesFixture, "PerformanceCapabilitiesUndefined")
{
- using namespace armnn_driver::hal_1_2;
using namespace android::nn;
float defaultValue = .1f;
@@ -155,13 +151,14 @@ BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesUndefined)
CheckOperandType(capabilities, V1_2::OperandType::OEM, FLT_MAX, FLT_MAX);
CheckOperandType(capabilities, V1_2::OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
- ARMNN_ASSERT(error == V1_0::ErrorStatus::NONE);
+ bool result = (error == V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(result);
};
armnn::IRuntime::CreationOptions options;
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
- ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
+ armnn_driver::hal_1_2::ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
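
The fixture translation here is worth noting: `BOOST_FIXTURE_TEST_SUITE` attached `CapabilitiesFixture` to every case in the suite, whereas doctest binds the fixture per case with `DOCTEST_TEST_CASE_FIXTURE`, generating the body as a member of a class derived from the fixture, so setup and teardown live in the fixture's constructor and destructor. A sketch with a stand-in fixture (the real one configures system properties):

```cpp
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

struct CapabilitiesFixtureSketch   // stand-in for the driver's CapabilitiesFixture
{
    CapabilitiesFixtureSketch()  { m_DefaultValue = 0.1f; }   // per-case setup
    ~CapabilitiesFixtureSketch() { /* per-case teardown */ }
    float m_DefaultValue = 0.0f;
};

DOCTEST_TEST_CASE_FIXTURE(CapabilitiesFixtureSketch, "UsesFixtureMembers")
{
    // The case body becomes a member function, so fixture members are in scope.
    DOCTEST_CHECK(m_DefaultValue == doctest::Approx(0.1f));
}
```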
diff --git a/test/1.2/Dilation.cpp b/test/1.2/Dilation.cpp
index 1a7ba4b4..c9182a7c 100644
--- a/test/1.2/Dilation.cpp
+++ b/test/1.2/Dilation.cpp
@@ -1,17 +1,16 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../Dilation.hpp"
-#include "../../1.2/HalPolicy.hpp"
+#include <1.2/HalPolicy.hpp>
-#include <boost/test/data/test_case.hpp>
-
-BOOST_AUTO_TEST_SUITE(DilationTests)
+DOCTEST_TEST_SUITE("DilationTests")
+{
-BOOST_AUTO_TEST_CASE(ConvolutionExplicitPaddingNoDilation)
+DOCTEST_TEST_CASE("ConvolutionExplicitPaddingNoDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = false;
@@ -21,7 +20,7 @@ BOOST_AUTO_TEST_CASE(ConvolutionExplicitPaddingNoDilation)
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-BOOST_AUTO_TEST_CASE(ConvolutionExplicitPaddingDilation)
+DOCTEST_TEST_CASE("ConvolutionExplicitPaddingDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = false;
@@ -31,7 +30,7 @@ BOOST_AUTO_TEST_CASE(ConvolutionExplicitPaddingDilation)
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-BOOST_AUTO_TEST_CASE(ConvolutionImplicitPaddingNoDilation)
+DOCTEST_TEST_CASE("ConvolutionImplicitPaddingNoDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = false;
@@ -41,7 +40,7 @@ BOOST_AUTO_TEST_CASE(ConvolutionImplicitPaddingNoDilation)
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-BOOST_AUTO_TEST_CASE(ConvolutionImplicitPaddingDilation)
+DOCTEST_TEST_CASE("ConvolutionImplicitPaddingDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = false;
@@ -51,7 +50,7 @@ BOOST_AUTO_TEST_CASE(ConvolutionImplicitPaddingDilation)
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-BOOST_AUTO_TEST_CASE(DepthwiseConvolutionExplicitPaddingNoDilation)
+DOCTEST_TEST_CASE("DepthwiseConvolutionExplicitPaddingNoDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = true;
@@ -61,7 +60,7 @@ BOOST_AUTO_TEST_CASE(DepthwiseConvolutionExplicitPaddingNoDilation)
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-BOOST_AUTO_TEST_CASE(DepthwiseConvolutionExplicitPaddingDilation)
+DOCTEST_TEST_CASE("DepthwiseConvolutionExplicitPaddingDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = true;
@@ -71,7 +70,7 @@ BOOST_AUTO_TEST_CASE(DepthwiseConvolutionExplicitPaddingDilation)
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-BOOST_AUTO_TEST_CASE(DepthwiseConvolutionImplicitPaddingNoDilation)
+DOCTEST_TEST_CASE("DepthwiseConvolutionImplicitPaddingNoDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = true;
@@ -81,7 +80,7 @@ BOOST_AUTO_TEST_CASE(DepthwiseConvolutionImplicitPaddingNoDilation)
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-BOOST_AUTO_TEST_CASE(DepthwiseConvolutionImplicitPaddingDilation)
+DOCTEST_TEST_CASE("DepthwiseConvolutionImplicitPaddingDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = true;
@@ -91,4 +90,4 @@ BOOST_AUTO_TEST_CASE(DepthwiseConvolutionImplicitPaddingDilation)
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/test/1.2/Lstm.cpp b/test/1.2/Lstm.cpp
index 03f7fe47..7a2b3942 100644
--- a/test/1.2/Lstm.cpp
+++ b/test/1.2/Lstm.cpp
@@ -1,51 +1,72 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../Lstm.hpp"
-#include <boost/test/data/test_case.hpp>
-
-BOOST_AUTO_TEST_SUITE(LstmTests)
-
using namespace armnn_driver;
-BOOST_DATA_TEST_CASE(LstmNoCifgNoPeepholeNoProjectionTest, COMPUTE_DEVICES)
+#if defined(ARMNNREF_ENABLED)
+DOCTEST_TEST_SUITE("LstmTests_1.2_CpuRef")
{
- LstmNoCifgNoPeepholeNoProjection<hal_1_2::HalPolicy>(sample);
-}
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionTest, COMPUTE_DEVICES)
-{
- LstmCifgPeepholeNoProjection<hal_1_2::HalPolicy>(sample);
-}
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.2_armnn::Compute::CpuRef")
+ {
+ LstmNoCifgNoPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+ }
-BOOST_DATA_TEST_CASE(LstmNoCifgPeepholeProjectionTest, COMPUTE_DEVICES)
-{
- LstmNoCifgPeepholeProjection<hal_1_2::HalPolicy>(sample);
-}
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.2_CpuRef")
+ {
+ LstmCifgPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+ }
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionBatch2Test, COMPUTE_DEVICES)
-{
- LstmCifgPeepholeNoProjectionBatch2<hal_1_2::HalPolicy>(sample);
-}
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.2_CpuRef")
+ {
+ LstmNoCifgPeepholeProjection<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+ }
-BOOST_DATA_TEST_CASE(LstmNoCifgPeepholeProjectionNoClippingLayerNormTest, COMPUTE_DEVICES)
-{
- LstmNoCifgPeepholeProjectionNoClippingLayerNorm<hal_1_2::HalPolicy>(sample);
-}
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.2_CpuRef")
+ {
+ LstmCifgPeepholeNoProjectionBatch2<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("QuantizedLstmTest_1.2_CpuRef")
+ {
+ QuantizedLstm<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+ }
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeProjectionNoClippingLayerNormTest, COMPUTE_DEVICES)
-{
- LstmCifgPeepholeProjectionNoClippingLayerNorm<hal_1_2::HalPolicy>(sample);
}
+#endif
#if defined(ARMCOMPUTECL_ENABLED)
-BOOST_DATA_TEST_CASE(QuantizedLstmTest, COMPUTE_DEVICES)
+DOCTEST_TEST_SUITE("LstmTests_1.2_GpuAcc")
{
- QuantizedLstm<hal_1_2::HalPolicy>(sample);
+
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.2_GpuAcc")
+ {
+ LstmNoCifgNoPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.2_GpuAcc")
+ {
+ LstmCifgPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.2_GpuAcc")
+ {
+ LstmNoCifgPeepholeProjection<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.2_GpuAcc")
+ {
+ LstmCifgPeepholeNoProjectionBatch2<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("QuantizedLstmTest_1.2_GpuAcc")
+ {
+ QuantizedLstm<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
+ }
+
}
#endif
-
-BOOST_AUTO_TEST_SUITE_END()
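
New in the 1.2 file relative to 1.0/1.1: the CpuRef suite itself is now guarded by `ARMNNREF_ENABLED`, matching the `-DARMNNREF_ENABLED` / `-DARMCOMPUTECL_ENABLED` flags this patch adds to `test/Android.mk` further down. The skeleton the suites follow, with the backend macros assumed to come from the build:

```cpp
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

// Each backend's suite only exists when that backend was compiled in, so a
// build without the reference backend or OpenCL simply contains fewer tests.
#if defined(ARMNNREF_ENABLED)
DOCTEST_TEST_SUITE("Sketch_CpuRef")
{
    DOCTEST_TEST_CASE("RunsOnReferenceBackend") { DOCTEST_CHECK(true); }
}
#endif

#if defined(ARMCOMPUTECL_ENABLED)
DOCTEST_TEST_SUITE("Sketch_GpuAcc")
{
    DOCTEST_TEST_CASE("RunsOnClBackend") { DOCTEST_CHECK(true); }
}
#endif
```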
diff --git a/test/1.2/Mean.cpp b/test/1.2/Mean.cpp
new file mode 100644
index 00000000..a2a8b7a1
--- /dev/null
+++ b/test/1.2/Mean.cpp
@@ -0,0 +1,204 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../DriverTestHelpers.hpp"
+#include "../TestHalfTensor.hpp"
+
+#include <1.2/HalPolicy.hpp>
+
+#include <array>
+
+using Half = half_float::half;
+
+using namespace android::hardware;
+using namespace driverTestHelpers;
+using namespace armnn_driver;
+
+using HalPolicy = hal_1_2::HalPolicy;
+using RequestArgument = V1_0::RequestArgument;
+
+namespace
+{
+
+void MeanTestImpl(const TestHalfTensor& input,
+ const hidl_vec<uint32_t>& axisDimensions,
+ const int32_t* axisValues,
+ int32_t keepDims,
+ const TestHalfTensor& expectedOutput,
+ bool fp16Enabled,
+ armnn::Compute computeDevice)
+{
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice, fp16Enabled));
+
+ HalPolicy::Model model = {};
+
+ AddInputOperand<HalPolicy>(model, input.GetDimensions(), V1_2::OperandType::TENSOR_FLOAT16);
+
+ AddTensorOperand<HalPolicy>(model,
+ axisDimensions,
+ const_cast<int32_t*>(axisValues),
+ HalPolicy::OperandType::TENSOR_INT32);
+
+ AddIntOperand<HalPolicy>(model, keepDims);
+
+ AddOutputOperand<HalPolicy>(model, expectedOutput.GetDimensions(), V1_2::OperandType::TENSOR_FLOAT16);
+
+ model.operations.resize(1);
+ model.operations[0].type = HalPolicy::OperationType::MEAN;
+ model.operations[0].inputs = hidl_vec<uint32_t>{ 0, 1, 2 };
+ model.operations[0].outputs = hidl_vec<uint32_t>{ 3 };
+ model.relaxComputationFloat32toFloat16 = fp16Enabled;
+
+ //android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, *driver);
+ android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, *driver);
+
+ // The request's memory pools will follow the same order as the inputs
+ V1_0::DataLocation inLoc = {};
+ inLoc.poolIndex = 0;
+ inLoc.offset = 0;
+ inLoc.length = input.GetNumElements() * sizeof(Half);
+ RequestArgument inArg = {};
+ inArg.location = inLoc;
+ inArg.dimensions = input.GetDimensions();
+
+ // An additional memory pool is needed for the output
+ V1_0::DataLocation outLoc = {};
+ outLoc.poolIndex = 1;
+ outLoc.offset = 0;
+ outLoc.length = expectedOutput.GetNumElements() * sizeof(Half);
+ RequestArgument outArg = {};
+ outArg.location = outLoc;
+ outArg.dimensions = expectedOutput.GetDimensions();
+
+ // Make the request based on the arguments
+ V1_0::Request request = {};
+ request.inputs = hidl_vec<RequestArgument>{ inArg };
+ request.outputs = hidl_vec<RequestArgument>{ outArg };
+
+ // Set the input data
+ AddPoolAndSetData(input.GetNumElements(), request, input.GetData());
+
+ // Add memory for the output
+ android::sp<IMemory> outMemory = AddPoolAndGetData<Half>(expectedOutput.GetNumElements(), request);
+ const Half* outputData = static_cast<const Half*>(static_cast<void*>(outMemory->getPointer()));
+
+ if (preparedModel.get() != nullptr)
+ {
+ V1_0::ErrorStatus execStatus = Execute(preparedModel, request);
+ DOCTEST_CHECK((int)execStatus == (int)V1_0::ErrorStatus::NONE);
+ }
+
+ const Half* expectedOutputData = expectedOutput.GetData();
+ for (unsigned int i = 0; i < expectedOutput.GetNumElements(); i++)
+ {
+ DOCTEST_CHECK(outputData[i] == expectedOutputData[i]);
+ }
+}
+
+} // anonymous namespace
+
+DOCTEST_TEST_SUITE("MeanTests_1.2_CpuRef")
+{
+
+DOCTEST_TEST_CASE("MeanFp16NoKeepDimsTest_CpuRef")
+{
+ using namespace half_float::literal;
+
+ TestHalfTensor input{ armnn::TensorShape{ 4, 3, 2 },
+ { 1.0_h, 2.0_h, 3.0_h, 4.0_h, 5.0_h, 6.0_h, 7.0_h, 8.0_h, 9.0_h, 10.0_h,
+ 11.0_h, 12.0_h, 13.0_h, 14.0_h, 15.0_h, 16.0_h, 17.0_h, 18.0_h, 19.0_h,
+ 20.0_h, 21.0_h, 22.0_h, 23.0_h, 24.0_h } };
+ hidl_vec<uint32_t> axisDimensions = { 2 };
+ int32_t axisValues[] = { 0, 1 };
+ int32_t keepDims = 0;
+ TestHalfTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0_h, 13.0_h } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("MeanFp16KeepDimsTest_CpuRef")
+{
+ using namespace half_float::literal;
+
+ TestHalfTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0_h, 1.0_h, 2.0_h, 2.0_h, 3.0_h, 3.0_h } };
+ hidl_vec<uint32_t> axisDimensions = { 1 };
+ int32_t axisValues[] = { 2 };
+ int32_t keepDims = 1;
+ TestHalfTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0_h, 2.0_h } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuRef);
+}
+
+}
+
+#ifdef ARMCOMPUTECL_ENABLED
+DOCTEST_TEST_SUITE("MeanTests_1.2_CpuAcc")
+{
+ DOCTEST_TEST_CASE("MeanFp16NoKeepDimsTest_CpuAcc")
+ {
+ using namespace half_float::literal;
+
+ std::vector<Half> in = { 1.0_h, 2.0_h, 3.0_h, 4.0_h, 5.0_h, 6.0_h, 7.0_h, 8.0_h, 9.0_h, 10.0_h,
+ 11.0_h, 12.0_h, 13.0_h, 14.0_h, 15.0_h, 16.0_h, 17.0_h, 18.0_h, 19.0_h,
+ 20.0_h, 21.0_h, 22.0_h, 23.0_h, 24.0_h };
+ TestHalfTensor input{ armnn::TensorShape{ 4, 3, 2 },
+ in};
+ hidl_vec<uint32_t> axisDimensions = { 2 };
+ int32_t axisValues[] = { 0, 1 };
+ int32_t keepDims = 0;
+ std::vector<Half> out = { 12.0_h, 13.0_h };
+ TestHalfTensor expectedOutput{ armnn::TensorShape{ 2 }, out };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("MeanFp16KeepDimsTest_CpuAcc")
+ {
+ using namespace half_float::literal;
+
+ std::vector<Half> in = { 1.0_h, 1.0_h, 2.0_h, 2.0_h, 3.0_h, 3.0_h };
+ TestHalfTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, in };
+ hidl_vec<uint32_t> axisDimensions = { 1 };
+ int32_t axisValues[] = { 2 };
+ int32_t keepDims = 1;
+ std::vector<Half> out = { 2.0_h, 2.0_h };
+ TestHalfTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, out };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuAcc);
+ }
+}
+
+DOCTEST_TEST_SUITE("MeanTests_1.2_GpuAcc")
+{
+ DOCTEST_TEST_CASE("MeanFp16NoKeepDimsTest_GpuAcc")
+ {
+ using namespace half_float::literal;
+
+ TestHalfTensor input{ armnn::TensorShape{ 4, 3, 2 },
+ { 1.0_h, 2.0_h, 3.0_h, 4.0_h, 5.0_h, 6.0_h, 7.0_h, 8.0_h, 9.0_h, 10.0_h,
+ 11.0_h, 12.0_h, 13.0_h, 14.0_h, 15.0_h, 16.0_h, 17.0_h, 18.0_h, 19.0_h,
+ 20.0_h, 21.0_h, 22.0_h, 23.0_h, 24.0_h } };
+ hidl_vec<uint32_t> axisDimensions = { 2 };
+ int32_t axisValues[] = { 0, 1 };
+ int32_t keepDims = 0;
+ TestHalfTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0_h, 13.0_h } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::GpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("MeanFp16KeepDimsTest_GpuAcc")
+ {
+ using namespace half_float::literal;
+
+ TestHalfTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0_h, 1.0_h, 2.0_h, 2.0_h, 3.0_h, 3.0_h } };
+ hidl_vec<uint32_t> axisDimensions = { 1 };
+ int32_t axisValues[] = { 2 };
+ int32_t keepDims = 1;
+ TestHalfTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, { 2.0_h, 2.0_h } };
+
+ MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::GpuAcc);
+ }
+}
+#endif
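
The `Half` type and the `_h` literals in this new file come from the third-party half-precision library bundled with Arm NN (`half_float::half`). A minimal usage sketch; the include path is an assumption and varies by checkout:

```cpp
#include <half/half.hpp>   // assumed path to the bundled half_float library
#include <cassert>

using Half = half_float::half;

int main()
{
    using namespace half_float::literal;
    Half a = 12.0_h;                      // fp16 user-defined literal
    Half b(13.0f);                        // explicit conversion from float
    assert(static_cast<float>(a) < static_cast<float>(b));
    assert(sizeof(Half) == 2);            // matches TENSOR_FLOAT16 operand storage
    return 0;
}
```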
diff --git a/test/1.2/UnidirectionalSequenceLstm.cpp b/test/1.2/UnidirectionalSequenceLstm.cpp
new file mode 100644
index 00000000..fd35aa41
--- /dev/null
+++ b/test/1.2/UnidirectionalSequenceLstm.cpp
@@ -0,0 +1,40 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../UnidirectionalSequenceLstm.hpp"
+
+using namespace armnn_driver;
+
+DOCTEST_TEST_SUITE("UnidirectionalSequenceLstmTests_1.2_CpuRef")
+{
+
+ DOCTEST_TEST_CASE("UnidirectionalSequenceLstmLayerFloat32Test_1.2_CpuRef")
+ {
+ UnidirectionalSequenceLstmLayerFloat32TestImpl<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("UnidirectionalSequenceLstmLayerFloat32TimeMajorTest_1.2_CpuRef")
+ {
+ UnidirectionalSequenceLstmLayerFloat32TimeMajorTestImpl<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionTest_1.2_CpuRef")
+ {
+ UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionTestImpl<hal_1_2::HalPolicy>
+ (armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTest_1.2_CpuRef")
+ {
+ UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<hal_1_2::HalPolicy>
+ (armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest_1.2_CpuRef")
+ {
+ UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTestImpl<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+ }
+
+}
\ No newline at end of file
diff --git a/test/1.3/QLstm.cpp b/test/1.3/QLstm.cpp
index 27e52a60..08466195 100644
--- a/test/1.3/QLstm.cpp
+++ b/test/1.3/QLstm.cpp
@@ -1,23 +1,14 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../DriverTestHelpers.hpp"
-#include "../TestTensor.hpp"
-#include "../1.3/HalPolicy.hpp"
-
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <boost/test/unit_test.hpp>
-#include <boost/test/data/test_case.hpp>
-#include <boost/math/special_functions/relative_difference.hpp>
+#include <1.3/HalPolicy.hpp>
#include <array>
-BOOST_AUTO_TEST_SUITE(QLSTMTests)
-
using ArmnnDriver = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;
@@ -26,6 +17,8 @@ using namespace android::hardware;
using HalPolicy = hal_1_3::HalPolicy;
+static const float TOLERANCE = 1.0f;
+
namespace
{
@@ -42,26 +35,6 @@ RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int
return inputRequestArgument;
}
-// Returns true if the relative difference between two float values is less than the tolerance value given.
-// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work!
-bool TolerantCompareEqual(float a, float b, float tolerance = 1.0f)
-{
- float rd;
- if (a == 0.0f)
- {
- rd = fabs(b);
- }
- else if (b == 0.0f)
- {
- rd = fabs(a);
- }
- else
- {
- rd = boost::math::relative_difference(a, b);
- }
- return rd < tolerance;
-}
-
// Helper function to create an OperandLifeTime::NO_VALUE for testing.
// To be used on optional input operands that have no values - these are valid and should be tested.
HalPolicy::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
@@ -85,12 +58,6 @@ void ExecuteModel(const armnn_driver::hal_1_3::HalPolicy::Model& model,
}
}
-#ifndef ARMCOMPUTECL_ENABLED
-static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
-static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::CpuAcc }};
-#endif
-
// Add our own tests here since we skip the qlstm tests which Google supplies (because of non-const weights)
void QLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
const std::vector<int8_t>& inputValue,
@@ -527,8 +494,9 @@ void QLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
// check the results
for (size_t i = 0; i < outputStateOutValue.size(); ++i)
{
- BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
- "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
+ DOCTEST_CHECK_MESSAGE(outputStateOutValue[i] == doctest::Approx( outputStateOutData[i] ).epsilon(TOLERANCE),
+ "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != "
+ << outputStateOutData[i]);
}
// CELL STATE OUTPUT Does not match currently: IVGCVSW-4860 Verify remaining VTS tests (2) for QLSTM
@@ -541,8 +509,8 @@ void QLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
for (size_t i = 0; i < outputValue.size(); ++i)
{
- BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
- "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+ DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ).epsilon(TOLERANCE),
+ "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
}
}
@@ -1028,19 +996,38 @@ void DynamicOutputQLstmWithNoProjection(armnn::Compute compute)
} // anonymous namespace
// Support is not added yet
-//BOOST_DATA_TEST_CASE(QLSTMWithProjectionTest, COMPUTE_DEVICES)
+//TEST_CASE(QLSTMWithProjectionTest, COMPUTE_DEVICES)
//{
// QLstmWithProjection(sample);
//}
-BOOST_DATA_TEST_CASE(QLSTMWithNoProjectionTest, COMPUTE_DEVICES)
+DOCTEST_TEST_SUITE("QLSTMTests_CpuRef")
{
- QLstmWithNoProjection(sample);
-}
-BOOST_DATA_TEST_CASE(DynamicOutputQLSTMWithNoProjectionTest, COMPUTE_DEVICES)
-{
- DynamicOutputQLstmWithNoProjection(sample);
+ DOCTEST_TEST_CASE("QLSTMWithNoProjectionTest_CpuRef")
+ {
+ QLstmWithNoProjection(armnn::Compute::CpuRef);
+ }
+
+ DOCTEST_TEST_CASE("DynamicOutputQLstmWithNoProjection_CpuRef")
+ {
+ DynamicOutputQLstmWithNoProjection(armnn::Compute::CpuRef);
+ }
+
}
+#ifdef ARMCOMPUTECL_ENABLED
+DOCTEST_TEST_SUITE("QLSTMTests_CpuAcc")
+{
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+ DOCTEST_TEST_CASE("QLSTMWithNoProjectionTest_CpuAcc")
+ {
+ QLstmWithNoProjection(armnn::Compute::CpuAcc);
+ }
+
+ DOCTEST_TEST_CASE("DynamicOutputQLstmWithNoProjection_CpuAcc")
+ {
+ DynamicOutputQLstmWithNoProjection(armnn::Compute::CpuAcc);
+ }
+
+}
+#endif
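
The removed `TolerantCompareEqual` helper is replaced by `doctest::Approx` with `TOLERANCE = 1.0f` as the epsilon. In doctest (as in Catch), epsilon is a relative bound, roughly `|a - b| < epsilon * (scale + max(|a|, |b|))`, so an epsilon of 1.0 is deliberately loose, mirroring the old `boost::math::relative_difference(a, b) < 1.0` check. A sketch of the behaviour:

```cpp
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

static const float TOLERANCE = 1.0f;     // same value the patch introduces

DOCTEST_TEST_CASE("ApproxToleranceSketch")
{
    // Loose: |10 - 15| = 5 is well under 1.0 * max(10, 15), so this passes.
    DOCTEST_CHECK(10.0f == doctest::Approx(15.0f).epsilon(TOLERANCE));

    // Tight: a 1% relative epsilon still accepts a 0.1% deviation.
    DOCTEST_CHECK_MESSAGE(1.0f == doctest::Approx(1.001f).epsilon(0.01),
                          "within 1% relative tolerance");
}
```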
diff --git a/test/1.3/QosTests.cpp b/test/1.3/QosTests.cpp
index 9fd66880..cd8ac33c 100644
--- a/test/1.3/QosTests.cpp
+++ b/test/1.3/QosTests.cpp
@@ -4,18 +4,11 @@
//
#include "../DriverTestHelpers.hpp"
-#include "../TestTensor.hpp"
-#include "../1.3/HalPolicy.hpp"
-
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <boost/test/unit_test.hpp>
-#include <boost/test/data/test_case.hpp>
-
-
-BOOST_AUTO_TEST_SUITE(QosTests)
+#include <1.3/HalPolicy.hpp>
+DOCTEST_TEST_SUITE("QosTests")
+{
using ArmnnDriver = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;
@@ -40,13 +33,7 @@ void ExecuteModel(const armnn_driver::hal_1_3::HalPolicy::Model& model,
}
}
-#ifndef ARMCOMPUTECL_ENABLED
-static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
-static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::CpuAcc }};
-#endif
-
-BOOST_AUTO_TEST_CASE(ConcurrentExecuteWithQosPriority)
+DOCTEST_TEST_CASE("ConcurrentExecuteWithQosPriority")
{
ALOGI("ConcurrentExecuteWithQOSPriority: entry");
@@ -102,7 +89,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecuteWithQosPriority)
preparedModelsSize++;
}
- BOOST_TEST(maxRequests == preparedModelsSize);
+ DOCTEST_CHECK(maxRequests == preparedModelsSize);
// construct the request data
V1_0::DataLocation inloc = {};
@@ -162,7 +149,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecuteWithQosPriority)
ALOGI("ConcurrentExecuteWithQOSPriority: waiting for callbacks");
for (size_t i = 0; i < maxRequests; ++i)
{
- ARMNN_ASSERT(cb[i]);
+ DOCTEST_CHECK(cb[i]);
cb[i]->wait();
}
@@ -172,15 +159,15 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecuteWithQosPriority)
{
if (i < 15)
{
- BOOST_TEST(outdata[i][0] == 152);
+ DOCTEST_CHECK(outdata[i][0] == 152);
}
else if (i < 30)
{
- BOOST_TEST(outdata[i][0] == 141);
+ DOCTEST_CHECK(outdata[i][0] == 141);
}
else
{
- BOOST_TEST(outdata[i][0] == 159);
+ DOCTEST_CHECK(outdata[i][0] == 159);
}
}
@@ -189,4 +176,4 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecuteWithQosPriority)
} // anonymous namespace
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/test/Android.mk b/test/Android.mk
index 1da26e40..8621182c 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -1,5 +1,5 @@
#
-# Copyright © 2017 ARM Ltd. All rights reserved.
+# Copyright © 2017, 2022 ARM Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -9,6 +9,7 @@ LOCAL_PATH := $(call my-dir)
#
OPENCL_HEADER_PATH := $(LOCAL_PATH)/../../mali/product/khronos/original
ARMNN_HEADER_PATH := $(LOCAL_PATH)/../armnn/include
+ARMNN_PROFILING_PATH := $(LOCAL_PATH)/../armnn/profiling
ARMNN_THIRD_PARTY_PATH := $(LOCAL_PATH)/../armnn/third-party
ARMNN_UTILS_HEADER_PATH := $(LOCAL_PATH)/../armnn/src/armnnUtils
ARMNN_DRIVER_HEADER_PATH := $(LOCAL_PATH)/..
@@ -39,6 +40,7 @@ LOCAL_C_INCLUDES := \
$(OPENCL_HEADER_PATH) \
$(NN_HEADER_PATH) \
$(ARMNN_HEADER_PATH) \
+ $(ARMNN_PROFILING_PATH) \
$(ARMNN_THIRD_PARTY_PATH) \
$(ARMNN_UTILS_HEADER_PATH) \
$(ARMNN_DRIVER_HEADER_PATH)
@@ -50,6 +52,22 @@ LOCAL_CFLAGS := \
-O0 \
-UNDEBUG
+# The variable to enable/disable the CL backend (ARMNN_COMPUTE_CL_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMCOMPUTECL_ENABLED
+endif # ARMNN_COMPUTE_CL_ENABLED == 1
+# The variable to enable/disable the NEON backend (ARMNN_COMPUTE_NEON_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_COMPUTE_NEON_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMCOMPUTENEON_ENABLED
+endif # ARMNN_COMPUTE_NEON_ENABLED == 1
+# The variable to enable/disable the REFERENCE backend (ARMNN_REF_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_REF_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMNNREF_ENABLED
+endif # ARMNN_REF_ENABLED == 1
+
# Required to build with the changes made to the Android ML framework specific to Android R
ifeq ($(ANDROID_R),1)
LOCAL_CFLAGS+= \
@@ -61,11 +79,6 @@ LOCAL_CFLAGS+= \
-DARMNN_ANDROID_S
endif # S or later
-ifeq ($(Q_OR_LATER),1)
-LOCAL_CFLAGS += \
- -DBOOST_NO_AUTO_PTR
-endif # PLATFORM_VERSION == Q or later
-
LOCAL_SRC_FILES := \
1.0/Convolution2D.cpp \
1.0/FullyConnectedReshape.cpp \
@@ -78,13 +91,14 @@ LOCAL_SRC_FILES := \
DriverTestHelpers.cpp \
SystemProperties.cpp \
Concat.cpp \
- TestTensor.cpp
+ TestTensor.cpp \
+ TestHalfTensor.cpp
LOCAL_STATIC_LIBRARIES := \
libneuralnetworks_common \
- libboost_unit_test_framework \
libflatbuffers-framework \
- arm_compute_library
+ arm_compute_library \
+ $(ARMNN_BACKEND_STATIC_LIBRARIES)
LOCAL_WHOLE_STATIC_LIBRARIES := \
libarmnn-driver@1.0
@@ -155,6 +169,7 @@ LOCAL_C_INCLUDES := \
$(OPENCL_HEADER_PATH) \
$(NN_HEADER_PATH) \
$(ARMNN_HEADER_PATH) \
+ $(ARMNN_PROFILING_PATH) \
$(ARMNN_THIRD_PARTY_PATH) \
$(ARMNN_UTILS_HEADER_PATH) \
$(ARMNN_DRIVER_HEADER_PATH)
@@ -167,6 +182,22 @@ LOCAL_CFLAGS := \
-UNDEBUG \
-DARMNN_ANDROID_NN_V1_1
+# The variable to enable/disable the CL backend (ARMNN_COMPUTE_CL_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMCOMPUTECL_ENABLED
+endif # ARMNN_COMPUTE_CL_ENABLED == 1
+# The variable to enable/disable the NEON backend (ARMNN_COMPUTE_NEON_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_COMPUTE_NEON_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMCOMPUTENEON_ENABLED
+endif # ARMNN_COMPUTE_NEON_ENABLED == 1
+# The variable to enable/disable the REFERENCE backend (ARMNN_REF_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_REF_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMNNREF_ENABLED
+endif # ARMNN_REF_ENABLED == 1
+
# Required to build with the changes made to the Android ML framework specific to Android R
ifeq ($(ANDROID_R),1)
LOCAL_CFLAGS+= \
@@ -178,11 +209,6 @@ LOCAL_CFLAGS+= \
-DARMNN_ANDROID_S
endif # S or later
-ifeq ($(Q_OR_LATER),1)
-LOCAL_CFLAGS += \
- -DBOOST_NO_AUTO_PTR
-endif # PLATFORM_VERSION == Q or later
-
LOCAL_SRC_FILES := \
1.0/Convolution2D.cpp \
1.1/Convolution2D.cpp \
@@ -198,13 +224,14 @@ LOCAL_SRC_FILES := \
DriverTestHelpers.cpp \
SystemProperties.cpp \
Concat.cpp \
- TestTensor.cpp
+ TestTensor.cpp \
+ TestHalfTensor.cpp
LOCAL_STATIC_LIBRARIES := \
libneuralnetworks_common \
- libboost_unit_test_framework \
libflatbuffers-framework \
- arm_compute_library
+ arm_compute_library \
+ $(ARMNN_BACKEND_STATIC_LIBRARIES)
LOCAL_WHOLE_STATIC_LIBRARIES := \
libarmnn-driver@1.1
@@ -267,6 +294,7 @@ LOCAL_C_INCLUDES := \
$(OPENCL_HEADER_PATH) \
$(NN_HEADER_PATH) \
$(ARMNN_HEADER_PATH) \
+ $(ARMNN_PROFILING_PATH) \
$(ARMNN_THIRD_PARTY_PATH) \
$(ARMNN_UTILS_HEADER_PATH) \
$(ARMNN_DRIVER_HEADER_PATH)
@@ -277,9 +305,24 @@ LOCAL_CFLAGS := \
-Werror \
-O0 \
-UNDEBUG \
- -DBOOST_NO_AUTO_PTR \
-DARMNN_ANDROID_NN_V1_2
+# The variable to enable/disable the CL backend (ARMNN_COMPUTE_CL_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMCOMPUTECL_ENABLED
+endif # ARMNN_COMPUTE_CL_ENABLED == 1
+# The variable to enable/disable the NEON backend (ARMNN_COMPUTE_NEON_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_COMPUTE_NEON_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMCOMPUTENEON_ENABLED
+endif # ARMNN_COMPUTE_NEON_ENABLED == 1
+# The variable to enable/disable the REFERENCE backend (ARMNN_REF_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_REF_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMNNREF_ENABLED
+endif # ARMNN_REF_ENABLED == 1
+
# Required to build with the changes made to the Android ML framework specific to Android R
ifeq ($(ANDROID_R),1)
LOCAL_CFLAGS+= \
@@ -298,9 +341,11 @@ LOCAL_SRC_FILES := \
1.1/Transpose.cpp \
1.2/Dilation.cpp \
1.2/Capabilities.cpp \
+ 1.2/Mean.cpp \
1.0/Lstm.cpp \
1.1/Lstm.cpp \
1.2/Lstm.cpp \
+ 1.2/UnidirectionalSequenceLstm.cpp \
Tests.cpp \
UtilsTests.cpp \
Concurrent.cpp \
@@ -309,13 +354,14 @@ LOCAL_SRC_FILES := \
DriverTestHelpers.cpp \
SystemProperties.cpp \
Concat.cpp \
- TestTensor.cpp
+ TestTensor.cpp \
+ TestHalfTensor.cpp
LOCAL_STATIC_LIBRARIES := \
libneuralnetworks_common \
- libboost_unit_test_framework \
libflatbuffers-framework \
- arm_compute_library
+ arm_compute_library \
+ $(ARMNN_BACKEND_STATIC_LIBRARIES)
LOCAL_WHOLE_STATIC_LIBRARIES := \
libarmnn-driver@1.2
@@ -374,6 +420,7 @@ LOCAL_C_INCLUDES := \
$(OPENCL_HEADER_PATH) \
$(NN_HEADER_PATH) \
$(ARMNN_HEADER_PATH) \
+ $(ARMNN_PROFILING_PATH) \
$(ARMNN_THIRD_PARTY_PATH) \
$(ARMNN_UTILS_HEADER_PATH) \
$(ARMNN_DRIVER_HEADER_PATH)
@@ -384,9 +431,24 @@ LOCAL_CFLAGS := \
-Werror \
-O0 \
-UNDEBUG \
- -DBOOST_NO_AUTO_PTR \
-DARMNN_ANDROID_NN_V1_3
+# The variable to enable/disable the CL backend (ARMNN_COMPUTE_CL_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMCOMPUTECL_ENABLED
+endif # ARMNN_COMPUTE_CL_ENABLED == 1
+# The variable to enable/disable the NEON backend (ARMNN_COMPUTE_NEON_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_COMPUTE_NEON_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMCOMPUTENEON_ENABLED
+endif # ARMNN_COMPUTE_NEON_ENABLED == 1
+# The variable to enable/disable the REFERENCE backend (ARMNN_REF_ENABLED) is declared in android-nn-driver/Android.mk
+ifeq ($(ARMNN_REF_ENABLED),1)
+LOCAL_CFLAGS += \
+ -DARMNNREF_ENABLED
+endif # ARMNN_REF_ENABLED == 1
+
ifeq ($(ANDROID_R),1)
LOCAL_CFLAGS+= \
-DARMNN_ANDROID_R
@@ -404,9 +466,11 @@ LOCAL_SRC_FILES := \
1.1/Transpose.cpp \
1.2/Dilation.cpp \
1.2/Capabilities.cpp \
+ 1.2/Mean.cpp \
1.0/Lstm.cpp \
1.1/Lstm.cpp \
1.2/Lstm.cpp \
+ 1.2/UnidirectionalSequenceLstm.cpp \
1.3/QLstm.cpp \
1.3/QosTests.cpp \
Tests.cpp \
@@ -417,13 +481,14 @@ LOCAL_SRC_FILES := \
DriverTestHelpers.cpp \
SystemProperties.cpp \
Concat.cpp \
- TestTensor.cpp
+ TestTensor.cpp \
+ TestHalfTensor.cpp
LOCAL_STATIC_LIBRARIES := \
libneuralnetworks_common \
- libboost_unit_test_framework \
libflatbuffers-framework \
- arm_compute_library
+ arm_compute_library \
+ $(ARMNN_BACKEND_STATIC_LIBRARIES)
LOCAL_WHOLE_STATIC_LIBRARIES := \
libarmnn-driver@1.3
diff --git a/test/Concat.cpp b/test/Concat.cpp
index 54ee8a23..fc4a56cf 100644
--- a/test/Concat.cpp
+++ b/test/Concat.cpp
@@ -1,21 +1,14 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "DriverTestHelpers.hpp"
#include "TestTensor.hpp"
-#include "../1.0/HalPolicy.hpp"
-
-#include <boost/test/unit_test.hpp>
-#include <boost/test/data/test_case.hpp>
-
#include <array>
#include <log/log.h>
-
-BOOST_AUTO_TEST_SUITE(ConcatTests)
-
using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
@@ -26,12 +19,6 @@ using RequestArgument = V1_0::RequestArgument;
namespace
{
-#ifndef ARMCOMPUTECL_ENABLED
- static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
- static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
-#endif
-
void
ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
int32_t concatAxis,
@@ -61,19 +48,19 @@ ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};
// make the prepared model
- V1_0::ErrorStatus prepareStatus=V1_0::ErrorStatus::NONE;
+ V1_0::ErrorStatus prepareStatus = V1_0::ErrorStatus::NONE;
android::sp<V1_0::IPreparedModel> preparedModel = PrepareModelWithStatus(model,
*driver,
prepareStatus,
expectedPrepareStatus);
- BOOST_TEST(prepareStatus == expectedPrepareStatus);
+ DOCTEST_CHECK((int)prepareStatus == (int)expectedPrepareStatus);
if (prepareStatus != V1_0::ErrorStatus::NONE)
{
// prepare failed, we cannot continue
return;
}
- BOOST_TEST(preparedModel.get() != nullptr);
+ DOCTEST_CHECK(preparedModel.get() != nullptr);
if (preparedModel.get() == nullptr)
{
// don't spoil other tests if prepare failed
@@ -130,9 +117,9 @@ ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
// run the execution
- ARMNN_ASSERT(preparedModel.get() != nullptr);
+ DOCTEST_CHECK(preparedModel.get() != nullptr);
auto execStatus = Execute(preparedModel, request, expectedExecStatus);
- BOOST_TEST(execStatus == expectedExecStatus);
+ DOCTEST_CHECK((int)execStatus == (int)expectedExecStatus);
if (execStatus == V1_0::ErrorStatus::NONE)
{
@@ -140,359 +127,607 @@ ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
const float * expectedOutput = expectedOutputTensor.GetData();
for (unsigned int i=0; i<expectedOutputTensor.GetNumElements();++i)
{
- BOOST_TEST(outdata[i] == expectedOutput[i]);
+ DOCTEST_CHECK(outdata[i] == expectedOutput[i]);
}
}
}
-} // namespace <anonymous>
-
-
-BOOST_DATA_TEST_CASE(SimpleConcatAxis0, COMPUTE_DEVICES)
+// Test case implementations, parameterised on the compute device.
+void SimpleConcatAxis0(armnn::Compute computeDevice)
{
int32_t axis = 0;
- TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
- TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
- TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};
- TestTensor expected{armnn::TensorShape{3,1,1,1},{0,1,2}};
-
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ TestTensor expected{armnn::TensorShape{3, 1, 1, 1}, {0, 1, 2}};
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(ConcatAxis0_NoInterleave, COMPUTE_DEVICES)
+void ConcatAxis0NoInterleave(armnn::Compute computeDevice)
{
int32_t axis = 0;
- TestTensor aIn{armnn::TensorShape{2,1,2,1},{0, 1,
- 2, 3}};
- TestTensor bIn{armnn::TensorShape{3,1,2,1},{4, 5,
- 6, 7,
- 8, 9}};
- TestTensor cIn{armnn::TensorShape{1,1,2,1},{10, 11}};
-
- TestTensor expected{armnn::TensorShape{6,1,2,1},{0, 1,
- 2, 3,
- 4, 5,
- 6, 7,
- 8, 9,
- 10, 11}};
-
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ TestTensor aIn{armnn::TensorShape{2, 1, 2, 1}, {0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{3, 1, 2, 1}, {4, 5,
+ 6, 7,
+ 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10, 11}};
+
+ TestTensor expected{armnn::TensorShape{6, 1, 2, 1}, {0, 1,
+ 2, 3,
+ 4, 5,
+ 6, 7,
+ 8, 9,
+ 10, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxis1, COMPUTE_DEVICES)
+void SimpleConcatAxis1(armnn::Compute computeDevice)
{
int32_t axis = 1;
- TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
- TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
- TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};
- TestTensor expected{armnn::TensorShape{1,3,1,1},{0,1,2}};
+ TestTensor expected{armnn::TensorShape{1, 3, 1, 1}, {0, 1, 2}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(ConcatAxis1_NoInterleave, COMPUTE_DEVICES)
+void ConcatAxis1NoInterleave(armnn::Compute computeDevice)
{
int32_t axis = 1;
- TestTensor aIn{armnn::TensorShape{1,2,2,1},{0, 1,
- 2, 3}};
- TestTensor bIn{armnn::TensorShape{1,3,2,1},{4, 5,
- 6, 7,
- 8, 9}};
- TestTensor cIn{armnn::TensorShape{1,1,2,1},{10, 11}};
-
- TestTensor expected{armnn::TensorShape{1,6,2,1},{0, 1,
- 2, 3,
- 4, 5,
- 6, 7,
- 8, 9,
- 10, 11}};
-
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ TestTensor aIn{armnn::TensorShape{1, 2, 2, 1}, {0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1, 3, 2, 1}, {4, 5,
+ 6, 7,
+ 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10, 11}};
+
+ TestTensor expected{armnn::TensorShape{1, 6, 2, 1}, {0, 1,
+ 2, 3,
+ 4, 5,
+ 6, 7,
+ 8, 9,
+ 10, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxis1_DoInterleave, COMPUTE_DEVICES)
+void SimpleConcatAxis1DoInterleave(armnn::Compute computeDevice)
{
int32_t axis = 1;
- TestTensor aIn{armnn::TensorShape{2,2,1,1},{0, 1,
- 2, 3}};
- TestTensor bIn{armnn::TensorShape{2,3,1,1},{4, 5, 6,
- 7, 8, 9}};
- TestTensor cIn{armnn::TensorShape{2,1,1,1},{10,
- 11}};
-
- TestTensor expected{armnn::TensorShape{2,6,1,1},{0, 1, 4, 5, 6, 10,
- 2, 3, 7, 8, 9, 11}};
+ TestTensor aIn{armnn::TensorShape{2, 2, 1, 1}, {0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{2, 3, 1, 1}, {4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{2, 1, 1, 1}, {10,
+ 11}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ TestTensor expected{armnn::TensorShape{2, 6, 1, 1}, {0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxis2, COMPUTE_DEVICES)
+void SimpleConcatAxis2(armnn::Compute computeDevice)
{
int32_t axis = 2;
- TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
- TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
- TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};
- TestTensor expected{armnn::TensorShape{1,1,3,1},{0,1,2}};
+ TestTensor expected{armnn::TensorShape{1, 1, 3, 1}, {0, 1, 2}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(ConcatAxis2_NoInterleave, COMPUTE_DEVICES)
+void ConcatAxis2NoInterleave(armnn::Compute computeDevice)
{
int32_t axis = 2;
- TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
- 2, 3}};
- TestTensor bIn{armnn::TensorShape{1,1,3,2},{4, 5,
- 6, 7,
- 8, 9}};
- TestTensor cIn{armnn::TensorShape{1,1,1,2},{10, 11}};
-
- TestTensor expected{armnn::TensorShape{1,1,6,2},{0, 1,
- 2, 3,
- 4, 5,
- 6, 7,
- 8, 9,
- 10, 11}};
-
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 3, 2}, {4, 5,
+ 6, 7,
+ 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 1, 2}, {10, 11}};
+
+ TestTensor expected{armnn::TensorShape{1, 1, 6, 2}, {0, 1,
+ 2, 3,
+ 4, 5,
+ 6, 7,
+ 8, 9,
+ 10, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxis2_DoInterleave, COMPUTE_DEVICES)
+void SimpleConcatAxis2DoInterleave(armnn::Compute computeDevice)
{
int32_t axis = 2;
- TestTensor aIn{armnn::TensorShape{1,2,2,1},{0, 1,
- 2, 3}};
- TestTensor bIn{armnn::TensorShape{1,2,3,1},{4, 5, 6,
- 7, 8, 9}};
- TestTensor cIn{armnn::TensorShape{1,2,1,1},{10,
- 11}};
-
- TestTensor expected{armnn::TensorShape{1,2,6,1},{0, 1, 4, 5, 6, 10,
- 2, 3, 7, 8, 9, 11}};
+ TestTensor aIn{armnn::TensorShape{1, 2, 2, 1}, {0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1, 2, 3, 1}, {4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1, 2, 1, 1}, {10,
+ 11}};
+
+ TestTensor expected{armnn::TensorShape{1, 2, 6, 1}, {0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxis3, COMPUTE_DEVICES)
+void SimpleConcatAxis3(armnn::Compute computeDevice)
{
int32_t axis = 3;
- TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
- TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
- TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};
- TestTensor expected{armnn::TensorShape{1,1,1,3},{0,1,2}};
+ TestTensor expected{armnn::TensorShape{1, 1, 1, 3}, {0, 1, 2}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxis3_DoInterleave, COMPUTE_DEVICES)
+void SimpleConcatAxis3DoInterleave(armnn::Compute computeDevice)
{
int32_t axis = 3;
- TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
- 2, 3}};
- TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
- 7, 8, 9}};
- TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
- 11}};
-
- TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
- 2, 3, 7, 8, 9, 11}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
+ 11}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ TestTensor expected{armnn::TensorShape{1, 1, 2, 6}, {0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(AxisTooBig, COMPUTE_DEVICES)
+void AxisTooBig(armnn::Compute computeDevice)
{
int32_t axis = 4;
- TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
- TestTensor bIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
// The axis must be within the range of [-rank(values), rank(values))
// see: https://www.tensorflow.org/api_docs/python/tf/concat
- TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor uncheckedOutput{armnn::TensorShape{1, 1, 1, 1}, {0}};
V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
- ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
+ ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, computeDevice, expectedParserStatus);
}
-BOOST_DATA_TEST_CASE(AxisTooSmall, COMPUTE_DEVICES)
+void AxisTooSmall(armnn::Compute computeDevice)
{
int32_t axis = -5;
- TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
- TestTensor bIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
// The axis must be within the range of [-rank(values), rank(values))
// see: https://www.tensorflow.org/api_docs/python/tf/concat
- TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor uncheckedOutput{armnn::TensorShape{1, 1, 1, 1}, {0}};
V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
- ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
+ ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, computeDevice, expectedParserStatus);
}
-BOOST_DATA_TEST_CASE(TooFewInputs, COMPUTE_DEVICES)
+void TooFewInputs(armnn::Compute computeDevice)
{
int32_t axis = 0;
- TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
// We need at least two tensors to concatenate
V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
- ConcatTestImpl({&aIn}, axis, aIn, sample, expectedParserStatus);
+ ConcatTestImpl({&aIn}, axis, aIn, computeDevice, expectedParserStatus);
}
-BOOST_DATA_TEST_CASE(MismatchedInputDimensions, COMPUTE_DEVICES)
+void MismatchedInputDimensions(armnn::Compute computeDevice)
{
int32_t axis = 3;
- TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
- 2, 3}};
- TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
- 7, 8, 9}};
- TestTensor mismatched{armnn::TensorShape{1,1,1,1},{10}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
+ 7, 8, 9}};
+ TestTensor mismatched{armnn::TensorShape{1, 1, 1, 1}, {10}};
- TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
- 2, 3, 7, 8, 9, 11}};
+ TestTensor expected{armnn::TensorShape{1, 1, 2, 6}, {0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
// The input dimensions must be compatible
V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
- ConcatTestImpl({&aIn, &bIn, &mismatched}, axis, expected, sample, expectedParserStatus);
+ ConcatTestImpl({&aIn, &bIn, &mismatched}, axis, expected, computeDevice, expectedParserStatus);
}
-BOOST_DATA_TEST_CASE(MismatchedInputRanks, COMPUTE_DEVICES)
+void MismatchedInputRanks(armnn::Compute computeDevice)
{
int32_t axis = 2;
- TestTensor aIn{armnn::TensorShape{1,1,2},{0,1}};
- TestTensor bIn{armnn::TensorShape{1,1},{4}};
- TestTensor expected{armnn::TensorShape{1,1,3},{0,1,4}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 2}, {0, 1}};
+ TestTensor bIn{armnn::TensorShape{1, 1}, {4}};
+ TestTensor expected{armnn::TensorShape{1, 1, 3}, {0, 1, 4}};
// The input dimensions must be compatible
V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
- ConcatTestImpl({&aIn, &bIn}, axis, expected, sample, expectedParserStatus);
+ ConcatTestImpl({&aIn, &bIn}, axis, expected, computeDevice, expectedParserStatus);
}
-BOOST_DATA_TEST_CASE(MismatchedOutputDimensions, COMPUTE_DEVICES)
+void MismatchedOutputDimensions(armnn::Compute computeDevice)
{
int32_t axis = 3;
- TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
- 2, 3}};
- TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
- 7, 8, 9}};
- TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
- 11}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
+ 11}};
- TestTensor mismatched{armnn::TensorShape{1,1,6,2},{0, 1, 4, 5, 6, 10,
- 2, 3, 7, 8, 9, 11}};
+ TestTensor mismatched{armnn::TensorShape{1, 1, 6, 2}, {0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
// The input and output dimensions must be compatible
V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, computeDevice, expectedParserStatus);
}
-BOOST_DATA_TEST_CASE(MismatchedOutputRank, COMPUTE_DEVICES)
+void MismatchedOutputRank(armnn::Compute computeDevice)
{
int32_t axis = 3;
- TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
- 2, 3}};
- TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
- 7, 8, 9}};
- TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
- 11}};
-
- TestTensor mismatched{armnn::TensorShape{6,2},{0, 1, 4, 5, 6, 10,
- 2, 3, 7, 8, 9, 11}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
+ 11}};
+
+ TestTensor mismatched{armnn::TensorShape{6, 2}, {0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
// The input and output ranks must match
V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, computeDevice, expectedParserStatus);
}
-BOOST_DATA_TEST_CASE(ValidNegativeAxis, COMPUTE_DEVICES)
+void ValidNegativeAxis(armnn::Compute computeDevice)
{
// this is the same as 3
// see: https://www.tensorflow.org/api_docs/python/tf/concat
int32_t axis = -1;
- TestTensor aIn{armnn::TensorShape{1,1,2,2},{0, 1,
- 2, 3}};
- TestTensor bIn{armnn::TensorShape{1,1,2,3},{4, 5, 6,
- 7, 8, 9}};
- TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
- 11}};
-
- TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
- 2, 3, 7, 8, 9, 11}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+ 2, 3}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
+ 7, 8, 9}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
+ 11}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ TestTensor expected{armnn::TensorShape{1, 1, 2, 6}, {0, 1, 4, 5, 6, 10,
+ 2, 3, 7, 8, 9, 11}};
+
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxisZero3D, COMPUTE_DEVICES)
+void SimpleConcatAxisZero3D(armnn::Compute computeDevice)
{
int32_t axis = 0;
- TestTensor aIn{armnn::TensorShape{1,1,1},{0}};
- TestTensor bIn{armnn::TensorShape{1,1,1},{1}};
- TestTensor cIn{armnn::TensorShape{1,1,1},{2}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 1}, {1}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 1}, {2}};
- TestTensor expected{armnn::TensorShape{3,1,1},{0,1,2}};
+ TestTensor expected{armnn::TensorShape{3, 1, 1}, {0, 1, 2}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxisOne3D, COMPUTE_DEVICES)
+void SimpleConcatAxisOne3D(armnn::Compute computeDevice)
{
int32_t axis = 1;
- TestTensor aIn{armnn::TensorShape{1,1,1},{0}};
- TestTensor bIn{armnn::TensorShape{1,1,1},{1}};
- TestTensor cIn{armnn::TensorShape{1,1,1},{2}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 1}, {1}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 1}, {2}};
- TestTensor expected{armnn::TensorShape{1,3,1},{0,1,2}};
+ TestTensor expected{armnn::TensorShape{1, 3, 1}, {0, 1, 2}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxisTwo3D, COMPUTE_DEVICES)
+void SimpleConcatAxisTwo3D(armnn::Compute computeDevice)
{
int32_t axis = 2;
- TestTensor aIn{armnn::TensorShape{1,1,1},{0}};
- TestTensor bIn{armnn::TensorShape{1,1,1},{1}};
- TestTensor cIn{armnn::TensorShape{1,1,1},{2}};
+ TestTensor aIn{armnn::TensorShape{1, 1, 1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1, 1, 1}, {1}};
+ TestTensor cIn{armnn::TensorShape{1, 1, 1}, {2}};
- TestTensor expected{armnn::TensorShape{1,1,3},{0,1,2}};
+ TestTensor expected{armnn::TensorShape{1, 1, 3}, {0, 1, 2}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxisZero2D, COMPUTE_DEVICES)
+void SimpleConcatAxisZero2D(armnn::Compute computeDevice)
{
int32_t axis = 0;
- TestTensor aIn{armnn::TensorShape{1,1},{0}};
- TestTensor bIn{armnn::TensorShape{1,1},{1}};
- TestTensor cIn{armnn::TensorShape{1,1},{2}};
+ TestTensor aIn{armnn::TensorShape{1, 1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1, 1}, {1}};
+ TestTensor cIn{armnn::TensorShape{1, 1}, {2}};
- TestTensor expected{armnn::TensorShape{3,1},{0,1,2}};
+ TestTensor expected{armnn::TensorShape{3, 1}, {0, 1, 2}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxisOne2D, COMPUTE_DEVICES)
+void SimpleConcatAxisOne2D(armnn::Compute computeDevice)
{
int32_t axis = 1;
- TestTensor aIn{armnn::TensorShape{1,1},{0}};
- TestTensor bIn{armnn::TensorShape{1,1},{1}};
- TestTensor cIn{armnn::TensorShape{1,1},{2}};
+ TestTensor aIn{armnn::TensorShape{1, 1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1, 1}, {1}};
+ TestTensor cIn{armnn::TensorShape{1, 1}, {2}};
- TestTensor expected{armnn::TensorShape{1,3},{0,1,2}};
+ TestTensor expected{armnn::TensorShape{1, 3}, {0, 1, 2}};
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}
-BOOST_DATA_TEST_CASE(SimpleConcatAxisZero1D, COMPUTE_DEVICES)
+void SimpleConcatAxisZero1D(armnn::Compute computeDevice)
{
int32_t axis = 0;
- TestTensor aIn{armnn::TensorShape{1},{0}};
- TestTensor bIn{armnn::TensorShape{1},{1}};
- TestTensor cIn{armnn::TensorShape{1},{2}};
+ TestTensor aIn{armnn::TensorShape{1}, {0}};
+ TestTensor bIn{armnn::TensorShape{1}, {1}};
+ TestTensor cIn{armnn::TensorShape{1}, {2}};
+
+ TestTensor expected{armnn::TensorShape{3}, {0, 1, 2}};
+ ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
+}
+
+} // namespace <anonymous>
+
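+// Each implementation above is run once per compute device: explicitly for
+// CpuRef here, and again for GpuAcc in the suite below when the CL backend
+// is available.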
+DOCTEST_TEST_SUITE("ConcatTests_CpuRef")
+{
+
+DOCTEST_TEST_CASE("SimpleConcatAxis0")
+{
+ SimpleConcatAxis0(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("ConcatAxis0NoInterleave")
+{
+ ConcatAxis0NoInterleave(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis1")
+{
+ SimpleConcatAxis1(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("ConcatAxis1NoInterleave")
+{
+ ConcatAxis1NoInterleave(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis1DoInterleave")
+{
+ SimpleConcatAxis1DoInterleave(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis2")
+{
+ SimpleConcatAxis2(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("ConcatAxis2NoInterleave")
+{
+ ConcatAxis2NoInterleave(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis2DoInterleave")
+{
+ SimpleConcatAxis2DoInterleave(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis3")
+{
+ SimpleConcatAxis3(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis3DoInterleave")
+{
+ SimpleConcatAxis3DoInterleave(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("AxisTooBig")
+{
+ AxisTooBig(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("AxisTooSmall")
+{
+ AxisTooSmall(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("TooFewInputs")
+{
+ TooFewInputs(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("MismatchedInputDimensions")
+{
+ MismatchedInputDimensions(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("MismatchedInputRanks")
+{
+ MismatchedInputRanks(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("MismatchedOutputDimensions")
+{
+ MismatchedOutputDimensions(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("MismatchedOutputRank")
+{
+ MismatchedOutputRank(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("ValidNegativeAxis")
+{
+ ValidNegativeAxis(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxisZero3D")
+{
+ SimpleConcatAxisZero3D(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxisOne3D")
+{
+ SimpleConcatAxisOne3D(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxisTwo3D")
+{
+ SimpleConcatAxisTwo3D(armnn::Compute::CpuRef);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxisZero2D")
+{
+ SimpleConcatAxisZero2D(armnn::Compute::CpuRef);
+}
- TestTensor expected{armnn::TensorShape{3},{0,1,2}};
+DOCTEST_TEST_CASE("SimpleConcatAxisOne2D")
+{
+ SimpleConcatAxisOne2D(armnn::Compute::CpuRef);
+}
- ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+DOCTEST_TEST_CASE("SimpleConcatAxisZero1D")
+{
+ SimpleConcatAxisZero1D(armnn::Compute::CpuRef);
+}
+
+}
+
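+// The GpuAcc suite repeats the same cases and is only compiled when the CL
+// backend is enabled (ARMCOMPUTECL_ENABLED is defined via
+// ARMNN_COMPUTE_CL_ENABLED in Android.mk).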
+#ifdef ARMCOMPUTECL_ENABLED
+DOCTEST_TEST_SUITE("ConcatTests_GpuAcc")
+{
+
+DOCTEST_TEST_CASE("SimpleConcatAxis0")
+{
+ SimpleConcatAxis0(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("ConcatAxis0NoInterleave")
+{
+ ConcatAxis0NoInterleave(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis1")
+{
+ SimpleConcatAxis1(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("ConcatAxis1NoInterleave")
+{
+ ConcatAxis1NoInterleave(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis1DoInterleave")
+{
+ SimpleConcatAxis1DoInterleave(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis2")
+{
+ SimpleConcatAxis2(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("ConcatAxis2NoInterleave")
+{
+ ConcatAxis2NoInterleave(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis2DoInterleave")
+{
+ SimpleConcatAxis2DoInterleave(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis3")
+{
+ SimpleConcatAxis3(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxis3DoInterleave")
+{
+ SimpleConcatAxis3DoInterleave(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("AxisTooBig")
+{
+ AxisTooBig(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("AxisTooSmall")
+{
+ AxisTooSmall(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("TooFewInputs")
+{
+ TooFewInputs(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("MismatchedInputDimensions")
+{
+ MismatchedInputDimensions(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("MismatchedInputRanks")
+{
+ MismatchedInputRanks(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("MismatchedOutputDimensions")
+{
+ MismatchedOutputDimensions(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("MismatchedOutputRank")
+{
+ MismatchedOutputRank(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("ValidNegativeAxis")
+{
+ ValidNegativeAxis(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxisZero3D")
+{
+ SimpleConcatAxisZero3D(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxisOne3D")
+{
+ SimpleConcatAxisOne3D(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxisTwo3D")
+{
+ SimpleConcatAxisTwo3D(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxisZero2D")
+{
+ SimpleConcatAxisZero2D(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxisOne2D")
+{
+ SimpleConcatAxisOne2D(armnn::Compute::GpuAcc);
+}
+
+DOCTEST_TEST_CASE("SimpleConcatAxisZero1D")
+{
+ SimpleConcatAxisZero1D(armnn::Compute::GpuAcc);
}
-BOOST_AUTO_TEST_SUITE_END()
+} // End of GpuAcc Test Suite
+#endif // ARMCOMPUTECL_ENABLED
\ No newline at end of file
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
index 50ba0e9f..71119cde 100644
--- a/test/Concurrent.cpp
+++ b/test/Concurrent.cpp
@@ -1,17 +1,14 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "DriverTestHelpers.hpp"
-
-#include "../1.0/HalPolicy.hpp"
-#include <boost/test/unit_test.hpp>
+#include "DriverTestHelpers.hpp"
#include <log/log.h>
-BOOST_AUTO_TEST_SUITE(ConcurrentDriverTests)
-
+DOCTEST_TEST_SUITE("ConcurrentDriverTests")
+{
using ArmnnDriver = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;
using HalPolicy = armnn_driver::hal_1_0::HalPolicy;
@@ -26,7 +23,7 @@ using namespace armnn_driver;
// The main point of this test is to check that multiple requests can be
// executed without waiting for the callback from previous execution.
// The operations performed are not significant.
-BOOST_AUTO_TEST_CASE(ConcurrentExecute)
+DOCTEST_TEST_CASE("ConcurrentExecute")
{
ALOGI("ConcurrentExecute: entry");
@@ -64,7 +61,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
}
}
- BOOST_TEST(maxRequests == preparedModelsSize);
+ DOCTEST_CHECK(maxRequests == preparedModelsSize);
// construct the request data
V1_0::DataLocation inloc = {};
@@ -85,15 +82,16 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
// build the requests
V1_0::Request requests[maxRequests];
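+ // keep the input pool mappings alive for the duration of the test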
+ android::sp<IMemory> inMemory[maxRequests];
android::sp<IMemory> outMemory[maxRequests];
+ float indata[] = {2, 32, 16};
float* outdata[maxRequests];
for (size_t i = 0; i < maxRequests; ++i)
{
requests[i].inputs = hidl_vec<RequestArgument>{input};
requests[i].outputs = hidl_vec<RequestArgument>{output};
// set the input data (matching source test)
- float indata[] = {2, 32, 16};
- AddPoolAndSetData<float>(3, requests[i], indata);
+ inMemory[i] = AddPoolAndSetData<float>(3, requests[i], indata);
// add memory for the output
outMemory[i] = AddPoolAndGetData<float>(1, requests[i]);
outdata[i] = static_cast<float*>(static_cast<void*>(outMemory[i]->getPointer()));
@@ -111,7 +109,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
ALOGI("ConcurrentExecute: waiting for callbacks");
for (size_t i = 0; i < maxRequests; ++i)
{
- ARMNN_ASSERT(cb[i]);
+ DOCTEST_CHECK(cb[i]);
cb[i]->wait();
}
@@ -119,9 +117,9 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
ALOGI("ConcurrentExecute: validating results");
for (size_t i = 0; i < maxRequests; ++i)
{
- BOOST_TEST(outdata[i][0] == 152);
+ DOCTEST_CHECK(outdata[i][0] == 152);
}
ALOGI("ConcurrentExecute: exit");
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/Convolution2D.hpp b/test/Convolution2D.hpp
index c3f9d48c..cc26f68f 100644
--- a/test/Convolution2D.hpp
+++ b/test/Convolution2D.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -7,13 +7,10 @@
#include "DriverTestHelpers.hpp"
-#include <boost/test/unit_test.hpp>
#include <log/log.h>
#include <OperationsUtils.h>
-BOOST_AUTO_TEST_SUITE(Convolution2DTests)
-
using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
@@ -25,11 +22,11 @@ namespace driverTestHelpers
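+// When fp16 is enabled the backend may compute in either precision, so the
+// result is accepted if it matches either the fp16 or the fp32 expectation;
+// otherwise the fp32 result is required.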
#define ARMNN_ANDROID_FP16_TEST(result, fp16Expectation, fp32Expectation, fp16Enabled) \
if (fp16Enabled) \
{ \
- BOOST_TEST((result == fp16Expectation || result == fp32Expectation), result << \
+ DOCTEST_CHECK_MESSAGE((result == fp16Expectation || result == fp32Expectation), result << \
" does not match either " << fp16Expectation << "[fp16] or " << fp32Expectation << "[fp32]"); \
} else \
{ \
- BOOST_TEST(result == fp32Expectation); \
+ DOCTEST_CHECK(result == fp32Expectation); \
}
void SetModelFp16Flag(V1_0::Model& model, bool fp16Enabled);
@@ -55,22 +52,22 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled
// add operands
float weightValue[] = {1.f, -1.f, 0.f, 1.f};
- float biasValue[] = {0.f};
+ float biasValue[] = {0.f};
- AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 3, 1});
- AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
- AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand<HalPolicy>(model, (int32_t)paddingScheme); // padding
+ AddInputOperand<HalPolicy>(model, hidl_vec < uint32_t > {1, 2, 3, 1});
+ AddTensorOperand<HalPolicy>(model, hidl_vec < uint32_t > {1, 2, 2, 1}, weightValue);
+ AddTensorOperand<HalPolicy>(model, hidl_vec < uint32_t > {1}, biasValue);
+ AddIntOperand<HalPolicy>(model, (int32_t) paddingScheme); // padding
AddIntOperand<HalPolicy>(model, 2); // stride x
AddIntOperand<HalPolicy>(model, 2); // stride y
AddIntOperand<HalPolicy>(model, 0); // no activation
- AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, outSize, 1});
+ AddOutputOperand<HalPolicy>(model, hidl_vec < uint32_t > {1, 1, outSize, 1});
// make the convolution operation
model.operations.resize(1);
model.operations[0].type = HalOperationType::CONV_2D;
- model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
- model.operations[0].outputs = hidl_vec<uint32_t>{7};
+ model.operations[0].inputs = hidl_vec < uint32_t > {0, 1, 2, 3, 4, 5, 6};
+ model.operations[0].outputs = hidl_vec < uint32_t > {7};
// make the prepared model
SetModelFp16Flag(model, fp16Enabled);
@@ -78,24 +75,24 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled
// construct the request
V1_0::DataLocation inloc = {};
- inloc.poolIndex = 0;
- inloc.offset = 0;
- inloc.length = 6 * sizeof(float);
- RequestArgument input = {};
- input.location = inloc;
- input.dimensions = hidl_vec<uint32_t>{};
+ inloc.poolIndex = 0;
+ inloc.offset = 0;
+ inloc.length = 6 * sizeof(float);
+ RequestArgument input = {};
+ input.location = inloc;
+ input.dimensions = hidl_vec < uint32_t > {};
V1_0::DataLocation outloc = {};
- outloc.poolIndex = 1;
- outloc.offset = 0;
- outloc.length = outSize * sizeof(float);
- RequestArgument output = {};
- output.location = outloc;
- output.dimensions = hidl_vec<uint32_t>{};
+ outloc.poolIndex = 1;
+ outloc.offset = 0;
+ outloc.length = outSize * sizeof(float);
+ RequestArgument output = {};
+ output.location = outloc;
+ output.dimensions = hidl_vec < uint32_t > {};
V1_0::Request request = {};
- request.inputs = hidl_vec<RequestArgument>{input};
- request.outputs = hidl_vec<RequestArgument>{output};
+ request.inputs = hidl_vec < RequestArgument > {input};
+ request.outputs = hidl_vec < RequestArgument > {output};
// set the input data (matching source test)
float indata[] = {1024.25f, 1.f, 0.f, 3.f, -1, -1024.25f};
@@ -114,19 +111,17 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled
// check the result
switch (paddingScheme)
{
- case android::nn::kPaddingValid:
- ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
- break;
- case android::nn::kPaddingSame:
- ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
- BOOST_TEST(outdata[1] == 0.f);
- break;
- default:
- BOOST_TEST(false);
- break;
+ case android::nn::kPaddingValid:
+ ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
+ break;
+ case android::nn::kPaddingSame:
+ ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
+ DOCTEST_CHECK(outdata[1] == 0.f);
+ break;
+ default:
+ DOCTEST_CHECK(false);
+ break;
}
}
} // namespace driverTestHelpers
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/Dilation.hpp b/test/Dilation.hpp
index d0189c96..dbd24933 100644
--- a/test/Dilation.hpp
+++ b/test/Dilation.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -7,17 +7,12 @@
#include "DriverTestHelpers.hpp"
-#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/StrategyBase.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/test/unit_test.hpp>
-
#include <numeric>
-BOOST_AUTO_TEST_SUITE(DilationTests)
-
using namespace armnn;
-using namespace boost;
using namespace driverTestHelpers;
struct DilationTestOptions
@@ -35,7 +30,7 @@ struct DilationTestOptions
bool m_HasDilation;
};
-class DilationTestVisitor : public LayerVisitorBase<VisitorThrowingPolicy>
+class DilationTestVisitor : public StrategyBase<ThrowingStrategy>
{
public:
DilationTestVisitor() :
@@ -47,32 +42,32 @@ public:
m_ExpectedDilationY{expectedDilationY}
{}
- void VisitConvolution2dLayer(const IConnectableLayer *layer,
- const Convolution2dDescriptor& descriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char *name = nullptr) override
- {
- IgnoreUnused(layer);
- IgnoreUnused(weights);
- IgnoreUnused(biases);
- IgnoreUnused(name);
-
- CheckDilationParams(descriptor);
- }
-
- void VisitDepthwiseConvolution2dLayer(const IConnectableLayer *layer,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char *name = nullptr) override
+ void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+ const armnn::BaseDescriptor& descriptor,
+ const std::vector<armnn::ConstTensor>& constants,
+ const char* name,
+ const armnn::LayerBindingId id = 0) override
{
- IgnoreUnused(layer);
- IgnoreUnused(weights);
- IgnoreUnused(biases);
- IgnoreUnused(name);
-
- CheckDilationParams(descriptor);
+ armnn::IgnoreUnused(layer, constants, id, name);
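+ // Only the convolution layer types carry dilation parameters; Constant
+ // layers (weights and biases) are skipped, and any other layer type is
+ // reported through the throwing default strategy.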
+ switch (layer->GetType())
+ {
+ case armnn::LayerType::Constant:
+ break;
+ case armnn::LayerType::Convolution2d:
+ {
+ CheckDilationParams(static_cast<const armnn::Convolution2dDescriptor&>(descriptor));
+ break;
+ }
+ case armnn::LayerType::DepthwiseConvolution2d:
+ {
+ CheckDilationParams(static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor));
+ break;
+ }
+ default:
+ {
+ m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+ }
+ }
}
private:
@@ -82,8 +77,8 @@ private:
template<typename ConvolutionDescriptor>
void CheckDilationParams(const ConvolutionDescriptor& descriptor)
{
- BOOST_CHECK_EQUAL(descriptor.m_DilationX, m_ExpectedDilationX);
- BOOST_CHECK_EQUAL(descriptor.m_DilationY, m_ExpectedDilationY);
+ // short doctest macro names are disabled (see DriverTestHelpers.hpp), so use the full names
+ DOCTEST_CHECK_EQ(descriptor.m_DilationX, m_ExpectedDilationX);
+ DOCTEST_CHECK_EQ(descriptor.m_DilationY, m_ExpectedDilationY);
}
};
@@ -169,11 +164,9 @@ void DilationTestImpl(const DilationTestOptions& options)
data.m_OutputSlotForOperand = std::vector<IOutputSlot*>(model.operands.size(), nullptr);
bool ok = HalPolicy::ConvertOperation(model.operations[0], model, data);
- BOOST_CHECK(ok);
+ DOCTEST_CHECK(ok);
// check if dilation params are as expected
DilationTestVisitor visitor = options.m_HasDilation ? DilationTestVisitor(2, 2) : DilationTestVisitor();
- data.m_Network->Accept(visitor);
+ data.m_Network->ExecuteStrategy(visitor);
}
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index 8e8d7bef..1f9fc1ee 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -1,10 +1,10 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "DriverTestHelpers.hpp"
#include <log/log.h>
-#include <boost/test/unit_test.hpp>
namespace android
{
@@ -139,10 +139,10 @@ android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_0::Model& mode
driver.prepareModel(model, cb);
prepareStatus = cb->GetErrorStatus();
- BOOST_TEST(prepareStatus == expectedStatus);
+ DOCTEST_CHECK((int)prepareStatus == (int)expectedStatus);
if (expectedStatus == V1_0::ErrorStatus::NONE)
{
- BOOST_TEST((cb->GetPreparedModel() != nullptr));
+ DOCTEST_CHECK((cb->GetPreparedModel() != nullptr));
}
return cb->GetPreparedModel();
}
@@ -158,10 +158,10 @@ android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& mode
driver.prepareModel_1_1(model, V1_1::ExecutionPreference::LOW_POWER, cb);
prepareStatus = cb->GetErrorStatus();
- BOOST_TEST(prepareStatus == expectedStatus);
+ DOCTEST_CHECK((int)prepareStatus == (int)expectedStatus);
if (expectedStatus == V1_0::ErrorStatus::NONE)
{
- BOOST_TEST((cb->GetPreparedModel() != nullptr));
+ DOCTEST_CHECK((cb->GetPreparedModel() != nullptr));
}
return cb->GetPreparedModel();
}
@@ -184,10 +184,10 @@ android::sp<V1_2::IPreparedModel> PrepareModelWithStatus_1_2(const armnn_driver:
driver.prepareModel_1_2(model, V1_1::ExecutionPreference::LOW_POWER, emptyHandle1, emptyHandle2, emptyToken, cb);
prepareStatus = cb->GetErrorStatus();
- BOOST_TEST(prepareStatus == expectedStatus);
+ DOCTEST_CHECK((int)prepareStatus == (int)expectedStatus);
if (expectedStatus == V1_0::ErrorStatus::NONE)
{
- BOOST_TEST((cb->GetPreparedModel_1_2() != nullptr));
+ DOCTEST_CHECK((cb->GetPreparedModel_1_2() != nullptr));
}
return cb->GetPreparedModel_1_2();
}
@@ -219,7 +219,7 @@ android::sp<V1_3::IPreparedModel> PrepareModelWithStatus_1_3(const armnn_driver:
prepareStatus = cb->Get_1_3_ErrorStatus();
if (prepareStatus == V1_3::ErrorStatus::NONE)
{
- BOOST_TEST((cb->GetPreparedModel_1_3() != nullptr));
+ DOCTEST_CHECK((cb->GetPreparedModel_1_3() != nullptr));
}
return cb->GetPreparedModel_1_3();
}
@@ -230,10 +230,10 @@ V1_0::ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
const V1_0::Request& request,
V1_0::ErrorStatus expectedStatus)
{
- BOOST_TEST(preparedModel.get() != nullptr);
+ DOCTEST_CHECK(preparedModel.get() != nullptr);
android::sp<ExecutionCallback> cb(new ExecutionCallback());
V1_0::ErrorStatus execStatus = preparedModel->execute(request, cb);
- BOOST_TEST(execStatus == expectedStatus);
+ DOCTEST_CHECK((int)execStatus == (int)expectedStatus);
ALOGI("Execute: waiting for callback to be invoked");
cb->wait();
return execStatus;
@@ -242,9 +242,10 @@ V1_0::ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
android::sp<ExecutionCallback> ExecuteNoWait(android::sp<V1_0::IPreparedModel> preparedModel,
const V1_0::Request& request)
{
- BOOST_TEST(preparedModel.get() != nullptr);
+ DOCTEST_CHECK(preparedModel.get() != nullptr);
android::sp<ExecutionCallback> cb(new ExecutionCallback());
- BOOST_TEST(preparedModel->execute(request, cb) == V1_0::ErrorStatus::NONE);
+ V1_0::ErrorStatus execStatus = preparedModel->execute(request, cb);
+ DOCTEST_CHECK((int)execStatus == (int)V1_0::ErrorStatus::NONE);
ALOGI("ExecuteNoWait: returning callback object");
return cb;
}
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index d37fbf26..98be0903 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -10,10 +10,16 @@
#include "../ArmnnDriver.hpp"
#include <iosfwd>
-#include <boost/test/unit_test.hpp>
-
#include <android/hidl/allocator/1.0/IAllocator.h>
+// Some of the short name macros from 'third-party/doctest/doctest.h' clash with macros in
+// 'system/core/base/include/android-base/logging.h' so we use the full DOCTEST macro names
+#ifndef DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES
+#define DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES
+#endif // DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES
+
+#include <doctest/doctest.h>
+
using RequestArgument = V1_0::RequestArgument;
using ::android::hidl::allocator::V1_0::IAllocator;
@@ -167,7 +173,7 @@ android::sp<IMemory> AddPoolAndGetData(uint32_t size, V1_0::Request& request)
android::sp<IAllocator> allocator = IAllocator::getService("ashmem");
allocator->allocate(sizeof(T) * size, [&](bool success, const hidl_memory& mem) {
- BOOST_TEST(success);
+ DOCTEST_CHECK(success);
pool = mem;
});
@@ -180,13 +186,15 @@ android::sp<IMemory> AddPoolAndGetData(uint32_t size, V1_0::Request& request)
}
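+// Returns the mapped memory so that callers can retain it, e.g. for
+// concurrent executions (see Concurrent.cpp).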
template<typename T>
-void AddPoolAndSetData(uint32_t size, V1_0::Request& request, const T* data)
+android::sp<IMemory> AddPoolAndSetData(uint32_t size, V1_0::Request& request, const T* data)
{
android::sp<IMemory> memory = AddPoolAndGetData<T>(size, request);
T* dst = static_cast<T*>(static_cast<void*>(memory->getPointer()));
memcpy(dst, data, size * sizeof(T));
+
+ return memory;
}
template<typename HalPolicy,
diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp
index a68a5870..4717357b 100644
--- a/test/FullyConnected.cpp
+++ b/test/FullyConnected.cpp
@@ -1,17 +1,14 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "DriverTestHelpers.hpp"
-
-#include "../1.0/HalPolicy.hpp"
-#include <boost/test/unit_test.hpp>
+#include "DriverTestHelpers.hpp"
#include <log/log.h>
-BOOST_AUTO_TEST_SUITE(FullyConnectedTests)
-
+DOCTEST_TEST_SUITE("FullyConnectedTests")
+{
using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
@@ -19,7 +16,7 @@ using namespace armnn_driver;
using HalPolicy = hal_1_0::HalPolicy;
// Add our own test here since we fail the fc tests which Google supplies (because of non-const weights)
-BOOST_AUTO_TEST_CASE(FullyConnected)
+DOCTEST_TEST_CASE("FullyConnected")
{
// this should ideally replicate fully_connected_float.model.cpp
// but that uses slightly weird dimensions which I don't think we need to support for now
@@ -83,10 +80,10 @@ BOOST_AUTO_TEST_CASE(FullyConnected)
}
// check the result
- BOOST_TEST(outdata[0] == 152);
+ DOCTEST_CHECK(outdata[0] == 152);
}
-BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
+DOCTEST_TEST_CASE("TestFullyConnected4dInput")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -165,17 +162,17 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
}
// check the result
- BOOST_TEST(outdata[0] == 1);
- BOOST_TEST(outdata[1] == 2);
- BOOST_TEST(outdata[2] == 3);
- BOOST_TEST(outdata[3] == 4);
- BOOST_TEST(outdata[4] == 5);
- BOOST_TEST(outdata[5] == 6);
- BOOST_TEST(outdata[6] == 7);
- BOOST_TEST(outdata[7] == 8);
+ DOCTEST_CHECK(outdata[0] == 1);
+ DOCTEST_CHECK(outdata[1] == 2);
+ DOCTEST_CHECK(outdata[2] == 3);
+ DOCTEST_CHECK(outdata[3] == 4);
+ DOCTEST_CHECK(outdata[4] == 5);
+ DOCTEST_CHECK(outdata[5] == 6);
+ DOCTEST_CHECK(outdata[6] == 7);
+ DOCTEST_CHECK(outdata[7] == 8);
}
-BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
+DOCTEST_TEST_CASE("TestFullyConnected4dInputReshape")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -254,17 +251,17 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
}
// check the result
- BOOST_TEST(outdata[0] == 1);
- BOOST_TEST(outdata[1] == 2);
- BOOST_TEST(outdata[2] == 3);
- BOOST_TEST(outdata[3] == 4);
- BOOST_TEST(outdata[4] == 5);
- BOOST_TEST(outdata[5] == 6);
- BOOST_TEST(outdata[6] == 7);
- BOOST_TEST(outdata[7] == 8);
+ DOCTEST_CHECK(outdata[0] == 1);
+ DOCTEST_CHECK(outdata[1] == 2);
+ DOCTEST_CHECK(outdata[2] == 3);
+ DOCTEST_CHECK(outdata[3] == 4);
+ DOCTEST_CHECK(outdata[4] == 5);
+ DOCTEST_CHECK(outdata[5] == 6);
+ DOCTEST_CHECK(outdata[6] == 7);
+ DOCTEST_CHECK(outdata[7] == 8);
}
-BOOST_AUTO_TEST_CASE(TestFullyConnectedWeightsAsInput)
+DOCTEST_TEST_CASE("TestFullyConnectedWeightsAsInput")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -366,14 +363,14 @@ BOOST_AUTO_TEST_CASE(TestFullyConnectedWeightsAsInput)
}
// check the result
- BOOST_TEST(outdata[0] == 1);
- BOOST_TEST(outdata[1] == 2);
- BOOST_TEST(outdata[2] == 3);
- BOOST_TEST(outdata[3] == 4);
- BOOST_TEST(outdata[4] == 5);
- BOOST_TEST(outdata[5] == 6);
- BOOST_TEST(outdata[6] == 7);
- BOOST_TEST(outdata[7] == 8);
+ DOCTEST_CHECK(outdata[0] == 1);
+ DOCTEST_CHECK(outdata[1] == 2);
+ DOCTEST_CHECK(outdata[2] == 3);
+ DOCTEST_CHECK(outdata[3] == 4);
+ DOCTEST_CHECK(outdata[4] == 5);
+ DOCTEST_CHECK(outdata[5] == 6);
+ DOCTEST_CHECK(outdata[6] == 7);
+ DOCTEST_CHECK(outdata[7] == 8);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index 188c7b1c..bd86a885 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -1,16 +1,14 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "DriverTestHelpers.hpp"
-
-#include "../1.0/HalPolicy.hpp"
-#include <boost/test/unit_test.hpp>
+#include "DriverTestHelpers.hpp"
#include <log/log.h>
-BOOST_AUTO_TEST_SUITE(GenericLayerTests)
+DOCTEST_TEST_SUITE("GenericLayerTests")
+{
using namespace android::hardware;
using namespace driverTestHelpers;
@@ -18,7 +16,7 @@ using namespace armnn_driver;
using HalPolicy = hal_1_0::HalPolicy;
-BOOST_AUTO_TEST_CASE(GetSupportedOperations)
+DOCTEST_TEST_CASE("GetSupportedOperations")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -52,9 +50,9 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
model0.operations[0].outputs = hidl_vec<uint32_t>{4};
driver->getSupportedOperations(model0, cb);
- BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
- BOOST_TEST(supported.size() == (size_t)1);
- BOOST_TEST(supported[0] == true);
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(supported.size() == (size_t)1);
+ DOCTEST_CHECK(supported[0] == true);
V1_0::Model model1 = {};
@@ -81,8 +79,8 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
driver->getSupportedOperations(model1, cb);
- BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
- BOOST_TEST(supported.empty());
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
+ DOCTEST_CHECK(supported.empty());
// Test Broadcast on add/mul operators
HalPolicy::Model model2 = {};
@@ -114,10 +112,10 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
model2.operations[1].outputs = hidl_vec<uint32_t>{4};
driver->getSupportedOperations(model2, cb);
- BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
- BOOST_TEST(supported.size() == (size_t)2);
- BOOST_TEST(supported[0] == true);
- BOOST_TEST(supported[1] == true);
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(supported.size() == (size_t)2);
+ DOCTEST_CHECK(supported[0] == true);
+ DOCTEST_CHECK(supported[1] == true);
V1_0::Model model3 = {};
@@ -143,9 +141,9 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
model3.operations[0].outputs = hidl_vec<uint32_t>{3, 4};
driver->getSupportedOperations(model3, cb);
- BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
- BOOST_TEST(supported.size() == (size_t)1);
- BOOST_TEST(supported[0] == false);
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(supported.size() == (size_t)1);
+ DOCTEST_CHECK(supported[0] == false);
HalPolicy::Model model4 = {};
@@ -158,14 +156,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
model4.operations[0].outputs = hidl_vec<uint32_t>{0};
driver->getSupportedOperations(model4, cb);
- BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
- BOOST_TEST(supported.empty());
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
+ DOCTEST_CHECK(supported.empty());
}
// The purpose of this test is to ensure that when encountering an unsupported operation
// it is skipped and getSupportedOperations() continues (rather than failing and stopping).
// As per IVGCVSW-710.
-BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
+DOCTEST_TEST_CASE("UnsupportedLayerContinueOnFailure")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -240,16 +238,16 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
// We are testing that the unsupported layers return false and the test continues rather than failing and stopping
driver->getSupportedOperations(model, cb);
- BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
- BOOST_TEST(supported.size() == (size_t)3);
- BOOST_TEST(supported[0] == false);
- BOOST_TEST(supported[1] == true);
- BOOST_TEST(supported[2] == false);
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(supported.size() == (size_t)3);
+ DOCTEST_CHECK(supported[0] == false);
+ DOCTEST_CHECK(supported[1] == true);
+ DOCTEST_CHECK(supported[2] == false);
}
// The purpose of this test is to ensure that when encountering a failure
// during mem pool mapping we properly report an error to the framework via a callback
-BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
+DOCTEST_TEST_CASE("ModelToINetworkConverterMemPoolFail")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -268,8 +266,8 @@ BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
// Memory pool mapping should fail, we should report an error
driver->getSupportedOperations(model, cb);
- BOOST_TEST((int)errorStatus != (int)V1_0::ErrorStatus::NONE);
- BOOST_TEST(supported.empty());
+ DOCTEST_CHECK((int)errorStatus != (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(supported.empty());
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/Lstm.hpp b/test/Lstm.hpp
index 2cb3c264..93f2f32d 100644
--- a/test/Lstm.hpp
+++ b/test/Lstm.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -9,8 +9,6 @@
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/math/special_functions/relative_difference.hpp>
-
#include <array>
using ArmnnDriver = armnn_driver::ArmnnDriver;
@@ -40,26 +38,6 @@ RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int
return inputRequestArgument;
}
-// Returns true if the relative difference between two float values is less than the tolerance value given.
-// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work!
-bool TolerantCompareEqual(float a, float b, float tolerance = 0.00001f)
-{
- float rd;
- if (a == 0.0f)
- {
- rd = fabs(b);
- }
- else if (b == 0.0f)
- {
- rd = fabs(a);
- }
- else
- {
- rd = boost::math::relative_difference(a, b);
- }
- return rd < tolerance;
-}
-
// Helper function to create an OperandLifeTime::NO_VALUE for testing.
// To be used on optional input operands that have no values - these are valid and should be tested.
V1_0::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
@@ -100,12 +78,6 @@ void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::h
} // anonymous namespace
-#ifndef ARMCOMPUTECL_ENABLED
-static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
-static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
-#endif
-
// Add our own tests here, since we fail the LSTM tests that Google supplies (because of non-const weights)
template <typename HalPolicy>
void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
@@ -394,18 +366,20 @@ void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
// check the results
for (size_t i = 0; i < outputStateOutValue.size(); ++i)
{
- BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
- "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
+ DOCTEST_CHECK_MESSAGE(outputStateOutValue[i] == doctest::Approx(outputStateOutData[i]),
+ "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != "
+ << outputStateOutData[i]);
}
for (size_t i = 0; i < cellStateOutValue.size(); ++i)
{
- BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
- "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
+ DOCTEST_CHECK_MESSAGE(cellStateOutValue[i] == doctest::Approx(cellStateOutData[i]),
+ "cellStateOutValue[" << i << "]: " << cellStateOutValue[i] << " != "
+ << cellStateOutData[i]);
}
for (size_t i = 0; i < outputValue.size(); ++i)
{
- BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
- "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+ DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx(outputData[i]),
+ "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
}
}
@@ -669,13 +643,14 @@ void QuantizedLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
// check the results
for (size_t i = 0; i < cellStateOutValue.size(); ++i)
{
- BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i], 1.0f),
- "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
+ DOCTEST_CHECK_MESSAGE(cellStateOutValue[i] == doctest::Approx(cellStateOutData[i]),
+ "cellStateOutValue[" << i << "]: " << cellStateOutValue[i] << " != "
+ << cellStateOutData[i]);
}
for (size_t i = 0; i < outputValue.size(); ++i)
{
- BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i], 1.0f),
- "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+ DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx(outputData[i]),
+ "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
}
}
diff --git a/test/SystemProperties.cpp b/test/SystemProperties.cpp
index e1a2632e..ef952964 100644
--- a/test/SystemProperties.cpp
+++ b/test/SystemProperties.cpp
@@ -1,57 +1,58 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "DriverTestHelpers.hpp"
-#include <boost/test/unit_test.hpp>
#include <log/log.h>
-#include "../SystemPropertiesUtils.hpp"
+#include <SystemPropertiesUtils.hpp>
-BOOST_AUTO_TEST_SUITE(SystemProperiesTests)
+DOCTEST_TEST_SUITE("SystemProperiesTests")
+{
-BOOST_AUTO_TEST_CASE(SystemProperties)
+DOCTEST_TEST_CASE("SystemProperties")
{
// Test default value
{
auto p = __system_property_find("thisDoesNotExist");
- BOOST_TEST((p == nullptr));
+ DOCTEST_CHECK((p == nullptr));
int defaultValue = ParseSystemProperty("thisDoesNotExist", -4);
- BOOST_TEST((defaultValue == -4));
+ DOCTEST_CHECK((defaultValue == -4));
}
// Test default value from bad data type
{
__system_property_set("thisIsNotFloat", "notfloat");
float defaultValue = ParseSystemProperty("thisIsNotFloat", 0.1f);
- BOOST_TEST((defaultValue == 0.1f));
+ DOCTEST_CHECK((defaultValue == 0.1f));
}
// Test fetching bool values
{
__system_property_set("myTestBool", "1");
bool b = ParseSystemProperty("myTestBool", false);
- BOOST_TEST((b == true));
+ DOCTEST_CHECK((b == true));
}
{
__system_property_set("myTestBool", "0");
bool b = ParseSystemProperty("myTestBool", true);
- BOOST_TEST((b == false));
+ DOCTEST_CHECK((b == false));
}
// Test fetching int
{
__system_property_set("myTestInt", "567");
int i = ParseSystemProperty("myTestInt", 890);
- BOOST_TEST((i==567));
+ DOCTEST_CHECK((i == 567));
}
// Test fetching float
{
__system_property_set("myTestFloat", "1.2f");
float f = ParseSystemProperty("myTestFloat", 3.4f);
- BOOST_TEST((f==1.2f));
+ DOCTEST_CHECK((f==1.2f));
}
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/TestHalfTensor.cpp b/test/TestHalfTensor.cpp
new file mode 100644
index 00000000..12cdc427
--- /dev/null
+++ b/test/TestHalfTensor.cpp
@@ -0,0 +1,33 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TestHalfTensor.hpp"
+
+namespace driverTestHelpers
+{
+
+hidl_vec<uint32_t> TestHalfTensor::GetDimensions() const
+{
+ hidl_vec<uint32_t> dimensions;
+ dimensions.resize(m_Shape.GetNumDimensions());
+ for (uint32_t i = 0; i < m_Shape.GetNumDimensions(); ++i)
+ {
+ dimensions[i] = m_Shape[i];
+ }
+ return dimensions;
+}
+
+unsigned int TestHalfTensor::GetNumElements() const
+{
+ return m_Shape.GetNumElements();
+}
+
+const Half * TestHalfTensor::GetData() const
+{
+ DOCTEST_CHECK(m_Data.empty() == false);
+ return &m_Data[0];
+}
+
+} // namespace driverTestHelpers
diff --git a/test/TestHalfTensor.hpp b/test/TestHalfTensor.hpp
new file mode 100644
index 00000000..2b7870f4
--- /dev/null
+++ b/test/TestHalfTensor.hpp
@@ -0,0 +1,38 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <ArmnnDriver.hpp>
+#include "DriverTestHelpers.hpp"
+
+#include <half/half.hpp>
+
+using Half = half_float::half;
+
+namespace driverTestHelpers
+{
+
+class TestHalfTensor
+{
+public:
+ TestHalfTensor(const armnn::TensorShape & shape,
+ const std::vector<Half> & data)
+ : m_Shape{shape}
+ , m_Data{data}
+ {
+ DOCTEST_CHECK(m_Shape.GetNumElements() == m_Data.size());
+ }
+
+ hidl_vec<uint32_t> GetDimensions() const;
+ unsigned int GetNumElements() const;
+ const Half * GetData() const;
+
+private:
+ armnn::TensorShape m_Shape;
+ std::vector<Half> m_Data;
+};
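+
+// A minimal usage sketch (hypothetical values): wrap expected fp16 data so a
+// test can query its dimensions and raw data pointer:
+// TestHalfTensor expected({2, 2}, std::vector<Half>(4, Half(1.0f)));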
+
+} // namespace driverTestHelpers
diff --git a/test/TestTensor.cpp b/test/TestTensor.cpp
index e6cb446f..39bcd5a6 100644
--- a/test/TestTensor.cpp
+++ b/test/TestTensor.cpp
@@ -1,7 +1,8 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "TestTensor.hpp"
namespace driverTestHelpers
@@ -25,7 +26,7 @@ unsigned int TestTensor::GetNumElements() const
const float * TestTensor::GetData() const
{
- ARMNN_ASSERT(m_Data.empty() == false);
+ DOCTEST_CHECK(m_Data.empty() == false);
return &m_Data[0];
}
diff --git a/test/TestTensor.hpp b/test/TestTensor.hpp
index 1cd1950d..b0613eb2 100644
--- a/test/TestTensor.hpp
+++ b/test/TestTensor.hpp
@@ -1,12 +1,12 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#pragma once
-#include "../ArmnnDriver.hpp"
+#pragma once
-#include <armnn/utility/Assert.hpp>
+#include <ArmnnDriver.hpp>
+#include "DriverTestHelpers.hpp"
namespace driverTestHelpers
{
@@ -19,7 +19,7 @@ public:
: m_Shape{shape}
, m_Data{data}
{
- ARMNN_ASSERT(m_Shape.GetNumElements() == m_Data.size());
+ DOCTEST_CHECK(m_Shape.GetNumElements() == m_Data.size());
}
hidl_vec<uint32_t> GetDimensions() const;
diff --git a/test/Tests.cpp b/test/Tests.cpp
index 0ef142d9..4628414e 100644
--- a/test/Tests.cpp
+++ b/test/Tests.cpp
@@ -1,31 +1,35 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#define LOG_TAG "ArmnnDriverTests"
-#define BOOST_TEST_MODULE armnn_driver_tests
-#include <boost/test/unit_test.hpp>
#include <log/log.h>
-#include "DriverTestHelpers.hpp"
+#ifndef DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#endif
-BOOST_AUTO_TEST_SUITE(DriverTests)
+#include "DriverTestHelpers.hpp"
using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
-BOOST_AUTO_TEST_CASE(Init)
+DOCTEST_TEST_SUITE("DriverTests")
+{
+
+DOCTEST_TEST_CASE("Init")
{
// Making the driver object on the stack causes a weird libc error, so make it on the heap instead
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
V1_0::DeviceStatus status = driver->getStatus();
- // Note double-parentheses to avoid compile error from Boost trying to printf the DeviceStatus
- BOOST_TEST((status == V1_0::DeviceStatus::AVAILABLE));
+ // Note double-parentheses to avoid compile error from doctest trying to printf the DeviceStatus
+ DOCTEST_CHECK((status == V1_0::DeviceStatus::AVAILABLE));
}
-BOOST_AUTO_TEST_CASE(TestCapabilities)
+DOCTEST_TEST_CASE("TestCapabilities")
{
// Making the driver object on the stack causes a weird libc error, so make it on the heap instead
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -41,11 +45,11 @@ BOOST_AUTO_TEST_CASE(TestCapabilities)
driver->getCapabilities(cb);
- BOOST_TEST((int)error == (int)V1_0::ErrorStatus::NONE);
- BOOST_TEST(cap.float32Performance.execTime > 0.f);
- BOOST_TEST(cap.float32Performance.powerUsage > 0.f);
- BOOST_TEST(cap.quantized8Performance.execTime > 0.f);
- BOOST_TEST(cap.quantized8Performance.powerUsage > 0.f);
+ DOCTEST_CHECK((int)error == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(cap.float32Performance.execTime > 0.f);
+ DOCTEST_CHECK(cap.float32Performance.powerUsage > 0.f);
+ DOCTEST_CHECK(cap.quantized8Performance.execTime > 0.f);
+ DOCTEST_CHECK(cap.quantized8Performance.powerUsage > 0.f);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/UnidirectionalSequenceLstm.hpp b/test/UnidirectionalSequenceLstm.hpp
new file mode 100644
index 00000000..75b7a8d4
--- /dev/null
+++ b/test/UnidirectionalSequenceLstm.hpp
@@ -0,0 +1,1419 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "DriverTestHelpers.hpp"
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <array>
+
+using ArmnnDriver = armnn_driver::ArmnnDriver;
+using DriverOptions = armnn_driver::DriverOptions;
+using RequestArgument = V1_0::RequestArgument;
+
+#ifdef ARMNN_ANDROID_S
+#include <nnapi/Types.h>
+#endif
+
+using namespace driverTestHelpers;
+using namespace android::hardware;
+
+namespace
+{
+
+template<typename T>
+RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
+{
+ V1_0::DataLocation inputInloc = {};
+ inputInloc.poolIndex = poolIndex;
+ inputInloc.offset = 0;
+ inputInloc.length = value.size() * sizeof(T);
+ RequestArgument inputRequestArgument = {};
+ inputRequestArgument.location = inputInloc;
+ inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
+ return inputRequestArgument;
+}
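+// Each argument occupies a whole memory pool (offset 0, length of the full
+// buffer), so only the pool index varies between arguments.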
+
+// Helper function to create an OperandLifeTime::NO_VALUE for testing.
+// To be used on optional input operands that have no values - these are valid and should be tested.
+V1_0::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
+{
+ // Only create a NO_VALUE for optional operands that have no elements
+ if (dimensions.size() == 0 || dimensions[0] == 0)
+ {
+ return V1_0::OperandLifeTime::NO_VALUE;
+ }
+ return V1_0::OperandLifeTime::CONSTANT_COPY;
+}
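+// For example, CreateNoValueLifeTime({0}) yields NO_VALUE (an omitted
+// optional operand) while CreateNoValueLifeTime({4}) yields CONSTANT_COPY.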
+
+template<typename HalModel>
+void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const V1_0::Request& request)
+{
+ android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, driver);
+ if (preparedModel.get() != nullptr)
+ {
+ Execute(preparedModel, request);
+ }
+}
+
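+// HAL 1.2 models have to be prepared through the 1.2 entry point, so a
+// specialisation using PrepareModel_1_2 is provided below when building for
+// Android NN HAL 1.2 or 1.3.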
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
+
+template<>
+void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::hal_1_2::HalPolicy::Model& model,
+ armnn_driver::ArmnnDriver& driver,
+ const V1_0::Request& request)
+{
+ android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, driver);
+ if (preparedModel.get() != nullptr)
+ {
+ Execute(preparedModel, request);
+ }
+}
+
+#endif
+
+} // anonymous namespace
+
+// Add our own tests here, since we fail the unidirectional sequence LSTM
+// tests that Google supplies (because of non-const weights)
+template <typename HalPolicy>
+void UnidirectionalSequenceLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
+ const std::vector<float>& inputValue,
+ const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
+ const std::vector<float>& inputToInputWeightsValue,
+ const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
+ const std::vector<float>& inputToForgetWeightsValue,
+ const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
+ const std::vector<float>& inputToCellWeightsValue,
+ const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
+ const std::vector<float>& inputToOutputWeightsValue,
+ const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
+ const std::vector<float>& recurrentToInputWeightsValue,
+ const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
+ const std::vector<float>& recurrentToForgetWeightsValue,
+ const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
+ const std::vector<float>& recurrentToCellWeightsValue,
+ const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
+ const std::vector<float>& recurrentToOutputWeightsValue,
+ const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
+ const std::vector<float>& cellToInputWeightsValue,
+ const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
+ const std::vector<float>& cellToForgetWeightsValue,
+ const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
+ const std::vector<float>& cellToOutputWeightsValue,
+ const hidl_vec<uint32_t>& inputGateBiasDimensions,
+ const std::vector<float>& inputGateBiasValue,
+ const hidl_vec<uint32_t>& forgetGateBiasDimensions,
+ const std::vector<float>& forgetGateBiasValue,
+ const hidl_vec<uint32_t>& cellBiasDimensions,
+ const std::vector<float>& cellBiasValue,
+ const hidl_vec<uint32_t>& outputGateBiasDimensions,
+ const std::vector<float>& outputGateBiasValue,
+ const hidl_vec<uint32_t>& projectionWeightsDimensions,
+ const std::vector<float>& projectionWeightsValue,
+ const hidl_vec<uint32_t>& projectionBiasDimensions,
+ const std::vector<float>& projectionBiasValue,
+ const hidl_vec<uint32_t>& outputStateInDimensions,
+ const std::vector<float>& outputStateInValue,
+ const hidl_vec<uint32_t>& cellStateInDimensions,
+ const std::vector<float>& cellStateInValue,
+ const hidl_vec<uint32_t>& activationFunctionDimensions,
+ const std::vector<int32_t>& activationFunctionValue,
+ const hidl_vec<uint32_t>& cellClippingThresholdDimensions,
+ const std::vector<float>& cellClippingThresholdValue,
+ const hidl_vec<uint32_t>& projectionClippingThresholdDimensions,
+ const std::vector<float>& projectionClippingThresholdValue,
+ const bool& timeMajorValue,
+ const hidl_vec<uint32_t>& inputLayerNormWeightsDimensions,
+ const std::vector<float>& inputLayerNormWeightsValue,
+ const hidl_vec<uint32_t>& forgetLayerNormWeightsDimensions,
+ const std::vector<float>& forgetLayerNormWeightsValue,
+ const hidl_vec<uint32_t>& cellLayerNormWeightsDimensions,
+ const std::vector<float>& cellLayerNormWeightsValue,
+ const hidl_vec<uint32_t>& outputLayerNormWeightsDimensions,
+ const std::vector<float>& outputLayerNormWeightsValue,
+ const hidl_vec<uint32_t>& outputDimensions,
+ const std::vector<float>& outputValue,
+ const hidl_vec<uint32_t>&, // outputStateOutDimensions,
+ const std::vector<float>&, // outputStateOutValue,
+ const hidl_vec<uint32_t>&, // cellStateOutDimensions,
+ const std::vector<float>&, // cellStateOutValue,
+ armnn::Compute compute,
+ float epsilonValue = 0)
+{
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
+ using Model = typename HalPolicy::Model;
+ Model model = {};
+
+ // Inputs:
+ // 00: The input: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32. Shape: if time-major:
+ // [max_time, batch_size, input_size]; if batch-major: [batch_size, max_time, input_size], where
+ // “max_time” is the number of timesteps, “batch_size” corresponds to the batching dimension, and
+ // “input_size” is the size of the input.
+ AddInputOperand<HalPolicy>(model, inputDimensions);
+
+ // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size], where “num_units” corresponds to the number of cell units.
+ AddTensorOperand<HalPolicy>(model,
+ inputToInputWeightsDimensions,
+ inputToInputWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(inputToInputWeightsDimensions));
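+ // Optional operands are added with CreateNoValueLifeTime, so empty
+ // dimensions ({0}) become NO_VALUE operands instead of constant copies.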
+ // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ AddTensorOperand<HalPolicy>(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
+ // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ AddTensorOperand<HalPolicy>(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
+ // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ AddTensorOperand<HalPolicy>(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
+ // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
+ // “num_units”), or the second dimension of the “projection_weights”, if defined.
+ AddTensorOperand<HalPolicy>(model,
+ recurrentToInputWeightsDimensions,
+ recurrentToInputWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
+ // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ AddTensorOperand<HalPolicy>(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
+ // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ AddTensorOperand<HalPolicy>(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
+ // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ AddTensorOperand<HalPolicy>(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
+ // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ AddTensorOperand<HalPolicy>(model,
+ cellToInputWeightsDimensions,
+ cellToInputWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(cellToInputWeightsDimensions));
+ // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ AddTensorOperand<HalPolicy>(model,
+ cellToForgetWeightsDimensions,
+ cellToForgetWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(cellToForgetWeightsDimensions));
+ // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ AddTensorOperand<HalPolicy>(model,
+ cellToOutputWeightsDimensions,
+ cellToOutputWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(cellToOutputWeightsDimensions));
+ // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ AddTensorOperand<HalPolicy>(model,
+ inputGateBiasDimensions,
+ inputGateBiasValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(inputGateBiasDimensions));
+ // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ AddTensorOperand<HalPolicy>(model, forgetGateBiasDimensions, forgetGateBiasValue);
+ // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ AddTensorOperand<HalPolicy>(model, cellBiasDimensions, cellBiasValue);
+ // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ AddTensorOperand<HalPolicy>(model, outputGateBiasDimensions, outputGateBiasValue);
+ // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [output_size, num_units].
+ AddTensorOperand<HalPolicy>(model,
+ projectionWeightsDimensions,
+ projectionWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(projectionWeightsDimensions));
+ // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
+ AddTensorOperand<HalPolicy>(model,
+ projectionBiasDimensions,
+ projectionBiasValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(projectionBiasDimensions));
+
+ // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
+ AddInputOperand<HalPolicy>(model, outputStateInDimensions);
+ // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
+ AddInputOperand<HalPolicy>(model, cellStateInDimensions);
+
+ // Constant scalar values (the VTS test adds these as tensors of dim {})
+ // 20: The activation function: A value indicating the activation function:
+ // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
+ AddTensorOperand<HalPolicy>(model,
+ activationFunctionDimensions,
+ activationFunctionValue,
+ HalPolicy::OperandType::INT32);
+ // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
+ // If set to 0.0 then clipping is disabled.
+ AddTensorOperand<HalPolicy>(model,
+ cellClippingThresholdDimensions,
+ cellClippingThresholdValue,
+ HalPolicy::OperandType::FLOAT32);
+ // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
+ // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ AddTensorOperand<HalPolicy>(model,
+ projectionClippingThresholdDimensions,
+ projectionClippingThresholdValue,
+ HalPolicy::OperandType::FLOAT32);
+
+ // 23: Time-major if true, batch-major if false.
+ AddBoolOperand<HalPolicy>(model, timeMajorValue);
+
+ // Normalization:
+ // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at input gate.
+ AddTensorOperand<HalPolicy>(model,
+ inputLayerNormWeightsDimensions,
+ inputLayerNormWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(inputLayerNormWeightsDimensions));
+ // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at forget gate.
+ AddTensorOperand<HalPolicy>(model,
+ forgetLayerNormWeightsDimensions,
+ forgetLayerNormWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(forgetLayerNormWeightsDimensions));
+ // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at cell gate.
+ AddTensorOperand<HalPolicy>(model,
+ cellLayerNormWeightsDimensions,
+ cellLayerNormWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(cellLayerNormWeightsDimensions));
+ // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at output gate.
+ AddTensorOperand<HalPolicy>(model,
+ outputLayerNormWeightsDimensions,
+ outputLayerNormWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(outputLayerNormWeightsDimensions));
+
+ // Outputs:
+ // 00: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
+ // [max_time, batch_size, output_size] If batch-major: [batch_size, max_time, output_size]
+ AddOutputOperand<HalPolicy>(model, outputDimensions);
+ // 01: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, output_size]. This output is optional and can be omitted. If this output
+ // is present then output #2 must be present as well.
+ //AddOutputOperand<HalPolicy>(model, hiddenStateOutDimensions);
+ // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, num_units]. This output is optional and can be omitted.
+ //AddOutputOperand<HalPolicy>(model, cellStateOutDimensions);
+
+ // make the lstm operation
+ model.operations.resize(1);
+ model.operations[0].type = HalPolicy::OperationType::UNIDIRECTIONAL_SEQUENCE_LSTM;
+
+ model.operations[0].inputs = hidl_vec<uint32_t> {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27};
+ model.operations[0].outputs = hidl_vec<uint32_t> {28};
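+ // Operands 0-27 are the LSTM inputs in NNAPI order; operand 28 is the output
+ // tensor added by AddOutputOperand above. The optional hidden-state and
+ // cell-state outputs (the commented-out operands) are deliberately omitted.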
+
+ // define the input values
+ hidl_vec<RequestArgument> inputArguments;
+ inputArguments.resize(3);
+
+ inputArguments[0] = CreateRequestArgument<float>(inputValue, 0);
+ inputArguments[1] = CreateRequestArgument<float>(outputStateInValue, 1);
+ inputArguments[2] = CreateRequestArgument<float>(cellStateInValue, 2);
+
+ // define the expected output values
+ hidl_vec<RequestArgument> outputArguments;
+ outputArguments.resize(1);
+
+ outputArguments[0] = CreateRequestArgument<float>(outputValue, 3);
+
+ V1_0::Request request = {};
+ request.inputs = inputArguments;
+ request.outputs = outputArguments;
+
+ // set the input data
+ AddPoolAndSetData(inputValue.size(), request, inputValue.data());
+ AddPoolAndSetData(outputStateInValue.size(), request, outputStateInValue.data());
+ AddPoolAndSetData(cellStateInValue.size(), request, cellStateInValue.data());
+
+ // add memory for the outputs
+ android::sp<IMemory> outputMemory = AddPoolAndGetData<float>(outputValue.size(), request);
+ float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
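+ // Pools 0-2 hold the input, output-state and cell-state data respectively;
+ // pool 3, added last, receives the output, matching the pool indices used in
+ // the RequestArguments above.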
+
+ // make the prepared model and run the execution
+ ExecuteModel(model, *driver, request);
+
+ // check the results
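+ // (doctest::Approx applies a small relative tolerance by default; tests that
+ // need a looser bound pass a non-zero epsilonValue.)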
+ if (epsilonValue != 0)
+ {
+ for (size_t i = 0; i < outputValue.size(); ++i)
+ {
+ DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx(outputData[i]).epsilon(epsilonValue),
+ "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+ }
+ }
+ else
+ {
+ for (size_t i = 0; i < outputValue.size(); ++i)
+ {
+ DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx(outputData[i]),
+ "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+ }
+ }
+}
+
+template<typename HalPolicy>
+void UnidirectionalSequenceLstmLayerFloat32TestImpl(armnn::Compute compute)
+{
+ uint32_t batchSize = 3;
+ uint32_t timeSize = 2;
+ uint32_t inputSize = 3;
+ uint32_t outputSize = 4;
+ uint32_t numUnits = outputSize;
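+ // No projection layer is used here, so the number of cell units equals the
+ // output size.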
+
+ // Inputs:
+ // 00: The input: A 3-D tensor of shape: If time-major: [max_time, batch_size, input_size] If batch-major:
+ // [batch_size, max_time, input_size] where “max_time” is the number of timesteps (sequence length),
+ // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
+ hidl_vec<uint32_t> inputDimensions{batchSize, timeSize, inputSize};
+ std::vector<float> inputValue{1., 2., 3., 4., 5., 4.,
+ 3., 2., 1., 2., 3., 4.,
+ 5., 4., 3., 2., 1., 2.};
+
+ // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size], where “num_units” corresponds to the number of cell units.
+ hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToInputWeightsValue{-0.49536117f, -0.0556083915f, -0.102400711f,
+ -0.117484632f, 0.3298470976f, -0.1179017122f,
+ 0.214305695f, 0.42135173085f, 0.003878414626f,
+ -0.348303917f, -0.1881275477f, 0.0343011027f};
+ // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToForgetWeightsValue{0.2415594226f, 0.15400093799f, 0.4566498398f,
+ -0.3810434485f, 0.268383264f, -0.009807467424f,
+ -0.3522925403f, -0.24275735512f, -0.28344226125f,
+ 0.13512269116f, -0.4932442977f, -0.10039821991f};
+ // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
+ hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToCellWeightsValue{-0.2504855627f, 0.184490025045f, -0.2480507493f,
+ 0.386399507f, -0.259465157985f, -0.16545993089f,
+ -0.4230232555f, 0.341664791103f, -0.18127849691f,
+ -0.2277662414f, -0.55275535589f, 0.34184026718f};
+ // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToOutputWeightsValue{0.2303854227f, 0.5218806862f, -0.4865379333f,
+ 0.53969591851f, 0.23393625035f, -0.27140527306f,
+ 0.50009280443f, 0.07511717046f, 0.3998299249f,
+ -0.51717478049f, 0.1889653282f, -0.367323637f};
+ // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
+ // “num_units”), or the second dimension of the “projection_weights”, if defined.
+ hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToInputWeightsValue{-0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
+ -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
+ 0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
+ 0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f};
+ // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToForgetWeightsValue{-0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
+ -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
+ -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
+ -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f};
+ // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToCellWeightsValue{-0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
+ -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
+ 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
+ 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f};
+ // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToOutputWeightsValue{-0.32921677827f, 0.32624614238f, -0.1388191282f,
+ -0.17879831790f, -0.15185534954f, -0.16918526583f,
+ -0.10087361183f, -0.5436913968f, 0.016758225858f,
+ 0.30454617738f, -0.41493862867f, -0.005565764375f,
+ -0.12584099173f, -0.12319286912f, 0.2407919466f,
+ -0.08879069983f};
+ // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
+ std::vector<float> cellToInputWeightsValue;
+ // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
+ std::vector<float> cellToForgetWeightsValue;
+ // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
+ std::vector<float> cellToOutputWeightsValue;
+ // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
+ std::vector<float> inputGateBiasValue(numUnits, 0.0f);
+ // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
+ std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
+ // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellBiasDimensions{numUnits};
+ std::vector<float> cellBiasValue(numUnits, 0.0f);
+ // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
+ std::vector<float> outputGateBiasValue(numUnits, 0.0f);
+ // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [output_size, num_units].
+ hidl_vec<uint32_t> projectionWeightsDimensions{0};
+ std::vector<float> projectionWeightsValue;
+ // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
+ hidl_vec<uint32_t> projectionBiasDimensions{0};
+ std::vector<float> projectionBiasValue;
+
+ // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
+ hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
+ std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
+ // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
+ hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
+ std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
+
+ // Constant scalar values (the VTS test adds these as tensors of dim {})
+ // 20: The activation function: A value indicating the activation function:
+ // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
+ hidl_vec<uint32_t> activationFunctionDimensions{};
+ std::vector<int32_t> activationFunctionValue{4};
+ // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
+ // If set to 0.0 then clipping is disabled.
+ hidl_vec<uint32_t> cellClippingThresholdDimensions{};
+ std::vector<float> cellClippingThresholdValue{10.0f};
+ // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
+ // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
+ std::vector<float> projectionClippingThresholdValue{0.f};
+
+ // 23: Time-major if true, batch-major if false.
+ bool timeMajorValue = false;
+
+ // Normalization:
+ // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at input gate.
+ hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
+ std::vector<float> inputLayerNormWeightsValue;
+ // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at forget gate.
+ hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
+ std::vector<float> forgetLayerNormWeightsValue;
+ // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at cell gate.
+ hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
+ std::vector<float> cellLayerNormWeightsValue;
+ // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at output gate.
+ hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
+ std::vector<float> outputLayerNormWeightsValue;
+
+ // Outputs:
+ // 0: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
+ // [max_time, batch_size, output_size] If batch-major: [batch_size, max_time, output_size]
+ hidl_vec<uint32_t> outputDimensions{batchSize, timeSize, outputSize};
+ std::vector<float> outputValue{-0.07149004f, -0.1621171f, -0.17516759f, -0.0232934225f,
+ -0.16810727f, -0.41412935f, -0.5498753f, -0.00803578f,
+ -0.06687349f, 0.204077631f, -0.4276504f, -0.03123213f,
+ -0.12000261f, -0.0941918f, -0.45639035f, -0.02870186f,
+ -0.03429216f, 0.20824050f, -0.6569892f, -0.004152651f,
+ -0.10493034f, 0.14210969f, -0.58347696f, -0.03297536f};
+
+ // 1: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, output_size]. This output is optional and can be omitted. If this output
+ // is present then output #2 must be present as well.
+ hidl_vec<uint32_t> hiddenStateOutDimensions{batchSize, outputSize};
+ std::vector<float> hiddenStateOutValue(batchSize * outputSize, 0.f);
+ // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, num_units]. This output is optional and can be omitted.
+ hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
+ std::vector<float> cellStateOutValue(batchSize * numUnits, 0.f);
+
+ UnidirectionalSequenceLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
+ inputToInputWeightsDimensions, inputToInputWeightsValue,
+ inputToForgetWeightsDimensions, inputToForgetWeightsValue,
+ inputToCellWeightsDimensions, inputToCellWeightsValue,
+ inputToOutputWeightsDimensions, inputToOutputWeightsValue,
+ recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
+ recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
+ recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
+ recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
+ cellToInputWeightsDimensions, cellToInputWeightsValue,
+ cellToForgetWeightsDimensions, cellToForgetWeightsValue,
+ cellToOutputWeightsDimensions, cellToOutputWeightsValue,
+ inputGateBiasDimensions, inputGateBiasValue,
+ forgetGateBiasDimensions, forgetGateBiasValue,
+ cellBiasDimensions, cellBiasValue,
+ outputGateBiasDimensions, outputGateBiasValue,
+ projectionWeightsDimensions, projectionWeightsValue,
+ projectionBiasDimensions, projectionBiasValue,
+ outputStateInDimensions, outputStateInValue,
+ cellStateInDimensions, cellStateInValue,
+ activationFunctionDimensions, activationFunctionValue,
+ cellClippingThresholdDimensions, cellClippingThresholdValue,
+ projectionClippingThresholdDimensions,
+ projectionClippingThresholdValue,
+ timeMajorValue,
+ inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
+ forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
+ cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
+ outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
+ outputDimensions, outputValue,
+ hiddenStateOutDimensions, hiddenStateOutValue,
+ cellStateOutDimensions, cellStateOutValue,
+ compute);
+}
+
+template<typename HalPolicy>
+void UnidirectionalSequenceLstmLayerFloat32TimeMajorTestImpl(armnn::Compute compute)
+{
+ uint32_t batchSize = 3;
+ uint32_t timeSize = 2;
+ uint32_t inputSize = 3;
+ uint32_t outputSize = 4;
+ uint32_t numUnits = outputSize;
+
+ // Inputs:
+ // 00: The input: A 3-D tensor of shape: If time-major: [max_time, batch_size, input_size] If batch-major:
+ // [batch_size, max_time, input_size] where “max_time” is the number of timesteps (sequence length),
+ // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
+ hidl_vec<uint32_t> inputDimensions{timeSize, batchSize, inputSize};
+ std::vector<float> inputValue{1., 2., 3., 4., 5., 4.,
+ 3., 2., 1., 2., 3., 4.,
+ 5., 4., 3., 2., 1., 2.};
+
+ // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size], where “num_units” corresponds to the number of cell units.
+ hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToInputWeightsValue{0.27277296781539917f, 0.3813590407371521f, -0.394489049911499f,
+ 0.2782636880874634f, -0.3793870210647583f, -0.018918335437774658f,
+ 0.2724653482437134f, -0.19314253330230713f, -0.2947450876235962f,
+ -0.30253493785858154f, 0.4241350293159485f, -0.22560018301010132f};
+ // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToForgetWeightsValue{-0.2667974531650543f, -0.05505800247192383f, -0.20932340621948242f,
+ -0.14345619082450867f, 0.09666192531585693f, -0.2604355812072754f,
+ -0.2681812047958374f, -0.3314584493637085f, 0.4485899806022644f,
+ -0.23467743396759033f, 0.5072842240333557f, -0.4192768931388855f};
+ // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
+ hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToCellWeightsValue{-0.15782442688941956f, -0.027530014514923096f, 0.4789854884147644f,
+ 0.23227906227111816f, 0.28259342908859253f, -0.030095696449279785f,
+ 0.10071521997451782f, -0.08535495400428772f, 0.18563997745513916f,
+ -0.3049069046974182f, -0.478048175573349f, 0.025234103202819824f};
+ // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToOutputWeightsValue{-0.04584759473800659f, -0.2716066539287567f, 0.012970447540283203f,
+ -0.4729190170764923f, -0.37422770261764526f, 0.49352723360061646f,
+ 0.3163864016532898f, -0.436781644821167f, -0.33074596524238586f,
+ -0.32885751128196716f, -0.40959352254867554f, -0.2124689817428589f};
+ // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
+ // “num_units”), or the second dimension of the “projection_weights”, if defined.
+ hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToInputWeightsValue{0.23788475990f, -0.24948765337f, 0.50044941902f,
+ 0.14431896805f, -0.115940228137f, -0.717082679f,
+ -0.17208620906f, 0.17850610617f, -0.16702319684f,
+ -0.11384502053f, -0.309785276245f, -0.3316611672f,
+ 0.52380162477f, -0.06839632987f, -0.391478359627f,
+ -0.10756178963f};
+ // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToForgetWeightsValue{0.11383482068f, 0.1676601767f, -0.08550968004f, 0.03399394089f,
+ 0.08042152225f, -0.2133381964f, 0.05182432704f, 0.38161808255f,
+ -0.5018365979f, -0.08043262364f, 0.07894329014f, -0.07547105155f,
+ 0.12047368288f, 0.2986997961f, 0.0485043078f, -0.13372567296f};
+ // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToCellWeightsValue{0.0433832928545f, 0.07587072294f, -0.120520234107f, 0.604576051f,
+ -0.434353142986f, 0.009314475068f, 0.005085289478f, 0.08488202038f,
+ -0.00025437487886f, 0.15245915082f, -0.1936587542f, 0.004754020f,
+ -0.1582719236f, 0.3307867646f, 0.0236605107784f, 0.307716339826f};
+ // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToOutputWeightsValue{-0.079031050201f, 0.041414566286f, -0.583727357285f,
+ 0.1025384515f, -0.172372072937f, 0.09214124082f,
+ 0.178184121827f, -0.2439443916f, 0.104485116899f,
+ 0.2600405514f, 0.064414866268f, 0.24141204357f,
+ 0.281875759363f, -0.14234502664f, 0.15126448862f,
+ -0.24421440064f};
+ // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
+ std::vector<float> cellToInputWeightsValue;
+ // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
+ std::vector<float> cellToForgetWeightsValue;
+ // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
+ std::vector<float> cellToOutputWeightsValue;
+ // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
+ std::vector<float> inputGateBiasValue(numUnits, 0.0f);
+ // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
+ std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
+ // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellBiasDimensions{numUnits};
+ std::vector<float> cellBiasValue(numUnits, 0.0f);
+ // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
+ std::vector<float> outputGateBiasValue(numUnits, 0.0f);
+ // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [output_size, num_units].
+ hidl_vec<uint32_t> projectionWeightsDimensions{0};
+ std::vector<float> projectionWeightsValue;
+ // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
+ hidl_vec<uint32_t> projectionBiasDimensions{0};
+ std::vector<float> projectionBiasValue;
+
+ // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
+ hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
+ std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
+ // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
+ hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
+ std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
+
+ // Constant scalar values (the VTS test adds these as tensors of dim {})
+ // 20: The activation function: A value indicating the activation function:
+ // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
+ hidl_vec<uint32_t> activationFunctionDimensions{};
+ std::vector<int32_t> activationFunctionValue{4};
+ // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
+ // If set to 0.0 then clipping is disabled.
+ hidl_vec<uint32_t> cellClippingThresholdDimensions{};
+ std::vector<float> cellClippingThresholdValue{10.0f};
+ // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
+ // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
+ std::vector<float> projectionClippingThresholdValue{0.f};
+
+ // 23: Time-major if true, batch-major if false.
+ bool timeMajorValue = true;
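+ // With time-major layout the input and output tensors use the
+ // [max_time, batch_size, ...] ordering (see inputDimensions and
+ // outputDimensions in this test).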
+
+ // Normalization:
+ // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at input gate.
+ hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
+ std::vector<float> inputLayerNormWeightsValue;
+ // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at forget gate.
+ hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
+ std::vector<float> forgetLayerNormWeightsValue;
+ // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at cell gate.
+ hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
+ std::vector<float> cellLayerNormWeightsValue;
+ // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at output gate.
+ hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
+ std::vector<float> outputLayerNormWeightsValue;
+
+ // Outputs:
+ // 0: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
+ // [max_time, batch_size, output_size] If batch-major: [batch_size, max_time, output_size]
+ hidl_vec<uint32_t> outputDimensions{timeSize, batchSize, outputSize};
+ std::vector<float> outputValue{0.135657698f, 0.124672532f, 0.0212090332f, -0.0530203655f,
+ 0.106138252f, 0.0404792242f, 0.0151643595f, -0.00675163185f,
+ -0.0128514022f, 0.0644884035f, 0.0709072053f, -0.0454045124f,
+ 0.16288602f, 0.16649379f, 0.02770456f, -0.03698075f,
+ 0.11171641f, 0.043119f, 0.0762981f, -0.01228541f,
+ 0.10439701f, 0.21439962f, 0.11919238f, -0.08390583f};
+
+ // 1: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, output_size]. This output is optional and can be omitted. If this output
+ // is present then output #2 must be present as well.
+ hidl_vec<uint32_t> hiddenStateOutDimensions{batchSize, outputSize};
+ std::vector<float> hiddenStateOutValue(batchSize * outputSize, 0.f);
+ // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, num_units]. This output is optional and can be omitted.
+ hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
+ std::vector<float> cellStateOutValue(batchSize * numUnits, 0.f);
+
+ UnidirectionalSequenceLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
+ inputToInputWeightsDimensions, inputToInputWeightsValue,
+ inputToForgetWeightsDimensions, inputToForgetWeightsValue,
+ inputToCellWeightsDimensions, inputToCellWeightsValue,
+ inputToOutputWeightsDimensions, inputToOutputWeightsValue,
+ recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
+ recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
+ recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
+ recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
+ cellToInputWeightsDimensions, cellToInputWeightsValue,
+ cellToForgetWeightsDimensions, cellToForgetWeightsValue,
+ cellToOutputWeightsDimensions, cellToOutputWeightsValue,
+ inputGateBiasDimensions, inputGateBiasValue,
+ forgetGateBiasDimensions, forgetGateBiasValue,
+ cellBiasDimensions, cellBiasValue,
+ outputGateBiasDimensions, outputGateBiasValue,
+ projectionWeightsDimensions, projectionWeightsValue,
+ projectionBiasDimensions, projectionBiasValue,
+ outputStateInDimensions, outputStateInValue,
+ cellStateInDimensions, cellStateInValue,
+ activationFunctionDimensions, activationFunctionValue,
+ cellClippingThresholdDimensions, cellClippingThresholdValue,
+ projectionClippingThresholdDimensions,
+ projectionClippingThresholdValue,
+ timeMajorValue,
+ inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
+ forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
+ cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
+ outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
+ outputDimensions, outputValue,
+ hiddenStateOutDimensions, hiddenStateOutValue,
+ cellStateOutDimensions, cellStateOutValue,
+ compute);
+}
+
+template<typename HalPolicy>
+void UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::Compute compute)
+{
+ uint32_t batchSize = 2;
+ uint32_t timeSize = 3;
+ uint32_t inputSize = 4;
+ uint32_t outputSize = 5;
+ uint32_t numUnits = 6;
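+ // A projection layer is used in this test, so the number of cell units (6)
+ // differs from the output size (5).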
+
+ // Inputs:
+ // 00: The input: A 3-D tensor of shape: If time-major: [max_time, batch_size, input_size] If batch-major:
+ // [batch_size, max_time, input_size] where “max_time” is the number of timesteps (sequence length),
+ // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
+ hidl_vec<uint32_t> inputDimensions{batchSize, timeSize, inputSize};
+ std::vector<float> inputValue{1., 2., 3., 4., 5., 4.,
+ 3., 2., 1., 2., 3., 4.,
+ 5., 4., 3., 2., 1., 2.,
+ 1., 2., 3., 4., 5., 4.};
+
+ // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size], where “num_units” corresponds to the number of cell units.
+ hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToInputWeightsValue{0.021393683f, 0.06124551f, 0.046905167f, -0.014657677f,
+ -0.03149463f, 0.09171803f, 0.14647801f, 0.10797193f,
+ -0.0057968358f, 0.0019193048f, -0.2726754f, 0.10154029f,
+ -0.018539885f, 0.080349885f, -0.10262385f, -0.022599787f,
+ -0.09121155f, -0.008675967f, -0.045206103f, -0.0821282f,
+ -0.008045952f, 0.015478081f, 0.055217247f, 0.038719587f};
+ // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToForgetWeightsValue{-0.0018401089f, -0.004852237f, 0.03698424f, 0.014181704f,
+ 0.028273236f, -0.016726194f, -0.05249759f, -0.10204261f,
+ 0.00861066f, -0.040979505f, -0.009899187f, 0.01923892f,
+ -0.028177269f, -0.08535103f, -0.14585495f, 0.10662567f,
+ -0.01909731f, -0.017883534f, -0.0047269356f, -0.045103323f,
+ 0.0030784295f, 0.076784775f, 0.07463696f, 0.094531395f};
+ // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
+ hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToCellWeightsValue{-0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
+ -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
+ -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
+ -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
+ -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
+ 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f};
+ // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToOutputWeightsValue{-0.0998932f, -0.07201956f, -0.052803773f, -0.15629593f,
+ -0.15001918f, -0.07650751f, 0.02359855f, -0.075155355f,
+ -0.08037709f, -0.15093534f, 0.029517552f, -0.04751393f,
+ 0.010350531f, -0.02664851f, -0.016839722f, -0.023121163f,
+ 0.0077019283f, 0.012851257f, -0.05040649f, -0.0129761f,
+ -0.021737747f, -0.038305793f, -0.06870586f, -0.01481247f};
+ // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
+ // “num_units”), or the second dimension of the “projection_weights”, if defined.
+ hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToInputWeightsValue{-0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
+ -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
+ -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
+ -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
+ 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f,
+ 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
+ -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
+ 0.14283475f, -0.07390571f};
+ // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToForgetWeightsValue{-0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
+ 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
+ -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
+ 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
+ 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f,
+ -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
+ -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
+ 0.061878487f, -0.04729229f};
+ // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToCellWeightsValue{-0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
+ 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
+ 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
+ -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
+ 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
+ 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
+ -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
+ -0.019443132f, -0.030755889f};
+ // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToOutputWeightsValue{0.025825322f, -0.05813119f, 0.09495884f,
+                                                     -0.045984812f, -0.01255415f, -0.0026479573f,
+ -0.08196161f, -0.054914974f, -0.0046604523f,
+ -0.029587349f, -0.044576716f, -0.07480124f,
+ -0.082868785f, 0.023254942f, 0.027502948f,
+ -0.0039728214f, -0.08683098f, -0.08116779f,
+ -0.014675607f, -0.037924774f, -0.023314456f,
+ -0.007401714f, -0.09255757f, 0.029460307f,
+ -0.08829125f, -0.005139627f, -0.08989442f,
+ -0.0555066f, 0.13596267f, 0.025062224f};
+ // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
+ std::vector<float> cellToInputWeightsValue{0.040369894f, 0.030746894f, 0.24704495f,
+ 0.018586371f, -0.037586458f, -0.15312155f};
+ // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
+ std::vector<float> cellToForgetWeightsValue{-0.01998659f, -0.15568835f, -0.24248174f,
+ -0.012770197f, 0.041331276f, -0.072311886f};
+ // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
+ std::vector<float> cellToOutputWeightsValue{0.08286371f, -0.08261836f, -0.51210177f,
+ 0.002913762f, 0.17764764f, -0.5495371f};
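
Operands 09–11 are the peephole connections: per-unit vectors that give each gate a direct view of the cell state. As a sketch of the standard peephole formulation (the usual textbook equations, not necessarily ArmNN's exact kernel), the forget gate becomes:

```latex
% Sketch of a peephole gate: p_f is the cell-to-forget weight vector
% (operand 10), applied element-wise to the previous cell state c_{t-1}.
f_t = \sigma\left( W_f x_t + R_f h_{t-1} + p_f \odot c_{t-1} + b_f \right)
```
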
+ // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
+ std::vector<float> inputGateBiasValue{0.02234832f, 0.14757581f, 0.18176508f,
+ 0.10380666f, 0.053110216f, -0.06928846f};
+ // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
+ std::vector<float> forgetGateBiasValue{0.035185695f, -0.042891346f, -0.03032477f,
+ 0.23027696f, 0.11098921f, 0.08989442f};
+ // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellBiasDimensions{numUnits};
+ std::vector<float> cellBiasValue{-0.024379363f, 0.0055531194f, 0.23377132f,
+ 0.033463873f, -0.1483596f, 0.029460307f};
+ // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
+ std::vector<float> outputGateBiasValue{0.046159424f, -0.0012809046f, 0.03563469f,
+ 0.12648113f, 0.027195795f, 0.35373217f};
+ // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [output_size, num_units].
+ hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
+ std::vector<float> projectionWeightsValue{-0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
+ 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
+ -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
+ -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
+ 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
+ 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f};
+ // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
+ hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
+ std::vector<float> projectionBiasValue(outputSize, 0.f);
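
Operands 16–17 form the optional projection layer, which maps the num_units-wide gated cell output down to output_size. A sketch of the usual projected-LSTM output (with clip denoting the projection clipping of operand 22; ArmNN's exact evaluation order may differ):

```latex
% Sketch: W_proj and b_proj are operands 16 and 17; t_proj is operand 22.
h_t = \mathrm{clip}\left( W_{proj}\,\big( o_t \odot g(c_t) \big) + b_{proj},\; t_{proj} \right)
```
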
+
+ // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
+ hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
+ std::vector<float> outputStateInValue(batchSize * outputSize, 0.f);
+ // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
+ hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
+ std::vector<float> cellStateInValue(batchSize * numUnits, 0.f);
+
+ // Constant scalar values (the VTS test adds these as tensors of dim {})
+    // 20: The activation function, encoded as:
+ // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
+ hidl_vec<uint32_t> activationFunctionDimensions{};
+ std::vector<int32_t> activationFunctionValue{4};
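
For reference, the scalar encodes the NNAPI fused-activation enum; a hedged sketch of the values named in the comment above (these tests select 4, i.e. tanh):

```cpp
// Illustration of the activation encoding described above; other values
// exist in the HAL but are not exercised by these tests.
enum class LstmActivation : int32_t
{
    None    = 0,
    Relu    = 1,
    Relu6   = 3,
    Tanh    = 4,  // activationFunctionValue{4} in these tests
    Sigmoid = 6
};
```
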
+    // 21: The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip].
+ // If set to 0.0 then clipping is disabled.
+ hidl_vec<uint32_t> cellClippingThresholdDimensions{};
+ std::vector<float> cellClippingThresholdValue{10.0f};
+    // 22: The clipping threshold for the output from the projection layer, such that values are bound within
+ // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
+ std::vector<float> projectionClippingThresholdValue{0.f};
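
A minimal sketch of how such thresholds are conventionally applied (a hypothetical helper, not the driver's actual code); a threshold of 0.0 disables the clamp, as the comments above state:

```cpp
#include <algorithm>

// Hypothetical illustration: clamp a state value to [-clip, clip],
// treating clip == 0.0f as "clipping disabled".
float ApplyClip(float value, float clip)
{
    return (clip == 0.0f) ? value : std::clamp(value, -clip, clip);
}
```
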
+
+ // 23: Time-major if true, batch-major if false.
+ bool timeMajorValue = false;
+
+ // Normalization:
+    // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at input gate.
+ hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
+ std::vector<float> inputLayerNormWeightsValue;
+    // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at forget gate.
+ hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
+ std::vector<float> forgetLayerNormWeightsValue;
+    // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at cell gate.
+ hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
+ std::vector<float> cellLayerNormWeightsValue;
+    // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at output gate.
+ hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
+ std::vector<float> outputLayerNormWeightsValue;
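
Throughout these tests an optional operand is omitted by giving it a single zero dimension and an empty value vector, as above. A hypothetical predicate capturing the convention (illustration only, not a helper from this codebase):

```cpp
// Illustration only: dimensions {0} plus no data marks an optional
// operand as absent. Works for hidl_vec or std::vector alike.
template <typename DimsContainer>
bool IsOperandOmitted(const DimsContainer& dimensions)
{
    return dimensions.size() == 1 && dimensions[0] == 0;
}
```
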
+
+ // Outputs:
+    // 0: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
+    //    [max_time, batch_size, output_size]; if batch-major: [batch_size, max_time, output_size].
+ hidl_vec<uint32_t> outputDimensions{batchSize, timeSize, outputSize};
+ std::vector<float> outputValue{-0.0135612f, -0.0263441f, 0.0314008f, -0.00883455f, 0.00763052f,
+ -0.00126877f, -0.0292959f, 0.0449957f, -0.00976195f, -0.00492338f,
+ -0.0175702f, -0.0431753f, 0.0597117f, -0.0169154f, 0.0142087f,
+ 0.00472515f, -0.0196355f, 0.0342524f, -0.00407936f, -0.0253189f,
+ -0.00512944f, -0.0293754f, 0.0512771f, -0.0151874f, -0.0246433f,
+ -0.00744986f, -0.0345103f, 0.0450666f, -0.00944991f, 0.0127171f};
+
+ // 1: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, output_size]. This output is optional and can be omitted. If this output
+ // is present then output #2 must be present as well.
+ hidl_vec<uint32_t> hiddenStateOutDimensions{batchSize, outputSize};
+ std::vector<float> hiddenStateOutValue(batchSize * outputSize, 0.f);
+ // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, num_units]. This output is optional and can be omitted.
+ hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
+ std::vector<float> cellStateOutValue(batchSize * numUnits, 0.f);
+
+ UnidirectionalSequenceLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
+ inputToInputWeightsDimensions, inputToInputWeightsValue,
+ inputToForgetWeightsDimensions, inputToForgetWeightsValue,
+ inputToCellWeightsDimensions, inputToCellWeightsValue,
+ inputToOutputWeightsDimensions, inputToOutputWeightsValue,
+ recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
+ recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
+ recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
+ recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
+ cellToInputWeightsDimensions, cellToInputWeightsValue,
+ cellToForgetWeightsDimensions, cellToForgetWeightsValue,
+ cellToOutputWeightsDimensions, cellToOutputWeightsValue,
+ inputGateBiasDimensions, inputGateBiasValue,
+ forgetGateBiasDimensions, forgetGateBiasValue,
+ cellBiasDimensions, cellBiasValue,
+ outputGateBiasDimensions, outputGateBiasValue,
+ projectionWeightsDimensions, projectionWeightsValue,
+ projectionBiasDimensions, projectionBiasValue,
+ outputStateInDimensions, outputStateInValue,
+ cellStateInDimensions, cellStateInValue,
+ activationFunctionDimensions, activationFunctionValue,
+ cellClippingThresholdDimensions, cellClippingThresholdValue,
+ projectionClippingThresholdDimensions,
+ projectionClippingThresholdValue,
+ timeMajorValue,
+ inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
+ forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
+ cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
+ outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
+ outputDimensions, outputValue,
+ hiddenStateOutDimensions, hiddenStateOutValue,
+ cellStateOutDimensions, cellStateOutValue,
+ compute, 0.0031454);
+}
+
+template<typename HalPolicy>
+void UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::Compute compute)
+{
+ uint32_t batchSize = 3;
+ uint32_t timeSize = 2;
+ uint32_t inputSize = 3;
+ uint32_t outputSize = 4;
+ uint32_t numUnits = 5;
+
+ // Inputs:
+ // 00: The input: A 3-D tensor of shape: If time-major: [max_time, batch_size, input_size] If batch-major:
+ // [batch_size, max_time, input_size] where “max_time” is the number of timesteps (sequence length),
+ // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
+ hidl_vec<uint32_t> inputDimensions{batchSize, timeSize, inputSize};
+ std::vector<float> inputValue{1., 2., 3., 4., 5., 4.,
+ 3., 2., 1., 2., 3., 4.,
+ 5., 4., 3., 2., 1., 2.};
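
Because timeMajorValue is false further down, these 18 values are laid out batch-major. A small sketch of the implied indexing (hypothetical helper, for illustration):

```cpp
#include <cstdint>
#include <vector>

// Batch-major layout: element (b, t, i) of a [batchSize, timeSize, inputSize]
// tensor lives at flat offset ((b * timeSize) + t) * inputSize + i.
float GetBatchMajorElement(const std::vector<float>& data,
                           uint32_t b, uint32_t t, uint32_t i,
                           uint32_t timeSize, uint32_t inputSize)
{
    return data[((b * timeSize) + t) * inputSize + i];
}
```
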
+
+ // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size], where “num_units” corresponds to the number of cell units.
+ hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToInputWeightsValue{-0.49536117f, -0.0556083915f, -0.102400711f,
+ -0.117484632f, 0.3298470976f, -0.1179017122f,
+ 0.214305695f, 0.42135173085f, 0.003878414626f,
+ -0.348303917f, -0.1881275477f, 0.0343011027f,
+ -0.38837709614f, -0.05636804124f, 0.4259087456f};
+ // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToForgetWeightsValue{0.2415594226f, 0.15400093799f, 0.4566498398f,
+ -0.3810434485f, 0.268383264f, -0.009807467424f,
+ -0.3522925403f, -0.24275735512f, -0.28344226125f,
+ 0.13512269116f, -0.4932442977f, -0.10039821991f,
+ 0.2726137042f, 0.09216640889f, -0.06551410215f};
+ // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
+ hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToCellWeightsValue{-0.2504855627f, 0.184490025045f, -0.2480507493f,
+ 0.386399507f, -0.259465157985f, -0.16545993089f,
+ -0.4230232555f, 0.341664791103f, -0.18127849691f,
+ -0.2277662414f, -0.55275535589f, 0.34184026718f,
+ 0.3954237699f, -0.19407111404f, 0.30412107706f};
+ // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToOutputWeightsValue{0.2303854227f, 0.5218806862f, -0.4865379333f,
+ 0.53969591851f, 0.23393625035f, -0.27140527306f,
+ 0.50009280443f, 0.07511717046f, 0.3998299249f,
+ -0.51717478049f, 0.1889653282f, -0.367323637f,
+ -0.12584099173f, -0.12319286912f, 0.2407919466f};
+ // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
+ // “num_units”), or the second dimension of the “projection_weights”, if defined.
+ hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToInputWeightsValue{-0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
+ -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
+ 0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
+ 0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f,
+ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f};
+ // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToForgetWeightsValue{-0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
+ -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
+ -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
+ -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f,
+ 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f};
+ // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToCellWeightsValue{-0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
+ -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
+ 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
+ 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f,
+ 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f};
+ // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToOutputWeightsValue{-0.32921677827f, 0.32624614238f, -0.1388191282f,
+                                                     -0.17879831790f, -0.15185534954f, -0.16918526583f,
+ -0.10087361183f, -0.5436913968f, 0.016758225858f,
+ 0.30454617738f, -0.41493862867f, -0.005565764375f,
+ -0.12584099173f, -0.12319286912f, 0.2407919466f,
+ -0.08879069983f, 0.11178309f, 0.09481031f,
+ -0.26424935f, 0.46261835f};
+ // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
+ std::vector<float> cellToInputWeightsValue{0.05f, 0.1f, 0.25f, 0.15f, -0.02f};
+ // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
+ std::vector<float> cellToForgetWeightsValue{-0.02f, -0.15f, -0.25f, -0.03f, 0.15f};
+ // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
+ std::vector<float> cellToOutputWeightsValue{0.1f, -0.1f, -0.5f, 0.05f, 0.01f};
+ // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
+ std::vector<float> inputGateBiasValue{0.03f, 0.15f, 0.22f, 0.38f, 0.05f};
+ // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
+ std::vector<float> forgetGateBiasValue{0.1f, -0.3f, -0.2f, 0.1f, 0.4f};
+ // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellBiasDimensions{numUnits};
+ std::vector<float> cellBiasValue{-0.05f, 0.72f, 0.25f, 0.08f, 0.1f};
+ // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
+ std::vector<float> outputGateBiasValue{0.05f, -0.01f, 0.2f, 0.1f, -0.2f};
+ // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [output_size, num_units].
+ hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
+ std::vector<float> projectionWeightsValue{-0.1f, 0.2f, 0.01f, -0.2f,
+ 0.1f, 0.5f, 0.3f, 0.08f,
+ 0.07f, 0.2f, -0.4f, 0.2f,
+ 0.5f, -0.4f, 0.3f, -0.2f,
+ 0.3f, 0.08f, -0.07f, 0.2f};
+ // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
+ hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
+ std::vector<float> projectionBiasValue(outputSize, 0.f);
+
+ // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
+ hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
+ std::vector<float> outputStateInValue(batchSize * outputSize, 0.f);
+ // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
+ hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
+ std::vector<float> cellStateInValue(batchSize * numUnits, 0.f);
+
+ // Constant scalar values (the VTS test adds these as tensors of dim {})
+    // 20: The activation function, encoded as:
+ // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
+ hidl_vec<uint32_t> activationFunctionDimensions{};
+ std::vector<int32_t> activationFunctionValue{4};
+    // 21: The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip].
+ // If set to 0.0 then clipping is disabled.
+ hidl_vec<uint32_t> cellClippingThresholdDimensions{};
+ std::vector<float> cellClippingThresholdValue{10.0f};
+    // 22: The clipping threshold for the output from the projection layer, such that values are bound within
+ // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
+ std::vector<float> projectionClippingThresholdValue{0.f};
+
+ // 23: Time-major if true, batch-major if false.
+ bool timeMajorValue = false;
+
+ // Normalization:
+    // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at input gate.
+ hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
+ std::vector<float> inputLayerNormWeightsValue{0.1f, 0.2f, 0.3f, 0.5f, 0.8f};
+    // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at forget gate.
+ hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
+ std::vector<float> forgetLayerNormWeightsValue{0.1f, 0.2f, 0.3f, 0.5f, 0.2f};
+    // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at cell gate.
+ hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
+ std::vector<float> cellLayerNormWeightsValue{0.7f, 0.2f, 0.3f, 0.8f, 0.5f};
+    // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at output gate.
+ hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
+ std::vector<float> outputLayerNormWeightsValue{0.6f, 0.2f, 0.2f, 0.5f, 0.1f};
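
Operands 24–27 enable layer normalisation of each gate's pre-activation. A sketch of the usual layer-norm LSTM formulation (after Ba et al.; the gamma vectors are the per-gate weights above, and ArmNN's exact epsilon and bias handling may differ):

```latex
% mu and sigma^2 are the mean and variance of the pre-activation a_g
% across the num_units dimension; gamma_g is the 1-D weight vector above.
\hat{a}_g = \gamma_g \odot \frac{a_g - \mu}{\sqrt{\sigma^2 + \epsilon}} + b_g
```
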
+
+ // Outputs:
+    // 0: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
+    //    [max_time, batch_size, output_size]; if batch-major: [batch_size, max_time, output_size].
+ hidl_vec<uint32_t> outputDimensions{batchSize, timeSize, outputSize};
+ std::vector<float> outputValue{0.0642256f, 0.0343966f, 0.184122f, 0.114717f,
+ 0.11458f, 0.0407109f, 0.300327f, 0.174301f,
+ 0.0864761f, 0.0362912f, 0.178635f, 0.115689f,
+ 0.108008f, 0.0386623f, 0.273471f, 0.167115f,
+ 0.0859545f, 0.0331481f, 0.186051f, 0.11888f,
+ 0.106649f, 0.0276847f, 0.229863f, 0.166958f};
+
+ // 1: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, output_size]. This output is optional and can be omitted. If this output
+ // is present then output #2 must be present as well.
+ hidl_vec<uint32_t> hiddenStateOutDimensions{batchSize, outputSize};
+ std::vector<float> hiddenStateOutValue(batchSize * outputSize, 0.f);
+ // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, num_units]. This output is optional and can be omitted.
+ hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
+ std::vector<float> cellStateOutValue(batchSize * numUnits, 0.f);
+
+ UnidirectionalSequenceLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
+ inputToInputWeightsDimensions, inputToInputWeightsValue,
+ inputToForgetWeightsDimensions, inputToForgetWeightsValue,
+ inputToCellWeightsDimensions, inputToCellWeightsValue,
+ inputToOutputWeightsDimensions, inputToOutputWeightsValue,
+ recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
+ recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
+ recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
+ recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
+ cellToInputWeightsDimensions, cellToInputWeightsValue,
+ cellToForgetWeightsDimensions, cellToForgetWeightsValue,
+ cellToOutputWeightsDimensions, cellToOutputWeightsValue,
+ inputGateBiasDimensions, inputGateBiasValue,
+ forgetGateBiasDimensions, forgetGateBiasValue,
+ cellBiasDimensions, cellBiasValue,
+ outputGateBiasDimensions, outputGateBiasValue,
+ projectionWeightsDimensions, projectionWeightsValue,
+ projectionBiasDimensions, projectionBiasValue,
+ outputStateInDimensions, outputStateInValue,
+ cellStateInDimensions, cellStateInValue,
+ activationFunctionDimensions, activationFunctionValue,
+ cellClippingThresholdDimensions, cellClippingThresholdValue,
+ projectionClippingThresholdDimensions,
+ projectionClippingThresholdValue,
+ timeMajorValue,
+ inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
+ forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
+ cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
+ outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
+ outputDimensions, outputValue,
+ hiddenStateOutDimensions, hiddenStateOutValue,
+ cellStateOutDimensions, cellStateOutValue,
+ compute);
+}
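
These templated impls are instantiated per HAL version by the per-version test sources. A minimal sketch of such a caller (the test name, HAL policy and backend here are illustrative choices, mirroring the doctest style used elsewhere in this patch):

```cpp
DOCTEST_TEST_CASE("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNorm_CpuRef")
{
    // Illustrative only: exercise the layer-norm variant on the reference backend.
    UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<hal_1_2::HalPolicy>(
        armnn::Compute::CpuRef);
}
```
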
+
+template<typename HalPolicy>
+void UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTestImpl(armnn::Compute compute)
+{
+ uint32_t batchSize = 3;
+ uint32_t timeSize = 2;
+ uint32_t inputSize = 3;
+ uint32_t outputSize = 4;
+ uint32_t numUnits = outputSize;
+
+ // Inputs:
+ // 00: The input: A 3-D tensor of shape: If time-major: [max_time, batch_size, input_size] If batch-major:
+ // [batch_size, max_time, input_size] where “max_time” is the number of timesteps (sequence length),
+ // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
+ hidl_vec<uint32_t> inputDimensions{batchSize, timeSize, inputSize};
+ std::vector<float> inputValue{1., 2., 3., 4., 5., 4.,
+ 3., 2., 1., 2., 3., 4.,
+ 5., 4., 3., 2., 1., 2.};
+
+ // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size], where “num_units” corresponds to the number of cell units.
+ hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
+ std::vector<float> inputToInputWeightsValue;
+ // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToForgetWeightsValue{0.2415594226f, 0.15400093799f, 0.4566498398f,
+ -0.3810434485f, 0.268383264f, -0.009807467424f,
+ -0.3522925403f, -0.24275735512f, -0.28344226125f,
+ 0.13512269116f, -0.4932442977f, -0.10039821991f};
+ // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
+ hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToCellWeightsValue{-0.2504855627f, 0.184490025045f, -0.2480507493f,
+ 0.386399507f, -0.259465157985f, -0.16545993089f,
+ -0.4230232555f, 0.341664791103f, -0.18127849691f,
+ -0.2277662414f, -0.55275535589f, 0.34184026718f};
+ // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
+ std::vector<float> inputToOutputWeightsValue{0.2303854227f, 0.5218806862f, -0.4865379333f,
+ 0.53969591851f, 0.23393625035f, -0.27140527306f,
+ 0.50009280443f, 0.07511717046f, 0.3998299249f,
+ -0.51717478049f, 0.1889653282f, -0.367323637f};
+ // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
+ // “num_units”), or the second dimension of the “projection_weights”, if defined.
+ hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0};
+ std::vector<float> recurrentToInputWeightsValue;
+ // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToForgetWeightsValue{-0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
+ -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
+ -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
+ -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f};
+ // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToCellWeightsValue{-0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
+ -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
+ 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
+ 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f};
+ // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, output_size].
+ hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
+ std::vector<float> recurrentToOutputWeightsValue{-0.32921677827f, 0.32624614238f, -0.1388191282f,
+ -0.17879831790f, -0.15185534954f, -0.16918526583f,
+ -0.10087361183f, -0.5436913968f, 0.016758225858f,
+ 0.30454617738f, -0.41493862867f, -0.005565764375f,
+ -0.12584099173f, -0.12319286912f, 0.2407919466f,
+ -0.08879069983f};
+ // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
+ std::vector<float> cellToInputWeightsValue;
+ // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
+ std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
+ // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
+ std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
+ // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> inputGateBiasDimensions{0};
+ std::vector<float> inputGateBiasValue;
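
With CIFG ("coupled input and forget gate"), the input-gate operands (01, 05, 09 and 12 above) are all omitted because the input gate is derived from the forget gate rather than learned separately. Roughly:

```latex
% CIFG coupling (sketch): the input gate is the complement of the
% forget gate, so no separate input-gate parameters are needed.
i_t = 1 - f_t
```
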
+ // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
+ std::vector<float> forgetGateBiasValue{1., 1., 1., 1.};
+ // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> cellBiasDimensions{numUnits};
+ std::vector<float> cellBiasValue{0., 0., 0., 0.};
+ // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+ hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
+ std::vector<float> outputGateBiasValue{0., 0., 0., 0.};
+ // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [output_size, num_units].
+ hidl_vec<uint32_t> projectionWeightsDimensions{0};
+ std::vector<float> projectionWeightsValue;
+ // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
+ hidl_vec<uint32_t> projectionBiasDimensions{0};
+ std::vector<float> projectionBiasValue;
+
+ // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
+ hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
+ std::vector<float> outputStateInValue(batchSize * outputSize, 0.f);
+ // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
+ hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
+ std::vector<float> cellStateInValue(batchSize * numUnits, 0.f);
+
+ // Constant scalar values (the VTS test adds these as tensors of dim {})
+    // 20: The activation function, encoded as:
+ // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
+ hidl_vec<uint32_t> activationFunctionDimensions{};
+ std::vector<int32_t> activationFunctionValue{4};
+    // 21: The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip].
+ // If set to 0.0 then clipping is disabled.
+ hidl_vec<uint32_t> cellClippingThresholdDimensions{};
+ std::vector<float> cellClippingThresholdValue{10.0f};
+    // 22: The clipping threshold for the output from the projection layer, such that values are bound within
+ // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
+ std::vector<float> projectionClippingThresholdValue{0.f};
+
+ // 23: Time-major if true, batch-major if false.
+ bool timeMajorValue = false;
+
+ // Normalization:
+    // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at input gate.
+ hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
+ std::vector<float> inputLayerNormWeightsValue;
+    // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at forget gate.
+ hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
+ std::vector<float> forgetLayerNormWeightsValue;
+    // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at cell gate.
+ hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
+ std::vector<float> cellLayerNormWeightsValue;
+    // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
+ // Used to rescale normalized inputs to activation at output gate.
+ hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
+ std::vector<float> outputLayerNormWeightsValue;
+
+ // Outputs:
+    // 0: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
+    //    [max_time, batch_size, output_size]; if batch-major: [batch_size, max_time, output_size].
+ hidl_vec<uint32_t> outputDimensions{batchSize, timeSize, outputSize};
+ std::vector<float> outputValue{-0.0129257f, -0.070531f, -0.153508f, -0.0392391f,
+ -0.0300169f, -0.195717f, -0.528679f, -0.0818106f,
+ -0.0332748f, 0.155429f, -0.353966f, -0.0801505f,
+ -0.032312f, -0.0407911f, -0.435053f, -0.0932317f,
+ -0.0108233f, 0.165584f, -0.640424f, -0.0447535f,
+ -0.031675f, 0.125987f, -0.526695f, -0.110093f};
+
+ // 1: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, output_size]. This output is optional and can be omitted. If this output
+ // is present then output #2 must be present as well.
+ hidl_vec<uint32_t> hiddenStateOutDimensions{batchSize, outputSize};
+ std::vector<float> hiddenStateOutValue(batchSize * outputSize, 0.f);
+ // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
+ // [batch_size, num_units]. This output is optional and can be omitted.
+ hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
+ std::vector<float> cellStateOutValue(batchSize * numUnits, 0.f);
+
+ UnidirectionalSequenceLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
+ inputToInputWeightsDimensions, inputToInputWeightsValue,
+ inputToForgetWeightsDimensions, inputToForgetWeightsValue,
+ inputToCellWeightsDimensions, inputToCellWeightsValue,
+ inputToOutputWeightsDimensions, inputToOutputWeightsValue,
+ recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
+ recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
+ recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
+ recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
+ cellToInputWeightsDimensions, cellToInputWeightsValue,
+ cellToForgetWeightsDimensions, cellToForgetWeightsValue,
+ cellToOutputWeightsDimensions, cellToOutputWeightsValue,
+ inputGateBiasDimensions, inputGateBiasValue,
+ forgetGateBiasDimensions, forgetGateBiasValue,
+ cellBiasDimensions, cellBiasValue,
+ outputGateBiasDimensions, outputGateBiasValue,
+ projectionWeightsDimensions, projectionWeightsValue,
+ projectionBiasDimensions, projectionBiasValue,
+ outputStateInDimensions, outputStateInValue,
+ cellStateInDimensions, cellStateInValue,
+ activationFunctionDimensions, activationFunctionValue,
+ cellClippingThresholdDimensions, cellClippingThresholdValue,
+ projectionClippingThresholdDimensions,
+ projectionClippingThresholdValue,
+ timeMajorValue,
+ inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
+ forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
+ cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
+ outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
+ outputDimensions, outputValue,
+ hiddenStateOutDimensions, hiddenStateOutValue,
+ cellStateOutDimensions, cellStateOutValue,
+ compute);
+}
\ No newline at end of file
diff --git a/test/UtilsTests.cpp b/test/UtilsTests.cpp
index de84bb49..68d7b501 100644
--- a/test/UtilsTests.cpp
+++ b/test/UtilsTests.cpp
@@ -1,23 +1,18 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "DriverTestHelpers.hpp"
-#include <boost/test/unit_test.hpp>
#include <log/log.h>
-#include "../Utils.hpp"
#include <armnn/src/armnn/OptimizedNetworkImpl.hpp>
#include <fstream>
-#include <iomanip>
#include <memory>
#include <armnn/INetwork.hpp>
-#include "armnn/NetworkFwd.hpp"
-
-#include <Filesystem.hpp>
+#include <armnnUtils/Filesystem.hpp>
using namespace android;
using namespace android::nn;
@@ -50,7 +45,7 @@ public:
return stream.good() ? ::armnn::Status::Success : ::armnn::Status::Failure;
}
- ::armnn::profiling::ProfilingGuid GetGuid() const final { return ::armnn::profiling::ProfilingGuid(0); }
+ ::arm::pipe::ProfilingGuid GetGuid() const final { return ::arm::pipe::ProfilingGuid(0); }
void UpdateMockSerializedContent(const std::string& mockSerializedContent)
{
@@ -64,7 +59,6 @@ private:
} // armnn namespace
-BOOST_AUTO_TEST_SUITE(UtilsTests)
// The following are helpers for writing unit tests for the driver.
namespace
@@ -78,10 +72,9 @@ public:
ExportNetworkGraphFixture()
: ExportNetworkGraphFixture("/data")
{}
+
ExportNetworkGraphFixture(const std::string& requestInputsAndOutputsDumpDir)
- : m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
- , m_FileName()
- , m_FileStream()
+ : m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir), m_FileName(), m_FileStream()
{
// Set the name of the output .dot file.
// NOTE: the export now uses a time stamp to name the file so we
@@ -97,7 +90,7 @@ public:
m_FileStream.close();
// Ignore any error (such as file not found).
- (void)remove(m_FileName.c_str());
+ (void) remove(m_FileName.c_str());
}
bool FileExists()
@@ -147,10 +140,12 @@ private:
};
-
} // namespace
-BOOST_AUTO_TEST_CASE(ExportToEmptyDirectory)
+DOCTEST_TEST_SUITE("UtilsTests")
+{
+
+DOCTEST_TEST_CASE("ExportToEmptyDirectory")
{
// Set the fixture for this test.
ExportNetworkGraphFixture fixture("");
@@ -167,13 +162,13 @@ BOOST_AUTO_TEST_CASE(ExportToEmptyDirectory)
// Export the mock optimized network.
fixture.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
- fixture.m_RequestInputsAndOutputsDumpDir);
+ fixture.m_RequestInputsAndOutputsDumpDir);
// Check that the output file does not exist.
- BOOST_TEST(!fixture.FileExists());
+ DOCTEST_CHECK(!fixture.FileExists());
}
-BOOST_AUTO_TEST_CASE(ExportNetwork)
+DOCTEST_TEST_CASE("ExportNetwork")
{
// Set the fixture for this test.
ExportNetworkGraphFixture fixture;
@@ -191,16 +186,16 @@ BOOST_AUTO_TEST_CASE(ExportNetwork)
// Export the mock optimized network.
fixture.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
- fixture.m_RequestInputsAndOutputsDumpDir);
+ fixture.m_RequestInputsAndOutputsDumpDir);
// Check that the output file exists and that it has the correct name.
- BOOST_TEST(fixture.FileExists());
+ DOCTEST_CHECK(fixture.FileExists());
// Check that the content of the output file matches the mock content.
- BOOST_TEST(fixture.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture.GetFileContent() == mockSerializedContent);
}
-BOOST_AUTO_TEST_CASE(ExportNetworkOverwriteFile)
+DOCTEST_TEST_CASE("ExportNetworkOverwriteFile")
{
// Set the fixture for this test.
ExportNetworkGraphFixture fixture;
@@ -217,13 +212,13 @@ BOOST_AUTO_TEST_CASE(ExportNetworkOverwriteFile)
// Export the mock optimized network.
fixture.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
- fixture.m_RequestInputsAndOutputsDumpDir);
+ fixture.m_RequestInputsAndOutputsDumpDir);
// Check that the output file exists and that it has the correct name.
- BOOST_TEST(fixture.FileExists());
+ DOCTEST_CHECK(fixture.FileExists());
// Check that the content of the output file matches the mock content.
- BOOST_TEST(fixture.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture.GetFileContent() == mockSerializedContent);
// Update the mock serialized content of the network.
mockSerializedContent = "This is ANOTHER mock serialized content!";
@@ -235,16 +230,16 @@ BOOST_AUTO_TEST_CASE(ExportNetworkOverwriteFile)
// Export the mock optimized network.
fixture.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork2,
- fixture.m_RequestInputsAndOutputsDumpDir);
+ fixture.m_RequestInputsAndOutputsDumpDir);
// Check that the output file still exists and that it has the correct name.
- BOOST_TEST(fixture.FileExists());
+ DOCTEST_CHECK(fixture.FileExists());
// Check that the content of the output file matches the mock content.
- BOOST_TEST(fixture.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture.GetFileContent() == mockSerializedContent);
}
-BOOST_AUTO_TEST_CASE(ExportMultipleNetworks)
+DOCTEST_TEST_CASE("ExportMultipleNetworks")
{
// Set the fixtures for this test.
ExportNetworkGraphFixture fixture1;
@@ -263,32 +258,32 @@ BOOST_AUTO_TEST_CASE(ExportMultipleNetworks)
// Export the mock optimized network.
fixture1.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
- fixture1.m_RequestInputsAndOutputsDumpDir);
+ fixture1.m_RequestInputsAndOutputsDumpDir);
// Check that the output file exists and that it has the correct name.
- BOOST_TEST(fixture1.FileExists());
+ DOCTEST_CHECK(fixture1.FileExists());
// Check that the content of the output file matches the mock content.
- BOOST_TEST(fixture1.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture1.GetFileContent() == mockSerializedContent);
// Export the mock optimized network.
fixture2.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
- fixture2.m_RequestInputsAndOutputsDumpDir);
+ fixture2.m_RequestInputsAndOutputsDumpDir);
// Check that the output file exists and that it has the correct name.
- BOOST_TEST(fixture2.FileExists());
+ DOCTEST_CHECK(fixture2.FileExists());
// Check that the content of the output file matches the mock content.
- BOOST_TEST(fixture2.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture2.GetFileContent() == mockSerializedContent);
// Export the mock optimized network.
fixture3.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
- fixture3.m_RequestInputsAndOutputsDumpDir);
+ fixture3.m_RequestInputsAndOutputsDumpDir);
// Check that the output file exists and that it has the correct name.
- BOOST_TEST(fixture3.FileExists());
+ DOCTEST_CHECK(fixture3.FileExists());
// Check that the content of the output file matches the mock content.
- BOOST_TEST(fixture3.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture3.GetFileContent() == mockSerializedContent);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
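
For reference, the mechanical Boost.Test to doctest migration applied throughout this patch follows a simple mapping (a summary of the substitutions visible in the hunks above):

```cpp
// BOOST_AUTO_TEST_SUITE(Name)  ->  DOCTEST_TEST_SUITE("Name") {  (explicit braces)
// BOOST_AUTO_TEST_CASE(Name)   ->  DOCTEST_TEST_CASE("Name")
// BOOST_TEST(expr)             ->  DOCTEST_CHECK(expr)
// BOOST_AUTO_TEST_SUITE_END()  ->  }  (the suite's closing brace)
```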