about summary refs log tree commit diff
path: root/test
diff options
context:
space:
mode:
authorAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-06-14 15:45:03 +0100
committerÁron Virginás-Tar <aron.virginas-tar@arm.com>2019-06-17 10:03:58 +0000
commit44cfd848c1913f87a77c0427450dba93ba47fb94 (patch)
treea0260bf155f06879042a30c2e8dafe66fc9718a4 /test
parentcd700e4f0db201bc3066605058dc1c87d483833f (diff)
downloadandroid-nn-driver-44cfd848c1913f87a77c0427450dba93ba47fb94.tar.gz
IVGCVSW-3283 Add test for converting CONV2D and DEPTHWISE_CONV2D operators with dilation params
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I51a9c71d7a277ab530ac35faea2e8a069c134f45
Diffstat (limited to 'test')
-rw-r--r-- test/1.1/Mean.cpp 23
-rw-r--r-- test/1.1/Transpose.cpp 32
-rw-r--r-- test/1.2/Dilation.cpp 94
-rw-r--r-- test/Android.mk 86
-rw-r--r-- test/Concat.cpp 23
-rw-r--r-- test/Concurrent.cpp 22
-rw-r--r-- test/Convolution2D.hpp 16
-rw-r--r-- test/Dilation.hpp 179
-rw-r--r-- test/DriverTestHelpers.hpp 127
-rw-r--r-- test/FullyConnected.cpp 48
-rw-r--r-- test/GenericLayerTests.cpp 111
-rw-r--r-- test/Lstm.cpp 119
12 files changed, 690 insertions, 190 deletions
diff --git a/test/1.1/Mean.cpp b/test/1.1/Mean.cpp
index cf9ddcb2..6e96d84b 100644
--- a/test/1.1/Mean.cpp
+++ b/test/1.1/Mean.cpp
@@ -6,6 +6,8 @@
#include "../DriverTestHelpers.hpp"
#include "../TestTensor.hpp"
+#include "../1.1/HalPolicy.hpp"
+
#include <boost/array.hpp>
#include <boost/test/data/test_case.hpp>
@@ -15,6 +17,8 @@ using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
+using HalPolicy = hal_1_1::HalPolicy;
+
namespace
{
@@ -34,14 +38,21 @@ void MeanTestImpl(const TestTensor& input,
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice, fp16Enabled));
- V1_1::Model model = {};
- AddInputOperand (model, input.GetDimensions());
- AddTensorOperand(model, axisDimensions, const_cast<int32_t*>(axisValues), V1_0::OperandType::TENSOR_INT32);
- AddIntOperand (model, keepDims);
- AddOutputOperand(model, expectedOutput.GetDimensions());
+ HalPolicy::Model model = {};
+
+ AddInputOperand<HalPolicy>(model, input.GetDimensions());
+
+ AddTensorOperand<HalPolicy>(model,
+ axisDimensions,
+ const_cast<int32_t*>(axisValues),
+ HalPolicy::OperandType::TENSOR_INT32);
+
+ AddIntOperand<HalPolicy>(model, keepDims);
+
+ AddOutputOperand<HalPolicy>(model, expectedOutput.GetDimensions());
model.operations.resize(1);
- model.operations[0].type = V1_1::OperationType::MEAN;
+ model.operations[0].type = HalPolicy::OperationType::MEAN;
model.operations[0].inputs = hidl_vec<uint32_t>{ 0, 1, 2 };
model.operations[0].outputs = hidl_vec<uint32_t>{ 3 };
model.relaxComputationFloat32toFloat16 = fp16Enabled;
diff --git a/test/1.1/Transpose.cpp b/test/1.1/Transpose.cpp
index e32a25fe..f2c77b3f 100644
--- a/test/1.1/Transpose.cpp
+++ b/test/1.1/Transpose.cpp
@@ -2,14 +2,19 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+#include "OperationsUtils.h"
+
#include "../DriverTestHelpers.hpp"
-#include <boost/test/unit_test.hpp>
-#include <boost/array.hpp>
-#include <log/log.h>
#include "../TestTensor.hpp"
-#include "OperationsUtils.h"
+
+#include "../1.1/HalPolicy.hpp"
+
+#include <boost/array.hpp>
+#include <boost/test/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
+#include <log/log.h>
+
#include <cmath>
BOOST_AUTO_TEST_SUITE(TransposeTests)
@@ -18,6 +23,8 @@ using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
+using HalPolicy = hal_1_1::HalPolicy;
+
namespace
{
@@ -31,14 +38,19 @@ void TransposeTestImpl(const TestTensor & inputs, int32_t perm[],
const TestTensor & expectedOutputTensor, armnn::Compute computeDevice)
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice));
- V1_1::Model model = {};
+ HalPolicy::Model model = {};
- AddInputOperand(model,inputs.GetDimensions());
- AddTensorOperand(model, hidl_vec<uint32_t>{4}, perm, V1_0::OperandType::TENSOR_INT32);
- AddOutputOperand(model, expectedOutputTensor.GetDimensions());
+ AddInputOperand<HalPolicy>(model,inputs.GetDimensions());
+
+ AddTensorOperand<HalPolicy>(model,
+ hidl_vec<uint32_t>{4},
+ perm,
+ HalPolicy::OperandType::TENSOR_INT32);
+
+ AddOutputOperand<HalPolicy>(model, expectedOutputTensor.GetDimensions());
model.operations.resize(1);
- model.operations[0].type = V1_1::OperationType::TRANSPOSE;
+ model.operations[0].type = HalPolicy::OperationType::TRANSPOSE;
model.operations[0].inputs = hidl_vec<uint32_t>{0, 1};
model.operations[0].outputs = hidl_vec<uint32_t>{2};
@@ -84,8 +96,8 @@ void TransposeTestImpl(const TestTensor & inputs, int32_t perm[],
{
BOOST_TEST(outdata[i] == expectedOutput[i]);
}
-
}
+
} // namespace
BOOST_DATA_TEST_CASE(Transpose , COMPUTE_DEVICES)
diff --git a/test/1.2/Dilation.cpp b/test/1.2/Dilation.cpp
new file mode 100644
index 00000000..1a7ba4b4
--- /dev/null
+++ b/test/1.2/Dilation.cpp
@@ -0,0 +1,94 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../Dilation.hpp"
+
+#include "../../1.2/HalPolicy.hpp"
+
+#include <boost/test/data/test_case.hpp>
+
+BOOST_AUTO_TEST_SUITE(DilationTests)
+
+BOOST_AUTO_TEST_CASE(ConvolutionExplicitPaddingNoDilation)
+{
+ DilationTestOptions options;
+ options.m_IsDepthwiseConvolution = false;
+ options.m_IsPaddingExplicit = true;
+ options.m_HasDilation = false;
+
+ DilationTestImpl<hal_1_2::HalPolicy>(options);
+}
+
+BOOST_AUTO_TEST_CASE(ConvolutionExplicitPaddingDilation)
+{
+ DilationTestOptions options;
+ options.m_IsDepthwiseConvolution = false;
+ options.m_IsPaddingExplicit = true;
+ options.m_HasDilation = true;
+
+ DilationTestImpl<hal_1_2::HalPolicy>(options);
+}
+
+BOOST_AUTO_TEST_CASE(ConvolutionImplicitPaddingNoDilation)
+{
+ DilationTestOptions options;
+ options.m_IsDepthwiseConvolution = false;
+ options.m_IsPaddingExplicit = false;
+ options.m_HasDilation = false;
+
+ DilationTestImpl<hal_1_2::HalPolicy>(options);
+}
+
+BOOST_AUTO_TEST_CASE(ConvolutionImplicitPaddingDilation)
+{
+ DilationTestOptions options;
+ options.m_IsDepthwiseConvolution = false;
+ options.m_IsPaddingExplicit = false;
+ options.m_HasDilation = true;
+
+ DilationTestImpl<hal_1_2::HalPolicy>(options);
+}
+
+BOOST_AUTO_TEST_CASE(DepthwiseConvolutionExplicitPaddingNoDilation)
+{
+ DilationTestOptions options;
+ options.m_IsDepthwiseConvolution = true;
+ options.m_IsPaddingExplicit = true;
+ options.m_HasDilation = false;
+
+ DilationTestImpl<hal_1_2::HalPolicy>(options);
+}
+
+BOOST_AUTO_TEST_CASE(DepthwiseConvolutionExplicitPaddingDilation)
+{
+ DilationTestOptions options;
+ options.m_IsDepthwiseConvolution = true;
+ options.m_IsPaddingExplicit = true;
+ options.m_HasDilation = true;
+
+ DilationTestImpl<hal_1_2::HalPolicy>(options);
+}
+
+BOOST_AUTO_TEST_CASE(DepthwiseConvolutionImplicitPaddingNoDilation)
+{
+ DilationTestOptions options;
+ options.m_IsDepthwiseConvolution = true;
+ options.m_IsPaddingExplicit = false;
+ options.m_HasDilation = false;
+
+ DilationTestImpl<hal_1_2::HalPolicy>(options);
+}
+
+BOOST_AUTO_TEST_CASE(DepthwiseConvolutionImplicitPaddingDilation)
+{
+ DilationTestOptions options;
+ options.m_IsDepthwiseConvolution = true;
+ options.m_IsPaddingExplicit = false;
+ options.m_HasDilation = true;
+
+ DilationTestImpl<hal_1_2::HalPolicy>(options);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/test/Android.mk b/test/Android.mk
index 8f62149e..0ba319b7 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -203,3 +203,89 @@ include $(BUILD_EXECUTABLE)
endif # PLATFORM_VERSION == 9
+ifeq ($(Q_OR_LATER),1)
+# The following target is available starting from Android Q
+
+##########################
+# armnn-driver-tests@1.2 #
+##########################
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := armnn-driver-tests@1.2
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_ARM_MODE := arm
+LOCAL_PROPRIETARY_MODULE := true
+
+# Mark source files as dependent on Android.mk
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_C_INCLUDES := \
+ $(OPENCL_HEADER_PATH) \
+ $(NN_HEADER_PATH) \
+ $(ARMNN_HEADER_PATH) \
+ $(ARMNN_DRIVER_HEADER_PATH)
+
+LOCAL_CFLAGS := \
+ -std=c++14 \
+ -fexceptions \
+ -Werror \
+ -O0 \
+ -UNDEBUG \
+ -DARMNN_ANDROID_Q \
+ -DARMNN_ANDROID_NN_V1_2
+
+LOCAL_SRC_FILES := \
+ 1.0/Convolution2D.cpp \
+ 1.1/Convolution2D.cpp \
+ 1.1/Mean.cpp \
+ 1.1/Transpose.cpp \
+ 1.2/Dilation.cpp \
+ Tests.cpp \
+ UtilsTests.cpp \
+ Concurrent.cpp \
+ FullyConnected.cpp \
+ GenericLayerTests.cpp \
+ DriverTestHelpers.cpp \
+ SystemProperties.cpp \
+ Lstm.cpp \
+ Concat.cpp \
+ TestTensor.cpp
+
+LOCAL_STATIC_LIBRARIES := \
+ libneuralnetworks_common \
+ libboost_log \
+ libboost_system \
+ libboost_unit_test_framework \
+ libboost_thread \
+ armnn-arm_compute
+
+LOCAL_WHOLE_STATIC_LIBRARIES := \
+ libarmnn-driver@1.2
+
+LOCAL_SHARED_LIBRARIES := \
+ libbase \
+ libcutils \
+ libfmq \
+ libhidlbase \
+ libhidltransport \
+ libhidlmemory \
+ liblog \
+ libnativewindow \
+ libtextclassifier_hash \
+ libui \
+ libutils \
+ android.hardware.neuralnetworks@1.0 \
+ android.hardware.neuralnetworks@1.1 \
+ android.hardware.neuralnetworks@1.2 \
+ android.hidl.allocator@1.0 \
+ android.hidl.memory@1.0
+
+ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
+LOCAL_SHARED_LIBRARIES+= \
+ libOpenCL
+endif
+
+include $(BUILD_EXECUTABLE)
+
+endif # PLATFORM_VERSION == Q
\ No newline at end of file
diff --git a/test/Concat.cpp b/test/Concat.cpp
index b5ea689e..02d66cb8 100644
--- a/test/Concat.cpp
+++ b/test/Concat.cpp
@@ -4,11 +4,14 @@
//
#include "DriverTestHelpers.hpp"
#include "TestTensor.hpp"
+
+#include "../1.0/HalPolicy.hpp"
+
#include <boost/array.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
-#include <log/log.h>
+#include <log/log.h>
BOOST_AUTO_TEST_SUITE(ConcatTests)
@@ -16,6 +19,8 @@ using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
+using HalPolicy = hal_1_0::HalPolicy;
+
namespace
{
@@ -34,31 +39,31 @@ ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
ErrorStatus expectedExecStatus=ErrorStatus::NONE)
{
std::unique_ptr<ArmnnDriver> driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice));
- V1_0::Model model{};
+ HalPolicy::Model model{};
hidl_vec<uint32_t> modelInputIds;
modelInputIds.resize(inputs.size()+1);
for (uint32_t i = 0; i<inputs.size(); ++i)
{
modelInputIds[i] = i;
- AddInputOperand(model, inputs[i]->GetDimensions());
+ AddInputOperand<HalPolicy>(model, inputs[i]->GetDimensions());
}
modelInputIds[inputs.size()] = inputs.size(); // add an id for the axis too
- AddIntOperand(model, concatAxis);
- AddOutputOperand(model, expectedOutputTensor.GetDimensions());
+ AddIntOperand<HalPolicy>(model, concatAxis);
+ AddOutputOperand<HalPolicy>(model, expectedOutputTensor.GetDimensions());
// make the concat operation
model.operations.resize(1);
- model.operations[0].type = V1_0::OperationType::CONCATENATION;
+ model.operations[0].type = HalPolicy::OperationType::CONCATENATION;
model.operations[0].inputs = modelInputIds;
model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};
// make the prepared model
ErrorStatus prepareStatus=ErrorStatus::NONE;
android::sp<V1_0::IPreparedModel> preparedModel = PrepareModelWithStatus(model,
- *driver,
- prepareStatus,
- expectedPrepareStatus);
+ *driver,
+ prepareStatus,
+ expectedPrepareStatus);
BOOST_TEST(prepareStatus == expectedPrepareStatus);
if (prepareStatus != ErrorStatus::NONE)
{
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
index 0848a88c..87ac2e80 100644
--- a/test/Concurrent.cpp
+++ b/test/Concurrent.cpp
@@ -3,13 +3,19 @@
// SPDX-License-Identifier: MIT
//
#include "DriverTestHelpers.hpp"
+
+#include "../1.0/HalPolicy.hpp"
+
#include <boost/test/unit_test.hpp>
+
#include <log/log.h>
BOOST_AUTO_TEST_SUITE(ConcurrentDriverTests)
-using ArmnnDriver = armnn_driver::ArmnnDriver;
+using ArmnnDriver = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;
+using HalPolicy = armnn_driver::hal_1_0::HalPolicy;
+
using namespace android::nn;
using namespace android::hardware;
using namespace driverTestHelpers;
@@ -24,22 +30,22 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
ALOGI("ConcurrentExecute: entry");
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
- V1_0::Model model = {};
+ HalPolicy::Model model = {};
// add operands
int32_t actValue = 0;
float weightValue[] = {2, 4, 1};
float biasValue[] = {4};
- AddInputOperand(model, hidl_vec<uint32_t>{1, 3});
- AddTensorOperand(model, hidl_vec<uint32_t>{1, 3}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand(model, actValue);
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1});
+ AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3});
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3}, weightValue);
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);
+ AddIntOperand<HalPolicy>(model, actValue);
+ AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1});
// make the fully connected operation
model.operations.resize(1);
- model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
+ model.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED;
model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
model.operations[0].outputs = hidl_vec<uint32_t>{4};
diff --git a/test/Convolution2D.hpp b/test/Convolution2D.hpp
index c8d573d7..ec43ae35 100644
--- a/test/Convolution2D.hpp
+++ b/test/Convolution2D.hpp
@@ -57,14 +57,14 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled
float weightValue[] = {1.f, -1.f, 0.f, 1.f};
float biasValue[] = {0.f};
- AddInputOperand(model, hidl_vec<uint32_t>{1, 2, 3, 1});
- AddTensorOperand(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand(model, (int32_t)paddingScheme); // padding
- AddIntOperand(model, 2); // stride x
- AddIntOperand(model, 2); // stride y
- AddIntOperand(model, 0); // no activation
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, outSize, 1});
+ AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 3, 1});
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);
+ AddIntOperand<HalPolicy>(model, (int32_t)paddingScheme); // padding
+ AddIntOperand<HalPolicy>(model, 2); // stride x
+ AddIntOperand<HalPolicy>(model, 2); // stride y
+ AddIntOperand<HalPolicy>(model, 0); // no activation
+ AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, outSize, 1});
// make the convolution operation
model.operations.resize(1);
diff --git a/test/Dilation.hpp b/test/Dilation.hpp
new file mode 100644
index 00000000..adc9947d
--- /dev/null
+++ b/test/Dilation.hpp
@@ -0,0 +1,179 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "DriverTestHelpers.hpp"
+
+#include <armnn/LayerVisitorBase.hpp>
+
+#include <boost/core/ignore_unused.hpp>
+#include <boost/test/unit_test.hpp>
+
+#include <numeric>
+
+BOOST_AUTO_TEST_SUITE(DilationTests)
+
+using namespace armnn;
+using namespace boost;
+using namespace driverTestHelpers;
+
+struct DilationTestOptions
+{
+ DilationTestOptions() :
+ m_IsDepthwiseConvolution{false},
+ m_IsPaddingExplicit{false},
+ m_HasDilation{false}
+ {}
+
+ ~DilationTestOptions() = default;
+
+ bool m_IsDepthwiseConvolution;
+ bool m_IsPaddingExplicit;
+ bool m_HasDilation;
+};
+
+class DilationTestVisitor : public LayerVisitorBase<VisitorThrowingPolicy>
+{
+public:
+ DilationTestVisitor() :
+ DilationTestVisitor(1u, 1u)
+ {}
+
+ DilationTestVisitor(uint32_t expectedDilationX, uint32_t expectedDilationY) :
+ m_ExpectedDilationX{expectedDilationX},
+ m_ExpectedDilationY{expectedDilationY}
+ {}
+
+ void VisitConvolution2dLayer(const IConnectableLayer *layer,
+ const Convolution2dDescriptor& descriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char *name = nullptr) override
+ {
+ ignore_unused(layer);
+ ignore_unused(weights);
+ ignore_unused(biases);
+ ignore_unused(name);
+
+ CheckDilationParams(descriptor);
+ }
+
+ void VisitDepthwiseConvolution2dLayer(const IConnectableLayer *layer,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const ConstTensor& weights,
+ const Optional<ConstTensor>& biases,
+ const char *name = nullptr) override
+ {
+ ignore_unused(layer);
+ ignore_unused(weights);
+ ignore_unused(biases);
+ ignore_unused(name);
+
+ CheckDilationParams(descriptor);
+ }
+
+private:
+ uint32_t m_ExpectedDilationX;
+ uint32_t m_ExpectedDilationY;
+
+ template<typename ConvolutionDescriptor>
+ void CheckDilationParams(const ConvolutionDescriptor& descriptor)
+ {
+ BOOST_CHECK_EQUAL(descriptor.m_DilationX, m_ExpectedDilationX);
+ BOOST_CHECK_EQUAL(descriptor.m_DilationY, m_ExpectedDilationY);
+ }
+};
+
+template<typename HalPolicy>
+void DilationTestImpl(const DilationTestOptions& options)
+{
+ using HalModel = typename HalPolicy::Model;
+ using HalOperationType = typename HalPolicy::OperationType;
+
+ const armnn::Compute backend = armnn::Compute::CpuRef;
+ auto driver = std::make_unique<ArmnnDriver>(DriverOptions(backend, false));
+ HalModel model = {};
+
+ // add operands
+ std::vector<float> weightData(9, 1.0f);
+ std::vector<float> biasData(1, 0.0f );
+
+ // input
+ AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3, 3, 1});
+
+ // weights & biases
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3, 3, 1}, weightData.data());
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasData.data());
+
+ uint32_t numInputs = 3u;
+ // padding
+ if (options.m_IsPaddingExplicit)
+ {
+ AddIntOperand<HalPolicy>(model, 1);
+ AddIntOperand<HalPolicy>(model, 1);
+ AddIntOperand<HalPolicy>(model, 1);
+ AddIntOperand<HalPolicy>(model, 1);
+ numInputs += 4;
+ }
+ else
+ {
+ AddIntOperand<HalPolicy>(model, android::nn::kPaddingSame);
+ numInputs += 1;
+ }
+
+ AddIntOperand<HalPolicy>(model, 2); // stride x
+ AddIntOperand<HalPolicy>(model, 2); // stride y
+ numInputs += 2;
+
+ if (options.m_IsDepthwiseConvolution)
+ {
+ AddIntOperand<HalPolicy>(model, 1); // depth multiplier
+ numInputs++;
+ }
+
+ AddIntOperand<HalPolicy>(model, 0); // no activation
+ numInputs += 1;
+
+ // dilation
+ if (options.m_HasDilation)
+ {
+ AddBoolOperand<HalPolicy>(model, false); // default data layout
+
+ AddIntOperand<HalPolicy>(model, 2); // dilation X
+ AddIntOperand<HalPolicy>(model, 2); // dilation Y
+
+ numInputs += 3;
+ }
+
+ // output
+ AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 1, 1});
+
+ // set up the convolution operation
+ model.operations.resize(1);
+ model.operations[0].type = options.m_IsDepthwiseConvolution ?
+ HalOperationType::DEPTHWISE_CONV_2D : HalOperationType::CONV_2D;
+
+ std::vector<uint32_t> inputs(numInputs);
+ std::iota(inputs.begin(), inputs.end(), 0u);
+ std::vector<uint32_t> outputs = { numInputs };
+
+ model.operations[0].inputs = hidl_vec<uint32_t>(inputs);
+ model.operations[0].outputs = hidl_vec<uint32_t>(outputs);
+
+ // convert model
+ ConversionData data({backend});
+ data.m_Network = armnn::INetwork::Create();
+ data.m_OutputSlotForOperand = std::vector<IOutputSlot*>(model.operands.size(), nullptr);
+
+ bool ok = HalPolicy::ConvertOperation(model.operations[0], model, data);
+ BOOST_CHECK(ok);
+
+ // check if dilation params are as expected
+ DilationTestVisitor visitor = options.m_HasDilation ? DilationTestVisitor(2, 2) : DilationTestVisitor();
+ data.m_Network->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index 4a8f607e..428359e2 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -73,29 +73,58 @@ android::sp<IMemory> AddPoolAndGetData(uint32_t size, Request& request);
void AddPoolAndSetData(uint32_t size, Request& request, const float* data);
-template<typename HalModel>
-void AddOperand(HalModel& model, const V1_0::Operand& op)
+template<typename HalPolicy,
+ typename HalModel = typename HalPolicy::Model,
+ typename HalOperand = typename HalPolicy::Operand>
+void AddOperand(HalModel& model, const HalOperand& op)
{
model.operands.resize(model.operands.size() + 1);
model.operands[model.operands.size() - 1] = op;
}
-template<typename HalModel>
+template<typename HalPolicy, typename HalModel = typename HalPolicy::Model>
void AddIntOperand(HalModel& model, int32_t value)
{
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandType = typename HalPolicy::OperandType;
+ using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
+
DataLocation location = {};
location.offset = model.operandValues.size();
location.length = sizeof(int32_t);
- V1_0::Operand op = {};
- op.type = V1_0::OperandType::INT32;
- op.dimensions = hidl_vec<uint32_t>{};
- op.lifetime = V1_0::OperandLifeTime::CONSTANT_COPY;
- op.location = location;
+ HalOperand op = {};
+ op.type = HalOperandType::INT32;
+ op.dimensions = hidl_vec<uint32_t>{};
+ op.lifetime = HalOperandLifeTime::CONSTANT_COPY;
+ op.location = location;
model.operandValues.resize(model.operandValues.size() + location.length);
*reinterpret_cast<int32_t*>(&model.operandValues[location.offset]) = value;
+ AddOperand<HalPolicy>(model, op);
+}
+
+template<typename HalPolicy, typename HalModel = typename HalPolicy::Model>
+void AddBoolOperand(HalModel& model, bool value)
+{
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandType = typename HalPolicy::OperandType;
+ using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
+
+ DataLocation location = {};
+ location.offset = model.operandValues.size();
+ location.length = sizeof(uint8_t);
+
+ HalOperand op = {};
+ op.type = HalOperandType::BOOL;
+ op.dimensions = hidl_vec<uint32_t>{};
+ op.lifetime = HalOperandLifeTime::CONSTANT_COPY;
+ op.location = location;
+
+ model.operandValues.resize(model.operandValues.size() + location.length);
+ *reinterpret_cast<uint8_t*>(&model.operandValues[location.offset]) = static_cast<uint8_t>(value);
+
AddOperand<HalModel>(model, op);
}
@@ -108,13 +137,19 @@ OperandType TypeToOperandType<float>();
template<>
OperandType TypeToOperandType<int32_t>();
-template<typename HalModel, typename T>
+template<typename HalPolicy,
+ typename T,
+ typename HalModel = typename HalPolicy::Model,
+ typename HalOperandType = typename HalPolicy::OperandType,
+ typename HalOperandLifeTime = typename HalPolicy::OperandLifeTime>
void AddTensorOperand(HalModel& model,
const hidl_vec<uint32_t>& dimensions,
const T* values,
- V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32,
- V1_0::OperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY)
+ HalOperandType operandType = HalOperandType::TENSOR_FLOAT32,
+ HalOperandLifeTime operandLifeTime = HalOperandLifeTime::CONSTANT_COPY)
{
+ using HalOperand = typename HalPolicy::Operand;
+
uint32_t totalElements = 1;
for (uint32_t dim : dimensions)
{
@@ -124,16 +159,16 @@ void AddTensorOperand(HalModel& model,
DataLocation location = {};
location.length = totalElements * sizeof(T);
- if(operandLifeTime == V1_0::OperandLifeTime::CONSTANT_COPY)
+ if(operandLifeTime == HalOperandLifeTime::CONSTANT_COPY)
{
location.offset = model.operandValues.size();
}
- V1_0::Operand op = {};
- op.type = operandType;
- op.dimensions = dimensions;
- op.lifetime = V1_0::OperandLifeTime::CONSTANT_COPY;
- op.location = location;
+ HalOperand op = {};
+ op.type = operandType;
+ op.dimensions = dimensions;
+ op.lifetime = HalOperandLifeTime::CONSTANT_COPY;
+ op.location = location;
model.operandValues.resize(model.operandValues.size() + location.length);
for (uint32_t i = 0; i < totalElements; i++)
@@ -141,48 +176,62 @@ void AddTensorOperand(HalModel& model,
*(reinterpret_cast<T*>(&model.operandValues[location.offset]) + i) = values[i];
}
- AddOperand<HalModel>(model, op);
+ AddOperand<HalPolicy>(model, op);
}
-template<typename HalModel, typename T>
+template<typename HalPolicy,
+ typename T,
+ typename HalModel = typename HalPolicy::Model,
+ typename HalOperandType = typename HalPolicy::OperandType,
+ typename HalOperandLifeTime = typename HalPolicy::OperandLifeTime>
void AddTensorOperand(HalModel& model,
const hidl_vec<uint32_t>& dimensions,
const std::vector<T>& values,
- V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32,
- V1_0::OperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY)
+ HalOperandType operandType = HalPolicy::OperandType::TENSOR_FLOAT32,
+ HalOperandLifeTime operandLifeTime = HalOperandLifeTime::CONSTANT_COPY)
{
- AddTensorOperand<HalModel, T>(model, dimensions, values.data(), operandType, operandLifeTime);
+ AddTensorOperand<HalPolicy, T>(model, dimensions, values.data(), operandType, operandLifeTime);
}
-template<typename HalModel>
+template<typename HalPolicy,
+ typename HalModel = typename HalPolicy::Model,
+ typename HalOperandType = typename HalPolicy::OperandType>
void AddInputOperand(HalModel& model,
const hidl_vec<uint32_t>& dimensions,
- V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32)
+ HalOperandType operandType = HalOperandType::TENSOR_FLOAT32)
{
- V1_0::Operand op = {};
- op.type = operandType;
- op.scale = operandType == V1_0::OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
- op.dimensions = dimensions;
- op.lifetime = V1_0::OperandLifeTime::MODEL_INPUT;
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
- AddOperand<HalModel>(model, op);
+ HalOperand op = {};
+ op.type = operandType;
+ op.scale = operandType == HalOperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
+ op.dimensions = dimensions;
+ op.lifetime = HalOperandLifeTime::MODEL_INPUT;
+
+ AddOperand<HalPolicy>(model, op);
model.inputIndexes.resize(model.inputIndexes.size() + 1);
model.inputIndexes[model.inputIndexes.size() - 1] = model.operands.size() - 1;
}
-template<typename HalModel>
+template<typename HalPolicy,
+ typename HalModel = typename HalPolicy::Model,
+ typename HalOperandType = typename HalPolicy::OperandType>
void AddOutputOperand(HalModel& model,
const hidl_vec<uint32_t>& dimensions,
- V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32)
+ HalOperandType operandType = HalOperandType::TENSOR_FLOAT32)
{
- V1_0::Operand op = {};
- op.type = operandType;
- op.scale = operandType == V1_0::OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
- op.dimensions = dimensions;
- op.lifetime = V1_0::OperandLifeTime::MODEL_OUTPUT;
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
- AddOperand<HalModel>(model, op);
+ HalOperand op = {};
+ op.type = operandType;
+ op.scale = operandType == HalOperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
+ op.dimensions = dimensions;
+ op.lifetime = HalOperandLifeTime::MODEL_OUTPUT;
+
+ AddOperand<HalPolicy>(model, op);
model.outputIndexes.resize(model.outputIndexes.size() + 1);
model.outputIndexes[model.outputIndexes.size() - 1] = model.operands.size() - 1;
@@ -204,7 +253,7 @@ android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& mode
template<typename HalModel>
android::sp<V1_0::IPreparedModel> PrepareModel(const HalModel& model,
- armnn_driver::ArmnnDriver& driver)
+ armnn_driver::ArmnnDriver& driver)
{
ErrorStatus prepareStatus = ErrorStatus::NONE;
return PrepareModelWithStatus(model, driver, prepareStatus);
diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp
index 6ab63ff2..de885153 100644
--- a/test/FullyConnected.cpp
+++ b/test/FullyConnected.cpp
@@ -3,7 +3,11 @@
// SPDX-License-Identifier: MIT
//
#include "DriverTestHelpers.hpp"
+
+#include "../1.0/HalPolicy.hpp"
+
#include <boost/test/unit_test.hpp>
+
#include <log/log.h>
BOOST_AUTO_TEST_SUITE(FullyConnectedTests)
@@ -12,6 +16,8 @@ using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
+using HalPolicy = hal_1_0::HalPolicy;
+
// Add our own test here since we fail the fc tests which Google supplies (because of non-const weights)
BOOST_AUTO_TEST_CASE(FullyConnected)
{
@@ -19,22 +25,22 @@ BOOST_AUTO_TEST_CASE(FullyConnected)
// but that uses slightly weird dimensions which I don't think we need to support for now
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
- V1_0::Model model = {};
+ HalPolicy::Model model = {};
// add operands
int32_t actValue = 0;
float weightValue[] = {2, 4, 1};
float biasValue[] = {4};
- AddInputOperand(model, hidl_vec<uint32_t>{1, 3});
- AddTensorOperand(model, hidl_vec<uint32_t>{1, 3}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand(model, actValue);
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1});
+ AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3});
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3}, weightValue);
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);
+ AddIntOperand<HalPolicy>(model, actValue);
+ AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1});
// make the fully connected operation
model.operations.resize(1);
- model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
+ model.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED;
model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
model.operations[0].outputs = hidl_vec<uint32_t>{4};
@@ -90,7 +96,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
sup = supported;
};
- V1_0::Model model = {};
+ HalPolicy::Model model = {};
// operands
int32_t actValue = 0;
@@ -105,15 +111,15 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
float biasValue[] = {0, 0, 0, 0, 0, 0, 0, 0};
// fully connected operation
- AddInputOperand(model, hidl_vec<uint32_t>{1, 1, 1, 8});
- AddTensorOperand(model, hidl_vec<uint32_t>{8, 8}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{8}, biasValue);
- AddIntOperand(model, actValue);
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 8});
+ AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 1, 8});
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{8, 8}, weightValue);
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{8}, biasValue);
+ AddIntOperand<HalPolicy>(model, actValue);
+ AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 8});
model.operations.resize(1);
- model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
+ model.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED;
model.operations[0].inputs = hidl_vec<uint32_t>{0,1,2,3};
model.operations[0].outputs = hidl_vec<uint32_t>{4};
@@ -177,7 +183,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
sup = supported;
};
- V1_0::Model model = {};
+ HalPolicy::Model model = {};
// operands
int32_t actValue = 0;
@@ -192,15 +198,15 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
float biasValue[] = {0, 0, 0, 0, 0, 0, 0, 0};
// fully connected operation
- AddInputOperand(model, hidl_vec<uint32_t>{1, 2, 2, 2});
- AddTensorOperand(model, hidl_vec<uint32_t>{8, 8}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{8}, biasValue);
- AddIntOperand(model, actValue);
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 8});
+ AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 2, 2});
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{8, 8}, weightValue);
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{8}, biasValue);
+ AddIntOperand<HalPolicy>(model, actValue);
+ AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 8});
model.operations.resize(1);
- model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
+ model.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED;
model.operations[0].inputs = hidl_vec<uint32_t>{0,1,2,3};
model.operations[0].outputs = hidl_vec<uint32_t>{4};
diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index ccd4caa2..6b51fb93 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -3,7 +3,11 @@
// SPDX-License-Identifier: MIT
//
#include "DriverTestHelpers.hpp"
+
+#include "../1.0/HalPolicy.hpp"
+
#include <boost/test/unit_test.hpp>
+
#include <log/log.h>
BOOST_AUTO_TEST_SUITE(GenericLayerTests)
@@ -12,6 +16,8 @@ using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
+using HalPolicy = hal_1_0::HalPolicy;
+
BOOST_AUTO_TEST_CASE(GetSupportedOperations)
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -25,23 +31,23 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
supported = _supported;
};
- V1_0::Model model0 = {};
+ HalPolicy::Model model0 = {};
// Add operands
int32_t actValue = 0;
float weightValue[] = {2, 4, 1};
float biasValue[] = {4};
- AddInputOperand (model0, hidl_vec<uint32_t>{1, 3});
- AddTensorOperand(model0, hidl_vec<uint32_t>{1, 3}, weightValue);
- AddTensorOperand(model0, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand (model0, actValue);
- AddOutputOperand(model0, hidl_vec<uint32_t>{1, 1});
+ AddInputOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 3});
+ AddTensorOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 3}, weightValue);
+ AddTensorOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1}, biasValue);
+ AddIntOperand<HalPolicy>(model0, actValue);
+ AddOutputOperand<HalPolicy>(model0, hidl_vec<uint32_t>{1, 1});
model0.operations.resize(1);
// Make a correct fully connected operation
- model0.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
+ model0.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED;
model0.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
model0.operations[0].outputs = hidl_vec<uint32_t>{4};
@@ -52,23 +58,24 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
V1_0::Model model1 = {};
- AddInputOperand (model1, hidl_vec<uint32_t>{1, 3});
- AddTensorOperand(model1, hidl_vec<uint32_t>{1, 3}, weightValue);
- AddTensorOperand(model1, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand (model1, actValue);
- AddOutputOperand(model1, hidl_vec<uint32_t>{1, 1});
+ AddInputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 3});
+ AddTensorOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 3}, weightValue);
+ AddTensorOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1}, biasValue);
+ AddIntOperand<HalPolicy>(model1, actValue);
+ AddOutputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 1});
model1.operations.resize(2);
// Make a correct fully connected operation
- model1.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
+ model1.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED;
model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
model1.operations[0].outputs = hidl_vec<uint32_t>{4};
// Add an incorrect fully connected operation
- AddIntOperand (model1, actValue);
- AddOutputOperand(model1, hidl_vec<uint32_t>{1, 1});
- model1.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
+ AddIntOperand<HalPolicy>(model1, actValue);
+ AddOutputOperand<HalPolicy>(model1, hidl_vec<uint32_t>{1, 1});
+
+ model1.operations[1].type = HalPolicy::OperationType::FULLY_CONNECTED;
model1.operations[1].inputs = hidl_vec<uint32_t>{4}; // Only 1 input operand, expected 4
model1.operations[1].outputs = hidl_vec<uint32_t>{5};
@@ -89,21 +96,21 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
#endif
// Test Broadcast on add/mul operators
- V1_0::Model model2 = {};
+ HalPolicy::Model model2 = {};
- AddInputOperand (model2, hidl_vec<uint32_t>{1, 1, 3, 4});
- AddInputOperand (model2, hidl_vec<uint32_t>{4});
- AddIntOperand (model2, actValue);
- AddOutputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
- AddOutputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
+ AddInputOperand<HalPolicy>(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
+ AddInputOperand<HalPolicy>(model2, hidl_vec<uint32_t>{4});
+ AddIntOperand<HalPolicy>(model2, actValue);
+ AddOutputOperand<HalPolicy>(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
+ AddOutputOperand<HalPolicy>(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
model2.operations.resize(2);
- model2.operations[0].type = V1_0::OperationType::ADD;
+ model2.operations[0].type = HalPolicy::OperationType::ADD;
model2.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2};
model2.operations[0].outputs = hidl_vec<uint32_t>{3};
- model2.operations[1].type = V1_0::OperationType::MUL;
+ model2.operations[1].type = HalPolicy::OperationType::MUL;
model2.operations[1].inputs = hidl_vec<uint32_t>{0, 1, 2};
model2.operations[1].outputs = hidl_vec<uint32_t>{4};
@@ -115,14 +122,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
V1_0::Model model3 = {};
- AddInputOperand (model3, hidl_vec<uint32_t>{1, 1, 1, 8});
- AddIntOperand (model3, 2);
- AddOutputOperand(model3, hidl_vec<uint32_t>{1, 2, 2, 2});
+ AddInputOperand<HalPolicy>(model3, hidl_vec<uint32_t>{1, 1, 1, 8});
+ AddIntOperand<HalPolicy>(model3, 2);
+ AddOutputOperand<HalPolicy>(model3, hidl_vec<uint32_t>{1, 2, 2, 2});
model3.operations.resize(1);
// Add unsupported operation, should return no error but we don't support it
- model3.operations[0].type = V1_0::OperationType::DEPTH_TO_SPACE;
+ model3.operations[0].type = HalPolicy::OperationType::DEPTH_TO_SPACE;
model3.operations[0].inputs = hidl_vec<uint32_t>{0, 1};
model3.operations[0].outputs = hidl_vec<uint32_t>{2};
@@ -131,14 +138,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
BOOST_TEST(supported.size() == (size_t)1);
BOOST_TEST(supported[0] == false);
- V1_0::Model model4 = {};
+ HalPolicy::Model model4 = {};
- AddIntOperand(model4, 0);
+ AddIntOperand<HalPolicy>(model4, 0);
model4.operations.resize(1);
// Add invalid operation
- model4.operations[0].type = static_cast<V1_0::OperationType>(100);
+ model4.operations[0].type = static_cast<HalPolicy::OperationType>(100);
model4.operations[0].outputs = hidl_vec<uint32_t>{0};
driver->getSupportedOperations(model4, cb);
@@ -162,7 +169,7 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
supported = _supported;
};
- V1_0::Model model = {};
+ HalPolicy::Model model = {};
// Operands
int32_t actValue = 0;
@@ -170,36 +177,46 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
float biasValue[] = {4};
// HASHTABLE_LOOKUP is unsupported at the time of writing this test, but any unsupported layer will do
- AddInputOperand (model, hidl_vec<uint32_t>{1, 1, 3, 4}, V1_0::OperandType::TENSOR_INT32);
- AddInputOperand (model, hidl_vec<uint32_t>{4}, V1_0::OperandType::TENSOR_INT32);
- AddInputOperand (model, hidl_vec<uint32_t>{1, 1, 3, 4});
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4}, V1_0::OperandType::TENSOR_QUANT8_ASYMM);
+ AddInputOperand<HalPolicy>(model,
+ hidl_vec<uint32_t>{1, 1, 3, 4},
+ HalPolicy::OperandType::TENSOR_INT32);
+ AddInputOperand<HalPolicy>(model,
+ hidl_vec<uint32_t>{4},
+ HalPolicy::OperandType::TENSOR_INT32);
+ AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 3, 4});
+
+ AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 3, 4});
+ AddOutputOperand<HalPolicy>(model,
+ hidl_vec<uint32_t>{1, 1, 3, 4},
+ HalPolicy::OperandType::TENSOR_QUANT8_ASYMM);
// Fully connected is supported
- AddInputOperand (model, hidl_vec<uint32_t>{1, 3});
- AddTensorOperand(model, hidl_vec<uint32_t>{1, 3}, weightValue);
- AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
- AddIntOperand (model, actValue);
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1});
+ AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3});
+
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3}, weightValue);
+ AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);
+
+ AddIntOperand<HalPolicy>(model, actValue);
+
+ AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1});
// EMBEDDING_LOOKUP is unsupported
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
+ AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 3, 4});
model.operations.resize(3);
// Unsupported
- model.operations[0].type = V1_0::OperationType::HASHTABLE_LOOKUP;
+ model.operations[0].type = HalPolicy::OperationType::HASHTABLE_LOOKUP;
model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2};
model.operations[0].outputs = hidl_vec<uint32_t>{3, 4};
// Supported
- model.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
+ model.operations[1].type = HalPolicy::OperationType::FULLY_CONNECTED;
model.operations[1].inputs = hidl_vec<uint32_t>{5, 6, 7, 8};
model.operations[1].outputs = hidl_vec<uint32_t>{9};
// Unsupported
- model.operations[2].type = V1_0::OperationType::EMBEDDING_LOOKUP;
+ model.operations[2].type = HalPolicy::OperationType::EMBEDDING_LOOKUP;
model.operations[2].inputs = hidl_vec<uint32_t>{1, 2};
model.operations[2].outputs = hidl_vec<uint32_t>{10};
@@ -227,7 +244,7 @@ BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
supported = _supported;
};
- V1_0::Model model = {};
+ HalPolicy::Model model = {};
model.pools = hidl_vec<hidl_memory>{hidl_memory("Unsuported hidl memory type", nullptr, 0)};
diff --git a/test/Lstm.cpp b/test/Lstm.cpp
index 56812326..579524ca 100644
--- a/test/Lstm.cpp
+++ b/test/Lstm.cpp
@@ -5,6 +5,8 @@
#include "DriverTestHelpers.hpp"
#include "OperationsUtils.h"
+#include "../1.0/HalPolicy.hpp"
+
#include <boost/array.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
@@ -15,8 +17,10 @@
BOOST_AUTO_TEST_SUITE(LstmTests)
-using ArmnnDriver = armnn_driver::ArmnnDriver;
+using ArmnnDriver = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;
+using HalPolicy = armnn_driver::hal_1_0::HalPolicy;
+
using namespace driverTestHelpers;
using namespace android::hardware;
@@ -128,99 +132,130 @@ void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
armnn::Compute compute)
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
- V1_0::Model model = {};
+ HalPolicy::Model model = {};
// Inputs:
// 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
// “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
- AddInputOperand(model, inputDimensions);
+ AddInputOperand<HalPolicy>(model, inputDimensions);
// 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, input_size], where “num_units” corresponds to the number of cell units.
- AddTensorOperand(model, inputToInputWeightsDimensions, inputToInputWeightsValue, V1_0::OperandType::TENSOR_FLOAT32,
- CreateNoValueLifeTime(inputToInputWeightsDimensions));
+ AddTensorOperand<HalPolicy>(model,
+ inputToInputWeightsDimensions,
+ inputToInputWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(inputToInputWeightsDimensions));
// 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, input_size].
- AddTensorOperand(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
- // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
- AddTensorOperand(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
+ AddTensorOperand<HalPolicy>(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
+ // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+ // [num_units, input_size].
+ AddTensorOperand<HalPolicy>(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
// 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, input_size].
- AddTensorOperand(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
+ AddTensorOperand<HalPolicy>(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
// 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
// “num_units”), or the second dimension of the “projection_weights”, if defined.
- AddTensorOperand(model, recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
- V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
+ AddTensorOperand<HalPolicy>(model,
+ recurrentToInputWeightsDimensions,
+ recurrentToInputWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
// 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size].
- AddTensorOperand(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
+ AddTensorOperand<HalPolicy>(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
// 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size].
- AddTensorOperand(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
+ AddTensorOperand<HalPolicy>(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
// 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [num_units, output_size].
- AddTensorOperand(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
+ AddTensorOperand<HalPolicy>(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
// 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- AddTensorOperand(model, cellToInputWeightsDimensions, cellToInputWeightsValue,
- V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToInputWeightsDimensions));
+ AddTensorOperand<HalPolicy>(model,
+ cellToInputWeightsDimensions,
+ cellToInputWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(cellToInputWeightsDimensions));
// 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- AddTensorOperand(model, cellToForgetWeightsDimensions, cellToForgetWeightsValue,
- V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToForgetWeightsDimensions));
+ AddTensorOperand<HalPolicy>(model,
+ cellToForgetWeightsDimensions,
+ cellToForgetWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(cellToForgetWeightsDimensions));
// 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- AddTensorOperand(model, cellToOutputWeightsDimensions, cellToOutputWeightsValue,
- V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToOutputWeightsDimensions));
+ AddTensorOperand<HalPolicy>(model,
+ cellToOutputWeightsDimensions,
+ cellToOutputWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(cellToOutputWeightsDimensions));
// 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- AddTensorOperand(model, inputGateBiasDimensions, inputGateBiasValue,
- V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(inputGateBiasDimensions));
+ AddTensorOperand<HalPolicy>(model,
+ inputGateBiasDimensions,
+ inputGateBiasValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(inputGateBiasDimensions));
// 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- AddTensorOperand(model, forgetGateBiasDimensions, forgetGateBiasValue);
+ AddTensorOperand<HalPolicy>(model, forgetGateBiasDimensions, forgetGateBiasValue);
// 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- AddTensorOperand(model, cellBiasDimensions, cellBiasValue);
+ AddTensorOperand<HalPolicy>(model, cellBiasDimensions, cellBiasValue);
// 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
- AddTensorOperand(model, outputGateBiasDimensions, outputGateBiasValue);
+ AddTensorOperand<HalPolicy>(model, outputGateBiasDimensions, outputGateBiasValue);
// 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
// [output_size, num_units].
- AddTensorOperand(model, projectionWeightsDimensions, projectionWeightsValue,
- V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(projectionWeightsDimensions));
+ AddTensorOperand<HalPolicy>(model,
+ projectionWeightsDimensions,
+ projectionWeightsValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(projectionWeightsDimensions));
// 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
- AddTensorOperand(model, projectionBiasDimensions, projectionBiasValue,
- V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(projectionBiasDimensions));
+ AddTensorOperand<HalPolicy>(model,
+ projectionBiasDimensions,
+ projectionBiasValue,
+ HalPolicy::OperandType::TENSOR_FLOAT32,
+ CreateNoValueLifeTime(projectionBiasDimensions));
// 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
- AddInputOperand(model, outputStateInDimensions);
+ AddInputOperand<HalPolicy>(model, outputStateInDimensions);
// 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
- AddInputOperand(model, cellStateInDimensions);
+ AddInputOperand<HalPolicy>(model, cellStateInDimensions);
// Constant scalar values (the VTS test adds these as tensors of dim {})
// 20: The activation function: A value indicating the activation function:
// 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
- AddTensorOperand(model, activationFunctionDimensions,
- activationFunctionValue, V1_0::OperandType::INT32);
+ AddTensorOperand<HalPolicy>(model,
+ activationFunctionDimensions,
+ activationFunctionValue,
+ HalPolicy::OperandType::INT32);
// 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
// If set to 0.0 then clipping is disabled.
- AddTensorOperand(model, cellClippingThresholdDimensions,
- cellClippingThresholdValue, V1_0::OperandType::FLOAT32);
+ AddTensorOperand<HalPolicy>(model,
+ cellClippingThresholdDimensions,
+ cellClippingThresholdValue,
+ HalPolicy::OperandType::FLOAT32);
// 22: The clipping threshold: for the output from the projection layer, such that values are bound within
// [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
- AddTensorOperand(model, projectionClippingThresholdDimensions,
- projectionClippingThresholdValue, V1_0::OperandType::FLOAT32);
+ AddTensorOperand<HalPolicy>(model,
+ projectionClippingThresholdDimensions,
+ projectionClippingThresholdValue,
+ HalPolicy::OperandType::FLOAT32);
// Outputs:
// 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
// CIFG, or [batch_size, num_units * 3] without CIFG.
- AddOutputOperand(model, scratchBufferDimensions);
+ AddOutputOperand<HalPolicy>(model, scratchBufferDimensions);
// 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
- AddOutputOperand(model, outputStateOutDimensions);
+ AddOutputOperand<HalPolicy>(model, outputStateOutDimensions);
// 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
- AddOutputOperand(model, cellStateOutDimensions);
+ AddOutputOperand<HalPolicy>(model, cellStateOutDimensions);
// 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
// effectively the same as the current “output state (out)” value.
- AddOutputOperand(model, outputDimensions);
+ AddOutputOperand<HalPolicy>(model, outputDimensions);
// make the lstm operation
model.operations.resize(1);
- model.operations[0].type = V1_0::OperationType::LSTM;
+ model.operations[0].type = HalPolicy::OperationType::LSTM;
model.operations[0].inputs =
hidl_vec<uint32_t> {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};