aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Beck <david.beck@arm.com>2018-10-04 15:43:17 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2018-10-10 16:16:58 +0100
commit5eec11db435a94ba5046ba74edc5c9c412a64e9d (patch)
tree6a3d9b9126ae449fc5e83838b2eb1765a67d6aad
parentd806792f7fbdae2cfa8dcb1eb59b9400b84741da (diff)
downloadarmnn-5eec11db435a94ba5046ba74edc5c9c412a64e9d.tar.gz
IVGCVSW-1964 : replace optional biases with home-grown Optional
!android-nn-driver:151788 Change-Id: Ibdc41d09b8df05e7a0360dcb8a060860dfb1bd99
-rw-r--r--Android.mk1
-rw-r--r--CMakeLists.txt2
-rw-r--r--include/armnn/Exceptions.hpp5
-rw-r--r--include/armnn/ILayerSupport.hpp8
-rw-r--r--include/armnn/LayerSupport.hpp7
-rw-r--r--include/armnn/Optional.hpp123
-rw-r--r--src/armnn/LayerSupport.cpp4
-rw-r--r--src/armnn/test/OptionalTest.cpp63
-rw-r--r--src/backends/ILayerSupport.cpp4
-rw-r--r--src/backends/WorkloadFactory.cpp12
-rw-r--r--src/backends/cl/ClLayerSupport.cpp4
-rw-r--r--src/backends/cl/ClLayerSupport.hpp6
-rw-r--r--src/backends/cl/workloads/ClConvolution2dWorkload.cpp6
-rw-r--r--src/backends/cl/workloads/ClConvolution2dWorkload.hpp4
-rw-r--r--src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.cpp6
-rw-r--r--src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.hpp3
-rw-r--r--src/backends/neon/NeonLayerSupport.cpp4
-rw-r--r--src/backends/neon/NeonLayerSupport.hpp6
-rw-r--r--src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp6
-rw-r--r--src/backends/neon/workloads/NeonConvolution2dBaseWorkload.hpp4
-rw-r--r--src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.cpp6
-rw-r--r--src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.hpp4
-rw-r--r--src/backends/reference/RefLayerSupport.cpp4
-rw-r--r--src/backends/reference/RefLayerSupport.hpp7
24 files changed, 239 insertions, 60 deletions
diff --git a/Android.mk b/Android.mk
index d181054525..95b10b9533 100644
--- a/Android.mk
+++ b/Android.mk
@@ -193,6 +193,7 @@ LOCAL_SRC_FILES := \
src/armnn/test/OpenClTimerTest.cpp \
src/armnn/test/ProfilingEventTest.cpp \
src/armnn/test/ObservableTest.cpp \
+ src/armnn/test/OptionalTest.cpp \
src/backends/test/IsLayerSupportedTest.cpp \
src/backends/test/Reference.cpp \
src/backends/test/WorkloadDataValidation.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 56b0935654..f0fe5a1525 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -152,6 +152,7 @@ list(APPEND armnn_sources
include/armnn/IRuntime.hpp
include/armnn/ILayerSupport.hpp
include/armnn/INetwork.hpp
+ include/armnn/Optional.hpp
include/armnn/Tensor.hpp
include/armnn/TensorFwd.hpp
include/armnn/Types.hpp
@@ -376,6 +377,7 @@ if(BUILD_UNIT_TESTS)
src/armnn/test/GraphUtils.hpp
src/armnn/test/InstrumentTests.cpp
src/armnn/test/ObservableTest.cpp
+ src/armnn/test/OptionalTest.cpp
src/backends/test/IsLayerSupportedTest.cpp
src/backends/test/IsLayerSupportedTestImpl.hpp
src/backends/test/Reference.cpp
diff --git a/include/armnn/Exceptions.hpp b/include/armnn/Exceptions.hpp
index 4f3bea0e30..89b6f2cfbb 100644
--- a/include/armnn/Exceptions.hpp
+++ b/include/armnn/Exceptions.hpp
@@ -95,6 +95,11 @@ class GraphValidationException : public Exception
using Exception::Exception;
};
+class BadOptionalAccessException : public Exception
+{
+ using Exception::Exception;
+};
+
template <typename ExceptionType>
void ConditionalThrow(bool condition, const std::string& message)
{
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 7962393f34..b9b41b7fcf 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -5,9 +5,9 @@
#pragma once
#include <armnn/DescriptorsFwd.hpp>
-
-#include <boost/optional.hpp>
+#include <armnn/Optional.hpp>
#include <vector>
+#include <cctype>
namespace armnn
{
@@ -61,7 +61,7 @@ public:
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024) const;
@@ -69,7 +69,7 @@ public:
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024) const;
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 25e888e71e..31874fe944 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -7,8 +7,7 @@
#include <armnn/DescriptorsFwd.hpp>
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
-
-#include <boost/optional.hpp>
+#include <armnn/Optional.hpp>
namespace armnn
{
@@ -60,7 +59,7 @@ bool IsConvolution2dSupported(Compute compute,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
@@ -69,7 +68,7 @@ bool IsDepthwiseConvolutionSupported(Compute compute,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
diff --git a/include/armnn/Optional.hpp b/include/armnn/Optional.hpp
new file mode 100644
index 0000000000..6fc207f425
--- /dev/null
+++ b/include/armnn/Optional.hpp
@@ -0,0 +1,123 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "Exceptions.hpp"
+
+namespace armnn
+{
+
+// NOTE: the members of the Optional class don't follow the ArmNN
+// coding convention because the interface is deliberately kept close to
+// the C++17 interface so that we can easily migrate to std::optional
+// later.
+
+template <typename T>
+class Optional final
+{
+public:
+ Optional(T&& value)
+ : m_HasValue{true}
+ {
+ new (m_Storage) T(value);
+ }
+
+ Optional(const T& value)
+ : m_HasValue{true}
+ {
+ new (m_Storage) T(value);
+ }
+
+ Optional(const Optional& other)
+ : m_HasValue{false}
+ {
+ *this = other;
+ }
+
+ Optional() noexcept
+ : m_HasValue{false}
+ {
+ }
+
+ ~Optional()
+ {
+ reset();
+ }
+
+ operator bool() const noexcept
+ {
+ return has_value();
+ }
+
+ Optional& operator=(T&& value)
+ {
+ reset();
+ new (m_Storage) T(value);
+ m_HasValue = true;
+ return *this;
+ }
+
+ Optional& operator=(const T& value)
+ {
+ reset();
+ new(m_Storage) T(value);
+ m_HasValue = true;
+ return *this;
+ }
+
+ Optional& operator=(const Optional& other)
+ {
+ reset();
+ if (other.has_value())
+ {
+ new (m_Storage) T(other.value());
+ m_HasValue = true;
+ }
+
+ return *this;
+ }
+
+ const T& value() const
+ {
+ if (!has_value())
+ {
+ throw BadOptionalAccessException("Optional has no value");
+ }
+
+ auto valuePtr = reinterpret_cast<const T*>(m_Storage);
+ return *valuePtr;
+ }
+
+ T& value()
+ {
+ if (!has_value())
+ {
+ throw BadOptionalAccessException("Optional has no value");
+ }
+
+ auto valuePtr = reinterpret_cast<T*>(m_Storage);
+ return *valuePtr;
+ }
+
+ bool has_value() const noexcept
+ {
+ return m_HasValue;
+ }
+
+ void reset()
+ {
+ if (has_value())
+ {
+ value().T::~T();
+ m_HasValue = false;
+ }
+ }
+
+private:
+ alignas(alignof(T)) unsigned char m_Storage[sizeof(T)];
+ bool m_HasValue;
+};
+
+}
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 9561136d85..3758ed40f6 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -134,7 +134,7 @@ bool IsConvolution2dSupported(Compute compute,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
@@ -166,7 +166,7 @@ bool IsDepthwiseConvolutionSupported(Compute compute,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
diff --git a/src/armnn/test/OptionalTest.cpp b/src/armnn/test/OptionalTest.cpp
new file mode 100644
index 0000000000..1b5aaa7db6
--- /dev/null
+++ b/src/armnn/test/OptionalTest.cpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <boost/test/unit_test.hpp>
+
+#include <armnn/Optional.hpp>
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(OptionalTests)
+
+BOOST_AUTO_TEST_CASE(SimpleStringTests)
+{
+ armnn::Optional<std::string> optionalString;
+ BOOST_TEST(optionalString == false);
+ BOOST_TEST(optionalString.has_value() == false);
+
+ optionalString = std::string("Hello World");
+ BOOST_TEST(optionalString == true);
+ BOOST_TEST(optionalString.has_value() == true);
+ BOOST_TEST(optionalString.value() == "Hello World");
+
+ armnn::Optional<std::string> otherString;
+ otherString = optionalString;
+ BOOST_TEST(otherString == true);
+ BOOST_TEST(otherString.value() == "Hello World");
+
+ optionalString.reset();
+ BOOST_TEST(optionalString == false);
+ BOOST_TEST(optionalString.has_value() == false);
+
+ const std::string stringValue("Hello World");
+ armnn::Optional<std::string> optionalString2(stringValue);
+ BOOST_TEST(optionalString2 == true);
+ BOOST_TEST(optionalString2.has_value() == true);
+ BOOST_TEST(optionalString2.value() == "Hello World");
+
+ armnn::Optional<std::string> optionalString3(std::move(optionalString2));
+ BOOST_TEST(optionalString3 == true);
+ BOOST_TEST(optionalString3.has_value() == true);
+ BOOST_TEST(optionalString3.value() == "Hello World");
+}
+
+BOOST_AUTO_TEST_CASE(SimpleIntTests)
+{
+ const int intValue = 123;
+
+ armnn::Optional<int> optionalInt;
+ BOOST_TEST(optionalInt == false);
+ BOOST_TEST(optionalInt.has_value() == false);
+
+ optionalInt = intValue;
+ BOOST_TEST(optionalInt == true);
+ BOOST_TEST(optionalInt.has_value() == true);
+ BOOST_TEST(optionalInt.value() == intValue);
+
+ armnn::Optional<int> otherOptionalInt;
+ otherOptionalInt = optionalInt;
+ BOOST_TEST(otherOptionalInt == true);
+ BOOST_TEST(otherOptionalInt.value() == intValue);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/ILayerSupport.cpp b/src/backends/ILayerSupport.cpp
index c0446e93ba..ff4b80d22b 100644
--- a/src/backends/ILayerSupport.cpp
+++ b/src/backends/ILayerSupport.cpp
@@ -89,7 +89,7 @@ bool ILayerSupport::IsConvolution2dSupported(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength) const
{
@@ -100,7 +100,7 @@ bool ILayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength) const
{
diff --git a/src/backends/WorkloadFactory.cpp b/src/backends/WorkloadFactory.cpp
index dc9c1bc624..05919d6d95 100644
--- a/src/backends/WorkloadFactory.cpp
+++ b/src/backends/WorkloadFactory.cpp
@@ -144,11 +144,11 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute,
const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
// Construct optional biases object based on the value of m_BiasEnabled
- boost::optional<TensorInfo> biases(boost::none);
+ Optional<TensorInfo> biases;
if (descriptor.m_BiasEnabled)
{
- biases = boost::make_optional(
- OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)));
+ biases =
+ OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
}
result = IsConvolution2dSupported(compute,
@@ -181,11 +181,11 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute,
const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
// Construct optional biases object based on the value of m_BiasEnabled
- boost::optional<TensorInfo> biases(boost::none);
+ Optional<TensorInfo> biases;
if (descriptor.m_BiasEnabled)
{
- biases = boost::make_optional(
- OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)));
+ biases =
+ OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
}
result = IsDepthwiseConvolutionSupported(compute,
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index e23c70ec30..09dfab9924 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -212,7 +212,7 @@ bool IsConvolution2dSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
@@ -228,7 +228,7 @@ bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 80e8488b3b..314ac4c73f 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -9,8 +9,6 @@
#include <armnn/Tensor.hpp>
#include <armnn/ArmNN.hpp>
-#include <boost/optional.hpp>
-
namespace armnn
{
@@ -50,14 +48,14 @@ bool IsConvolution2dSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
bool IsDivisionSupportedCl(const TensorInfo& input0,
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 521711becc..301859ee1b 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -24,7 +24,7 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases)
+ const Optional<TensorInfo>& biases)
{
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -35,9 +35,9 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.is_initialized());
+ BOOST_ASSERT(biases.has_value());
- aclBiasesInfo = BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
+ aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
}
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
index 14a39f3b25..a5de87639b 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
@@ -13,8 +13,6 @@
#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>
-#include <boost/optional.hpp>
-
#include <memory>
namespace armnn
@@ -24,7 +22,7 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases);
+ const Optional<TensorInfo>& biases);
class ClConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
{
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.cpp
index 5a036db922..53ac3bae99 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.cpp
@@ -21,7 +21,7 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases)
+ const Optional<TensorInfo>& biases)
{
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -32,9 +32,9 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.is_initialized());
+ BOOST_ASSERT(biases.has_value());
- aclBiasesInfo = BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
+ aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
}
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.hpp b/src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.hpp
index 9d5cde30b6..27aec8ecdd 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.hpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.hpp
@@ -6,7 +6,6 @@
#pragma once
#include <backends/Workload.hpp>
-#include <boost/optional.hpp>
#include <arm_compute/runtime/CL/CLFunctions.h>
@@ -17,7 +16,7 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases);
+ const Optional<TensorInfo>& biases);
template<armnn::DataType... dataTypes>
class ClDepthwiseConvolutionBaseWorkload : public TypedWorkload<DepthwiseConvolution2dQueueDescriptor, dataTypes...>
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index bc1f96be3f..ef70fbd370 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -198,7 +198,7 @@ bool IsConvolution2dSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
@@ -214,7 +214,7 @@ bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 439c45f59e..8b674c6460 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -8,8 +8,6 @@
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
-#include <boost/optional.hpp>
-
namespace armnn
{
@@ -53,7 +51,7 @@ bool IsConvolution2dSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
@@ -61,7 +59,7 @@ bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
bool IsDivisionSupportedNeon(const TensorInfo& input0,
diff --git a/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
index 02edabfd9c..8da3e47249 100644
--- a/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
@@ -21,7 +21,7 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases)
+ const Optional<TensorInfo>& biases)
{
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -32,9 +32,9 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.is_initialized());
+ BOOST_ASSERT(biases.has_value());
- aclBiasesInfo = BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
+ aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
}
diff --git a/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.hpp b/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.hpp
index 6af89c1f01..1cd30c70f9 100644
--- a/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.hpp
@@ -13,8 +13,6 @@
#include <arm_compute/runtime/MemoryManagerOnDemand.h>
-#include <boost/optional.hpp>
-
#include <memory>
namespace armnn
@@ -24,7 +22,7 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases);
+ const Optional<TensorInfo>& biases);
template<armnn::DataType... dataTypes>
class NeonConvolution2dBaseWorkload : public TypedWorkload<Convolution2dQueueDescriptor, dataTypes...>
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.cpp
index ef60b3238d..aa535adec9 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.cpp
@@ -14,7 +14,7 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases)
+ const Optional<TensorInfo>& biases)
{
const arm_compute::TensorInfo aclInputInfo =
armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
@@ -28,9 +28,9 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.is_initialized());
+ BOOST_ASSERT(biases.has_value());
- aclBiasesInfo = armcomputetensorutils::BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
+ aclBiasesInfo = armcomputetensorutils::BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
}
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.hpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.hpp
index 982992a363..ffee50861a 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.hpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.hpp
@@ -7,8 +7,6 @@
#include <backends/neon/workloads/NeonWorkloadUtils.hpp>
-#include <boost/optional.hpp>
-
namespace armnn
{
@@ -16,6 +14,6 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases);
+ const Optional<TensorInfo>& biases);
} // namespace armnn
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 1ca3d5b6d6..a42efb748f 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -87,7 +87,7 @@ bool IsConvolution2dSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
ignore_unused(descriptor);
@@ -104,7 +104,7 @@ bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported)
{
ignore_unused(output);
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 0fac886234..dcc5dd3ddf 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -8,9 +8,6 @@
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
#include <layers/LstmLayer.hpp>
-#include <boost/optional.hpp>
-
-#include <boost/optional.hpp>
namespace armnn
{
@@ -46,14 +43,14 @@ bool IsConvolution2dSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const boost::optional<TensorInfo>& biases,
+ const Optional<TensorInfo>& biases,
std::string* reasonIfUnsupported = nullptr);
bool IsDivisionSupportedRef(const TensorInfo& input0,