author     Cathal Corbett <cathal.corbett@arm.com>    2022-05-11 20:12:17 +0100
committer  Cathal Corbett <cathal.corbett@arm.com>    2022-05-12 13:41:56 +0100
commit     4b19d2249e3b8f9216ec5b410fad20c41b4c6053 (patch)
tree       b7511932f11b2c27f39e1e76ac290ba3baca0fda
parent     50bc39e401532f9aa838e3d9b1f3e3ae5845e37f (diff)
IVGCVSW-6940 ConstTensorsAsInput: DepthwiseConvolution2d - Complete ACL
* Added backend specific optimization & test for CpuAcc and GpuAcc: PermuteDepthwiseConv2dWeights

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I600476b2e9c557a39818a574c1091c9d650b21b1
-rw-r--r--  src/armnn/Network.cpp                                               |   5
-rw-r--r--  src/armnn/optimizations/All.hpp                                     |   1
-rw-r--r--  src/armnn/optimizations/PermuteDepthwiseConv2dWeights.hpp           |  81
-rw-r--r--  src/armnn/test/optimizations/PermuteDepthwiseConv2dWeightsTests.cpp | 116
-rw-r--r--  src/backends/aclCommon/ArmComputeTensorUtils.cpp                    |  23
-rw-r--r--  src/backends/aclCommon/ArmComputeTensorUtils.hpp                    |   5
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                        |  41
-rw-r--r--  src/backends/backendsCommon/WorkloadUtils.cpp                       |   4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp      |  97
-rw-r--r--  src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp        |  79
-rw-r--r--  src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.hpp        |   5
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp    |  84
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.hpp    |   5
13 files changed, 410 insertions(+), 136 deletions(-)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 549222bd7a..d2ebd4cde6 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1167,6 +1167,11 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
auto backendObjPtr = backends.find(selectedBackend)->second.get();
ARMNN_ASSERT(backendObjPtr);
+ if(selectedBackend == armnn::Compute::GpuAcc || selectedBackend == armnn::Compute::CpuAcc)
+ {
+ Optimizer::Pass(optGraph, MakeOptimizations(optimizations::PermuteDepthwiseConv2dWeights()));
+ }
+
// Select sub-graphs based on backend
SubgraphViewSelector::Subgraphs subgraphs =
SubgraphViewSelector::SelectSubgraphs(optGraph,
diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp
index 2bc54d993d..38c4ac9462 100644
--- a/src/armnn/optimizations/All.hpp
+++ b/src/armnn/optimizations/All.hpp
@@ -18,6 +18,7 @@
#include "OptimizeInversePermutes.hpp"
#include "PermuteAsReshape.hpp"
#include "PermuteAndBatchToSpaceAsDepthToSpace.hpp"
+#include "PermuteDepthwiseConv2dWeights.hpp"
#include "RedirectMembersToConstantInputs.hpp"
#include "SquashEqualSiblings.hpp"
#include "TransposeAsReshape.hpp" \ No newline at end of file
diff --git a/src/armnn/optimizations/PermuteDepthwiseConv2dWeights.hpp b/src/armnn/optimizations/PermuteDepthwiseConv2dWeights.hpp
new file mode 100644
index 0000000000..d49ddb9f68
--- /dev/null
+++ b/src/armnn/optimizations/PermuteDepthwiseConv2dWeights.hpp
@@ -0,0 +1,81 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "Optimization.hpp"
+#include "NetworkUtils.hpp"
+
+#include <armnnUtils/Permute.hpp>
+
+#include <fmt/format.h>
+
+namespace armnn
+{
+namespace optimizations
+{
+
+class PermuteDepthwiseConv2dWeightsImpl
+{
+public:
+
+ void Run(Graph& graph, Layer& layer) const
+ {
+ if (layer.GetType() == LayerType::DepthwiseConvolution2d)
+ {
+ AddPermuteLayer(graph, PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&layer));
+ }
+ }
+
+protected:
+ PermuteDepthwiseConv2dWeightsImpl() = default;
+ ~PermuteDepthwiseConv2dWeightsImpl() = default;
+
+private:
+ /// The ArmNN weights format for depthwise is [1, H, W, C], regardless of the input/output layout.
+ ///
+ /// The ACL weights format for depthwise is:
+ /// - [1, H, W, C] for [N, H, W, C] input/output layout (matches ArmNN)
+ /// - [1, C, H, W] for [N, C, H, W] input/output layout
+ ///
+ /// Therefore the ArmNN weights have to be permuted when the input/output layout is [N, C, H, W] before being passed to ACL.
+ static void AddPermuteLayer(Graph& graph, DepthwiseConvolution2dLayer* layer)
+ {
+ TensorInfo inputInfo = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+ TensorInfo weightInfo = layer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
+ if (layer->GetParameters().m_DataLayout == armnn::DataLayout::NHWC)
+ {
+ // No permutation required. Input and weights data layouts are the same.
+ return;
+ }
+ else if (layer->GetParameters().m_DataLayout == armnn::DataLayout::NCHW)
+ {
+ // Weights permutation required. Weights [N,H,W,C] and input [N,C,H,W] data layouts are different.
+ // [ 1, H, W, I*M] --> [ 1, I * M, H, W ]
+ PermutationVector permutationVector = { 0, 2, 3, 1 };
+ TensorInfo weightsPermuted = armnnUtils::Permuted(weightInfo, permutationVector);
+
+ // InsertNewLayer is used so that the layers do not need to be re-sorted.
+ PermuteLayer* permuteLayer =
+ graph.InsertNewLayer<PermuteLayer>(layer->GetInputSlot(1),
+ PermuteDescriptor(permutationVector),
+ "permute_layer");
+ permuteLayer->GetOutputSlot().SetTensorInfo(weightsPermuted);
+
+ // Assign Permute BackendId to be the same as the Depthwise Conv2d BackendId.
+ // Needed as backends have already been assigned at this stage.
+ permuteLayer->SetBackendId(layer->GetBackendId());
+ }
+ else
+ {
+ throw InvalidArgumentException(fmt::format("Unknown data layout for tensor info conversion: {}",
+ GetDataLayoutName(layer->GetParameters().m_DataLayout)));
+ }
+ }
+};
+
+using PermuteDepthwiseConv2dWeights = OptimizeForType<Layer, PermuteDepthwiseConv2dWeightsImpl>;
+
+} // namespace optimizations
+} // namespace armnn
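For reference, the PermutationVector mappings used above give the destination dimension of each source dimension: {0, 2, 3, 1} sends H (dim 1) to position 2, W (dim 2) to position 3 and C (dim 3) to position 1. A minimal standalone sketch of the resulting shape, reusing the same public ArmNN utilities the optimization calls (not part of the patch):

    #include <armnn/Tensor.hpp>
    #include <armnnUtils/Permute.hpp>
    #include <iostream>

    int main()
    {
        // ArmNN depthwise weights [1, H, W, I*M], e.g. H = 3, W = 3, I*M = 8.
        armnn::TensorInfo weightInfo({ 1, 3, 3, 8 }, armnn::DataType::Float32);

        // mappings[i] is the destination dimension of source dimension i.
        armnn::PermutationVector permutationVector = { 0, 2, 3, 1 };
        armnn::TensorInfo permuted = armnnUtils::Permuted(weightInfo, permutationVector);

        // Prints "1 8 3 3", i.e. the [1, I*M, H, W] layout ACL expects for NCHW.
        const armnn::TensorShape& shape = permuted.GetShape();
        std::cout << shape[0] << " " << shape[1] << " " << shape[2] << " " << shape[3] << std::endl;
        return 0;
    }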
diff --git a/src/armnn/test/optimizations/PermuteDepthwiseConv2dWeightsTests.cpp b/src/armnn/test/optimizations/PermuteDepthwiseConv2dWeightsTests.cpp
new file mode 100644
index 0000000000..24dab7f779
--- /dev/null
+++ b/src/armnn/test/optimizations/PermuteDepthwiseConv2dWeightsTests.cpp
@@ -0,0 +1,116 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../armnnTestUtils/GraphUtils.hpp"
+#include "../armnnTestUtils/TestUtils.hpp"
+
+#include <armnn/INetwork.hpp>
+
+#include <doctest/doctest.h>
+
+using namespace armnn;
+
+namespace
+{
+#if defined(ARMCOMPUTENEON_ENABLED) || defined(ARMCOMPUTECL_ENABLED)
+armnn::INetworkPtr CreateSimpleDepthwiseConv2dNetwork(const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const armnn::TensorInfo& weightsTensorInfo,
+ armnn::DepthwiseConvolution2dDescriptor descriptor)
+{
+ armnn::INetworkPtr network(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* inputLayer = network->AddInputLayer(0, "input");
+ armnn::IConnectableLayer* weightsInputLayer = network->AddInputLayer(1, "weights_input");
+ armnn::IConnectableLayer* depthwiseLayer = network->AddDepthwiseConvolution2dLayer(descriptor, "depthwise_conv2d");
+ armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output");
+
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ weightsInputLayer->GetOutputSlot(0).SetTensorInfo(weightsTensorInfo);
+ depthwiseLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ inputLayer->GetOutputSlot(0).Connect(depthwiseLayer->GetInputSlot(0));
+ weightsInputLayer->GetOutputSlot(0).Connect(depthwiseLayer->GetInputSlot(1));
+ depthwiseLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ return network;
+}
+
+void PermuteDepthwiseConv2dWeightsTestRunner(INetworkPtr& network,
+ const TensorShape& outputShape,
+ Compute backendId)
+{
+ // Create ArmNN runtime
+ IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions());
+
+ // Optimise ArmNN network
+ IOptimizedNetworkPtr optNet = Optimize(*network, {backendId}, run->GetDeviceSpec());
+
+ Graph& graph = GetGraphForTesting(optNet.get());
+
+ CHECK(graph.GetNumLayers() == 5);
+ CHECK(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<PermuteLayer>,
+ &IsLayerOfType<DepthwiseConvolution2dLayer>,
+ &IsLayerOfType<OutputLayer>));
+
+ armnn::Layer* const permuteLayer = GetFirstLayerWithName(graph, "permute_layer");
+ CHECK(permuteLayer);
+
+ // Copy the original weights shape to compare with the permuted shape.
+ unsigned int weightsShape[] = {outputShape[0], outputShape[1], outputShape[2], outputShape[3]};
+
+ // Check the permuted TensorShape and data type are correct:
+ // [ 1, H, W, I*M ] --> [ 1, I*M, H, W ]
+ TensorShape newShape = permuteLayer->GetOutputSlot().GetTensorInfo().GetShape();
+ CHECK((newShape[0] == weightsShape[0]));
+ CHECK((newShape[1] == weightsShape[3]));
+ CHECK((newShape[2] == weightsShape[1]));
+ CHECK((newShape[3] == weightsShape[2]));
+}
+
+void PermuteDepthwiseConv2dWeightsTest(Compute backendId)
+{
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 3 }, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo({ 1, 2 }, armnn::DataType::Float32);
+ armnn::TensorInfo weightsTensorInfo({ 2, 6 }, armnn::DataType::Float32);
+
+ DepthwiseConvolution2dDescriptor descriptor;
+ descriptor.m_BiasEnabled = false;
+
+ armnn::INetworkPtr network = CreateSimpleDepthwiseConv2dNetwork(inputTensorInfo,
+ outputTensorInfo,
+ weightsTensorInfo,
+ descriptor);
+
+ PermuteDepthwiseConv2dWeightsTestRunner(network,
+ weightsTensorInfo.GetShape(),
+ backendId);
+}
+#endif
+}
+
+#if defined(ARMCOMPUTECL_ENABLED)
+TEST_SUITE("Optimizer_PermuteDepthwiseConv2dWeightsGpuAcc")
+{
+TEST_CASE("PermuteDepthwiseConv2dWeightsGpuAccTest")
+{
+ PermuteDepthwiseConv2dWeightsTest(Compute::GpuAcc);
+}
+}
+#endif
+
+#if defined(ARMCOMPUTENEON_ENABLED)
+TEST_SUITE("Optimizer_PermuteDepthwiseConv2dWeightsCpuAcc")
+{
+TEST_CASE("PermuteDepthwiseConv2dWeightsCpuAccTest")
+{
+ PermuteDepthwiseConv2dWeightsTest(Compute::CpuAcc);
+}
+}
+#endif
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index e476eb38a1..1960332ccf 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -8,6 +8,8 @@
#include "armnn/Exceptions.hpp"
#include <armnn/Descriptors.hpp>
+#include <fmt/format.h>
+
namespace armnn
{
namespace armcomputetensorutils
@@ -342,5 +344,26 @@ arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo
}
}
+unsigned int ComputeDepthwiseConv2dDepthMultiplier(armnn::DataLayout layout,
+ const arm_compute::TensorShape& weightsShape,
+ const arm_compute::TensorShape& inputShape)
+{
+ unsigned int depthMultiplier;
+ if (layout == armnn::DataLayout::NHWC)
+ {
+ depthMultiplier = static_cast<uint32_t>(weightsShape[0]) / static_cast<uint32_t>(inputShape[0]);
+ }
+ else if (layout == armnn::DataLayout::NCHW)
+ {
+ depthMultiplier = static_cast<uint32_t>(weightsShape[2]) / static_cast<uint32_t>(inputShape[2]);
+ }
+ else
+ {
+ throw InvalidArgumentException(fmt::format("Unknown data layout for tensor conversion: {}",
+ GetDataLayoutName(layout)));
+ }
+ return depthMultiplier;
+}
+
} // namespace armcomputetensorutils
} // namespace armnn
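The dimension indices chosen above follow from ACL's reversed ordering: arm_compute::TensorShape stores the innermost dimension at index 0, so an ArmNN [N, C, H, W] tensor arrives on the ACL side as {W, H, C, N}. A short illustration with assumed example shapes:

    #include <arm_compute/core/TensorShape.h>

    // NCHW input [N=1, C=2, H=4, W=4] is held by ACL as {W, H, C, N} = {4, 4, 2, 1};
    // the permuted NCHW weights [1, I*M=6, H=3, W=3] are held as {3, 3, 6, 1}.
    arm_compute::TensorShape inputShape(4u, 4u, 2u, 1u);
    arm_compute::TensorShape weightsShape(3u, 3u, 6u, 1u);

    // For NCHW the channel count lives at index 2, so the function returns
    // weightsShape[2] / inputShape[2] = 6 / 2 = 3 as the depth multiplier.
    // For NHWC the channel count is innermost, hence index 0 in that branch.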
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
index 31992b93c6..ee8240f3b8 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
@@ -77,6 +77,11 @@ arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsign
/// Gets the appropriate PixelValue for the TensorInfo DataType
arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float pixelValue);
+/// Computes the depth multiplier parameter for the Depthwise Conv2d ACL workload.
+unsigned int ComputeDepthwiseConv2dDepthMultiplier(armnn::DataLayout layout,
+ const arm_compute::TensorShape& weightsShape,
+ const arm_compute::TensorShape& inputShape);
+
/// Utility function used to setup an arm_compute::PadStrideInfo object from an armnn layer descriptor.
template <typename Descriptor>
arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(const Descriptor &descriptor)
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 7a46741964..289f780fba 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1416,24 +1416,6 @@ void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa
descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
}
- const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
-
- // Expected weight shape: [ 1, H, W, I*M ] - This shape does NOT depend on the data layout
- // inputChannels * channelMultiplier should be equal to outputChannels.
- const unsigned int numWeightOutputChannels = weightTensorInfo.GetShape()[3]; // I*M=Cout
- const unsigned int numOutputChannels = outputTensorInfo.GetShape()[channelIndex];
- if (numWeightOutputChannels != numOutputChannels)
- {
- throw InvalidArgumentException(fmt::format(
- "{0}: The weight format in armnn is expected to be [1, H, W, Cout]."
- "But 4th dimension is not equal to Cout. Cout = {1} Provided weight shape: [{2}, {3}, {4}, {5}]",
- descriptorName,
- numOutputChannels,
- weightTensorInfo.GetShape()[0],
- weightTensorInfo.GetShape()[1],
- weightTensorInfo.GetShape()[2],
- weightTensorInfo.GetShape()[3]));
- }
if (weightTensorInfo.GetShape()[0] != 1)
{
throw InvalidArgumentException(fmt::format(
@@ -1446,6 +1428,29 @@ void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa
weightTensorInfo.GetShape()[3]));
}
+ const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
+ const unsigned int numWeightOutputChannelsRefFormat = weightTensorInfo.GetShape()[3];
+ const unsigned int numWeightOutputChannelsAclFormat = weightTensorInfo.GetShape()[1];
+ const unsigned int numOutputChannels = outputTensorInfo.GetShape()[channelIndex];
+
+ // Weights format has two valid options: [1, H, W, Cout] (CpuRef) or [1, Cout, H, W] (CpuAcc/GpuAcc).
+ bool validRefFormat = (numWeightOutputChannelsRefFormat == numOutputChannels);
+ bool validAclFormat = (numWeightOutputChannelsAclFormat == numOutputChannels);
+
+ if (!(validRefFormat || validAclFormat))
+ {
+ throw InvalidArgumentException(fmt::format(
+ "{0}: The weight format in armnn is expected to be [1, H, W, Cout] (CpuRef) or [1, Cout, H, W] "
+ "(CpuAcc/GpuAcc). But neither the 4th (CpuRef) or 2nd (CpuAcc/GpuAcc) dimension is equal to Cout."
+ "Cout = {1} Provided weight shape: [{2}, {3}, {4}, {5}]",
+ descriptorName,
+ numOutputChannels,
+ weightTensorInfo.GetShape()[0],
+ weightTensorInfo.GetShape()[1],
+ weightTensorInfo.GetShape()[2],
+ weightTensorInfo.GetShape()[3]));
+ }
+
ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
Optional<TensorInfo> optionalBiasTensorInfo;
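The relaxed check accepts either weight layout. Condensed into a hypothetical free function (the name and form are illustrative, not part of the patch):

    #include <armnn/Tensor.hpp>

    // Accept weights if Cout sits in dimension 3 ([1, H, W, Cout], CpuRef)
    // or in dimension 1 ([1, Cout, H, W], CpuAcc/GpuAcc).
    bool IsValidDepthwiseWeightShape(const armnn::TensorShape& weightShape,
                                     unsigned int numOutputChannels)
    {
        return weightShape[3] == numOutputChannels || weightShape[1] == numOutputChannels;
    }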
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index d2ae16af0c..b045530abc 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -175,12 +175,14 @@ std::tuple<TensorInfo, unsigned int> Convert1HWOTensorInfoToAcl(const TensorInfo
TensorInfo weightsPermuted;
if (dataLayout == armnn::DataLayout::NHWC)
{
- // No permutation required. Data layouts are the same.
+ // No permutation required. Input and weights data layouts are the same.
aclDepthMultiplier = weightInfo.GetShape()[3] / inputInfo.GetShape()[3];
weightsPermuted = weightInfo;
}
else if (dataLayout == armnn::DataLayout::NCHW)
{
+ // Weights permutation required. Weights [N,H,W,C] and input [N,C,H,W] data layouts are different.
// [ 1, H, W, I*M] --> [ 1, I * M, H, W ]
aclDepthMultiplier = weightInfo.GetShape()[3] / inputInfo.GetShape()[1];
PermutationVector permutationVector{ 0, 2, 3, 1 };
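For context, this is how Convert1HWOTensorInfoToAcl is consumed by the CL and Neon Validate() functions later in the patch; a sketch assuming the backendsCommon include path:

    #include <armnn/Tensor.hpp>
    #include <backendsCommon/WorkloadUtils.hpp> // assumed include path
    #include <tuple>

    void Example(const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
    {
        armnn::TensorInfo weightsPermuted;
        unsigned int aclDepthMultiplier;
        std::tie(weightsPermuted, aclDepthMultiplier) =
            armnn::Convert1HWOTensorInfoToAcl(weightInfo, inputInfo, armnn::DataLayout::NCHW);
        // For ArmNN weights [1, H, W, I*M] and NCHW input [N, C, H, W]:
        // aclDepthMultiplier == (I*M) / C and weightsPermuted has shape [1, I*M, H, W].
    }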
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 4203fed23a..74c65e271c 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -1713,6 +1713,20 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
inputData = tmp;
}
+ std::vector<T> kernelData;
+ kernelData.assign(kernel.data(), kernel.data() + kernelHeight * kernelWidth * outputChannels);
+ if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
+ workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
+ {
+ if (layout == armnn::DataLayout::NCHW)
+ {
+ std::vector<T> tmp(kernelData.size());
+ kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
+ armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
+ kernelData = tmp;
+ }
+ }
+
// Construct the output data, with bias applied, as appropriate.
std::vector<T> outputData;
outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth);
@@ -1751,8 +1765,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
// 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
// Cannot PolymorphicDowncast from ScopedTensorHandle->RefTensorHandle.
// Need to PolymorphicDowncast from ITensorHandle->RefTensorHandle.
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
- AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernel.data()); // required for ConstantTensor
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
+ AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
@@ -1881,6 +1895,18 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
kernelDesc.GetQuantizationScale(),
kernelDesc.GetQuantizationOffset()));
+ if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
+ workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
+ {
+ if (layout == armnn::DataLayout::NCHW)
+ {
+ std::vector<T> tmp(kernelData.size());
+ kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
+ armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
+ kernelData = tmp;
+ }
+ }
+
// Manually calculated.
std::vector<T> outputImage(
QuantizedVector<T>({ 0.f, 0.f },
@@ -2077,6 +2103,18 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
kernelDesc.GetQuantizationScale(),
kernelDesc.GetQuantizationOffset()));
+ if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
+ workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
+ {
+ if (layout == armnn::DataLayout::NCHW)
+ {
+ std::vector<T> tmp(kernelData.size());
+ kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
+ armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
+ kernelData = tmp;
+ }
+ }
+
// Manually calculated.
std::vector<T> originalOutputImage = std::vector<T>(
QuantizedVector<T>({
@@ -2251,6 +2289,20 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
biasDesc.SetQuantizationOffset(0);
}
+ std::vector<T> kernelData;
+ kernelData.assign(originalKernel.data(), originalKernel.data() + kernelHeight*kernelWidth*outputChannels);
+ if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
+ workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
+ {
+ if (layout == armnn::DataLayout::NCHW)
+ {
+ std::vector<T> tmp(kernelData.size());
+ kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
+ armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
+ kernelData = tmp;
+ }
+ }
+
// Construct input data
std::vector<T> input;
input.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
@@ -2309,8 +2361,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
// See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
// 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
// 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
- AllocateAndCopyDataToITensorHandle(&weightsTensor, originalKernel.data()); // required for QueueDescriptor
- AllocateAndCopyDataToITensorHandle(weightsHandle.get(), originalKernel.data()); // required for ConstantTensor
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); // required for QueueDescriptor
+ AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
@@ -3029,22 +3081,37 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
auto kernel = MakeRandomTensor<T>(kernelDesc, 891234, 0.0f, 255.0f);
auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasDesc, 1028, 0.0f, 255.0f);
+ armnn::TensorInfo aclKernelDescriptor = kernelDesc;
+ std::vector<T> aclKernelData;
+ aclKernelData.assign(kernel.data(), kernel.data() + kernelHeight * kernelWidth * outputChannels);
+ if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
+ workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
+ {
+ if (layout == armnn::DataLayout::NCHW)
+ {
+ std::vector<T> tmp(kernel.size());
+ aclKernelDescriptor.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
+ armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernel.data(), tmp.data(), sizeof(T));
+ aclKernelData = tmp;
+ }
+ }
+
std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
+ std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(aclKernelDescriptor);
std::unique_ptr<armnn::ITensorHandle> biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle weightsTensor(aclKernelDescriptor);
armnn::ScopedTensorHandle biasTensor(biasDesc);
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
- AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
+ AddInputToWorkload(data, info, aclKernelDescriptor, weightsHandle.get());
AddInputToWorkload(data, info, biasDesc, biasHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -3052,8 +3119,8 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
// See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
// 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
// 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
- AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernel.data());
- AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
+ AllocateAndCopyDataToITensorHandle(weightsHandle.get(), aclKernelData.data());
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, aclKernelData.data());
AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
@@ -3788,6 +3855,18 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
1, 1, 1, 1
};
+ if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
+ workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
+ {
+ if (layout == armnn::DataLayout::NCHW)
+ {
+ std::vector<int8_t> tmp(kernelData.size());
+ kernelInfo.SetShape(armnnUtils::Permuted(kernelInfo.GetShape(), {0, 2, 3, 1}));
+ armnnUtils::Permute(kernelInfo.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(int8_t));
+ kernelData = tmp;
+ }
+ }
+
std::vector<int32_t> biasData =
{
4, 4, 4, 4
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index f6a071ab98..9a4cad3ef0 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -33,8 +33,15 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
- // ArmNN's weight format is usually [ M, I, H, W ] but for depthwise its [ 1, H, W, I*M]
- // Permute to [ 1, I * M, H, W ] (if NCHW) as required by the compute library
+ // The ArmNN weights format for depthwise is [1, H, W, C], regardless of the input/output layout.
+ //
+ // The ACL weights format for depthwise is:
+ // - [1, H, W, C] for [N, H, W, C] input/output layout (matches ArmNN)
+ // - [1, C, H, W] for [N, C, H, W] input/output layout
+ //
+ // Therefore the ArmNN weights have to be permuted when the input/output layout is [N, C, H, W] before being passed to ACL.
+ // The PermuteDepthwiseConv2dWeights backend optimization takes care of this, but it has not run yet
+ // at this point, so the weights TensorInfo is permuted here.
unsigned int aclDepthMultiplier;
TensorInfo weightsPermuted;
std::tie(weightsPermuted, aclDepthMultiplier) = Convert1HWOTensorInfoToAcl(weights, input,descriptor.m_DataLayout);
@@ -43,12 +50,10 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);
arm_compute::TensorInfo aclBiasesInfo;
- arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
-
+ arm_compute::TensorInfo* optionalAclBiasesInfo = nullptr;
if (descriptor.m_BiasEnabled)
{
ARMNN_ASSERT(biases.has_value());
-
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
}
@@ -93,41 +98,36 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClDepthwiseConvolutionWorkload_Construct",
descriptor.m_Parameters,
detailsInfo,
- this->GetGuid());
-
- // ArmNN's weight format is usually [ M, I, H, W ] but for depthwise its [ 1, H, W, I*M]
- // Permute to [ 1, I * M, H, W ] (if NCHW), as required by the compute library
- ConstTensor weightPermuted;
- unsigned int depthMultiplier;
- std::unique_ptr<unsigned char[]> permuteBuffer(new unsigned char[m_Data.m_Weight->GetTensorInfo().GetNumBytes()]);
- std::tie(weightPermuted, depthMultiplier) = Convert1HWOTensorToAcl(m_Data.m_Weight,
- info.m_InputTensorInfos[0],
- m_Data.m_Parameters.m_DataLayout,
- permuteBuffer.get());
+ GetGuid());
- // Convert the weights into the compute library format
- m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_KernelTensor, weightPermuted.GetInfo(), m_Data.m_Parameters.m_DataLayout);
+ m_Data.ValidateInputsOutputs("ClDepthwiseConv2dWorkload", descriptor.m_Parameters.GetNumInputs(), 1);
+
+ arm_compute::ICLTensor& input = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& output = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ICLTensor& weights = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensorInfo* weightsInfo = weights.info();
+ arm_compute::ITensorInfo* inputInfo = input.info();
+ auto weightsShape = weightsInfo->tensor_shape();
+ auto inputShape = inputInfo->tensor_shape();
+ // The PermuteDepthwiseConv2dWeights backend optimization has been performed,
+ // converting weights to have the same data layout as input.
+ unsigned int depthMultiplier =
+ ComputeDepthwiseConv2dDepthMultiplier(m_Data.m_Parameters.m_DataLayout, weightsShape, inputShape);
+
+ arm_compute::ICLTensor* bias = nullptr;
if (m_Data.m_Parameters.m_BiasEnabled)
{
- m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
+ bias = &PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
}
const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
m_Data.m_Parameters.m_DilationX,
m_Data.m_Parameters.m_DilationY);
-
- std::string name = std::string("ClDepthwiseConvolutionWorkload");
- m_Data.ValidateInputsOutputs(name, 1, 1);
-
- arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
+ weights.info()->set_data_layout(aclDataLayout);
output.info()->set_data_layout(aclDataLayout);
arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
@@ -141,8 +141,8 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
static_cast<arm_compute::CLDepthwiseConvolutionLayer*>(m_DepthwiseConvolutionLayer.get())->configure(
clCompileContext,
&input,
- m_KernelTensor.get(),
- m_BiasTensor.get(),
+ &weights,
+ bias,
&output,
padStrideInfo,
depthMultiplier,
@@ -150,28 +150,11 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
aclDilationInfo);
}
ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
-
- ScopedTensorHandle weightsPermutedHandle(weightPermuted);
- InitializeArmComputeClTensorData(*m_KernelTensor, &weightsPermutedHandle);
-
- if (m_BiasTensor)
- {
- InitializeArmComputeClTensorData(*m_BiasTensor, m_Data.m_Bias);
- }
-
- m_DepthwiseConvolutionLayer->prepare();
- FreeUnusedTensors();
-}
-
-void ClDepthwiseConvolutionWorkload::FreeUnusedTensors()
-{
- FreeTensorIfUnused(m_KernelTensor);
- FreeTensorIfUnused(m_BiasTensor);
}
void ClDepthwiseConvolutionWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClDepthwiseConvolutionWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClDepthwiseConvolutionWorkload_Execute", GetGuid());
ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
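Design note: with ConstTensorsAsInput, the weights (input slot 1) and optional bias (input slot 2) now reach the workload as ordinary input tensor handles, already permuted by the PermuteDepthwiseConv2dWeights optimization. The workload therefore no longer needs its own m_KernelTensor/m_BiasTensor copies, the InitializeArmComputeClTensorData() calls, or FreeUnusedTensors(), which is why those members are removed from the header below.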
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.hpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.hpp
index 7a99d6c466..e410ff9207 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.hpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.hpp
@@ -34,11 +34,6 @@ public:
protected:
std::unique_ptr<arm_compute::IFunction> m_DepthwiseConvolutionLayer;
-
- std::unique_ptr<arm_compute::CLTensor> m_KernelTensor;
- std::unique_ptr<arm_compute::CLTensor> m_BiasTensor;
-
- void FreeUnusedTensors();
};
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index 42a476c6ca..b122be62ce 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -33,11 +33,18 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
const Optional<TensorInfo>& biases,
const ActivationDescriptor* activationDescriptor)
{
- const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
-
- // ArmNN's weight format is usually [ M, I, H, W ] but for depthwise its [ 1, H, W, I*M]
- // Permute to [ 1, I * M, H, W ] (if NCHW), as required by the compute library
+ const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+
+ // The ArmNN weights format for depthwise is [1, H, W, C], regardless of the input/output layout.
+ //
+ // The ACL weights format for depthwise is:
+ // - [1, H, W, C] for [N, H, W, C] input/output layout (matches ArmNN)
+ // - [1, C, H, W] for [N, C, H, W] input/output layout
+ //
+ // Therefore the ArmNN weights have to be permuted when the input/output layout is [N, C, H, W] before being passed to ACL.
+ // The PermuteDepthwiseConv2dWeights backend optimization takes care of this, but it has not run yet
+ // at this point, so the weights TensorInfo is permuted here.
unsigned int aclDepthMultiplier;
TensorInfo weightsPermuted;
std::tie(weightsPermuted, aclDepthMultiplier) = Convert1HWOTensorInfoToAcl(weights, input, descriptor.m_DataLayout);
@@ -47,11 +54,9 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
arm_compute::TensorInfo aclBiasesInfo;
arm_compute::TensorInfo* optionalAclBiasesInfo = nullptr;
-
if (descriptor.m_BiasEnabled)
{
ARMNN_ASSERT(biases.has_value());
-
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
}
@@ -78,40 +83,34 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
const WorkloadInfo& info)
: NeonBaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
{
- // ArmNN's weight format for depthwise is [ 1, H, W, I*M ]
- auto& weightInfo = m_Data.m_Weight->GetTensorInfo();
-
- ConstTensor weightsPermuted;
- unsigned int depthMultiplier;
- std::unique_ptr<unsigned char[]> permuteBuffer(new unsigned char[weightInfo.GetNumBytes()]);
- std::tie(weightsPermuted, depthMultiplier) = Convert1HWOTensorToAcl(m_Data.m_Weight,
- info.m_InputTensorInfos[0],
- m_Data.m_Parameters.m_DataLayout,
- permuteBuffer.get());
-
- // Convert the weights into the compute library format
- m_KernelTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_KernelTensor, weightsPermuted.GetInfo(), m_Data.m_Parameters.m_DataLayout);
-
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& weights = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor* biasesPtr = nullptr;
if (m_Data.m_Parameters.m_BiasEnabled)
{
- m_BiasTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
+ biasesPtr = &PolymorphicDowncast<IAclTensorHandle *>(m_Data.m_Inputs[2])->GetTensor();
}
- const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
- m_Data.m_Parameters.m_DilationX, m_Data.m_Parameters.m_DilationY);
+ arm_compute::ITensorInfo* weightsInfo = weights.info();
+ arm_compute::ITensorInfo* inputInfo = input.info();
+ auto weightsShape = weightsInfo->tensor_shape();
+ auto inputShape = inputInfo->tensor_shape();
- m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionWorkload", 1, 1);
+ // The PermuteDepthwiseConv2dWeights backend optimization has been performed,
+ // converting weights to have the same data layout as input.
+ unsigned int depthMultiplier =
+ ComputeDepthwiseConv2dDepthMultiplier(m_Data.m_Parameters.m_DataLayout, weightsShape, inputShape);
- IAclTensorHandle* inputTensorHandle = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0]);
- IAclTensorHandle* outputTensorHandle = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0]);
+ const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
+ m_Data.m_Parameters.m_DilationX, m_Data.m_Parameters.m_DilationY);
- arm_compute::ITensor& input = inputTensorHandle->GetTensor();
- arm_compute::ITensor& output = outputTensorHandle->GetTensor();
+ uint32_t numInputs = m_Data.m_Parameters.m_BiasEnabled ? 3 : 2;
+ m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionWorkload", numInputs, 1);
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
+ weights.info()->set_data_layout(aclDataLayout);
output.info()->set_data_layout(aclDataLayout);
arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
@@ -121,8 +120,8 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
static_cast<arm_compute::NEDepthwiseConvolutionLayer*>(
m_pDepthwiseConvolutionLayer.get())->configure(&input,
- m_KernelTensor.get(),
- m_BiasTensor.get(),
+ &weights,
+ biasesPtr,
&output,
padStrideInfo,
depthMultiplier,
@@ -144,34 +143,19 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonDepthwiseConvolution2dWorkload_Construct",
descriptor.m_Parameters,
detailsInfo,
- this->GetGuid());
+ GetGuid());
ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
- ScopedTensorHandle weightsPermutedHandle(weightsPermuted);
- InitializeArmComputeTensorData(*m_KernelTensor, &weightsPermutedHandle);
-
- if (m_Data.m_Parameters.m_BiasEnabled)
- {
- InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
- }
-
m_pDepthwiseConvolutionLayer->prepare();
- FreeUnusedTensors();
}
void NeonDepthwiseConvolutionWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonDepthwiseConvolutionWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonDepthwiseConvolutionWorkload_Execute", GetGuid());
ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
m_pDepthwiseConvolutionLayer->run();
}
-void NeonDepthwiseConvolutionWorkload::FreeUnusedTensors()
-{
- FreeTensorIfUnused(m_KernelTensor);
- FreeTensorIfUnused(m_BiasTensor);
-}
-
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.hpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.hpp
index 45c646aa44..5402091893 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.hpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.hpp
@@ -33,11 +33,6 @@ public:
private:
mutable std::unique_ptr<arm_compute::IFunction> m_pDepthwiseConvolutionLayer;
-
- std::unique_ptr<arm_compute::Tensor> m_KernelTensor;
- std::unique_ptr<arm_compute::Tensor> m_BiasTensor;
-
- void FreeUnusedTensors();
};
} // namespace armnn