From c1d07143c3d01df58dd5f0e4a10b38b7bd3565d4 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Wed, 11 Dec 2019 18:32:43 +0000
Subject: MLCE-143 Fixed driver crash during CTS tests

MLCE-144 Fix cts MAX_POOL_2D_V1_0 tests

 * Only apply the Optimization when the base ReshapeLayer is connected to
   the child ReshapeLayer and no other Layer.

Signed-off-by: Finn Williams
Signed-off-by: Mike Kelly
Change-Id: Id1215e8b1c06d7bdb77905fec9649a8ec26436f0
---
 .../optimizations/OptimizeConsecutiveReshapes.hpp | 10 ++++++-
 .../backendsCommon/test/Pooling2dTestImpl.hpp     | 12 ++++----
 src/backends/reference/workloads/Pooling2d.cpp    | 35 +++++++++++++++-------
 3 files changed, 40 insertions(+), 17 deletions(-)

diff --git a/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
index 5047d5d678..e2d4a2dcc3 100644
--- a/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
+++ b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
@@ -14,7 +14,7 @@ namespace optimizations
 class OptimizeConsecutiveReshapesImpl
 {
 public:
-    /// Run for every connection between a base RashapeLayer and a child ReshapeLayer.
+    /// Run for every connection between a base ReshapeLayer and a child ReshapeLayer.
     /// Inserts an equivalent ReshapeLayer that bypasses both for that connection.
     void Run(Graph& graph, InputSlot& connection) const
     {
@@ -29,12 +29,20 @@ public:
         const TensorInfo& inInfo = parentOut->GetTensorInfo();
         const TensorInfo& outInfo = child.GetOutputHandler().GetTensorInfo();
 
+        // This Optimization is only appropriate when the base ReshapeLayer is connected to the child ReshapeLayer
+        // and no other Layer.
+        if (base.GetOutputSlot(0).GetNumConnections() > 1)
+        {
+            return;
+        }
+
         if (inInfo.GetShape() != outInfo.GetShape())
         {
             // Inserts equivalent reshape before base layer.
             const std::string name = std::string("merged-") + base.GetName() + std::string("-with-") + child.GetName();
             const ReshapeDescriptor descriptor{outInfo.GetShape()};
             auto& newReshape = *graph.InsertNewLayer<ReshapeLayer>(base.GetInputSlot(0), descriptor, name.c_str());
+            // Sets tensor info for new layer.
             newReshape.GetOutputHandler().SetTensorInfo(outInfo);
 
             // Reconnects base with original parent.
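
The early return added above skips the merge whenever the base ReshapeLayer's output slot feeds
more than one input slot, since bypassing both reshapes would otherwise re-route a connection that
another consumer still depends on. The sketch below builds such a topology with ArmNN's public
INetwork API; the layer names, shapes and binding ids are illustrative assumptions, not taken from
the failing CTS model.

    // Minimal sketch (assumed shapes/names): the base reshape feeds the child reshape
    // *and* a second consumer, so the two reshapes must not be merged away.
    #include <armnn/ArmNN.hpp>

    int main()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        TensorInfo inputInfo(TensorShape({1, 2, 2, 1}), DataType::Float32);
        TensorInfo reshapedInfo(TensorShape({1, 4}), DataType::Float32);

        IConnectableLayer* input = network->AddInputLayer(0, "input");

        ReshapeDescriptor baseDesc;
        baseDesc.m_TargetShape = TensorShape({1, 4});
        IConnectableLayer* baseReshape = network->AddReshapeLayer(baseDesc, "baseReshape");

        ReshapeDescriptor childDesc;
        childDesc.m_TargetShape = TensorShape({4, 1});
        IConnectableLayer* childReshape = network->AddReshapeLayer(childDesc, "childReshape");

        IConnectableLayer* output0 = network->AddOutputLayer(0, "output0");
        IConnectableLayer* output1 = network->AddOutputLayer(1, "output1");

        input->GetOutputSlot(0).Connect(baseReshape->GetInputSlot(0));
        baseReshape->GetOutputSlot(0).Connect(childReshape->GetInputSlot(0));
        // Second consumer of the base reshape: with the guard in place the optimization
        // is skipped for this connection instead of re-routing it.
        baseReshape->GetOutputSlot(0).Connect(output1->GetInputSlot(0));
        childReshape->GetOutputSlot(0).Connect(output0->GetInputSlot(0));

        input->GetOutputSlot(0).SetTensorInfo(inputInfo);
        baseReshape->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
        childReshape->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({4, 1}), DataType::Float32));

        return 0;
    }

With the guard in place the optimizer simply leaves both reshape layers of such a graph untouched
rather than attempting the merge.
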
diff --git a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
index 5edf9c802f..da80d64f8d 100644
--- a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
@@ -223,17 +223,17 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
         0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
         0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
 
-        0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
-        0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
+        0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
+        0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
 
         0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
         0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
         0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
 
-        0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
-        0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-        0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f
+        0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
+        0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
+        0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
     }));
 }
 
diff --git a/src/backends/reference/workloads/Pooling2d.cpp b/src/backends/reference/workloads/Pooling2d.cpp
index f2532cac03..cf83f8ce2b 100644
--- a/src/backends/reference/workloads/Pooling2d.cpp
+++ b/src/backends/reference/workloads/Pooling2d.cpp
@@ -107,9 +107,9 @@ namespace
         }
     }
 
-    bool OnPaddingOnly(int start, int end, int maxRange, int padding)
+    bool OnPaddingOnly(int start, int end, int maxRange)
     {
-        if (end <= 0 || start > (maxRange - padding))
+        if (end <= 0 || start > maxRange)
         {
             return true;
         }
@@ -187,34 +187,49 @@ void Pooling2d(Decoder<float>& rInputDecoder,
         {
             for (int yOutput = 0; yOutput < heightOutput; yOutput++)
             {
+                // Calculate values independent of the x axis
+                int hstart = (yOutput * strideY) - padTop;
+                int hend = hstart + poolHeight;
+                // Clamp the pooling region inside the valid input area (which includes the padding).
+                // This is necessary because the final pooling in a row may overlap beyond the padding.
+                hend = std::min(hend, heightInput + padBottom);
+
+                int height = hend - hstart;
+                bool hclamped = ClampRange(hstart, hend, heightInput);
+
                 for (int xOutput = 0; xOutput < widthOutput; xOutput++)
                 {
-                    int hstart = (yOutput * strideY) - padTop;
                     int wstart = (xOutput * strideX) - padLeft;
-                    int hend = hstart + poolHeight;
                     int wend = wstart + poolWidth;
 
                     // Clamp the pooling region inside the valid input area (which includes the padding).
                     // This is necessary because the final pooling in a row may overlap beyond the padding.
-                    hend = std::min(hend, heightInput + padBottom);
                     wend = std::min(wend, widthInput + padRight);
 
                     float result = defaultInitializer;
-                    float poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart));
+                    float poolAreaSize = boost::numeric_cast<float>(height * (wend - wstart));
 
                     // Special case: when the pooling kernel is over a padding region and the padding
                     //               size is larger or equal to the kernel and the kernel only covers
                     //               padding and no real values, then we initialize the result as zero
                     //               by convention. This is because we need to choose a value here and
                     //               all values we have are padding, which we ignore.
-                    if (OnPaddingOnly(hstart, hend, heightInput, padBottom) ||
-                        OnPaddingOnly(wstart, wend, widthInput, padRight))
+                    if (OnPaddingOnly(hstart, hend, heightInput) ||
+                        OnPaddingOnly(wstart, wend, widthInput))
                     {
                         result = 0.0f;
+
+                        unsigned int outputIndex = dataLayout.GetIndex(outputShape,
+                                                                       boost::numeric_cast<unsigned int>(n),
+                                                                       boost::numeric_cast<unsigned int>(c),
+                                                                       boost::numeric_cast<unsigned int>(yOutput),
+                                                                       boost::numeric_cast<unsigned int>(xOutput));
+                        rOutputEncoder[outputIndex];
+                        rOutputEncoder.Set(result);
+                        continue;
                     }
 
-                    bool clamped = ClampRange(wstart, wend, widthInput);
-                    clamped |= ClampRange(hstart, hend, heightInput);
+                    bool clamped = hclamped |= ClampRange(wstart, wend, widthInput);
 
                     if (clamped && params.m_PaddingMethod == PaddingMethod::Exclude)
                     {
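
The reference workload now precomputes the y-axis window bounds once per output row, writes 0.0f
directly for any output element whose pooling window covers only padding, and skips the
accumulation for that element via continue; OnPaddingOnly no longer subtracts the padding from the
valid range, which previously could misclassify windows that still overlap real input. The
following standalone sketch (plain C++, not the ArmNN sources; the pool size, stride and padding
values are made up) walks the same window arithmetic for a single 1-D output column of a max pool.

    #include <algorithm>
    #include <cstdio>
    #include <limits>
    #include <vector>

    namespace
    {
        // Mirrors the patched helper: the window is "padding only" when it ends before
        // the input starts or starts after the input ends.
        bool OnPaddingOnly(int start, int end, int maxRange)
        {
            return end <= 0 || start > maxRange;
        }

        // Clamps [rangeMin, rangeMax) to the real input and reports whether clamping happened.
        bool ClampRange(int& rangeMin, int& rangeMax, int maxRange)
        {
            if (rangeMin < 0 || rangeMax > maxRange)
            {
                rangeMin = std::max(rangeMin, 0);
                rangeMax = std::min(rangeMax, maxRange);
                return true;
            }
            return false;
        }
    }

    int main()
    {
        const std::vector<float> input = { -3.0f, -1.0f, -2.0f, -4.0f }; // heightInput = 4
        const int heightInput = static_cast<int>(input.size());
        const int poolHeight = 2, strideY = 3, padTop = 0, padBottom = 3;
        const int heightOutput = 3; // e.g. ceiling rounding of (4 + 0 + 3 - 2) / 3 + 1

        for (int yOutput = 0; yOutput < heightOutput; ++yOutput)
        {
            int hstart = (yOutput * strideY) - padTop;
            int hend = std::min(hstart + poolHeight, heightInput + padBottom);

            float result;
            if (OnPaddingOnly(hstart, hend, heightInput))
            {
                result = 0.0f; // window sees only padding: write 0 and move on
            }
            else
            {
                ClampRange(hstart, hend, heightInput);
                result = std::numeric_limits<float>::lowest();
                for (int y = hstart; y < hend; ++y)
                {
                    result = std::max(result, input[static_cast<unsigned>(y)]);
                }
            }
            std::printf("output[%d] = %.1f\n", yOutput, result);
        }
        return 0;
    }

Running the sketch prints -1.0, -4.0 and 0.0: the first two windows clamp to the real input, while
the last one lies entirely in the bottom padding and takes the zero convention.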