aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Kelly <mike.kelly@arm.com>2021-10-14 12:35:58 +0100
committerMike Kelly <mike.kelly@arm.com>2021-10-14 12:35:58 +0100
commite2d611e4502fb5dce8b8a398ccfc8d7ef29da96b (patch)
tree79d9178420924d57ed0ab23db46ff57159279886
parent4bd8f7c96e01e081276c376fe5c34018e7b70d17 (diff)
downloadandroid-nn-driver-e2d611e4502fb5dce8b8a398ccfc8d7ef29da96b.tar.gz
IVGCVSW-6428 Remove asserts
* Changed asserts to check for errors and return appropriate values or throw exceptions * Changed unit tests to use Doctest's long macro names as the short macro names clashed with Android's Logging macros * Removed unused #includes * Clarified ambiguous #includes Signed-off-by: Mike Kelly <mike.kelly@arm.com> Change-Id: Ice92a37590df727fd581d3be5ff2716665f26a13
-rw-r--r--1.0/HalPolicy.cpp19
-rw-r--r--ArmnnPreparedModel.cpp3
-rw-r--r--ArmnnPreparedModel_1_2.cpp3
-rw-r--r--ArmnnPreparedModel_1_3.cpp3
-rw-r--r--ConversionUtils.cpp20
-rw-r--r--ConversionUtils.hpp165
-rw-r--r--ConversionUtils_1_2.hpp71
-rw-r--r--ConversionUtils_1_3.hpp17
-rw-r--r--DriverOptions.cpp3
-rw-r--r--ModelToINetworkConverter.cpp15
-rw-r--r--RequestThread.cpp7
-rw-r--r--RequestThread_1_3.cpp10
-rw-r--r--Utils.cpp39
-rw-r--r--test/1.0/Convolution2D.cpp13
-rw-r--r--test/1.0/FullyConnectedReshape.cpp27
-rw-r--r--test/1.0/Lstm.cpp34
-rw-r--r--test/1.1/Convolution2D.cpp17
-rw-r--r--test/1.1/Lstm.cpp32
-rw-r--r--test/1.1/Mean.cpp42
-rw-r--r--test/1.1/Transpose.cpp38
-rw-r--r--test/1.2/Capabilities.cpp55
-rw-r--r--test/1.2/Dilation.cpp25
-rw-r--r--test/1.2/Lstm.cpp38
-rw-r--r--test/1.3/QLstm.cpp34
-rw-r--r--test/1.3/QosTests.cpp21
-rw-r--r--test/Concat.cpp117
-rw-r--r--test/Concurrent.cpp16
-rw-r--r--test/Convolution2D.hpp11
-rw-r--r--test/Dilation.hpp6
-rw-r--r--test/DriverTestHelpers.cpp27
-rw-r--r--test/DriverTestHelpers.hpp35
-rw-r--r--test/FullyConnected.cpp67
-rw-r--r--test/GenericLayerTests.cpp57
-rw-r--r--test/Lstm.hpp27
-rw-r--r--test/SystemProperties.cpp26
-rw-r--r--test/TestTensor.cpp5
-rw-r--r--test/TestTensor.hpp10
-rw-r--r--test/Tests.cpp26
-rw-r--r--test/UtilsTests.cpp43
39 files changed, 670 insertions, 554 deletions
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index d4d61211..c4a219c9 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -579,7 +579,10 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
}
armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the SoftmaxLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
@@ -638,7 +641,10 @@ bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& mod
}
armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the SpaceToDepthLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
@@ -704,9 +710,10 @@ bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& m
}
armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
-
- assert(layer != nullptr);
-
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ResizeLayer", __func__);
+ }
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
input.Connect(layer->GetInputSlot(0));
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index f14560a1..e5746b8c 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -12,7 +12,6 @@
#include <OperationsUtils.h>
#include <ValidateHal.h>
-#include <cassert>
#include <cinttypes>
#ifdef ARMNN_ANDROID_S
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index 5a10d546..2e378801 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -14,7 +14,6 @@
#include <ExecutionBurstServer.h>
#include <ValidateHal.h>
-#include <cassert>
#include <cinttypes>
#ifdef ARMNN_ANDROID_S
diff --git a/ArmnnPreparedModel_1_3.cpp b/ArmnnPreparedModel_1_3.cpp
index 16ea113c..a5032360 100644
--- a/ArmnnPreparedModel_1_3.cpp
+++ b/ArmnnPreparedModel_1_3.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
// Note: the ArmnnFencedExecutionCallback and code snippet in the executeFenced() function
@@ -19,7 +19,6 @@
#include <ExecutionBurstServer.h>
#include <ValidateHal.h>
-#include <cassert>
#include <cinttypes>
#ifdef ARMNN_ANDROID_S
diff --git a/ConversionUtils.cpp b/ConversionUtils.cpp
index d59af994..7eb7343e 100644
--- a/ConversionUtils.cpp
+++ b/ConversionUtils.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,7 +31,11 @@ bool LayerInputHandle::IsValid() const
void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
{
- ARMNN_ASSERT(IsValid());
+ if (!IsValid())
+ {
+ throw armnn::RuntimeException("LayerInputHandle is invalid");
+ }
+
if (m_OutputSlot)
{
m_OutputSlot->Connect(inputSlot);
@@ -40,7 +44,10 @@ void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
void LayerInputHandle::Disconnect(armnn::IInputSlot& inputSlot)
{
- ARMNN_ASSERT(IsValid());
+ if (!IsValid())
+ {
+ throw armnn::RuntimeException("LayerInputHandle is invalid");
+ }
if (m_OutputSlot)
{
m_OutputSlot->Disconnect(inputSlot);
@@ -116,8 +123,11 @@ armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
armnn::IConnectableLayer* prevLayer,
ConversionData& data)
{
- ARMNN_ASSERT(prevLayer->GetNumOutputSlots() == 1);
-
+ if (prevLayer->GetNumOutputSlots() != 1)
+ {
+ Fail("%s: Incorrect Number of OutputSlots expected 1 was %i", __func__, prevLayer->GetNumOutputSlots());
+ return nullptr;
+ }
prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
armnn::IConnectableLayer* activationLayer = prevLayer;
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index ca5c99ec..1d182fad 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -9,7 +9,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
-#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
@@ -277,7 +276,10 @@ armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
- ARMNN_ASSERT(reshapeLayer != nullptr);
+ if (!reshapeLayer)
+ {
+ throw armnn::RuntimeException("ReshapeLayer is null");
+ }
// Attach the input layer to the reshape layer
inputLayer.Connect(reshapeLayer->GetInputSlot(0));
@@ -291,7 +293,10 @@ bool BroadcastTensor(LayerInputHandle& input0,
armnn::IConnectableLayer* startLayer,
ConversionData& data)
{
- ARMNN_ASSERT(startLayer != nullptr);
+ if (!startLayer)
+ {
+ throw armnn::RuntimeException("StartLayer is null");
+ }
const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
@@ -346,7 +351,11 @@ bool BroadcastTensor(LayerInputHandle& input0,
return false;
}
- ARMNN_ASSERT(data.m_Network != nullptr);
+ if (!data.m_Network)
+ {
+ throw armnn::RuntimeException("Network is null");
+ }
+
armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
if (input0IsSmaller)
@@ -506,9 +515,10 @@ armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& inp
{
// Add swizzle layer
armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
-
- ARMNN_ASSERT(layer != nullptr);
-
+ if (!layer)
+ {
+ throw armnn::RuntimeException("TransposeLayer is null");
+ }
// Connect input to swizzle layer
input.Connect(layer->GetInputSlot(0));
@@ -630,7 +640,11 @@ bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
bool needPermute = false;
- ARMNN_ASSERT(numberOfDimensions >= 3);
+
+ if (numberOfDimensions < 3)
+ {
+ return Fail("%s: Invalid numberOfDimensions: %i < 3", __func__, numberOfDimensions);
+ }
// ArmNN uses Compute Library subtensors to perform concatenation
// This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
@@ -696,13 +710,18 @@ const HalOperand* GetInputOperand(const HalOperation& operation,
{
if (failOnIndexOutOfBounds)
{
- Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
+ Fail("%s: Invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
}
return nullptr;
}
// Model should have been validated beforehand
- ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
+ if (operation.inputs[inputIndex] >= getMainModel(model).operands.size())
+ {
+ Fail("%s: invalid model index: %i >= %i", __func__, inputIndex, getMainModel(model).operands.size());
+ return nullptr;
+ }
+
return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
@@ -721,8 +740,11 @@ const HalOperand* GetOutputOperand(const HalOperation& operation,
}
// Model should have been validated beforehand
- ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());
-
+    if (operation.outputs[outputIndex] >= getMainModel(model).operands.size())
+ {
+ Fail("%s: invalid model index: %i >= %i", __func__, outputIndex, getMainModel(model).operands.size());
+ return nullptr;
+ }
return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
@@ -1439,7 +1461,7 @@ bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
// Type one dynamic tensors require the previous layer's output shape for inference
for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
{
- if(!layer.GetInputSlot(inputSlotIndex).GetConnection())
+ if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
{
return false;
}
@@ -1596,7 +1618,10 @@ bool ConvertToActivation(const HalOperation& operation,
}
armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ActivationLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -1978,8 +2003,10 @@ bool ConvertArgMinMax(const HalOperation& operation,
}
armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
- assert(layer != nullptr);
-
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ArgMinMaxLayer", __func__);
+ }
input0.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -2110,7 +2137,11 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,
}
}
- ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
+ if (inputShapes.size() != inputHandles.size())
+ {
+        return Fail("%s: invalid model input shapes size doesn't match input handles size: %i != %i", __func__,
+ inputShapes.size(), inputHandles.size());
+ }
if (inputsHaveBeenReshaped)
{
@@ -2217,11 +2248,19 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,
}
armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ConcatLayer", __func__);
+ }
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Connect inputs to the layer
const int numInputSlots = layer->GetNumInputSlots();
- assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
+
+ if (static_cast<std::size_t>(numInputSlots) != inputHandles.size())
+ {
+        return Fail("%s: invalid model input slots size doesn't match input handles size: %i != %i", __func__,
+ static_cast<std::size_t>(numInputSlots), inputHandles.size());
+ }
for (int i = 0; i < numInputSlots; ++i)
{
// connect the input directly to the merge (concat) layer
@@ -2265,7 +2304,10 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,
if (isDynamicTensor)
{
// Infer the output shapes of concat if outputs are type 1 dynamic
- ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
+ if (!layer->GetOutputSlot(0).IsTensorInfoSet())
+ {
+ return Fail("%s: TensorInfo is not set", __func__);
+ }
if (!ValidateConcatOutputShape(inputShapes,
layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
concatDim))
@@ -2520,7 +2562,10 @@ bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, C
}
armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the DepthToSpaceLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -2727,7 +2772,10 @@ bool ConvertDequantize(const HalOperation& operation, const HalModel& model, Con
}
armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the DequantizeLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -2850,7 +2898,10 @@ bool ConvertFloor(const HalOperation& operation, const HalModel& model, Conversi
}
armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the FloorLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -2935,7 +2986,11 @@ DequantizeResult DequantizeIfRequired(size_t operand_index,
}
const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
- ARMNN_ASSERT(operand);
+
+ if (!operand)
+ {
+ return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
+ }
if (!IsQSymm8(*operand))
{
@@ -2959,7 +3014,11 @@ DequantizeResult DequantizeIfRequired(size_t operand_index,
for (size_t i = 0; i < dequantizedBufferLength; ++i)
{
float* dstPtr = dequantizedBuffer.get();
- ARMNN_ASSERT(dstPtr);
+
+ if (!dstPtr)
+ {
+ return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
+ }
*dstPtr++ = quantizedBuffer[i] * quantizationScale;
}
@@ -3135,7 +3194,10 @@ bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model,
reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
- assert(reshapeLayer != nullptr);
+ if (!reshapeLayer)
+ {
+ return Fail("%s: could not add the reshapeLayer", __func__);
+ }
input.Connect(reshapeLayer->GetInputSlot(0));
reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
@@ -3215,7 +3277,10 @@ bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model
}
armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the L2NormalizationLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -3300,9 +3365,11 @@ bool ConvertLocalResponseNormalization(const HalOperation& operation,
return false;
}
-
armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the NormalizationLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -3399,7 +3466,10 @@ bool ConvertMean(const HalOperation& operation, const HalModel& model, Conversio
}
armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the MeanLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -3542,7 +3612,10 @@ bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData&
}
armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the PadLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -3627,7 +3700,10 @@ bool ConvertReshape(const HalOperation& operation, const HalModel& model, Conver
}
armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ReshapeLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -3783,7 +3859,10 @@ bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, Conver
}
armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ReshapeLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
@@ -3918,7 +3997,10 @@ bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, C
}
armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the StridedSliceLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -4002,7 +4084,10 @@ bool ConvertTranspose(const HalOperation& operation, const HalModel& model, Conv
}
armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the TransposeLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -4097,7 +4182,10 @@ bool ConvertBatchToSpaceNd(const HalOperation& operation,
}
armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the BatchToSpaceNdLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -4212,7 +4300,10 @@ bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model,
}
armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+        return Fail("%s: Could not add the SpaceToBatchNdLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index 404ff32d..acf787f3 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -163,7 +163,10 @@ bool ConvertCast(const HalOperation& operation,
}
IConnectableLayer* layer = data.m_Network->AddCastLayer();
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the CastLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -315,7 +318,10 @@ bool ConvertComparison_1_2(const HalOperation& operation,
}
IConnectableLayer* layer = data.m_Network->AddComparisonLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ComparisonLayer", __func__);
+ }
bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
if (!isReshapeSupported)
@@ -732,7 +738,10 @@ bool ConvertElementwiseUnary(const HalOperation& operation,
}
IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ElementwiseUnaryLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -814,7 +823,10 @@ bool ConvertExpandDims(const HalOperation& operation, const HalModel& model, Con
}
IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ReshapeLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -899,7 +911,10 @@ bool ConvertGather(const HalOperation& operation, const HalModel& model, Convers
}
IConnectableLayer* layer = data.m_Network->AddGatherLayer(desc);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the GatherLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
indices.Connect(layer->GetInputSlot(1));
@@ -1456,9 +1471,8 @@ bool ConvertLogSoftmax(const HalOperation& operation, const HalModel& model, Con
IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor);
if (!layer)
{
- return Fail("%s: AddLogSoftmaxLayer() returned nullptr", __func__);
+ return Fail("%s: Could not add the LogSoftmaxLayer", __func__);
}
-
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -1516,7 +1530,10 @@ bool ConvertMaximum(const HalOperation& operation, const HalModel& model, Conver
}
IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the MaximumLayer", __func__);
+ }
bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
if (!isReshapeSupported)
{
@@ -1578,7 +1595,10 @@ bool ConvertMinimum(const HalOperation& operation, const HalModel& model, Conver
}
IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the MinimumLayer", __func__);
+ }
bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
if (!isReshapeSupported)
{
@@ -1690,7 +1710,10 @@ bool ConvertPadV2(const HalOperation& operation, const HalModel& model, Conversi
}
IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the PadLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -1751,10 +1774,9 @@ bool ConvertPrelu(const HalOperation& operation, const HalModel& model, Conversi
}
IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
-
if (!layer)
{
- return Fail("%s: AddPreluLayer failed", __func__);
+ return Fail("%s: Could not add the PreluLayer", __func__);
}
bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
@@ -1815,7 +1837,10 @@ bool ConvertQuantize(const HalOperation& operation, const HalModel& model, Conve
}
IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the QuantizeLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -2139,7 +2164,10 @@ bool ConvertReduce(const HalOperation& operation,
}
armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ReduceLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -2288,7 +2316,10 @@ bool ConvertResize(const HalOperation& operation,
}
IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ResizeLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -2363,7 +2394,10 @@ bool ConvertSpaceToDepth(const HalOperation& operation, const HalModel& model, C
}
IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
- assert(layer != nullptr);
+ if (!layer)
+ {
+        return Fail("%s: Could not add the SpaceToDepthLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -2453,7 +2487,10 @@ bool ConvertSoftmax(const HalOperation& operation, const HalModel& model, Conver
}
IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the SoftmaxLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
diff --git a/ConversionUtils_1_3.hpp b/ConversionUtils_1_3.hpp
index 150735e9..059b79f0 100644
--- a/ConversionUtils_1_3.hpp
+++ b/ConversionUtils_1_3.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -144,7 +144,10 @@ bool ConvertFill(const HalOperation& operation, const HalModel& model, Conversio
}
IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the FillLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
@@ -212,7 +215,10 @@ bool ConvertLogicalBinary(const HalOperation& operation,
}
IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the LogicalBinaryLayer", __func__);
+ }
bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
if (!isReshapeSupported)
@@ -776,7 +782,10 @@ bool ConvertRank(const HalOperation& operation, const HalModel& model, Conversio
}
armnn::IConnectableLayer* layer = data.m_Network->AddRankLayer();
- assert(layer != nullptr);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the RankLayer", __func__);
+ }
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, &outInfo);
diff --git a/DriverOptions.cpp b/DriverOptions.cpp
index 5b67aa36..8fd5c477 100644
--- a/DriverOptions.cpp
+++ b/DriverOptions.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,7 +17,6 @@
#include <cxxopts/cxxopts.hpp>
#include <algorithm>
-#include <cassert>
#include <functional>
#include <string>
#include <sstream>
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 4665ef16..2dc302ed 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,7 +35,6 @@ ModelToINetworkConverter<HalPolicy>::ModelToINetworkConverter(const std::vector<
{
m_ConversionResult = ConversionResult::UnsupportedFeature;
ALOGE("%s: Unexpected exception: %s", __func__, e.what());
- assert(false);
}
}
@@ -194,7 +193,12 @@ void ModelToINetworkConverter<HalPolicy>::Convert()
const std::string layerName = "Output_" + std::to_string(i);
armnn::IConnectableLayer* layer = m_Data.m_Network->AddOutputLayer(i, layerName.c_str());
- assert(m_Data.m_OutputSlotForOperand[outputIndex]);
+ if (!m_Data.m_OutputSlotForOperand[outputIndex])
+ {
+ Fail("%s: OutputSlot %i does not exist", __func__, outputIndex);
+ m_ConversionResult = ConversionResult::UnsupportedFeature;
+ break;
+ }
m_Data.m_OutputSlotForOperand[outputIndex]->Connect(layer->GetInputSlot(0));
}
}
@@ -210,7 +214,10 @@ template<typename HalPolicy>
bool ModelToINetworkConverter<HalPolicy>::IsOperationSupported(uint32_t operationIndex) const
{
std::map<uint32_t, bool>::const_iterator it = m_OperationSupported.find(operationIndex);
- assert(it != m_OperationSupported.end());
+ if (it == m_OperationSupported.end())
+ {
+ return Fail("%s: Unrecognised Operation Index: %i", __func__, operationIndex);
+ }
return it->second;
}
diff --git a/RequestThread.cpp b/RequestThread.cpp
index 927af922..783e351d 100644
--- a/RequestThread.cpp
+++ b/RequestThread.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,8 +17,6 @@
#include "ArmnnPreparedModel_1_3.hpp"
#endif
-#include <armnn/utility/Assert.hpp>
-
#include <log/log.h>
using namespace android;
@@ -134,8 +132,7 @@ void RequestThread<PreparedModel, HalVersion, CallbackContext>::Process()
default:
// this should be unreachable
- ALOGE("RequestThread::Process() - invalid message type");
- ARMNN_ASSERT_MSG(false, "ArmNN: RequestThread: invalid message type");
+ throw armnn::RuntimeException("ArmNN: RequestThread: invalid message type");
}
}
}
diff --git a/RequestThread_1_3.cpp b/RequestThread_1_3.cpp
index 59fa70ed..6133e290 100644
--- a/RequestThread_1_3.cpp
+++ b/RequestThread_1_3.cpp
@@ -1,15 +1,12 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#define LOG_TAG "ArmnnDriver"
-#include "RequestThread_1_3.hpp"
-
#include "ArmnnPreparedModel_1_3.hpp"
-
-#include <armnn/utility/Assert.hpp>
+#include "RequestThread_1_3.hpp"
#include <log/log.h>
@@ -178,8 +175,7 @@ void RequestThread_1_3<PreparedModel, HalVersion, CallbackContext>::Process()
default:
// this should be unreachable
- ALOGE("RequestThread_1_3::Process() - invalid message type");
- ARMNN_ASSERT_MSG(false, "ArmNN: RequestThread_1_3: invalid message type");
+ throw armnn::RuntimeException("ArmNN: RequestThread_1_3: invalid message type");
}
}
}
diff --git a/Utils.cpp b/Utils.cpp
index dcee44a1..9b52f5eb 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -13,10 +13,8 @@
#include <armnnUtils/Permute.hpp>
#include <armnn/Utils.hpp>
-#include <armnn/utility/Assert.hpp>
#include <log/log.h>
-#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <sstream>
@@ -34,14 +32,17 @@ const armnn::PermutationVector g_DontPermute{};
void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensorInfo, const void* input, void* output,
const armnn::PermutationVector& mappings)
{
- assert(tensorInfo.GetNumDimensions() == 4U);
-
+ if (tensorInfo.GetNumDimensions() != 4U)
+ {
+ throw armnn::InvalidArgumentException("NumDimensions must be 4");
+ }
armnn::DataType dataType = tensorInfo.GetDataType();
switch (dataType)
{
case armnn::DataType::Float16:
case armnn::DataType::Float32:
case armnn::DataType::QAsymmU8:
+ case armnn::DataType::QSymmS16:
case armnn::DataType::QSymmS8:
case armnn::DataType::QAsymmS8:
// First swizzle tensor info
@@ -50,15 +51,17 @@ void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensorInfo, const void*
armnnUtils::Permute(tensorInfo.GetShape(), mappings, input, output, armnn::GetDataTypeSize(dataType));
break;
default:
- ALOGW("Unknown armnn::DataType for swizzling");
- assert(0);
+ throw armnn::InvalidArgumentException("Unknown DataType for swizzling");
}
}
void* GetMemoryFromPool(V1_0::DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
// find the location within the pool
- assert(location.poolIndex < memPools.size());
+ if (location.poolIndex >= memPools.size())
+ {
+ throw armnn::InvalidArgumentException("The poolIndex is greater than the memPools size.");
+ }
const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
@@ -185,9 +188,10 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
if (perChannel)
{
- // ExtraParams is expected to be of type channelQuant
- ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
- V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);
+ if (operand.extraParams.getDiscriminator() != V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant)
+ {
+ throw armnn::InvalidArgumentException("ExtraParams is expected to be of type channelQuant");
+ }
auto perAxisQuantParams = operand.extraParams.channelQuant();
@@ -286,9 +290,10 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
if (perChannel)
{
// ExtraParams is expected to be of type channelQuant
- ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
- V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);
-
+ if (operand.extraParams.getDiscriminator() != V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant)
+ {
+ throw armnn::InvalidArgumentException("ExtraParams is expected to be of type channelQuant");
+ }
auto perAxisQuantParams = operand.extraParams.channelQuant();
ret.SetQuantizationScales(perAxisQuantParams.scales);
@@ -485,7 +490,11 @@ void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
return;
}
- ARMNN_ASSERT(profiler);
+ if (!profiler)
+ {
+ ALOGW("profiler was null");
+ return;
+ }
// Set the name of the output profiling file.
fs::path dumpPath = dumpDir;
diff --git a/test/1.0/Convolution2D.cpp b/test/1.0/Convolution2D.cpp
index c833d89b..2af09157 100644
--- a/test/1.0/Convolution2D.cpp
+++ b/test/1.0/Convolution2D.cpp
@@ -1,18 +1,14 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "../DriverTestHelpers.hpp"
#include "../Convolution2D.hpp"
-#include "../../1.0/HalPolicy.hpp"
-#include <doctest/doctest.h>
#include <log/log.h>
#include <OperationsUtils.h>
-
using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
@@ -28,16 +24,15 @@ void SetModelFp16Flag(V1_0::Model&, bool)
} // namespace driverTestHelpers
-TEST_SUITE("Convolution2DTests_1.0")
+DOCTEST_TEST_SUITE("Convolution2DTests_1.0")
{
-
-TEST_CASE("ConvValidPadding_Hal_1_0")
+DOCTEST_TEST_CASE("ConvValidPadding_Hal_1_0")
{
PaddingTestImpl<hal_1_0::HalPolicy>(android::nn::kPaddingValid);
}
-TEST_CASE("ConvSamePadding_Hal_1_0")
+DOCTEST_TEST_CASE("ConvSamePadding_Hal_1_0")
{
PaddingTestImpl<hal_1_0::HalPolicy>(android::nn::kPaddingSame);
}
diff --git a/test/1.0/FullyConnectedReshape.cpp b/test/1.0/FullyConnectedReshape.cpp
index 4585c95b..e481f2d2 100644
--- a/test/1.0/FullyConnectedReshape.cpp
+++ b/test/1.0/FullyConnectedReshape.cpp
@@ -1,36 +1,39 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../DriverTestHelpers.hpp"
-#include "../../1.0/FullyConnected.hpp"
-#include <doctest/doctest.h>
-
-TEST_SUITE("FullyConnectedReshapeTests")
+DOCTEST_TEST_SUITE("FullyConnectedReshapeTests")
{
-TEST_CASE("TestFlattenFullyConnectedInput")
+DOCTEST_TEST_CASE("TestFlattenFullyConnectedInput")
{
using armnn::TensorShape;
// Pass through 2d input
- CHECK(FlattenFullyConnectedInput(TensorShape({2,2048}), TensorShape({512, 2048})) == TensorShape({2, 2048}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({2,2048}),
+ TensorShape({512, 2048})) == TensorShape({2, 2048}));
// Trivial flattening of batched channels
- CHECK(FlattenFullyConnectedInput(TensorShape({97,1,1,2048}), TensorShape({512, 2048})) == TensorShape({97, 2048}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({97,1,1,2048}),
+ TensorShape({512, 2048})) == TensorShape({97, 2048}));
// Flatten single batch of rows
- CHECK(FlattenFullyConnectedInput(TensorShape({1,97,1,2048}), TensorShape({512, 2048})) == TensorShape({97, 2048}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({1,97,1,2048}),
+ TensorShape({512, 2048})) == TensorShape({97, 2048}));
// Flatten single batch of columns
- CHECK(FlattenFullyConnectedInput(TensorShape({1,1,97,2048}), TensorShape({512, 2048})) == TensorShape({97, 2048}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({1,1,97,2048}),
+ TensorShape({512, 2048})) == TensorShape({97, 2048}));
// Move batches into input dimension
- CHECK(FlattenFullyConnectedInput(TensorShape({50,1,1,10}), TensorShape({512, 20})) == TensorShape({25, 20}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({50,1,1,10}),
+ TensorShape({512, 20})) == TensorShape({25, 20}));
// Flatten single batch of 3D data (e.g. convolution output)
- CHECK(FlattenFullyConnectedInput(TensorShape({1,16,16,10}), TensorShape({512, 2560})) == TensorShape({1, 2560}));
+ DOCTEST_CHECK(FlattenFullyConnectedInput(TensorShape({1,16,16,10}),
+ TensorShape({512, 2560})) == TensorShape({1, 2560}));
}
}
diff --git a/test/1.0/Lstm.cpp b/test/1.0/Lstm.cpp
index 0833fd65..6b3e7042 100644
--- a/test/1.0/Lstm.cpp
+++ b/test/1.0/Lstm.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -7,44 +7,54 @@
using namespace armnn_driver;
-TEST_SUITE("LstmTests_1.0_CpuRef")
+DOCTEST_TEST_SUITE("LstmTests_1.0_CpuRef")
{
- TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.0_armnn::Compute::CpuRef")
+
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.0_armnn::Compute::CpuRef")
{
LstmNoCifgNoPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.0_CpuRef")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.0_CpuRef")
{
LstmCifgPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
}
- TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.0_CpuRef")
+
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.0_CpuRef")
{
LstmNoCifgPeepholeProjection<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.0_CpuRef")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.0_CpuRef")
{
LstmCifgPeepholeNoProjectionBatch2<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
}
+
}
#if defined(ARMCOMPUTECL_ENABLED)
-TEST_SUITE("LstmTests_1.0_GpuAcc")
+DOCTEST_TEST_SUITE("LstmTests_1.0_GpuAcc")
{
- TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.0_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.0_GpuAcc")
{
LstmNoCifgNoPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.0_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.0_GpuAcc")
{
LstmCifgPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
}
- TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.0_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.0_GpuAcc")
{
LstmNoCifgPeepholeProjection<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.0_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.0_GpuAcc")
{
LstmCifgPeepholeNoProjectionBatch2<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
}
+
}
-#endif \ No newline at end of file
+#endif
diff --git a/test/1.1/Convolution2D.cpp b/test/1.1/Convolution2D.cpp
index 0daa4728..4601f760 100644
--- a/test/1.1/Convolution2D.cpp
+++ b/test/1.1/Convolution2D.cpp
@@ -1,19 +1,14 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "../DriverTestHelpers.hpp"
#include "../Convolution2D.hpp"
-#include "../../1.1/HalPolicy.hpp"
-#include <doctest/doctest.h>
#include <log/log.h>
#include <OperationsUtils.h>
-
-
using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
@@ -30,25 +25,25 @@ void SetModelFp16Flag(V1_1::Model& model, bool fp16Enabled)
} // namespace driverTestHelpers
-TEST_SUITE("Convolution2DTests_1.1")
+DOCTEST_TEST_SUITE("Convolution2DTests_1.1")
{
-TEST_CASE("ConvValidPadding_Hal_1_1")
+DOCTEST_TEST_CASE("ConvValidPadding_Hal_1_1")
{
PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingValid);
}
-TEST_CASE("ConvSamePadding_Hal_1_1")
+DOCTEST_TEST_CASE("ConvSamePadding_Hal_1_1")
{
PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingSame);
}
-TEST_CASE("ConvValidPaddingFp16Flag_Hal_1_1")
+DOCTEST_TEST_CASE("ConvValidPaddingFp16Flag_Hal_1_1")
{
PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingValid, true);
}
-TEST_CASE("ConvSamePaddingFp16Flag_Hal_1_1")
+DOCTEST_TEST_CASE("ConvSamePaddingFp16Flag_Hal_1_1")
{
PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingSame, true);
}
diff --git a/test/1.1/Lstm.cpp b/test/1.1/Lstm.cpp
index 2699ec4c..cbdf6b14 100644
--- a/test/1.1/Lstm.cpp
+++ b/test/1.1/Lstm.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -7,44 +7,54 @@
using namespace armnn_driver;
-TEST_SUITE("LstmTests_1.1_CpuRef")
+DOCTEST_TEST_SUITE("LstmTests_1.1_CpuRef")
{
- TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.1_armnn::Compute::CpuRef")
+
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.1_armnn::Compute::CpuRef")
{
LstmNoCifgNoPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.1_CpuRef")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.1_CpuRef")
{
LstmCifgPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
}
- TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.1_CpuRef")
+
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.1_CpuRef")
{
LstmNoCifgPeepholeProjection<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.1_CpuRef")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.1_CpuRef")
{
LstmCifgPeepholeNoProjectionBatch2<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
}
+
}
#if defined(ARMCOMPUTECL_ENABLED)
-TEST_SUITE("LstmTests_1.1_GpuAcc")
+DOCTEST_TEST_SUITE("LstmTests_1.1_GpuAcc")
{
- TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.1_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.1_GpuAcc")
{
LstmNoCifgNoPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.1_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.1_GpuAcc")
{
LstmCifgPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
}
- TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.1_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.1_GpuAcc")
{
LstmNoCifgPeepholeProjection<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.1_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.1_GpuAcc")
{
LstmCifgPeepholeNoProjectionBatch2<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
}
+
}
#endif
diff --git a/test/1.1/Mean.cpp b/test/1.1/Mean.cpp
index c7c5a9b5..34c29bad 100644
--- a/test/1.1/Mean.cpp
+++ b/test/1.1/Mean.cpp
@@ -1,14 +1,12 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../DriverTestHelpers.hpp"
#include "../TestTensor.hpp"
-#include "../1.1/HalPolicy.hpp"
-
-#include <doctest/doctest.h>
+#include <1.1/HalPolicy.hpp>
#include <array>
@@ -86,21 +84,22 @@ void MeanTestImpl(const TestTensor& input,
if (preparedModel.get() != nullptr)
{
V1_0::ErrorStatus execStatus = Execute(preparedModel, request);
- CHECK((int)execStatus == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK((int)execStatus == (int)V1_0::ErrorStatus::NONE);
}
const float* expectedOutputData = expectedOutput.GetData();
for (unsigned int i = 0; i < expectedOutput.GetNumElements(); i++)
{
- CHECK(outputData[i] == expectedOutputData[i]);
+ DOCTEST_CHECK(outputData[i] == expectedOutputData[i]);
}
}
} // anonymous namespace
-TEST_SUITE("MeanTests_CpuRef")
+DOCTEST_TEST_SUITE("MeanTests_CpuRef")
{
- TEST_CASE("MeanNoKeepDimsTest_CpuRef")
+
+ DOCTEST_TEST_CASE("MeanNoKeepDimsTest_CpuRef")
{
TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
{ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
@@ -114,7 +113,7 @@ TEST_SUITE("MeanTests_CpuRef")
MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuRef);
}
- TEST_CASE("MeanKeepDimsTest_CpuRef")
+ DOCTEST_TEST_CASE("MeanKeepDimsTest_CpuRef")
{
TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
hidl_vec<uint32_t> axisDimensions = { 1 };
@@ -125,7 +124,7 @@ TEST_SUITE("MeanTests_CpuRef")
MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuRef);
}
- TEST_CASE("MeanFp16NoKeepDimsTest_CpuRef")
+ DOCTEST_TEST_CASE("MeanFp16NoKeepDimsTest_CpuRef")
{
TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
{ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
@@ -139,7 +138,7 @@ TEST_SUITE("MeanTests_CpuRef")
MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuRef);
}
- TEST_CASE("MeanFp16KeepDimsTest_CpuRef")
+ DOCTEST_TEST_CASE("MeanFp16KeepDimsTest_CpuRef")
{
TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
hidl_vec<uint32_t> axisDimensions = { 1 };
@@ -149,12 +148,13 @@ TEST_SUITE("MeanTests_CpuRef")
MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuRef);
}
+
}
#ifdef ARMCOMPUTECL_ENABLED
-TEST_SUITE("MeanTests_CpuAcc")
+DOCTEST_TEST_SUITE("MeanTests_CpuAcc")
{
- TEST_CASE("MeanNoKeepDimsTest_CpuAcc")
+ DOCTEST_TEST_CASE("MeanNoKeepDimsTest_CpuAcc")
{
TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
{ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
@@ -168,7 +168,7 @@ TEST_SUITE("MeanTests_CpuAcc")
MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuAcc);
}
- TEST_CASE("MeanKeepDimsTest_CpuAcc")
+ DOCTEST_TEST_CASE("MeanKeepDimsTest_CpuAcc")
{
TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
hidl_vec<uint32_t> axisDimensions = { 1 };
@@ -179,7 +179,7 @@ TEST_SUITE("MeanTests_CpuAcc")
MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuAcc);
}
- TEST_CASE("MeanFp16NoKeepDimsTest_CpuAcc")
+ DOCTEST_TEST_CASE("MeanFp16NoKeepDimsTest_CpuAcc")
{
TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
{ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
@@ -193,7 +193,7 @@ TEST_SUITE("MeanTests_CpuAcc")
MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuAcc);
}
- TEST_CASE("MeanFp16KeepDimsTest_CpuAcc")
+ DOCTEST_TEST_CASE("MeanFp16KeepDimsTest_CpuAcc")
{
TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
hidl_vec<uint32_t> axisDimensions = { 1 };
@@ -205,9 +205,9 @@ TEST_SUITE("MeanTests_CpuAcc")
}
}
-TEST_SUITE("MeanTests_GpuAcc")
+DOCTEST_TEST_SUITE("MeanTests_GpuAcc")
{
- TEST_CASE("MeanNoKeepDimsTest_GpuAcc")
+ DOCTEST_TEST_CASE("MeanNoKeepDimsTest_GpuAcc")
{
TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
{ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
@@ -221,7 +221,7 @@ TEST_SUITE("MeanTests_GpuAcc")
MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::GpuAcc);
}
- TEST_CASE("MeanKeepDimsTest_GpuAcc")
+ DOCTEST_TEST_CASE("MeanKeepDimsTest_GpuAcc")
{
TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
hidl_vec<uint32_t> axisDimensions = { 1 };
@@ -232,7 +232,7 @@ TEST_SUITE("MeanTests_GpuAcc")
MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::GpuAcc);
}
- TEST_CASE("MeanFp16NoKeepDimsTest_GpuAcc")
+ DOCTEST_TEST_CASE("MeanFp16NoKeepDimsTest_GpuAcc")
{
TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
{ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
@@ -246,7 +246,7 @@ TEST_SUITE("MeanTests_GpuAcc")
MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::GpuAcc);
}
- TEST_CASE("MeanFp16KeepDimsTest_GpuAcc")
+ DOCTEST_TEST_CASE("MeanFp16KeepDimsTest_GpuAcc")
{
TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
hidl_vec<uint32_t> axisDimensions = { 1 };
diff --git a/test/1.1/Transpose.cpp b/test/1.1/Transpose.cpp
index 4c4dc349..5499e0d6 100644
--- a/test/1.1/Transpose.cpp
+++ b/test/1.1/Transpose.cpp
@@ -1,17 +1,14 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "OperationsUtils.h"
#include "../DriverTestHelpers.hpp"
#include "../TestTensor.hpp"
-
-#include "../1.1/HalPolicy.hpp"
-
-#include <doctest/doctest.h>
+#include <1.1/HalPolicy.hpp>
#include <log/log.h>
+#include <OperationsUtils.h>
#include <array>
#include <cmath>
@@ -89,15 +86,15 @@ void TransposeTestImpl(const TestTensor & inputs, int32_t perm[],
const float * expectedOutput = expectedOutputTensor.GetData();
for (unsigned int i = 0; i < expectedOutputTensor.GetNumElements(); ++i)
{
- CHECK(outdata[i] == expectedOutput[i]);
+ DOCTEST_CHECK(outdata[i] == expectedOutput[i]);
}
}
} // namespace
-TEST_SUITE("TransposeTests_CpuRef")
+DOCTEST_TEST_SUITE("TransposeTests_CpuRef")
{
- TEST_CASE("Transpose_CpuRef")
+ DOCTEST_TEST_CASE("Transpose_CpuRef")
{
int32_t perm[] = {2, 3, 1, 0};
TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
@@ -106,7 +103,7 @@ TEST_SUITE("TransposeTests_CpuRef")
TransposeTestImpl(input, perm, expected, armnn::Compute::CpuRef);
}
- TEST_CASE("TransposeNHWCToArmNN_CpuRef")
+ DOCTEST_TEST_CASE("TransposeNHWCToArmNN_CpuRef")
{
int32_t perm[] = {0, 3, 1, 2};
TestTensor input{armnn::TensorShape{1, 2, 2, 3},{1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33}};
@@ -114,7 +111,7 @@ TEST_SUITE("TransposeTests_CpuRef")
TransposeTestImpl(input, perm, expected, armnn::Compute::CpuRef);
}
- TEST_CASE("TransposeArmNNToNHWC_CpuRef")
+ DOCTEST_TEST_CASE("TransposeArmNNToNHWC_CpuRef")
{
int32_t perm[] = {0, 2, 3, 1};
TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
@@ -125,9 +122,9 @@ TEST_SUITE("TransposeTests_CpuRef")
}
#ifdef ARMCOMPUTECL_ENABLED
-TEST_SUITE("TransposeTests_CpuAcc")
+DOCTEST_TEST_SUITE("TransposeTests_CpuAcc")
{
- TEST_CASE("Transpose_CpuAcc")
+ DOCTEST_TEST_CASE("Transpose_CpuAcc")
{
int32_t perm[] = {2, 3, 1, 0};
TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
@@ -136,7 +133,7 @@ TEST_SUITE("TransposeTests_CpuAcc")
TransposeTestImpl(input, perm, expected, armnn::Compute::CpuAcc);
}
- TEST_CASE("TransposeNHWCToArmNN_CpuAcc")
+ DOCTEST_TEST_CASE("TransposeNHWCToArmNN_CpuAcc")
{
int32_t perm[] = {0, 3, 1, 2};
TestTensor input{armnn::TensorShape{1, 2, 2, 3},{1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33}};
@@ -144,7 +141,8 @@ TEST_SUITE("TransposeTests_CpuAcc")
TransposeTestImpl(input, perm, expected, armnn::Compute::CpuAcc);
}
- TEST_CASE("TransposeArmNNToNHWC_CpuAcc")
+
+ DOCTEST_TEST_CASE("TransposeArmNNToNHWC_CpuAcc")
{
int32_t perm[] = {0, 2, 3, 1};
TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
@@ -153,9 +151,10 @@ TEST_SUITE("TransposeTests_CpuAcc")
TransposeTestImpl(input, perm, expected, armnn::Compute::CpuAcc);
}
}
-TEST_SUITE("TransposeTests_GpuAcc")
+
+DOCTEST_TEST_SUITE("TransposeTests_GpuAcc")
{
- TEST_CASE("Transpose_GpuAcc")
+ DOCTEST_TEST_CASE("Transpose_GpuAcc")
{
int32_t perm[] = {2, 3, 1, 0};
TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
@@ -164,7 +163,7 @@ TEST_SUITE("TransposeTests_GpuAcc")
TransposeTestImpl(input, perm, expected, armnn::Compute::GpuAcc);
}
- TEST_CASE("TransposeNHWCToArmNN_GpuAcc")
+ DOCTEST_TEST_CASE("TransposeNHWCToArmNN_GpuAcc")
{
int32_t perm[] = {0, 3, 1, 2};
TestTensor input{armnn::TensorShape{1, 2, 2, 3},{1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33}};
@@ -172,7 +171,8 @@ TEST_SUITE("TransposeTests_GpuAcc")
TransposeTestImpl(input, perm, expected, armnn::Compute::GpuAcc);
}
- TEST_CASE("TransposeArmNNToNHWC_GpuAcc")
+
+ DOCTEST_TEST_CASE("TransposeArmNNToNHWC_GpuAcc")
{
int32_t perm[] = {0, 2, 3, 1};
TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
diff --git a/test/1.2/Capabilities.cpp b/test/1.2/Capabilities.cpp
index aa0c6423..41d5ee53 100644
--- a/test/1.2/Capabilities.cpp
+++ b/test/1.2/Capabilities.cpp
@@ -1,39 +1,12 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "../../1.2/ArmnnDriverImpl.hpp"
-
+#include "../DriverTestHelpers.hpp"
#include "Utils.h"
-#include <armnn/utility/Assert.hpp>
-
-// Un-define some of the macros as they clash in 'third-party/doctest/doctest.h'
-// and 'system/core/base/include/android-base/logging.h'
-// macro redefined error[-Werror,-Wmacro-redefined]
-#ifdef CHECK
-#undef CHECK
-#endif
-#ifdef CHECK_EQ
-#undef CHECK_EQ
-#endif
-#ifdef CHECK_NE
-#undef CHECK_NE
-#endif
-#ifdef CHECK_GT
-#undef CHECK_GT
-#endif
-#ifdef CHECK_LT
-#undef CHECK_LT
-#endif
-#ifdef CHECK_GE
-#undef CHECK_GE
-#endif
-#ifdef CHECK_LE
-#undef CHECK_LE
-#endif
-#include <doctest/doctest.h>
+#include <1.2/ArmnnDriverImpl.hpp>
#include <sys/system_properties.h>
@@ -86,15 +59,14 @@ void CheckOperandType(const V1_2::Capabilities& capabilities, V1_2::OperandType
{
using namespace armnn_driver::hal_1_2;
V1_0::PerformanceInfo perfInfo = android::nn::lookup(capabilities.operandPerformance, type);
- ARMNN_ASSERT(perfInfo.execTime == execTime);
- ARMNN_ASSERT(perfInfo.powerUsage == powerUsage);
+ DOCTEST_CHECK(perfInfo.execTime == execTime);
+ DOCTEST_CHECK(perfInfo.powerUsage == powerUsage);
}
-TEST_SUITE("CapabilitiesTests")
+DOCTEST_TEST_SUITE("CapabilitiesTests")
{
-TEST_CASE_FIXTURE(CapabilitiesFixture, "PerformanceCapabilitiesWithRuntime")
+DOCTEST_TEST_CASE_FIXTURE(CapabilitiesFixture, "PerformanceCapabilitiesWithRuntime")
{
- using namespace armnn_driver::hal_1_2;
using namespace android::nn;
auto getCapabilitiesFn = [&](V1_0::ErrorStatus error, const V1_2::Capabilities& capabilities)
@@ -118,7 +90,8 @@ TEST_CASE_FIXTURE(CapabilitiesFixture, "PerformanceCapabilitiesWithRuntime")
CheckOperandType(capabilities, V1_2::OperandType::OEM, FLT_MAX, FLT_MAX);
CheckOperandType(capabilities, V1_2::OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
- ARMNN_ASSERT(error == V1_0::ErrorStatus::NONE);
+ bool result = (error == V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(result);
};
__system_property_set("Armnn.operandTypeTensorFloat32Performance.execTime", "2.0f");
@@ -145,12 +118,11 @@ TEST_CASE_FIXTURE(CapabilitiesFixture, "PerformanceCapabilitiesWithRuntime")
armnn::IRuntime::CreationOptions options;
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
- ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
+ armnn_driver::hal_1_2::ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
}
-TEST_CASE_FIXTURE(CapabilitiesFixture, "PerformanceCapabilitiesUndefined")
+DOCTEST_TEST_CASE_FIXTURE(CapabilitiesFixture, "PerformanceCapabilitiesUndefined")
{
- using namespace armnn_driver::hal_1_2;
using namespace android::nn;
float defaultValue = .1f;
@@ -179,13 +151,14 @@ TEST_CASE_FIXTURE(CapabilitiesFixture, "PerformanceCapabilitiesUndefined")
CheckOperandType(capabilities, V1_2::OperandType::OEM, FLT_MAX, FLT_MAX);
CheckOperandType(capabilities, V1_2::OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
- ARMNN_ASSERT(error == V1_0::ErrorStatus::NONE);
+ bool result = (error == V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(result);
};
armnn::IRuntime::CreationOptions options;
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
- ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
+ armnn_driver::hal_1_2::ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
}
} \ No newline at end of file
diff --git a/test/1.2/Dilation.cpp b/test/1.2/Dilation.cpp
index e1cde9fd..c9182a7c 100644
--- a/test/1.2/Dilation.cpp
+++ b/test/1.2/Dilation.cpp
@@ -1,17 +1,16 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../Dilation.hpp"
-#include "../../1.2/HalPolicy.hpp"
+#include <1.2/HalPolicy.hpp>
-#include <doctest/doctest.h>
-
-TEST_SUITE("DilationTests")
+DOCTEST_TEST_SUITE("DilationTests")
{
-TEST_CASE("ConvolutionExplicitPaddingNoDilation")
+
+DOCTEST_TEST_CASE("ConvolutionExplicitPaddingNoDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = false;
@@ -21,7 +20,7 @@ TEST_CASE("ConvolutionExplicitPaddingNoDilation")
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-TEST_CASE("ConvolutionExplicitPaddingDilation")
+DOCTEST_TEST_CASE("ConvolutionExplicitPaddingDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = false;
@@ -31,7 +30,7 @@ TEST_CASE("ConvolutionExplicitPaddingDilation")
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-TEST_CASE("ConvolutionImplicitPaddingNoDilation")
+DOCTEST_TEST_CASE("ConvolutionImplicitPaddingNoDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = false;
@@ -41,7 +40,7 @@ TEST_CASE("ConvolutionImplicitPaddingNoDilation")
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-TEST_CASE("ConvolutionImplicitPaddingDilation")
+DOCTEST_TEST_CASE("ConvolutionImplicitPaddingDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = false;
@@ -51,7 +50,7 @@ TEST_CASE("ConvolutionImplicitPaddingDilation")
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-TEST_CASE("DepthwiseConvolutionExplicitPaddingNoDilation")
+DOCTEST_TEST_CASE("DepthwiseConvolutionExplicitPaddingNoDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = true;
@@ -61,7 +60,7 @@ TEST_CASE("DepthwiseConvolutionExplicitPaddingNoDilation")
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-TEST_CASE("DepthwiseConvolutionExplicitPaddingDilation")
+DOCTEST_TEST_CASE("DepthwiseConvolutionExplicitPaddingDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = true;
@@ -71,7 +70,7 @@ TEST_CASE("DepthwiseConvolutionExplicitPaddingDilation")
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-TEST_CASE("DepthwiseConvolutionImplicitPaddingNoDilation")
+DOCTEST_TEST_CASE("DepthwiseConvolutionImplicitPaddingNoDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = true;
@@ -81,7 +80,7 @@ TEST_CASE("DepthwiseConvolutionImplicitPaddingNoDilation")
DilationTestImpl<hal_1_2::HalPolicy>(options);
}
-TEST_CASE("DepthwiseConvolutionImplicitPaddingDilation")
+DOCTEST_TEST_CASE("DepthwiseConvolutionImplicitPaddingDilation")
{
DilationTestOptions options;
options.m_IsDepthwiseConvolution = true;
diff --git a/test/1.2/Lstm.cpp b/test/1.2/Lstm.cpp
index 70fbf70f..59d5d381 100644
--- a/test/1.2/Lstm.cpp
+++ b/test/1.2/Lstm.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -7,52 +7,64 @@
using namespace armnn_driver;
-TEST_SUITE("LstmTests_1.2_CpuRef")
+DOCTEST_TEST_SUITE("LstmTests_1.2_CpuRef")
{
- TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.2_armnn::Compute::CpuRef")
+
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.2_armnn::Compute::CpuRef")
{
LstmNoCifgNoPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.2_CpuRef")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.2_CpuRef")
{
LstmCifgPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
}
- TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.2_CpuRef")
+
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.2_CpuRef")
{
LstmNoCifgPeepholeProjection<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.2_CpuRef")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.2_CpuRef")
{
LstmCifgPeepholeNoProjectionBatch2<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
}
- TEST_CASE("QuantizedLstmTest_1.2_CpuRef")
+
+ DOCTEST_TEST_CASE("QuantizedLstmTest_1.2_CpuRef")
{
QuantizedLstm<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
}
+
}
#if defined(ARMCOMPUTECL_ENABLED)
-TEST_SUITE("LstmTests_1.2_GpuAcc")
+DOCTEST_TEST_SUITE("LstmTests_1.2_GpuAcc")
{
- TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.2_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.2_GpuAcc")
{
LstmNoCifgNoPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.2_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.2_GpuAcc")
{
LstmCifgPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
}
- TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.2_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.2_GpuAcc")
{
LstmNoCifgPeepholeProjection<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
}
- TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.2_GpuAcc")
+
+ DOCTEST_TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.2_GpuAcc")
{
LstmCifgPeepholeNoProjectionBatch2<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
}
- TEST_CASE("QuantizedLstmTest_1.2_GpuAcc")
+
+ DOCTEST_TEST_CASE("QuantizedLstmTest_1.2_GpuAcc")
{
QuantizedLstm<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
}
+
}
#endif
diff --git a/test/1.3/QLstm.cpp b/test/1.3/QLstm.cpp
index 82acba6e..08466195 100644
--- a/test/1.3/QLstm.cpp
+++ b/test/1.3/QLstm.cpp
@@ -1,16 +1,11 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../DriverTestHelpers.hpp"
-#include "../TestTensor.hpp"
-#include "../1.3/HalPolicy.hpp"
-
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <doctest/doctest.h>
+#include <1.3/HalPolicy.hpp>
#include <array>
@@ -499,8 +494,9 @@ void QLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
// check the results
for (size_t i = 0; i < outputStateOutValue.size(); ++i)
{
- CHECK_MESSAGE(outputStateOutValue[i] == doctest::Approx( outputStateOutData[i] ).epsilon(TOLERANCE),
- "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
+ DOCTEST_CHECK_MESSAGE(outputStateOutValue[i] == doctest::Approx( outputStateOutData[i] ).epsilon(TOLERANCE),
+ "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != "
+ << outputStateOutData[i]);
}
// CELL STATE OUTPUT Does not match currently: IVGCVSW-4860 Verify remaining VTS tests (2) for QLSTM
@@ -513,8 +509,8 @@ void QLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
for (size_t i = 0; i < outputValue.size(); ++i)
{
- CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ).epsilon(TOLERANCE),
- "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+ DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ).epsilon(TOLERANCE),
+ "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
}
}
@@ -1005,29 +1001,33 @@ void DynamicOutputQLstmWithNoProjection(armnn::Compute compute)
// QLstmWithProjection(sample);
//}
-TEST_SUITE("QLSTMTests_CpuRef")
+DOCTEST_TEST_SUITE("QLSTMTests_CpuRef")
{
- TEST_CASE("QLSTMWithNoProjectionTest_CpuRef")
+
+ DOCTEST_TEST_CASE("QLSTMWithNoProjectionTest_CpuRef")
{
QLstmWithNoProjection(armnn::Compute::CpuRef);
}
- TEST_CASE("DynamicOutputQLstmWithNoProjection_CpuRef")
+ DOCTEST_TEST_CASE("DynamicOutputQLstmWithNoProjection_CpuRef")
{
DynamicOutputQLstmWithNoProjection(armnn::Compute::CpuRef);
}
+
}
#ifdef ARMCOMPUTECL_ENABLED
-TEST_SUITE("QLSTMTests_CpuAcc")
+DOCTEST_TEST_SUITE("QLSTMTests_CpuAcc")
{
- TEST_CASE("QLSTMWithNoProjectionTest_CpuAcc")
+
+ DOCTEST_TEST_CASE("QLSTMWithNoProjectionTest_CpuAcc")
{
QLstmWithNoProjection(armnn::Compute::CpuAcc);
}
- TEST_CASE("DynamicOutputQLstmWithNoProjection_CpuAcc")
+ DOCTEST_TEST_CASE("DynamicOutputQLstmWithNoProjection_CpuAcc")
{
DynamicOutputQLstmWithNoProjection(armnn::Compute::CpuAcc);
}
+
}
#endif
diff --git a/test/1.3/QosTests.cpp b/test/1.3/QosTests.cpp
index 3b064052..cd8ac33c 100644
--- a/test/1.3/QosTests.cpp
+++ b/test/1.3/QosTests.cpp
@@ -4,15 +4,10 @@
//
#include "../DriverTestHelpers.hpp"
-#include "../TestTensor.hpp"
-#include "../1.3/HalPolicy.hpp"
+#include <1.3/HalPolicy.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("QosTests")
+DOCTEST_TEST_SUITE("QosTests")
{
using ArmnnDriver = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;
@@ -38,7 +33,7 @@ void ExecuteModel(const armnn_driver::hal_1_3::HalPolicy::Model& model,
}
}
-TEST_CASE("ConcurrentExecuteWithQosPriority")
+DOCTEST_TEST_CASE("ConcurrentExecuteWithQosPriority")
{
ALOGI("ConcurrentExecuteWithQOSPriority: entry");
@@ -94,7 +89,7 @@ TEST_CASE("ConcurrentExecuteWithQosPriority")
preparedModelsSize++;
}
- CHECK(maxRequests == preparedModelsSize);
+ DOCTEST_CHECK(maxRequests == preparedModelsSize);
// construct the request data
V1_0::DataLocation inloc = {};
@@ -154,7 +149,7 @@ TEST_CASE("ConcurrentExecuteWithQosPriority")
ALOGI("ConcurrentExecuteWithQOSPriority: waiting for callbacks");
for (size_t i = 0; i < maxRequests; ++i)
{
- ARMNN_ASSERT(cb[i]);
+ DOCTEST_CHECK(cb[i]);
cb[i]->wait();
}
@@ -164,15 +159,15 @@ TEST_CASE("ConcurrentExecuteWithQosPriority")
{
if (i < 15)
{
- CHECK(outdata[i][0] == 152);
+ DOCTEST_CHECK(outdata[i][0] == 152);
}
else if (i < 30)
{
- CHECK(outdata[i][0] == 141);
+ DOCTEST_CHECK(outdata[i][0] == 141);
}
else
{
- CHECK(outdata[i][0] == 159);
+ DOCTEST_CHECK(outdata[i][0] == 159);
}
}
diff --git a/test/Concat.cpp b/test/Concat.cpp
index d39375af..fc4a56cf 100644
--- a/test/Concat.cpp
+++ b/test/Concat.cpp
@@ -1,14 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "DriverTestHelpers.hpp"
#include "TestTensor.hpp"
-#include "../1.0/HalPolicy.hpp"
-
-#include <doctest/doctest.h>
-
#include <array>
#include <log/log.h>
@@ -56,14 +53,14 @@ ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
*driver,
prepareStatus,
expectedPrepareStatus);
- CHECK((int)prepareStatus == (int)expectedPrepareStatus);
+ DOCTEST_CHECK((int)prepareStatus == (int)expectedPrepareStatus);
if (prepareStatus != V1_0::ErrorStatus::NONE)
{
// prepare failed, we cannot continue
return;
}
- CHECK(preparedModel.get() != nullptr);
+ DOCTEST_CHECK(preparedModel.get() != nullptr);
if (preparedModel.get() == nullptr)
{
// don't spoil other tests if prepare failed
@@ -120,9 +117,9 @@ ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
// run the execution
- ARMNN_ASSERT(preparedModel.get() != nullptr);
+ DOCTEST_CHECK(preparedModel.get() != nullptr);
auto execStatus = Execute(preparedModel, request, expectedExecStatus);
- CHECK((int)execStatus == (int)expectedExecStatus);
+ DOCTEST_CHECK((int)execStatus == (int)expectedExecStatus);
if (execStatus == V1_0::ErrorStatus::NONE)
{
@@ -130,7 +127,7 @@ ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
const float * expectedOutput = expectedOutputTensor.GetData();
for (unsigned int i=0; i<expectedOutputTensor.GetNumElements();++i)
{
- CHECK(outdata[i] == expectedOutput[i]);
+ DOCTEST_CHECK(outdata[i] == expectedOutput[i]);
}
}
}
@@ -483,125 +480,125 @@ void SimpleConcatAxisZero1D(armnn::Compute computeDevice)
} // namespace <anonymous>
-TEST_SUITE("ConcatTests_CpuRef")
+DOCTEST_TEST_SUITE("ConcatTests_CpuRef")
{
-TEST_CASE("SimpleConcatAxis0")
+DOCTEST_TEST_CASE("SimpleConcatAxis0")
{
SimpleConcatAxis0(armnn::Compute::CpuRef);
}
-TEST_CASE("ConcatAxis0NoInterleave")
+DOCTEST_TEST_CASE("ConcatAxis0NoInterleave")
{
ConcatAxis0NoInterleave(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxis1")
+DOCTEST_TEST_CASE("SimpleConcatAxis1")
{
SimpleConcatAxis1(armnn::Compute::CpuRef);
}
-TEST_CASE("ConcatAxis1NoInterleave")
+DOCTEST_TEST_CASE("ConcatAxis1NoInterleave")
{
ConcatAxis1NoInterleave(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxis1DoInterleave")
+DOCTEST_TEST_CASE("SimpleConcatAxis1DoInterleave")
{
SimpleConcatAxis1DoInterleave(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxis2")
+DOCTEST_TEST_CASE("SimpleConcatAxis2")
{
SimpleConcatAxis2(armnn::Compute::CpuRef);
}
-TEST_CASE("ConcatAxis2NoInterleave")
+DOCTEST_TEST_CASE("ConcatAxis2NoInterleave")
{
ConcatAxis2NoInterleave(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxis2DoInterleave")
+DOCTEST_TEST_CASE("SimpleConcatAxis2DoInterleave")
{
SimpleConcatAxis2DoInterleave(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxis3")
+DOCTEST_TEST_CASE("SimpleConcatAxis3")
{
SimpleConcatAxis3(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxis3DoInterleave")
+DOCTEST_TEST_CASE("SimpleConcatAxis3DoInterleave")
{
SimpleConcatAxis3DoInterleave(armnn::Compute::CpuRef);
}
-TEST_CASE("AxisTooBig")
+DOCTEST_TEST_CASE("AxisTooBig")
{
AxisTooBig(armnn::Compute::CpuRef);
}
-TEST_CASE("AxisTooSmall")
+DOCTEST_TEST_CASE("AxisTooSmall")
{
AxisTooSmall(armnn::Compute::CpuRef);
}
-TEST_CASE("TooFewInputs")
+DOCTEST_TEST_CASE("TooFewInputs")
{
TooFewInputs(armnn::Compute::CpuRef);
}
-TEST_CASE("MismatchedInputDimensions")
+DOCTEST_TEST_CASE("MismatchedInputDimensions")
{
MismatchedInputDimensions(armnn::Compute::CpuRef);
}
-TEST_CASE("MismatchedInputRanks")
+DOCTEST_TEST_CASE("MismatchedInputRanks")
{
MismatchedInputRanks(armnn::Compute::CpuRef);
}
-TEST_CASE("MismatchedOutputDimensions")
+DOCTEST_TEST_CASE("MismatchedOutputDimensions")
{
MismatchedOutputDimensions(armnn::Compute::CpuRef);
}
-TEST_CASE("MismatchedOutputRank")
+DOCTEST_TEST_CASE("MismatchedOutputRank")
{
MismatchedOutputRank(armnn::Compute::CpuRef);
}
-TEST_CASE("ValidNegativeAxis")
+DOCTEST_TEST_CASE("ValidNegativeAxis")
{
ValidNegativeAxis(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxisZero3D")
+DOCTEST_TEST_CASE("SimpleConcatAxisZero3D")
{
SimpleConcatAxisZero3D(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxisOne3D")
+DOCTEST_TEST_CASE("SimpleConcatAxisOne3D")
{
SimpleConcatAxisOne3D(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxisTwo3D")
+DOCTEST_TEST_CASE("SimpleConcatAxisTwo3D")
{
SimpleConcatAxisTwo3D(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxisZero2D")
+DOCTEST_TEST_CASE("SimpleConcatAxisZero2D")
{
SimpleConcatAxisZero2D(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxisOne2D")
+DOCTEST_TEST_CASE("SimpleConcatAxisOne2D")
{
SimpleConcatAxisOne2D(armnn::Compute::CpuRef);
}
-TEST_CASE("SimpleConcatAxisZero1D")
+DOCTEST_TEST_CASE("SimpleConcatAxisZero1D")
{
SimpleConcatAxisZero1D(armnn::Compute::CpuRef);
}
@@ -609,125 +606,125 @@ TEST_CASE("SimpleConcatAxisZero1D")
}
#ifdef ARMCOMPUTECL_ENABLED
-TEST_SUITE("ConcatTests_GpuAcc")
+DOCTEST_TEST_SUITE("ConcatTests_GpuAcc")
{
-TEST_CASE("SimpleConcatAxis0")
+DOCTEST_TEST_CASE("SimpleConcatAxis0")
{
SimpleConcatAxis0(armnn::Compute::GpuAcc);
}
-TEST_CASE("ConcatAxis0NoInterleave")
+DOCTEST_TEST_CASE("ConcatAxis0NoInterleave")
{
ConcatAxis0NoInterleave(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxis1")
+DOCTEST_TEST_CASE("SimpleConcatAxis1")
{
SimpleConcatAxis1(armnn::Compute::GpuAcc);
}
-TEST_CASE("ConcatAxis1NoInterleave")
+DOCTEST_TEST_CASE("ConcatAxis1NoInterleave")
{
ConcatAxis1NoInterleave(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxis1DoInterleave")
+DOCTEST_TEST_CASE("SimpleConcatAxis1DoInterleave")
{
SimpleConcatAxis1DoInterleave(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxis2")
+DOCTEST_TEST_CASE("SimpleConcatAxis2")
{
SimpleConcatAxis2(armnn::Compute::GpuAcc);
}
-TEST_CASE("ConcatAxis2NoInterleave")
+DOCTEST_TEST_CASE("ConcatAxis2NoInterleave")
{
ConcatAxis2NoInterleave(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxis2DoInterleave")
+DOCTEST_TEST_CASE("SimpleConcatAxis2DoInterleave")
{
SimpleConcatAxis2DoInterleave(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxis3")
+DOCTEST_TEST_CASE("SimpleConcatAxis3")
{
SimpleConcatAxis3(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxis3DoInterleave")
+DOCTEST_TEST_CASE("SimpleConcatAxis3DoInterleave")
{
SimpleConcatAxis3DoInterleave(armnn::Compute::GpuAcc);
}
-TEST_CASE("AxisTooBig")
+DOCTEST_TEST_CASE("AxisTooBig")
{
AxisTooBig(armnn::Compute::GpuAcc);
}
-TEST_CASE("AxisTooSmall")
+DOCTEST_TEST_CASE("AxisTooSmall")
{
AxisTooSmall(armnn::Compute::GpuAcc);
}
-TEST_CASE("TooFewInputs")
+DOCTEST_TEST_CASE("TooFewInputs")
{
TooFewInputs(armnn::Compute::GpuAcc);
}
-TEST_CASE("MismatchedInputDimensions")
+DOCTEST_TEST_CASE("MismatchedInputDimensions")
{
MismatchedInputDimensions(armnn::Compute::GpuAcc);
}
-TEST_CASE("MismatchedInputRanks")
+DOCTEST_TEST_CASE("MismatchedInputRanks")
{
MismatchedInputRanks(armnn::Compute::GpuAcc);
}
-TEST_CASE("MismatchedOutputDimensions")
+DOCTEST_TEST_CASE("MismatchedOutputDimensions")
{
MismatchedOutputDimensions(armnn::Compute::GpuAcc);
}
-TEST_CASE("MismatchedOutputRank")
+DOCTEST_TEST_CASE("MismatchedOutputRank")
{
MismatchedOutputRank(armnn::Compute::GpuAcc);
}
-TEST_CASE("ValidNegativeAxis")
+DOCTEST_TEST_CASE("ValidNegativeAxis")
{
ValidNegativeAxis(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxisZero3D")
+DOCTEST_TEST_CASE("SimpleConcatAxisZero3D")
{
SimpleConcatAxisZero3D(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxisOne3D")
+DOCTEST_TEST_CASE("SimpleConcatAxisOne3D")
{
SimpleConcatAxisOne3D(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxisTwo3D")
+DOCTEST_TEST_CASE("SimpleConcatAxisTwo3D")
{
SimpleConcatAxisTwo3D(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxisZero2D")
+DOCTEST_TEST_CASE("SimpleConcatAxisZero2D")
{
SimpleConcatAxisZero2D(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxisOne2D")
+DOCTEST_TEST_CASE("SimpleConcatAxisOne2D")
{
SimpleConcatAxisOne2D(armnn::Compute::GpuAcc);
}
-TEST_CASE("SimpleConcatAxisZero1D")
+DOCTEST_TEST_CASE("SimpleConcatAxisZero1D")
{
SimpleConcatAxisZero1D(armnn::Compute::GpuAcc);
}
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
index 2ea6eb06..4113a8da 100644
--- a/test/Concurrent.cpp
+++ b/test/Concurrent.cpp
@@ -1,15 +1,13 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "DriverTestHelpers.hpp"
-#include "../1.0/HalPolicy.hpp"
+#include "DriverTestHelpers.hpp"
-#include <doctest/doctest.h>
#include <log/log.h>
-TEST_SUITE("ConcurrentDriverTests")
+DOCTEST_TEST_SUITE("ConcurrentDriverTests")
{
using ArmnnDriver = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;
@@ -25,7 +23,7 @@ using namespace armnn_driver;
// The main point of this test is to check that multiple requests can be
// executed without waiting for the callback from previous execution.
// The operations performed are not significant.
-TEST_CASE("ConcurrentExecute")
+DOCTEST_TEST_CASE("ConcurrentExecute")
{
ALOGI("ConcurrentExecute: entry");
@@ -63,7 +61,7 @@ TEST_CASE("ConcurrentExecute")
}
}
- CHECK(maxRequests == preparedModelsSize);
+ DOCTEST_CHECK(maxRequests == preparedModelsSize);
// construct the request data
V1_0::DataLocation inloc = {};
@@ -110,7 +108,7 @@ TEST_CASE("ConcurrentExecute")
ALOGI("ConcurrentExecute: waiting for callbacks");
for (size_t i = 0; i < maxRequests; ++i)
{
- ARMNN_ASSERT(cb[i]);
+ DOCTEST_CHECK(cb[i]);
cb[i]->wait();
}
@@ -118,7 +116,7 @@ TEST_CASE("ConcurrentExecute")
ALOGI("ConcurrentExecute: validating results");
for (size_t i = 0; i < maxRequests; ++i)
{
- CHECK(outdata[i][0] == 152);
+ DOCTEST_CHECK(outdata[i][0] == 152);
}
ALOGI("ConcurrentExecute: exit");
}
diff --git a/test/Convolution2D.hpp b/test/Convolution2D.hpp
index 540cdd7b..cc26f68f 100644
--- a/test/Convolution2D.hpp
+++ b/test/Convolution2D.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -7,7 +7,6 @@
#include "DriverTestHelpers.hpp"
-#include <doctest/doctest.h>
#include <log/log.h>
#include <OperationsUtils.h>
@@ -23,11 +22,11 @@ namespace driverTestHelpers
#define ARMNN_ANDROID_FP16_TEST(result, fp16Expectation, fp32Expectation, fp16Enabled) \
if (fp16Enabled) \
{ \
- CHECK_MESSAGE((result == fp16Expectation || result == fp32Expectation), result << \
+ DOCTEST_CHECK_MESSAGE((result == fp16Expectation || result == fp32Expectation), result << \
" does not match either " << fp16Expectation << "[fp16] or " << fp32Expectation << "[fp32]"); \
} else \
{ \
- CHECK(result == fp32Expectation); \
+ DOCTEST_CHECK(result == fp32Expectation); \
}
void SetModelFp16Flag(V1_0::Model& model, bool fp16Enabled);
@@ -117,10 +116,10 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled
break;
case android::nn::kPaddingSame:
ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
- CHECK(outdata[1] == 0.f);
+ DOCTEST_CHECK(outdata[1] == 0.f);
break;
default:
- CHECK(false);
+ DOCTEST_CHECK(false);
break;
}
}
diff --git a/test/Dilation.hpp b/test/Dilation.hpp
index c8adbe81..86c13361 100644
--- a/test/Dilation.hpp
+++ b/test/Dilation.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -10,8 +10,6 @@
#include <armnn/StrategyBase.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <doctest/doctest.h>
-
#include <numeric>
using namespace armnn;
@@ -164,7 +162,7 @@ void DilationTestImpl(const DilationTestOptions& options)
data.m_OutputSlotForOperand = std::vector<IOutputSlot*>(model.operands.size(), nullptr);
bool ok = HalPolicy::ConvertOperation(model.operations[0], model, data);
- CHECK(ok);
+ DOCTEST_CHECK(ok);
// check if dilation params are as expected
DilationTestVisitor visitor = options.m_HasDilation ? DilationTestVisitor(2, 2) : DilationTestVisitor();
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index 44e6e725..1f9fc1ee 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -1,12 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "DriverTestHelpers.hpp"
#include <log/log.h>
-#include <doctest/doctest.h>
-
namespace android
{
namespace hardware
@@ -140,10 +139,10 @@ android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_0::Model& mode
driver.prepareModel(model, cb);
prepareStatus = cb->GetErrorStatus();
- CHECK((int)prepareStatus == (int)expectedStatus);
+ DOCTEST_CHECK((int)prepareStatus == (int)expectedStatus);
if (expectedStatus == V1_0::ErrorStatus::NONE)
{
- CHECK((cb->GetPreparedModel() != nullptr));
+ DOCTEST_CHECK((cb->GetPreparedModel() != nullptr));
}
return cb->GetPreparedModel();
}
@@ -159,10 +158,10 @@ android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& mode
driver.prepareModel_1_1(model, V1_1::ExecutionPreference::LOW_POWER, cb);
prepareStatus = cb->GetErrorStatus();
- CHECK((int)prepareStatus == (int)expectedStatus);
+ DOCTEST_CHECK((int)prepareStatus == (int)expectedStatus);
if (expectedStatus == V1_0::ErrorStatus::NONE)
{
- CHECK((cb->GetPreparedModel() != nullptr));
+ DOCTEST_CHECK((cb->GetPreparedModel() != nullptr));
}
return cb->GetPreparedModel();
}
@@ -185,10 +184,10 @@ android::sp<V1_2::IPreparedModel> PrepareModelWithStatus_1_2(const armnn_driver:
driver.prepareModel_1_2(model, V1_1::ExecutionPreference::LOW_POWER, emptyHandle1, emptyHandle2, emptyToken, cb);
prepareStatus = cb->GetErrorStatus();
- CHECK((int)prepareStatus == (int)expectedStatus);
+ DOCTEST_CHECK((int)prepareStatus == (int)expectedStatus);
if (expectedStatus == V1_0::ErrorStatus::NONE)
{
- CHECK((cb->GetPreparedModel_1_2() != nullptr));
+ DOCTEST_CHECK((cb->GetPreparedModel_1_2() != nullptr));
}
return cb->GetPreparedModel_1_2();
}
@@ -220,7 +219,7 @@ android::sp<V1_3::IPreparedModel> PrepareModelWithStatus_1_3(const armnn_driver:
prepareStatus = cb->Get_1_3_ErrorStatus();
if (prepareStatus == V1_3::ErrorStatus::NONE)
{
- CHECK((cb->GetPreparedModel_1_3() != nullptr));
+ DOCTEST_CHECK((cb->GetPreparedModel_1_3() != nullptr));
}
return cb->GetPreparedModel_1_3();
}
@@ -231,10 +230,10 @@ V1_0::ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
const V1_0::Request& request,
V1_0::ErrorStatus expectedStatus)
{
- CHECK(preparedModel.get() != nullptr);
+ DOCTEST_CHECK(preparedModel.get() != nullptr);
android::sp<ExecutionCallback> cb(new ExecutionCallback());
V1_0::ErrorStatus execStatus = preparedModel->execute(request, cb);
- CHECK((int)execStatus == (int)expectedStatus);
+ DOCTEST_CHECK((int)execStatus == (int)expectedStatus);
ALOGI("Execute: waiting for callback to be invoked");
cb->wait();
return execStatus;
@@ -243,10 +242,10 @@ V1_0::ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
android::sp<ExecutionCallback> ExecuteNoWait(android::sp<V1_0::IPreparedModel> preparedModel,
const V1_0::Request& request)
{
- CHECK(preparedModel.get() != nullptr);
+ DOCTEST_CHECK(preparedModel.get() != nullptr);
android::sp<ExecutionCallback> cb(new ExecutionCallback());
V1_0::ErrorStatus execStatus = preparedModel->execute(request, cb);
- CHECK((int)execStatus == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK((int)execStatus == (int)V1_0::ErrorStatus::NONE);
ALOGI("ExecuteNoWait: returning callback object");
return cb;
}
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index 36deeab4..383c8fc2 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -12,30 +12,13 @@
#include <iosfwd>
#include <android/hidl/allocator/1.0/IAllocator.h>
-// Un-define some of the macros as they clash in 'third-party/doctest/doctest.h'
-// and 'system/core/base/include/android-base/logging.h'
-// macro redefined error[-Werror,-Wmacro-redefined]
-#ifdef CHECK
-#undef CHECK
-#endif
-#ifdef CHECK_EQ
-#undef CHECK_EQ
-#endif
-#ifdef CHECK_NE
-#undef CHECK_NE
-#endif
-#ifdef CHECK_GT
-#undef CHECK_GT
-#endif
-#ifdef CHECK_LT
-#undef CHECK_LT
-#endif
-#ifdef CHECK_GE
-#undef CHECK_GE
-#endif
-#ifdef CHECK_LE
-#undef CHECK_LE
-#endif
+// Some of the short name macros from 'third-party/doctest/doctest.h' clash with macros in
+// 'system/core/base/include/android-base/logging.h' so we use the full DOCTEST macro names
+#ifndef DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES
+#define DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES
+#endif // DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES
+
+#include <doctest/doctest.h>
using RequestArgument = V1_0::RequestArgument;
using ::android::hidl::allocator::V1_0::IAllocator;
@@ -190,7 +173,7 @@ android::sp<IMemory> AddPoolAndGetData(uint32_t size, V1_0::Request& request)
android::sp<IAllocator> allocator = IAllocator::getService("ashmem");
allocator->allocate(sizeof(T) * size, [&](bool success, const hidl_memory& mem) {
- ARMNN_ASSERT(success);
+ DOCTEST_CHECK(success);
pool = mem;
});
diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp
index 704de44a..4717357b 100644
--- a/test/FullyConnected.cpp
+++ b/test/FullyConnected.cpp
@@ -1,16 +1,13 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "DriverTestHelpers.hpp"
-
-#include "../1.0/HalPolicy.hpp"
-#include <doctest/doctest.h>
+#include "DriverTestHelpers.hpp"
#include <log/log.h>
-TEST_SUITE("FullyConnectedTests")
+DOCTEST_TEST_SUITE("FullyConnectedTests")
{
using namespace android::hardware;
using namespace driverTestHelpers;
@@ -19,7 +16,7 @@ using namespace armnn_driver;
using HalPolicy = hal_1_0::HalPolicy;
// Add our own test here since we fail the fc tests which Google supplies (because of non-const weights)
-TEST_CASE("FullyConnected")
+DOCTEST_TEST_CASE("FullyConnected")
{
// this should ideally replicate fully_connected_float.model.cpp
// but that uses slightly weird dimensions which I don't think we need to support for now
@@ -83,10 +80,10 @@ TEST_CASE("FullyConnected")
}
// check the result
- CHECK(outdata[0] == 152);
+ DOCTEST_CHECK(outdata[0] == 152);
}
-TEST_CASE("TestFullyConnected4dInput")
+DOCTEST_TEST_CASE("TestFullyConnected4dInput")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -165,17 +162,17 @@ TEST_CASE("TestFullyConnected4dInput")
}
// check the result
- CHECK(outdata[0] == 1);
- CHECK(outdata[1] == 2);
- CHECK(outdata[2] == 3);
- CHECK(outdata[3] == 4);
- CHECK(outdata[4] == 5);
- CHECK(outdata[5] == 6);
- CHECK(outdata[6] == 7);
- CHECK(outdata[7] == 8);
+ DOCTEST_CHECK(outdata[0] == 1);
+ DOCTEST_CHECK(outdata[1] == 2);
+ DOCTEST_CHECK(outdata[2] == 3);
+ DOCTEST_CHECK(outdata[3] == 4);
+ DOCTEST_CHECK(outdata[4] == 5);
+ DOCTEST_CHECK(outdata[5] == 6);
+ DOCTEST_CHECK(outdata[6] == 7);
+ DOCTEST_CHECK(outdata[7] == 8);
}
-TEST_CASE("TestFullyConnected4dInputReshape")
+DOCTEST_TEST_CASE("TestFullyConnected4dInputReshape")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -254,17 +251,17 @@ TEST_CASE("TestFullyConnected4dInputReshape")
}
// check the result
- CHECK(outdata[0] == 1);
- CHECK(outdata[1] == 2);
- CHECK(outdata[2] == 3);
- CHECK(outdata[3] == 4);
- CHECK(outdata[4] == 5);
- CHECK(outdata[5] == 6);
- CHECK(outdata[6] == 7);
- CHECK(outdata[7] == 8);
+ DOCTEST_CHECK(outdata[0] == 1);
+ DOCTEST_CHECK(outdata[1] == 2);
+ DOCTEST_CHECK(outdata[2] == 3);
+ DOCTEST_CHECK(outdata[3] == 4);
+ DOCTEST_CHECK(outdata[4] == 5);
+ DOCTEST_CHECK(outdata[5] == 6);
+ DOCTEST_CHECK(outdata[6] == 7);
+ DOCTEST_CHECK(outdata[7] == 8);
}
-TEST_CASE("TestFullyConnectedWeightsAsInput")
+DOCTEST_TEST_CASE("TestFullyConnectedWeightsAsInput")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -366,14 +363,14 @@ TEST_CASE("TestFullyConnectedWeightsAsInput")
}
// check the result
- CHECK(outdata[0] == 1);
- CHECK(outdata[1] == 2);
- CHECK(outdata[2] == 3);
- CHECK(outdata[3] == 4);
- CHECK(outdata[4] == 5);
- CHECK(outdata[5] == 6);
- CHECK(outdata[6] == 7);
- CHECK(outdata[7] == 8);
+ DOCTEST_CHECK(outdata[0] == 1);
+ DOCTEST_CHECK(outdata[1] == 2);
+ DOCTEST_CHECK(outdata[2] == 3);
+ DOCTEST_CHECK(outdata[3] == 4);
+ DOCTEST_CHECK(outdata[4] == 5);
+ DOCTEST_CHECK(outdata[5] == 6);
+ DOCTEST_CHECK(outdata[6] == 7);
+ DOCTEST_CHECK(outdata[7] == 8);
}
}
diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index 99e0c622..bd86a885 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -1,16 +1,13 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "DriverTestHelpers.hpp"
-
-#include "../1.0/HalPolicy.hpp"
-#include <doctest/doctest.h>
+#include "DriverTestHelpers.hpp"
#include <log/log.h>
-TEST_SUITE("GenericLayerTests")
+DOCTEST_TEST_SUITE("GenericLayerTests")
{
using namespace android::hardware;
@@ -19,7 +16,7 @@ using namespace armnn_driver;
using HalPolicy = hal_1_0::HalPolicy;
-TEST_CASE("GetSupportedOperations")
+DOCTEST_TEST_CASE("GetSupportedOperations")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -53,9 +50,9 @@ TEST_CASE("GetSupportedOperations")
model0.operations[0].outputs = hidl_vec<uint32_t>{4};
driver->getSupportedOperations(model0, cb);
- CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
- CHECK(supported.size() == (size_t)1);
- CHECK(supported[0] == true);
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(supported.size() == (size_t)1);
+ DOCTEST_CHECK(supported[0] == true);
V1_0::Model model1 = {};
@@ -82,8 +79,8 @@ TEST_CASE("GetSupportedOperations")
driver->getSupportedOperations(model1, cb);
- CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
- CHECK(supported.empty());
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
+ DOCTEST_CHECK(supported.empty());
// Test Broadcast on add/mul operators
HalPolicy::Model model2 = {};
@@ -115,10 +112,10 @@ TEST_CASE("GetSupportedOperations")
model2.operations[1].outputs = hidl_vec<uint32_t>{4};
driver->getSupportedOperations(model2, cb);
- CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
- CHECK(supported.size() == (size_t)2);
- CHECK(supported[0] == true);
- CHECK(supported[1] == true);
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(supported.size() == (size_t)2);
+ DOCTEST_CHECK(supported[0] == true);
+ DOCTEST_CHECK(supported[1] == true);
V1_0::Model model3 = {};
@@ -144,9 +141,9 @@ TEST_CASE("GetSupportedOperations")
model3.operations[0].outputs = hidl_vec<uint32_t>{3, 4};
driver->getSupportedOperations(model3, cb);
- CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
- CHECK(supported.size() == (size_t)1);
- CHECK(supported[0] == false);
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(supported.size() == (size_t)1);
+ DOCTEST_CHECK(supported[0] == false);
HalPolicy::Model model4 = {};
@@ -159,14 +156,14 @@ TEST_CASE("GetSupportedOperations")
model4.operations[0].outputs = hidl_vec<uint32_t>{0};
driver->getSupportedOperations(model4, cb);
- CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
- CHECK(supported.empty());
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
+ DOCTEST_CHECK(supported.empty());
}
// The purpose of this test is to ensure that when encountering an unsupported operation
// it is skipped and getSupportedOperations() continues (rather than failing and stopping).
// As per IVGCVSW-710.
-TEST_CASE("UnsupportedLayerContinueOnFailure")
+DOCTEST_TEST_CASE("UnsupportedLayerContinueOnFailure")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -241,16 +238,16 @@ TEST_CASE("UnsupportedLayerContinueOnFailure")
// We are testing that the unsupported layers return false and the test continues rather than failing and stopping
driver->getSupportedOperations(model, cb);
- CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
- CHECK(supported.size() == (size_t)3);
- CHECK(supported[0] == false);
- CHECK(supported[1] == true);
- CHECK(supported[2] == false);
+ DOCTEST_CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(supported.size() == (size_t)3);
+ DOCTEST_CHECK(supported[0] == false);
+ DOCTEST_CHECK(supported[1] == true);
+ DOCTEST_CHECK(supported[2] == false);
}
// The purpose of this test is to ensure that when encountering an failure
// during mem pool mapping we properly report an error to the framework via a callback
-TEST_CASE("ModelToINetworkConverterMemPoolFail")
+DOCTEST_TEST_CASE("ModelToINetworkConverterMemPoolFail")
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -269,8 +266,8 @@ TEST_CASE("ModelToINetworkConverterMemPoolFail")
// Memory pool mapping should fail, we should report an error
driver->getSupportedOperations(model, cb);
- CHECK((int)errorStatus != (int)V1_0::ErrorStatus::NONE);
- CHECK(supported.empty());
+ DOCTEST_CHECK((int)errorStatus != (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(supported.empty());
}
}
diff --git a/test/Lstm.hpp b/test/Lstm.hpp
index e3844464..93f2f32d 100644
--- a/test/Lstm.hpp
+++ b/test/Lstm.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -9,8 +9,6 @@
#include <armnn/utility/IgnoreUnused.hpp>
-#include <doctest/doctest.h>
-
#include <array>
using ArmnnDriver = armnn_driver::ArmnnDriver;
@@ -368,18 +366,20 @@ void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
// check the results
for (size_t i = 0; i < outputStateOutValue.size(); ++i)
{
- CHECK_MESSAGE(outputStateOutValue[i] == doctest::Approx( outputStateOutData[i] ),
- "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
+ DOCTEST_CHECK_MESSAGE(outputStateOutValue[i] == doctest::Approx( outputStateOutData[i] ),
+ "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != "
+ << outputStateOutData[i]);
}
for (size_t i = 0; i < cellStateOutValue.size(); ++i)
{
- CHECK_MESSAGE(cellStateOutValue[i] == doctest::Approx( cellStateOutData[i] ),
- "cellStateOutValue[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
+ DOCTEST_CHECK_MESSAGE(cellStateOutValue[i] == doctest::Approx( cellStateOutData[i] ),
+ "cellStateOutValue[" << i << "]: " << cellStateOutValue[i] << " != "
+ << cellStateOutData[i]);
}
for (size_t i = 0; i < outputValue.size(); ++i)
{
- CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ),
- "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+ DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ),
+ "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
}
}
@@ -643,13 +643,14 @@ void QuantizedLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
// check the results
for (size_t i = 0; i < cellStateOutValue.size(); ++i)
{
- CHECK_MESSAGE(cellStateOutValue[i] == doctest::Approx( cellStateOutData[i] ),
- "cellStateOutValue[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
+ DOCTEST_CHECK_MESSAGE(cellStateOutValue[i] == doctest::Approx( cellStateOutData[i] ),
+ "cellStateOutValue[" << i << "]: " << cellStateOutValue[i] << " != "
+ << cellStateOutData[i]);
}
for (size_t i = 0; i < outputValue.size(); ++i)
{
- CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ),
- "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+ DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ),
+ "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
}
}
diff --git a/test/SystemProperties.cpp b/test/SystemProperties.cpp
index b1b6013e..ef952964 100644
--- a/test/SystemProperties.cpp
+++ b/test/SystemProperties.cpp
@@ -1,57 +1,57 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "DriverTestHelpers.hpp"
#include <log/log.h>
-#include "../SystemPropertiesUtils.hpp"
-
-#include <doctest/doctest.h>
+#include <SystemPropertiesUtils.hpp>
-TEST_SUITE("SystemProperiesTests")
+DOCTEST_TEST_SUITE("SystemPropertiesTests")
{
-TEST_CASE("SystemProperties")
+
+DOCTEST_TEST_CASE("SystemProperties")
{
// Test default value
{
auto p = __system_property_find("thisDoesNotExist");
- CHECK((p == nullptr));
+ DOCTEST_CHECK((p == nullptr));
int defaultValue = ParseSystemProperty("thisDoesNotExist", -4);
- CHECK((defaultValue == -4));
+ DOCTEST_CHECK((defaultValue == -4));
}
// Test default value from bad data type
{
__system_property_set("thisIsNotFloat", "notfloat");
float defaultValue = ParseSystemProperty("thisIsNotFloat", 0.1f);
- CHECK((defaultValue == 0.1f));
+ DOCTEST_CHECK((defaultValue == 0.1f));
}
// Test fetching bool values
{
__system_property_set("myTestBool", "1");
bool b = ParseSystemProperty("myTestBool", false);
- CHECK((b == true));
+ DOCTEST_CHECK((b == true));
}
{
__system_property_set("myTestBool", "0");
bool b = ParseSystemProperty("myTestBool", true);
- CHECK((b == false));
+ DOCTEST_CHECK((b == false));
}
// Test fetching int
{
__system_property_set("myTestInt", "567");
int i = ParseSystemProperty("myTestInt", 890);
- CHECK((i==567));
+ DOCTEST_CHECK((i==567));
}
// Test fetching float
{
__system_property_set("myTestFloat", "1.2f");
float f = ParseSystemProperty("myTestFloat", 3.4f);
- CHECK((f==1.2f));
+ DOCTEST_CHECK((f==1.2f));
}
}
diff --git a/test/TestTensor.cpp b/test/TestTensor.cpp
index e6cb446f..39bcd5a6 100644
--- a/test/TestTensor.cpp
+++ b/test/TestTensor.cpp
@@ -1,7 +1,8 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "TestTensor.hpp"
namespace driverTestHelpers
@@ -25,7 +26,7 @@ unsigned int TestTensor::GetNumElements() const
const float * TestTensor::GetData() const
{
- ARMNN_ASSERT(m_Data.empty() == false);
+ DOCTEST_CHECK(m_Data.empty() == false);
return &m_Data[0];
}
diff --git a/test/TestTensor.hpp b/test/TestTensor.hpp
index 1cd1950d..b0613eb2 100644
--- a/test/TestTensor.hpp
+++ b/test/TestTensor.hpp
@@ -1,12 +1,12 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#pragma once
-#include "../ArmnnDriver.hpp"
+#pragma once
-#include <armnn/utility/Assert.hpp>
+#include <ArmnnDriver.hpp>
+#include "DriverTestHelpers.hpp"
namespace driverTestHelpers
{
@@ -19,7 +19,7 @@ public:
: m_Shape{shape}
, m_Data{data}
{
- ARMNN_ASSERT(m_Shape.GetNumElements() == m_Data.size());
+ DOCTEST_CHECK(m_Shape.GetNumElements() == m_Data.size());
}
hidl_vec<uint32_t> GetDimensions() const;
diff --git a/test/Tests.cpp b/test/Tests.cpp
index a3a38b91..4628414e 100644
--- a/test/Tests.cpp
+++ b/test/Tests.cpp
@@ -1,35 +1,35 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#define LOG_TAG "ArmnnDriverTests"
#include <log/log.h>
-#include "DriverTestHelpers.hpp"
-
#ifndef DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#endif
-#include <doctest/doctest.h>
+
+#include "DriverTestHelpers.hpp"
using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;
-TEST_SUITE("DriverTests")
+DOCTEST_TEST_SUITE("DriverTests")
{
-TEST_CASE("Init")
+DOCTEST_TEST_CASE("Init")
{
// Making the driver object on the stack causes a weird libc error, so make it on the heap instead
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
V1_0::DeviceStatus status = driver->getStatus();
// Note double-parentheses to avoid compile error from doctest trying to printf the DeviceStatus
- CHECK((status == V1_0::DeviceStatus::AVAILABLE));
+ DOCTEST_CHECK((status == V1_0::DeviceStatus::AVAILABLE));
}
-TEST_CASE("TestCapabilities")
+DOCTEST_TEST_CASE("TestCapabilities")
{
// Making the driver object on the stack causes a weird libc error, so make it on the heap instead
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -45,11 +45,11 @@ TEST_CASE("TestCapabilities")
driver->getCapabilities(cb);
- CHECK((int)error == (int)V1_0::ErrorStatus::NONE);
- CHECK(cap.float32Performance.execTime > 0.f);
- CHECK(cap.float32Performance.powerUsage > 0.f);
- CHECK(cap.quantized8Performance.execTime > 0.f);
- CHECK(cap.quantized8Performance.powerUsage > 0.f);
+ DOCTEST_CHECK((int)error == (int)V1_0::ErrorStatus::NONE);
+ DOCTEST_CHECK(cap.float32Performance.execTime > 0.f);
+ DOCTEST_CHECK(cap.float32Performance.powerUsage > 0.f);
+ DOCTEST_CHECK(cap.quantized8Performance.execTime > 0.f);
+ DOCTEST_CHECK(cap.quantized8Performance.powerUsage > 0.f);
}
}
diff --git a/test/UtilsTests.cpp b/test/UtilsTests.cpp
index fcb0c4f5..c2d8bb4a 100644
--- a/test/UtilsTests.cpp
+++ b/test/UtilsTests.cpp
@@ -1,24 +1,19 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "DriverTestHelpers.hpp"
#include <log/log.h>
-#include "../Utils.hpp"
#include <armnn/src/armnn/OptimizedNetworkImpl.hpp>
#include <fstream>
-#include <iomanip>
#include <memory>
#include <armnn/INetwork.hpp>
-#include "armnn/NetworkFwd.hpp"
#include <armnnUtils/Filesystem.hpp>
-#include <doctest/doctest.h>
-
using namespace android;
using namespace android::nn;
using namespace android::hardware;
@@ -147,10 +142,10 @@ private:
} // namespace
-TEST_SUITE("UtilsTests")
+DOCTEST_TEST_SUITE("UtilsTests")
{
-TEST_CASE("ExportToEmptyDirectory")
+DOCTEST_TEST_CASE("ExportToEmptyDirectory")
{
// Set the fixture for this test.
ExportNetworkGraphFixture fixture("");
@@ -170,10 +165,10 @@ TEST_CASE("ExportToEmptyDirectory")
fixture.m_RequestInputsAndOutputsDumpDir);
// Check that the output file does not exist.
- CHECK(!fixture.FileExists());
+ DOCTEST_CHECK(!fixture.FileExists());
}
-TEST_CASE("ExportNetwork")
+DOCTEST_TEST_CASE("ExportNetwork")
{
// Set the fixture for this test.
ExportNetworkGraphFixture fixture;
@@ -194,13 +189,13 @@ TEST_CASE("ExportNetwork")
fixture.m_RequestInputsAndOutputsDumpDir);
// Check that the output file exists and that it has the correct name.
- CHECK(fixture.FileExists());
+ DOCTEST_CHECK(fixture.FileExists());
// Check that the content of the output file matches the mock content.
- CHECK(fixture.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture.GetFileContent() == mockSerializedContent);
}
-TEST_CASE("ExportNetworkOverwriteFile")
+DOCTEST_TEST_CASE("ExportNetworkOverwriteFile")
{
// Set the fixture for this test.
ExportNetworkGraphFixture fixture;
@@ -220,10 +215,10 @@ TEST_CASE("ExportNetworkOverwriteFile")
fixture.m_RequestInputsAndOutputsDumpDir);
// Check that the output file exists and that it has the correct name.
- CHECK(fixture.FileExists());
+ DOCTEST_CHECK(fixture.FileExists());
// Check that the content of the output file matches the mock content.
- CHECK(fixture.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture.GetFileContent() == mockSerializedContent);
// Update the mock serialized content of the network.
mockSerializedContent = "This is ANOTHER mock serialized content!";
@@ -238,13 +233,13 @@ TEST_CASE("ExportNetworkOverwriteFile")
fixture.m_RequestInputsAndOutputsDumpDir);
// Check that the output file still exists and that it has the correct name.
- CHECK(fixture.FileExists());
+ DOCTEST_CHECK(fixture.FileExists());
// Check that the content of the output file matches the mock content.
- CHECK(fixture.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture.GetFileContent() == mockSerializedContent);
}
-TEST_CASE("ExportMultipleNetworks")
+DOCTEST_TEST_CASE("ExportMultipleNetworks")
{
// Set the fixtures for this test.
ExportNetworkGraphFixture fixture1;
@@ -266,29 +261,29 @@ TEST_CASE("ExportMultipleNetworks")
fixture1.m_RequestInputsAndOutputsDumpDir);
// Check that the output file exists and that it has the correct name.
- CHECK(fixture1.FileExists());
+ DOCTEST_CHECK(fixture1.FileExists());
// Check that the content of the output file matches the mock content.
- CHECK(fixture1.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture1.GetFileContent() == mockSerializedContent);
// Export the mock optimized network.
fixture2.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
fixture2.m_RequestInputsAndOutputsDumpDir);
// Check that the output file exists and that it has the correct name.
- CHECK(fixture2.FileExists());
+ DOCTEST_CHECK(fixture2.FileExists());
// Check that the content of the output file matches the mock content.
- CHECK(fixture2.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture2.GetFileContent() == mockSerializedContent);
// Export the mock optimized network.
fixture3.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
fixture3.m_RequestInputsAndOutputsDumpDir);
// Check that the output file exists and that it has the correct name.
- CHECK(fixture3.FileExists());
+ DOCTEST_CHECK(fixture3.FileExists());
// Check that the content of the output file matches the mock content.
- CHECK(fixture3.GetFileContent() == mockSerializedContent);
+ DOCTEST_CHECK(fixture3.GetFileContent() == mockSerializedContent);
}
}