about summary refs log tree commit diff
path: root/src/backends
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/aclCommon/ArmComputeTensorUtils.cpp            |  2
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                |  8
-rw-r--r--  src/backends/reference/test/ArgMinMaxTests.cpp              | 12
-rw-r--r--  src/backends/reference/workloads/ArgMinMax.cpp              | 12
-rw-r--r--  src/backends/reference/workloads/ArgMinMax.hpp              |  3
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt             |  2
-rw-r--r--  src/backends/reference/workloads/RefArgMinMaxWorkload.cpp   | 13
7 files changed, 34 insertions, 18 deletions
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index f9335058c2..98b5adafbc 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -31,6 +31,8 @@ arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multi
return arm_compute::DataType::QASYMM8;
case armnn::DataType::QSymmS16:
return arm_compute::DataType::QSYMM16;
+ case armnn::DataType::Signed64:
+ return arm_compute::DataType::S64;
case armnn::DataType::QSymmS8:
{
return multiScales ? arm_compute::DataType::QSYMM8_PER_CHANNEL : arm_compute::DataType::QSYMM8;
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 07ce14b763..ff97fc7f41 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -623,9 +623,10 @@ void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
- if (outputTensorInfo.GetDataType() != DataType::Signed32)
+ if (outputTensorInfo.GetDataType() != DataType::Signed32 &&
+ outputTensorInfo.GetDataType() != DataType::Signed64)
{
- throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32.");
+ throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32 or Int64.");
}
std::vector<DataType> supportedInputTypes =
@@ -636,7 +637,8 @@ void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16,
- DataType::Signed32
+ DataType::Signed32,
+ DataType::Signed64
};
ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);
diff --git a/src/backends/reference/test/ArgMinMaxTests.cpp b/src/backends/reference/test/ArgMinMaxTests.cpp
index 201a2c0c2e..dce15b29ef 100644
--- a/src/backends/reference/test/ArgMinMaxTests.cpp
+++ b/src/backends/reference/test/ArgMinMaxTests.cpp
@@ -12,11 +12,11 @@ BOOST_AUTO_TEST_SUITE(RefArgMinMax)
BOOST_AUTO_TEST_CASE(ArgMinTest)
{
const armnn::TensorInfo inputInfo({ 1, 2, 3 } , armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo({ 1, 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo outputInfo({ 1, 3 }, armnn::DataType::Signed64);
std::vector<float> inputValues({ 1.0f, 5.0f, 3.0f, 4.0f, 2.0f, 6.0f});
- std::vector<int32_t> outputValues(outputInfo.GetNumElements());
- std::vector<int32_t> expectedValues({ 0, 1, 0 });
+ std::vector<int64_t> outputValues(outputInfo.GetNumElements());
+ std::vector<int64_t> expectedValues({ 0, 1, 0 });
ArgMinMax(*armnn::MakeDecoder<float>(inputInfo, inputValues.data()),
outputValues.data(),
@@ -35,11 +35,11 @@ BOOST_AUTO_TEST_CASE(ArgMinTest)
BOOST_AUTO_TEST_CASE(ArgMaxTest)
{
const armnn::TensorInfo inputInfo({ 1, 2, 3 } , armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo({ 1, 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo outputInfo({ 1, 3 }, armnn::DataType::Signed64);
std::vector<float> inputValues({ 1.0f, 5.0f, 3.0f, 4.0f, 2.0f, 6.0f });
- std::vector<int32_t> outputValues(outputInfo.GetNumElements());
- std::vector<int32_t> expectedValues({ 1, 0, 1 });
+ std::vector<int64_t> outputValues(outputInfo.GetNumElements());
+ std::vector<int64_t> expectedValues({ 1, 0, 1 });
ArgMinMax(*armnn::MakeDecoder<float>(inputInfo, inputValues.data()),
outputValues.data(),
diff --git a/src/backends/reference/workloads/ArgMinMax.cpp b/src/backends/reference/workloads/ArgMinMax.cpp
index c455c52e5a..3bf2853a20 100644
--- a/src/backends/reference/workloads/ArgMinMax.cpp
+++ b/src/backends/reference/workloads/ArgMinMax.cpp
@@ -12,7 +12,8 @@
namespace armnn
{
-void ArgMinMax(Decoder<float>& in, int32_t* out, const TensorInfo& inputTensorInfo,
+template <typename OUT>
+void ArgMinMax(Decoder<float>& in, OUT* out, const TensorInfo& inputTensorInfo,
const TensorInfo& outputTensorInfo, ArgMinMaxFunction function, int axis)
{
IgnoreUnused(outputTensorInfo);
@@ -39,9 +40,16 @@ void ArgMinMax(Decoder<float>& in, int32_t* out, const TensorInfo& inputTensorIn
tmpIndex = i;
}
}
- out[outer * innerElements + inner] = armnn::numeric_cast<int32_t>(tmpIndex);
+
+ out[outer * innerElements + inner] = armnn::numeric_cast<OUT>(tmpIndex);
}
}
}
+template void ArgMinMax(Decoder<float>& in, int32_t* out, const TensorInfo& inputTensorInfo,
+ const TensorInfo& outputTensorInfo, ArgMinMaxFunction function, int axis);
+
+template void ArgMinMax(Decoder<float>& in, int64_t* out, const TensorInfo& inputTensorInfo,
+ const TensorInfo& outputTensorInfo, ArgMinMaxFunction function, int axis);
+
} //namespace armnn
diff --git a/src/backends/reference/workloads/ArgMinMax.hpp b/src/backends/reference/workloads/ArgMinMax.hpp
index 5a9c6a8a2a..3958ed7afd 100644
--- a/src/backends/reference/workloads/ArgMinMax.hpp
+++ b/src/backends/reference/workloads/ArgMinMax.hpp
@@ -13,7 +13,8 @@
namespace armnn
{
-void ArgMinMax(Decoder<float>& in, int32_t* out, const TensorInfo& inputTensorInfo,
+template <typename OUT>
+void ArgMinMax(Decoder<float>& in, OUT *out, const TensorInfo& inputTensorInfo,
const TensorInfo& outputTensorInfo, ArgMinMaxFunction function, int axis);
} //namespace armnn
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 937a32029e..cd9efc96af 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -5,8 +5,6 @@
list(APPEND armnnRefBackendWorkloads_sources
Abs.hpp
- ArgMinMax.cpp
- ArgMinMax.hpp
Activation.cpp
Activation.hpp
ArgMinMax.cpp
diff --git a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
index 5f1eb73b61..b7246d5b93 100644
--- a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
@@ -29,10 +29,15 @@ void RefArgMinMaxWorkload::Execute() const
const TensorInfo &outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
- int32_t* output = GetOutputTensorData<int32_t>(0, m_Data);
-
- ArgMinMax(decoder, output, inputTensorInfo, outputTensorInfo, m_Data.m_Parameters.m_Function,
- m_Data.m_Parameters.m_Axis);
+ if (m_Data.m_Parameters.m_Output_Type == armnn::DataType::Signed32) {
+ int32_t *output = GetOutputTensorData<int32_t>(0, m_Data);
+ ArgMinMax(decoder, output, inputTensorInfo, outputTensorInfo, m_Data.m_Parameters.m_Function,
+ m_Data.m_Parameters.m_Axis);
+ } else {
+ int64_t *output = GetOutputTensorData<int64_t>(0, m_Data);
+ ArgMinMax(decoder, output, inputTensorInfo, outputTensorInfo, m_Data.m_Parameters.m_Function,
+ m_Data.m_Parameters.m_Axis);
+ }
}
} //namespace armnn \ No newline at end of file