about summary refs log tree commit diff
path: root/src/backends/neon/workloads
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/neon/workloads')
-rw-r--r--  src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp  2
-rw-r--r--  src/backends/neon/workloads/NeonGreaterWorkload.cpp  2
-rw-r--r--  src/backends/neon/workloads/NeonGreaterWorkload.hpp  2
-rw-r--r--  src/backends/neon/workloads/NeonWorkloadUtils.hpp  2
4 files changed, 4 insertions, 4 deletions
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 56e5552dd3..338c7eb1f6 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -72,7 +72,7 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
m_FullyConnectedLayer.reset(layer.release());
// Allocate
- if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QuantisedAsymm8)
+ if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QAsymmU8)
{
InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
}
diff --git a/src/backends/neon/workloads/NeonGreaterWorkload.cpp b/src/backends/neon/workloads/NeonGreaterWorkload.cpp
index 62396261e1..6380dfada5 100644
--- a/src/backends/neon/workloads/NeonGreaterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonGreaterWorkload.cpp
@@ -44,6 +44,6 @@ void NeonGreaterWorkload<T>::Execute() const
}
template class NeonGreaterWorkload<DataType::Float32>;
-template class NeonGreaterWorkload<DataType::QuantisedAsymm8>;
+template class NeonGreaterWorkload<DataType::QAsymmU8>;
} //namespace armnn \ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonGreaterWorkload.hpp b/src/backends/neon/workloads/NeonGreaterWorkload.hpp
index df1e07e07b..bcab27e7a6 100644
--- a/src/backends/neon/workloads/NeonGreaterWorkload.hpp
+++ b/src/backends/neon/workloads/NeonGreaterWorkload.hpp
@@ -31,6 +31,6 @@ private:
};
using NeonGreaterFloat32Workload = NeonGreaterWorkload<DataType::Float32>;
-using NeonGreaterUint8Workload = NeonGreaterWorkload<DataType::QuantisedAsymm8>;
+using NeonGreaterUint8Workload = NeonGreaterWorkload<DataType::QAsymmU8>;
} //namespace armnn \ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index e9edc8901e..f98fe44039 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -43,7 +43,7 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
case DataType::Float32:
CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
break;
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
break;
case DataType::QuantizedSymm8PerAxis: