aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/WorkloadData.cpp
diff options
context:
space:
mode:
authorNarumol Prangnawarat <narumol.prangnawarat@arm.com>2020-03-16 16:36:10 +0000
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2020-03-19 15:41:12 +0000
commitea54a01f6bd30f013cbe88ae1751985bc86b6af5 (patch)
tree7edb7d659ea4210c1256beb5edf57601b317c82d /src/backends/backendsCommon/WorkloadData.cpp
parent25334cf3d53fe7fff98776b44a199ca341f62f1a (diff)
downloadarmnn-ea54a01f6bd30f013cbe88ae1751985bc86b6af5.tar.gz
IVGCVSW-4516 Add ConvertFp32ToBf16Layer and Ref workload support
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com> Change-Id: I9099a4f840fb747336f77d20a0868b64e801a310
Diffstat (limited to 'src/backends/backendsCommon/WorkloadData.cpp')
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp23
1 file changed, 23 insertions, 0 deletions
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 81aefa94e7..bf26056a97 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2039,6 +2039,29 @@ void ConvertBf16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo
ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
+// Validates a ConvertFp32ToBf16 workload descriptor: expects exactly one
+// Float32 input tensor and one BFloat16 output tensor whose shapes match.
+// Throws InvalidArgumentException on any violation.
+void ConvertFp32ToBf16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"};
+
+ // A type-conversion layer is strictly one-in/one-out.
+ ValidateNumInputs(workloadInfo, descriptorName, 1);
+ ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+ const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+ const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+ // Source element type must be 32-bit float ...
+ if (inputTensorInfo.GetDataType() != DataType::Float32)
+ {
+ throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
+ }
+
+ // ... and the destination element type must be bfloat16.
+ if (outputTensorInfo.GetDataType() != DataType::BFloat16)
+ {
+ throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16.");
+ }
+
+ // Only the element type changes; the tensor shape must be preserved.
+ ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+}
+
void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};