aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/WorkloadData.cpp
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2019-07-01 13:51:07 +0100
committerTeresa Charlin <teresa.charlinreyes@arm.com>2019-07-01 14:48:57 +0100
commit970f43b078eba91c66fb64eadbc9803661ffcda8 (patch)
tree99613d07e62130caff834eadb35f8456b04c63a4 /src/backends/backendsCommon/WorkloadData.cpp
parent6fb339a7d202a9c64d8c7843d630fe8ab7be9f33 (diff)
downloadarmnn-970f43b078eba91c66fb64eadbc9803661ffcda8.tar.gz
IVGCVSW-3365 Add reference workload support for ResizeLayer
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: Id551690065dca0686ce597d1f0c14fd73163481e
Diffstat (limited to 'src/backends/backendsCommon/WorkloadData.cpp')
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp89
1 file changed, 66 insertions, 23 deletions
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 1d0be5d1ff..e7915dd40b 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -915,12 +915,12 @@ void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");
std::vector<DataType> supportedTypes =
- {
- DataType::Float16,
- DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
- };
+ {
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
supportedTypes,
@@ -931,29 +931,72 @@ void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
"ResizeBilinearQueueDescriptor");
// Resizes bilinear only changes width and height: batch and channel count must match.
+ const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
+ const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
+ if (inputBatchSize != outputBatchSize)
{
- const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
- const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
- if (inputBatchSize != outputBatchSize)
- {
- throw InvalidArgumentException(
- boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
- "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
- }
+ throw InvalidArgumentException(
+ boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
+ "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
}
+ DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
+ const unsigned int inputChannelCount =
+ workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
+ const unsigned int outputChannelCount =
+ workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
+ if (inputChannelCount != outputChannelCount)
{
- DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
- const unsigned int inputChannelCount =
+ throw InvalidArgumentException(
+ boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
+ "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
+ }
+}
+
+void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ ValidateNumInputs(workloadInfo, "ResizeQueueDescriptor", 1);
+ ValidateNumOutputs(workloadInfo, "ResizeQueueDescriptor", 1);
+
+ ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeQueueDescriptor", 4, "input");
+ ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeQueueDescriptor", 4, "output");
+
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+ supportedTypes,
+ "ResizeQueueDescriptor");
+
+ ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
+ {workloadInfo.m_InputTensorInfos[0].GetDataType()},
+ "ResizeQueueDescriptor");
+
+ // Resizes only changes width and height: batch and channel count must match.
+ const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
+ const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
+ if (inputBatchSize != outputBatchSize)
+ {
+ throw InvalidArgumentException(
+ boost::str(boost::format("ResizeQueueDescriptor: Input batch size (%1%) "
+ "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
+ }
+
+ DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
+ const unsigned int inputChannelCount =
workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
- const unsigned int outputChannelCount =
+ const unsigned int outputChannelCount =
workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
- if (inputChannelCount != outputChannelCount)
- {
- throw InvalidArgumentException(
- boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
- "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
- }
+ if (inputChannelCount != outputChannelCount)
+ {
+ throw InvalidArgumentException(
+ boost::str(boost::format("ResizeQueueDescriptor: Input channel count (%1%) "
+ "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
}
}