aboutsummaryrefslogtreecommitdiff
path: root/delegate/src/test/DelegateOptionsTestHelper.hpp
diff options
context:
space:
mode:
author    Ryan OShea <ryan.oshea3@arm.com>  2022-11-07 16:20:48 +0000
committer ryan.oshea3 <ryan.oshea3@arm.com>  2022-11-16 15:22:50 +0000
commit    31441595009182c985dacbedc70c41ee6664d070 (patch)
tree      248a85295aeff4022c9b395fc97748b0a0aa6b35 /delegate/src/test/DelegateOptionsTestHelper.hpp
parent    bd18eab07a8f30492de1e462b1815189014cb8d5 (diff)
download  armnn-31441595009182c985dacbedc70c41ee6664d070.tar.gz
IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers
- Remove Bf16ToFp32 Conversion Layer - Remove Fp32ToBf16 Conversion Layer - Remove BF16 Conversion tests * Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true * Provide comments to enable fast math in order to use bf16 * Update docs to inform users to enable fast math for bf16 Execute Network Changes * Require bf16_turbo_mode to also have fast_math_enabled set to true - Remove setting m_ReduceFp32ToBf16 optimizer option Signed-off-by: Ryan OShea <ryan.oshea3@arm.com> Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
Diffstat (limited to 'delegate/src/test/DelegateOptionsTestHelper.hpp')
-rw-r--r--delegate/src/test/DelegateOptionsTestHelper.hpp89
1 file changed, 0 insertions, 89 deletions
diff --git a/delegate/src/test/DelegateOptionsTestHelper.hpp b/delegate/src/test/DelegateOptionsTestHelper.hpp
index 87bf0d6c3d..7e147de31f 100644
--- a/delegate/src/test/DelegateOptionsTestHelper.hpp
+++ b/delegate/src/test/DelegateOptionsTestHelper.hpp
@@ -219,95 +219,6 @@ std::vector<char> CreateCeilTfLiteModel(tflite::TensorType tensorType,
flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
-void ReduceFp32ToBf16TestImpl()
-{
- using namespace tflite;
- // Set input data
- std::vector<int32_t> inputShape{ 1, 5, 5, 1 };
- std::vector<int32_t> filterShape{ 1, 3, 3, 1 };
- std::vector<int32_t> biasShape{ 1 };
- std::vector<int32_t> outputShape{ 1, 3, 3, 1 };
-
- std::vector<float> inputValues =
- {
- 1, 5, 2, 3, 5,
- 8, 7, 3, 6, 3,
- 3, 3, 9, 1, 9,
- 4, 1, 8, 1, 3,
- 6, 8, 1, 9, 2
- };
-
- std::vector<float> filterValues =
- {
- 4, 5, 6,
- 0, 0, 0,
- 3, 2, 1
- };
-
- std::vector<float> biasValues = { 5 };
-
- std::vector<float> expectedResult =
- {
- 28, 38, 29,
- 96, 104, 53,
- 31, 55, 24
- };
-
- tflite::Padding padding = Padding_SAME;
-
- std::vector<char> modelBuffer;
- modelBuffer = CreateConv2dTfLiteModel<float>(BuiltinOperator_CONV_2D,
- ::tflite::TensorType_FLOAT32,
- 2,
- 2,
- 1,
- 1,
- padding,
- ActivationFunctionType_NONE,
- inputShape,
- filterShape,
- biasShape,
- outputShape,
- filterValues,
- biasValues);
-
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the Armnn Delegate
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- std::vector<armnn::BackendOptions> backendOptions;
-
- // Enable debug with BF16 enabled
- armnn::OptimizerOptions optimizerOptions(false, true, true, false);
-
- armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
-
- // Run EnqueueWorkload
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
- auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
- armnnDelegate::CompareData(expectedResult.data(), armnnDelegateOutputData, expectedResult.size());
- armnnDelegateInterpreter.reset(nullptr);
-}
-
template <typename T>
void DelegateOptionTest(tflite::TensorType tensorType,
const std::vector<armnn::BackendId>& backends,