From ffbd8e7b6c613f7f81102fc9cfa030cc4e329e71 Mon Sep 17 00:00:00 2001
From: Kristofer Jonsson
Date: Tue, 15 Jun 2021 17:51:58 +0200
Subject: Using ref kernels

Tensorflow reference kernels are bit exact and should be used by the
run_platform.py script to generate the expected OFM data.

Change-Id: I90e688e753e5330aaaf9002abed23df0493ff99b
---
 scripts/run_platform.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/scripts/run_platform.py b/scripts/run_platform.py
index 0600828..93ba7cd 100755
--- a/scripts/run_platform.py
+++ b/scripts/run_platform.py
@@ -29,8 +29,7 @@ import subprocess
 import sys
 
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
-from tensorflow.lite.python.interpreter import Interpreter
-
+from tensorflow.lite.python.interpreter import Interpreter, OpResolverType
 
 CORE_PLATFORM_PATH = pathlib.Path(__file__).resolve().parents[1]
 
@@ -50,11 +49,11 @@ def build_core_platform(output_folder, target, toolchain):
 
     run_cmd(cmake_cmd)
 
-    make_cmd = ["make", "-C", build_folder, f"-j{multiprocessing.cpu_count()}"]
+    make_cmd = ["make", "-C", build_folder, f"-j{multiprocessing.cpu_count()}", "baremetal_custom"]
     run_cmd(make_cmd)
 
 def generate_reference_data(output_folder, non_optimized_model_path, input_path, expected_output_path):
-    interpreter = Interpreter(model_path=str(non_optimized_model_path.resolve()))
+    interpreter = Interpreter(model_path=str(non_optimized_model_path.resolve()), experimental_op_resolver_type=OpResolverType.BUILTIN_REF)
     interpreter.allocate_tensors()
 
     input_detail = interpreter.get_input_details()[0]
--
cgit v1.2.1
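
Note: below is a minimal standalone sketch of the reference-kernel flow this
patch switches run_platform.py to, i.e. running the non-optimized model with
OpResolverType.BUILTIN_REF and dumping the output as the expected OFM. The
file names and the reshape of the raw input are illustrative assumptions, not
taken from the patch; only the Interpreter/OpResolverType usage mirrors the
change above.

  import numpy as np
  from tensorflow.lite.python.interpreter import Interpreter, OpResolverType

  # Placeholder paths for illustration; the script derives its own.
  model_path = "model.tflite"
  input_path = "input.bin"
  expected_output_path = "expected_output.bin"

  # BUILTIN_REF selects the portable reference kernels, which are bit exact,
  # so the dumped OFM can be compared byte-for-byte against the target output.
  interpreter = Interpreter(model_path=model_path,
                            experimental_op_resolver_type=OpResolverType.BUILTIN_REF)
  interpreter.allocate_tensors()

  input_detail = interpreter.get_input_details()[0]
  output_detail = interpreter.get_output_details()[0]

  # Load the raw IFM and reshape it to the tensor the model expects.
  ifm = np.fromfile(input_path, dtype=input_detail["dtype"])
  interpreter.set_tensor(input_detail["index"], ifm.reshape(input_detail["shape"]))
  interpreter.invoke()

  # Dump the OFM produced by the reference kernels as the expected data.
  interpreter.get_tensor(output_detail["index"]).tofile(expected_output_path)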