path: root/tests/conftest.py
author     Gergely Nagy <gergely.nagy@arm.com>    2023-11-21 12:29:38 +0000
committer  Gergely Nagy <gergely.nagy@arm.com>    2023-12-07 17:09:31 +0000
commit     54eec806272b7574a0757c77a913a369a9ecdc70 (patch)
tree       2e6484b857b2a68279a2707dbb21e5c26685f4e2 /tests/conftest.py
parent     7c50f1d6367186c03a282ac7ecb8fca0f905ba30 (diff)
download   mlia-54eec806272b7574a0757c77a913a369a9ecdc70.tar.gz
MLIA-835 Invalid JSON output
TFLiteConverter was producing log messages in the output that could not be captured and redirected to logging. The workaround is to run the converter as a subprocess, which required some refactoring around its existing invocations.

Change-Id: I394bd0d49d36e6686cfcb9d658e4aad05326cb87
Signed-off-by: Gergely Nagy <gergely.nagy@arm.com>
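The diff below only shows the test fixture being adapted to the new convert_to_tflite signature; the subprocess workaround itself lives elsewhere in the change. As a rough illustration of the technique the commit message describes (not the actual MLIA implementation; the helper name convert_in_subprocess and the inline child script are assumptions), running the conversion in a child Python process lets the parent capture whatever TFLiteConverter prints and forward it to logging:

import logging
import subprocess
import sys

logger = logging.getLogger(__name__)

# Child script: loads a saved Keras model and converts it to TFLite.
# Any output emitted by TFLiteConverter's native code stays in this process.
_CHILD_SCRIPT = """
import sys
import tensorflow as tf

keras_path, tflite_path = sys.argv[1], sys.argv[2]
model = tf.keras.models.load_model(keras_path)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
with open(tflite_path, "wb") as file:
    file.write(converter.convert())
"""


def convert_in_subprocess(keras_path: str, tflite_path: str) -> None:
    """Convert a saved Keras model to TFLite in a child process (illustrative sketch)."""
    result = subprocess.run(
        [sys.executable, "-c", _CHILD_SCRIPT, keras_path, tflite_path],
        capture_output=True,
        text=True,
        check=True,
    )
    # Whatever the converter wrote to stdout/stderr is now plain text that
    # can be redirected to the logging framework instead of polluting the
    # tool's own (e.g. JSON) output.
    for line in (result.stdout + result.stderr).splitlines():
        if line.strip():
            logger.debug("TFLiteConverter: %s", line)

On the caller side, the refactored convert_to_tflite writes the converted model to output_path itself, which is why save_tflite_model is no longer needed in the fixture below.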
Diffstat (limited to 'tests/conftest.py')
-rw-r--r--  tests/conftest.py  |  11
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/tests/conftest.py b/tests/conftest.py
index d700206..345eb8d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -14,9 +14,8 @@ import tensorflow as tf
from mlia.backend.vela.compiler import optimize_model
from mlia.core.context import ExecutionContext
from mlia.nn.rewrite.core.utils.numpy_tfrecord import NumpyTFWriter
-from mlia.nn.tensorflow.utils import convert_to_tflite
+from mlia.nn.tensorflow.tflite_convert import convert_to_tflite
from mlia.nn.tensorflow.utils import save_keras_model
-from mlia.nn.tensorflow.utils import save_tflite_model
from mlia.target.ethos_u.config import EthosUConfiguration
from tests.utils.rewrite import MockTrainingParameters
@@ -93,15 +92,13 @@ def fixture_test_models_path(
     save_keras_model(keras_model, tmp_path / TEST_MODEL_KERAS_FILE)

     # Un-quantized TensorFlow Lite model (fp32)
-    save_tflite_model(
-        convert_to_tflite(keras_model, quantized=False),
-        tmp_path / TEST_MODEL_TFLITE_FP32_FILE,
+    convert_to_tflite(
+        keras_model, quantized=False, output_path=tmp_path / TEST_MODEL_TFLITE_FP32_FILE
     )

     # Quantized TensorFlow Lite model (int8)
-    tflite_model = convert_to_tflite(keras_model, quantized=True)
     tflite_model_path = tmp_path / TEST_MODEL_TFLITE_INT8_FILE
-    save_tflite_model(tflite_model, tflite_model_path)
+    convert_to_tflite(keras_model, quantized=True, output_path=tflite_model_path)

     # Vela-optimized TensorFlow Lite model (int8)
     tflite_vela_model = tmp_path / TEST_MODEL_TFLITE_VELA_FILE