# SPDX-FileCopyrightText: Copyright 2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
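"""Utilities for storing named numpy arrays in TFRecord files and running TFLite models on them."""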
import json
import os
import random
import tempfile
from collections import defaultdict

import numpy as np

from mlia.nn.rewrite.core.utils.utils import load
from mlia.nn.rewrite.core.utils.utils import save

# Reduce TensorFlow C++ log verbosity; this must be set before TensorFlow is imported.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf

tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

from tensorflow.lite.python import interpreter as interpreter_wrapper


def make_decode_fn(filename):
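    """Build a decode function for a TFRecord file, using the dtype map stored in its .meta sidecar."""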
    def decode_fn(record_bytes, type_map):
        parse_dict = {
            name: tf.io.FixedLenFeature([], tf.string) for name in type_map.keys()
        }
        example = tf.io.parse_single_example(record_bytes, parse_dict)
        features = {
            n: tf.io.parse_tensor(example[n], tf.as_dtype(t))
            for n, t in type_map.items()
        }
        return features

    meta_filename = filename + ".meta"
    with open(meta_filename) as f:
        type_map = json.load(f)["type_map"]
    return lambda record_bytes: decode_fn(record_bytes, type_map)


def NumpyTFReader(filename):
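    """Return a tf.data.Dataset yielding the named tensors stored in the TFRecord file."""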
    decode_fn = make_decode_fn(filename)
    dataset = tf.data.TFRecordDataset(filename)
    return dataset.map(decode_fn)


def numpytf_count(filename):
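    """Return the number of records in the TFRecord file, as recorded in its .meta sidecar."""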
    meta_filename = filename + ".meta"
    with open(meta_filename) as f:
        return json.load(f)["count"]


class NumpyTFWriter:
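    """Write dicts of named numpy arrays to a TFRecord file and a .meta sidecar with dtypes and record count."""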
    def __init__(self, filename):
        self.filename = filename
        self.meta_filename = filename + ".meta"
        self.writer = tf.io.TFRecordWriter(filename)
        self.type_map = {}
        self.count = 0

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def __del__(self):
        self.close()

    def write(self, array_dict):
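        """Serialize a dict of name: numpy array as a single tf.train.Example record."""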
        type_map = {n: str(a.dtype.name) for n, a in array_dict.items()}
        self.type_map.update(type_map)
        self.count += 1

        feature = {
            n: tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(a).numpy()])
            )
            for n, a in array_dict.items()
        }
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        self.writer.write(example.SerializeToString())

    def close(self):
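        """Write the .meta sidecar and close the underlying TFRecord writer."""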
        with open(self.meta_filename, "w") as f:
            meta = {"type_map": self.type_map, "count": self.count}
            json.dump(meta, f)
        self.writer.close()
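
# Example usage (an illustrative sketch; the file name and array shape are hypothetical):
#
#     with NumpyTFWriter("data.tfrec") as writer:
#         writer.write({"input": np.zeros((1, 28, 28, 1), dtype=np.float32)})
#     for record in NumpyTFReader("data.tfrec"):
#         ...  # record is a dict of name -> tensor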


class TFLiteModel:
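    """Thin wrapper around the TFLite interpreter mapping named numpy inputs to named numpy outputs."""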
    def __init__(self, filename, batch_size=None, num_threads=None):
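        """Load the TFLite model; if batch_size is given, rewrite the input/output shapes to use it."""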
        if num_threads == 0:
            num_threads = None
        if batch_size is None:
            self.interpreter = interpreter_wrapper.Interpreter(
                model_path=filename, num_threads=num_threads
            )
        else:  # if a batch size is specified, modify the TFLite model to use this size
            with tempfile.TemporaryDirectory() as tmp:
                fb = load(filename)
                for sg in fb.subgraphs:
                    for t in list(sg.inputs) + list(sg.outputs):
                        sg.tensors[t].shape = np.array(
                            [batch_size] + list(sg.tensors[t].shape[1:]), dtype=np.int32
                        )
                tempname = os.path.join(tmp, "rewrite_tmp.tflite")
                save(fb, tempname)
                self.interpreter = interpreter_wrapper.Interpreter(
                    model_path=tempname, num_threads=num_threads
                )

        try:
            self.interpreter.allocate_tensors()
        except RuntimeError:
            self.interpreter = interpreter_wrapper.Interpreter(
                model_path=filename, num_threads=num_threads
            )
            self.interpreter.allocate_tensors()

        # Get input and output tensors.
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        details = list(self.input_details) + list(self.output_details)
        self.handle_from_name = {d["name"]: d["index"] for d in details}
        self.shape_from_name = {d["name"]: d["shape"] for d in details}
        self.batch_size = next(iter(self.shape_from_name.values()))[0]

    def __call__(self, named_input):
        """Execute the model on one or a batch of named inputs (a dict of name: numpy array)"""
        input_len = next(iter(named_input.values())).shape[0]
        full_steps = input_len // self.batch_size
        remainder = input_len % self.batch_size

        named_ys = defaultdict(list)
        for i in range(full_steps):
            for name, x_batch in named_input.items():
                # Take the i-th full batch of rows for this input.
                x = x_batch[i * self.batch_size : (i + 1) * self.batch_size]
                self.interpreter.set_tensor(self.handle_from_name[name], x)
            self.interpreter.invoke()
            for d in self.output_details:
                named_ys[d["name"]].append(self.interpreter.get_tensor(d["index"]))
        if remainder:
            for name, x_batch in named_input.items():
                x = np.zeros(self.shape_from_name[name]).astype(x_batch.dtype)
                x[:remainder] = x_batch[-remainder:]
                self.interpreter.set_tensor(self.handle_from_name[name], x)
            self.interpreter.invoke()
            for d in self.output_details:
                named_ys[d["name"]].append(
                    self.interpreter.get_tensor(d["index"])[:remainder]
                )
        return {k: np.concatenate(v) for k, v in named_ys.items()}

    def input_tensors(self):
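        """Return the names of the model's input tensors."""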
        return [d["name"] for d in self.input_details]

    def output_tensors(self):
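        """Return the names of the model's output tensors."""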
        return [d["name"] for d in self.output_details]


def sample_tfrec(input_file, k, output_file):
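    """Copy a random sample of k records from input_file to output_file."""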
    total = numpytf_count(input_file)
    # Indices to keep, sorted in descending order so that pop() yields the next
    # (smallest remaining) index as the reader is traversed in order.
    indices = sorted(random.sample(range(total), k=k), reverse=True)

    reader = NumpyTFReader(input_file)
    with NumpyTFWriter(output_file) as writer:
        for i, data in enumerate(reader):
            if i == indices[-1]:
                indices.pop()
                writer.write(data)
                if not indices:
                    break