ValueError: Python inputs incompatible with input_signature:

#python #tensorflow #keras #tensorflow2.0 #tensorflow-lite

Question:

System information

  • OS platform and distribution: CentOS Linux release 7.7.1908
  • TensorFlow version: 2.3.0

I am following this example: https://www.tensorflow.org/tutorials/text/image_captioning?hl=en

It works as expected and saves checkpoints, and now I want to convert it into a TF Lite model.

Here is a link to the full conversion code: https://colab.research.google.com/drive/1GJkGcwWvDAWMooTsECzuSRUSPbirADhb?usp=sharing

Here is a link to the full training code: https://colab.research.google.com/drive/1X2d9WW1EMEzN8Rgva3rtjevP0T_jFccj?usp=sharing

I am also following issue #32999.

Here is what I run to save and convert the inference graph:

 @tf.function
def evaluate(image):
    hidden = decoder.reset_states(batch_size=1)

    temp_input = tf.expand_dims(load_image(image)[0], 0)
    img_tensor_val = image_features_extract_model(temp_input)
    img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))

    features = encoder(img_tensor_val)

    dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
    result = []

    for i in range(max_length):
        predictions, hidden, attention_weights = decoder(dec_input, features, hidden)

        predicted_id = tf.random.categorical(predictions, 1)[0][0]
        # print(tokenizer.index_word)
        print(predicted_id,predicted_id.dtype)

        # for key,value in tokenizer.index_word.items():
        #     key = tf.convert_to_tensor(key)
        #     tf.dtypes.cast(key,tf.int64)
        #     print(key)

        # print(tokenizer.index_word)

        result.append(predicted_id)

        # if tokenizer.index_word[predicted_id] == '<end>':
        #     return result

        dec_input = tf.expand_dims([predicted_id], 0)

    return result

export_dir = "./"
tflite_enc_input = ''
ckpt.f = evaluate
to_save = evaluate.get_concrete_function('')

converter = tf.lite.TFLiteConverter.from_concrete_functions([to_save])
tflite_model = converter.convert()
  

but I get this error:

 ValueError: in user code:

    convert2savedmodel.py:310 evaluate  *
        predictions, hidden, attention_weights = decoder(dec_input, features, hidden)
    /share/nishome/19930072_0/miniconda3/envs/tf2.3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py:985 __call__  **
        outputs = call_fn(inputs, *args, **kwargs)
    /share/nishome/19930072_0/miniconda3/envs/tf2.3/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py:780 __call__
        result = self._call(*args, **kwds)
    /share/nishome/19930072_0/miniconda3/envs/tf2.3/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py:840 _call
        return self._stateless_fn(*args, **kwds)
    /share/nishome/19930072_0/miniconda3/envs/tf2.3/lib/python3.7/site-packages/tensorflow/python/eager/function.py:2828 __call__
        graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
    /share/nishome/19930072_0/miniconda3/envs/tf2.3/lib/python3.7/site-packages/tensorflow/python/eager/function.py:3171 _maybe_define_function
        *args, **kwargs)
    /share/nishome/19930072_0/miniconda3/envs/tf2.3/lib/python3.7/site-packages/tensorflow/python/eager/function.py:2622 canonicalize_function_inputs
        self._flat_input_signature)
    /share/nishome/19930072_0/miniconda3/envs/tf2.3/lib/python3.7/site-packages/tensorflow/python/eager/function.py:2713 _convert_inputs_to_signature
        format_error_message(inputs, input_signature))

    ValueError: Python inputs incompatible with input_signature:
      inputs: (
        Tensor("ExpandDims_1:0", shape=(1, 1), dtype=int32),
        Tensor("cnn__encoder/StatefulPartitionedCall:0", shape=(1, 64, 256), dtype=float32),
        Tensor("zeros:0", shape=(1, 512), dtype=float32))
      input_signature: (
        TensorSpec(shape=(1, 1), dtype=tf.int64, name=None),
        TensorSpec(shape=(1, 64, 256), dtype=tf.float32, name=None),
        TensorSpec(shape=(1, 512), dtype=tf.float32, name=None))
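
If I read this error correctly, the mismatch is only in the first argument: the decoder's input_signature declares tf.int64, but the initial dec_input built from tokenizer.word_index['<start>'] comes out as int32, while tf.random.categorical returns int64 on later iterations. A minimal standalone sketch (placeholder values, not the tutorial code) showing the two dtypes I am seeing:

 import tensorflow as tf

# Standalone dtype check with placeholder values.
start_token = 3                               # stands in for tokenizer.word_index['<start>']
dec_input = tf.expand_dims([start_token], 0)
print(dec_input.dtype)                        # int32 -- a Python int list defaults to int32

logits = tf.zeros((1, 5000))                  # stands in for the decoder predictions
predicted_id = tf.random.categorical(logits, 1)[0][0]
print(predicted_id.dtype)                     # int64 -- tf.random.categorical returns int64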

  

The encoder model:

 class CNN_Encoder(tf.keras.Model):
    def __init__(self, embedding):
        super(CNN_Encoder, self).__init__()
        # shape after fc == (batch_size, 64, embedding_dim)
        self.fc = tf.keras.layers.Dense(embedding_dim)

    @tf.function(input_signature=[tf.TensorSpec(shape=(1, 64, features_shape),dtype=tf.dtypes.float32)])
    def call(self, x):
        x = self.fc(x)
        x = tf.nn.relu(x)
        return x
  

The decoder model:

 class RNN_Decoder(tf.keras.Model):
    def __init__(self, embedding_dim, units, vocab_size):
        super(RNN_Decoder, self).__init__()
        self.units = units

        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform',
                                       unroll = True)
        self.fc1 = tf.keras.layers.Dense(self.units)
        self.fc2 = tf.keras.layers.Dense(vocab_size)

        self.attention = BahdanauAttention(self.units)


    @tf.function(input_signature=[tf.TensorSpec(shape=[1, 1], dtype=tf.int64),
                                  tf.TensorSpec(shape=[1, 64, 256], dtype=tf.float32),
                                  tf.TensorSpec(shape=[1, 512], dtype=tf.float32)])
    def call(self, x , features, hidden):

        context_vector, attention_weights = self.attention(features, hidden)

        #x shape after passing through embedding == (batch_size, 1, embedding_dim)
        x = self.embedding(x)

        #x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)


        output, state = self.gru(x)

        #shape == (batch_size, max_length, hidden_size)
        x = self.fc1(output)

        #x shape == (batch_size, max_length, hidden_size)
        x = tf.reshape(x, (-1, x.shape[2]))

        # output shape == (batch_size * max_length, vocab)
        x = self.fc2(x)

        return x, state, attention_weights

    def reset_states(self, batch_size):
        return tf.zeros((batch_size, self.units))
  

I simply changed the tf.function input_signature to int32, as shown below:

@tf.function(input_signature=[tf.TensorSpec(shape=[1, 1], dtype=tf.int32), tf.TensorSpec(shape=[1, 64,256], dtype=tf.float32), tf.TensorSpec(shape=[1, 512], dtype=tf.float32)])

but another error appeared:

ValueError: Python inputs incompatible with input_signature:

 Tensor("ExpandDims_2:0", shape=(1, 1), dtype=int64),
Tensor("cnn__encoder/StatefulPartitionedCall:0", shape=(1, 64, 256), dtype=float32),
Tensor("rnn__decoder/StatefulPartitionedCall:1", shape=(1, 512), dtype=float32))
input_signature: (
TensorSpec(shape=(1, 1), dtype=tf.int32, name=None),
TensorSpec(shape=(1, 64, 256), dtype=tf.float32, name=None),
TensorSpec(shape=(1, 512), dtype=tf.float32, name=None))```
Why do the dtypes of the inputs change from int64 to int32?
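
Is the right fix simply to cast dec_input to the dtype declared in the decoder's input_signature on every iteration? A sketch of what I mean, reusing the same names as in my evaluate function above (tf.int64 chosen to match the original signature):

 dec_input = tf.cast(tf.expand_dims([tokenizer.word_index['<start>']], 0), tf.int64)

for i in range(max_length):
    predictions, hidden, attention_weights = decoder(dec_input, features, hidden)
    # tf.random.categorical already returns int64, so dec_input stays int64 below.
    predicted_id = tf.random.categorical(predictions, 1)[0][0]
    result.append(predicted_id)
    dec_input = tf.expand_dims([predicted_id], 0)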