#python #tensorflow
Question:
I am trying to create a custom accuracy metric, as suggested in the TensorFlow documentation, by tracking two variables, count and total.
In the update_state() method of the CustomAccuracy class I need the batch size to update the total variable. Since the batch dimension of the model's input is None, I get 'ValueError: None values not supported.'
Here is the custom metric class I created:
class CustomAccuracy(tf.keras.metrics.Metric):
    def __init__(self, name = 'custom_accuracy', **kwargs):
        super().__init__(name = name, **kwargs)
        self.count = self.add_weight(name = 'count', initializer = 'zeros')
        self.total = self.add_weight(name = 'total', initializer = 'zeros')
        self.custom_accuracy = self.add_weight(name = 'custom_acc', initializer = 'zeros')

    def update_state(self, y_true, y_pred, sample_weight = None):
        correct_values = tf.reduce_sum(tf.cast(tf.argmax(y_pred, axis = 1) == tf.argmax(y_true, axis = 1), "float32"))
        self.count.assign_add(correct_values)
        self.total.assign_add(tf.constant(y_true.shape[0], dtype = "float32"))
        self.custom_accuracy.assign(self.count / self.total)

    def result(self):
        return self.custom_accuracy

    def reset_states(self):
        self.count.assign(0.0)
        self.total.assign(0.0)
        self.custom_accuracy.assign(0.0)
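For context (this snippet is not part of the original question, just an illustration of the failure mode): inside the graph-traced train/test step the static shape of y_true has an undefined batch dimension, while tf.shape() returns the actual batch size at run time.

import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=(None, 2), dtype="float32")])
def inspect_batch(y_true):
    # Static shape is fixed at trace time: the batch dimension is None here,
    # which is why tf.constant(y_true.shape[0], ...) fails inside model.fit().
    print("static shape at trace time:", y_true.shape)   # (None, 2)
    # Dynamic shape is a tensor evaluated at run time, so the real batch size is available.
    return tf.cast(tf.shape(y_true)[0], "float32")

print(inspect_batch(tf.zeros((5, 2))))                    # tf.Tensor(5.0, ...)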
The error I get:
Epoch 1/100
1/1 [==============================] - ETA: 0s - loss: 4.8930 - accuracy: 0.7344 - custom_accuracy: 1.0000
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-139-a286f110fac4> in <module>
4 batch_size = 192,
5 epochs = 100,
----> 6 validation_data = (val_data, val_labels),
7 )
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1131 workers=workers,
1132 use_multiprocessing=use_multiprocessing,
-> 1133 return_dict=True)
1134 val_logs = {'val_' + name: val for name, val in val_logs.items()}
1135 epoch_logs.update(val_logs)
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\keras\engine\training.py in evaluate(self, x, y, batch_size, verbose, sample_weight, steps, callbacks, max_queue_size, workers, use_multiprocessing, return_dict)
1377 with trace.Trace('TraceContext', graph_type='test', step_num=step):
1378 callbacks.on_test_batch_begin(step)
-> 1379 tmp_logs = test_function(iterator)
1380 if data_handler.should_sync:
1381 context.async_wait()
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
812 # In this case we have not created variables on the first call. So we can
813 # run the first trace but we should fail if variables are created.
--> 814 results = self._stateful_fn(*args, **kwds)
815 if self._created_variables:
816 raise ValueError("Creating variables on a non-first call to a function"
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
2826 """Calls a graph function specialized to the inputs."""
2827 with self._lock:
-> 2828 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
2829 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2830
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\eager\function.py in _maybe_define_function(self, args, kwargs)
3208 and self.input_signature is None
3209 and call_context_key in self._function_cache.missed):
-> 3210 return self._define_function_with_shape_relaxation(args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\eager\function.py in _define_function_with_shape_relaxation(self, args, kwargs)
3140
3141 graph_function = self._create_graph_function(
-> 3142 args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes)
3143 self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function
3144
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3073 arg_names=arg_names,
3074 override_flat_arg_shapes=override_flat_arg_shapes,
-> 3075 capture_by_value=self._capture_by_value),
3076 self._function_attributes,
3077 function_spec=self.function_spec,
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\eager\def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\framework\func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
ValueError: in user code:
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\keras\engine\training.py:1224 test_function *
return step_function(self, iterator)
<ipython-input-135-4207cf41498c>:12 update_state *
self.total.assign_add(tf.constant(y_true.shape[0], dtype = "float32"))
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\framework\constant_op.py:264 constant **
allow_broadcast=True)
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\framework\constant_op.py:282 _constant_impl
allow_broadcast=allow_broadcast))
c:\users\aniket\documents\aniket\learning-ml\ml_env\lib\site-packages\tensorflow\python\framework\tensor_util.py:444 make_tensor_proto
raise ValueError("None values not supported.")
ValueError: None values not supported.
Here is a minimal code example that reproduces the problem above:
import tensorflow as tf
import numpy as np

# Creating Custom Metric
class CustomAccuracy(tf.keras.metrics.Metric):
    def __init__(self, name = 'custom_accuracy', **kwargs):
        super().__init__(name = name, **kwargs)
        self.count = self.add_weight(name = 'count', initializer = 'zeros')
        self.total = self.add_weight(name = 'total', initializer = 'zeros')
        self.custom_accuracy = self.add_weight(name = 'custom_acc', initializer = 'zeros')

    def update_state(self, y_true, y_pred, sample_weight = None):
        correct_values = tf.reduce_sum(tf.cast(tf.argmax(y_pred, axis = 1) == tf.argmax(y_true, axis = 1), "float32"))
        self.count.assign_add(correct_values)
        self.total.assign_add(tf.constant(y_true.shape[0], dtype = "float32"))
        self.custom_accuracy.assign(self.count / self.total)

    def result(self):
        return self.custom_accuracy

    def reset_states(self):
        self.count.assign(0.0)
        self.total.assign(0.0)
        self.custom_accuracy.assign(0.0)

def create_model():
    input1 = tf.keras.Input(shape=(13,))
    hidden1 = tf.keras.layers.Dense(units = 12, activation='relu')(input1)
    hidden2 = tf.keras.layers.Dense(units = 6, activation='relu')(hidden1)
    output1 = tf.keras.layers.Dense(units = 2, activation='sigmoid')(hidden2)
    model = tf.keras.models.Model(inputs = input1, outputs = output1, name= "functional1")
    model.compile(optimizer='adam',
                  loss= 'binary_crossentropy',
                  metrics=['accuracy', CustomAccuracy()])
    return model

model = create_model()
x1 = np.random.randint(0, 10, size = (240, 13))
y1 = np.random.randint(0, 2, size = (240, 2))
history = model.fit(
    x = x1,
    y = y1,
    batch_size = 32,
    epochs = 100,
    validation_split = 0.2,
)
All of this works if I pass run_eagerly = True to the compile method, but I need a solution that does not use it.
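For reference, a sketch (not part of the original post) of the two usual ways to enable the workaround mentioned above; the goal here, however, is a metric that works without it:

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy', CustomAccuracy()],
              run_eagerly=True)   # option 1: at compile time
# ...or, after compiling:
model.run_eagerly = True          # option 2: toggle on the built model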
Comments:
1. This is probably because a graph-mode tensor is being passed when model.fit is called, rather than an eager tensor. To make this work, as you mentioned, you either pass run_eagerly=True to the compile method or set model.run_eagerly = True after compiling.
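One possible graph-compatible alternative (not suggested in the original thread) would be to read the batch size from the dynamic shape instead of the static one; a minimal sketch of update_state, assuming the rest of the class stays unchanged:

def update_state(self, y_true, y_pred, sample_weight = None):
    matches = tf.cast(tf.argmax(y_pred, axis = 1) == tf.argmax(y_true, axis = 1), "float32")
    self.count.assign_add(tf.reduce_sum(matches))
    # tf.shape() is evaluated at run time, so it returns the actual batch size
    # even though the static batch dimension is None during tracing.
    self.total.assign_add(tf.cast(tf.shape(y_true)[0], "float32"))
    self.custom_accuracy.assign(self.count / self.total)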