Как исправить ошибку «ValueError: Dimension 1 in both shapes must be equal» (размерность 1 в обеих формах должна быть равной)?

#python #keras #tensorflow2.0 #epoch #valueerror

Теги: python, keras, tensorflow 2.0, epoch, ValueError

Вопрос:

Изменение размера набора данных

 import cv2
from PIL import Image
import numpy as np
import h5py

# Resize each training mask to 256x256 and binarize it
# (threshold at 128: pixels >= 128 become 255, the rest 0).
# FIX: the '+' string-concatenation operators were missing in the
# pasted code ("path str(i) ...") — restored here.
path = "masks_train1/"

for i in range(1, 220):
    dim = (256, 256)  # (width, height)
    # Flag 0 = read as grayscale so thresholding works on one channel.
    image = cv2.imread(path + str(i) + ".jpg", 0)
    if image is None:
        # cv2.imread silently returns None on a missing/corrupt file,
        # which would otherwise crash inside cv2.resize with a cryptic error.
        raise FileNotFoundError(path + str(i) + ".jpg")
    resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
    (thresh, im_bw) = cv2.threshold(resized, 128, 255, cv2.THRESH_BINARY)
    cv2.imwrite('mask_train/' + str(i) + '.png', im_bw)


# Build the (N, 256, 256, C) image and (N, 256, 256, 1) mask arrays.
# FIX 1: restored the '+' operators stripped from the pasted code.
# FIX 2 (root cause of the ValueError): the source images were loaded at
# their original size (e.g. 600x1200) while the model's Input is
# (256, 256, 3). Mismatched sizes break the U-Net skip-connection
# concatenate ("Dimension 1 in both shapes must be equal ... 75 and 74").
# Every image is now resized to 256x256 before stacking.
images = []
masks = []
for i in range(1, 220):
    img = Image.open("masks_train1/" + str(i) + ".jpg")
    img = img.resize((256, 256))  # PIL takes (width, height)
    images.append(np.array(img))
    mask = Image.open("mask_train/" + str(i) + ".png")  # already 256x256
    # Add a trailing channel axis so masks are (256, 256, 1).
    masks.append(np.expand_dims(np.array(mask), -1))


images = np.array(images)
masks = np.array(masks)
masks.shape
 

Вывод: (219, 256, 256, 1)

 # Persist both arrays into one HDF5 file with maximum gzip compression.
with h5py.File("Dataset_train1.h5", 'w') as hdf:
    for name, data in (('images', images), ('masks', masks)):
        hdf.create_dataset(name, data=data, compression='gzip', compression_opts=9)
 

Обучающий и тестовый файл Unet

 import numpy as np 
import os
import cv2

from keras.models import *
from keras.layers import *
from keras.optimizers import *

from keras import backend as keras
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import TensorBoard
import matplotlib.pyplot as plt

def unet(input_size=(256, 256, 3)):
    """Build and compile a standard U-Net for binary segmentation.

    Args:
        input_size: (height, width, channels) of the input images.
            NOTE: the decoder's skip-connection `concatenate` layers require
            the spatial dimensions to survive four 2x poolings cleanly, so
            height and width should be divisible by 16 (256 is) — feeding
            images of a different size (e.g. 600x1200) is exactly what
            produces "Dimension 1 in both shapes must be equal".

    Returns:
        A compiled keras Model: sigmoid output of shape (H, W, 1),
        binary cross-entropy loss, Adam(lr=1e-3).
    """
    def conv(x, filters, kernel_size=3):
        # One ReLU conv with 'same' padding and he_normal init — the
        # repeated building block of the original (behavior-identical).
        return Conv2D(filters, kernel_size, activation='relu',
                      padding='same', kernel_initializer='he_normal')(x)

    inputs = Input(input_size)

    # --- Encoder: double conv + 2x2 max-pool at each level ---
    conv1 = conv(conv(inputs, 64), 64)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = conv(conv(pool1, 128), 128)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = conv(conv(pool2, 256), 256)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = conv(conv(pool3, 512), 512)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # --- Bottleneck ---
    conv5 = conv(conv(pool4, 1024), 1024)
    drop5 = Dropout(0.5)(conv5)

    # --- Decoder: upsample, 2x2 conv, concat skip connection, double conv ---
    up6 = conv(UpSampling2D(size=(2, 2))(drop5), 512, kernel_size=2)
    merge6 = concatenate([drop4, up6])
    conv6 = conv(conv(merge6, 512), 512)

    up7 = conv(UpSampling2D(size=(2, 2))(conv6), 256, kernel_size=2)
    merge7 = concatenate([conv3, up7])
    conv7 = conv(conv(merge7, 256), 256)

    up8 = conv(UpSampling2D(size=(2, 2))(conv7), 128, kernel_size=2)
    merge8 = concatenate([conv2, up8])
    conv8 = conv(conv(merge8, 128), 128)

    up9 = conv(UpSampling2D(size=(2, 2))(conv8), 64, kernel_size=2)
    merge9 = concatenate([conv1, up9])
    # Double 64-filter conv followed by a 2-filter conv, as in the
    # original U-Net reference implementation.
    conv9 = conv(conv(conv(merge9, 64), 64), 2)

    # 1x1 sigmoid conv -> per-pixel probability map of shape (H, W, 1).
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=1e-3),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model


import h5py

print('*'*30)
print('Loading and preprocessing train data...')
print('*'*30)
# FIX: the original opened the HDF5 file and never closed it (resource
# leak, and on some platforms the file stays locked). A context manager
# closes it deterministically; np.array() copies the data out first.
with h5py.File('Dataset_train1.h5', 'r') as file:
    imgs_train = np.array(file.get('images'))
    imgs_mask_train = np.array(file.get('masks'))

# Standardize images to zero mean / unit variance (global statistics).
imgs_train = imgs_train.astype('float32')
mean = np.mean(imgs_train)  # mean for data centering
std = np.std(imgs_train)  # std for data normalization

imgs_train -= mean
imgs_train /= std

imgs_mask_train = imgs_mask_train.astype('float32')
imgs_mask_train /= 255  # scale masks to [0, 1]


print('*'*30)
print('Creating and compiling model...')
print('*'*30)
model = unet()
# Keep only the best weights (lowest validation loss) seen so far.
model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)
tensorboard = TensorBoard(log_dir='tensorboard/', write_graph=True, write_images=True)


# Print the layer-by-layer summary of the compiled U-Net (~31M parameters).
model.summary()
Model: "functional_7"
 

Общее количество параметров: 31 032 837
Обучаемые параметры: 31 032 837
Не поддающиеся обучению параметры: 0


 print('*'*30)
print('Fitting model...')
print('*'*30)
history =  model.fit(imgs_train, imgs_mask_train, batch_size=15, epochs=30, verbose=2, shuffle=True,
          validation_split=0.2,
          callbacks=[model_checkpoint, tensorboard])


**output problem**
******************************
Fitting model...
******************************
Epoch 1/30
WARNING:tensorflow:Model was constructed with shape (None, 256, 256, 3) for input Tensor("input_5:0", shape=(None, 256, 256, 3), dtype=float32), but it was called on an input with incompatible shape (None, 600, 1200, 3).
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-22-69f1645fb376> in <module>
      2 print('Fitting model...')
      3 print('*'*30)
----> 4 history =  model.fit(imgs_train, imgs_mask_train, batch_size=15, epochs=30, verbose=2, shuffle=True,
      5           validation_split=0.2,
      6           callbacks=[model_checkpoint, tensorboard])

~.condaenvstensorflowlibsite-packagestensorflowpythonkerasenginetraining.py in _method_wrapper(self, *args, **kwargs)
    106   def _method_wrapper(self, *args, **kwargs):
    107     if not self._in_multi_worker_mode():  # pylint: disable=protected-access
--> 108       return method(self, *args, **kwargs)
    109 
    110     # Running inside `run_distribute_coordinator` already.

~.condaenvstensorflowlibsite-packagestensorflowpythonkerasenginetraining.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
   1096                 batch_size=batch_size):
   1097               callbacks.on_train_batch_begin(step)
-> 1098               tmp_logs = train_function(iterator)
   1099               if data_handler.should_sync:
   1100                 context.async_wait()

~.condaenvstensorflowlibsite-packagestensorflowpythoneagerdef_function.py in __call__(self, *args, **kwds)
    778       else:
    779         compiler = "nonXla"
--> 780         result = self._call(*args, **kwds)
    781 
    782       new_tracing_count = self._get_tracing_count()

~.condaenvstensorflowlibsite-packagestensorflowpythoneagerdef_function.py in _call(self, *args, **kwds)
    812       # In this case we have not created variables on the first call. So we can
    813       # run the first trace but we should fail if variables are created.
--> 814       results = self._stateful_fn(*args, **kwds)
    815       if self._created_variables:
    816         raise ValueError("Creating variables on a non-first call to a function"

~.condaenvstensorflowlibsite-packagestensorflowpythoneagerfunction.py in __call__(self, *args, **kwargs)
   2826     """Calls a graph function specialized to the inputs."""
   2827     with self._lock:
-> 2828       graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
   2829     return graph_function._filtered_call(args, kwargs)  # pylint: disable=protected-access
   2830 

~.condaenvstensorflowlibsite-packagestensorflowpythoneagerfunction.py in _maybe_define_function(self, args, kwargs)
   3208           and self.input_signature is None
   3209           and call_context_key in self._function_cache.missed):
-> 3210         return self._define_function_with_shape_relaxation(args, kwargs)
   3211 
   3212       self._function_cache.missed.add(call_context_key)

~.condaenvstensorflowlibsite-packagestensorflowpythoneagerfunction.py in _define_function_with_shape_relaxation(self, args, kwargs)
   3139           expand_composites=True)
   3140 
-> 3141     graph_function = self._create_graph_function(
   3142         args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes)
   3143     self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function

~.condaenvstensorflowlibsite-packagestensorflowpythoneagerfunction.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
   3063     arg_names = base_arg_names   missing_arg_names
   3064     graph_function = ConcreteFunction(
-> 3065         func_graph_module.func_graph_from_py_func(
   3066             self._name,
   3067             self._python_function,

~.condaenvstensorflowlibsite-packagestensorflowpythonframeworkfunc_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
    984         _, original_func = tf_decorator.unwrap(python_func)
    985 
--> 986       func_outputs = python_func(*func_args, **func_kwargs)
    987 
    988       # invariant: `func_outputs` contains only Tensors, CompositeTensors,

~.condaenvstensorflowlibsite-packagestensorflowpythoneagerdef_function.py in wrapped_fn(*args, **kwds)
    598         # __wrapped__ allows AutoGraph to swap in a converted function. We give
    599         # the function a weak reference to itself to avoid a reference cycle.
--> 600         return weak_wrapped_fn().__wrapped__(*args, **kwds)
    601     weak_wrapped_fn = weakref.ref(wrapped_fn)
    602 

~.condaenvstensorflowlibsite-packagestensorflowpythonframeworkfunc_graph.py in wrapper(*args, **kwargs)
    971           except Exception as e:  # pylint:disable=broad-except
    972             if hasattr(e, "ag_error_metadata"):
--> 973               raise e.ag_error_metadata.to_exception(e)
    974             else:
    975               raise
 

ValueError: in user code:

     C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonkerasenginetraining.py:806
 

train_function *
возвращает step_function(self, итератор)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonkerasenginetraining.py:796
step_function **
outputs = model.distribute_strategy.run(run_step, аргументы=(данные,))
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythondistributedistribute_lib.py:1211
запустите
return self._extended.call_for_each_replica(fn, аргументы = аргументы, kwargs=kwargs)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythondistributedistribute_lib.py:2585
вызов_for_each_replica
возвращает self._call_for_each_replica(fn, аргументы, kwargs)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythondistributedistribute_lib.py:2945
_call_for_each_replica
возвращает fn(* аргументы, ** kwargs)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonkerasenginetraining.py:789
run_step выполнить шаг **
outputs = model.train_step(данные)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonkerasenginetraining.py:747
train_step
y_pred = self(x, training=True)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonkerasenginebase_layer.py:985
вызов
outputs = call_fn(входные данные, * аргументы, ** kwargs)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonkerasenginefunctional.py:385
вызов
возвращает self._run_internal_graph(
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonkerasenginefunctional.py:508
_run_internal_graph
выводит = узел.layer(* args, ** kwargs)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonkerasenginebase_layer.py:985
вызов
outputs = call_fn(входные данные, * аргументы, ** kwargs)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonkeraslayersmerge.py:183
вызов
возвращает self._merge_function(входные данные)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonkeraslayersmerge.py:522
_merge_function
возвращает K.конкатенация (входные данные, ось= self.axis)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonutildispatch.py:201
обертка
возвращает цель (* аргументы, ** kwargs)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonkerasbackend.py:2881
конкатенация
возвращает array_ops.concat([to_dense(x) для x в тензорах], ось)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonutildispatch.py:201
обертка
возвращает цель (* аргументы, ** kwargs)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonopsarray_ops.py:1654
concat
возвращает gen_array_ops.concat_v2(значения = значения, ось = ось, имя = имя)
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonopsgen_array_ops.py:1220
конкат_в2
_, _, _op, _outputs = _op_def_library._apply_op_helper(
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonframeworkop_def_library.py:742
_apply_op_helper
op = g._create_op_internal(op_type_name, входы, dtypes= Нет,
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonframeworkfunc_graph.py:591
_create_op_internal
возвращает super(FuncGraph, self)._create_op_internal( # pylint: отключить = защищенный доступ
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonframeworkops.py:3477
_create_op_internal
ret = Операция(
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonframeworkops.py:1974
инициализация
self._c_op = _create_c_op(self._graph, node_def, входы,
C:Userspeash.condaenvstensorflowlibsite-packagestensorflowpythonframeworkops.py:1815
_create_c_op
повысить ошибку ValueError(str(e))

     ValueError: Dimension 1 in both shapes must be equal, but are 75 and 74. Shapes are [?,75,150] and [?,74,150]. for '{{node
 

functional_7/concatenate_13/concat}} = ConcatV2[N = 2, T = DT_FLOAT,
Tidx=DT_INT32](functional_7/ dropout_8/ dropout/Mul_1,
functional_7/ conv2d_93/Relu, functional_7/concatenate_13/concat/axis)’
с помощью входных фигур: [?,75,150,512], [?,74,150,512], [] и с
вычисленными входными тензорами: input[2] = <3>.

Я пытался изменить форму (reshape) обучающих данных, но всё равно сталкиваюсь с этой ошибкой.