#deep-learning #tensorflow2.0 #point-clouds #semantic-segmentation
Question:
Hi everyone, I am building a model with TensorFlow 2 to classify point clouds; the base model is PointNet, but I am getting this error:
Input 0 of layer dense_261 is incompatible with the layer: expected min_ndim=2, found ndim=1. Full shape received: (32,)
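For context, the same kind of error message can be reproduced by feeding a 1-D tensor into a Dense layer (a minimal sketch for illustration only, not my actual code):

import tensorflow as tf

# Dense expects at least a 2-D input (batch, features); a 1-D tensor of shape (32,)
# raises "expected min_ndim=2, found ndim=1. Full shape received: (32,)"
tf.keras.layers.Dense(5)(tf.zeros([32]))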
My model:
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Dense, Dropout
from tensorflow.keras.models import Model

pt_cloud = Input(shape=(None, 3), dtype=tf.float32, name='pt_cloud')

# Input transformer (B x N x 3 -> B x N x 3)
pt_cloud_transform = TNet(bn_momentum=0.99)(pt_cloud)

# Embed to 64-dim space (B x N x 3 -> B x N x 64)
pt_cloud_transform = tf.expand_dims(pt_cloud_transform, axis=2)  # for weight-sharing of conv
hidden_64 = Conv2D(64, (1, 1), activation=tf.nn.relu)(pt_cloud_transform)
hidden_64 = BatchNormalization()(hidden_64)
embed_64 = Conv2D(64, (1, 1), activation=tf.nn.relu)(hidden_64)
embed_64 = BatchNormalization()(embed_64)
embed_64 = tf.squeeze(embed_64, axis=2)

# Feature transformer (B x N x 64 -> B x N x 64)
embed_64_transform = TNet(bn_momentum=0.99, add_regularization=True)(embed_64)

# Embed to 1024-dim space (B x N x 64 -> B x N x 1024)
embed_64_transform = tf.expand_dims(embed_64_transform, axis=2)
hidden_64 = Conv2D(64, (1, 1), activation=tf.nn.relu)(embed_64_transform)
hidden_64 = BatchNormalization()(hidden_64)
hidden_128 = Conv2D(128, (1, 1), activation=tf.nn.relu)(hidden_64)
hidden_128 = BatchNormalization()(hidden_128)
embed_1024 = Conv2D(1024, (1, 1), activation=tf.nn.relu)(hidden_128)
embed_1024 = BatchNormalization()(embed_1024)
# also tried: MaxPool2D((2, 2), padding="same") + Flatten(), and GlobalMaxPooling1D

# Global feature vector (B x N x 1024 -> B x 1024)
global_descriptor = tf.reduce_max(embed_1024, axis=1)

# Fully connected layers to output k scores (B x 1024 -> B x 5)
hidden_512 = Dense(512, activation=tf.nn.relu)(global_descriptor)
hidden_512 = BatchNormalization()(hidden_512)
hidden_512 = Dropout(rate=0.2)(hidden_512)
hidden_256 = Dense(256, activation=tf.nn.relu)(hidden_512)
hidden_256 = BatchNormalization()(hidden_256)
hidden_256 = Dropout(rate=0.2)(hidden_256)
logits = Dense(5, activation="softmax")(hidden_256)

model = Model(inputs=pt_cloud, outputs=logits)
How I compile the model:
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
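As I understand it, with sparse_categorical_crossentropy and a 5-way softmax the labels should be plain integer class ids, one per sample (a small sketch with made-up values, not my real labels):

import numpy as np

# shape (num_samples,), integer class ids in the range [0, 5)
y_example = np.array([0, 3, 1, 4, 2])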
The TNet class (CustomConv and CustomDense are custom layers not shown here):
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer

class TNet(Layer):

    def __init__(self, add_regularization=False, bn_momentum=0.99, **kwargs):
        super(TNet, self).__init__(**kwargs)
        self.add_regularization = add_regularization
        self.bn_momentum = bn_momentum
        self.conv0 = CustomConv(64, (1, 1), strides=(1, 1), bn_momentum=bn_momentum)
        self.conv1 = CustomConv(128, (1, 1), strides=(1, 1), bn_momentum=bn_momentum)
        self.conv2 = CustomConv(1024, (1, 1), strides=(1, 1), bn_momentum=bn_momentum)
        self.fc0 = CustomDense(512, activation=tf.nn.relu, apply_bn=True, bn_momentum=bn_momentum)
        self.fc1 = CustomDense(256, activation=tf.nn.relu, apply_bn=True, bn_momentum=bn_momentum)

    def build(self, input_shape):
        self.K = input_shape[-1]
        self.w = self.add_weight(shape=(256, self.K**2), initializer=tf.zeros_initializer,
                                 trainable=True, name='w')
        self.b = self.add_weight(shape=(self.K, self.K), initializer=tf.zeros_initializer,
                                 trainable=True, name='b')
        # Initialize bias with identity
        I = tf.constant(np.eye(self.K), dtype=tf.float32)
        self.b = tf.math.add(self.b, I)

    def call(self, x, training=None):
        input_x = x                               # BxNxK
        # Embed to higher dim for convolution channel
        x = tf.expand_dims(input_x, axis=2)       # BxNx1xK
        x = self.conv0(x, training=training)
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = tf.squeeze(x, axis=2)                 # BxNx1024
        # Global features
        x = tf.reduce_max(x, axis=1)              # Bx1024
        # Fully-connected layers
        x = self.fc0(x, training=training)        # Bx512
        x = self.fc1(x, training=training)        # Bx256
        # Convert to KxK matrix to matmul with input
        x = tf.expand_dims(x, axis=1)             # Bx1x256
        x = tf.matmul(x, self.w)                  # Bx1xK^2
        x = tf.squeeze(x, axis=1)
        x = tf.reshape(x, (-1, self.K, self.K))
        # Add bias term (initialized to identity matrix)
        x += self.b
        # Add regularization
        if self.add_regularization:
            eye = tf.constant(np.eye(self.K), dtype=tf.float32)
            x_xT = tf.matmul(x, tf.transpose(x, perm=[0, 2, 1]))
            reg_loss = tf.nn.l2_loss(eye - x_xT)
            self.add_loss(1e-3 * reg_loss)
        return tf.matmul(input_x, x)

    def get_config(self):
        config = super(TNet, self).get_config()
        config.update({
            'add_regularization': self.add_regularization,
            'bn_momentum': self.bn_momentum})
        return config

    @classmethod
    def from_config(cls, config):
        return cls(**config)
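To sanity-check the transformer on its own, I run it on a dummy cloud like this (just a shape check, assuming CustomConv/CustomDense behave like ordinary conv/dense blocks):

dummy = tf.random.uniform((2, 500, 3))               # (batch, num_points, channels)
out = TNet(bn_momentum=0.99)(dummy, training=False)
print(out.shape)                                      # (2, 500, 3), same shape as the input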
My input data is 500 points with 4 features each (x, y, z, intensity).
x_train shape is: (335, 4)
x_train:
array([[681.96111796, 17.79377277, 34.9556893 , 347. ],
[884.93214232, 110.16111602, 99.6468126 , 191. ],
[424.62872855, 253.53727143, 37.58702972, 194. ],
...,
[839.09216552, 131.29408834, 98.34026563, 207. ],
[473.81817483, 500.62097006, 81.01524894, 349. ],
[591.4253249 , 993.33877686, 5.35387471, 205. ]])
and y_train contains the labels.
r = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=50)
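For completeness, this is how I compare what the model expects with what I actually pass to fit (a small diagnostic sketch, nothing more):

print(model.input_shape)   # (None, None, 3): batch of clouds, N points, 3 channels
print(x_train.shape)       # (335, 4): what I feed in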