#python-3.x #artificial-intelligence #conv-neural-network
Question:
The proposed method can automatically detect the features of hyperspectral images under conditions defined by the algorithms and achieve correct, fast recognition results.
Here I tried to run face recognition using the CNN method, but then I got the error message shown below —
File "<ipython-input-6-fdb29ac830ce>", line 1, in <module>
runfile('C:/Users/MDIC/Desktop/Face Recognition With CNN.py', wdir='C:/Users/MDIC/Desktop')
File "C:Anaconda3libsite-packagesspyder_kernelscustomizespydercustomize.py", line 786, in runfile
execfile(filename, namespace)
File "C:Anaconda3libsite-packagesspyder_kernelscustomizespydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/MDIC/Desktop/Face Recognition With CNN.py", line 221, in <module>
plt.plot(epochs, val_acc)
File "C:Anaconda3libsite-packagesmatplotlibpyplot.py", line 2811, in plot
is not None else {}), **kwargs)
File "C:Anaconda3libsite-packagesmatplotlib__init__.py", line 1810, in inner
return func(ax, *args, **kwargs)
File "C:Anaconda3libsite-packagesmatplotlibaxes_axes.py", line 1611, in plot
for line in self._get_lines(*args, **kwargs):
File "C:Anaconda3libsite-packagesmatplotlibaxes_base.py", line 393, in _grab_next_args
yield from self._plot_args(this, kwargs)
File "C:Anaconda3libsite-packagesmatplotlibaxes_base.py", line 370, in _plot_args
x, y = self._xy_from_xy(x, y)
File "C:Anaconda3libsite-packagesmatplotlibaxes_base.py", line 231, in _xy_from_xy
"have shapes {} and {}".format(x.shape, y.shape))
ValueError: x and y must have same first dimension, but have shapes (2,) and (1,)
This is my code —
# Importing libraries
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.image as mpimg
import numpy as np
import os
# Preparing dataset
# Setting names of the directories for both sets
base_dir = 'data'
seta ='Man_One'
setb ='Man_Two'
# Each of the sets has three sub directories train, validation and test
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
def prepare_data(base_dir, seta, setb):
    # Take the directory names for the base directory and both the sets
    # Returns the paths for train, validation for each of the sets
    seta_train_dir = os.path.join(train_dir, seta)
    setb_train_dir = os.path.join(train_dir, setb)
    seta_valid_dir = os.path.join(validation_dir, seta)
    setb_valid_dir = os.path.join(validation_dir, setb)
    seta_train_fnames = os.listdir(seta_train_dir)
    setb_train_fnames = os.listdir(setb_train_dir)
    return seta_train_dir, setb_train_dir, seta_valid_dir, setb_valid_dir, seta_train_fnames, setb_train_fnames
seta_train_dir, setb_train_dir, seta_valid_dir, setb_valid_dir, seta_train_fnames, setb_train_fnames = prepare_data(base_dir, seta, setb)
seta_test_dir = os.path.join(test_dir, seta)
setb_test_dir = os.path.join(test_dir, setb)
test_fnames_seta = os.listdir(seta_test_dir)
test_fnames_setb = os.listdir(setb_test_dir)
datagen = ImageDataGenerator(
    height_shift_range = 0.2,
    width_shift_range = 0.2,
    rotation_range = 40,
    shear_range = 0.2,
    zoom_range = 0.2,
    horizontal_flip = True,
    fill_mode = 'nearest')
img_path = os.path.join(seta_train_dir, seta_train_fnames[3])
img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
i = 0
for batch in datagen.flow(x, batch_size = 1):
    plt.figure(i)
    imgplot = plt.imshow(array_to_img(batch[0]))
    i += 1
    if i % 5 == 0:
        break
# Convolutional Neural Network model
# Import TensorFlow libraries
from tensorflow.keras import layers
from tensorflow.keras import Model
img_input = layers.Input(shape = (150, 150, 3))
# 2D Convolution layer with 64 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(64, 3, activation = 'relu')(img_input)
# 2D max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 128 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(128, 3, activation = 'relu')(x)
# 2D Max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 256 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(256, 3, activation = 'relu')(x)
# 2D Max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 512 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(512, 3, activation = 'relu')(x)
# 2D Max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 512 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(512, 3, activation = 'relu')(x)
# Flatten layer
x = layers.Flatten()(x)
# Fully connected layers and ReLU activation algorithm
x = layers.Dense(1024, activation = 'relu')(x)
x = layers.Dense(1024, activation = 'relu')(x)
x = layers.Dense(1000, activation = 'relu')(x)
# Dropout layers for optimisation
x = layers.Dropout(0.5)(x)
# Fully connected layers and sigmoid activation algorithm
output = layers.Dense(1, activation = 'sigmoid')(x)
model = Model(img_input, output)
model.summary()
import tensorflow as tf
# Using binary_crossentropy as the loss function and
# Adam optimizer as the optimizing function when training
model.compile(loss = 'binary_crossentropy',
              optimizer = tf.optimizers.Adam(learning_rate = 0.0005),
              metrics = ['acc'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size = (150, 150),
    batch_size = 20,
    class_mode = 'binary')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size = (150, 150),
    batch_size = 20,
    class_mode = 'binary')
# 5x5 grid
ncols = 5
nrows = 5
pic_index = 0
# Set up matplotlib fig and size it to fit 5x5 pics
fig = plt.gcf()
fig.set_size_inches(ncols * 5, nrows * 5)
pic_index = 10
next_seta_pix = [os.path.join(seta_train_dir, fname)
                 for fname in seta_train_fnames[pic_index - 10:pic_index]]
next_setb_pix = [os.path.join(setb_train_dir, fname)
                 for fname in setb_train_fnames[pic_index - 10:pic_index]]
for i, img_path in enumerate(next_seta_pix + next_setb_pix):
    # Set up subplot; subplot indices start at 1
    sp = plt.subplot(nrows, ncols, i + 1)
    sp.axis('Off')
    img = mpimg.imread(img_path)
    plt.imshow(img)
plt.show()
# Train the model
mymodel = model.fit_generator(
    train_generator,
    steps_per_epoch = 10,
    epochs = 80,
    validation_data = validation_generator,
    validation_steps = 7,
    verbose = 2)
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img
successive_outputs = [layer.output for layer in model.layers[1:]]
visualization_model = Model(img_input, successive_outputs)
a_img_files = [os.path.join(seta_train_dir, f) for f in seta_train_fnames]
b_img_files = [os.path.join(setb_train_dir, f) for f in setb_train_fnames]
img_path = random.choice(a_img_files + b_img_files)
img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
x /= 255
successive_feature_maps = visualization_model.predict(x)
layer_names = [layer.name for layer in model.layers]
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
    if len(feature_map.shape) == 4:
        # Just do this for the conv/maxpool layers
        n_features = feature_map.shape[-1]
        # The feature map has shape (1, size, size, n_features)
        size = feature_map.shape[1]
        # Will tile images in this matrix
        display_grid = np.zeros((size, size * n_features))
        for i in range(n_features):
            # Postprocess the feature
            x = feature_map[0, :, :, i]
            x -= x.mean()
            x *= 64
            x += 128
            x = np.clip(x, 0, 255).astype('float32')
            # Will tile each filter into this big horizontal grid
            display_grid[:, i * size : (i + 1) * size] = x
# Accuracy results for each training and validation epoch
acc = mymodel.history['acc']
val_acc = mymodel.history['val_acc']
# Loss results for each training and validation epoch
loss = mymodel.history['loss']
val_loss = mymodel.history['val_loss']
epochs = range(len(acc))
# Plot accuracy for each training and validation epoch
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.legend(['train', 'val'], loc='center')
plt.figure()
# Plot loss for each training and validation epoch
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')
plt.legend(['train', 'val'], loc='center')
plt.figure()
# Testing model on a random train image from set a
train_img = random.choice(seta_train_fnames)
train_image_path = os.path.join(seta_train_dir, train_img)
train_img = load_img(train_image_path, target_size = (150, 150))
plt.figure()
plt.imshow(train_img)
train_img = (np.expand_dims(train_img, 0))
train_img = tf.cast(train_img, tf.float32)
print(train_img.shape)
model.predict(train_img)
# Testing model on a random train image from set b
train_img = random.choice(setb_train_fnames)
train_image_path = os.path.join(setb_train_dir, train_img)
train_img = load_img(train_image_path, target_size = (150, 150))
plt.figure()
plt.imshow(train_img)
train_img = (np.expand_dims(train_img, 0))
train_img = tf.cast(train_img, tf.float32)
print(train_img.shape)
model.predict(train_img)
# Testing the model on the images from test set a
cal_mo = 0
cal_mt = 0
cal_unconclusive = 0
alist = []
for fname in test_fnames_seta:
    if fname.startswith('.'):
        continue
    file_path = os.path.join(seta_test_dir, fname)
    load_file = load_img(file_path, target_size = (150, 150))
    load_file = (np.expand_dims(load_file, 0))
    load_file = tf.cast(load_file, tf.float32)
    pred_img = model.predict(load_file)
    if(pred_img[0] < 0.5):
        cal_mo += 1
    elif(pred_img[0] > 0.5):
        cal_mt += 1
    else:
        print(pred_img[0], "\n")
        cal_unconclusive += 1
        alist.append(file_path)
print(alist)
print("Identified as: n")
print("Man_One:", cal_mo)
print("Man_Two:", cal_mt)
print( "Inconclusive:", cal_unconclusive)
print( "Percentage:", (cal_mo/(cal_mo cal_mt cal_unconclusive)) * 100)
a = (cal_mo/(cal_mo cal_mt cal_unconclusive)) * 100
# Testing the model on the images from test set b
cal_mo = 0
cal_mt = 0
cal_unconclusive = 0
alist = []
for fname in test_fnames_setb:
    if fname.startswith('.'):
        continue
    file_path = os.path.join(setb_test_dir, fname)
    load_file = load_img(file_path, target_size = (150, 150))
    load_file = (np.expand_dims(load_file, 0))
    load_file = tf.cast(load_file, tf.float32)
    pred_img = model.predict(load_file)
    if(pred_img[0] < 0.5):
        cal_mo += 1
    elif(pred_img[0] > 0.5):
        cal_mt += 1
    else:
        print(pred_img[0], "\n")
        cal_unconclusive += 1
        alist.append(file_path)
print(alist)
print("Identified as: n")
print("Man_One:", cal_mo)
print("Man_Two:", cal_mt)
print( "Inconclusive:", cal_unconclusive)
print( "Percentage:", (cal_mt/(cal_mo cal_mt cal_unconclusive)) * 100)
b = (cal_mt/(cal_mo cal_mt cal_unconclusive)) * 100
avg = (a b)/2
print("Average Percentage:", avg)
Please look carefully at the code above, as it is a bit long.
Please help me as soon as possible.
Thank you very much.
Answer #1:
It is possible that your validation data generator runs out before reaching 80 training epochs. Make sure you have at least 7*80 validation images.
Then check the number of elements in your mymodel.history['val_acc']. It should be the same for training and validation if you use epochs = range(len(acc)) as the x values for the plots. The problem is that your acc and val_acc have a different number of elements.
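As a minimal sketch of that check (assuming the mymodel history object and the 'acc'/'val_acc' metric keys from the code above), you could print both lengths and plot only the epochs present in both curves, which avoids the shape mismatch raised by plt.plot:
acc = mymodel.history['acc']
val_acc = mymodel.history['val_acc']
# If these lengths differ, a generator ran out of data before all 80 epochs completed
print(len(acc), len(val_acc))
# Plot only the epochs that exist in both histories
n = min(len(acc), len(val_acc))
plt.plot(range(n), acc[:n])
plt.plot(range(n), val_acc[:n])
plt.title('Training and validation accuracy')
plt.legend(['train', 'val'], loc = 'center')
plt.show()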
Comments:
1. My data is as follows — [ Training samples = 100 for Man_One and training samples = 100 for Man_Two ] [ Validation samples = 20 for Man_One and validation samples = 20 for Man_Two ] [ Test samples = 30 for Man_One and test samples = 30 for Man_Two ] Not sure which part is wrong —
2. A year ago this code ran fine, with no problems, and was even included in my master's thesis — but today this error suddenly appeared
3. If I want to use training samples = 100 and validation samples = 30, how do I write the correct code for the above — please help —
4. If you have only 200 samples for training and 40 for validation, and you have a batch_size of 20, then you should set your steps_per_epoch parameter to <= 10 and your validation_steps parameter to <= 2. Try with 10 and 2 and see if it works (see the sketch after these comments)
5. Could you please write the code or an instruction based on your idea, so it would be easier to understand
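In reply to comment 5, here is a hedged sketch of the change suggested in comment 4 (assuming 200 training images and 40 validation images in total, batch_size = 20, and the train_generator/validation_generator defined in the question); only steps_per_epoch and validation_steps differ from the original call:
# 200 training images  / batch_size 20 -> steps_per_epoch  = 10
# 40 validation images / batch_size 20 -> validation_steps = 2
mymodel = model.fit_generator(
    train_generator,
    steps_per_epoch = 10,
    epochs = 80,
    validation_data = validation_generator,
    validation_steps = 2,
    verbose = 2)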