Implementing LRP on a retrained VGG-16 model to predict and classify real and forged images

#python #tensorflow #keras #vgg-net

Question:

We are trying to implement Layer-wise Relevance Propagation (LRP) on a retrained VGG-16 model.

A brief summary of our model:

    import numpy as np
    import tensorflow as tf
    from tensorflow.keras import backend as K
    from tensorflow.python.ops import gen_nn_ops

    import utilss  # our helper module; predict_labels, get_gammas and get_heatmaps also come from our helper code

    # raw conv2d input-gradient op (TF1-style), used in backprop_conv2d below
    conv2d_backprop_input = tf.compat.v1.nn.conv2d_backprop_input

    class LayerwiseRelevancePropagation:

        def __init__(self, model, alpha=2, epsilon=1e-7):
            self.model = model
            self.alpha = alpha
            self.beta = 1 - alpha
            self.epsilon = epsilon

            self.names, self.activations, self.weights = utilss.get_model_params(self.model)
            self.num_layers = len(self.names)

            self.relevance = self.compute_relevances()
            self.lrp_runner = K.function(inputs=[self.model.input],
                                         outputs=[self.relevance])

        def compute_relevances(self):
            # Start from the model output and propagate relevance backwards,
            # dispatching on the name of each layer.
            r = self.model.output
            for i in range(self.num_layers - 1):
                if 'dense' in self.names[i + 1]:
                    r = self.backprop_fc(self.weights[i + 1][0], self.weights[i + 1][1],
                                         self.activations[i], r)
                elif 'flatten' in self.names[i + 1]:
                    r = self.backprop_flatten(self.activations[i], r)
                elif 'pool' in self.names[i + 1]:
                    r = self.backprop_max_pool2d(self.activations[i], r)
                elif 'conv' in self.names[i + 1]:
                    r = self.backprop_conv2d(self.weights[i + 1][0], self.weights[i + 1][1],
                                             self.activations[i], r)
                elif 'dropout' in self.names[i + 1]:
                    pass  # dropout is the identity at inference; relevance passes through unchanged
                else:
                    raise Exception('Layer not recognized: {}'.format(self.names[i + 1]))
            return r

        def backprop_fc(self, w, b, a, r):
            # Positive part of the alpha-beta rule
            w_p = K.maximum(w, 0.)
            b_p = K.maximum(b, 0.)
            z_p = K.dot(a, w_p) + b_p + self.epsilon
            s_p = r / z_p
            c_p = K.dot(s_p, K.transpose(w_p))

            # Negative part
            w_n = K.minimum(w, 0.)
            b_n = K.minimum(b, 0.)
            z_n = K.dot(a, w_n) + b_n - self.epsilon
            s_n = r / z_n
            c_n = K.dot(s_n, K.transpose(w_n))

            return a * (self.alpha * c_p + self.beta * c_n)

        def backprop_flatten(self, a, r):
            # Undo the flatten: reshape relevance back to the activation's shape
            shape = a.get_shape().as_list()
            shape[0] = -1
            return K.reshape(r, shape)

        def backprop_max_pool2d(self, a, r, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1)):
            # If the incoming relevance does not have the pooled output's rank,
            # reshape it to this pooling layer's output shape first.
            ashape = a.get_shape().as_list()
            rshape = r.get_shape().as_list()
            if len(ashape) != len(rshape):
                r = tf.reshape(r, (-1, int(np.ceil(ashape[1] / 2.0)),
                                   int(np.ceil(ashape[2] / 2.0)), ashape[3]))
            z = tf.nn.max_pool(a, ksize=ksize, strides=strides, padding='SAME') + self.epsilon
            s = r / z
            # in older TF versions this op is gen_nn_ops._max_pool_grad
            c = gen_nn_ops.max_pool_grad(a, z, s, ksize, strides, 'SAME')
            return a * c

        def backprop_conv2d(self, w, b, a, r, strides=(1, 1, 1, 1)):
            # Positive part of the alpha-beta rule
            w_p = K.maximum(w, 0.)
            b_p = K.maximum(b, 0.)
            z_p = K.conv2d(a, kernel=w_p, strides=strides[1:-1], padding='same') + b_p + self.epsilon
            s_p = r / z_p
            c_p = conv2d_backprop_input(K.shape(a), w_p, s_p, strides, padding='SAME')

            # Negative part
            w_n = K.minimum(w, 0.)
            b_n = K.minimum(b, 0.)
            z_n = K.conv2d(a, kernel=w_n, strides=strides[1:-1], padding='same') + b_n - self.epsilon
            s_n = r / z_n
            c_n = conv2d_backprop_input(K.shape(a), w_n, s_n, strides, padding='SAME')

            return a * (self.alpha * c_p + self.beta * c_n)

        def predict_labels(self, images):
            return predict_labels(self.model, images)

        def run_lrp(self, images):
            print("Running LRP on {0} images...".format(len(images)))
            return self.lrp_runner([images])[0]

        def compute_heatmaps(self, images, g=0.2, cmap_type='rainbow', **kwargs):
            lrps = self.run_lrp(images)
            print("LRP run successfully...")
            gammas = get_gammas(lrps, g=g, **kwargs)
            print("Gamma Correction completed...")
            heatmaps = get_heatmaps(gammas, cmap_type=cmap_type, **kwargs)
            return heatmaps
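For reference, backprop_fc and backprop_conv2d are our attempt at the LRP-αβ rule (with α = 2 and β = 1 − α = −1, as set in __init__):

$$R_j = \sum_k \left( \alpha\,\frac{z_{jk}^{+}}{\sum_{j'} z_{j'k}^{+} + \epsilon} + \beta\,\frac{z_{jk}^{-}}{\sum_{j'} z_{j'k}^{-} - \epsilon} \right) R_k, \qquad z_{jk} = a_j w_{jk},$$

where the positive and negative parts of the contributions correspond to the c_p and c_n terms in the code and ε is only there for numerical stability.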

This is our LRP code, and we are getting two errors from it.
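For reference, here is roughly how we drive the class (a minimal sketch; `retrained_vgg16.h5` and the random batch are placeholders rather than our real model and data):

    import numpy as np
    from tensorflow.keras.models import load_model

    model = load_model('retrained_vgg16.h5')  # placeholder path for our retrained VGG-16
    images = np.random.rand(4, 128, 128, 3).astype('float32')  # stand-in for a batch of real/forged images

    lrp = LayerwiseRelevancePropagation(model)
    heatmaps = lrp.compute_heatmaps(images)

The first error is: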

    TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
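From what we could find, this TypeError typically appears when `K.function` is built from symbolic Keras tensors while TensorFlow 2.x is running eagerly. A minimal sketch of the workaround we came across (assuming TF 2.x; we are not sure it is the right fix for our case):

    import tensorflow as tf

    # Switch to TF1-style graph mode so K.function can be constructed from
    # the model's symbolic tensors; this must run before the model is
    # built or loaded.
    tf.compat.v1.disable_eager_execution()

    model = load_model('retrained_vgg16.h5')  # placeholder path, as above
    lrp = LayerwiseRelevancePropagation(model)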

The second error, with its traceback, is:

    > <ipython-input-49-0e52a9054e67> in backprop_conv2d(self, w, b, a, r, strides)
         70     b_p = K.maximum(b, 0.)
         71     z_p = K.conv2d(a, kernel=w_p, strides=strides[1:-1], padding='same') + b_p + self.epsilon
    ---> 72     s_p = r / z_p
         73     c_p = conv2d_backprop_input(K.shape(a), w_p, s_p, strides, padding='SAME')

    ValueError: Dimensions must be equal, but are 2 and 64 for '{{node tf.math.truediv_1/truediv}} = RealDiv[T=DT_FLOAT](Placeholder, Placeholder_1)' with input shapes: [?,2], [?,128,128,64].
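Our reading of the ValueError is that the relevance tensor `r` still has the model-output shape `[?, 2]` by the time it reaches a convolutional layer, i.e. the dense/flatten layers in between were never matched in `compute_relevances` (this could happen if, for example, the retrained VGG-16 is nested inside our model as a single functional layer whose name matches none of the branches). A small diagnostic sketch we could run to check this (assuming `utilss.get_model_params` returns one entry per layer):

    # Print what compute_relevances will iterate over; a name such as
    # 'vgg16' (a nested base model) would match none of the
    # dense/flatten/pool/conv/dropout branches, so r would keep its
    # [?, 2] shape until the division fails at a conv layer.
    names, activations, weights = utilss.get_model_params(model)
    for name, act in zip(names, activations):
        print(name, act.shape)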

We are not able to figure out why we are getting these errors, as we are new to implementing LRP.