Can I feed an intermediate result back into the CNN and get my final result? (update)

I am new to machine learning.

I got the intermediate result of layer 31 of my CNN using the following code:

conv2d = Model(inputs=self.model_ori.input,
               outputs=self.model_ori.layers[31].output)
intermediateResult = conv2d.predict(img)

I am trying to load this intermediate result back into the neural network.

Can this be done?

I tried doing the following:

newmodel = keras.Sequential(self.model_ori.layers[32:])
newmodel = newmodel.build(intermediateResult.shape)

This gave me the following error:

A merge layer should be called on a list of inputs. Received: inputs=Tensor("up_sampling2d_2/resize/ResizeNearestNeighbor:0", shape=(1, 26, 26, 128), dtype=float32) (not a list of tensors)

Here is my model summary:

Model: "model"
__________________________________________________________________________________________________
 Layer (type)                   Output Shape         Param #     Connected to
==================================================================================================
 input_1 (InputLayer)           [(None, None, None,  0           []
                                 3)]

 conv2d (Conv2D)                (None, None, None,   432         ['input_1[0][0]']
                                16)

 batch_normalization (BatchNorm  (None, None, None,   64         ['conv2d[0][0]']
 alization)                     16)

 leaky_re_lu (LeakyReLU)        (None, None, None,   0           ['batch_normalization[0][0]']
                                16)

 max_pooling2d (MaxPooling2D)   (None, None, None,   0           ['leaky_re_lu[0][0]']
                                16)

 conv2d_1 (Conv2D)              (None, None, None,   4608        ['max_pooling2d[0][0]']
                                32)

 batch_normalization_1 (BatchNo  (None, None, None,   128        ['conv2d_1[0][0]']
 rmalization)                   32)

 leaky_re_lu_1 (LeakyReLU)      (None, None, None,   0           ['batch_normalization_1[0][0]']
                                32)

 max_pooling2d_1 (MaxPooling2D)  (None, None, None,   0          ['leaky_re_lu_1[0][0]']
                                32)

 conv2d_2 (Conv2D)              (None, None, None,   18432       ['max_pooling2d_1[0][0]']
                                64)

 batch_normalization_2 (BatchNo  (None, None, None,   256        ['conv2d_2[0][0]']
 rmalization)                   64)

 leaky_re_lu_2 (LeakyReLU)      (None, None, None,   0           ['batch_normalization_2[0][0]']
                                64)

 max_pooling2d_2 (MaxPooling2D)  (None, None, None,   0          ['leaky_re_lu_2[0][0]']
                                64)

 conv2d_3 (Conv2D)              (None, None, None,   73728       ['max_pooling2d_2[0][0]']
                                128)

 batch_normalization_3 (BatchNo  (None, None, None,   512        ['conv2d_3[0][0]']
 rmalization)                   128)

 leaky_re_lu_3 (LeakyReLU)      (None, None, None,   0           ['batch_normalization_3[0][0]']
                                128)

 max_pooling2d_3 (MaxPooling2D)  (None, None, None,   0          ['leaky_re_lu_3[0][0]']
                                128)

 conv2d_4 (Conv2D)              (None, None, None,   294912      ['max_pooling2d_3[0][0]']
                                256)

 batch_normalization_4 (BatchNo  (None, None, None,   1024       ['conv2d_4[0][0]']
 rmalization)                   256)

 leaky_re_lu_4 (LeakyReLU)      (None, None, None,   0           ['batch_normalization_4[0][0]']
                                256)

 max_pooling2d_4 (MaxPooling2D)  (None, None, None,   0          ['leaky_re_lu_4[0][0]']
                                256)

 conv2d_5 (Conv2D)              (None, None, None,   1179648     ['max_pooling2d_4[0][0]']
                                512)

 batch_normalization_5 (BatchNo  (None, None, None,   2048       ['conv2d_5[0][0]']
 rmalization)                   512)

 leaky_re_lu_5 (LeakyReLU)      (None, None, None,   0           ['batch_normalization_5[0][0]']
                                512)

 max_pooling2d_5 (MaxPooling2D)  (None, None, None,   0          ['leaky_re_lu_5[0][0]']
                                512)

 conv2d_6 (Conv2D)              (None, None, None,   4718592     ['max_pooling2d_5[0][0]']
                                1024)

 batch_normalization_6 (BatchNo  (None, None, None,   4096       ['conv2d_6[0][0]']
 rmalization)                   1024)

 leaky_re_lu_6 (LeakyReLU)      (None, None, None,   0           ['batch_normalization_6[0][0]']
                                1024)

 conv2d_7 (Conv2D)              (None, None, None,   262144      ['leaky_re_lu_6[0][0]']
                                256)

 batch_normalization_7 (BatchNo  (None, None, None,   1024       ['conv2d_7[0][0]']
 rmalization)                   256)

 leaky_re_lu_7 (LeakyReLU)      (None, None, None,   0           ['batch_normalization_7[0][0]']
                                256)

 conv2d_10 (Conv2D)             (None, None, None,   32768       ['leaky_re_lu_7[0][0]']
                                128)

 batch_normalization_9 (BatchNo  (None, None, None,   512        ['conv2d_10[0][0]']
 rmalization)                   128)

 leaky_re_lu_9 (LeakyReLU)      (None, None, None,   0           ['batch_normalization_9[0][0]']
                                128)

 up_sampling2d (UpSampling2D)   (None, None, None,   0           ['leaky_re_lu_9[0][0]']
                                128)

 concatenate (Concatenate)      (None, None, None,   0           ['up_sampling2d[0][0]',
                                384)                              'leaky_re_lu_4[0][0]']

 conv2d_8 (Conv2D)              (None, None, None,   1179648     ['leaky_re_lu_7[0][0]']
                                512)

 conv2d_11 (Conv2D)             (None, None, None,   884736      ['concatenate[0][0]']
                                256)

 batch_normalization_8 (BatchNo  (None, None, None,   2048       ['conv2d_8[0][0]']
 rmalization)                   512)

 batch_normalization_10 (BatchN  (None, None, None,   1024       ['conv2d_11[0][0]']
 ormalization)                  256)

 leaky_re_lu_8 (LeakyReLU)      (None, None, None,   0           ['batch_normalization_8[0][0]']
                                512)

 leaky_re_lu_10 (LeakyReLU)     (None, None, None,   0           ['batch_normalization_10[0][0]']
                                256)

 conv2d_9 (Conv2D)              (None, None, None,   130815      ['leaky_re_lu_8[0][0]']
                                255)

 conv2d_12 (Conv2D)             (None, None, None,   65535       ['leaky_re_lu_10[0][0]']
                                255)

==================================================================================================
Total params: 8,858,734
Trainable params: 8,852,366
Non-trainable params: 6,368
__________________________________________________________________________________________________

Can someone kindly help me out?

Sincerely, Lolcocks.



Solution 1:[1]

This can be done with layer.get_weights() and layer.set_weights(), either during the training run or from a callback function.

The weights you pass to set_weights() must match the layer they are loaded into: same layer type, same place in the model sequence, and same matrix shapes and configuration.
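For instance, here is a minimal sketch with a toy Dense layer (not part of the original sample) showing the shape check that set_weights() enforces:

import tensorflow as tf

layer = tf.keras.layers.Dense(4)
layer.build(input_shape=(None, 8))       # creates kernel (8, 4) and bias (4,)
kernel, bias = layer.get_weights()       # plain NumPy arrays
layer.set_weights([kernel * 0.5, bias])  # accepted: shapes match exactly
# layer.set_weights([kernel.T, bias])    # ValueError: shape (4, 8) != (8, 4)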

[ Sample 1 ]:

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=( 32, 32, 3 )),
        tf.keras.layers.Normalization(mean=3., variance=2.),
        tf.keras.layers.Normalization(mean=4., variance=6.),
        tf.keras.layers.Conv2DTranspose(2, 3, activation='relu', padding="same", name="Conv2DTranspose_01"),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(4 * 256), 
        tf.keras.layers.Reshape((4 * 256, 1)),

        tf.keras.layers.LSTM(128, return_sequences=True, return_state=False),
        tf.keras.layers.LSTM(128, name='LSTM256'),
        tf.keras.layers.Dropout(0.2),
])

model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation='relu', name='dense64'))
model.add(tf.keras.layers.Dense(7))
model.summary()

Conv2DTranspose_01 = model.get_layer(name="Conv2DTranspose_01")
weights_01 = Conv2DTranspose_01.get_weights()[0]   # kernel, shape (3, 3, 2, 3)
weights_02 = Conv2DTranspose_01.get_weights()[1]   # bias, shape (2,)

# Load the weights back into the layer; the list must match
# the shapes returned by get_weights().
Conv2DTranspose_01.set_weights([weights_01, weights_02])
print(weights_01)

[ Output ]:

[[[[ 0.12476468 -0.35062584  0.03172061]
   [ 0.24610531  0.17492849  0.21293753]]

  [[-0.0718551   0.12790853  0.15102917]
   [-0.02433318 -0.08994108 -0.30636993]]
...

[ Callback Fn ]:

class CustomCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        print('weights: ' + str(self.model.get_weights()))
        layer = self.model.get_layer(name="Conv2DTranspose_01")
        weights_01 = layer.get_weights()[0]
        weights_02 = layer.get_weights()[1]
        # Write the weights back inside the callback so it happens
        # at the end of every epoch; modify them here first if needed.
        layer.set_weights([weights_01, weights_02])

custom_callback = CustomCallback()

history = model.fit(x_train, y_train, epochs=3000, batch_size=5, validation_data=(x_test, y_test), callbacks=[custom_callback])
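For the model in the question itself, get_weights/set_weights is only half the story: keras.Sequential(self.model_ori.layers[32:]) fails because the tail is not a simple chain. The Concatenate layer needs a second tensor (leaky_re_lu_4), and the conv2d_8 head still reads from leaky_re_lu_7, which also lies before the cut. Below is a sketch of one way to rebuild the tail with the functional API, reusing the trained layers by name; the layer names, layer index, and channel counts are taken from the summary above, and model_ori and img stand for the question's objects.

[ Sample 2 ]:

from tensorflow.keras import Input, Model

m = model_ori  # the trained model from the question

# Three tensors cross the cut after layer 31 (conv2d_10);
# each becomes an explicit Input of the tail model.
x_in  = Input(shape=(None, None, 128))  # conv2d_10 output (layer 31)
skip7 = Input(shape=(None, None, 256))  # leaky_re_lu_7 output, feeds conv2d_8
skip4 = Input(shape=(None, None, 256))  # leaky_re_lu_4 output, feeds concatenate

# Second head: the upsampling branch.
x = m.get_layer('batch_normalization_9')(x_in)
x = m.get_layer('leaky_re_lu_9')(x)
x = m.get_layer('up_sampling2d')(x)
x = m.get_layer('concatenate')([x, skip4])  # the merge layer now gets its list
x = m.get_layer('conv2d_11')(x)
x = m.get_layer('batch_normalization_10')(x)
x = m.get_layer('leaky_re_lu_10')(x)
head2 = m.get_layer('conv2d_12')(x)

# First head.
y = m.get_layer('conv2d_8')(skip7)
y = m.get_layer('batch_normalization_8')(y)
y = m.get_layer('leaky_re_lu_8')(y)
head1 = m.get_layer('conv2d_9')(y)

tail = Model(inputs=[x_in, skip7, skip4], outputs=[head1, head2])

# Export all three intermediate tensors, then feed them back in:
features = Model(inputs=m.input,
                 outputs=[m.layers[31].output,
                          m.get_layer('leaky_re_lu_7').output,
                          m.get_layer('leaky_re_lu_4').output])
f31, f7, f4 = features.predict(img)
out1, out2 = tail.predict([f31, f7, f4])

Because the tail calls the original layer objects, it shares their trained weights, so no explicit weight copying is needed for this part.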


Sources

This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.

Source: Stack Overflow

Solution 1: Martijn Pieters