Predicted image loses saturation compared to the original after training
I am trying super-resolution of RGB images. I have an input image and an output image; I run the model, and when I predict, the output image loses saturation compared to the original image. The image size is 512x512 pixels. I don't know if this is happening because of the Conv2DTranspose layer.
ImageDataGenerator
# NOTE(review): `tf` is not imported anywhere in this snippet — an
# `import tensorflow as tf` must precede this line.  Forcing eager
# execution of tf.functions is useful for debugging but slows training;
# presumably left on by accident — confirm before training for real.
tf.config.run_functions_eagerly(True)
import os
import numpy as np
import cv2
def generator(idir, odir, batch_size, shuffle):
    """Yield batches of (input, target) image pairs for super-resolution training.

    Each image is read with OpenCV, resized to 512x512, converted BGR->RGB,
    and scaled to [0, 1].  The target image is looked up by the *same
    filename* as the input, so both directories are assumed to contain
    identically-named files.

    Args:
        idir: directory holding the input (degraded) images.
        odir: directory holding the target (ground-truth) images; must
            contain the same filenames as ``idir``.
        batch_size: number of image pairs per yielded batch.
        shuffle: if True, pick pairs uniformly at random each step;
            otherwise iterate the sorted listing cyclically.

    Yields:
        Tuple ``(inputs, targets)`` of float arrays, each with shape
        ``(batch_size, 512, 512, 3)`` and values in [0, 1].
    """
    # Sort once: os.listdir order is arbitrary, and sorting makes the
    # non-shuffled epoch order deterministic across runs.
    i_list = sorted(os.listdir(idir))
    sample_count = len(i_list)
    batch_index = 0
    while True:
        input_image_batch = []
        output_image_batch = []
        for i in range(batch_index * batch_size, (batch_index + 1) * batch_size):
            if shuffle:
                # np.random.randint's `high` is EXCLUSIVE: using
                # sample_count (not sample_count-1, as before) lets the
                # last image be selected too.
                m = np.random.randint(low=0, high=sample_count)
            else:
                m = i % sample_count  # cycle over the available images
            name = i_list[m]
            # Same filename in both dirs keeps the input/target pair aligned.
            input_image = cv2.imread(os.path.join(idir, name))
            input_image = cv2.resize(input_image, (512, 512))
            input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
            output_image = cv2.imread(os.path.join(odir, name))
            output_image = cv2.resize(output_image, (512, 512))
            output_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)
            input_image_batch.append(input_image)
            output_image_batch.append(output_image)
        input_image_array = np.array(input_image_batch) / 255.0
        output_image_array = np.array(output_image_batch) / 255.0
        batch_index += 1
        # `>=` (was `>`) restarts exactly when the listing is exhausted.
        if batch_index * batch_size >= sample_count:
            batch_index = 0
        yield (input_image_array, output_image_array)
Model
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
from keras.metrics import MeanIoU
kernel_initializer = 'he_uniform' # also try 'he_normal' but model not converging...
################################################################
def simple_unet_model(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS):
    """Build a 4-level U-Net that maps an RGB image to an RGB image in [0, 1].

    Encoder: four Conv2D(relu)+Dropout(0.5)+MaxPool stages (16->128 filters)
    plus a 256-filter bottleneck.  Decoder: Conv2DTranspose upsampling with
    skip-connection concatenation and a matching Conv2D+Dropout at each level.
    The head is a 1x1 Conv2D with sigmoid, so outputs are bounded to [0, 1].

    Args:
        IMG_HEIGHT: input image height in pixels.
        IMG_WIDTH: input image width in pixels.
        IMG_CHANNELS: number of input channels (3 for RGB).

    Returns:
        An uncompiled ``keras.Model``; also prints its summary.
    """
    def conv_block(filters, tensor):
        # One convolution stage: 3x3 relu conv followed by heavy dropout.
        x = Conv2D(filters, (3, 3), kernel_initializer=kernel_initializer,
                   activation='relu', padding='same')(tensor)
        return Dropout(0.5)(x)

    def up_block(filters, tensor, skip):
        # Upsample 2x, fuse the encoder skip connection, then convolve.
        x = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(tensor)
        x = concatenate([x, skip], axis=3)
        return conv_block(filters, x)

    inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))

    # Contracting path (inputs are assumed pre-normalized to [0, 1]).
    c1 = conv_block(16, inputs)
    c2 = conv_block(32, MaxPooling2D((2, 2))(c1))
    c3 = conv_block(64, MaxPooling2D((2, 2))(c2))
    c4 = conv_block(128, MaxPooling2D((2, 2))(c3))
    c5 = conv_block(256, MaxPooling2D(pool_size=(2, 2))(c4))  # bottleneck

    # Expansive path with skip connections.
    c6 = up_block(128, c5, c4)
    c7 = up_block(64, c6, c3)
    c8 = up_block(32, c7, c2)
    c9 = up_block(16, c8, c1)

    # Sigmoid head keeps each RGB channel in [0, 1].
    outputs = Conv2D(3, (1, 1), activation='sigmoid')(c9)

    model = Model(inputs, outputs)
    model.summary()
    return model
# NOTE(review): `model`, `psnr`, `ssim` and `tf` are all undefined in this
# snippet — `model` presumably comes from simple_unet_model(...), and the
# psnr/ssim metric callables must be defined or imported elsewhere; verify
# before running.
model.compile(optimizer='adam',loss='mean_squared_error',metrics=[psnr,ssim,tf.losses.mean_squared_error])
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow
Solution source: Stack Overflow