I am trying to connect a fully-connected (FC) layer and convolutional layers to denoise, but it seems the input data only passes through the last FC layer, and the result is not good. I would like to know what is wrong with my code and how to fix it.
My code:
def build_network(self, built_for_training=False):
    # Build a network similar to SRCNN: a fully-convolutional network for channel noise estimation.
    # built_for_training: whether the network is built for training or for testing. For testing,
    # Xavier initialization is not needed since the model will be loaded later.
    x_in = tf.placeholder(tf.float32, [None, self.net_config.feature_length])    # input data, feature_length = 576
    x_in_reshape = tf.reshape(x_in, (-1, self.net_config.feature_length, 1, 1))  # shape (?, 576, 1, 1)
    layer_output = {}
    for layer in range(self.net_config.total_layers):  # construct the layers, total_layers = 4
        self.conv_filter_name[layer] = "conv_layer%d" % layer
        self.bias_name[layer] = "b%d" % layer
        self.bia_name[layer] = "b_01%d" % layer      # FC bias name
        self.weight_name[layer] = "w_01%d" % layer   # FC weight name
        if layer == 0:
            x_input = x_in_reshape
            in_channels = 1
        else:
            x_input = layer_output[layer - 1]
            in_channels = self.net_config.feature_map_nums[layer - 1]  # feature_map_nums = np.array([64, 32, 16, 1])
        out_channels = self.net_config.feature_map_nums[layer]
        shape = [self.net_config.filter_sizes[layer], 1, in_channels, out_channels]
        if built_for_training:
            with tf.variable_scope("layer_%d" % layer):
                # convolutional parameters
                self.conv_filter[layer] = tf.get_variable("w", shape, tf.float32,
                                                          tf.contrib.layers.xavier_initializer())
                self.bias[layer] = tf.get_variable("b", shape[-1], tf.float32,
                                                   tf.contrib.layers.xavier_initializer())
                # fully-connected parameters (576 = feature_length)
                self.bia[layer] = tf.get_variable("b_01", 576, tf.float32,
                                                  tf.contrib.layers.xavier_initializer())
                self.weight[layer] = tf.get_variable("w_01", [576, 576], tf.float32,
                                                     tf.contrib.layers.xavier_initializer())
                # "best" copies used to snapshot the current parameters
                self.best_conv_filter[layer] = tf.Variable(
                    tf.ones([self.net_config.filter_sizes[layer], 1, in_channels, out_channels], tf.float32),
                    dtype=tf.float32)
                self.best_bias[layer] = tf.Variable(tf.ones([out_channels], tf.float32), dtype=tf.float32)
                self.best_bia[layer] = tf.Variable(tf.ones([576], tf.float32), dtype=tf.float32)
                self.best_weight[layer] = tf.Variable(tf.ones([576, 576], tf.float32), dtype=tf.float32)
                self.assign_best_conv_filter[layer] = self.best_conv_filter[layer].assign(self.conv_filter[layer])
                self.assign_best_bias[layer] = self.best_bias[layer].assign(self.bias[layer])
                self.assign_best_bia[layer] = self.best_bia[layer].assign(self.bia[layer])
                self.assign_best_weight[layer] = self.best_weight[layer].assign(self.weight[layer])
        else:
            # just build the tensors for testing; their values will be loaded later
            self.conv_filter[layer] = tf.Variable(
                tf.random_normal([self.net_config.filter_sizes[layer], 1, in_channels, out_channels], 0, 1, tf.float32),
                dtype=tf.float32, name=self.conv_filter_name[layer])
            self.weight[layer] = tf.Variable(tf.random_normal([576, 576], 0, 1, tf.float32),
                                             dtype=tf.float32, name=self.weight_name[layer])
            self.bias[layer] = tf.Variable(tf.random_normal([out_channels], 0, 1, tf.float32),
                                           dtype=tf.float32, name=self.bias_name[layer])
            self.bia[layer] = tf.Variable(tf.random_normal([576], 0, 1, tf.float32),
                                          dtype=tf.float32, name=self.bia_name[layer])
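        # NOTE: self.weight[layer] ("w_01") and self.bia[layer] ("b_01") are the FC parameters,
        # but the forward pass below only uses self.conv_filter[layer] and self.bias[layer].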
        if layer == self.net_config.total_layers - 1:
            # last layer (currently the same op as the other branch)
            layer_output[layer] = tf.nn.relu(
                tf.nn.conv2d(x_input, self.conv_filter[layer], [1, 1, 1, 1], 'SAME') + self.bias[layer])
        else:
            # conv + ReLU activation
            layer_output[layer] = tf.nn.relu(
                tf.nn.conv2d(x_input, self.conv_filter[layer], [1, 1, 1, 1], 'SAME') + self.bias[layer])
    y_out = layer_output[self.net_config.total_layers - 1]
    y_out = tf.reshape(y_out, (-1, self.net_config.label_length))  # optionally + x_in for a ResNet-style skip; shape (?, 576)
    return x_in, y_out
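For context, the connection I am aiming for is a small SRCNN-style conv stack followed by a single 576x576 fully-connected layer applied to the flattened conv output (the w_01 / b_01 parameters above were meant for this). Below is a minimal, standalone sketch of that intended wiring; it is only an illustration under my assumptions, not my actual class, and the filter sizes and variable names in it are placeholders:

import tensorflow as tf

feature_length = 576  # same length as self.net_config.feature_length in my code

x_in = tf.placeholder(tf.float32, [None, feature_length])
x = tf.reshape(x_in, (-1, feature_length, 1, 1))

# SRCNN-style conv stack: (filter_size, out_channels) per layer, placeholder values
for i, (filt, out_ch) in enumerate([(9, 64), (5, 32), (3, 16), (3, 1)]):
    in_ch = int(x.get_shape()[-1])
    with tf.variable_scope("conv_%d" % i):
        w = tf.get_variable("w", [filt, 1, in_ch, out_ch], tf.float32,
                            tf.contrib.layers.xavier_initializer())
        b = tf.get_variable("b", [out_ch], tf.float32, tf.zeros_initializer())
    x = tf.nn.relu(tf.nn.conv2d(x, w, [1, 1, 1, 1], 'SAME') + b)

# fully-connected layer on the flattened conv output
flat = tf.reshape(x, (-1, feature_length))  # last conv layer has 1 channel -> shape (?, 576)
w_fc = tf.get_variable("w_01", [feature_length, feature_length], tf.float32,
                       tf.contrib.layers.xavier_initializer())
b_fc = tf.get_variable("b_01", [feature_length], tf.float32, tf.zeros_initializer())
y_out = tf.matmul(flat, w_fc) + b_fc  # denoised estimate, shape (?, 576)

Is this the right place to apply the FC weights, i.e. after the conv loop inside build_network, or should they be wired in differently?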
Any guidance would be appreciated. Thank you!