TypeError: Fetch argument None has invalid type <class 'NoneType'>, Tensor passing problem

I encapsulated a GCN network in extract_feature.py, but when I call its method from main.py to get the output of the GCN network, it reports the error in the title.

The extract_feature.py file is as follows:

import tensorflow as tf
import numpy as np
import networkx as nx

class Feature_Extract:

    def __init__(self, config, env, networkServices):
        self.config = config
        self.num_vnfs = config.num_vnfs
        self.vnf_properties = env.vnf_properties
        self.vnf_bandwidth = env.vnf_bandwidth
        self.containers = networkServices.cells.reshape(config.batch_size, config.high_index, config.max_length)
        self.container_length = networkServices.container_length
        self.batch_length = networkServices.batch_length
        self.relations = networkServices.relations
        self.features = None  # stays None until convert() assigns it
        self.gcn_constant = None
        self.H_0 = None

    def convert(self):

        for batch in range(self.config.batch_size):
            batch_feature = None
            for slice_id in range(self.batch_length[batch]):
                length = self.container_length[batch][slice_id]
                container = self.containers[batch][slice_id][:length]
                relation = self.relations[batch][slice_id]
                relation = np.array(relation) - 1
                graph = self.list2graph(relation=relation)

                gcn_out = self.gcn_model(graph, container)

                if batch_feature is not None:
                    batch_feature = tf.concat([batch_feature, gcn_out], axis=0)
                else:
                    batch_feature = gcn_out

            batch_feature = tf.pad(batch_feature, [[0, 10 * (self.config.high_index - self.batch_length[batch] - 1)], [0, 0]])  # batches have different lengths; pad to a uniform length before merging
            if self.features is not None:
                batch_feature = tf.expand_dims(batch_feature, axis=0)
                self.features = tf.concat([self.features, batch_feature], axis=0)
            else:
                batch_feature = tf.expand_dims(batch_feature, axis=0)
                self.features = batch_feature

        return self.features

    def list2graph(self, relation):
        graph = nx.Graph()
        graph.add_nodes_from(np.array(range(self.num_vnfs)))   # add all VNFs so features line up with VNF positions: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 10 VNF types
        graph.add_edges_from(relation)

        return graph

    def container_features(self, graph, container):
        cpu_list = [0 for _ in range(self.num_vnfs)]
        mem_list = [0 for _ in range(self.num_vnfs)]
        sto_list = [0 for _ in range(self.num_vnfs)]
        for node in container:
            cpu_list[node - 1] = self.vnf_properties[node]["cpu_request"]
            mem_list[node - 1] = self.vnf_properties[node]["memory_request"]
            sto_list[node - 1] = self.vnf_properties[node]["storage_request"]

        bandwidth = np.zeros((self.num_vnfs, self.num_vnfs))
        for e1, e2 in graph.edges():
            bandwidth[e1 - 1][e2 - 1] = self.vnf_bandwidth[e1][e2]

        H_0 = 0.1 * bandwidth + np.diag(cpu_list) + 0.1 * np.diag(mem_list) + 0.01 * np.diag(sto_list)

        return H_0

    def get_graph_matrix(self, graph, container):
        """
        Get the correlation matrix of the graph network.
        *******************************************
        H^(l+1) = sigma(D_tilde^(-1/2) * A_tilde * D_tilde^(-1/2) * H^l * W^l)
        *******************************************
        H^l:     input features of layer l
        H^(l+1): output features of layer l
        sigma:   activation function
        A:       adjacency matrix of the graph network
        A_tilde: A + I, where I is the identity matrix
        D_tilde: degree matrix of A_tilde
        W^l:     parameter matrix of layer l
        """
        A = nx.adj_matrix(graph)
        A_tilde = A + np.identity(n=A.shape[0])
        D = np.squeeze(np.sum(np.array(A_tilde), axis=1))  # degrees of A_tilde
        D_tilde_inv_sqrt = np.diag(np.power(D, -1 / 2))    # D_tilde^(-1/2) as a diagonal matrix
        H_0 = self.container_features(graph, container)

        return np.dot(np.dot(D_tilde_inv_sqrt, A_tilde), D_tilde_inv_sqrt), H_0
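        # A quick numeric check of the normalization above (hypothetical
        # 2-node graph with a single edge, not from the original code):
        #   A = [[0, 1], [1, 0]]  ->  A_tilde = [[1, 1], [1, 1]], degrees [2, 2]
        #   D_tilde^(-1/2) * A_tilde * D_tilde^(-1/2) = [[0.5, 0.5], [0.5, 0.5]]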

    def gcn_model(self, graph, container):

        gcn_constant, H_0 = self.get_graph_matrix(graph, container)

        self.gcn_constant = gcn_constant
        self.H_0 = H_0

        with tf.variable_scope('gcn', reuse=tf.AUTO_REUSE):
            wc1 = tf.get_variable('wc1', [self.config.num_vnfs, self.config.gcn_layer[0]], dtype=tf.float64, initializer=tf.contrib.layers.xavier_initializer())
            wc2 = tf.get_variable('wc2', [self.config.gcn_layer[0], self.config.gcn_layer[1]], dtype=tf.float64, initializer=tf.contrib.layers.xavier_initializer())
            wc3 = tf.get_variable('wc3', [self.config.gcn_layer[1], self.config.gcn_layer[2]], dtype=tf.float64, initializer=tf.contrib.layers.xavier_initializer())

            # GCN embedding
            fc1 = tf.matmul(tf.matmul(H_0, gcn_constant), wc1)
            fc1 = tf.nn.relu(fc1)

            fc2 = tf.matmul(tf.matmul(gcn_constant, fc1), wc2)
            fc2 = tf.nn.relu(fc2)

            fc3 = tf.matmul(tf.matmul(gcn_constant, fc2), wc3)
            fc3 = tf.nn.relu(fc3)

            fc4 = tf.layers.dense(inputs=fc3, units=10, activation=tf.nn.relu)

            den_out = tf.pad(fc4, [[0, self.config.max_length - self.num_vnfs],
                                   [0, self.config.max_length - self.num_vnfs]])  # [max_length, embeddings]
            gcn_out = tf.cast(den_out, dtype=tf.float32)

        return gcn_out

The way I get self.features is:

features = sess.run(extractor.features)

I also tried another way to get the GCN output:

features = extractor.convert()
features = sess.run(features)

But it reported another error:

FailedPreconditionError (see above for traceback): Attempting to use uninitialized value gcn/wc1

But I have done the following initialization in main.py:

sess.run(tf.global_variables_initializer())

Help me PLZ.



Solution 1:[1]

From the answer by Mamim in Attempting to use uninitialized value InceptionV3/Mixed_6d/Branch_3/Conv2d_0b_1x I found the answer to my question:

I was running sess.run(tf.global_variables_initializer()) before extractor = Feature_Extract(config, env, networkServices). Since convert() only creates the gcn/wc* variables afterwards via tf.get_variable, the initializer never saw them, hence the FailedPreconditionError. Running the initializer after the graph is fully built fixes it.

What a stupid mistake I made!
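
A minimal sketch of the corrected ordering (reusing config, env, and networkServices from the question; nothing else changes): build the whole graph first, then run the initializer, then fetch.

extractor = Feature_Extract(config, env, networkServices)
features_op = extractor.convert()  # builds the TF graph; tf.get_variable creates gcn/wc1..wc3 here

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # runs only after every variable exists
    features = sess.run(features_op)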

Solution 2:[2]

The problem is the graph you pass in. I guess you build it from a configuration that I don't have, so I created my own stub classes and graph, and it works once a valid graph exists. The stubs below simply mirror the attributes that Feature_Extract reads:

class config:
    def __init__(self, num_vnfs, batch_size, high_index, max_length):
        self.num_vnfs = num_vnfs
        self.batch_size = batch_size
        self.high_index = high_index
        self.max_length = max_length
        # note: Feature_Extract also reads config.gcn_layer in gcn_model

class environment:
    def __init__(self, vnf_properties, vnf_bandwidth):
        self.vnf_properties = vnf_properties
        self.vnf_bandwidth = vnf_bandwidth

class networkServices:
    def __init__(self, cells, container_length, batch_length, relations):
        self.cells = cells
        self.container_length = container_length
        self.batch_length = batch_length
        self.relations = relations
Looking at the function gcn_model and at how batch_feature is built, this output is what the optimized loss ultimately consumes.

extractor = Feature_Extract(config, env, nwtx)
with tf.compat.v1.Session() as sess:
    features = extractor.convert()  # build the graph; this creates the gcn variables and assigns extractor.features
    sess.run(tf.compat.v1.global_variables_initializer())  # actually run the initializer, after the graph exists
    features = sess.run(extractor.features)
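
Calling convert() before fetching also avoids the original TypeError: extractor.features is set to None in __init__ and is only assigned inside convert(). As a standalone illustration (not part of the original code), fetching a plain Python None reproduces the exact message from the title:

import tensorflow as tf

with tf.compat.v1.Session() as sess:
    value = None     # stands in for extractor.features before convert() has run
    sess.run(value)  # TypeError: Fetch argument None has invalid type <class 'NoneType'>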

... Adjacency matrix

Sources

This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.

Source: Stack Overflow

Solution 1: Acbogu
Solution 2: Martijn Pieters