How can I use tensorflow-privacy with SeqGAN the right way?

I'm trying to replace my original optimizer with DPAdamGaussianOptimizer from TensorFlow Privacy so that I can protect privacy during training. I created the DPAdamGaussianOptimizer following the docs at https://tensorflow.google.cn/responsible_ai/privacy/api_docs/python/tf_privacy/v1, and my code looks like this:

class Discriminator(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """

def __init__(
        self, sequence_length, num_classes, vocab_size,
        embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
    # Placeholders for input, output and dropout
    self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
    self.input_y = tf.placeholder(tf.int64, [None, num_classes], name="input_y")
    # self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0)
    
    with tf.variable_scope('discriminator'):

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for filter_size, num_filter in zip(filter_sizes, num_filters):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filter]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filter]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)
        
        # Combine all the pooled features
        num_filters_total = sum(num_filters)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        # Add highway
        with tf.name_scope("highway"):
            self.h_highway = highway(self.h_pool_flat, self.h_pool_flat.get_shape()[1], 1, 0)

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_highway, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.Variable(tf.truncated_normal([num_filters_total, num_classes], stddev=0.1), name="W")
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.ypred_for_auc = tf.nn.softmax(self.scores)
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # CalculateMean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

    self.params = [param for param in tf.trainable_variables() if 'discriminator' in param.name]
    # d_optimizer = tf.train.AdamOptimizer(1e-4)
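    # NOTE: per the linked v1 docs, the positional args below map to
    # l2_norm_clip, noise_multiplier, num_microbatches (learning_rate is a kwarg)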
    d_optimizer = dptf.DPAdamGaussianOptimizer(1.0, 0.001, 1e-4)
    # old_grads_and_vars=old_optimizer.compute_gradients(self.loss, self.params, aggregation_method=2)
    # print(old_grads_and_vars)
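    # NOTE: in graph mode, the DP optimizer's compute_gradients expects an
    # unreduced (per-example) loss vector, while self.loss is already a scalar mean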
    grads_and_vars = d_optimizer.compute_gradients(self.loss, self.params)
    print(grads_and_vars)
    self.train_op = d_optimizer.apply_gradients(grads_and_vars)

It shows:

    TypeError: Value passed to parameter 'shape' has DataType float32 not in list of allowed values: int32, int64

My TensorFlow version is 1.14, my Python version is 3.6, and my tensorflow-privacy version is 0.3.0. Can anyone help me with this, please?
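For reference, here is a minimal sketch of what I think the keyword-argument form of the same call would look like, based on the linked v1 docs (the name per_example_losses is mine and just stands for the unreduced cross-entropy vector; the import path may differ between tensorflow-privacy versions):

    import tensorflow as tf
    from tensorflow_privacy.privacy.optimizers import dp_optimizer

    # Keyword form of the constructor call from my code above. In the positional
    # call, 1e-4 lands in num_microbatches, which ends up inside a tensor shape
    # and therefore has to be an integer, not a float.
    d_optimizer = dp_optimizer.DPAdamGaussianOptimizer(
        l2_norm_clip=1.0,        # clip norm applied to each microbatch gradient
        noise_multiplier=0.001,  # noise stddev as a fraction of the clip norm
        num_microbatches=1,      # must be an int: the loss is reshaped per microbatch
        learning_rate=1e-4)      # forwarded to the wrapped AdamOptimizer

    # Pass the per-example loss vector rather than the scalar mean, so the
    # optimizer can clip and noise each microbatch separately. (Labels for
    # softmax_cross_entropy_with_logits need to be float, as in the commented
    # float32 placeholder above.)
    per_example_losses = tf.nn.softmax_cross_entropy_with_logits(
        logits=self.scores, labels=self.input_y)
    grads_and_vars = d_optimizer.compute_gradients(per_example_losses,
                                                   var_list=self.params)
    train_op = d_optimizer.apply_gradients(grads_and_vars)

If I read the v1 source right, compute_gradients reshapes the loss with tf.reshape(loss, [num_microbatches, -1]), so a float num_microbatches would produce exactly this shape-dtype error.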



Source: Stack Overflow, licensed under CC BY-SA 3.0.