TensorFlow train function with multiple layers


I am new to TensorFlow and trying to understand it. I managed to create a one-layer model, but I would now like to add 2 more layers. How can I make my train function work? I would like to train it with hundreds of X and Y values. I have defined all the values I need — the weight and bias of each layer — but I don't understand how to use them in my train function. And once the model is trained, how can I use it, as I attempt in the last part of the code?

import numpy as np
import tensorflow as tf

# Report the TF runtime version and whether eager mode is active.
print(f"TensorFlow version: {tf.__version__}")
print(f"Eager execution: {tf.executing_eagerly()}")

# Training data: one sample with four features, and its four target values.
# (The brackets were unbalanced in the original — the literals never closed.)
x = np.array([
    [10, 10, 30, 20],
])

y = np.array([[10, 1, 1, 1]])

class Model(object):
    """Three-layer model where each layer computes an elementwise x * W_i + b_i.

    NOTE(review): tf.multiply is elementwise, not a matrix product, so every
    layer's weight matrix must match the input's shape — confirm this is the
    intended architecture.
    """

    def __init__(self, x, y):
        # One weight matrix and one bias vector per layer, randomly initialised.
        w_shape = (len(x), len(x[0]))
        b_shape = (len(y),)
        self.W = tf.Variable(tf.random.normal(w_shape))
        self.b = tf.Variable(tf.random.normal(b_shape))
        self.W1 = tf.Variable(tf.random.normal(w_shape))
        self.b1 = tf.Variable(tf.random.normal(b_shape))
        self.W2 = tf.Variable(tf.random.normal(w_shape))
        self.b2 = tf.Variable(tf.random.normal(b_shape))

    def __call__(self, x):
        # Forward pass: chain the three elementwise layers.
        hidden = tf.multiply(x, self.W) + self.b
        hidden = tf.multiply(hidden, self.W1) + self.b1
        return tf.multiply(hidden, self.W2) + self.b2

def loss(predicted_y, desired_y):
    """Return the sum-of-squared-errors between prediction and target."""
    squared_error = tf.square(predicted_y - desired_y)
    return tf.reduce_sum(squared_error)

optimizer = tf.optimizers.Adam(0.1)


def train(model, inputs, outputs):
    """Run one optimisation step over ALL of the model's variables.

    The original only differentiated w.r.t. model.W and model.b, so the
    second and third layers (W1/b1/W2/b2) never received gradient updates.
    Every trainable variable must appear in both t.gradient and
    optimizer.apply_gradients.
    """
    trainable = [model.W, model.b, model.W1, model.b1, model.W2, model.b2]
    with tf.GradientTape() as t:
        current_loss = loss(model(inputs), outputs)
    grads = t.gradient(current_loss, trainable)
    optimizer.apply_gradients(zip(grads, trainable))


# Build the model from the training data shapes, then fit it.
model = Model(x, y)

for _ in range(10000):
    train(model, x, y)

# Interactive inference: read four numbers, run the full trained model.
# Fixes to the original: the array/call brackets were never closed; input()
# returns strings, which must be cast to float32 to match the weights; and
# multiplying by model.W alone applies only the first layer's weights —
# calling model(...) uses all three trained layers.
for _ in range(3):
    features = [float(input()) for _ in range(4)]
    InputX = np.array([features], dtype=np.float32)
    prediction = model(InputX)
    print("I think that output can be:", prediction)


Just add the new layer variables to both lists — the one passed to t.gradient and the one passed to optimizer.apply_gradients:

grads = t.gradient(current_loss, [model.W, model.b, model.W1, model.b1, model.W2, model.b2])
optimizer.apply_gradients(zip(grads, [model.W, model.b, model.W1, model.b1, model.W2, model.b2]))

Answered By – Andrey

Answer Checked By – Katrina (AngularFixing Volunteer)

Leave a Reply

Your email address will not be published.