I would like to know how to add metrics like accuracy and precision, and how to save the model, to TensorBoard for this logistic regression:
from tensorflow.keras.datasets import fashion_mnist
from sklearn.model_selection import train_test_split
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train, x_test = x_train / 255., x_test / 255.
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.15)

x_train = tf.reshape(x_train, shape=(-1, 784))
x_test = tf.reshape(x_test, shape=(-1, 784))

weights = tf.Variable(tf.random.normal(shape=(784, 10), dtype=tf.float64))
biases = tf.Variable(tf.random.normal(shape=(10,), dtype=tf.float64))

def logistic_regression(x):
    lr = tf.add(tf.matmul(x, weights), biases)
    #return tf.nn.sigmoid(lr)
    return lr

def cross_entropy(y_true, y_pred):
    y_true = tf.one_hot(y_true, 10)
    loss = tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)
    return tf.reduce_mean(loss)

def accuracy(y_true, y_pred):
    y_true = tf.cast(y_true, dtype=tf.int32)
    preds = tf.cast(tf.argmax(y_pred, axis=1), dtype=tf.int32)
    preds = tf.equal(y_true, preds)
    return tf.reduce_mean(tf.cast(preds, dtype=tf.float32))

def grad(x, y):
    with tf.GradientTape() as tape:
        y_pred = logistic_regression(x)
        loss_val = cross_entropy(y, y_pred)
    return tape.gradient(loss_val, [weights, biases])

n_batches = 10000
learning_rate = 0.01
batch_size = 128

dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.repeat().shuffle(x_train.shape[0]).batch(batch_size)

optimizer = tf.optimizers.SGD(learning_rate)

for batch_numb, (batch_xs, batch_ys) in enumerate(dataset.take(n_batches), 1):
    gradients = grad(batch_xs, batch_ys)
    optimizer.apply_gradients(zip(gradients, [weights, biases]))

    y_pred = logistic_regression(batch_xs)
    loss = cross_entropy(batch_ys, y_pred)
    acc = accuracy(batch_ys, y_pred)
    print("Batch number: %i, loss: %f, accuracy: %f" % (batch_numb, loss, acc))
I'm new to TensorFlow, and the only way I ever wrote logs was in TensorFlow 1.x with the "with tf.Session() as sess" pattern. Since that was removed from TensorFlow 2, I get lost in the other ways of writing code.
Your code will look something like this using plain TensorFlow 2 with Keras:
Start with the model creation. Logistic regression can be seen as a single-layer perceptron with sigmoid activation, so we add an input layer with as many inputs as there are features and a dense output layer with sigmoid activation, one unit per output class.
input = tf.keras.Input(shape=(nfeatures,))                     # one input per feature
output = tf.keras.layers.Dense(nclasses, activation='sigmoid')(input)
model = tf.keras.Model(inputs=input, outputs=output, name='MyLogisticRegression')
Then we create the optimizer, the loss function and a metric:
opt = tf.keras.optimizers.Adadelta()
lss = tf.keras.losses.categorical_crossentropy
met = tf.keras.metrics.CategoricalAccuracy()   # compares argmax of the predictions with the one-hot labels
You have to use categorical_crossentropy or sparse_categorical_crossentropy depending on the labels (one-hot encoded or not). For this loss you may want to change the activation to softmax.
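For example, with the integer labels that fashion_mnist provides (not one-hot encoded), a sketch of that variant could look like this, reusing the input and nclasses names from above (the metric choice is mine, not part of the original snippet):

output = tf.keras.layers.Dense(nclasses, activation='softmax')(input)   # probabilities over the classes
lss = tf.keras.losses.sparse_categorical_crossentropy                   # expects integer labels
met = tf.keras.metrics.SparseCategoricalAccuracy()                      # accuracy for integer labels

If you also want precision, as the question asks, tf.keras.metrics.Precision() can be added to the metrics list, but it expects one-hot style targets, so it pairs with categorical_crossentropy rather than the sparse variant.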
Now we can "compile" the model this way:
model.compile(optimizer=opt, loss=lss, metrics=[met])
model.summary()
So now we can create the TensorBoard callback:
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir, write_graph=True, update_freq='batch')
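After (or during) training you can inspect the logged loss, metrics and graph by pointing TensorBoard at the same directory, e.g. by running tensorboard --logdir with the value you used for logdir.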
And then train like this:
model.fit(train,epochs=100,callbacks=[tensorboard_callback],validation_data = val)
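The TensorBoard callback only writes logs (plus the graph, because of write_graph=True); it does not keep the trained model itself. Since the question also asks about saving the model, here is a minimal sketch (the path is just an example):

model.save('my_logreg_model')                            # writes a SavedModel directory
# later: model = tf.keras.models.load_model('my_logreg_model')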
If your data is in NumPy arrays, you can create a tf.data dataset like this:
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
# split into subsets first, then batch each one
train = dataset.take(train_size).batch(batchsize)
rest = dataset.skip(train_size)
val = rest.skip(test_size).batch(batchsize)
test = rest.take(test_size).batch(batchsize)
Where train is your train dataset, val the validation one and test the test dataset.
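If you would rather keep the custom training loop from the question instead of moving to model.fit, you can still log to TensorBoard with tf.summary. A minimal sketch of the loop from the question with scalar logging added (the log directory name is just an example):

writer = tf.summary.create_file_writer('logs/logreg')    # example log directory
with writer.as_default():
    for batch_numb, (batch_xs, batch_ys) in enumerate(dataset.take(n_batches), 1):
        gradients = grad(batch_xs, batch_ys)
        optimizer.apply_gradients(zip(gradients, [weights, biases]))

        y_pred = logistic_regression(batch_xs)
        loss = cross_entropy(batch_ys, y_pred)
        acc = accuracy(batch_ys, y_pred)

        # these show up under the Scalars tab in TensorBoard
        tf.summary.scalar('loss', loss, step=batch_numb)
        tf.summary.scalar('accuracy', acc, step=batch_numb)

To save the variables of this low-level version you could use a checkpoint, e.g. tf.train.Checkpoint(weights=weights, biases=biases).save('ckpt/logreg').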