import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
# Load MNIST: 60k train / 10k test grayscale images of hand-written digits 0-9,
# each 28x28 pixels, with integer class labels.
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# L2-normalize each image along axis 1 so pixel magnitudes are scaled per row,
# which keeps activations in a well-behaved range for training.
x_train, x_test = (
    tf.keras.utils.normalize(split, axis=1) for split in (x_train, x_test)
)
# Simple MLP classifier: flatten the 28x28 image into a 784-vector,
# pass it through two 128-unit ReLU hidden layers, and emit a 10-way
# softmax distribution over digit classes.
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax),
])

# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=["accuracy"],
)

# Train for a few passes over the training split.
model.fit(x_train, y_train, epochs=3)
# Notebook output (not executable Python — kept for reference):
# Epoch 1/3 1875/1875 ━━━━━━━━━━━━━━━━━━━━ 2s 744us/step - accuracy: 0.8725 - loss: 0.4654 Epoch 2/3 1875/1875 ━━━━━━━━━━━━━━━━━━━━ 1s 739us/step - accuracy: 0.9661 - loss: 0.1098 Epoch 3/3 1875/1875 ━━━━━━━━━━━━━━━━━━━━ 1s 738us/step - accuracy: 0.9781 - loss: 0.0686
# <keras.src.callbacks.history.History at 0x2779805a790>
# Measure generalization on the held-out test split.
# (These are test-set metrics — named accordingly rather than "val_*".)
test_loss, test_acc = model.evaluate(x_test, y_test)
print(test_loss, test_acc)
# Notebook output (not executable Python — kept for reference):
# 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 462us/step - accuracy: 0.9671 - loss: 0.1060 0.09289506077766418 0.9718000292778015
predictions = model.predict([x_test])
# Notebook output (not executable Python — kept for reference):
# 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 660us/step
# Show the first six test images alongside the model's predicted digit.
# NOTE: the loop body was unindented in the pasted source, which is a
# SyntaxError in Python — restored here.
for i in range(6):
    # argmax over the softmax output gives the most probable class id.
    print("Number predicted :", np.argmax(predictions[i]))
    plt.imshow(x_test[i])
    plt.show()
# Notebook output (not executable Python — kept for reference):
# Number predicted : 7
# Number predicted : 2
# Number predicted : 1
# Number predicted : 0
# Number predicted : 4
# Number predicted : 1