In [1]:
import keras
keras.__version__
2024-09-02 14:17:39.516771: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-09-02 14:17:39.688172: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: SSE4.1 SSE4.2 AVX AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
Out[1]:
'3.5.0'
In [2]:
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense,Activation
from keras.optimizers import SGD
from tensorflow.python.keras.utils import np_utils


np.random.seed(1671)
In [3]:
(X_train,y_train), (X_test,y_test) = mnist.load_data()
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11490434/11490434 ━━━━━━━━━━━━━━━━━━━━ 2s 0us/step
In [4]:
print(X_train.shape)
print(X_test.shape)
(60000, 28, 28)
(10000, 28, 28)
In [5]:
RESHAPED=784
NB_CLASSES=10
OPTIMIZER=SGD()
In [6]:
X_train = X_train.reshape(60000,RESHAPED)
In [7]:
X_test = X_test.reshape(10000,RESHAPED)
In [8]:
X_train = X_train.astype('float32')
In [9]:
X_test = X_test.astype('float32')
In [10]:
X_train /=255
X_test /=255
In [11]:
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
In [12]:
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
In [13]:
model = Sequential()
model.add(Dense(128, input_shape=(RESHAPED,)))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
/opt/conda/lib/python3.11/site-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
In [14]:
model.summary()
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type)                    ┃ Output Shape           ┃       Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense (Dense)                   │ (None, 128)            │       100,480 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ activation (Activation)         │ (None, 128)            │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_1 (Dense)                 │ (None, 128)            │        16,512 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ activation_1 (Activation)       │ (None, 128)            │             0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_2 (Dense)                 │ (None, 10)             │         1,290 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ activation_2 (Activation)       │ (None, 10)             │             0 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
 Total params: 118,282 (462.04 KB)
 Trainable params: 118,282 (462.04 KB)
 Non-trainable params: 0 (0.00 B)
In [15]:
model.compile(loss="categorical_crossentropy",optimizer=OPTIMIZER,metrics=['accuracy'])
In [16]:
history = model.fit(X_train,Y_train,batch_size=128,epochs=20,verbose=1,validation_split=0.2)
Epoch 1/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 3s 6ms/step - accuracy: 0.4700 - loss: 1.8700 - val_accuracy: 0.8445 - val_loss: 0.7333
Epoch 2/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.8419 - loss: 0.6573 - val_accuracy: 0.8832 - val_loss: 0.4513
Epoch 3/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.8781 - loss: 0.4520 - val_accuracy: 0.8993 - val_loss: 0.3744
Epoch 4/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.8954 - loss: 0.3801 - val_accuracy: 0.9062 - val_loss: 0.3351
Epoch 5/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9030 - loss: 0.3407 - val_accuracy: 0.9126 - val_loss: 0.3113
Epoch 6/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9071 - loss: 0.3266 - val_accuracy: 0.9178 - val_loss: 0.2926
Epoch 7/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9128 - loss: 0.3037 - val_accuracy: 0.9216 - val_loss: 0.2787
Epoch 8/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9175 - loss: 0.2899 - val_accuracy: 0.9236 - val_loss: 0.2678
Epoch 9/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9214 - loss: 0.2752 - val_accuracy: 0.9269 - val_loss: 0.2562
Epoch 10/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9255 - loss: 0.2655 - val_accuracy: 0.9291 - val_loss: 0.2469
Epoch 11/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9281 - loss: 0.2522 - val_accuracy: 0.9317 - val_loss: 0.2379
Epoch 12/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9325 - loss: 0.2379 - val_accuracy: 0.9348 - val_loss: 0.2303
Epoch 13/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9338 - loss: 0.2331 - val_accuracy: 0.9363 - val_loss: 0.2235
Epoch 14/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9361 - loss: 0.2286 - val_accuracy: 0.9389 - val_loss: 0.2187
Epoch 15/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9365 - loss: 0.2214 - val_accuracy: 0.9402 - val_loss: 0.2115
Epoch 16/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9398 - loss: 0.2142 - val_accuracy: 0.9423 - val_loss: 0.2056
Epoch 17/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9408 - loss: 0.2069 - val_accuracy: 0.9423 - val_loss: 0.2024
Epoch 18/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9435 - loss: 0.1975 - val_accuracy: 0.9453 - val_loss: 0.1961
Epoch 19/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9449 - loss: 0.1904 - val_accuracy: 0.9464 - val_loss: 0.1915
Epoch 20/20
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9452 - loss: 0.1891 - val_accuracy: 0.9467 - val_loss: 0.1877
In [17]:
score = model.evaluate(X_test,Y_test,verbose=1)
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9365 - loss: 0.2146
In [18]:
print("Test score:",score[0])
print("Test accuracy:",score[1])
Test score: 0.18653523921966553
Test accuracy: 0.9451000094413757
In [19]:
score
Out[19]:
[0.18653523921966553, 0.9451000094413757]