"""

import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical  # assumed import: to_categorical is used below

(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Normalize the pixel values (0 to 1)
X_train = X_train.reshape(-1, 28, 28, 1).astype("float32") / 255
X_test = X_test.reshape(-1, 28, 28, 1).astype("float32") / 255
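# The reshape adds the single grayscale channel axis (28, 28, 1) that Conv2D expects.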

# Convert labels to one-hot encoding
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)  # assumed: evaluate() below needs one-hot test labels too
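# One-hot labels pair with the categorical_crossentropy loss; plain integer labels
# would use sparse_categorical_crossentropy instead.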

model = models.Sequential()

# 1st Convolutional Layer
model.add(layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.BatchNormalization())

# 2nd Convolutional Layer
model.add(layers.Conv2D(64, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.BatchNormalization())

# 3rd Convolutional Layer
model.add(layers.Conv2D(128, (3, 3), activation="relu"))
model.add(layers.BatchNormalization())

# Flattening the data before fully connected layers
model.add(layers.Flatten())

# Fully Connected (Dense) Layer with Dropout for regularization
model.add(layers.Dense(128, activation="relu"))
model.add(layers.Dropout(0.5))

# Output Layer for classification
model.add(layers.Dense(10, activation="softmax"))
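# softmax over 10 units yields one probability per digit class (0-9).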

# Compile the model
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])

# Display the model summary
model.summary()

# Train the model
history = model.fit(
    X_train, y_train, epochs=5, batch_size=32, validation_data=(X_test, y_test)
)
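# model.fit returns a History object; history.history holds the per-epoch loss and
# accuracy curves for both the training and validation sets.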

# Evaluate the model on test data
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2)
print(f"\nTest accuracy: {test_acc}")