Creating a pipeline to train a model on the MNIST dataset
# Import TensorFlow and matplotlib
from tensorflow.keras.datasets.mnist import load_data
from matplotlib import pyplot as plt
import tensorflow as tf
# load MNIST dataset
(trainX, trainy), (testX, testy) = load_data()
# Plot the first 36 training images in a 6x6 grid
for i in range(36):
    # define a subplot for each image
    plt.subplot(6, 6, i + 1)
    # plot the raw pixel data in grayscale
    plt.imshow(trainX[i], cmap=plt.get_cmap('gray'))
# show the figure
plt.show()
# Inspect a single training example and its label
image_index = 7777  # you may select any index from 0 to 59,999
print(trainy[image_index])  # the label is 8
plt.imshow(trainX[image_index], cmap='Greys')
# Reshape the arrays to 4 dimensions (samples, height, width, channels) so they work with the Keras API
trainX = trainX.reshape(trainX.shape[0], 28, 28, 1)
testX = testX.reshape(testX.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)
# Pixel values must be floats before normalization
trainX = trainX.astype('float32')
testX = testX.astype('float32')
# Normalize the grayscale pixel values to the range [0, 1] by dividing by the maximum value (255)
trainX /= 255
testX /= 255
print('trainX shape:', trainX.shape)
print('Number of images in trainX:', trainX.shape[0])
print('Number of images in testX:', testX.shape[0])
# Importing the required Keras modules containing model and layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
# Creating a Sequential Model and adding the layers
model = Sequential()
model.add(Conv2D(28, kernel_size=(3, 3), input_shape=input_shape))
model.add(Conv2D(28, (3, 3)))
model.add(Conv2D(28, (3, 3)))
model.add(Flatten())  # flatten the 2D feature maps for the fully connected layers
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(10, activation=tf.nn.softmax))
# Show network structure
model.summary()
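Because the three 3x3 convolutions use no padding or pooling, the summary shows each layer shrinking the feature maps: the 28x28 input becomes 26x26, 24x24, and finally 22x22 before flattening.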
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x=trainX, y=trainy, epochs=5)
Epoch 1/5
1875/1875 [==============================] - 81s 43ms/step - loss: 0.2333 - accuracy: 0.9296
Epoch 2/5
1875/1875 [==============================] - 83s 44ms/step - loss: 0.1355 - accuracy: 0.9592
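The walkthrough stops after training, so as a quick sanity check the held-out test set can also be scored. This is a minimal sketch that uses only the variables defined above:
# Evaluate generalization on the 10,000 held-out test images
test_loss, test_acc = model.evaluate(x=testX, y=testy)
print('Test accuracy:', test_acc)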
Using the vizgradcam library to explain individual predictions
git clone https://github.com/gkeechin/vizgradcam
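The import below assumes the cloned vizgradcam folder sits in the working directory. If it was cloned somewhere else, one way to make it importable is a sketch like the following, where the path is a hypothetical placeholder:
import sys
sys.path.append('/path/to/clone-parent')  # hypothetical: the directory that contains the cloned vizgradcam folder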
# Pick a test image and check the model's prediction
image_index = 4444
plt.imshow(testX[image_index].reshape(28, 28), cmap='Greys')
pred = model.predict(testX[image_index].reshape(1, 28, 28, 1))
print(pred.argmax())
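Since testy was loaded together with testX at the start, the prediction can be compared against the ground-truth label:
print('True label:', testy[image_index])  # compare with the predicted digit above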
from vizgradcam.gradcam import VizGradCAM
test_img = testX[4444]
VizGradCAM(
    model,    # the trained model
    test_img  # the test image to explain
)
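VizGradCAM overlays a Grad-CAM heatmap on the input digit, highlighting the regions whose activations contributed most to the predicted class.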
# Visualize Grad-CAM explanations for the first 16 test images
plt.rcParams["figure.dpi"] = 250
for i in range(16):
    # define a subplot in a 4x4 grid
    plt.subplot(4, 4, i + 1)
    plt.tight_layout()
    pred = model.predict(testX[i].reshape(1, 28, 28, 1))
    plt.title("Prediction: " + str(pred.argmax()), fontsize=4)
    plt.axis('off')
    VizGradCAM(model, testX[i])
plt.show()
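Scanning the grid makes it easy to check whether the network attends to the digit strokes themselves rather than to background pixels, which is the point of applying Grad-CAM here.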