Tags: python, tensorflow, conv-neural-network, resnet, image-classification

How to implement Grad-CAM to view activation maps/heatmaps on TensorFlow ResNet152V2 for image classification


Hello, I'm doing a small project on TensorFlow image classification using ResNet152V2.

I wrote a Train-Predict.py script that trains a model, saves its weights to trained_weights.hdf5, and can successfully predict images of autistic and non-autistic people.

Here is the script:

#Import Libraries
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import models
from tensorflow.keras.applications import ResNet152V2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dropout, Dense, BatchNormalization, GlobalAveragePooling2D, Conv2D
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from PIL import Image
import cv2





os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
img_size = 224
batchsize = 128
epochs = 50
lrate = 0.01
lrate_reduction_factor = 0.5
training = False


#Variable setup and Detect images/Classes in dataset folders
traindatadir="Train"
testdatadir="Test"

#Data Augmentation
datagen = ImageDataGenerator(
    rescale = 1./255,
    horizontal_flip = True,
    vertical_flip = True,
    rotation_range=15,
    shear_range=0.1,
    zoom_range=0.2,
    width_shift_range=0.1,
    height_shift_range=0.1
)

#Preprocessing Data
train_datagen=datagen.flow_from_directory(
    traindatadir,
    target_size = (img_size, img_size),
    color_mode = 'rgb',
    batch_size = batchsize,
    shuffle = True,
    seed = 123,
    class_mode= 'categorical'
)

test_datagen=datagen.flow_from_directory(
    testdatadir,
    target_size = (img_size, img_size),
    color_mode = 'rgb',
    batch_size = batchsize,
    shuffle = True,
    seed = 123,
    class_mode= 'categorical'
)


#ResNet Model with custom node tuning (Reduced number of Nodes this time)
resnet = ResNet152V2( 
    include_top = False,
    weights = 'imagenet',
    input_shape = (img_size, img_size, 3)
)
 
resnet.trainable = False

model = Sequential()
 
model.add(resnet)
model.add(Conv2D(512, kernel_size=(3,3), activation="relu"))  
model.add(Conv2D(512, kernel_size=(3,3), activation="relu"))  
model.add(Conv2D(512, kernel_size=(3,3), activation="relu"))  
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dense(64, activation = 'relu'))
model.add(Dropout(0.2))
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(128, activation='relu'))    
model.add(Dense(2, activation='softmax'))
 
model.compile(optimizer=Adam(learning_rate=lrate), loss = 'categorical_crossentropy', metrics = ['accuracy'])
#model.compile(loss='categorical_crossentropy', optimizer="Adam", metrics=['accuracy'])

model.summary()


#Use ReduceLR to reduce learning rate when metric not improving
earlystop = EarlyStopping(patience=20)
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                            patience=10, 
                                            verbose=1, 
                                            factor=lrate_reduction_factor, 
                                            min_lr=1e-15) 
callbacks = [earlystop, learning_rate_reduction]

if training:
    history = model.fit(train_datagen,epochs=epochs,batch_size=batchsize,validation_data=test_datagen,callbacks=callbacks)
    model.save('trained_weights.hdf5')
    
    #Plot
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(6,6))
    ax1.plot(history.history['loss'], color='b', label="Training loss")
    ax1.plot(history.history['val_loss'], color='r', label="validation loss")
    ax1.set_xticks(np.arange(0, epochs, (epochs/10)))
    ax1.legend()
    
    ax2.plot(history.history['accuracy'], color='b', label="Training accuracy")
    ax2.plot(history.history['val_accuracy'], color='r',label="Validation accuracy")
    ax2.set_xticks(np.arange(0, epochs, (epochs/10)))
    ax2.legend()
    
    legend = plt.legend(loc='best', shadow=True)
    plt.tight_layout()
    plt.show()





#Model Prediction
model = models.load_model('trained_weights.hdf5', compile = True)

predict_path = "Train"

datagen = ImageDataGenerator(
    rescale = 1./255,
)

predict_data = datagen.flow_from_directory(
    predict_path,
    target_size = ((img_size,img_size)), 
)

ci = predict_data.class_indices
classes = {v: k for k, v in ci.items()}

#path = "C:/Users/J.A.X/Desktop/TempEnv/Test/autistic/1028.jpg"
path = "C:/Users/J.A.X/Desktop/TempEnv/Train/non_autistic/0001.jpg"
#path = input('Please enter path of image to classify: \n')

inp = Image.open(path)
img = inp.resize((img_size,img_size))
img = np.array(img)/255.0
img = np.reshape(img, [1,img_size,img_size,3])

predictions = model.predict(img)

top_values, top_indices = tf.nn.top_k(predictions, k=2)

values = np.array(top_values)
indices = np.array(top_indices)

#print('Input Image: \n\n\n')
#inp.show()

print('Probabilities: \n')
#print(values)
#print(indices)

for i in range(2):
    print(classes[indices[0][i]] + " : ", end = "")
    print(values[0][i] * 100)
    print()


image = cv2.imread(path)
# Convert the image from BGR to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (img_size, img_size))

# Expand dimensions to match the expected input shape (1, 224, 224, 3)
image = np.expand_dims(image, axis=0)

# Convert image to float32 and normalize
image = image.astype(np.float32) / 255.0

# checking how it looks
plt.imshow(image[0])  # Note: image[0] because image now has a batch dimension
plt.show()

print(image.shape) # Print Shape

i = np.argmax(predictions[0])
print(i) # 0 is autistic and 1 is non autistic

Output:

Found 2054 images belonging to 2 classes.
Found 882 images belonging to 2 classes.
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 resnet152v2 (Functional)    (None, 7, 7, 2048)        58331648  
                                                                 
 conv2d (Conv2D)             (None, 5, 5, 512)         9437696   
                                                                 
 conv2d_1 (Conv2D)           (None, 3, 3, 512)         2359808   
                                                                 
 conv2d_2 (Conv2D)           (None, 1, 1, 512)         2359808   
                                                                 
 global_average_pooling2d (G  (None, 512)              0         
 lobalAveragePooling2D)                                          
                                                                 
 dropout (Dropout)           (None, 512)               0         
                                                                 
 dense (Dense)               (None, 512)               262656    
                                                                 
 dense_1 (Dense)             (None, 64)                32832     
                                                                 
 dropout_1 (Dropout)         (None, 64)                0         
                                                                 
 dense_2 (Dense)             (None, 256)               16640     
                                                                 
 batch_normalization (BatchN  (None, 256)              1024      
 ormalization)                                                   
                                                                 
 dense_3 (Dense)             (None, 128)               32896     
                                                                 
 dense_4 (Dense)             (None, 2)                 258       
                                                                 
=================================================================
Total params: 72,835,266
Trainable params: 14,503,106
Non-trainable params: 58,332,160
_________________________________________________________________


Found 2054 images belonging to 2 classes.
1/1 [==============================] - 3s 3s/step
Probabilities: 

non_autistic : 81.39994740486145

autistic : 18.60005408525467

(1, 224, 224, 3)
1

I wish to create a heatmap to visualize where the activation takes place at the convolutional layer.

Something like this: [example Grad-CAM heatmap overlaid on an input image]
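
From what I understand, Grad-CAM weights each feature map $A^k$ of a chosen convolutional layer by the global-average-pooled gradient of the class score $y^c$ and keeps only the positive part:

$$\alpha_k^c = \frac{1}{Z}\sum_i \sum_j \frac{\partial y^c}{\partial A_{ij}^k}, \qquad L^c_{\text{Grad-CAM}} = \mathrm{ReLU}\Big(\sum_k \alpha_k^c A^k\Big)$$

so the heatmap has the spatial resolution of whichever layer's output is used.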

However, after following dozens of Grad-CAM examples online and writing a few dozen functions like:

# Grad-CAM implementation
from tensorflow.keras.preprocessing.image import load_img  # get_img_array below needs this import

def get_img_array(img_path, size):
    img = load_img(img_path, target_size=size)
    array = np.expand_dims(np.array(img), axis=0)
    return array / 255.0

def make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=None):
    grad_model = tf.keras.models.Model(
        [model.inputs], [model.get_layer(last_conv_layer_name).output, model.output]
    )
    with tf.GradientTape() as tape:
        last_conv_layer_output, preds = grad_model(img_array)
        if pred_index is None:
            pred_index = tf.argmax(preds[0])
        class_channel = preds[:, pred_index]

    grads = tape.gradient(class_channel, last_conv_layer_output)
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)

    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    return heatmap.numpy()

def save_and_display_gradcam(img_path, heatmap, cam_path="cam.jpg", alpha=0.4):
    img = cv2.imread(img_path)
    heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
    heatmap = np.uint8(255 * heatmap)
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
    superimposed_img = heatmap * alpha + img
    cv2.imwrite(cam_path, superimposed_img)

    img = cv2.cvtColor(superimposed_img, cv2.COLOR_BGR2RGB)
    plt.imshow(img)
    plt.axis('off')
    plt.show()

img_array = get_img_array(path, size=(img_size, img_size))

# Generate Grad-CAM heatmap
last_conv_layer_name = "conv5_block3_out"
heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name)

# Display Grad-CAM
save_and_display_gradcam(path, heatmap)

Each example resulted in different errors of sorts, but most of them point to the ResNet being nested inside the actual model...

According to the examples I've read, it's stated to use the last convolutional layer of the model. I'm not really sure whether they mean the "conv5_block3_out" layer inside the ResNet or my conv2d_2 convolution layer in the model outside of the ResNet...
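
To see both candidates side by side, the layers can be listed like this (a small inspection sketch using the model built above; the names match what model.summary() prints):

# Outer model: resnet152v2, conv2d, conv2d_1, conv2d_2, ...
for layer in model.layers:
    print(layer.name, layer.output_shape)

# Layers nested inside the ResNet submodel, including conv5_block3_out
for layer in model.get_layer('resnet152v2').layers:
    print(layer.name)

Per the summary above, conv2d_2 outputs a 1x1x512 volume, so a heatmap taken from it would collapse to a single value, while conv5_block3_out yields a 7x7 map that can be upsampled meaningfully.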

Any help with writing and displaying a proper heatmap is welcome. I'm willing to try anything at this point.

The virtual environment is miniconda.

Instructions to set up the virtual environment:

conda create --name Cuda_Python3.8 python=3.8 -y
conda activate Cuda_Python3.8

conda install cudatoolkit=11.2 cudnn=8.1 -c=conda-forge -y

pip install tensorflow-gpu==2.10.1
pip install spyder==5.5.5
pip install Pillow==10.4.0
pip install matplotlib==3.7.5
pip install opencv-python==4.10.0.84

Solution

  • I found a working Grad-CAM implementation example in this Stack Overflow post.

    The GradCAM class in the answer to that post turned out to be exactly what I needed to implement the heatmap for my ResNet model:

    class GradCAM:
        def __init__(self, model, classIdx, layerName=None):
            # store the model, the class index used to measure the class
            # activation map, and the layer to be used when visualizing
            # the class activation map
            self.model = model
            self.classIdx = classIdx
            self.layerName = layerName
            # if the layer name is None, attempt to automatically find
            # the target output layer
            if self.layerName is None:
                self.layerName = self.find_target_layer()
    
        def find_target_layer(self):
            # attempt to find the final convolutional layer in the network
            # by looping over the layers of the network in reverse order
            for layer in reversed(self.model.layers):
                # check to see if the layer has a 4D output
                if len(layer.output_shape) == 4:
                    return layer.name
            # otherwise, we could not find a 4D layer so the GradCAM
            # algorithm cannot be applied
            raise ValueError("Could not find 4D layer. Cannot apply GradCAM.")
    
    
        def compute_heatmap(self, image, eps=1e-8):
            # construct our gradient model by supplying (1) the inputs
            # to our pre-trained model, (2) the output of the (presumably)
            # final 4D layer in the network, and (3) the output of the
            # softmax activations from the model
            gradModel = models.Model(
                inputs=[self.model.inputs],
                outputs=[self.model.get_layer(self.layerName).output, self.model.output])
    
            # record operations for automatic differentiation
            with tf.GradientTape() as tape:
                # cast the image tensor to a float-32 data type, pass the
                # image through the gradient model, and grab the loss
                # associated with the specific class index
                inputs = tf.cast(image, tf.float32)
                (convOutputs, predictions) = gradModel(inputs)
                
                #loss = predictions[:, tf.argmax(predictions[0])]
                loss = predictions[:, self.classIdx]
        
            # use automatic differentiation to compute the gradients
            grads = tape.gradient(loss, convOutputs)
    
            # compute the guided gradients
            castConvOutputs = tf.cast(convOutputs > 0, "float32")
            castGrads = tf.cast(grads > 0, "float32")
            guidedGrads = castConvOutputs * castGrads * grads
            # the convolution and guided gradients have a batch dimension
            # (which we don't need) so let's grab the volume itself and
            # discard the batch
            convOutputs = convOutputs[0]
            guidedGrads = guidedGrads[0]
    
            # compute the average of the gradient values, and using them
            # as weights, compute the ponderation of the filters with
            # respect to the weights
            weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
            cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)
    
            # grab the spatial dimensions of the input image and resize
            # the output class activation map to match the input image
            # dimensions
            (w, h) = (image.shape[2], image.shape[1])
            heatmap = cv2.resize(cam.numpy(), (w, h))
            # normalize the heatmap such that all values lie in the range
            # [0, 1], scale the resulting values to the range [0, 255],
            # and then convert to an unsigned 8-bit integer
            numer = heatmap - np.min(heatmap)
            denom = (heatmap.max() - heatmap.min()) + eps
            heatmap = numer / denom
            heatmap = (heatmap * 255).astype("uint8")
            # return the resulting heatmap to the calling function
            return heatmap
    
        def overlay_heatmap(self, heatmap, image, alpha=0.5,
                            colormap=cv2.COLORMAP_VIRIDIS):
            # apply the supplied color map to the heatmap and then
            # overlay the heatmap on the input image
            heatmap = cv2.applyColorMap(heatmap, colormap)
            output = cv2.addWeighted(image, alpha, heatmap, 1 - alpha, 0)
            # return a 2-tuple of the color mapped heatmap and the output,
            # overlaid image
            return (heatmap, output)
    

    The only difference between their code and mine is that my model has a nested ResNet layer that I need to account for. Inside the compute_heatmap function, I simply swap out:

    loss = predictions[:, tf.argmax(predictions[0])]
    

    with:

    loss = predictions[:, self.classIdx]
    

    to account for the nested layers.
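
    In short, the crucial part is to hand GradCAM the nested ResNet submodel rather than the outer Sequential model, so that get_layer can actually reach the convolutional layers. A minimal sketch of the relevant calls (shown in context in the full script below):

    # i is the predicted class index (np.argmax(predictions[0])) and
    # img is the preprocessed (1, 224, 224, 3) input array
    icam = GradCAM(model.get_layer('resnet152v2'), i, 'conv5_block3_out')
    heatmap = icam.compute_heatmap(img)
    heatmap = cv2.resize(heatmap, (img_size, img_size))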

    Here is my new full script:

    #Import Libraries
    import numpy as np
    import matplotlib.pyplot as plt
    import tensorflow as tf
    from tensorflow.keras import models
    from tensorflow.keras.applications import ResNet152V2
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dropout, Dense, BatchNormalization, GlobalAveragePooling2D
    from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
    from tensorflow.keras.optimizers import Adam
    from PIL import Image
    import cv2
    
    
    class GradCAM:
        def __init__(self, model, classIdx, layerName=None):
            # store the model, the class index used to measure the class
            # activation map, and the layer to be used when visualizing
            # the class activation map
            self.model = model
            self.classIdx = classIdx
            self.layerName = layerName
            # if the layer name is None, attempt to automatically find
            # the target output layer
            if self.layerName is None:
                self.layerName = self.find_target_layer()
    
        def find_target_layer(self):
            # attempt to find the final convolutional layer in the network
            # by looping over the layers of the network in reverse order
            for layer in reversed(self.model.layers):
                # check to see if the layer has a 4D output
                if len(layer.output_shape) == 4:
                    return layer.name
            # otherwise, we could not find a 4D layer so the GradCAM
            # algorithm cannot be applied
            raise ValueError("Could not find 4D layer. Cannot apply GradCAM.")
    
    
        def compute_heatmap(self, image, eps=1e-8):
            # construct our gradient model by supplying (1) the inputs
            # to our pre-trained model, (2) the output of the (presumably)
            # final 4D layer in the network, and (3) the output of the
            # softmax activations from the model
            gradModel = models.Model(
                inputs=[self.model.inputs],
                outputs=[self.model.get_layer(self.layerName).output, self.model.output])
    
            # record operations for automatic differentiation
            with tf.GradientTape() as tape:
                # cast the image tensor to a float-32 data type, pass the
                # image through the gradient model, and grab the loss
                # associated with the specific class index
                inputs = tf.cast(image, tf.float32)
                (convOutputs, predictions) = gradModel(inputs)
                
                #loss = predictions[:, tf.argmax(predictions[0])]
                loss = predictions[:, self.classIdx]
        
            # use automatic differentiation to compute the gradients
            grads = tape.gradient(loss, convOutputs)
    
            # compute the guided gradients
            castConvOutputs = tf.cast(convOutputs > 0, "float32")
            castGrads = tf.cast(grads > 0, "float32")
            guidedGrads = castConvOutputs * castGrads * grads
            # the convolution and guided gradients have a batch dimension
            # (which we don't need) so let's grab the volume itself and
            # discard the batch
            convOutputs = convOutputs[0]
            guidedGrads = guidedGrads[0]
    
            # compute the average of the gradient values, and using them
            # as weights, compute the ponderation of the filters with
            # respect to the weights
            weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
            cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)
    
            # grab the spatial dimensions of the input image and resize
            # the output class activation map to match the input image
            # dimensions
            (w, h) = (image.shape[2], image.shape[1])
            heatmap = cv2.resize(cam.numpy(), (w, h))
            # normalize the heatmap such that all values lie in the range
            # [0, 1], scale the resulting values to the range [0, 255],
            # and then convert to an unsigned 8-bit integer
            numer = heatmap - np.min(heatmap)
            denom = (heatmap.max() - heatmap.min()) + eps
            heatmap = numer / denom
            heatmap = (heatmap * 255).astype("uint8")
            # return the resulting heatmap to the calling function
            return heatmap
    
        def overlay_heatmap(self, heatmap, image, alpha=0.5,
                            colormap=cv2.COLORMAP_VIRIDIS):
            # apply the supplied color map to the heatmap and then
            # overlay the heatmap on the input image
            heatmap = cv2.applyColorMap(heatmap, colormap)
            output = cv2.addWeighted(image, alpha, heatmap, 1 - alpha, 0)
            # return a 2-tuple of the color mapped heatmap and the output,
            # overlaid image
            return (heatmap, output)
    
    
    img_size = 224
    batchsize = 128
    epochs = 300
    lrate = 0.01
    lrate_reduction_factor = 0.1
    training = True
    
    
    #Variable setup and Detect images/Classes in dataset folders
    traindatadir="Train"
    testdatadir="Test"
    
    #Data Augmentation
    datagen = ImageDataGenerator(
        rescale = 1./255,
        horizontal_flip = True,
        vertical_flip = True,
        rotation_range=15,
        shear_range=0.1,
        zoom_range=0.2,
        width_shift_range=0.1,
        height_shift_range=0.1
    )
    
    #Preprocessing Data
    train_datagen=datagen.flow_from_directory(
        traindatadir,
        target_size = (img_size, img_size),
        color_mode = 'rgb',
        batch_size = batchsize,
        shuffle = True,
        seed = 123,
        class_mode= 'categorical'
    )
    
    test_datagen=datagen.flow_from_directory(
        testdatadir,
        target_size = (img_size, img_size),
        color_mode = 'rgb',
        batch_size = batchsize,
        shuffle = True,
        seed = 123,
        class_mode= 'categorical'
    )
    
    
    #ResNet Model with custom node tuning (Reduced number of Nodes this time)
    resnet = ResNet152V2( 
        include_top = False,
        weights = 'imagenet',
        input_shape = (img_size, img_size, 3)
    )
     
    resnet.trainable = True
    
    model = Sequential()
     
    model.add(resnet) 
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.2))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(64, activation = 'relu'))
    model.add(Dropout(0.2))
    model.add(Dense(256, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(128, activation='relu'))    
    model.add(Dense(2, activation='softmax'))
     
    model.compile(optimizer=Adam(learning_rate=lrate), loss = 'categorical_crossentropy', metrics = ['accuracy'])
    #model.compile(loss='categorical_crossentropy', optimizer="Adam", metrics=['accuracy'])
    
    model.summary()
    
    
    #Use ReduceLR to reduce learning rate when metric not improving
    earlystop = EarlyStopping(patience=20)
    learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', 
                                                patience=5, 
                                                verbose=1, 
                                                factor=lrate_reduction_factor, 
                                                min_lr=1e-15) 
    callbacks = [earlystop, learning_rate_reduction]
    
    if training:
        history = model.fit(train_datagen,epochs=epochs,batch_size=batchsize,validation_data=test_datagen,callbacks=callbacks)
        model.save('trained_weights.hdf5')
        
        #Plot
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(6,6))
        ax1.plot(history.history['loss'], color='b', label="Training loss")
        ax1.plot(history.history['val_loss'], color='r', label="validation loss")
        ax1.set_xticks(np.arange(0, epochs, (epochs/10)))
        ax1.legend()
        
        ax2.plot(history.history['accuracy'], color='b', label="Training accuracy")
        ax2.plot(history.history['val_accuracy'], color='r',label="Validation accuracy")
        ax2.set_xticks(np.arange(0, epochs, (epochs/10)))
        ax2.legend()
        
        legend = plt.legend(loc='best', shadow=True)
        plt.tight_layout()
        plt.show()
    
    
    
    #Model Prediction
    model = models.load_model('trained_weights.hdf5', compile = True)
    
    predict_path = "Train"
    
    datagen = ImageDataGenerator(
        rescale = 1./255,
    )
    
    predict_data = datagen.flow_from_directory(
        predict_path,
        target_size = ((img_size,img_size)), 
    )
    
    ci = predict_data.class_indices
    classes = {v: k for k, v in ci.items()}
    
    path = "./Test/autistic/1028.jpg"
    #path = "./Train/non_autistic/0001.jpg"
    #path = input('Please enter path of image to classify: \n')
    
    inp = Image.open(path)
    img = inp.resize((img_size,img_size))
    img = np.array(img)/255.0
    img = np.reshape(img, [1,img_size,img_size,3])
    
    predictions = model.predict(img)
    top_values, top_indices = tf.nn.top_k(predictions, k=2)
    values = np.array(top_values)
    indices = np.array(top_indices)
    
    
    
    # checking how it looks
    print('Input Image:')
    plt.imshow(img[0])  # Note: img[0] because image now has a batch dimension
    plt.show()
    
    #print(img.shape) # Print Shape
    
    i = np.argmax(predictions[0])
    #print(i) # 0 is autistic and 1 is non autistic
    
    print('Probabilities:')
    #print(values)
    #print(indices)
    
    for i in range(2):
        print(classes[indices[0][i]] + " : ", end = "")
        print(values[0][i] * 100)
        print()
        
    # View layers in model
    """
    for idx in range(len(model.layers)):
        print(model.get_layer(index = idx).name)
    """
    # View the nested layers in the resnet layer of the model
    """
    for idx in range(len(model.get_layer('resnet152v2').layers)):
        print(model.get_layer('resnet152v2').get_layer(index = idx).name)
    """
    icam = GradCAM(model.get_layer('resnet152v2'), i, 'conv5_block3_out')
    #icam = GradCAM(model, i, 'conv2d_2')  
    heatmap = icam.compute_heatmap(img)
    heatmap = cv2.resize(heatmap, (img_size, img_size))
    
    image = cv2.imread(path)
    # Convert the image from BGR to RGB
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (img_size, img_size))
    #print(heatmap.shape, image.shape)
    
    (heatmap, output) = icam.overlay_heatmap(heatmap, image, alpha=0.5)
    
    # Visualization
    fig, ax = plt.subplots(1, 3, figsize=(18, 6))
    ax[0].imshow(heatmap)
    ax[0].set_title('Heatmap')
    ax[1].imshow(image)
    ax[1].set_title('Original Image')
    ax[2].imshow(output)
    ax[2].set_title('Overlay')
    
    

    The output plot shows the heatmap, the original image, and the Grad-CAM overlay side by side to visualize the heatmap: [heatmap / original image / overlay figure]