
Is there a way to plot a confusion matrix from H2O?


I know H2O can use

model_perf = model.model_performance(input)
model_perf.confusion_matrix()

to output the confusion matrix. But is there a way to get the confusion matrix as a table that I can use to create a plot?


Solution

  • You have the function you need, as indicated here; you just need to convert the output of your H2OFrames to pandas DataFrames. A full example is shown below:

    import h2o
    from h2o.estimators.gbm import H2OGradientBoostingEstimator
    import numpy as np
    import matplotlib.pyplot as plt
    
    from sklearn.metrics import confusion_matrix
    from sklearn.utils.multiclass import unique_labels
    %matplotlib inline
    
    h2o.init()
    h2o.cluster().show_status()
    
    # import the cars dataset:
    # this dataset is used to classify whether or not a car is economical based on
    # the car's displacement, power, weight, and acceleration, and the year it was made
    cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
    
    
    # drop rows where the response column is missing
    cars = cars[~cars["economy_20mpg"].isna()]
    
    
    # convert response column to a factor
    cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
    
    # set the predictor names and the response column name
    predictors = ["displacement","power","weight","acceleration","year"]
    response = "economy_20mpg"
    
    # split into train and validation sets
    train, valid = cars.split_frame(ratios = [.8], seed = 1234)
    
    # first initialize your estimator
    cars_gbm = H2OGradientBoostingEstimator(seed = 1234, sample_rate=.5)
    
    # then train your model, where you specify your 'x' predictors, your 'y' the response column
    # training_frame and validation_frame
    cars_gbm.train(x = predictors, y = response, training_frame = train, validation_frame = valid)
    

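    If all you want is H2O's own confusion matrix in tabular form, you can also export it straight to pandas. A minimal sketch, assuming a binomial model whose performance object exposes confusion_matrix() backed by a table:

    # optional: pull H2O's native confusion matrix into a pandas DataFrame
    perf = cars_gbm.model_performance(valid)
    cm_table = perf.confusion_matrix().table.as_data_frame()
    print(cm_table)
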
    Next, define the plotting function, adapted from the scikit-learn documentation:

    def plot_confusion_matrix(y_true, y_pred, classes,
                              normalize=False,
                              title=None,
                              cmap=plt.cm.Blues):
        """
        This function prints and plots the confusion matrix.
        Normalization can be applied by setting `normalize=True`.
        """
        if not title:
            if normalize:
                title = 'Normalized confusion matrix'
            else:
                title = 'Confusion matrix, without normalization'
    
        # Compute confusion matrix
        cm = confusion_matrix(y_true, y_pred)
        # Only use the labels that appear in the data
        classes = classes[unique_labels(y_true, y_pred)]
        if normalize:
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
            print("Normalized confusion matrix")
        else:
            print('Confusion matrix, without normalization')
    
        print(cm)
    
        fig, ax = plt.subplots()
        im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
        ax.figure.colorbar(im, ax=ax)
        # We want to show all ticks...
        ax.set(xticks=np.arange(cm.shape[1]),
               yticks=np.arange(cm.shape[0]),
               # ... and label them with the respective list entries
               xticklabels=classes, yticklabels=classes,
               title=title,
               ylabel='True label',
               xlabel='Predicted label')
    
        # Rotate the tick labels and set their alignment.
        plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
                 rotation_mode="anchor")
    
        # Loop over data dimensions and create text annotations.
        fmt = '.2f' if normalize else 'd'
        thresh = cm.max() / 2.
        for i in range(cm.shape[0]):
            for j in range(cm.shape[1]):
                ax.text(j, i, format(cm[i, j], fmt),
                        ha="center", va="center",
                        color="white" if cm[i, j] > thresh else "black")
        fig.tight_layout()
        return ax
    
    

    Then extract the true and predicted labels as pandas Series:

    # find the threshold that maximizes F1, used to turn probabilities into labels
    maxf1_threshold = cars_gbm.find_threshold_by_max_metric('f1')
    
    # get your true labels and your predictions
    y_true = cars["economy_20mpg"].as_data_frame()
    y_pred = cars_gbm.predict(cars)
    
    # convert the predicted probabilities into integer labels using the threshold
    y_pred = (y_pred['p1'] >= maxf1_threshold).ifelse(1, 0)
    y_pred = y_pred.as_data_frame()
    y_pred.columns = ['p1']
    
    y_true1 = y_true.economy_20mpg 
    y_pred1 = y_pred.p1
    class_names = np.array(cars["economy_20mpg"].levels()[0])
    
    # Plot non-normalized confusion matrix
    plot_confusion_matrix(y_true1, y_pred1, classes=class_names,
                          title='Confusion matrix')
    
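    The same helper can also produce a row-normalized matrix via the normalize flag it defines:

    # Plot normalized confusion matrix
    plot_confusion_matrix(y_true1, y_pred1, classes=class_names, normalize=True,
                          title='Normalized confusion matrix')
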

    Image result:

    (plot of the confusion matrix)

    Please note that there is a bug in the H2O-3 confusion matrix that has been noted here.
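
    As an aside, if your scikit-learn version is recent enough (1.0 or later), you can skip the manual helper and use its built-in display class instead. A minimal sketch, reusing y_true1, y_pred1, and class_names from above:

    # alternative: scikit-learn's built-in confusion matrix plot (sklearn >= 1.0)
    from sklearn.metrics import ConfusionMatrixDisplay
    ConfusionMatrixDisplay.from_predictions(y_true1, y_pred1,
                                            display_labels=class_names,
                                            cmap=plt.cm.Blues)
    plt.show()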