python · scikit-learn · grid-search · gridsearchcv

GridSearchCV - Error: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()


I am trying to do neural network classification with scikit-learn in Python.

I generated my data, split it into train and test sets, and fit an MLPClassifier() on it.

Next, I want to tune the parameters of this model using sklearn.model_selection.GridSearchCV.

Here is my code:

import matplotlib.pyplot as plt
import numpy as np
import itertools

from sklearn.neural_network import MLPClassifier
from sklearn.datasets.samples_generator import make_blobs, make_moons
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV

X, y = make_blobs(n_samples=500, centers=5, n_features=2, random_state=10, cluster_std=2.5)
y[y==0] = -1
X_train, X_test,  y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=10)

X_train and X_test are arrays with 2 features.

model_MLP_RAW = MLPClassifier()
model_MLP_RAW.fit(X_train, y_train)
model_MLP_RAW.predict(X_test) == y_test
model_MLP_RAW.score(X_test, y_test)

model_MLP_RAW = MLPClassifier()

param_gridMLPC = {
    'learning_rate': ["constant", "invscaling", "adaptive"],
    'hidden_layer_sizes': [x for x in itertools.product((10,20,30,40,50,100),repeat=3)],
    'alpha': [10.0 ** -np.arange(1, 7)],
    'activation': ["logistic", "relu", "tanh"]
}

CV_unknwnMLPC = GridSearchCV(estimator=model_MLP_RAW, param_grid=param_gridMLPC, cv= 5)
CV_unknwnMLPC.fit(X_train, y_train)

print(CV_unknwnMLPC.best_params_)

Everything works fine, but at the line CV_unknwnMLPC.fit(X_train, y_train) I get the following error:

ValueError                                Traceback (most recent call last)
<ipython-input-30-90faf7e56738> in <module>()
     10 
     11 CV_unknwnMLPC = GridSearchCV(estimator=model_MLP_RAW, param_grid=param_gridMLPC, cv= 5)
---> 12 CV_unknwnMLPC.fit(X_train, y_train)
     13 
     14 print(CV_unknwnMLPC.best_params_)

~\Anaconda3\lib\site-packages\sklearn\model_selection\_search.py in fit(self, X, y, groups, **fit_params)
    638                                   error_score=self.error_score)
    639           for parameters, (train, test) in product(candidate_params,
--> 640                                                    cv.split(X, y, groups)))
    641 
    642         # if one choose to see train score, "out" will contain train score info

~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self, iterable)
    777             # was dispatched. In particular this covers the edge
    778             # case of Parallel used with an exhausted iterator.
--> 779             while self.dispatch_one_batch(iterator):
    780                 self._iterating = True
    781             else:

~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in dispatch_one_batch(self, iterator)
    623                 return False
    624             else:
--> 625                 self._dispatch(tasks)
    626                 return True
    627 

~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in _dispatch(self, batch)
    586         dispatch_timestamp = time.time()
    587         cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
--> 588         job = self._backend.apply_async(batch, callback=cb)
    589         self._jobs.append(job)
    590 

~\Anaconda3\lib\site-packages\sklearn\externals\joblib\_parallel_backends.py in apply_async(self, func, callback)
    109     def apply_async(self, func, callback=None):
    110         """Schedule a func to be run"""
--> 111         result = ImmediateResult(func)
    112         if callback:
    113             callback(result)

~\Anaconda3\lib\site-packages\sklearn\externals\joblib\_parallel_backends.py in __init__(self, batch)
    330         # Don't delay the application, to avoid keeping the input
    331         # arguments in memory
--> 332         self.results = batch()
    333 
    334     def get(self):

~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self)
    129 
    130     def __call__(self):
--> 131         return [func(*args, **kwargs) for func, args, kwargs in self.items]
    132 
    133     def __len__(self):

~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in <listcomp>(.0)
    129 
    130     def __call__(self):
--> 131         return [func(*args, **kwargs) for func, args, kwargs in self.items]
    132 
    133     def __len__(self):

~\Anaconda3\lib\site-packages\sklearn\model_selection\_validation.py in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, error_score)
    456             estimator.fit(X_train, **fit_params)
    457         else:
--> 458             estimator.fit(X_train, y_train, **fit_params)
    459 
    460     except Exception as e:

~\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py in fit(self, X, y)
    971         """
    972         return self._fit(X, y, incremental=(self.warm_start and
--> 973                                             hasattr(self, "classes_")))
    974 
    975     @property

~\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py in _fit(self, X, y, incremental)
    324 
    325         # Validate input parameters.
--> 326         self._validate_hyperparameters()
    327         if np.any(np.array(hidden_layer_sizes) <= 0):
    328             raise ValueError("hidden_layer_sizes must be > 0, got %s." %

~\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py in _validate_hyperparameters(self)
    390         if self.max_iter <= 0:
    391             raise ValueError("max_iter must be > 0, got %s." % self.max_iter)
--> 392         if self.alpha < 0.0:
    393             raise ValueError("alpha must be >= 0, got %s." % self.alpha)
    394         if (self.learning_rate in ["constant", "invscaling", "adaptive"] and

ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()

I checked some answers online and double-checked the parameters in param_gridMLPC to make sure they are specified correctly, but the error persists.

What am I doing wrong?

Thanks in advance


Solution

  • 'alpha': [10.0 ** -np.arange(1, 7)]

    From the documentation of MLPClassifier:

    alpha : float, optional, default 0.0001

    L2 penalty (regularization term) parameter.

    "alpha" should be float. So in the parameter grid, it can be a list of different floats.

    But when you do this:

    'alpha': [10.0 ** -np.arange(1, 7)]
    

    This is a list containing a single NumPy array, i.e. a sequence of sequences (like a list of lists or a 2-D array). So the first (and only) element of the list, the whole array, is passed to the internal MLPClassifier as alpha. When _validate_hyperparameters then evaluates alpha < 0.0, comparing an array against a scalar produces an array of booleans, which is exactly the "truth value of an array ... is ambiguous" error in the traceback.
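    If it helps, here is a quick check (not part of the original code) showing what that grid value actually contains:

    import numpy as np

    values = [10.0 ** -np.arange(1, 7)]
    print(len(values))      # 1 -- a list with a single element
    print(values[0])        # the whole array: 0.1, 0.01, ..., 1e-06
    print(values[0] < 0.0)  # an array of booleans, so "if alpha < 0.0:" is ambiguous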

    You can do the following:

    'alpha': 10.0 ** -np.arange(1, 7)
    

    That is a flat array of floats, and GridSearchCV will pick the individual float values from it to pass to the model as alpha.
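    For reference, a corrected grid, keeping everything else from the question unchanged, could look like the sketch below (a list comprehension such as [float(a) for a in 10.0 ** -np.arange(1, 7)] would work just as well if you prefer an explicit list of floats):

    param_gridMLPC = {
        'learning_rate': ["constant", "invscaling", "adaptive"],
        'hidden_layer_sizes': [x for x in itertools.product((10, 20, 30, 40, 50, 100), repeat=3)],
        'alpha': 10.0 ** -np.arange(1, 7),  # flat array of six floats, not a list containing an array
        'activation': ["logistic", "relu", "tanh"]
    }

    CV_unknwnMLPC = GridSearchCV(estimator=model_MLP_RAW, param_grid=param_gridMLPC, cv=5)
    CV_unknwnMLPC.fit(X_train, y_train)
    print(CV_unknwnMLPC.best_params_)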