Tags: python, python-3.x, scikit-learn, pipeline, xgboost

K value for SelectKBest() using chi2


I'm using slightly modified code from here: Ensemble Methods: Tuning a XGBoost model with Scikit-Learn

When I execute it, I keep getting this error:

ValueError: k should be >=0, <= n_features = 4; got 10. Use k='all' to return all features.

I have four features and a target. I've tried values 1-4 for k in the SelectKBest() step of the Pipeline parameters in the code below, but the same error persists.

Here is my reproducible code:

import pandas as pd
df = pd.DataFrame({'Number1': [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
                   'Color1': ['Red', 'Blue', 'Green', 'Yellow', 'Orange', 'Red',
                              'Blue', 'Green', 'Yellow', 'Orange'],
                   'Number2': [221, 222, 223, 224, 225, 226, 227, 228, 229, 230],
                   'Trait1': ['Jogger', 'Sedentary', 'Tennis_Player', 'Graveyard_Shift_Worker',
                              'Jogger', 'Fulltime_Mom', 'Tennis_Player', 'Couch_Potato',
                              'Jogger', 'Graveyard_Shift_Worker'],
                   'Target': ['yes', 'no', 'yes', 'no', 'yes', 'no', 'yes', 'no', 'yes', 'no']})

col = pd.Categorical(df['Target'])
df['Target'] = col.codes

from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import MinMaxScaler
class PreprocessTransformer(BaseEstimator, TransformerMixin):
    def __init__(self, cat_features, num_features):
        self.cat_features = cat_features
        self.num_features = num_features
    def fit(self, X, y=None):
        return self
    def transform(self, X, y=None):
        df = X.copy()
        # Convert columns to categorical
        for name in self.cat_features:
            col = pd.Categorical(df[name])
            df[name] = col.codes
        # Normalize numerical features
        scaler = MinMaxScaler()
        df[self.num_features] = scaler.fit_transform(df[self.num_features])
        return df

from sklearn.model_selection import train_test_split
# Split the dataset into training and testing
X_train, X_test, y_train, y_test = train_test_split(
    df.drop('Target', axis=1),
    df['Target'],
    test_size=0.2,
    random_state=42,
    shuffle=True,
    stratify=df['Target']
)

from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, chi2
import xgboost as xgb
# Get columns list for categorical and numerical
categorical_features = df.select_dtypes('object').columns.tolist()
numerical_features = df.select_dtypes('int64').columns.tolist()
# Create a pipeline
pipe = Pipeline([
  ('preproc', PreprocessTransformer(categorical_features, numerical_features)),
  ('fs', SelectKBest(k=0)),
  ('clf', xgb.XGBClassifier(objective='binary:logistic'))
])

from sklearn.model_selection import KFold, GridSearchCV
from sklearn.metrics import accuracy_score, make_scorer
# Define our search space for grid search
search_space = [
  {
    'clf__n_estimators': [50, 100, 150, 200],
    'clf__learning_rate': [0.01, 0.1, 0.2, 0.3],
    'clf__max_depth': range(3, 10),
    'clf__colsample_bytree': [i/10.0 for i in range(1, 3)],
    'clf__gamma': [i/10.0 for i in range(3)],
    'fs__score_func': [chi2],
    'fs__k': [10],
  }
]
# Define cross validation
kfold = KFold(n_splits=8, shuffle=True, random_state=42)
# AUC and accuracy as score
scoring = {'AUC':'roc_auc', 'Accuracy':make_scorer(accuracy_score)}
# Define grid search
grid = GridSearchCV(
  pipe,
  param_grid=search_space,
  cv=kfold,
  scoring=scoring,
  refit='AUC',
  verbose=1,
  n_jobs=-1
)
# Fit grid search
model = grid.fit(X_train, y_train)

Error:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-84-5c30ee0bb39f> in <module>
     28 )
     29 # Fit grid search
---> 30 model = grid.fit(X_train, y_train)

~/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/model_selection/_search.py in fit(self, X, y, groups, **fit_params)
    737             refit_start_time = time.time()
    738             if y is not None:
--> 739                 self.best_estimator_.fit(X, y, **fit_params)
    740             else:
    741                 self.best_estimator_.fit(X, **fit_params)

~/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/pipeline.py in fit(self, X, y, **fit_params)
    348             This estimator
    349         """
--> 350         Xt, fit_params = self._fit(X, y, **fit_params)
    351         with _print_elapsed_time('Pipeline',
    352                                  self._log_message(len(self.steps) - 1)):

~/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/pipeline.py in _fit(self, X, y, **fit_params)
    313                 message_clsname='Pipeline',
    314                 message=self._log_message(step_idx),
--> 315                 **fit_params_steps[name])
    316             # Replace the transformer of the step with the fitted
    317             # transformer. This is necessary when loading the transformer

~/anaconda3/envs/python3/lib/python3.6/site-packages/joblib/memory.py in __call__(self, *args, **kwargs)
    353 
    354     def __call__(self, *args, **kwargs):
--> 355         return self.func(*args, **kwargs)
    356 
    357     def call_and_shelve(self, *args, **kwargs):

~/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/pipeline.py in _fit_transform_one(transformer, X, y, weight, message_clsname, message, **fit_params)
    726     with _print_elapsed_time(message_clsname, message):
    727         if hasattr(transformer, 'fit_transform'):
--> 728             res = transformer.fit_transform(X, y, **fit_params)
    729         else:
    730             res = transformer.fit(X, y, **fit_params).transform(X)

~/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/base.py in fit_transform(self, X, y, **fit_params)
    572         else:
    573             # fit method of arity 2 (supervised transformation)
--> 574             return self.fit(X, y, **fit_params).transform(X)
    575 
    576 

~/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/feature_selection/_univariate_selection.py in fit(self, X, y)
    346                             % (self.score_func, type(self.score_func)))
    347 
--> 348         self._check_params(X, y)
    349         score_func_ret = self.score_func(X, y)
    350         if isinstance(score_func_ret, (list, tuple)):

~/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/feature_selection/_univariate_selection.py in _check_params(self, X, y)
    512             raise ValueError("k should be >=0, <= n_features = %d; got %r. "
    513                              "Use k='all' to return all features."
--> 514                              % (X.shape[1], self.k))
    515 
    516     def _get_support_mask(self):

ValueError: k should be >=0, <= n_features = 4; got 10. Use k='all' to return all features.

Solution

  • There are 4 features (Number1, Color1, Number2, Trait1).

    SelectKBest selects the k highest-scoring features from the original set, so k must be at least 0 and at most the total number of features (or the string 'all'). A standalone sketch at the end of this answer illustrates the valid range.

    You are telling the GridSearchCV object to always use k=10 in this line:

    'fs__k': [10]
    

    which overrides the value you set when declaring the pipeline step:

    ('fs', SelectKBest(k=0)),
    

    You can drop the 'fs__k' line and set the k you want directly in the pipeline declaration, or keep it and put only valid values of k in the search_space definition, as in the corrected grid below.
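
    One minimal fix, keeping the rest of your grid unchanged, is to let GridSearchCV tune k over the whole valid range for this 4-feature dataset:

    from sklearn.feature_selection import chi2

    search_space = [
      {
        'clf__n_estimators': [50, 100, 150, 200],
        'clf__learning_rate': [0.01, 0.1, 0.2, 0.3],
        'clf__max_depth': range(3, 10),
        'clf__colsample_bytree': [i/10.0 for i in range(1, 3)],
        'clf__gamma': [i/10.0 for i in range(3)],
        'fs__score_func': [chi2],
        'fs__k': [1, 2, 3, 4],  # was [10]; k must be <= n_features = 4
      }
    ]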
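
    And as a self-contained illustration of the constraint itself (a minimal sketch with made-up data, independent of the question's pipeline):

    import numpy as np
    from sklearn.feature_selection import SelectKBest, chi2

    rng = np.random.RandomState(0)
    X = rng.randint(0, 10, size=(10, 4))  # 10 samples, 4 non-negative features (chi2 requires >= 0)
    y = rng.randint(0, 2, size=10)

    # Any k in 0..4, or k='all', is accepted here
    X_new = SelectKBest(chi2, k=2).fit_transform(X, y)
    print(X_new.shape)  # (10, 2) -- the 2 highest-scoring features
    # SelectKBest(chi2, k=10).fit(X, y) would raise the same ValueError as above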