I am using the following code:
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import mean_squared_error,roc_auc_score,precision_score
pd.options.display.max_columns = 999
import lightgbm as lgb
def load_csv(filepath):
    data = []
    col = []
    checkcol = False
    with open(filepath) as f:
        for val in f.readlines():
            val = val.replace("\n", "")
            val = val.split(',')
            if checkcol is False:
                col = val
                checkcol = True
            else:
                data.append(val)
    df = pd.DataFrame(data=data, columns=col)
    return df
heart=load_csv(r'C:\Users\PC\Documents\Essay\heart.csv')
df=heart[['chol','cp']]
Y=heart['sex']
sc=StandardScaler()
sc.fit(df)
X=pd.DataFrame(sc.fit_transform(df))
X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.3,random_state=0)
d_train=lgb.Dataset(X_train, label=y_train)
params={}
params['learning_rate']=0.03
params['boosting_type']='gbdt' #GradientBoostingDecisionTree
params['objective']='binary' #Binary target feature
params['metric']='binary_logloss' #metric for binary classification
params['max_depth']=10
clf=lgb.train(params,d_train,100)
Only to get the error message:
ValueError: Series.dtypes must be int, float or bool
I know that this is because of my choice of Y, but I have also tried arrays and nested lists here, which still fail.
Using LabelEncoder allows you to convert your column to the expected format:
from sklearn import preprocessing
encoder = preprocessing.LabelEncoder()
encoder.fit(heart['sex'])
encoder.transform(heart['sex'])
This yields an array of zeros and ones that you can feed into your learning algorithm, for example:
array([1, 0, 0, 0, 1])
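For completeness, here is a minimal sketch of how the encoded target plugs back into the pipeline from your question, reusing the heart DataFrame and params dict you already defined. Note that your hand-rolled load_csv returns every value as a string, so the feature columns are cast to numeric here as well (assuming 'chol' and 'cp' contain only numeric values in heart.csv):

import pandas as pd
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import lightgbm as lgb

# load_csv builds the DataFrame from split text lines, so every column is
# object dtype; cast the feature columns to float before scaling
features = heart[['chol', 'cp']].astype(float)

# Encode the target column into integer labels LightGBM accepts
encoder = preprocessing.LabelEncoder()
y = encoder.fit_transform(heart['sex'])

sc = StandardScaler()
X = pd.DataFrame(sc.fit_transform(features), columns=features.columns)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
d_train = lgb.Dataset(X_train, label=y_train)
clf = lgb.train(params, d_train, 100)  # params as defined in your question

fit_transform is just the fit and transform steps from above combined into one call; with both the label and the features converted to numeric types, lgb.train should no longer raise the dtype error.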