catboost调参

1. 网格搜索调参

参考博客:Using Grid Search to Optimise CatBoost Parameters

2. Bayesian方法调参:

from skopt.space import Real, Integer
from skopt.utils import use_named_args
from skopt import gp_minimize

# The regressor whose hyper-parameters we tune: quiet training, MAE objective.
reg = CatBoostRegressor(loss_function='MAE', verbose=0)

# Search-space table: (dimension class, low, high, CatBoost parameter name).
# The order here matters — res_gp.x reports the optimum in this same order.
_dims = [
    (Integer, 1, 10, 'depth'),
    (Integer, 250, 1000, 'iterations'),
    (Real, 0.02, 0.3, 'learning_rate'),
    (Integer, 1, 100, 'l2_leaf_reg'),
    (Integer, 5, 200, 'border_count'),
    (Integer, 5, 200, 'ctr_target_border_count'),
]
space = [cls(lo, hi, name=nm) for cls, lo, hi, nm in _dims]

#定义@修饰下的objective
@use_named_args(space)
def objective(**params):
    """Score one candidate point for gp_minimize: mean 5-fold CV MAE.

    gp_minimize *minimizes* the returned value, so the raw (positive) MAE
    produced by make_scorer(mean_absolute_error) has the correct sign here.
    """
    reg.set_params(**params)
    fold_scores = cross_val_score(
        reg,
        train_feature_select,
        train_label,
        cv=5,
        n_jobs=-1,
        scoring=make_scorer(mean_absolute_error),
    )
    return np.mean(fold_scores)

# Run Bayesian optimization: 50 evaluations of `objective` over `space`.
res_gp = gp_minimize(objective, space, n_calls=50, random_state=0)

# res_gp.fun is the best (lowest) mean CV MAE found.
print("Best score=%.4f" % res_gp.fun)

# res_gp.x holds the optimum in the same order as `space`.
# Fix: `iterations` is an Integer dimension, so print it with %d —
# the original %.6f rendered it misleadingly as e.g. 500.000000.
print("""Best parameters:
- depth=%d
- iterations=%d
- learning_rate=%.6f
- l2_leaf_reg=%d
- border_count=%d
- ctr_target_border_count=%d""" % tuple(res_gp.x))

3. 查看特征的importance(feature importance)

# Export the fitted model's per-feature importances, highest first.
fea_df = pd.DataFrame(
    {
        'feature': reg.feature_names_,
        'importance': reg.feature_importances_,
    }
)
fea_df = fea_df.sort_values('importance', ascending=False)
fea_df.to_csv('feature_importance.csv')
原文地址:https://www.cnblogs.com/54hys/p/12664856.html