skopt hyperparameter optimization example

import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import load_boston
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import cross_val_score

# note: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2,
# so running this as-is requires an older scikit-learn version
X, y = load_boston(return_X_y=True)

n_features = X.shape[1]

# base estimator; the hyperparameters declared in the search space below will be tuned
model = GradientBoostingRegressor(n_estimators=50, random_state=1)
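
As a quick sanity check (not in the original post), the untuned model's 5-fold cross-validated MAE can be computed first, so the tuned result has a baseline to compare against:

# baseline 5-fold CV MAE of the untuned model (sketch, not part of the original post)
baseline_mae = -np.mean(cross_val_score(model, X, y, cv=5, n_jobs=-1,
                                        scoring="neg_mean_absolute_error"))
print("Baseline MAE: %.4f" % baseline_mae)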

from skopt.space import Real, Integer, Categorical
from skopt.utils import use_named_args

space = [Integer(1, 5, name='max_depth'),
         Real(10**-5, 10**0, 'log-uniform', name='learning_rate'),
         Integer(1, n_features, name='max_features'),
         Integer(2, 100, name='min_samples_split'),
         Integer(1, 100, name='min_samples_leaf')]

# this decorator allows the objective function to receive the parameters as
# keyword arguments, which is convenient when setting scikit-learn estimator parameters
@use_named_args(space)
def objective(**params):
    model.set_params(**params)
    # cross_val_score returns negative MAE (higher is better); negate it so
    # gp_minimize sees a quantity to minimize
    return -np.mean(cross_val_score(model, X, y, cv=5, n_jobs=-1,
                                    scoring="neg_mean_absolute_error"))

from skopt import gp_minimize
result = gp_minimize(objective, space, n_calls=50, random_state=0)
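
The returned OptimizeResult stores the best objective value in result.fun and the matching parameter values (in the order the space was declared) in result.x; for example:

# best MAE found and the corresponding hyperparameter values
print("Best MAE: %.4f" % result.fun)
for dim, val in zip(space, result.x):
    print("%s = %s" % (dim.name, val))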

from skopt.plots import plot_convergence
from skopt.plots import plot_evaluations
from skopt.plots import plot_objective

plot_convergence(result)   # best objective value found so far vs. number of calls
plot_evaluations(result)   # where in the search space the evaluations were placed
plot_objective(result)     # partial dependence of the objective on each dimension

plt.show()
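
As a possible follow-up (a sketch, not part of the original post), the tuned values can be written back into the estimator and the model refit on the full dataset:

# refit with the best hyperparameters found (sketch; assumes the names in `space`
# match the estimator's parameter names, as they do above)
best_params = {dim.name: val for dim, val in zip(space, result.x)}
model.set_params(**best_params)
model.fit(X, y)
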
Original source: https://www.cnblogs.com/wzdLY/p/9679521.html