I'm running into a problem: tuning hyperparameters with GridSearchCV doesn't really improve my classifiers. I would expect a bigger gain, but the largest improvement I get with the current code is about ±0.03. My dataset has eight columns and an imbalanced binary target. For scoring I use F1, and I cross-validate with a KFold of 10 splits. I'm hoping someone can spot something that looks off. Thank you!
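For context, this is roughly how I check the imbalance (a minimal sketch, assuming y is a 1-D array of 0/1 labels; the printed numbers are only illustrative):

import numpy as np

# Count how often each class occurs in the binary target
values, counts = np.unique(y, return_counts=True)
print(dict(zip(values, counts)))  # e.g. {0: 850, 1: 150} -- illustrative only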
I use the following code:
import numpy as np
from sklearn.model_selection import GridSearchCV, KFold, cross_val_score, train_test_split
from sklearn.metrics import f1_score, make_scorer

model_parameters = {
    "GaussianNB": {},
    "DecisionTreeClassifier": {
        'min_samples_leaf': range(5, 9),
        # max_depth must be None or a positive integer; 0 is rejected at fit time
        'max_depth': [None, 1, 2, 3, 4]
    },
    "KNeighborsClassifier": {
        'n_neighbors': range(1, 10),
        'weights': ["distance", "uniform"]
    },
    "SVM": {
        'kernel': ["poly"],
        # C must be strictly positive, so the grid starts above 0
        'C': np.linspace(0.5, 15, 30)
    },
    "LogisticRegression": {
        'C': np.linspace(0.5, 15, 30),
        # only penalties supported by the default 'lbfgs' solver;
        # 'l1'/'elasticnet' would need solver='saga' (and an l1_ratio for 'elasticnet')
        'penalty': ["l2", "none"]
    }
}
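The models dict used in the loop further down isn't shown in my snippet. This is a sketch of how it might be defined, one estimator per key, under the assumption that "SVM" stands for sklearn's SVC:

from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression

models = {
    "GaussianNB": GaussianNB(),
    "DecisionTreeClassifier": DecisionTreeClassifier(),
    "KNeighborsClassifier": KNeighborsClassifier(),
    "SVM": SVC(),
    # higher max_iter is an assumption, to avoid convergence warnings
    "LogisticRegression": LogisticRegression(max_iter=5000),
}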
random_state = 42  # fixed seed so the shuffled folds are reproducible
# stratify=y keeps the class ratio of the imbalanced target the same in both splits
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, stratify=y,
                                                    random_state=random_state)
n_splits = 10
# micro-averaged F1, used to rank parameter combinations inside the grid search
scoring_method = make_scorer(f1_score, average="micro")
cv = KFold(n_splits=n_splits, random_state=random_state, shuffle=True)
classDict = {}  # per-model mean F1 after tuning
for model_name, parameters in model_parameters.items():
    # models is a dict with the 5 classifiers (see the sketch above)
    model = models[model_name]
    grid_search = GridSearchCV(model, parameters, cv=cv, n_jobs=-1,
                               scoring=scoring_method, verbose=False).fit(X_train, y_train)
    # cross-validate the tuned estimator on the held-out test split,
    # this time with the plain binary 'f1' scorer
    cvScore = cross_val_score(grid_search.best_estimator_, X_test, y_test,
                              cv=cv, scoring='f1').mean()
    classDict[model_name] = cvScore
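The ±0.03 figure comes from comparing these scores against untuned baselines. The baseline computation is not in the snippet above; it looks roughly like this (a sketch using the same cv and 'f1' scoring with the untuned estimators from models):

# Sketch: mean F1 of each untuned estimator, computed the same way as cvScore above
baseline_scores = {
    name: cross_val_score(models[name], X_test, y_test, cv=cv, scoring='f1').mean()
    for name in models
}
for name, baseline in baseline_scores.items():
    print(f"{name}: baseline={baseline:.3f}, tuned={classDict[name]:.3f}")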