sklearn GridSearchCV Grid Search: Example and Code

Table of Contents

  • Preparing the Data
  • Grid Search Parameters
  • Evaluating the Results
  • Full Code

 

The required packages are as follows:

import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold

Preparing the Data

A random forest is used as the example model. First, prepare the data:

def get_train_x_y():  # use randomly generated numbers as stand-in data
    x = pd.DataFrame(data=np.random.randint(0, 10, size=(100, 5)))
    y = np.random.randint(0, 2, 100)
    x = StandardScaler().fit_transform(x)
    return x, y

if __name__ == '__main__':
    x_train, y_train = get_train_x_y()
    x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.3)
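
The random data above is only a stand-in so the example runs anywhere. To try the same pipeline on real data, get_train_x_y can be replaced. The sketch below assumes sklearn's bundled load_breast_cancer dataset; the function name get_real_x_y is only illustrative.

from sklearn.datasets import load_breast_cancer

def get_real_x_y():
    data = load_breast_cancer()
    x = StandardScaler().fit_transform(data.data)  # standardize the real features
    y = data.target                                # binary labels (0/1)
    return x, y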

Grid Search Parameters

    # Build the random forest model
    rf_model = RandomForestClassifier()

    # Parameter grid to search over
    rf_param = {
        'n_estimators': range(25, 100, 25)
    }
    cv = KFold()
    rf_grid = GridSearchCV(rf_model, rf_param, cv=cv,
                           scoring=['accuracy', 'recall'], refit=False)
    rf_grid.fit(x_train, y_train)
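
With multiple scorers and refit=False, GridSearchCV only fills cv_results_; best_params_ and best_estimator_ are not available. If you also want the refitted best model, pass one scorer name to refit. The variant below is a minimal sketch, not part of the original code; rf_grid_refit and the explicit KFold settings are illustrative choices.

    rf_grid_refit = GridSearchCV(
        rf_model,
        rf_param,
        cv=KFold(n_splits=5, shuffle=True, random_state=0),  # explicit, reproducible splitter
        scoring=['accuracy', 'recall'],
        refit='accuracy',  # refit the candidate that scores best on accuracy
    )
    rf_grid_refit.fit(x_train, y_train)
    print(rf_grid_refit.best_params_)  # winning parameter combination
    print(rf_grid_refit.best_score_)   # its mean cross-validated accuracy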

Evaluating the Results

def get_score_by_grid(grid):
    score_df = pd.DataFrame()
    for score in grid.scoring:  # collect each scorer's mean/std results into one DataFrame
        mean = grid.cv_results_['mean_test_' + score]
        score_df = pd.concat([pd.DataFrame(mean, columns=['mean_test_' + score]).T, score_df])
        std = grid.cv_results_['std_test_' + score]
        score_df = pd.concat([pd.DataFrame(std, columns=['std_test_' + score]).T, score_df])
    return score_df

print(get_score_by_grid(rf_grid))
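
The columns of the returned DataFrame are just candidate indices. To see which parameter combination each column corresponds to, grid.cv_results_['params'] can be printed alongside it; the loop below is an illustrative addition and follows the same candidate order as the columns.

for i, params in enumerate(rf_grid.cv_results_['params']):
    print(i, params)  # e.g. column 0 corresponds to {'n_estimators': 25}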

Full Code

import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold


def get_score_by_grid(grid):
    score_df = pd.DataFrame()
    for score in grid.scoring:
        mean = grid.cv_results_['mean_test_' + score]
        score_df = pd.concat([pd.DataFrame(mean, columns=['mean_test_' + score]).T, score_df])
        std = grid.cv_results_['std_test_' + score]
        score_df = pd.concat([pd.DataFrame(std, columns=['std_test_' + score]).T, score_df])
    return score_df


def get_train_x_y():
    x = pd.DataFrame(data=np.random.randint(0, 10, size=(100, 5)))
    y = np.random.randint(0, 2, 100)
    x = StandardScaler().fit_transform(x)
    return x, y


if __name__ == '__main__':
    x_train, y_train = get_train_x_y()
    x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.3)
    # Build the random forest model
    rf_model = RandomForestClassifier()

    # Parameter grid to search over
    rf_param = {
        'n_estimators': range(25, 100, 25)
    }
    cv = KFold()
    rf_grid = GridSearchCV(rf_model, rf_param, cv=cv, scoring=['accuracy', 'recall'], refit=False)
    rf_grid.fit(x_train, y_train)
    print(get_score_by_grid(rf_grid))
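
Note that the script creates x_test and y_test but never uses them. If refit is set to one of the scorer names, the best model is refitted on the training data and can then be evaluated on that held-out split. The lines below are a sketch of that extension, not part of the original script.

    rf_grid = GridSearchCV(rf_model, rf_param, cv=cv,
                           scoring=['accuracy', 'recall'], refit='accuracy')
    rf_grid.fit(x_train, y_train)
    # score() on a classifier returns accuracy on the held-out test split
    print('test accuracy:', rf_grid.best_estimator_.score(x_test, y_test))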