sklearn Learning Notes: Model Validation Methods
Model Validation Methods
1. Learning curve (learning_curve)
2. Cross-validation score (cross_val_score)
3. Validation curve (validation_curve)
1. Learning Curve
Computes the cross-validated training and test scores of a given estimator for training sets of different sizes. First, a cross-validation generator splits the whole dataset K times, each split producing a training set and a test set. Then, subsets of increasing size are drawn from each split's training set and the model is fitted on each subset. Next, the model's scores on the corresponding training subset and on the test set are computed. Finally, for each training-set size, the K training scores and the K test scores are averaged.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve, ShuffleSplit
from sklearn.datasets import load_digits
from sklearn.naive_bayes import GaussianNB
from sklearn import svm

def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    # Plot the mean training and cross-validation scores, with
    # one-standard-deviation bands, for increasing training-set sizes.
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel('Training examples')
    plt.ylabel('Score')
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1,
                     color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color='r',
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color='g',
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt

if __name__ == '__main__':
    digits = load_digits()
    X = digits.data
    y = digits.target
    # 100 random 80/20 splits
    cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
    estimator = GaussianNB()
    title = "Learning Curves (naive Bayes)"
    plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)

    title = r"Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
    # Only 10 random splits here, so the second curve is cheaper to compute
    cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
    estimator = svm.SVC(gamma=0.001)
    plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
    plt.show()
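If you only need the raw numbers rather than a figure, learning_curve can also be called directly. A minimal sketch (the 5-fold cv and the size grid here are arbitrary choices, not from the original note):

import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.naive_bayes import GaussianNB

X, y = load_digits(return_X_y=True)
# Five training-set sizes with 5-fold CV: each scores array has shape (5, 5)
sizes, train_scores, test_scores = learning_curve(
    GaussianNB(), X, y, cv=5, train_sizes=np.linspace(0.1, 1.0, 5))
print(sizes)                      # absolute training-set size at each step
print(train_scores.mean(axis=1))  # mean training score per size
print(test_scores.mean(axis=1))   # mean cross-validation score per size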
2. Cross-Validation Score

cross_val_score evaluates an estimator by cross-validation. Here the SVM penalty parameter C is swept over a log-spaced grid, and the mean CV score is plotted together with a one-standard-deviation band.
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.model_selection import cross_val_score

digits = datasets.load_digits()
x = digits.data
y = digits.target
vsc = svm.SVC(kernel='linear')

if __name__ == '__main__':
    c_S = np.logspace(-10, 0, 10)  # 10 candidate values of C, from 1e-10 to 1
    scores = list()
    scores_std = list()
    for c in c_S:
        vsc.C = c
        # n_jobs=4 evaluates the CV folds in four parallel processes
        this_scores = cross_val_score(vsc, x, y, n_jobs=4)
        scores.append(np.mean(this_scores))
        scores_std.append(np.std(this_scores))
    plt.figure(1, figsize=(4, 3))
    plt.clf()
    plt.semilogx(c_S, scores)  # mean CV score against C on a log axis
    # Dashed lines mark one standard deviation above and below the mean
    plt.semilogx(c_S, np.array(scores) + np.array(scores_std), 'b--')
    plt.semilogx(c_S, np.array(scores) - np.array(scores_std), 'b--')
    locs, labels = plt.yticks()
    plt.yticks(locs, list(map(lambda v: "%g" % v, locs)))
    plt.ylabel('CV score')
    plt.xlabel('parameter C')
    plt.ylim(0, 1.1)
    plt.show()
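For a quick score without any plotting, cross_val_score can be used on its own. A minimal sketch with cv and scoring made explicit (C=1.0 here is just an illustrative value):

from sklearn import datasets, svm
from sklearn.model_selection import cross_val_score

X, y = datasets.load_digits(return_X_y=True)
clf = svm.SVC(kernel='linear', C=1.0)
# Five folds; scoring='accuracy' matches the classifier's default scorer
scores = cross_val_score(clf, X, y, cv=5, scoring='accuracy')
print("accuracy: %.3f +/- %.3f" % (scores.mean(), scores.std()))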
3. Validation Curve

As a single parameter is varied, the model's training-set and test-set scores are computed at each value. This works like a grid search over one parameter, except that validation_curve also returns the training-set scores, which makes it easy to see where the model starts to overfit.

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.model_selection import validation_curve
from sklearn.datasets import load_digits

digits = load_digits()
X = digits.data
y = digits.target
param_range = np.logspace(-6, -1, 5)
vsc = svm.SVC()
train_score, test_score = validation_curve(
    vsc, X, y, param_name='gamma', param_range=param_range,
    cv=10, scoring="accuracy", n_jobs=1)
train_score_mean = np.mean(train_score, axis=1)
train_score_std = np.std(train_score, axis=1)
test_score_mean = np.mean(test_score, axis=1)
test_score_std = np.std(test_score, axis=1)
plt.title("Validation curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_score_mean, label="Training score",
             color="darkorange", lw=lw)
plt.fill_between(param_range, train_score_mean - train_score_std,
                 train_score_mean + train_score_std, alpha=0.2,
                 color="darkorange", lw=lw)
plt.semilogx(param_range, test_score_mean, label="Cross-validation score",
             color="navy", lw=lw)
plt.fill_between(param_range, test_score_mean - test_score_std,
                 test_score_mean + test_score_std, alpha=0.2,
                 color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
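The arrays returned by validation_curve can also be used to pick a parameter value directly, without a plot. A minimal sketch under the same setup as above (choosing gamma by the highest mean CV score is one common heuristic, not part of the original note):

import numpy as np
from sklearn import svm
from sklearn.datasets import load_digits
from sklearn.model_selection import validation_curve

X, y = load_digits(return_X_y=True)
param_range = np.logspace(-6, -1, 5)
train_score, test_score = validation_curve(
    svm.SVC(), X, y, param_name='gamma', param_range=param_range, cv=10)
# Average over the 10 folds, then take the gamma with the best mean CV score
best_gamma = param_range[np.argmax(test_score.mean(axis=1))]
print("best gamma by mean CV score:", best_gamma)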