sklearnを使用したデータセットの分割と訓練 (Python 3.6)


研修の授業で2つの例を聞いて、融合してみました。主な手順: インポート → 分割 → トレーニング → モデルレポート。重要なポイント: (1) ラベルの二値化 (2) グリッドサーチによるパラメータ探索 (3) k分割交差検証 (4) ノイズ特徴量の追加(以前の内容に関連)
"""Split the iris dataset, tune an SVM with grid search + 5-fold CV,
and report the best parameters and held-out test performance."""
from sklearn import datasets
# NOTE: sklearn.cross_validation / sklearn.grid_search are removed in
# sklearn >= 0.20; both utilities now live in sklearn.model_selection.
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
import sklearn.exceptions  # imported but unused here; kept from the original

# Load the iris dataset.
# X (iris.data): feature matrix, 150 samples x 4 features.
# y (iris.target): integer class labels (0, 1, 2), length 150.
# If one-hot labels were needed: label_binarize(y, classes=[0, 1, 2]) -> 150x3.
iris = datasets.load_iris()

# Split 50/50 into train and test; random_state fixed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.5, random_state=0)

# Candidate hyper-parameter grids for the cross-validated search:
# one grid for the RBF kernel (gamma x C), one for the linear kernel (C only).
tuned_parameters = [
    {'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]},
    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]},
]

# Metric used to rank parameter combinations (weighted F1 across classes).
score = 'f1'

# Grid search with 5-fold cross validation drawn from the TRAINING split only;
# X_test stays held out for the final report below.
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
                   scoring='%s_weighted' % score)
clf.fit(X_train, y_train)

# Best parameter combination found on the training folds.
print(clf.best_params_)

# Mean score and ~95% interval (1.96 * std) for every candidate combination.
# grid_scores_ was removed in sklearn 0.20; cv_results_ is the replacement.
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
    print("%.3f (+/-%0.03f) for %r" % (mean, std * 1.96, params))

# Final evaluation on the held-out test set.
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))