OpenCV python sklearn ランダムハイパーパラメータ検索の実現
4202 ワード
本論文では OpenCV python sklearn によるランダムハイパーパラメータ検索の実現を紹介します。
"""Random hyperparameter search for a Keras regressor via sklearn's
RandomizedSearchCV (California-housing regression demo)."""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import tensorflow as tf
from tensorflow_core.python.keras.api._v2 import keras  # TF2 Keras API
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from scipy.stats import reciprocal

# Silence TensorFlow INFO/WARNING messages.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# This script targets the TF2 API surface only.
assert tf.__version__.startswith('2.')

# 0. Print the environment versions for reproducibility.
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, sklearn, pd, tf, keras:
    print("%s version:%s" % (module.__name__, module.__version__))
#
def plot_learning_curves(his):
    """Plot the per-epoch metrics recorded in a Keras History object.

    Each key of ``his.history`` (loss, val_loss, ...) becomes one curve;
    the y-axis is clamped to [0, 1].
    """
    frame = pd.DataFrame(his.history)
    frame.plot(figsize=(8, 5))
    plt.grid(True)
    axes = plt.gca()
    axes.set_ylim(0, 1)
    plt.show()
# 1. Load the California housing regression dataset.
housing = fetch_california_housing()
print(housing.DESCR)
print(housing.data.shape)
print(housing.target.shape)

# 2. Split into train / validation / test sets (fixed seeds so the
# splits are reproducible across runs).
x_train_all, x_test, y_train_all, y_test = train_test_split(
    housing.data, housing.target, random_state=7)
x_train, x_valid, y_train, y_valid = train_test_split(
    x_train_all, y_train_all, random_state=11)
print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
print(x_test.shape, y_test.shape)

# 3. Standardize features. BUG FIX: fit the scaler on the training split
# only and reuse its statistics for validation/test. The original called
# fit_transform on every split, which re-fits the scaler each time —
# leaking split statistics and applying inconsistent scalings.
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)
# keras
def build_model(hidden_layers=1,
                layer_size=30,
                learning_rate=3e-3):
    """Build and compile a fully-connected regression network.

    Args:
        hidden_layers: total number of hidden Dense layers (>= 1).
        layer_size: units per hidden layer.
        learning_rate: SGD learning rate.

    Returns:
        A compiled ``keras.models.Sequential`` with MSE loss.
    """
    model = keras.models.Sequential()
    # First hidden layer also fixes the input shape, taken from the
    # module-level training matrix.
    model.add(keras.layers.Dense(layer_size, activation="relu",
                                 input_shape=x_train.shape[1:]))
    # Remaining hidden layers, all the same width.
    for _ in range(hidden_layers - 1):
        model.add(keras.layers.Dense(layer_size, activation="relu"))
    # Single linear output unit for regression.
    model.add(keras.layers.Dense(1))
    # FIX: use the `learning_rate` kwarg — the `lr` alias is deprecated
    # in TF2 Keras optimizers.
    optimizer = keras.optimizers.SGD(learning_rate=learning_rate)
    model.compile(loss="mse", optimizer=optimizer)
    return model
def main():
    """Run a baseline fit, then a randomized hyperparameter search."""
    # 1. Wrap the Keras builder so sklearn's search API can drive it.
    sk_learn_model = keras.wrappers.scikit_learn.KerasRegressor(build_model)
    callbacks = [keras.callbacks.EarlyStopping(patience=5, min_delta=1e-2)]
    # Baseline training run with the builder's default hyperparameters.
    history = sk_learn_model.fit(x_train_scaled, y_train, epochs=100,
                                 validation_data=(x_valid_scaled, y_valid),
                                 callbacks=callbacks)

    # 2. Distributions to sample hyperparameters from.
    # reciprocal pdf: f(x) = 1/(x*log(b/a)) for a <= x <= b.
    param_distribution = {
        "hidden_layers": [1, 2, 3, 4],
        "layer_size": np.arange(1, 100),
        "learning_rate": reciprocal(1e-4, 1e-2),
    }

    # 3. Randomized search: 10 sampled settings, 3-fold cross-validation.
    # n_jobs=1 keeps the search single-process (presumably because the
    # wrapped Keras model does not survive worker pickling — TODO confirm).
    random_search_cv = RandomizedSearchCV(sk_learn_model,
                                          param_distribution,
                                          n_iter=10,
                                          cv=3,
                                          n_jobs=1)
    random_search_cv.fit(x_train_scaled, y_train, epochs=100,
                         validation_data=(x_valid_scaled, y_valid),
                         callbacks=callbacks)

    # 4. Report the best configuration and evaluate it on the test set.
    print(random_search_cv.best_params_)
    print(random_search_cv.best_score_)
    print(random_search_cv.best_estimator_)
    best_model = random_search_cv.best_estimator_.model
    print(best_model.evaluate(x_test_scaled, y_test))

    # 5. Visualize the baseline run's learning curves.
    plot_learning_curves(history)


if __name__ == '__main__':
    main()
以上が本文の全部です。皆さんの勉強に役に立つように、私たちを応援してください。