Backend with keras, part 3


Overview

I tried training and classifying using only the keras backend.

Photo

(backend12.png, produced by the code below: decision regions learned on the two-moons data, with the training points overlaid)

Sample code

from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.optimizers import SGD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons

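# two-moons toy data: 200 samples, 2 features, labels 0/1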
np.random.seed(0)
dx, dy = make_moons(200, noise = 0.20)
input_dim = 2
output_dim = 1
hidden_dim = 8
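# symbolic inputs and uniformly initialized weights/biases for a 2-8-1 network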
x = K.placeholder(shape = (None, input_dim), name = "x")
ytrue = K.placeholder(shape = (None, output_dim), name = "y")
W1 = K.random_uniform_variable((input_dim, hidden_dim), 0, 1, name = "W1")
W2 = K.random_uniform_variable((hidden_dim, output_dim), 0, 1, name = "W2")
b1 = K.random_uniform_variable((hidden_dim, ), 0, 1, name = "b1")
b2 = K.random_uniform_variable((output_dim, ), 0, 1, name = "b2")
params = [W1, b1, W2, b2]
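# forward pass: tanh hidden layer, tanh output, per-sample squared error loss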
hidden = K.tanh(K.dot(x, W1) + b1)
ypred = K.tanh(K.dot(hidden, W2) + b2)
loss = K.mean(K.square(ypred - ytrue), axis = -1)
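# plain SGD update ops, compiled into a single training step with K.function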
opt = SGD()
updates = opt.get_updates(params, [], loss)
train = K.function(inputs = [x, ytrue], outputs = [loss], updates = updates)
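# feed the 200 samples one at a time for 1000 epochs, printing the loss of the last sample every 100 epochs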
for ep in range(1000):
    for i in range(200):
        c3 = train([[dx[i]], [[dy[i]]]])
    if ep % 100 == 0:
        print (ep, c3[0])
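# compiled prediction function and a fine grid over the input plane for the decision boundary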
pred = K.function(inputs = [x], outputs = [ypred])
x_min, x_max = dx[ : , 0].min() - .5, dx[ : , 0].max() + .5
y_min, y_max = dx[ : , 1].min() - .5, dx[ : , 1].max() + .5
h = 0.01
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
zz = np.c_[xx.ravel(), yy.ravel()]
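# evaluate the network at every grid point, one point per call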
p = []
for i in range(zz.shape[0]):
    preds = pred([[zz[i, : ]]])
    p.append(preds[0][0])
p = np.array(p)
Z = p.reshape(xx.shape)
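# draw the decision regions, overlay the training points, and save the figure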
plt.contourf(xx, yy, Z, cmap = plt.cm.Spectral)
plt.scatter(dx[ : , 0], dx[ : , 1], c = dy, cmap = plt.cm.Spectral)
plt.savefig("backend12.png")
plt.show()
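
Note: the per-point prediction loop above can also be replaced with a single batch call. A minimal sketch, assuming the same pred, zz and xx defined in the code above:

# sketch: feed the whole (N, 2) grid to the compiled function in one batch
preds = pred([zz])[0]         # list with one (N, 1) array -> take the array
Z = preds.reshape(xx.shape)   # back to the meshgrid shape for contourf

This avoids the Python loop over every grid point and should run noticeably faster.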

That's all.