Udacity TensorFlow Notes 2 - Clothing Image Classification
Fashion MNIST Dataset
The dataset contains 10 classes of images (T-shirts, ankle boots, etc.). Each image is a 28*28-pixel grayscale image; there are 70,000 images in total, with 60K used as the training set and 10K as the test set.
# !pip install -U tensorflow_datasets
# !pip install -U -i https://pypi.tuna.tsinghua.edu.cn/simple tensorflow-gpu==1.12.0 # 2.0.0a0
import tensorflow as tf
# Only show TensorFlow errors, hiding info/warning noise
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
# tf.contrib is deprecated; in TF 2.0, Eager Execution is on by default
# import tensorflow.contrib.eager as tfe
# Enable Eager Execution. Call this once, right after importing TensorFlow, before any other ops.
tf.enable_eager_execution()
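# Quick sanity check (an added line, not in the original notebook): with eager
# execution enabled, ops run immediately and return concrete values.
print(tf.add(1, 2))  # -> tf.Tensor(3, shape=(), dtype=int32)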
import tensorflow_datasets as tfds
import math
import numpy as np
import matplotlib.pyplot as plt
import tqdm
import tqdm.auto
tqdm.tqdm = tqdm.auto.tqdm  # route tqdm.tqdm to the notebook-friendly auto variant
1.12.0
dataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)
train_dataset, test_dataset = dataset['train'], dataset['test']
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples
print("Number of training examples: {}".format(num_train_examples))
print("Number of test examples: {}".format(num_test_examples))
Number of training examples: 60000
Number of test examples: 10000
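To confirm the shapes described at the top, it helps to peek at one raw example before normalization (an added check; in tensorflow_datasets each fashion_mnist element is a 28*28*1 uint8 image with an integer label):
for image, label in train_dataset.take(1):
    print(image.shape, image.dtype, label.numpy())  # (28, 28, 1) uint8, label in 0..9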
# Normalize pixel values from the [0, 255] range to [0, 1]
def normalize(images, labels):
    images = tf.cast(images, tf.float32)
    images /= 255
    return images, labels
train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)
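A quick way to verify the map took effect (an added check, not in the original notebook) is to inspect the value range of one normalized image:
for image, label in train_dataset.take(1):
    print(image.numpy().min(), image.numpy().max())  # both should now lie in [0.0, 1.0]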
# Take a single test image, reshape it to 28x28, and plot it
for image, label in test_dataset.take(1):
    break
image = image.numpy().reshape((28, 28))
plt.figure()
plt.imshow(image, cmap=plt.cm.binary)
# Add a colorbar showing the pixel-intensity scale
plt.colorbar()
# Hide the grid lines
plt.grid(False)
# Plot the first 25 test images with their class labels
plt.figure(figsize=(10, 10))
i = 0
for image, label in test_dataset.take(25):
    image = image.numpy().reshape((28, 28))
    plt.subplot(5, 5, i+1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(image, cmap=plt.cm.binary)
    plt.xlabel(class_names[label])
    i += 1
# Build the model: two conv/pool blocks followed by two dense layers
model = tf.keras.Sequential([
    # 32 filters of size 3x3 with same padding, producing 32 feature maps
    tf.keras.layers.Conv2D(32, (3,3), padding='same', activation=tf.nn.relu,
                           input_shape=(28, 28, 1)),
    # Max pooling with strides=2 halves the spatial dimensions
    tf.keras.layers.MaxPooling2D((2, 2), strides=2),
    tf.keras.layers.Conv2D(64, (3,3), padding='same', activation=tf.nn.relu),
    tf.keras.layers.MaxPooling2D((2, 2), strides=2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
#model = tf.keras.Sequential([
#    # Alternative: a plain dense network; Flatten turns the 28x28x1 image into a 784-element vector
#    tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
#    tf.keras.layers.Dense(128, activation=tf.nn.relu),
#    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
#])
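Before compiling, it is worth tracing the shapes through the convolutional model (a worked check of the arithmetic, not output from the original run); model.summary() prints the same information along with parameter counts:
# Shape walkthrough (assuming the conv model above):
# (28, 28, 1) -> Conv2D 'same' -> (28, 28, 32) -> MaxPool -> (14, 14, 32)
# -> Conv2D 'same' -> (14, 14, 64) -> MaxPool -> (7, 7, 64)
# -> Flatten -> (3136,) -> Dense -> (128,) -> Dense -> (10,)
model.summary()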
# Compile the model
model.compile(optimizer=tf.train.AdamOptimizer(0.001),
              # Labels are plain integers, so use the sparse variant of categorical cross-entropy
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
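The "sparse" in the loss name refers only to the label format (an added illustration using standard TensorFlow ops): sparse_categorical_crossentropy accepts integer labels directly, whereas categorical_crossentropy would need their one-hot equivalents.
# Integer label, as the dataset provides:
sparse_label = 9                                     # 'Ankle boot'
# One-hot equivalent that categorical_crossentropy would expect:
one_hot_label = tf.one_hot(sparse_label, depth=10)   # [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]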
# Batch and shuffle the data. With BATCH_SIZE = 32, one epoch is math.ceil(60000 / 32) = 1875 steps, matching the progress bars below.
BATCH_SIZE = 32
train_dataset = train_dataset.repeat().shuffle(num_train_examples).batch(BATCH_SIZE)
test_dataset = test_dataset.batch(BATCH_SIZE)
model.fit(train_dataset, epochs=10, steps_per_epoch=math.ceil(num_train_examples/BATCH_SIZE), verbose=1)
Epoch 1/10
1875/1875 [==============================] - 35s 19ms/step - loss: 0.4032 - acc: 0.8542
Epoch 2/10
1875/1875 [==============================] - 29s 16ms/step - loss: 0.2568 - acc: 0.9071
Epoch 3/10
1875/1875 [==============================] - 29s 15ms/step - loss: 0.2163 - acc: 0.9201
Epoch 4/10
1875/1875 [==============================] - 30s 16ms/step - loss: 0.1832 - acc: 0.9327
Epoch 5/10
1875/1875 [==============================] - 30s 16ms/step - loss: 0.1557 - acc: 0.9437
Epoch 6/10
1875/1875 [==============================] - 29s 16ms/step - loss: 0.1357 - acc: 0.9501
Epoch 7/10
1875/1875 [==============================] - 29s 16ms/step - loss: 0.1162 - acc: 0.9570
Epoch 8/10
1875/1875 [==============================] - 29s 16ms/step - loss: 0.0949 - acc: 0.9651
Epoch 9/10
1875/1875 [==============================] - 29s 16ms/step - loss: 0.0803 - acc: 0.9705
Epoch 10/10
1875/1875 [==============================] - 28s 15ms/step - loss: 0.0696 - acc: 0.9749
# Evaluate accuracy on the test set
test_loss, test_acc = model.evaluate(test_dataset, steps=math.ceil(num_test_examples/BATCH_SIZE))
print("Accuracy on test dataset:", test_acc)
313/313 [==============================] - 2s 7ms/step
Accuracy on test dataset: 0.9222
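As a final check, the trained model can make predictions on a batch of test images (an added sketch following the same dataset conventions as above; not part of the original notebook output):
for test_images, test_labels in test_dataset.take(1):
    test_images = test_images.numpy()
    test_labels = test_labels.numpy()
predictions = model.predict(test_images)   # shape (32, 10): one probability row per image
print(predictions.shape)
print("predicted:", class_names[np.argmax(predictions[0])])
print("actual:   ", class_names[test_labels[0]])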
!jupyter nbconvert --to markdown "Udacity TensorFlow 2- .ipynb"