[Deep Learning with TensorFlow 2.0] Training and predicting with a TensorFlow 2.0 image classification model


Create a dataset folder and give it a name such as dataset. Inside it, create two subfolders named train and val. Under train, create one subfolder per object class you want to recognize, and create the same set of subfolders under val. Put each class's training images into the matching subfolder under train; these are the images the model will be trained on. Put each class's validation images into the matching subfolder under val. To train a reasonably accurate model, it is recommended to collect roughly 500 or more images per class (a quick count check is sketched after the directory tree below).
The directory structure looks like this:
.
|-- train
|   |-- animal
|   |-- flower
|   |-- guitar
|   |-- houses
|   `-- plane
`-- val
    |-- animal
    |-- flower
    |-- guitar
    |-- houses
    `-- plane
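
A quick way to check that every class folder contains enough images is to count the files per class. A minimal sketch, using the dataset path that appears later in this article (adjust it for your own machine):

import os

PATH = '/home/dongli/tensorflow2.0/corpus/dataset/'

# Count the images collected for each class in both splits.
for split in ('train', 'val'):
    split_dir = os.path.join(PATH, split)
    for class_name in sorted(os.listdir(split_dir)):
        class_dir = os.path.join(split_dir, class_name)
        print(split, class_name, len(os.listdir(class_dir)))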


Train a ResNet-50 residual neural network with TensorFlow 2.0.
# -*- coding: utf-8 -*-


from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator

import json
import os

from tensorflow.keras.applications.resnet50 import ResNet50

batch_size = 32
epochs = 100
IMG_HEIGHT = 224
IMG_WIDTH = 224

num_classes=5
image_input=224


PATH = '/home/dongli/tensorflow2.0/corpus/dataset/'


train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'val')


train_animal_dir = os.path.join(train_dir, 'animal')
train_flower_dir = os.path.join(train_dir, 'flower')
train_guitar_dir = os.path.join(train_dir, 'guitar')
train_houses_dir = os.path.join(train_dir, 'houses')
train_plane_dir = os.path.join(train_dir, 'plane')



validation_animal_dir = os.path.join(validation_dir, 'animal')
validation_flower_dir = os.path.join(validation_dir, 'flower')
validation_guitar_dir = os.path.join(validation_dir, 'guitar')
validation_houses_dir = os.path.join(validation_dir, 'houses')
validation_plane_dir = os.path.join(validation_dir, 'plane')






num_animal_tr = len(os.listdir(train_animal_dir))
num_flower_tr = len(os.listdir(train_flower_dir))
num_guitar_tr = len(os.listdir(train_guitar_dir))
num_houses_tr = len(os.listdir(train_houses_dir))
num_plane_tr = len(os.listdir(train_plane_dir))



num_animal_val = len(os.listdir(validation_animal_dir))
num_flower_val = len(os.listdir(validation_flower_dir))
num_guitar_val = len(os.listdir(validation_guitar_dir))
num_houses_val = len(os.listdir(validation_houses_dir))
num_plane_val = len(os.listdir(validation_plane_dir))



total_train = num_animal_tr+num_flower_tr+num_guitar_tr+num_houses_tr+num_plane_tr
total_val = num_animal_val + num_flower_val+num_guitar_val+num_houses_val+num_plane_val






print("Total training images:", total_train)
print("Total validation images:", total_val)




# Training data generator
# Augment the training images: rescale the pixel values to [0, 1] and apply small random width/height shifts.
image_gen_train = ImageDataGenerator(
                    rescale=1./255,
                    width_shift_range=0.1,
                    height_shift_range=0.1
                    )

train_data_gen = image_gen_train.flow_from_directory(batch_size=batch_size,
                                                     directory=train_dir,
                                                     shuffle=True,
                                                     target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                     class_mode='categorical')

# Validation data generator: only rescale, no augmentation

image_gen_val = ImageDataGenerator(rescale=1./255)

val_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size,
                                                 directory=validation_dir,
                                                 target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                 class_mode='categorical')




# Build the model: ResNet-50 trained from scratch (weights=None) with a 5-class output


model=ResNet50(include_top=True, weights=None,classes=num_classes)
# Compile the model

model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])

# Print a summary of the model architecture
model.summary()


# Save the mapping from class index to class name

model_class_dir = './flower_model/'
os.makedirs(model_class_dir, exist_ok=True)  # make sure the output directory exists
class_indices = train_data_gen.class_indices
class_json = {}
for eachClass in class_indices:
    class_json[str(class_indices[eachClass])] = eachClass

with open(os.path.join(model_class_dir, "model_class.json"), "w+") as json_file:
    json.dump(class_json, json_file, indent=4, separators=(",", " : "), ensure_ascii=True)
print("JSON Mapping for the model classes saved to ", os.path.join(model_class_dir, "model_class.json"))



model_name = 'model_ex-{epoch:03d}_acc-{val_accuracy:03f}.h5'

trained_model_dir='./flower_model/'
model_path = os.path.join(trained_model_dir, model_name)


checkpoint = tf.keras.callbacks.ModelCheckpoint(
             filepath=model_path,
             monitor='val_accuracy',
            verbose=2,
            save_weights_only=True,
            save_best_only=True,
            mode='max',
            period=1)


def lr_schedule(epoch):
    # Learning Rate Schedule

    lr =1e-3
    total_epochs = epochs  # use the total number of training epochs (the global setting), not the current epoch

    check_1 = int(total_epochs * 0.9)
    check_2 = int(total_epochs * 0.8)
    check_3 = int(total_epochs * 0.6)
    check_4 = int(total_epochs * 0.4)

    if epoch > check_1:
        lr *= 1e-4
    elif epoch > check_2:
        lr *= 1e-3
    elif epoch > check_3:
        lr *= 1e-2
    elif epoch > check_4:
        lr *= 1e-1

    return lr



#lr_scheduler =tf.keras.callbacks.LearningRateScheduler(lr_schedule)


lr_scheduler = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=1e-5)  # min_lr must be below Adam's default learning rate of 1e-3, otherwise the callback never actually reduces it


num_train = len(train_data_gen.filenames)
num_test = len(val_data_gen.filenames)

print(num_train,num_test)

# Train the model
# fit_generator consumes the batches produced by the ImageDataGenerator streams.

history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=int(num_train / batch_size),
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=int(num_test / batch_size),
    callbacks=[checkpoint,lr_scheduler])
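
The History object returned by fit_generator records the per-epoch metrics. A minimal sketch of how the recorded validation accuracy might be inspected once training has finished (the key names correspond to the metrics compiled above):

# Inspect the metrics recorded during training.
print(history.history.keys())  # e.g. ['loss', 'accuracy', 'val_loss', 'val_accuracy']
for epoch_index, val_acc in enumerate(history.history['val_accuracy'], start=1):
    print('epoch %03d  val_accuracy %.4f' % (epoch_index, val_acc))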





Saving the model. After training, the flower_model directory looks like this (a sketch for picking the best checkpoint automatically follows the listing):
flower_model
|-- model_class.json
|-- model_ex-001_acc-0.197690.h5
|-- model_ex-001_acc-0.199728.h5
|-- model_ex-002_acc-0.222826.h5
|-- model_ex-003_acc-0.230299.h5
|-- model_ex-004_acc-0.338315.h5
|-- model_ex-005_acc-0.442255.h5
|-- model_ex-006_acc-0.618886.h5
|-- model_ex-007_acc-0.629755.h5
|-- model_ex-008_acc-0.698370.h5
|-- model_ex-011_acc-0.798234.h5
|-- model_ex-012_acc-0.819973.h5
|-- model_ex-018_acc-0.834239.h5
|-- model_ex-020_acc-0.852582.h5
|-- model_ex-023_acc-0.877038.h5
|-- model_ex-024_acc-0.884511.h5
|-- model_ex-029_acc-0.890625.h5
|-- model_ex-030_acc-0.908967.h5
|-- model_ex-035_acc-0.910326.h5
|-- model_ex-041_acc-0.930707.h5
|-- model_ex-051_acc-0.953804.h5
|-- model_ex-054_acc-0.958560.h5
`-- model_ex-095_acc-0.959239.h5
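
Because save_best_only=True writes a checkpoint only when val_accuracy improves, the file with the highest accuracy in its name is the best one. A small sketch (assuming the file naming pattern shown above) that picks it automatically:

import os
import re

trained_model_dir = './flower_model/'

# Parse the validation accuracy out of each checkpoint file name and keep the best.
best_path, best_acc = None, -1.0
for name in os.listdir(trained_model_dir):
    match = re.match(r'model_ex-\d+_acc-([\d.]+)\.h5$', name)
    if match and float(match.group(1)) > best_acc:
        best_acc = float(match.group(1))
        best_path = os.path.join(trained_model_dir, name)

print('best checkpoint:', best_path, 'val_accuracy:', best_acc)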



If the model was saved with both its weights and its graph structure, it can be restored directly with load_model(). Here only the weight files were saved (the checkpoints were written with save_weights_only=True), so load_weights() must be used instead: rebuild the same model architecture, compile it, and then load the weights (a sketch of the load_model() path follows the code below).
class_num = 5  # number of classes the model was trained on

def create_model():
    base_model = ResNet50(include_top=True, weights=None, classes=class_num)
    model = tf.keras.Model(inputs=base_model.input, outputs=base_model.output)
    return model

# Rebuild the same architecture
model = create_model()

# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Load the trained weights
model.load_weights('./flower_model/model_ex-023_acc-0.864130.h5')
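
For the load_model() path mentioned above, the whole model (architecture plus weights) has to be saved first. A minimal sketch, using a hypothetical file name flower_resnet50_full.h5 rather than one of the weight-only checkpoints produced above:

# Save the architecture and weights together, then restore without rebuilding the model.
model.save('./flower_model/flower_resnet50_full.h5')  # hypothetical file name, for illustration

restored_model = tf.keras.models.load_model('./flower_model/flower_resnet50_full.h5')
restored_model.summary()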


Load the trained model weights and run prediction on a new image.
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from PIL import Image
import numpy as np
from io import BytesIO
import json
import requests
CLASS_INDEX = None
input_image_size=224
class_num=5

model_jsonPath='./flower_model/model_class.json'





def preprocess_input(x):
    # Scale pixel values to [0, 1], matching the rescale used during training.
    x *= (1. / 255)
    return x


def decode_predictions(preds, top=5, model_json=""):
    # Map the prediction probabilities to class names using the model_class.json
    # mapping written during training, and return the top-k (class name, probability)
    # pairs sorted by descending probability.
    global CLASS_INDEX

    if CLASS_INDEX is None:
        CLASS_INDEX = json.load(open(model_json))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        for i in top_indices:
            each_result = []
            each_result.append(CLASS_INDEX[str(i)])
            each_result.append(pred[i])
            results.append(each_result)
    return results


prediction_results = []

prediction_probabilities = []




url='https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1573119512&di=95ad0908ab5e5ce22a674471f0e4d5d1&imgtype=jpg&er=1&src=http%3A%2F%2Fwww.sinaimg.cn%2Fjc%2Fp%2F2007-06-21%2FU2143P27T1D450794F3DT20070621164533.jpg'

response = requests.get(url).content

image_input = Image.open(BytesIO(response))
image_input = image_input.convert('RGB')
image_input = image_input.resize((input_image_size,input_image_size))
image_input = np.expand_dims(image_input, axis=0)
image_to_predict = image_input.copy()
image_to_predict = np.asarray(image_to_predict, dtype=np.float64)
image_to_predict = preprocess_input(image_to_predict)


from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow import keras

def create_model():
    base_model=ResNet50(include_top=True, weights=None,classes=class_num)
    model = tf.keras.Model(inputs=base_model.input, outputs=base_model.output)
    return model


model=create_model()


# Compile the model (not strictly required just for prediction, but harmless)
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])

model.load_weights('./flower_model/model_ex-023_acc-0.864130.h5')

prediction = model.predict(x=image_to_predict)


try:
    predictiondata = decode_predictions(prediction, top=int(class_num), model_json=model_jsonPath)

    for result in predictiondata:
        prediction_results.append(str(result[0]))
        prediction_probabilities.append(result[1] * 100)
except Exception as e:
    raise ValueError("An error occurred! Try again.") from e


print(prediction_results[0],prediction_probabilities[0])
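
The script above prints only the single most likely class; since decode_predictions returns the classes sorted by probability, the full ranking can be listed just as easily:

# Print the full ranking returned by decode_predictions.
for class_name, probability in zip(prediction_results, prediction_probabilities):
    print('%s: %.2f%%' % (class_name, probability))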