Cifar10用DCGANモデルを晒すwith keras
15994 ワード
概要
- 時間がないので,GANの仕組みなどはとりあえず割愛
- generatorとdiscriminatorの設定を書く
- 味噌はgenerator,discriminatorともに活性化関数にLeakyReLUを使う
- 学習プロセス全体のソースコードはgithubにアップロード予定です(準備中です申し訳ありません)
モデルパラメータ達
Generator
generator
def _build_generator(self) -> Model:
    """Build the DCGAN generator: noise vector -> 32x32x3 image in [-1, 1].

    All kernels use N(0, 0.02) initialization (standard DCGAN setup), and
    every hidden activation is LeakyReLU(0.2).
    """
    base_w, base_h = 4, 4
    init = RandomNormal(mean=0.0, stddev=0.02)

    noise_in = Input(shape=self.noise_shape)

    # Project the noise vector up to a 4x4x256 feature map.
    h = Dense(
        units=256 * base_w * base_h,
        kernel_initializer=init,
        bias_initializer='zeros')(noise_in)
    h = LeakyReLU(alpha=0.2)(h)
    h = Reshape((base_w, base_h, 256))(h)

    # Three identical stride-2 transposed convolutions upsample
    # 4x4 -> 8x8 -> 16x16 -> 32x32.
    for _ in range(3):
        h = Conv2DTranspose(
            filters=128,
            kernel_size=4,
            strides=2,
            padding='same',
            kernel_initializer=init,
            bias_initializer='zeros')(h)
        h = LeakyReLU(alpha=0.2)(h)

    # Final 3-channel conv + tanh squashes pixels into [-1, 1].
    h = Conv2D(
        filters=3,
        kernel_size=3,
        padding='same',
        kernel_initializer=init,
        bias_initializer='zeros')(h)
    image_out = Activation('tanh')(h)

    model = Model(noise_in, image_out)
    if self.verbose:
        model.summary()
    return model
Discriminator
discriminator
def _build_discriminator(self) -> "tuple[Model, Model]":
    """Build the DCGAN discriminator: image -> real/fake probability.

    Returns:
        (model4d, model4g): two ``Model`` handles wrapping the SAME layer
        graph (the weights are shared) — one intended for discriminator
        training and one for stacking under the generator.

    Note: the original annotation said ``-> Model`` but the function has
    always returned a 2-tuple; the annotation is corrected here.
    """
    init = RandomNormal(mean=0.0, stddev=0.02)

    inputs = Input(shape=self.shape)
    # A little input noise regularizes D and keeps it from overfitting.
    x = GaussianNoise(stddev=0.05)(inputs)

    # Four conv stages; the stride-2 stages halve spatial size each time.
    # 'same' padding is used (lowercase, consistent with the generator;
    # Keras normalizes case, so behavior is unchanged).
    for filters, strides in ((64, 1), (128, 2), (128, 2), (256, 2)):
        x = Conv2D(
            filters=filters,
            kernel_size=3,
            strides=strides,
            padding='same',
            kernel_initializer=init,
            bias_initializer='zeros')(x)
        x = LeakyReLU(alpha=0.2)(x)

    x = Flatten()(x)
    features = Dropout(0.4)(x)
    validity = Dense(1, activation='sigmoid')(features)

    # Two wrappers over the identical graph: weights are shared, so
    # training one updates the other.
    model4d = Model(inputs, validity)
    model4g = Model(inputs, validity)
    if self.verbose:
        model4d.summary()
    return model4d, model4g
出力結果
Generator
generator
def _build_generator(self) -> Model:
    """Build the DCGAN generator: noise vector -> 32x32x3 image in [-1, 1].

    All kernels use N(0, 0.02) initialization (standard DCGAN setup), and
    every hidden activation is LeakyReLU(0.2).
    """
    base_w, base_h = 4, 4
    init = RandomNormal(mean=0.0, stddev=0.02)

    noise_in = Input(shape=self.noise_shape)

    # Project the noise vector up to a 4x4x256 feature map.
    h = Dense(
        units=256 * base_w * base_h,
        kernel_initializer=init,
        bias_initializer='zeros')(noise_in)
    h = LeakyReLU(alpha=0.2)(h)
    h = Reshape((base_w, base_h, 256))(h)

    # Three identical stride-2 transposed convolutions upsample
    # 4x4 -> 8x8 -> 16x16 -> 32x32.
    for _ in range(3):
        h = Conv2DTranspose(
            filters=128,
            kernel_size=4,
            strides=2,
            padding='same',
            kernel_initializer=init,
            bias_initializer='zeros')(h)
        h = LeakyReLU(alpha=0.2)(h)

    # Final 3-channel conv + tanh squashes pixels into [-1, 1].
    h = Conv2D(
        filters=3,
        kernel_size=3,
        padding='same',
        kernel_initializer=init,
        bias_initializer='zeros')(h)
    image_out = Activation('tanh')(h)

    model = Model(noise_in, image_out)
    if self.verbose:
        model.summary()
    return model
Discriminator
discriminator
def _build_discriminator(self) -> "tuple[Model, Model]":
    """Build the DCGAN discriminator: image -> real/fake probability.

    Returns:
        (model4d, model4g): two ``Model`` handles wrapping the SAME layer
        graph (the weights are shared) — one intended for discriminator
        training and one for stacking under the generator.

    Note: the original annotation said ``-> Model`` but the function has
    always returned a 2-tuple; the annotation is corrected here.
    """
    init = RandomNormal(mean=0.0, stddev=0.02)

    inputs = Input(shape=self.shape)
    # A little input noise regularizes D and keeps it from overfitting.
    x = GaussianNoise(stddev=0.05)(inputs)

    # Four conv stages; the stride-2 stages halve spatial size each time.
    # 'same' padding is used (lowercase, consistent with the generator;
    # Keras normalizes case, so behavior is unchanged).
    for filters, strides in ((64, 1), (128, 2), (128, 2), (256, 2)):
        x = Conv2D(
            filters=filters,
            kernel_size=3,
            strides=strides,
            padding='same',
            kernel_initializer=init,
            bias_initializer='zeros')(x)
        x = LeakyReLU(alpha=0.2)(x)

    x = Flatten()(x)
    features = Dropout(0.4)(x)
    validity = Dense(1, activation='sigmoid')(features)

    # Two wrappers over the identical graph: weights are shared, so
    # training one updates the other.
    model4d = Model(inputs, validity)
    model4g = Model(inputs, validity)
    if self.verbose:
        model4d.summary()
    return model4d, model4g
出力結果
出力結果の行はクラスに対応しています.
DCGANはただ画像を生成するだけですが,元の画像で構築した学習モデルに生成画像を入力して予測されたラベルによってラベリングして,予測クラスごとに画像を出力させています.
generatorにLeakyReLUを入れることで,物体の対象がよりしっかりと生成できている感じがします.
結論
突貫でやったので,詳しく後日ちゃんと書きたいと思います.
Author And Source
この問題について(Cifar10用DCGANモデルを晒すwith keras), 我々は、より多くの情報をここで見つけました: https://qiita.com/Yasshi840/items/4cd4e01d18d4acd02c41 。著者帰属: 元の著者の情報は、元のURLに含まれています。著作権は原作者に属する。
Content is automatically searched and collected through network algorithms . If there is a violation . Please contact us . We will adjust (correct author information ,or delete content ) as soon as possible .