PyTorch入門2. MNISTのCNNネットワークを動かす
0.はじめに
PyTorchの動的計算グラフ、自動微分などの概念についてはネット上に多くのとても良い説明があるので、以下の推薦チュートリアルを参照してください
分類問題はニューラルネットワークの比較的古典的な応用シーンで、比較的簡単なのはMNISTの手書き数字分類です。10クラスに分類する問題で、データセットはネット上からダウンロードできます
いくつかの実用的なチュートリアルhttps://morvanzhou.github.io/tutorials/machine-learning/torch/
https://zhuanlan.zhihu.com/p/26649126
http://pytorch.apachecn.org/cn/tutorials/
1.前提パラメータ設定
2.データのロード
PyTorchの動的計算グラフ、自動微分などの概念についてはネット上に多くのとても良い説明があるので、以下の推薦チュートリアルを参照してください
分類問題はニューラルネットワークの比較的古典的な応用シーンで、比較的簡単なのはMNISTの手書き数字分類です。10クラスに分類する問題で、データセットはネット上からダウンロードできます
いくつかの実用的なチュートリアルhttps://morvanzhou.github.io/tutorials/machine-learning/torch/
https://zhuanlan.zhihu.com/p/26649126
http://pytorch.apachecn.org/cn/tutorials/
1.前提パラメータ設定
#coding=utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data as Data
import torchvision  # provides the MNIST dataset and image transforms
import matplotlib.pyplot as plt
from torchvision import transforms, utils
torch.manual_seed(1)  # reproducible: fixes weight init and shuffling order
# Hyper Parameters
EPOCH = 40  # number of full passes over the training set
BATCH_SIZE = 20  # samples per mini-batch; iterations per epoch = dataset size / BATCH_SIZE
LR = 0.001  # learning rate for the Adam optimizer
DOWNLOAD_MNIST = True  # download MNIST on first run; set to False once ./mnist/ exists
2.データのロード
# Load the MNIST training split (downloaded to ./mnist/ on first run).
train_data = torchvision.datasets.MNIST(
    root='./mnist/',
    train=True,  # this is training data
    transform=torchvision.transforms.ToTensor(),  # Converts a PIL.Image or numpy.ndarray to
    # torch.FloatTensor of shape (C x H x W) and normalize in the range [0.0, 1.0]
    download=DOWNLOAD_MNIST,
)
print(train_data.train_data.size())  # (60000, 28, 28)
print(train_data.train_labels.size())  # (60000)
# Data Loader for easy mini-batch return in training; each image batch has shape (BATCH_SIZE, 1, 28, 28)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
# NOTE(review): the test split is obtained the same way with train=False — it is not loaded here.
3. CNNネットワークの定義
PyTorchではネットワークをnn.Moduleのサブクラスとして定義します
class CNN(nn.Module):
    """Two conv blocks (conv -> ReLU -> 2x2 max-pool) followed by a linear classifier.

    Input is a batch of MNIST images shaped (batch, 1, 28, 28); the output is a
    (batch, 10) tensor of raw class scores (logits, no softmax applied).
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Block 1: (1, 28, 28) --conv 5x5, stride 1, pad 2--> (16, 28, 28) --pool--> (16, 14, 14)
        # padding = (kernel_size - 1) / 2 keeps the spatial size unchanged when stride=1.
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Block 2: (16, 14, 14) --> (32, 14, 14) --pool--> (32, 7, 7)
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Fully connected layer mapping the flattened features to the 10 digit classes.
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        """Return raw class scores for a batch of images."""
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)  # flatten to (batch, 32 * 7 * 7)
        return self.out(flat)
cnn = CNN()
print(cnn)  # print the layer-by-layer architecture for a quick sanity check
# Expected output:
"""
CNN (
(conv1): Sequential (
(0): Conv2d(1, 16, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(1): ReLU ()
(2): MaxPool2d (size=(2, 2), stride=(2, 2), dilation=(1, 1))
)
(conv2): Sequential (
(0): Conv2d(16, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(1): ReLU ()
(2): MaxPool2d (size=(2, 2), stride=(2, 2), dilation=(1, 1))
)
(out): Linear (1568 -> 10)
)
"""
4. モデルの学習と評価
# ---- Training setup ----
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)  # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss()  # expects raw logits; targets are class indices, not one-hot

# Test split for the evaluation phase (was referenced below but never defined).
test_data = torchvision.datasets.MNIST(
    root='./mnist/',
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST,
)
test_loader = Data.DataLoader(dataset=test_data, batch_size=BATCH_SIZE, shuffle=False)

# ---- Training loop ----
cnn.train()
for epoch in range(EPOCH):
    # Accumulators must be reset each epoch (they were never initialized before).
    train_loss = 0.0
    train_acc = 0.0
    for step, (x, y) in enumerate(train_loader):  # train_loader normalizes x while iterating
        b_x = Variable(x)  # batch of images
        b_y = Variable(y)  # batch of labels
        output = cnn(b_x)  # forward() returns the logits directly — no [0] indexing
        loss = loss_func(output, b_y)  # cross entropy loss
        train_loss += loss.item()  # .item() replaces the deprecated .data[0]
        pred = torch.max(output, 1)[1]  # index of the max logit = top-1 predicted class
        train_acc += (pred == b_y).sum().item()
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        loss.backward()
        optimizer.step()
    print('Train Loss: {:.6f}, Acc: {:.6f}'.format(
        train_loss / len(train_data), train_acc / len(train_data)))

# ---- Evaluation --------------------------------
cnn.eval()  # disables dropout/batch-norm training behavior (none here, but good practice)
eval_loss = 0.0
eval_acc = 0.0
for batch_x, batch_y in test_loader:
    batch_x, batch_y = Variable(batch_x), Variable(batch_y)
    out = cnn(batch_x)
    loss = loss_func(out, batch_y)
    eval_loss += loss.item()
    pred = torch.max(out, 1)[1]
    eval_acc += (pred == batch_y).sum().item()
print(eval_acc)
print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
    eval_loss / len(test_data), eval_acc / len(test_data)))
損失関数のCrossEntropyLossはsoftmaxと負の対数尤度損失を組み合わせたものです。詳しくは https://blog.csdn.net/u014380165/article/details/77284921 と https://blog.csdn.net/zhangxb35/article/details/72464152?utm_source=itdadao&utm_medium=referra を参照してください
ネットワークの出力outはsoftmaxを通す前のスコア(ロジット)なので、CrossEntropyLossにそのまま渡します
予測ラベルpredはtorch.maxで最大スコアのインデックスを取って求めます(https://www.jianshu.com/p/e4c7b3eb8f3d 参照)。バッチサイズが4ならoutは4*10の行列で、行ごとの最大値の位置がtop-1予測となり、これでtop-1 ACCを計算します
以上で、CNNによるMNIST手書き数字分類の学習と評価は完了です~