PyTorch learning (8): using GPU cuda() with an RNN
Before adding cuda(), the run takes 55 s; after adding cuda(), it takes 6 s.
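As an aside, the script below uses the older pattern of calling .cuda() on the model and on every tensor. On PyTorch 0.4+ the same move can be written device-agnostically; here is a minimal sketch (not part of the original script, and assuming a recent PyTorch where Variable is no longer needed):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = torch.nn.Linear(28, 10).to(device)   # any nn.Module; .to() moves all parameters
x = torch.randn(64, 28).to(device)           # tensors are moved the same way
out = model(x)                               # runs on the GPU if one is available, else the CPU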
Code:
#coding=utf-8
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision # datasets and common image transforms
from torch.autograd import Variable
import time
'''
Adding cuda() moves the model and the data onto the GPU, which speeds up both
training and testing considerably (here roughly 55 s on CPU vs 6 s on GPU).
'''
time_start=time.time()
torch.manual_seed(1) # reproducible
# Hyper Parameters
EPOCH = 1 # train the training data n times; 1 epoch here to save time
BATCH_SIZE = 64
TIME_STEP = 28 # rnn time steps / image height
INPUT_SIZE = 28 # rnn input size / image width
LR = 0.01 # learning rate
DOWNLOAD_MNIST = False # set to True on the first run to download MNIST, then back to False
# MNIST handwritten digits dataset
train_data = torchvision.datasets.MNIST(
    root='./mnist/',    # where to save/load the dataset
    train=True,         # this is training data
    transform=torchvision.transforms.ToTensor(),    # converts PIL.Image or numpy.ndarray to
                        # torch.FloatTensor of shape (C x H x W), normalized to [0.0, 1.0]
    download=DOWNLOAD_MNIST,    # download if you don't have it yet
)
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
# e.g. a batch of 50 samples, 1 channel, 28x28 -> shape (50, 1, 28, 28)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
# to save test time, only take the first 2000 test samples
test_x = Variable(torch.unsqueeze(test_data.test_data, dim=1)).type(torch.FloatTensor)[:2000].cuda()/255. # shape from (2000, 28, 28) to (2000, 1, 28, 28), value in range(0,1)
test_y = test_data.test_labels[:2000].cuda()
class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.LSTM(         # an LSTM performs much better here than a plain nn.RNN()
            input_size=INPUT_SIZE,  # number of input features per time step (one image row)
            hidden_size=64,         # number of rnn hidden units
            num_layers=1,           # number of stacked RNN layers
            batch_first=True,       # input & output put batch size first, e.g. (batch, time_step, input_size)
        )
        self.out = nn.Linear(64, 10)    # output layer: 10 digit classes

    def forward(self, x):
        # x shape (batch, time_step, input_size)
        # r_out shape (batch, time_step, output_size)
        # h_n shape (n_layers, batch, hidden_size): the LSTM hidden state
        # h_c shape (n_layers, batch, hidden_size): the LSTM cell state
        r_out, (h_n, h_c) = self.rnn(x, None)   # None means the initial hidden state is all zeros
        # take r_out at the last time step; for this single-layer LSTM,
        # r_out[:, -1, :] is the same as h_n
        out = self.out(r_out[:, -1, :])
        return out
rnn = RNN()
rnn.cuda() # move the whole network (all parameters) onto the GPU
print(rnn)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all parameters
loss_func = nn.CrossEntropyLoss() # the target label is not one-hotted
# training and testing
for epoch in range(EPOCH):
    for step, (x, b_y) in enumerate(train_loader):  # gives batch data
        x = Variable(x).cuda()          # move the input batch onto the GPU
        b_y = Variable(b_y).cuda()      # move the labels onto the GPU
        b_x = x.view(-1, 28, 28)        # reshape x to (batch, time_step, input_size)
        output = rnn(b_x)               # rnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients

test_output = rnn(test_x[:10].view(-1, 28, 28))
# .cpu() is needed here; otherwise numpy raises: "can't convert CUDA tensor to
# numpy (it doesn't support GPU arrays). Use .cpu() to move the tensor to host
# memory first."
pred_y = torch.max(test_output, 1)[1].cpu().data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
time_end=time.time()
print('time cost',time_end-time_start,'s')
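A side note on the comment in forward(): for a single-layer, unidirectional LSTM, the output at the last time step really does equal the final hidden state h_n. A quick standalone check (a sketch assuming PyTorch 0.4+, separate from the script above):

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=28, hidden_size=64, num_layers=1, batch_first=True)
x = torch.randn(5, 28, 28)              # (batch, time_step, input_size)
r_out, (h_n, h_c) = lstm(x)             # default initial state is all zeros
print(r_out.shape)                      # torch.Size([5, 28, 64])
print(h_n.shape)                        # torch.Size([1, 5, 64]) = (n_layers, batch, hidden_size)
print(torch.allclose(r_out[:, -1, :], h_n[0]))   # True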
Result:
RNN (
(rnn): LSTM(28, 64, batch_first=True)
(out): Linear (64 -> 10)
)
(array([7, 2, 1, 0, 4, 1, 4, 9, 5, 9]), 'prediction number')
(
7
2
1
0
4
1
4
9
5
9
[torch.cuda.LongTensor of size 10 (GPU 0)]
, 'real number')
('time cost', 6.428691864013672, 's')
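One caveat about the timing: CUDA kernels launch asynchronously, so wall-clock measurements around GPU work should synchronize the device first, or the numbers can be misleading. A minimal sketch of a more careful measurement (here the final .cpu() call happens to force synchronization anyway, so the 6 s figure is a fair comparison):

import time
import torch

torch.cuda.synchronize()        # wait for all pending GPU work before starting the clock
t0 = time.time()
# ... training / inference on the GPU ...
torch.cuda.synchronize()        # wait for the GPU to finish before stopping the clock
print('time cost', time.time() - t0, 's')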