PyTorch: RNN classifier


### For my own practice only; no other purpose.

The model treats each 28x28 MNIST image as a sequence: 28 time steps (the rows), each with 28 inputs (the pixels in a row), and classifies the digit from the LSTM output at the last time step.



import torch
from torch import nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt


# torch.manual_seed(1)    # reproducible

# Hyper Parameters
EPOCH = 1               # train the training data n times; to save time, we just train 1 epoch
BATCH_SIZE = 64
TIME_STEP = 28          # rnn time step / image height
INPUT_SIZE = 28         # rnn input size / image width
LR = 0.01               # learning rate
DOWNLOAD_MNIST = True   # set to True if you haven't downloaded the data yet


# MNIST digit dataset
train_data = dsets.MNIST(
    root='./mnist/',
    train=True,                         # this is training data
    transform=transforms.ToTensor(),    # converts a PIL.Image or numpy.ndarray to a
                                        # torch.FloatTensor of shape (C x H x W) and normalizes to the range [0.0, 1.0]
    download=DOWNLOAD_MNIST,            # download it if you don't have it
)

# # plot one example
# print(train_data.data.size())     # (60000, 28, 28)
# print(train_data.targets.size())  # (60000)
# plt.imshow(train_data.data[0].numpy(), cmap='gray')
# plt.title('%i' % train_data.targets[0])
# plt.show()

# Data Loader for easy mini-batch return in training
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# pick 2000 test samples to speed up testing
# (.train_data/.test_data/.train_labels/.test_labels are deprecated; use .data/.targets)
test_data = dsets.MNIST(root='./mnist/', train=False, transform=transforms.ToTensor())
test_x = test_data.data.type(torch.FloatTensor)[:2000] / 255.   # shape (2000, 28, 28), values in range [0, 1]
test_y = test_data.targets.numpy()[:2000]                       # convert to numpy array


class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()

        self.rnn = nn.LSTM(         # if you use nn.RNN() instead, it hardly learns
            input_size=INPUT_SIZE,
            hidden_size=64,         # rnn hidden unit
            num_layers=1,           # number of rnn layers
            batch_first=True,       # input & output have batch size as the first dimension, e.g. (batch, time_step, input_size)
        )

        self.out = nn.Linear(64, 10)

    def forward(self, x):
        # x shape (batch, time_step, input_size)
        # r_out shape (batch, time_step, hidden_size)
        # h_n shape (n_layers, batch, hidden_size)
        # h_c shape (n_layers, batch, hidden_size)
        r_out, (h_n, h_c) = self.rnn(x, None)   # None represents zero initial hidden state

        # choose r_out at the last time step
        out = self.out(r_out[:, -1, :])
        return out


rnn = RNN()
print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all rnn parameters
loss_func = nn.CrossEntropyLoss()                       # the target label is not one-hot encoded

# training and testing
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):    # gives batch data
        b_x = b_x.view(-1, 28, 28)      # reshape x to (batch, time_step, input_size)

        output = rnn(b_x)               # rnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients

        if step % 50 == 0:
            test_output = rnn(test_x)   # (samples, time_step, input_size)
            pred_y = torch.max(test_output, 1)[1].data.numpy()
            accuracy = float((pred_y == test_y).astype(int).sum()) / float(test_y.size)
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)

# print 10 predictions from test data
test_output = rnn(test_x[:10].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].data.numpy()
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
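As a sanity check on the shape comments in forward(), here is a minimal sketch (not from the original post; the tensor sizes just mirror the MNIST setup above). It shows that for a single-layer, unidirectional LSTM with batch_first=True, the output at the last time step, r_out[:, -1, :], equals the final hidden state h_n[-1], so feeding either one into the linear layer is equivalent.

import torch
from torch import nn

# same layer configuration as in the RNN class above
lstm = nn.LSTM(input_size=28, hidden_size=64, num_layers=1, batch_first=True)

x = torch.randn(5, 28, 28)          # (batch, time_step, input_size)
r_out, (h_n, h_c) = lstm(x, None)   # None -> zero initial hidden/cell state

print(r_out.shape)                  # torch.Size([5, 28, 64])  (batch, time_step, hidden_size)
print(h_n.shape)                    # torch.Size([1, 5, 64])   (n_layers, batch, hidden_size)
print(torch.allclose(r_out[:, -1, :], h_n[-1]))   # True

Taking the last time step works here because by then the LSTM has seen the whole image (all 28 rows); an nn.RNN with the same shapes would run too, but as the comment in the code notes, it hardly learns on this task.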
Original post: https://www.cnblogs.com/dhName/p/11759291.html