Classifying CIFAR10 with a CNN (PyTorch)

CIFAR10 contains 60000 color images of size 32*32 in 10 classes, with 6000 images per class.

The training set has 50000 images and the test set has 10000 images.

First, load the dataset.

import numpy as np
import torch
import torch.optim as optim

from torchvision import datasets
import torchvision.transforms as transforms

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

trainset = datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)

testset = datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)

Next, define the network architecture.

import torch.nn.functional as F
import torch.nn as nn

class classifier(nn.Module):
    def __init__(self):
        super().__init__()
        '''Input is 3*32*32; the spatial size is halved by each pooling layer'''
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)   # output is 16*16*16 after pooling
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)  # output is 32*8*8 after pooling
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(32 * 8 * 8, 512)
        self.fc2 = nn.Linear(512, 10)
        self.dropout = nn.Dropout(0.2)     # dropout to reduce overfitting
        
    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        
        x = x.view(-1, 32 * 8 * 8)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x
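
To double-check the shape comments in __init__, one can push a dummy tensor through the layers. This is just a quick verification sketch, assuming the class above has already been defined:

net = classifier()
x = torch.randn(1, 3, 32, 32)            # one fake 3*32*32 image
h1 = net.pool(F.relu(net.conv1(x)))      # torch.Size([1, 16, 16, 16])
h2 = net.pool(F.relu(net.conv2(h1)))     # torch.Size([1, 32, 8, 8])
print(h1.shape, h2.shape, net(x).shape)  # final logits: torch.Size([1, 10])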

Time to train! Note that nn.CrossEntropyLoss combines LogSoftmax and NLLLoss, which is why forward returns raw logits without a softmax.

model = classifier()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
epochs = 10

for e in range(epochs):
    train_loss = 0
    
    for data, target in trainloader:
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * data.size(0)    # loss.item() is the mean loss over the batch; mean loss * batch_size = total loss for this batch
        
    train_loss = train_loss/len(trainloader.dataset)
    
    print('Epoch: {} 	 Training Loss:{:.6f}'.format(e+1, train_loss))
Here is the loss output:
Epoch: 1 	 Training Loss:1.366521
Epoch: 2 	 Training Loss:1.063830
Epoch: 3 	 Training Loss:0.916826
Epoch: 4 	 Training Loss:0.799573
Epoch: 5 	 Training Loss:0.708303
Epoch: 6 	 Training Loss:0.627443
Epoch: 7 	 Training Loss:0.564043
Epoch: 8 	 Training Loss:0.503542
Epoch: 9 	 Training Loss:0.465513
Epoch: 10 	 Training Loss:0.418729

Now let's see how the model performs on the test set!

classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval()    # turn off dropout when evaluating
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        for i in range(4):    # iterate over the 4 images in the batch (batch_size=4)
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1


for i in range(10): 
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))
And its output:
Accuracy of plane : 74 %
Accuracy of   car : 76 %
Accuracy of  bird : 55 %
Accuracy of   cat : 56 %
Accuracy of  deer : 54 %
Accuracy of   dog : 54 %
Accuracy of  frog : 81 %
Accuracy of horse : 72 %
Accuracy of  ship : 74 %
Accuracy of truck : 68 %
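
From the same counters one can also report the overall accuracy on the test set (a small follow-up sketch reusing the class_correct and class_total lists filled above):

print('Overall accuracy: %.2f %%' % (100 * sum(class_correct) / sum(class_total)))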
Original post: https://www.cnblogs.com/MartinLwx/p/10549229.html