Deep-learning MNIST digit recognition implemented in PyTorch, trained on the GPU

A deep-learning training script is generally split into four parts:

  1. preparing and preprocessing the dataset
  2. defining the network model
  3. defining the loss function and optimizer
  4. training and testing

The script below implements each part in order; the numbered comments mark where each part begins.

import torch
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# 1 data
batch_size = 64  # mini-batch size
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])  # convert to a Tensor, then normalize with the MNIST mean and std

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)  # download the training set if needed
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)  # wrap the dataset in a DataLoader

test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
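
# (Optional check, not in the original post) Peek at one batch to confirm the shapes the
# DataLoader produces:
#   images, labels = next(iter(train_loader))
#   images.shape  -> torch.Size([64, 1, 28, 28]) for a full batch
#   labels.shape  -> torch.Size([64])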


# 2 model
# Define the residual block as its own class so it can be reused
class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels  # number of channels (the block keeps input and output channels equal)
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding='same')  # 3x3 kernel; padding='same' keeps the spatial size unchanged
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding='same')
        self.relu = nn.ReLU()

    def forward(self, x):
        y = self.relu(self.conv1(x))
        y = self.conv2(y)
        return self.relu(x + y)
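
# Note (added for clarity, not in the original post): the block returns
# relu(x + conv2(relu(conv1(x)))), i.e. the input x is added back onto the learned residual
# before the final activation, which is what lets gradients flow through the skip connection.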


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5)  # 1 input channel, 16 output channels, 5x5 kernel
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5)
        self.res1 = ResidualBlock(16)  # the residual block keeps the channel count (16) unchanged
        self.res2 = ResidualBlock(32)
        self.relu = nn.ReLU()
        self.mp = nn.MaxPool2d(2)  # 2x2 max pooling
        self.fc = nn.Linear(512, 10)  # 512 = 32 * 4 * 4 (see the shape trace after the class)

    def forward(self, x):
        in_size = x.size(0)
        x = self.mp(self.relu(self.conv1(x)))  # conv -> ReLU -> max-pool
        x = self.res1(x)
        x = self.mp(self.relu(self.conv2(x)))
        x = self.res2(x)
        x = x.view(in_size, -1)  # flatten to (batch_size, 512); -1 lets PyTorch infer that dimension
        x = self.fc(x)
        return x
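
# Shape trace (added for reference, not in the original post) for 28x28 MNIST inputs:
#   input                     (N, 1, 28, 28)
#   conv1 (5x5) + ReLU + pool (N, 16, 12, 12)   # 28 -> 24 -> 12
#   res1                      (N, 16, 12, 12)
#   conv2 (5x5) + ReLU + pool (N, 32, 4, 4)     # 12 -> 8 -> 4
#   res2                      (N, 32, 4, 4)
#   flatten                   (N, 32 * 4 * 4) = (N, 512), hence nn.Linear(512, 10)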


model = Net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)  # move the model to the GPU if one is available


# 3 loss and optimizer
criterion = nn.CrossEntropyLoss()  # cross-entropy loss; it expects raw logits, so the model has no final softmax
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)  # momentum accelerates convergence
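
# For reference (added, not in the original post): with the default dampening of 0,
# PyTorch's SGD-with-momentum update is roughly
#   v <- momentum * v + grad
#   p <- p - lr * v
# so past gradients are accumulated, smoothing and accelerating convergence.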


# 4 training and testing
def train(t_epoch):
    running_loss = 0.0
    for batch_index, data in enumerate(train_loader, 0):  # enumerate yields (index, batch); the 0 starts the index at 0
        inputs, targets = data
        inputs, targets = inputs.to(device), targets.to(device)  # move the batch to the GPU
        optimizer.zero_grad()

        # forward + backward + update
        y_hat = model(inputs)
        t_loss = criterion(y_hat, targets)  # the loss tensor is already on the GPU because its inputs are
        t_loss.backward()
        optimizer.step()

        running_loss += t_loss.item()
        if batch_index % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (t_epoch + 1, batch_index + 1, running_loss / 300))
            running_loss = 0.0


def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)  # move the batch to the GPU
            outputs = model(images)
            predicted = torch.argmax(outputs, dim=1)  # index of the largest logit in each row = predicted class
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('[%d / %d]' % (correct, total))
    print('Accuracy on test set: %d %%' % (100 * correct / total))
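
# Note (added, not in the original post): this model has no dropout or batch-norm layers,
# so model.eval() would not change the results here; for models that do use such layers,
# call model.eval() before evaluation and model.train() when resuming training.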


if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()

    print('Program finished')
    print('Using device: ', device)
    if torch.cuda.is_available():
        print('Using GPU: ', torch.cuda.get_device_name())  # print the GPU model name
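
    # (Optional, not in the original post) To keep the trained weights you could save the
    # state dict; the filename below is just an example:
    # torch.save(model.state_dict(), 'mnist_resnet.pt')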


Source: 博客园 (cnblogs)

Original link: https://www.cnblogs.com/tanyuyang/p/17331660.html
