# 基于Pytorch框架的手写体识别 (handwritten digit recognition with PyTorch)

import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt


# Hyperparameters.
EPOCH = 10             # number of full passes over the training set
BATCH_SIZE = 100
LEARNING_RATE = 0.001
DOWNLOAD = False       # set True on the first run to fetch MNIST into ./data

# Fix: the path, colormap and format strings below were missing their quotes,
# which is a syntax error in Python.
train_dataset = datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=DOWNLOAD)
test_dataset = datasets.MNIST(root='./data', train=False, transform=transforms.ToTensor(), download=DOWNLOAD)

print(type(train_dataset))
print(train_dataset.data.size())
print(train_dataset.targets.size())
# Preview one training sample with its label as the title.
plt.imshow(train_dataset.data[56785].numpy(), cmap='gray')
plt.title('%i' % train_dataset.targets[56785])
plt.show()

train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=True)
print(type(train_loader))




class CnnMnist(nn.Module):
    """Small CNN for 28x28 single-channel MNIST images, producing 10 class logits."""

    def __init__(self):
        super(CnnMnist, self).__init__()
        # Feature extractor: 1x28x28 -> 64x28x28 -> 128x28x28 -> 128x14x14.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 128, 3, 1, 1),
            nn.ReLU(),
            nn.MaxPool2d(stride=2, kernel_size=2),
        )
        # Classifier head: flattened 128*14*14 features -> 1024 -> 10 logits.
        self.fullyconnect = nn.Sequential(
            nn.Linear(14 * 14 * 128, 1024),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(1024, 10),
        )

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) unnormalized class scores."""
        features = self.conv1(x)
        flat = features.view(-1, 14 * 14 * 128)
        return self.fullyconnect(flat)


# Instantiate the network and move it to the GPU when one is available.
model = CnnMnist()
if torch.cuda.is_available():
    model = model.cuda()

# RMSprop optimizer over all model parameters.
optimzer = torch.optim.RMSprop(model.parameters(), lr=LEARNING_RATE)
cost = nn.CrossEntropyLoss()   # multi-class classification criterion
print(model)

# Train for EPOCH passes, evaluating on the test set after each one.
use_cuda = torch.cuda.is_available()

for epoch in range(EPOCH):
    running_loss = 0.0
    running_accuracy = 0.0
    # Fix: the separator and epoch-header strings were missing quotes (syntax error).
    print('-' * 20)
    print('Epoch{}/{}'.format(epoch + 1, EPOCH))

    # --- training pass ---
    model.train()  # enable dropout for training
    for data in train_loader:
        x_train, y_train = data
        # Fix: .cuda() was called unconditionally, crashing on CPU-only hosts;
        # now guarded consistently with the model setup above.
        if use_cuda:
            x_train, y_train = x_train.cuda(), y_train.cuda()
        outputs = model(x_train)
        # torch.max over dim=1 yields the predicted class index per sample.
        _, pred = torch.max(outputs.data, 1)
        optimzer.zero_grad()            # clear accumulated parameter gradients
        loss = cost(outputs, y_train)
        loss.backward()                 # back-propagate to compute gradients
        optimzer.step()                 # update network parameters
        running_loss += loss.item()
        running_accuracy += torch.sum(pred == y_train.data).item()

    # --- evaluation pass ---
    # Fix: dropout was left active during evaluation; model.eval() disables it,
    # and no_grad() avoids building unneeded autograd graphs.
    model.eval()
    test_accuracy = 0.0
    with torch.no_grad():
        for data in test_loader:
            x_test, y_test = data
            if use_cuda:
                x_test, y_test = x_test.cuda(), y_test.cuda()
            outputs = model(x_test)
            _, pred = torch.max(outputs, 1)
            test_accuracy += torch.sum(pred == y_test.data).item()

    # Fix: the report format string was missing quotes (syntax error).
    print('Loss is :{:.4f},Train Accuracy is:{:.4f}%,Test Accuracy is:{:.4f}%'.format(
        running_loss / len(train_dataset),
        100.0 * running_accuracy / len(train_dataset),
        100.0 * test_accuracy / len(test_dataset)))
# 经验分享 程序员 微信小程序 职场和发展 (blog footer tags; commented out so the script parses)