Learn practical skills, build real-world projects, and advance your career
import torch
import torch.nn


# Conv2d demo with asymmetric kernel/stride/padding/dilation.
# The layer maps 16 input channels to 33 output channels, so the input
# must be (N, 16, H, W).  fix: `inputs` was never defined (the original
# traceback shows a 1-channel tensor being fed in), and `outputs(size)`
# tried to *call* the tensor — it should be `outputs.size()`.
conv = torch.nn.Conv2d(16, 33, kernel_size=(3, 5), stride=(2, 1),
                       padding=(4, 2), dilation=(3, 1))
inputs = torch.randn(1, 16, 50, 100)
outputs = conv(inputs)
outputs.size()  # torch.Size([1, 33, 26, 100])
--------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) <ipython-input-4-f34cda4a085f> in <module>() 4 5 conv=torch.nn.Conv2d(16,33,kernel_size=(3,5),stride=(2,1),padding=(4,2),dilation=(3,1)) ----> 6 outputs=conv(inputs) 7 outputs(size) ~\Anaconda3\envs\tensorflow\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs) 489 result = self._slow_forward(*input, **kwargs) 490 else: --> 491 result = self.forward(*input, **kwargs) 492 for hook in self._forward_hooks.values(): 493 hook_result = hook(self, input, result) ~\Anaconda3\envs\tensorflow\lib\site-packages\torch\nn\modules\conv.py in forward(self, input) 320 def forward(self, input): 321 return F.conv2d(input, self.weight, self.bias, self.stride, --> 322 self.padding, self.dilation, self.groups) 323 324 RuntimeError: Given groups=1, weight of size [33, 16, 3, 5], expected input[1, 1, 2, 2] to have 16 channels, but got 1 channels instead

# Nearest-neighbor upsampling: blow a 2x2 map up to 4x4.
# NOTE: Upsample requires a floating-point tensor; a LongTensor input raises.
inputs = torch.tensor([[1.0, 2.0], [3.0, 4.0]]).reshape(1, 1, 2, 2)
print(inputs.dtype)
unsample = torch.nn.Upsample(scale_factor=2, mode='nearest')
unsample(inputs)
torch.float32
tensor([[[[1., 1., 2., 2.],
          [1., 1., 2., 2.],
          [3., 3., 4., 4.],
          [3., 3., 4., 4.]]]])
# Three flavors of 2-D padding applied to a 1x1x3x4 integer tensor.
inputs = torch.arange(12).view(1, 1, 3, 4)

# Constant padding: a 2-pixel border filled with -1 (works on int tensors).
pad = torch.nn.ConstantPad2d(padding=(2, 2, 2, 2), value=-1)
print('常数补全={}'.format(pad(inputs)))

# Replication padding: edge values are repeated (float input required).
pad = torch.nn.ReplicationPad2d(padding=(1, 1, 1, 1))
print('重复补全={}'.format(pad(inputs.float())))

# Reflection padding: values mirror across the border (float input required).
pad = torch.nn.ReflectionPad2d(padding=(1, 1, 1, 1))
print('反射补全={}'.format(pad(inputs.float()).squeeze()))
常数补全=tensor([[[[-1, -1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1, -1], [-1, -1, 0, 1, 2, 3, -1, -1], [-1, -1, 4, 5, 6, 7, -1, -1], [-1, -1, 8, 9, 10, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1, -1]]]]) 重复补全=tensor([[[[ 0., 0., 1., 2., 3., 3.], [ 0., 0., 1., 2., 3., 3.], [ 4., 4., 5., 6., 7., 7.], [ 8., 8., 9., 10., 11., 11.], [ 8., 8., 9., 10., 11., 11.]]]]) 反射补全=tensor([[ 5., 4., 5., 6., 7., 6.], [ 1., 0., 1., 2., 3., 2.], [ 5., 4., 5., 6., 7., 6.], [ 9., 8., 9., 10., 11., 10.], [ 5., 4., 5., 6., 7., 6.]])
import torchvision.datasets
import torchvision.transforms
import torch.utils.data
import torch
import torch.nn
import torch.optim

# Data loading: MNIST train/test splits as tensors (downloads on first run).
train_datasets = torchvision.datasets.MNIST(root='./data/mnist', train=True,
        transform=torchvision.transforms.ToTensor(), download=True)
# fix: root was './data/minst' (typo) — both splits now share one directory,
# consistent with the second script below (L104/L107 pattern).
test_datasets = torchvision.datasets.MNIST(root='./data/mnist', train=False,
        transform=torchvision.transforms.ToTensor(), download=True)

batch_size = 100
train_loader = torch.utils.data.DataLoader(dataset=train_datasets, batch_size=batch_size)
print('len(train_loader)={}'.format(len(train_loader)))
test_loader = torch.utils.data.DataLoader(dataset=test_datasets, batch_size=batch_size)
print('len(test_loader)={}'.format(len(test_loader)))

# Inspect one batch: per the printed output, images are (100, 1, 28, 28)
# and labels are (100,).
for images, labels in train_loader:
    print('images.size()={}'.format(images.size()))
    print('labels.size()={}'.format(labels.size()))
    break

# Build the convolutional network architecture
class Net(torch.nn.Module):
    """Two-conv-layer CNN classifier for 28x28 single-channel MNIST images."""

    def __init__(self):
        super(Net, self).__init__()
        # Feature extractor: 1 -> 64 -> 128 channels; the single 2x2
        # max-pool halves the 28x28 input to 14x14.
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 64, kernel_size=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(64, 128, kernel_size=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(stride=2, kernel_size=2),
        )
        # Classifier head: flattened 128*14*14 features -> 10 class scores.
        self.dense = torch.nn.Sequential(
            torch.nn.Linear(128 * 14 * 14, 1024),
            torch.nn.ReLU(),
            torch.nn.Dropout(p=0.5),
            torch.nn.Linear(1024, 10),
        )

    def forward(self, x):
        features = self.conv1(x)
        flat = features.view(-1, 128 * 14 * 14)
        return self.dense(flat)

net = Net()

criterion = torch.nn.CrossEntropyLoss()   # expects raw logits + int labels
optimizer = torch.optim.Adam(net.parameters())

# Training: the standard zero-grad / forward / backward / step cycle,
# logging the batch loss every 100 batches.
num_epochs = 5
for epoch in range(num_epochs):
    for idx, (images, labels) in enumerate(train_loader):
        optimizer.zero_grad()
        loss = criterion(net(images), labels)
        loss.backward()
        optimizer.step()

        if idx % 100 == 0:
            print('epoch{},batch{},损失={:g}'.format(epoch, idx, loss.item()))

# Evaluation: accuracy over the held-out test set.
# fixes: `ner` -> `net`, `label` -> `labels`, `crrect` -> `correct`
# (all three were NameErrors in the original, so this loop never ran).
net.eval()  # disable Dropout(p=0.5) at test time for deterministic outputs
correct = 0
total = 0
with torch.no_grad():  # no autograd bookkeeping needed during evaluation
    for images, labels in test_loader:
        preds = net(images)
        predicted = torch.argmax(preds, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
accuracy = correct / total
print('测试数据准确度:{:g}'.format(accuracy))
        
        
len(train_loader)=600 len(test_loader)=100 images.size()=torch.Size([100, 1, 28, 28]) labels.size()=torch.Size([100]) epoch0,batch0,损失=2.30236 epoch0,batch100,损失=0.233516 epoch0,batch200,损失=0.15524 epoch0,batch300,损失=0.123006
import torch
import torch.utils.data
import torch.nn
import torch.optim
import torchvision.datasets
import torchvision.transforms

# Data loading: MNIST train/test splits as tensors (downloads on first run).
to_tensor = torchvision.transforms.ToTensor()
train_dataset = torchvision.datasets.MNIST(
        root='./data/mnist', train=True, transform=to_tensor, download=True)
test_dataset = torchvision.datasets.MNIST(
        root='./data/mnist', train=False, transform=to_tensor, download=True)

batch_size = 100
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size)

# Build the network architecture
class Net(torch.nn.Module):
    """CNN for MNIST: two 3x3 conv layers followed by a dense head."""

    def __init__(self):
        super(Net, self).__init__()
        # Convolutional stage; the max-pool halves 28x28 down to 14x14.
        self.conv1 = torch.nn.Sequential(
                torch.nn.Conv2d(1, 64, kernel_size=3, padding=1),
                torch.nn.ReLU(),
                torch.nn.Conv2d(64, 128, kernel_size=3, padding=1),
                torch.nn.ReLU(),
                torch.nn.MaxPool2d(stride=2, kernel_size=2))
        # Fully-connected stage mapping flattened features to 10 logits.
        self.dense = torch.nn.Sequential(
                torch.nn.Linear(128 * 14 * 14, 1024),
                torch.nn.ReLU(),
                torch.nn.Dropout(p=0.5),
                torch.nn.Linear(1024, 10))

    def forward(self, x):
        # Flatten the (N, 128, 14, 14) feature map before the dense head.
        return self.dense(self.conv1(x).view(-1, 128 * 14 * 14))

net = Net()                                      # fresh, randomly initialized model

criterion = torch.nn.CrossEntropyLoss()          # expects raw logits + int labels
optimizer = torch.optim.Adam(net.parameters())

# 训练 (training loop)
num_epochs = 5
for epoch in range(num_epochs):
    for batch_idx, (images, labels) in enumerate(train_loader):
        # Standard step: clear grads, forward, loss, backward, update.
        optimizer.zero_grad()
        loss = criterion(net(images), labels)
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            print('epoch {}, batch {}, 损失 = {:g}'.format(epoch, batch_idx, loss.item()))

# 测试 — accuracy over the held-out test set.
# fix: the model contains Dropout(p=0.5), so evaluating without
# net.eval() makes predictions stochastic and degrades accuracy.
net.eval()
correct = 0
total = 0
with torch.no_grad():  # no autograd bookkeeping needed at test time
    for images, labels in test_loader:
        preds = net(images)
        predicted = torch.argmax(preds, 1)  # class with the highest logit
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

accuracy = correct / total
print('测试数据准确率: {:.1%}'.format(accuracy))