Learn practical skills, build real-world projects, and advance your career
import torch.nn

# Inspect the learnable parameters of a vanilla RNN cell:
# weight_ih/weight_hh are the input->hidden and hidden->hidden matrices,
# bias_ih/bias_hh the corresponding bias vectors.
cell = torch.nn.RNNCell(input_size=3, hidden_size=2)
for pname, pvalue in cell.named_parameters():
    print('{}={}'.format(pname, pvalue))
weight_ih=Parameter containing: tensor([[ 0.1831, -0.4158, 0.6376], [-0.6257, 0.1874, -0.2692]], requires_grad=True) weight_hh=Parameter containing: tensor([[-0.0016, 0.1171], [-0.5100, 0.1382]], requires_grad=True) bias_ih=Parameter containing: tensor([0.0149, 0.4452], requires_grad=True) bias_hh=Parameter containing: tensor([0.1351, 0.6150], requires_grad=True)
# Construct an LSTMCell instance and use it to build a single-layer RNN
# Build a single-layer RNN by hand from an LSTMCell, stepping through
# the sequence one timestep at a time.
import torch.nn
import torch

seq_len, batch_size = 6, 2
input_size, hidden_size = 3, 5

cell = torch.nn.LSTMCell(input_size=input_size, hidden_size=hidden_size)
inputs = torch.randn(seq_len, batch_size, input_size)
# Random initial hidden and cell states, one row per batch element.
h = torch.randn(batch_size, hidden_size)
c = torch.randn(batch_size, hidden_size)
hs = []
for t in range(seq_len):
    # Feed timestep t; the cell returns the updated (hidden, cell) states.
    h, c = cell(inputs[t], (h, c))
    hs.append(h)
# Stack the per-step hidden states into a (seq_len, batch, hidden) tensor.
outputs = torch.stack(hs)
print(hs)
# FIX: the original ended with a bare `outputs` expression, which only
# displays something in a REPL/notebook; print it so the script shows it too.
print(outputs)
[tensor([[-0.4300, 0.1251, -0.2122, 0.0323, 0.0519], [ 0.1709, 0.0652, -0.1586, 0.0737, 0.1276]], grad_fn=<MulBackward0>), tensor([[-0.2693, -0.1088, 0.1333, 0.0473, -0.0404], [ 0.1984, -0.0178, -0.0659, -0.0233, 0.0239]], grad_fn=<MulBackward0>), tensor([[ 0.0828, -0.0928, -0.0020, -0.0091, -0.0987], [-0.2648, -0.2424, 0.3205, 0.0800, -0.0951]], grad_fn=<MulBackward0>), tensor([[-0.0695, -0.2347, 0.1941, 0.0126, -0.1140], [-0.2564, -0.0829, 0.1944, 0.2127, -0.1447]], grad_fn=<MulBackward0>), tensor([[-0.1805, -0.1243, 0.1830, 0.1452, -0.1416], [-0.1369, -0.1500, 0.2049, 0.0742, -0.0359]], grad_fn=<MulBackward0>), tensor([[-0.1926, -0.3762, 0.3116, 0.0276, 0.0827], [-0.2320, -0.0868, 0.2628, 0.1773, 0.0027]], grad_fn=<MulBackward0>)]
tensor([[[-0.4300,  0.1251, -0.2122,  0.0323,  0.0519],
         [ 0.1709,  0.0652, -0.1586,  0.0737,  0.1276]],

        [[-0.2693, -0.1088,  0.1333,  0.0473, -0.0404],
         [ 0.1984, -0.0178, -0.0659, -0.0233,  0.0239]],

        [[ 0.0828, -0.0928, -0.0020, -0.0091, -0.0987],
         [-0.2648, -0.2424,  0.3205,  0.0800, -0.0951]],

        [[-0.0695, -0.2347,  0.1941,  0.0126, -0.1140],
         [-0.2564, -0.0829,  0.1944,  0.2127, -0.1447]],

        [[-0.1805, -0.1243,  0.1830,  0.1452, -0.1416],
         [-0.1369, -0.1500,  0.2049,  0.0742, -0.0359]],

        [[-0.1926, -0.3762,  0.3116,  0.0276,  0.0827],
         [-0.2320, -0.0868,  0.2628,  0.1773,  0.0027]]],
       grad_fn=<StackBackward>)
# Run a stacked (multi-layer) GRU over a whole sequence in one call.
import torch
import torch.nn

num_layer = 2  # number of stacked GRU layers
seq_len, batch_size = 6, 2
input_size, hidden_size = 3, 5
rnn = torch.nn.GRU(input_size, hidden_size, num_layer)
inputs = torch.randn(seq_len, batch_size, input_size)
# Initial hidden state: one (batch, hidden) slab per layer.
h0 = torch.randn(num_layer, batch_size, hidden_size)
# outputs holds the top layer's hidden state at every timestep;
# hn holds the final hidden state of every layer.
outputs, hn = rnn(inputs, h0)
# FIX: the original ended with a bare `outputs` expression, which only
# displays something in a REPL/notebook; print it so the script shows it too.
print(outputs)
tensor([[[-0.5562, -0.0266,  0.4143, -0.5868, -0.5355],
         [-0.9346,  0.1405,  0.6476,  0.1245,  0.2489]],

        [[-0.5478,  0.0366,  0.2290, -0.3898, -0.3592],
         [-0.5501,  0.2257,  0.3262,  0.0261,  0.1158]],

        [[-0.4573,  0.1116,  0.0775, -0.2578, -0.2382],
         [-0.3884,  0.2852,  0.1527, -0.0332,  0.0203]],

        [[-0.3827,  0.1408,  0.0436, -0.1802, -0.2873],
         [-0.3630,  0.3035,  0.0548, -0.1107,  0.0734]],

        [[-0.3634,  0.2086, -0.0326, -0.1208, -0.2351],
         [-0.3043,  0.3108, -0.0088, -0.0980, -0.0082]],

        [[-0.3142,  0.2445, -0.0502, -0.0986, -0.1679],
         [-0.2827,  0.3212, -0.0167, -0.0849, -0.0786]]],
       grad_fn=<StackBackward>)
# Download per-capita GDP (constant USD, indicator NY.GDP.PCAP.KD) for a set
# of countries from the World Bank and pivot it into a (year x country) frame.
from pandas_datareader import wb

countries = ['BR', 'CA', 'CN', 'FR', 'DE', 'IN', 'IL', 'JP', 'SA', 'GB', 'US']
dat = wb.download(indicator='NY.GDP.PCAP.KD', country=countries,
                  start=1970, end=2017)
# unstack() moves 'year' out of the row index; .T makes rows=years, cols=countries.
df = dat.unstack().T
df.index = df.index.droplevel(0)  # drop the constant indicator level from the index
# FIX: the original ended with a bare `df` expression, which only displays
# something in a REPL/notebook; print it so the script shows it too.
print(df)
import torch.nn


class Net(torch.nn.Module):
    """Single-layer LSTM regressor.

    Maps a (seq_len, batch) series of scalar observations to one predicted
    scalar per timestep via an LSTM followed by a per-step linear read-out.
    NOTE(review): forward() adds a size-1 feature axis, so it presumably
    expects input_size == 1 — confirm against callers.
    """

    def __init__(self, input_size, hidden_size):
        super(Net, self).__init__()
        # Recurrent encoder and a linear head producing one value per step.
        self.rnn = torch.nn.LSTM(input_size, hidden_size)
        self.fc = torch.nn.Linear(hidden_size, 1)

    def forward(self, x):
        # (seq_len, batch) -> (seq_len, batch, 1): the LSTM needs a feature axis.
        features = x.unsqueeze(-1)
        features, _ = self.rnn(features)
        prediction = self.fc(features)
        # Drop the trailing singleton feature axis: (seq_len, batch, 1) -> (seq_len, batch).
        return prediction.squeeze(-1)
# Instantiate the model and show its layer structure.
net = Net(input_size=1, hidden_size=5)
print(net)
Net( (rnn): LSTM(1, 5) (fc): Linear(in_features=5, out_features=1, bias=True) )