In [2]:
import torch.nn
cell=torch.nn.RNNCell(input_size=3,hidden_size=2)
for name,param in cell.named_parameters():
    print('{}={}'.format(name,param))
weight_ih=Parameter containing:
tensor([[ 0.1831, -0.4158,  0.6376],
        [-0.6257,  0.1874, -0.2692]], requires_grad=True)
weight_hh=Parameter containing:
tensor([[-0.0016,  0.1171],
        [-0.5100,  0.1382]], requires_grad=True)
bias_ih=Parameter containing:
tensor([0.0149, 0.4452], requires_grad=True)
bias_hh=Parameter containing:
tensor([0.1351, 0.6150], requires_grad=True)
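These four parameters are exactly what the RNNCell update rule uses: h' = tanh(x·W_ih^T + b_ih + h·W_hh^T + b_hh). A minimal sketch to verify this by hand, assuming the cell above has just been run (the tensor names `x` and `h` are illustrative):
In [ ]:
# reproduce the RNNCell update h' = tanh(x @ W_ih.T + b_ih + h @ W_hh.T + b_hh)
x=torch.randn(1,3)   # one sample, input_size=3
h=torch.zeros(1,2)   # initial hidden state, hidden_size=2
h_cell=cell(x,h)     # what RNNCell computes
h_manual=torch.tanh(x@cell.weight_ih.T+cell.bias_ih
                    +h@cell.weight_hh.T+cell.bias_hh)
print(torch.allclose(h_cell,h_manual))  # expected: True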
In [8]:
# construct an LSTMCell instance and step it through time to build a single-layer RNN
import torch.nn
import torch
seq_len,batch_size=6,2
input_size,hidden_size=3,5

cell=torch.nn.LSTMCell(input_size=input_size,hidden_size=hidden_size)
inputs=torch.randn(seq_len,batch_size,input_size)
h=torch.randn(batch_size,hidden_size)
c=torch.randn(batch_size,hidden_size)
hs=[]
for t in range(seq_len):
    h,c=cell(inputs[t],(h,c))
    hs.append(h)
outputs=torch.stack(hs)
print(hs)
outputs
[tensor([[-0.4300,  0.1251, -0.2122,  0.0323,  0.0519],
        [ 0.1709,  0.0652, -0.1586,  0.0737,  0.1276]], grad_fn=<MulBackward0>), tensor([[-0.2693, -0.1088,  0.1333,  0.0473, -0.0404],
        [ 0.1984, -0.0178, -0.0659, -0.0233,  0.0239]], grad_fn=<MulBackward0>), tensor([[ 0.0828, -0.0928, -0.0020, -0.0091, -0.0987],
        [-0.2648, -0.2424,  0.3205,  0.0800, -0.0951]], grad_fn=<MulBackward0>), tensor([[-0.0695, -0.2347,  0.1941,  0.0126, -0.1140],
        [-0.2564, -0.0829,  0.1944,  0.2127, -0.1447]], grad_fn=<MulBackward0>), tensor([[-0.1805, -0.1243,  0.1830,  0.1452, -0.1416],
        [-0.1369, -0.1500,  0.2049,  0.0742, -0.0359]], grad_fn=<MulBackward0>), tensor([[-0.1926, -0.3762,  0.3116,  0.0276,  0.0827],
        [-0.2320, -0.0868,  0.2628,  0.1773,  0.0027]], grad_fn=<MulBackward0>)]
Out[8]:
tensor([[[-0.4300,  0.1251, -0.2122,  0.0323,  0.0519],
         [ 0.1709,  0.0652, -0.1586,  0.0737,  0.1276]],

        [[-0.2693, -0.1088,  0.1333,  0.0473, -0.0404],
         [ 0.1984, -0.0178, -0.0659, -0.0233,  0.0239]],

        [[ 0.0828, -0.0928, -0.0020, -0.0091, -0.0987],
         [-0.2648, -0.2424,  0.3205,  0.0800, -0.0951]],

        [[-0.0695, -0.2347,  0.1941,  0.0126, -0.1140],
         [-0.2564, -0.0829,  0.1944,  0.2127, -0.1447]],

        [[-0.1805, -0.1243,  0.1830,  0.1452, -0.1416],
         [-0.1369, -0.1500,  0.2049,  0.0742, -0.0359]],

        [[-0.1926, -0.3762,  0.3116,  0.0276,  0.0827],
         [-0.2320, -0.0868,  0.2628,  0.1773,  0.0027]]],
       grad_fn=<StackBackward>)
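The manual time loop above is what the batched torch.nn.LSTM module runs internally. A minimal sketch of the equivalent call, reusing the tensors from the previous cell (the module initializes its own weights, so the numbers will differ, but the output shape matches):
In [ ]:
lstm=torch.nn.LSTM(input_size=input_size,hidden_size=hidden_size)
h0=torch.zeros(1,batch_size,hidden_size)  # (num_layers, batch, hidden)
c0=torch.zeros(1,batch_size,hidden_size)
out,(hn,cn)=lstm(inputs,(h0,c0))
print(out.shape)  # torch.Size([6, 2, 5]), same shape as outputs above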
In [1]:
import torch
import torch.nn
num_layer=2 # number of stacked recurrent layers
seq_len,batch_size=6,2
input_size,hidden_size=3,5
rnn=torch.nn.GRU(input_size,hidden_size,num_layer)
inputs=torch.randn(seq_len,batch_size,input_size)
h0=torch.randn(num_layer,batch_size,hidden_size)
outputs,hn=rnn(inputs,h0)
outputs
Out[1]:
tensor([[[-0.5562, -0.0266,  0.4143, -0.5868, -0.5355],
         [-0.9346,  0.1405,  0.6476,  0.1245,  0.2489]],

        [[-0.5478,  0.0366,  0.2290, -0.3898, -0.3592],
         [-0.5501,  0.2257,  0.3262,  0.0261,  0.1158]],

        [[-0.4573,  0.1116,  0.0775, -0.2578, -0.2382],
         [-0.3884,  0.2852,  0.1527, -0.0332,  0.0203]],

        [[-0.3827,  0.1408,  0.0436, -0.1802, -0.2873],
         [-0.3630,  0.3035,  0.0548, -0.1107,  0.0734]],

        [[-0.3634,  0.2086, -0.0326, -0.1208, -0.2351],
         [-0.3043,  0.3108, -0.0088, -0.0980, -0.0082]],

        [[-0.3142,  0.2445, -0.0502, -0.0986, -0.1679],
         [-0.2827,  0.3212, -0.0167, -0.0849, -0.0786]]],
       grad_fn=<StackBackward>)
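The GRU returns two things: `outputs`, the top layer's hidden state at every time step, and `hn`, the final time step's hidden state for every layer. A quick shape check, assuming the cell above was just run:
In [ ]:
print(outputs.shape)  # torch.Size([6, 2, 5]): (seq_len, batch, hidden)
print(hn.shape)       # torch.Size([2, 2, 5]): (num_layer, batch, hidden)
# the last output step equals the top layer's final hidden state
print(torch.allclose(outputs[-1],hn[-1]))  # expected: True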
In [1]:
from pandas_datareader import wb
countries=['BR','CA','CN','FR','DE','IN','IL','JP','SA','GB','US']
dat=wb.download(indicator='NY.GDP.PCAP.KD',country=countries,start=1970,end=2017)
df=dat.unstack().T
df.index=df.index.droplevel(0)
df

Out[1]:
(48 x 11 DataFrame of per-capita GDP: years 1970-2017 on the rows, the 11 countries on the columns; table display omitted)
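`wb.download` returns a frame indexed by (country, year) with one column per indicator; `unstack().T` pivots it to years on the rows and countries on the columns, and `droplevel(0)` removes the leftover indicator level from the index. A quick sanity check, assuming the download succeeded:
In [ ]:
print(df.shape)             # expected (48, 11): years on rows, countries on columns
print(df.columns.tolist())  # full country names as returned by the World Bank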
In [2]:
import torch.nn
class Net(torch.nn.Module):
    def __init__(self,input_size,hidden_size):
        super(Net,self).__init__()
        self.rnn=torch.nn.LSTM(input_size,hidden_size)
        self.fc=torch.nn.Linear(hidden_size,1)
    
    def forward(self,x):
        x=x[:,:,None]
        x,_=self.rnn(x)
        x=self.fc(x)
        x=x[:,:,0]
        return x
net=Net(input_size=1,hidden_size=5)
print(net)
Net(
  (rnn): LSTM(1, 5)
  (fc): Linear(in_features=5, out_features=1, bias=True)
)
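`forward` expects a 2-D `(seq_len, batch)` tensor of scalar observations: `x[:,:,None]` appends the feature dimension the LSTM needs, and `x[:,:,0]` strips it again after the linear layer, so the output has the same shape as the input. A shape trace with dummy data (sizes are illustrative):
In [ ]:
dummy=torch.randn(4,3)   # (seq_len=4, batch=3) of scalar observations
print(net(dummy).shape)  # torch.Size([4, 3]): same shape out as in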
In [9]:
import torch
import torch.optim
from IPython.display import display
import pandas as pd


# normalize each series by its year-2000 value
df_scaled=df/df.loc['2000'] # the original book has an error here
# split the years into a training range and a test range
years=df.index
print(years)
train_seq_len=sum((years>='1971')&(years<='2000'))
test_seq_len=sum(years>'2000')
print('training set length={}, test set length={}'.format(train_seq_len,test_seq_len))

# features and labels for training: each year's values predict the next year's
inputs=torch.tensor(df_scaled.iloc[:-1].values,dtype=torch.float32)
labels=torch.tensor(df_scaled.iloc[1:].values,dtype=torch.float32)

# train the network with early stopping on the test loss
criterion=torch.nn.MSELoss()
optimizer=torch.optim.Adam(net.parameters())
test_loss_count=[1]  # running record of the best test losses seen so far
count=1              # steps since the last improvement
for step in range(10001):
    if step:
        # backpropagate the loss computed in the previous iteration,
        # so a single forward pass serves both the update and the logging
        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()
    preds=net(inputs)
    train_preds=preds[:train_seq_len]
    train_labels=labels[:train_seq_len]
    train_loss=criterion(train_preds,train_labels)

    test_preds=preds[-test_seq_len:]
    test_labels=labels[-test_seq_len:]
    test_loss=criterion(test_preds,test_labels)
    count=count+1

    if step%500==0:
        print('step {}: loss(train)={}, loss(test)={}'.format(step,train_loss,test_loss))
    if test_loss<test_loss_count[-1]:
        # new best test loss: record it and keep these predictions
        test_loss_count.append(test_loss)
        count=0
        pred_count=preds
    if count>500:
        # no improvement for 500 steps: stop early
        break
Index(['1970', '1971', '1972', '1973', '1974', '1975', '1976', '1977', '1978',
       '1979', '1980', '1981', '1982', '1983', '1984', '1985', '1986', '1987',
       '1988', '1989', '1990', '1991', '1992', '1993', '1994', '1995', '1996',
       '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005',
       '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014',
       '2015', '2016', '2017'],
      dtype='object', name='year')
training set length=30, test set length=17
step 0: loss(train)=0.0016938640037551522, loss(test)=0.03710787370800972
step 500: loss(train)=0.0012621285859495401, loss(test)=0.058069389313459396
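Note the unusual ordering inside the loop: each iteration backpropagates the loss from the previous iteration before recomputing predictions. A more conventional (and equivalent) ordering of a single training step, as a sketch:
In [ ]:
# conventional ordering of one training step (sketch, same effect per step)
optimizer.zero_grad()
preds=net(inputs)
train_loss=criterion(preds[:train_seq_len],labels[:train_seq_len])
train_loss.backward()
optimizer.step()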
In [10]:
from IPython.display import display
import pandas as pd
# use pred_count, the predictions saved at the best test loss during training
df_pred_scaled=pd.DataFrame(pred_count.detach().numpy(),index=years[1:],columns=df.columns)
# undo the year-2000 normalization to recover original units
df_pred=df_pred_scaled*df.loc['2000']
display(df_pred.loc['2001':])
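To quantify the fit in original units, one option is the mean absolute percentage error per country over the test years; a sketch, assuming `df_pred` and `df` align on index and columns as built above:
In [ ]:
# mean absolute percentage error per country on the test years (sketch)
mape=((df_pred.loc['2001':]-df.loc['2001':]).abs()/df.loc['2001':]).mean()
print(mape.sort_values())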
In [11]:
import matplotlib.pyplot as plt
%matplotlib inline
y=df_pred.loc[:,'France']
x=df.loc[:,'France']
plt.plot(x,label='original data')
plt.plot(y,label='predicted results')
plt.legend()

Out[11]:
<matplotlib.legend.Legend at 0x1146d9f1f98>
[Figure: original vs. predicted per-capita GDP for France]
In [24]:
len(labels[-test_seq_len:])
Out[24]:
17
In [ ]:
import jovian
jovian.commit("rnn_temp.ipynb")
[jovian] Saving notebook..