In [76]:
import torch
import numpy as np
In [77]:
torch.Tensor([-1])
Out[77]:
tensor([-1.])
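Worth noting: torch.Tensor is an alias for the default FloatTensor constructor, which is why the integer literal comes back as -1. here. The lowercase torch.tensor factory would infer an integer dtype instead:

torch.Tensor([-1]).dtype  # torch.float32 (constructor casts to the default float type)
torch.tensor([-1]).dtype  # torch.int64 (factory infers dtype from the data)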
In [78]:
# About torch.nn.Embedding
entity_number = 5
relation_number = 3
dim = 4
ent_embeddings = torch.nn.Embedding(entity_number,dim)
rel_embeddings = torch.nn.Embedding(relation_number,dim)
In [79]:
# torch.nn.Embedding.weight & torch.nn.Embedding.weight.data
print("ent_embeddings.weight:",ent_embeddings.weight)
print("ent_embeddings.weight.data:",ent_embeddings.weight.data)
ent_embeddings.weight: Parameter containing:
tensor([[-2.5187e-01,  5.8727e-01,  1.2354e+00,  7.1489e-01],
        [-5.6152e-01, -5.5336e-01, -1.0236e+00, -4.4449e-01],
        [-7.8346e-04, -5.8843e-01, -1.4657e+00, -1.2068e+00],
        [ 5.5088e-01,  1.7039e+00,  2.1763e-01, -8.9384e-01],
        [ 1.3281e+00, -3.2343e-01, -1.0004e+00, -6.3203e-01]], requires_grad=True)
ent_embeddings.weight.data: tensor([[-2.5187e-01,  5.8727e-01,  1.2354e+00,  7.1489e-01],
        [-5.6152e-01, -5.5336e-01, -1.0236e+00, -4.4449e-01],
        [-7.8346e-04, -5.8843e-01, -1.4657e+00, -1.2068e+00],
        [ 5.5088e-01,  1.7039e+00,  2.1763e-01, -8.9384e-01],
        [ 1.3281e+00, -3.2343e-01, -1.0004e+00, -6.3203e-01]])
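An nn.Embedding is just a learnable lookup table of shape (num_embeddings, dim): indexing it with an index tensor returns the corresponding rows of its weight matrix. A minimal check:

idx = torch.tensor([2, 0])
torch.equal(ent_embeddings(idx), ent_embeddings.weight[idx])  # True: lookup == row indexing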
In [80]:
batch_h = np.zeros(2,dtype=np.int64)
batch_h[0] = 1
print(batch_h)
[1 0]
In [81]:
ent_embeddings(torch.from_numpy(batch_h))  # torch.autograd.Variable is deprecated since PyTorch 0.4; plain tensors work directly
Out[81]:
tensor([[-0.5615, -0.5534, -1.0236, -0.4445],
        [-0.2519,  0.5873,  1.2354,  0.7149]], grad_fn=<EmbeddingBackward>)
In [82]:
train = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]
train = np.array(train,dtype = np.int64)
In [83]:
ent_embeddings(torch.from_numpy(np.arange(2, dtype=np.int64)))
Out[83]:
tensor([[-0.2519,  0.5873,  1.2354,  0.7149],
        [-0.5615, -0.5534, -1.0236, -0.4445]], grad_fn=<EmbeddingBackward>)
In [84]:
res = torch.Tensor([1.0, 1.0, 1.0, 1.0]) + ent_embeddings(torch.from_numpy(np.arange(2, dtype=np.int64)))  # broadcasts over both rows
In [85]:
for_norm = torch.Tensor([[1,2,3],[4,5,6]])
for_norm
Out[85]:
tensor([[1., 2., 3.],
        [4., 5., 6.]])
In [86]:
res = torch.norm(for_norm,2,-1)
res
Out[86]:
tensor([3.7417, 8.7750])
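torch.norm(for_norm, 2, -1) takes the L2 norm along the last dimension, i.e. one norm per row. Verifying by hand:

import math
math.sqrt(1**2 + 2**2 + 3**2)  # 3.7417, matches res[0]
math.sqrt(4**2 + 5**2 + 6**2)  # 8.7750, matches res[1]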
In [87]:
order_index = res.argsort()
In [88]:
0 in order_index[:1]
Out[88]:
True
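This argsort-membership test is the usual shape of a Hits@k check in TransE-style link prediction: with distance scores, the candidate ranks in the top k iff its index appears among the first k positions of the ascending sort. A sketch of that idea (hits_at_k is a hypothetical helper name, not from this notebook):

def hits_at_k(scores, true_idx, k=1):
    # hypothetical helper: lower score = better, so ascending argsort ranks candidates
    return true_idx in scores.argsort()[:k]

hits_at_k(res, 0)  # True: index 0 has the smallest norm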
In [89]:
tt = torch.Tensor([3,-3])
print("tt before:",tt)
print("tt after:",torch.mean(tt.view(-1,2)))
tt before: tensor([ 3., -3.])
tt after: tensor(0.)
In [90]:
criterion = torch.nn.MarginRankingLoss(1.0, reduction="sum")
In [91]:
x1 = torch.Tensor([7,4])
x2 = torch.Tensor([3,10])
print(x1)
print(x2)
tensor([7., 4.])
tensor([ 3., 10.])
In [92]:
loss = criterion(x1, x2, torch.Tensor([-1]))
print(loss)
tensor(5.)
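MarginRankingLoss computes max(0, -y*(x1 - x2) + margin) per element; with y = -1 that is max(0, (x1 - x2) + 1), and reduction="sum" adds the elements up. A quick check against the value above:

# elementwise: max(0, (7-3)+1) = 5 and max(0, (4-10)+1) = 0, so the sum is 5
torch.clamp((x1 - x2) + 1.0, min=0).sum()  # tensor(5.)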
In [93]:
1 in x1.cpu().data.numpy().argsort()[:1]
Out[93]:
True
In [94]:
import jovian
In [95]:
jovian.commit(nb_filename="trans_learning.ipynb")
[jovian] Saving notebook..
[jovian] Updating notebook "5d61579dfab6441599cc3d1b9a1d5858" on https://jvn.io
[jovian] Uploading notebook..
[jovian] Capturing environment..
[jovian] Committed successfully! https://jvn.io/pierrezhangcw/5d61579dfab6441599cc3d1b9a1d5858
In [96]:
torch.nn.init.xavier_uniform_(ent_embeddings.weight.data)  # the in-place underscore version; nn.init.xavier_uniform is deprecated
Out[96]:
tensor([[-0.5530, -0.5538, -0.7787, -0.7978],
        [ 0.7861,  0.1092,  0.6927, -0.1110],
        [-0.4178, -0.5045,  0.2555,  0.5819],
        [-0.0015, -0.1020, -0.2211, -0.1957],
        [-0.6460, -0.7293, -0.1276,  0.7769]])
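Xavier uniform initialization fills the tensor with values drawn from U(-a, a), where a = sqrt(6 / (fan_in + fan_out)). For this 5x4 weight the bound is:

import math
math.sqrt(6 / (5 + 4))  # ≈ 0.8165; every entry above indeed falls inside (-0.8165, 0.8165)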
In [98]:
ent_embeddings.weight.data
Out[98]:
tensor([[-0.5530, -0.5538, -0.7787, -0.7978],
        [ 0.7861,  0.1092,  0.6927, -0.1110],
        [-0.4178, -0.5045,  0.2555,  0.5819],
        [-0.0015, -0.1020, -0.2211, -0.1957],
        [-0.6460, -0.7293, -0.1276,  0.7769]])
In [113]:
batch_h = torch.from_numpy(np.array([0,1,2],dtype=np.int64))
batch_r = torch.from_numpy(np.array([1,2,2],dtype=np.int64))
batch_t = torch.from_numpy(np.array([1,4,3],dtype=np.int64))
print(batch_h,batch_r,batch_t)
tensor([0, 1, 2]) tensor([1, 2, 2]) tensor([1, 4, 3])
In [107]:
ent_embeddings(batch_h)
Out[107]:
tensor([[-0.5530, -0.5538, -0.7787, -0.7978],
        [ 0.7861,  0.1092,  0.6927, -0.1110],
        [-0.4178, -0.5045,  0.2555,  0.5819]], grad_fn=<EmbeddingBackward>)
In [114]:
h = ent_embeddings(batch_h)
r = rel_embeddings(batch_r)
t = ent_embeddings(batch_t)
print(h)
print(r)
print(t)
tensor([[-0.5530, -0.5538, -0.7787, -0.7978],
        [ 0.7861,  0.1092,  0.6927, -0.1110],
        [-0.4178, -0.5045,  0.2555,  0.5819]], grad_fn=<EmbeddingBackward>)
tensor([[ 1.0431,  1.2827,  0.5053,  0.5848],
        [ 1.0846,  0.0150, -1.1637,  0.8096],
        [ 1.0846,  0.0150, -1.1637,  0.8096]], grad_fn=<EmbeddingBackward>)
tensor([[ 0.7861,  0.1092,  0.6927, -0.1110],
        [-0.6460, -0.7293, -0.1276,  0.7769],
        [-0.0015, -0.1020, -0.2211, -0.1957]], grad_fn=<EmbeddingBackward>)
In [115]:
score = torch.norm(h + r - t, 1, -1)
print(score)
tensor([1.9839, 3.7918, 3.3300], grad_fn=<NormBackward1>)
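This is the TransE scoring function ||h + r - t||_1: a lower L1 distance means the triple is more plausible. The L1 norm along the last dimension is just the sum of absolute coordinates, so the same scores can be reproduced as:

torch.sum(torch.abs(h + r - t), -1)  # tensor([1.9839, 3.7918, 3.3300], ...)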
In [120]:
pos_score = score[0:1]
neg_score = score[1:3]
print(pos_score)
print(neg_score)
tensor([1.9839], grad_fn=<SliceBackward>)
tensor([3.7918, 3.3300], grad_fn=<SliceBackward>)
In [121]:
neg_score = neg_score.view(-1,1)
print(neg_score)
tensor([[3.7918],
        [3.3300]], grad_fn=<ViewBackward>)
In [122]:
neg_score = torch.mean(neg_score,0)
print(neg_score)
tensor([3.5609], grad_fn=<MeanBackward2>)
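Tying the pieces together: the positive score sits below the averaged negative score by more than the margin, so the ranking loss from the criterion defined earlier is already zero.

criterion(pos_score, neg_score, torch.Tensor([-1]))
# max(0, (1.9839 - 3.5609) + 1) = 0: the margin of 1 is already satisfied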
In [128]:
tt = torch.Tensor([1,-1,2,3,-5,7])
tt
Out[128]:
tensor([ 1., -1.,  2.,  3., -5.,  7.])
In [135]:
tt.view(-1,2)
Out[135]:
tensor([[ 1., -1.],
        [ 2.,  3.],
        [-5.,  7.]])
In [137]:
torch.mean(tt.view(-1,2),0)
Out[137]:
tensor([-0.6667,  3.0000])
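Mean along dim 0 averages each column of the reshaped view. Quick arithmetic check:

((1 + 2 - 5) / 3, (-1 + 3 + 7) / 3)  # (-0.6667, 3.0): the two column means above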
In [138]:
inp = torch.randn(3, 5, requires_grad=True)
inp
Out[138]:
tensor([[-0.8090, -1.2097,  1.3346, -0.2813, -1.1619],
        [ 0.3427, -1.3393,  0.7075,  0.7741, -1.8080],
        [ 2.2852, -1.2224, -2.0034, -0.4067, -0.6091]], requires_grad=True)
In [140]:
target = torch.randint(5, (3,), dtype=torch.int64)
target
Out[140]:
tensor([0, 4, 1])
In [142]:
loss = torch.nn.functional.cross_entropy(inp, target)
loss
Out[142]:
tensor(3.2668, grad_fn=<NllLossBackward>)
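F.cross_entropy is log_softmax followed by the negative log-likelihood of the target class, averaged over the batch. Reproducing it manually gives the same value:

log_probs = torch.nn.functional.log_softmax(inp, dim=1)
-log_probs[torch.arange(3), target].mean()  # tensor(3.2668, ...), same as above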
In [ ]: