import torch
import numpy as np
torch.Tensor([-1])  # torch.Tensor defaults to float32, so the int -1 prints as -1.
tensor([-1.])
# About torch.nn.Embedding
entity_number = 5
relation_number = 3
dim = 4
ent_embeddings = torch.nn.Embedding(entity_number,dim)
rel_embeddings = torch.nn.Embedding(relation_number,dim)
# torch.nn.Embedding.weight & torch.nn.Embedding.weight.data
print("ent_embeddings.weight:",ent_embeddings.weight)
print("ent_embeddings.weight.data:",ent_embeddings.weight.data)
ent_embeddings.weight: Parameter containing:
tensor([[-2.5187e-01, 5.8727e-01, 1.2354e+00, 7.1489e-01],
[-5.6152e-01, -5.5336e-01, -1.0236e+00, -4.4449e-01],
[-7.8346e-04, -5.8843e-01, -1.4657e+00, -1.2068e+00],
[ 5.5088e-01, 1.7039e+00, 2.1763e-01, -8.9384e-01],
[ 1.3281e+00, -3.2343e-01, -1.0004e+00, -6.3203e-01]],
requires_grad=True)
ent_embeddings.weight.data: tensor([[-2.5187e-01, 5.8727e-01, 1.2354e+00, 7.1489e-01],
[-5.6152e-01, -5.5336e-01, -1.0236e+00, -4.4449e-01],
[-7.8346e-04, -5.8843e-01, -1.4657e+00, -1.2068e+00],
[ 5.5088e-01, 1.7039e+00, 2.1763e-01, -8.9384e-01],
[ 1.3281e+00, -3.2343e-01, -1.0004e+00, -6.3203e-01]])
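# A side note (my addition, not from the original run): .weight is a Parameter
# tracked by autograd, while .weight.data views the same storage as a plain
# tensor, so writes through .data bypass gradient tracking. That is why
# TransE-style code L2-normalizes entity embeddings in place through .data.
# Demonstrated on a clone so the values printed above stay untouched:
w = ent_embeddings.weight.data.clone()
w.div_(w.norm(p=2, dim=1, keepdim=True))  # in-place row-wise L2 normalization
print(w.norm(p=2, dim=1))  # every row norm is now 1.0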
batch_h = np.zeros(2,dtype=np.int64)
batch_h[0] = 1
print(batch_h)
[1 0]
ent_embeddings(torch.from_numpy(batch_h))  # torch.autograd.Variable is deprecated; tensors work directly
tensor([[-0.5615, -0.5534, -1.0236, -0.4445],
[-0.2519, 0.5873, 1.2354, 0.7149]], grad_fn=<EmbeddingBackward>)
train = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]
train = np.array(train,dtype=np.int64)  # scratch index array; not used below
ent_embeddings(torch.from_numpy(np.arange(2,dtype=np.int64)))
tensor([[-0.2519, 0.5873, 1.2354, 0.7149],
[-0.5615, -0.5534, -1.0236, -0.4445]], grad_fn=<EmbeddingBackward>)
res = torch.Tensor([1.0,1.0,1.0,1.0])+ent_embeddings(torch.from_numpy(np.arange(2,dtype=np.int64)))
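# A quick check (my addition): an Embedding forward is just a row lookup into
# the weight matrix, so indexing the weights directly gives identical values.
idx = torch.from_numpy(np.arange(2, dtype=np.int64))
print(torch.equal(ent_embeddings(idx), ent_embeddings.weight[idx]))  # True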
for_norm = torch.Tensor([[1,2,3],[4,5,6]])
for_norm
tensor([[1., 2., 3.],
[4., 5., 6.]])
res = torch.norm(for_norm,2,-1)
res
tensor([3.7417, 8.7750])
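# Hand check (my addition): dim=-1 reduces over the last axis, so each row of
# for_norm gets its own L2 norm: sqrt(1+4+9) ≈ 3.7417 and sqrt(16+25+36) ≈ 8.7750.
import math
print(math.sqrt(1 + 4 + 9), math.sqrt(16 + 25 + 36))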
order_index = res.argsort()
0 in order_index[:1]
True
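# The membership test above is the hits@k idea (my sketch; the helper name is
# assumed): a prediction is a hit if the true index ranks inside the top k.
def hits_at_k(scores, true_index, k=1):
    # scores are distances here, so smaller is better and argsort ascends
    return true_index in scores.argsort()[:k]
print(hits_at_k(res, 0, k=1))  # True: row 0 has the smaller norm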
tt = torch.Tensor([3,-3])
print("tt before:",tt)
print("tt after:",torch.mean(tt.view(-1,2)))
tt before: tensor([ 3., -3.])
tt after: tensor(0.)
criterion = torch.nn.MarginRankingLoss(1.0,reduction="sum")
x1 = torch.Tensor([7,4])
x2 = torch.Tensor([3,10])
print(x1)
print(x2)
tensor([7., 4.])
tensor([ 3., 10.])
loss = criterion(x1,x2,torch.Tensor([-1]))
print(loss)
tensor(5.)
1 in x1.cpu().data.numpy().argsort()[:1]
True
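# Hand computation of the loss above (my addition): with target y = -1 and
# margin 1, MarginRankingLoss sums max(0, (x1 - x2) + 1) over the pairs:
# (7, 3) -> max(0, 5) = 5 and (4, 10) -> max(0, -5) = 0, so the total is 5.
manual = torch.clamp((x1 - x2) + 1.0, min=0).sum()
print(manual)  # tensor(5.)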
torch.nn.init.xavier_uniform_(ent_embeddings.weight.data)  # xavier_uniform (no underscore) is deprecated
tensor([[-0.5530, -0.5538, -0.7787, -0.7978],
[ 0.7861, 0.1092, 0.6927, -0.1110],
[-0.4178, -0.5045, 0.2555, 0.5819],
[-0.0015, -0.1020, -0.2211, -0.1957],
[-0.6460, -0.7293, -0.1276, 0.7769]])
ent_embeddings.weight.data
tensor([[-0.5530, -0.5538, -0.7787, -0.7978],
[ 0.7861, 0.1092, 0.6927, -0.1110],
[-0.4178, -0.5045, 0.2555, 0.5819],
[-0.0015, -0.1020, -0.2211, -0.1957],
[-0.6460, -0.7293, -0.1276, 0.7769]])
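# The bound behind those values (standard Glorot formula, my addition): for a
# 5x4 weight, xavier_uniform_ samples from U(-a, a) with
# a = sqrt(6 / (fan_in + fan_out)) = sqrt(6 / 9) ≈ 0.8165.
import math
print(math.sqrt(6 / (5 + 4)))  # all entries above fall inside (-0.8165, 0.8165)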
batch_h = torch.from_numpy(np.array([0,1,2],dtype=np.int64))
batch_r = torch.from_numpy(np.array([1,2,2],dtype=np.int64))
batch_t = torch.from_numpy(np.array([1,4,3],dtype=np.int64))
print(batch_h,batch_r,batch_t)
tensor([0, 1, 2]) tensor([1, 2, 2]) tensor([1, 4, 3])
ent_embeddings(batch_h)
tensor([[-0.5530, -0.5538, -0.7787, -0.7978],
        [ 0.7861,  0.1092,  0.6927, -0.1110],
        [-0.4178, -0.5045,  0.2555,  0.5819]], grad_fn=<EmbeddingBackward>)
h = ent_embeddings(batch_h)
r = rel_embeddings(batch_r)
t = ent_embeddings(batch_t)
print(h)
print(r)
print(t)
tensor([[-0.5530, -0.5538, -0.7787, -0.7978],
[ 0.7861, 0.1092, 0.6927, -0.1110],
[-0.4178, -0.5045, 0.2555, 0.5819]], grad_fn=<EmbeddingBackward>)
tensor([[ 1.0431, 1.2827, 0.5053, 0.5848],
[ 1.0846, 0.0150, -1.1637, 0.8096],
[ 1.0846, 0.0150, -1.1637, 0.8096]], grad_fn=<EmbeddingBackward>)
tensor([[ 0.7861, 0.1092, 0.6927, -0.1110],
[-0.6460, -0.7293, -0.1276, 0.7769],
[-0.0015, -0.1020, -0.2211, -0.1957]], grad_fn=<EmbeddingBackward>)
score = torch.norm(h + r - t, 1, -1)
print(score)
tensor([1.9839, 3.7918, 3.3300], grad_fn=<NormBackward1>)
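# What this computes (TransE scoring, my summary): for each triple (h, r, t)
# the score is ||h + r - t||_1, and a plausible triple should score near zero.
# Element-wise check for the first triple:
print((h[0] + r[0] - t[0]).abs().sum())  # matches score[0]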
pos_score = score[0:1]
neg_score = score[1:3]
print(pos_score)
print(neg_score)
tensor([1.9839], grad_fn=<SliceBackward>)
tensor([3.7918, 3.3300], grad_fn=<SliceBackward>)
neg_score = neg_score.view(-1,1)
print(neg_score)
tensor([[3.7918],
[3.3300]], grad_fn=<ViewBackward>)
neg_score = torch.mean(neg_score,0)
print(neg_score)
tensor([3.5609], grad_fn=<MeanBackward2>)
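# Putting the pieces together (a sketch, my addition): the margin loss should
# push pos_score below neg_score, so the target is y = -1, as above.
print(criterion(pos_score, neg_score, torch.Tensor([-1])))
# 0 here, since pos_score already sits more than the margin below neg_score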
tt = torch.Tensor([1,-1,2,3,-5,7])
tt
tensor([ 1., -1., 2., 3., -5., 7.])
tt.view(-1,2)
tensor([[ 1., -1.],
[ 2., 3.],
[-5., 7.]])
torch.mean(tt.view(-1,2),0)
tensor([-0.6667, 3.0000])
inp = torch.randn(3, 5, requires_grad=True)
inp
tensor([[-0.8090, -1.2097, 1.3346, -0.2813, -1.1619],
[ 0.3427, -1.3393, 0.7075, 0.7741, -1.8080],
[ 2.2852, -1.2224, -2.0034, -0.4067, -0.6091]], requires_grad=True)
target = torch.randint(5, (3,), dtype=torch.int64)
target
tensor([0, 4, 1])
loss = torch.nn.functional.cross_entropy(inp, target)
loss
tensor(3.2668, grad_fn=<NllLossBackward>)
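# Equivalent computation (a known identity, my addition): cross_entropy is
# log_softmax followed by the negative log-likelihood loss.
manual = torch.nn.functional.nll_loss(
    torch.nn.functional.log_softmax(inp, dim=1), target)
print(manual)  # matches the loss above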