import torch

# clone() makes a real copy: new storage is allocated, so modifying y leaves x untouched
x = torch.tensor([1.0], requires_grad=True)
y = x.clone()
print("Id_x:{} Id_y:{}".format(id(x), id(y)))
y += 1
print("x:{} y:{}".format(x, y))
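# Added sanity check (not in the original snippet): id() only compares Python objects,
# so data_ptr() is the clearer way to confirm that clone() allocated separate memory.
print(x.data_ptr() == y.data_ptr())  # expected: False, y lives in its own storage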
print('-----------------------------------')
# detach() shares storage with the original tensor and the result has requires_grad=False,
# so the in-place y += 1 below also changes x
x = torch.tensor([1.0], requires_grad=True)
y = x.detach()
print("Id_x:{} Id_y:{}".format(id(x), id(y)))
y += 1
print("x:{} y:{}".format(x, y))
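# Same added check for detach(): both tensors should report the same underlying storage,
# which is why x now also prints 2.0 even though only y was incremented.
print(x.data_ptr() == y.data_ptr())  # expected: True, detach() shares memory with x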
print('-----------------------------------')
# .data behaves like detach(); the official explanation is that there was never enough time
# to migrate the old code away from it, so it is still around
x = torch.tensor([1.0], requires_grad=True)
y = x.data
print("Id_x:{} Id_y:{}".format(id(x), id(y)))
y += 1
print("x:{} y:{}".format(x, y))
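# Why detach() is preferred over .data: a short sketch adapted from the PyTorch 0.4
# migration notes (added here for illustration, not part of the original snippet).
# In-place changes on a detach()-ed view are reported by autograd during backward,
# while changes made through .data are silently ignored and corrupt the gradients.
a = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
out = a.sigmoid()
out.data.zero_()         # not tracked: backward() still runs, but the gradients are wrong
out.sum().backward()
print(a.grad)            # tensor([0., 0., 0.]), silently incorrect

b = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
out = b.sigmoid()
out.detach().zero_()     # tracked: autograd notices the in-place modification
# out.sum().backward()   # would raise RuntimeError because a saved tensor was modified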
x = torch.tensor([1.0], requires_grad=True)
y = x**2
z = 2 * y
w = z**3
# detach it, so the gradient through `p` does not affect `z` (or `x`)!
p = z.detach()
print(p)
q = torch.tensor([2.0], requires_grad=True)
pq = p * q
pq.backward(retain_graph=True)
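# Illustrative check (added): only q receives a gradient here, since p = 2*x**2 = 2 was cut
# off from the graph; d(pq)/dq = p = 2 and x has not accumulated anything yet.
print(q.grad)   # expected: tensor([2.])
print(x.grad)   # expected: None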
w.backward()
print(x.grad)
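# Hand check of the printed value (added for clarity): w = z**3 = (2*x**2)**3 = 8*x**6,
# so dw/dx = 48*x**5 = 48 at x = 1; the detached branch pq contributed nothing to x.grad.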
x = torch.tensor([1.0], requires_grad=True)
y = x**2
z = 2 * y
w = z**3
# create a subpath from z: the clone stays connected to the graph, so gradients through p also reach x
p = z.clone()
print(p)
q = torch.tensor([2.0], requires_grad=True)
pq = p * q
pq.backward(retain_graph=True)
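# Completing the comparison with the detach() example above (an added sketch): because the
# cloned p is still attached to x, pq.backward() already deposited q*dz/dx = 2*4*x = 8 into
# x.grad, and w.backward() accumulates another 48 on top of it.
w.backward()
print(x.grad)   # expected: tensor([56.])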