import torch
from torch.autograd import Variable

tensor = torch.FloatTensor([[1, 2], [3, 4]])
variable = Variable(tensor, requires_grad=True)  # requires_grad=True: backpropagation will need this gradient
print(tensor)    # tensor([[1., 2.], [3., 4.]])
print(variable)  # tensor([[1., 2.], [3., 4.]], requires_grad=True)

t_out = torch.mean(tensor * tensor)      # mean of x^2
v_out = torch.mean(variable * variable)
print(t_out)  # tensor(7.5000)
print(v_out)  # tensor(7.5000, grad_fn=<MeanBackward0>)  the Variable result carries gradient info that the plain tensor lacks

v_out.backward()  # backpropagation
# Derivation: v_out = 1/4 * sum(var * var)
#             d(v_out)/d(var) = 1/4 * 2 * variable = variable / 2
print(variable.grad)  # tensor([[0.5000, 1.0000], [1.5000, 2.0000]])

print("variable:", variable)                           # tensor([[1., 2.], [3., 4.]], requires_grad=True)
print("variable.data:", variable.data)                 # tensor([[1., 2.], [3., 4.]])
print("variable.data.numpy:", variable.data.numpy())   # [[1. 2.] [3. 4.]]
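For reference, the same example without the Variable wrapper: since PyTorch 0.4 Variable has been deprecated and merged into Tensor, so requires_grad can be set on the tensor directly. A minimal sketch of the modern equivalent:

import torch

# Tensors carry requires_grad themselves; no Variable wrapper needed
x = torch.tensor([[1., 2.], [3., 4.]], requires_grad=True)
out = torch.mean(x * x)   # same mean of x^2 as above
out.backward()            # autograd fills in x.grad
print(x.grad)             # tensor([[0.5000, 1.0000], [1.5000, 2.0000]])

# detach() is the modern replacement for .data when a gradient-free
# view is needed, e.g. before converting to NumPy
print(x.detach().numpy())  # [[1. 2.] [3. 4.]]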