import torch
def demo1(x: torch.Tensor):
    # x starts as tensor([0., 1., 2., 3.])
    x.requires_grad_(True)  # tensor([0., 1., 2., 3.], requires_grad=True)
    print(x)
    # The two lines above are equivalent to x = torch.arange(4.0, requires_grad=True)
    y = 2 * torch.dot(x, x)  # tensor(28., grad_fn=<MulBackward0>)
    y.backward()  # backward pass: automatically compute the gradient of y w.r.t. each component of x
    print(x.grad)  # tensor([ 0.,  4.,  8., 12.])
    print(x.grad == 4 * x)  # y = 2 * x·x, so dy/dx = 4x
    # By default, PyTorch accumulates gradients, so the previous values must be
    # cleared before the next backward pass (see demo_accumulation below).
    x.grad.zero_()
    y = 2 * torch.dot(x, 2 * x)  # y = 4 * x·x, so dy/dx = 8x
    y.backward()
    print(x.grad)  # tensor([ 0.,  8., 16., 24.])
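
def demo_accumulation(x: torch.Tensor):
    # Sketch (not part of the original demos; the name demo_accumulation is
    # assumed): shows why zero_() is needed in demo1. backward() *adds* to
    # x.grad, so a second pass without clearing doubles the stored gradient.
    x = x.detach().clone().requires_grad_(True)  # fresh leaf tensor
    y = 2 * torch.dot(x, x)
    y.backward()
    first = x.grad.clone()  # 4 * x
    y = 2 * torch.dot(x, x)
    y.backward()  # accumulates into x.grad instead of overwriting it
    print(torch.allclose(x.grad, 2 * first))  # True: x.grad is now 8 * x
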
def f(x):
    # f(x) = 3 * x·x + 5, a scalar; its gradient is 6x (verified in check_f_grad below)
    return 3 * torch.dot(x, x) + 5
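
def check_f_grad():
    # Sketch (the helper name check_f_grad is assumed, not in the original
    # file): confirms the analytic gradient d/dx (3 * x·x + 5) = 6x by
    # computing it with torch.autograd.grad instead of backward().
    x = torch.arange(4.0, requires_grad=True)
    (g,) = torch.autograd.grad(f(x), x)
    print(torch.allclose(g, 6 * x))  # True
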
def demo2(x: torch.Tensor):
    x.requires_grad_(True)
    print(x)
    y = f(x)
    y.backward()
    print(x.grad)  # tensor([ 0.,  6., 12., 18.]), i.e. 6 * x
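
def demo_vector(x: torch.Tensor):
    # Sketch (assumed extension, not in the original file): demo1/demo2 call
    # backward() on scalar outputs; for a non-scalar y, backward() needs a
    # gradient argument, and the common pattern is y.sum().backward(),
    # equivalent to y.backward(torch.ones_like(y)).
    x = x.detach().clone().requires_grad_(True)
    y = x * x  # elementwise square, so y is a vector
    y.sum().backward()
    print(x.grad == 2 * x)  # tensor([True, True, True, True])
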
if __name__ == '__main__':
    x = torch.arange(4.0)
    demo2(x)