import torch
import torch.nn as nn
from torch.cuda.amp import autocast, GradScaler
# --- Initialization ---

# Init Step 1: create the model (returns the device and the epoch to resume
# from, per create_model's interface -- definition not shown in this snippet).
model, device, start_epoch = create_model(opt)
if torch.cuda.device_count() > 1:
    # Replicate the model across all visible GPUs for data parallelism.
    model = nn.DataParallel(model)
model.cuda()

# Init Step 2: create the training dataloader from the configured path.
dataloader = create_dataset(opt.train_path)

# Init Step 3: create the optimizer and the AMP gradient scaler.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scaler = GradScaler()  # scales the loss to avoid fp16 gradient underflow

# --- One mixed-precision training step ---
# NOTE(review): P, A, L, targets are presumably one batch drawn from
# `dataloader`; their origin is not shown here -- confirm against the full loop.

# Clear gradients from the previous step so they do not accumulate.
optimizer.zero_grad()

# Train Step 1: forward pass under autocast -- eligible ops run in half
# precision, the rest stay in fp32.
with autocast():
    loss, outputs = model(P, A, L, targets)
    loss = torch.mean(loss)  # reduce per-replica losses from DataParallel

# Train Step 2: backward pass on the scaled loss; gradients come out scaled.
scaler.scale(loss).backward()

# Train Step 3: unscale gradients and apply the optimizer step (the step is
# skipped if inf/NaN gradients are detected), then update the scale factor.
scaler.step(optimizer)
scaler.update()
# (Footer scraped from the original blog source, preserved as a comment:)
# 本文内容由网友自发贡献,版权归原作者所有,本站不承担相应法律责任。如您发现有涉嫌抄袭侵权的内容,请联系:hwhale#tublm.com(使用前将#替换为@)