# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import copy
import logging
import unittest

import torch
from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
from omegaconf import OmegaConf


@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestGradientScaling(unittest.TestCase):
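    # Each test runs a single fp16 training step on a tiny Linear(1, 1) model with
    # known weights and checks the loss, gradient norm, updated parameters, and
    # loss-scale bookkeeping against hand-computed expected values.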
    def setUp(self):
        self.x = torch.tensor([2.0]).cuda().half()
        weight = 3.0
        bias = 5.0
        self.error = 1.0
        # The target is exactly self.error away from the model's prediction
        # (2.0 * 3.0 + 5.0 = 11.0), so the L1 loss below is exactly 1.0.
        self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half()
        self.loss_fn = torch.nn.L1Loss()

        self.model = torch.nn.Linear(1, 1)
        self.model.weight.data = torch.tensor([[weight]])
        self.model.bias.data = torch.tensor([bias])
        self.model.cuda().half()
        self.params = list(self.model.parameters())

        # Optimizer and loss-scaler config: the scale starts at 1 and is updated
        # after every step (fp16_scale_window=1), so one clean step doubles it.
        self.cfg_dls = OmegaConf.create(
            {
                "optimization": {
                    "lr": [0.1],
                },
                "optimizer": {
                    "_name": "adam",
                    "lr": [0.1],
                    "adam_betas": "(0.9, 0.999)",
                    "adam_eps": 1e-8,
                    "weight_decay": 0.0,
                },
                "common": {
                    "fp16_init_scale": 1,
                    "fp16_scale_window": 1,
                    "fp16_scale_tolerance": 1,
                    "threshold_loss_scale": 1,
                    "min_loss_scale": 1e-4,
                    "tpu": False,
                },
            }
        )
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def run_iter(self, model, params, optimizer):
        optimizer.zero_grad()
        y = model(self.x)
        loss = self.loss_fn(y, self.target)
        optimizer.backward(loss)
        # Prediction is 11.0 and target is 12.0, so the L1 loss is exactly 1.0.
        self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16))

        # Gradients are (-2, -1) for weight and bias, giving a norm of sqrt(5) ~ 2.2361.
        grad_norm = optimizer.clip_grad_norm(0)
        self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)

        # The first Adam step with lr=0.1 moves each parameter by roughly 0.1; the
        # expected values below are the fp16 roundings of 3.1 and 5.1.
        optimizer.step()
        self.assertEqual(
            model.weight,
            torch.tensor(
                [[3.0996]], device="cuda:0", dtype=torch.float16, requires_grad=True
            ),
        )
        self.assertEqual(
            model.bias,
            torch.tensor(
                [5.1016], device="cuda:0", dtype=torch.float16, requires_grad=True
            ),
        )
        # With fp16_scale_window=1, one overflow-free step doubles the loss scale.
        self.assertEqual(optimizer.scaler.loss_scale, 2.0)

    def test_mixed_precision(self):
        model = copy.deepcopy(self.model)
        params = list(model.parameters())
        optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params)
        self.run_iter(model, params, optimizer)
        # FP16Optimizer keeps an fp32 master copy of the parameters; it should
        # receive the same update as the fp16 model parameters.
        self.assertTrue(
            all(
                torch.all(
                    fp32_params.eq(
                        torch.tensor(
                            [3.1000, 5.1000], device="cuda:0", requires_grad=True
                        )
                    )
                )
                for fp32_params in optimizer.fp32_params.values()
            )
        )

    def test_memory_efficient(self):
        # MemoryEfficientFP16Optimizer does not keep a separate fp32 master copy of
        # the parameters, so only the shared checks in run_iter apply here.
        model = copy.deepcopy(self.model)
        params = list(model.parameters())
        optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.cfg_dls, params)
        self.run_iter(model, params, optimizer)


if __name__ == "__main__":
    unittest.main()