#!/usr/bin/env python3

# pyre-unsafe
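"""Tests for the Lightning quantization callbacks: QAT and post-training quantization."""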
import os
import unittest
from unittest import mock

import torch
from d2go.runner.callbacks.quantization import (
    get_default_qat_qconfig,
    ModelTransform,
    PostTrainingQuantization,
    QuantizationAwareTraining,
    rgetattr,
    rhasattr,
    rsetattr,
)
from d2go.utils.misc import mode
from d2go.utils.testing.helper import tempdir
from d2go.utils.testing.lightning_test_module import TestModule
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from torch.ao.quantization.qconfig import default_dynamic_qconfig, get_default_qconfig
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx


class TestUtilities(unittest.TestCase):
    """Test some basic utilities we rely on."""

    def test_get_set_has(self):
        """Trivial test for generic behavior. Only support pre-existing deeply nested values."""

        class TestObject(object):
            def __init__(self):
                self.object = None
                self.set_to_five = 5

        obj = TestObject()
        obj.object = TestObject()
        obj.object.set_to_five = 10

        rsetattr(obj, "object.set_to_five", 1)
        self.assertTrue(rhasattr(obj, "object.set_to_five"))
        self.assertEqual(1, rgetattr(obj, "object.set_to_five"))
        self.assertEqual(5, rgetattr(obj, "set_to_five"))

        with self.assertRaises(AttributeError):
            rsetattr(obj, "object.does_not_exist.five", 5)


class TestModelTransform(unittest.TestCase):
    """Tests ModelTransforms."""

    def test_invalid_construction_type_error(self):
        """Validate construction of ModelTransforms. Always have fn, msg, and one of [step, interval]."""
        with self.assertRaises(TypeError):
            _ = ModelTransform()
        with self.assertRaises(TypeError):
            _ = ModelTransform(fn=lambda x: x)
        with self.assertRaises(TypeError):
            _ = ModelTransform(message="No function defined")
        with self.assertRaises(TypeError):
            _ = ModelTransform(
                fn=lambda x: x,
                message="Specified both step and interval",
                step=1,
                interval=1,
            )

    def test_positivity_value_error(self):
        """Validates ModelTransforms are constructed with only valid arguments."""

        def identity(x):
            return x

        with self.assertRaises(ValueError):
            _ = ModelTransform(fn=identity, message="Negative step", step=-1)
        with self.assertRaises(ValueError):
            _ = ModelTransform(fn=identity, message="Zero interval", interval=0)
        with self.assertRaises(ValueError):
            _ = ModelTransform(fn=identity, message="Negative interval", interval=-1)


@unittest.skip(
    "FX Graph Mode Quantization API has been updated, re-enable the test after PyTorch 1.13 stable release"
)
class TestQuantizationAwareTraining(unittest.TestCase):
    def test_qat_misconfiguration(self):
        """Tests failure when misconfiguring the QAT Callback."""
        invalid_params = [
            {"start_step": -1},
            {"enable_observer": (42, 42)},
            {"enable_observer": (42, 21)},
            {"enable_observer": (-1, None)},
            {"freeze_bn_step": -1},
        ]
        for invalid_param in invalid_params:
            with self.assertRaises(ValueError):
                _ = QuantizationAwareTraining(**invalid_param)

    def test_qat_transforms(self):
        """Tests the appropropriate ModelTransforms are defined with QAT."""
        qat = QuantizationAwareTraining(
            start_step=300, enable_observer=(350, 500), freeze_bn_step=550
        )

        trainer = Trainer()
        module = TestModule()

        qat.setup(trainer, module, stage="train")

        self.assertGreater(len(qat.transforms), 0)

        def assertContainsTransformsAtStep(step):
            """
            Asserts at least one transform exists at the specified step and
            that it is removed after the step begins.
            """
            self.assertGreater(
                len(
                    [
                        transform
                        for transform in qat.transforms
                        if transform.step == step
                    ]
                ),
                0,
                f"step={step}",
            )
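            # Simulate the trainer reaching `step`, then fire the callback hook; it
            # should apply the transform scheduled for this step and drop it afterwards.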
            trainer.fit_loop.global_step = step
            qat.on_train_batch_start(trainer, module, batch=None, batch_idx=0)

            self.assertEqual(
                len(
                    [
                        transform
                        for transform in qat.transforms
                        if transform.step == step
                    ]
                ),
                0,
                f"step={step}",
            )

        assertContainsTransformsAtStep(step=300)
        assertContainsTransformsAtStep(step=350)
        assertContainsTransformsAtStep(step=500)
        assertContainsTransformsAtStep(step=550)

    @tempdir
    def test_qat_interval_transform(self, root_dir):
        """Tests an interval transform is applied multiple times."""
        seed_everything(100)

        def linear_fn_counter(mod):
            if isinstance(mod, torch.nn.Linear):
                linear_fn_counter.count += 1

        linear_fn_counter.count = 0

        model = TestModule()
        num_epochs = 2
        qat = QuantizationAwareTraining()
        qat.transforms.append(
            ModelTransform(fn=linear_fn_counter, message="Counter", interval=10)
        )
        trainer = Trainer(
            default_root_dir=os.path.join(root_dir, "quantized"),
            enable_checkpointing=False,
            callbacks=[qat],
            max_epochs=num_epochs,
            logger=False,
        )
        trainer.fit(model)

        # Model has 2 linear layers.
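        # With interval=10 the counter transform fires at steps 0, 10, 20, ..., i.e.
        # (global_step // 10 + 1) times, and each firing visits both Linear layers.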
        self.assertEqual(linear_fn_counter.count, 2 * (trainer.global_step // 10 + 1))

    @tempdir
    def test_module_quantized_during_train(self, root_dir):
        """Validate quantized aware training works as expected."""
        seed_everything(100)

        model = TestModule()
        test_in = torch.randn(1, 32)
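        # Record the float model's output before training for a later comparison.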
        before_train = model.eval()(test_in)
        num_epochs = 2
        qat = QuantizationAwareTraining()
        trainer = Trainer(
            accelerator="cpu",
            devices=1,
            default_root_dir=os.path.join(root_dir, "quantized"),
            enable_checkpointing=False,
            callbacks=[qat],
            max_epochs=num_epochs,
            logger=False,
        )
        trainer.fit(model)

        self.assertIsNotNone(qat.prepared)
        self.assertIsNotNone(qat.quantized)

        test_out = model.eval()(test_in)
        self.assertGreater(
            (test_out**2).sum(), 0.03, "With the given seed, L2^2 should be > 0.03."
        )

        base_out = qat.quantized.eval()(test_in)
        self.assertTrue(torch.allclose(base_out, test_out))
        # Weight changed during training.
        self.assertFalse(torch.allclose(before_train, test_out))

        # Validate .test() call works as expected and does not change model weights.
        trainer.test(model)

        self.assertTrue(torch.allclose(test_out, model.eval()(test_in)))

    @tempdir
    def test_quantization_without_train(self, root_dir):
        """Validate quantization occurs even without a call to .fit() first."""
        seed_everything(100)

        model = TestModule()
        num_epochs = 2
        qat = QuantizationAwareTraining()
        trainer = Trainer(
            default_root_dir=os.path.join(root_dir, "quantized"),
            enable_checkpointing=False,
            callbacks=[qat],
            max_epochs=num_epochs,
            logger=False,
        )

        trainer.test(model)

        self.assertIsNotNone(qat.prepared)
        self.assertIsNotNone(qat.quantized)

    @tempdir
    def test_attribute_preservation_qat(self, root_dir):
        """Validates we can preserve specified properties in module."""
        seed_everything(100)

        model = TestModule()
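        # Attach ad-hoc attributes: two are listed in preserved_attrs below, one is not.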
        model.layer._added_property = 10
        model._not_preserved = 15
        model._added_property = 20

        num_epochs = 2
        qat = QuantizationAwareTraining(
            preserved_attrs=["_added_property", "layer._added_property"]
        )
        trainer = Trainer(
            default_root_dir=os.path.join(root_dir, "quantized"),
            enable_checkpointing=False,
            callbacks=[qat],
            max_epochs=num_epochs,
            logger=False,
        )

        trainer.fit(model)

        self.assertIsNotNone(qat.prepared)
        self.assertIsNotNone(qat.quantized)

        # Assert properties are maintained.
        self.assertTrue(hasattr(qat.prepared, "_added_property"))
        self.assertTrue(hasattr(qat.prepared.layer, "_added_property"))

        with self.assertRaises(AttributeError):
            qat.prepared._not_preserved

    @tempdir
    def test_quantization_and_checkpointing(self, root_dir):
        """Validate written checkpoints can be loaded back as expected."""
        seed_everything(100)

        model = TestModule()
        num_epochs = 2
        qat = QuantizationAwareTraining()
        checkpoint_dir = os.path.join(root_dir, "checkpoints")
        checkpoint = ModelCheckpoint(dirpath=checkpoint_dir, save_last=True)
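        # save_last=True ensures a "last.ckpt" file is written, which is loaded back below.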
        trainer = Trainer(
            default_root_dir=os.path.join(root_dir, "quantized"),
            callbacks=[qat, checkpoint],
            max_epochs=num_epochs,
            logger=False,
        )
        # Mimic failing mid-training by not running on_fit_end.
        with mock.patch.object(qat, "on_fit_end"):
            trainer.fit(model)

        ckpt = torch.load(os.path.join(checkpoint_dir, "last.ckpt"))
        model.load_state_dict(ckpt["state_dict"])

    @tempdir
    def test_custom_qat(self, root_dir):
        """Tests that we can customize QAT by skipping certain layers."""

        class _CustomQAT(QuantizationAwareTraining):
            """Only quantize TestModule.another_layer."""

            def prepare(self, model, configs, attrs):
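                # Prepare only `another_layer` for QAT; the rest of the module stays in
                # floating point.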
                example_inputs = (torch.rand(1, 2),)
                model.another_layer = prepare_qat_fx(
                    model.another_layer, configs[""], example_inputs
                )

                return model

            def convert(self, model, submodules, attrs):
                model.another_layer = convert_fx(model.another_layer)
                return model

        seed_everything(100)
        model = TestModule()
        test_in = torch.randn(1, 32)
        before_train = model.eval()(test_in)
        num_epochs = 2
        qat = _CustomQAT()
        trainer = Trainer(
            default_root_dir=os.path.join(root_dir, "quantized"),
            enable_checkpointing=False,
            callbacks=[qat],
            max_epochs=num_epochs,
            logger=False,
        )
        trainer.fit(model)

        self.assertIsNotNone(qat.prepared)
        self.assertIsNotNone(qat.quantized)

        test_out = model.eval()(test_in)
        self.assertGreater(
            (test_out**2).sum(), 0.03, "With the given seed, L2^2 should be > 0.03."
        )

        base_out = qat.quantized.eval()(test_in)
        self.assertTrue(torch.allclose(base_out, test_out))
        # Weight changed during training.
        self.assertFalse(torch.allclose(before_train, test_out))

        # Validate .test() call works as expected and does not change model weights.
        trainer.test(model)

        self.assertTrue(torch.allclose(test_out, model.eval()(test_in)))

    @tempdir
    def test_submodule_qat(self, root_dir):
        """Tests that we can customize QAT through exposed API."""
        seed_everything(100)

        model = TestModule()
        test_in = torch.randn(1, 32)
        before_train = model.eval()(test_in)
        num_epochs = 2
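        # Restrict QAT to the `another_layer` submodule via the qconfig_dicts argument.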
        qat = QuantizationAwareTraining(
            qconfig_dicts={"another_layer": {"": get_default_qat_qconfig()}}
        )
        trainer = Trainer(
            default_root_dir=os.path.join(root_dir, "quantized"),
            enable_checkpointing=False,
            callbacks=[qat],
            max_epochs=num_epochs,
            logger=False,
        )
        trainer.fit(model)

        self.assertIsNotNone(qat.prepared)
        self.assertIsNotNone(qat.quantized)

        test_out = model.eval()(test_in)
        self.assertGreater(
            (test_out**2).sum(), 0.03, "With the given seed, L2^2 should be > 0.03."
        )

        base_out = qat.quantized.eval()(test_in)
        self.assertTrue(torch.allclose(base_out, test_out))
        # Weight changed during training.
        self.assertFalse(torch.allclose(before_train, test_out))

        # Validate .test() call works as expected and does not change model weights.
        trainer.test(model)

        self.assertTrue(torch.allclose(test_out, model.eval()(test_in)))


@unittest.skip(
    "FX Graph Mode Quantization API has been updated, re-enable the test after PyTorch 1.13 stable release"
)
class TestPostTrainingQuantization(unittest.TestCase):
    @tempdir
    def test_post_training_static_quantization(self, root_dir):
        """Validate post-training static quantization."""
        seed_everything(100)

        model = TestModule()
        num_epochs = 4
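        # Static post-training quantization with the default qconfig applied to the whole model.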
        static_quantization = PostTrainingQuantization(
            qconfig_dicts={"": {"": get_default_qconfig()}}
        )
        trainer = Trainer(
            default_root_dir=os.path.join(root_dir, "quantized"),
            enable_checkpointing=False,
            callbacks=[static_quantization],
            max_epochs=num_epochs,
            logger=False,
        )
        # This will both train the model + quantize it.
        trainer.fit(model)

        self.assertIsNotNone(static_quantization.quantized)
        # Default qconfig requires calibration.
        self.assertTrue(static_quantization.should_calibrate)

        test_in = torch.randn(12, 32)
        with mode(model, training=False) as m:
            base_out = m(test_in)
        with mode(static_quantization.quantized, training=False) as q:
            test_out = q(test_in)

        # While quantized/original won't be exact, they should be close.
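        # The metric below is the batch mean of the per-sample L2 error.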
        self.assertLess(
            ((((test_out - base_out) ** 2).sum(axis=1)) ** (1 / 2)).mean(),
            0.015,
            "RMSE should be less than 0.015 between quantized and original.",
        )

    @tempdir
    def test_post_training_dynamic_quantization(self, root_dir):
        """Validates post-training dynamic quantization."""
        seed_everything(100)

        model = TestModule()
        num_epochs = 2
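        # Dynamic quantization config; unlike static PTQ it needs no calibration pass.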
        dynamic_quant = PostTrainingQuantization(
            qconfig_dicts={"": {"": default_dynamic_qconfig}}
        )
        trainer = Trainer(
            default_root_dir=os.path.join(root_dir, "quantized"),
            enable_checkpointing=False,
            callbacks=[dynamic_quant],
            max_epochs=num_epochs,
            logger=False,
        )
        # This will both train the model + quantize it.
        trainer.fit(model)

        self.assertIsNotNone(dynamic_quant.quantized)
        # Dynamic quantization does not require a calibration pass.
        self.assertFalse(dynamic_quant.should_calibrate)

        test_in = torch.randn(12, 32)
        with mode(model, training=False) as m:
            base_out = m(test_in)
        with mode(dynamic_quant.quantized, training=False) as q:
            test_out = q(test_in)

        # While quantized/original won't be exact, they should be close.
        self.assertLess(
            ((((test_out - base_out) ** 2).sum(axis=1)) ** (1 / 2)).mean(),
            0.015,
            "RMSE should be less than 0.015 between quantized and original.",
        )

    @tempdir
    def test_custom_post_training_static_quant(self, root_dir):
        """Tests that we can customize Post-Training static by skipping certain layers."""

        class _CustomStaticQuant(PostTrainingQuantization):
            """Only quantize TestModule.another_layer."""

            def prepare(self, model, configs, attrs):
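                # Insert observers only into `another_layer`; other submodules stay in
                # floating point.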
                example_inputs = (torch.randn(1, 2),)
                model.another_layer = prepare_fx(
                    model.another_layer, configs[""], example_inputs
                )

                return model

            def convert(self, model, submodules, attrs):
                model.another_layer = convert_fx(model.another_layer)
                return model

        seed_everything(100)

        model = TestModule()
        num_epochs = 2
        static_quantization = _CustomStaticQuant()
        trainer = Trainer(
            default_root_dir=os.path.join(root_dir, "quantized"),
            enable_checkpointing=False,
            callbacks=[static_quantization],
            max_epochs=num_epochs,
            logger=False,
            num_sanity_val_steps=0,
        )
        trainer.fit(model)

        self.assertIsNotNone(static_quantization.quantized)

        test_in = torch.randn(12, 32)
        with mode(model, training=False) as m:
            base_out = m(test_in)
        with mode(static_quantization.quantized, training=False) as q:
            test_out = q(test_in)

        # While quantized/original won't be exact, they should be close.
        self.assertLess(
            ((((test_out - base_out) ** 2).sum(axis=1)) ** (1 / 2)).mean(),
            0.02,
            "RMSE should be less than 0.007 between quantized and original.",
        )