From 26b4f71cc2d3e8fd03e78fb7f76369096e6865af Mon Sep 17 00:00:00 2001
From: Enayat Ullah
Date: Thu, 1 Aug 2024 13:07:12 -0700
Subject: [PATCH] Fix DistributedDP Optimizer for Fast Gradient Clipping (#662)

Summary:
Pull Request resolved: https://github.com/pytorch/opacus/pull/662

The step function incorrectly called "original_optimizer.original_optimizer" instead of "original_optimizer". Fixed it now.

Differential Revision: D60484128
---
 opacus/__init__.py                                        | 2 ++
 opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/opacus/__init__.py b/opacus/__init__.py
index c5f7d3f9..0c3f6394 100644
--- a/opacus/__init__.py
+++ b/opacus/__init__.py
@@ -15,6 +15,7 @@
 
 from . import utils
 from .grad_sample import GradSampleModule
+from .grad_sample_fast_gradient_clipping import GradSampleModuleFastGradientClipping
 from .privacy_engine import PrivacyEngine
 from .version import __version__
 
@@ -22,6 +23,7 @@
 __all__ = [
     "PrivacyEngine",
     "GradSampleModule",
+    "GradSampleModuleFastGradientClipping",
     "utils",
     "__version__",
 ]

diff --git a/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py b/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py
index b604911f..cff55050 100644
--- a/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py
+++ b/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py
@@ -76,6 +76,6 @@ def step(
 
         if self.pre_step():
             self.reduce_gradients()
-            return self.original_optimizer.original_optimizer.step()
+            return self.original_optimizer.step()
         else:
             return None
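
Note (not part of the patch): the fix restores single delegation in `step()`. The wrapper holds a plain `torch.optim` optimizer as `self.original_optimizer`, so the doubled `original_optimizer.original_optimizer.step()` call would fail with an `AttributeError` because the inner optimizer (e.g. `torch.optim.SGD`) has no `original_optimizer` attribute. The sketch below is a simplified stand-in for the Opacus classes, not the real API, illustrating why the one-level call is the correct one.

```python
# Minimal sketch, assuming a simplified stand-in for
# DistributedDPOptimizerFastGradientClipping (not the real Opacus API).
import torch


class ToyDPOptimizer:
    """Wraps a plain torch.optim optimizer as `original_optimizer`."""

    def __init__(self, original_optimizer: torch.optim.Optimizer):
        self.original_optimizer = original_optimizer

    def pre_step(self) -> bool:
        # In Opacus this handles noise addition / skip logic; always proceed here.
        return True

    def reduce_gradients(self):
        # In the DDP optimizer this all-reduces gradients across workers; no-op here.
        pass

    def step(self):
        if self.pre_step():
            self.reduce_gradients()
            # Buggy version: self.original_optimizer.original_optimizer.step()
            # -> AttributeError, since torch.optim.SGD has no `original_optimizer`.
            return self.original_optimizer.step()
        return None


param = torch.nn.Parameter(torch.zeros(3))
param.grad = torch.ones(3)
opt = ToyDPOptimizer(torch.optim.SGD([param], lr=0.1))
opt.step()          # parameter updated via the single wrapped optimizer
print(param.data)   # tensor([-0.1000, -0.1000, -0.1000])
```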