From 4b0cd9182d4ae60651496f3c8d0481b0b85fd9c3 Mon Sep 17 00:00:00 2001
From: Iden Kalemaj
Date: Wed, 15 Jan 2025 10:22:56 -0800
Subject: [PATCH] Add **kwargs to all optimizer classes (#710)

Summary:
Pull Request resolved: https://github.com/pytorch/opacus/pull/710

Purpose: enable custom PrivacyEngines that extend the PrivacyEngine class
and take in additional parameters. Every optimizer class now accepts
**kwargs in __init__ and forwards them through super().__init__, so extra
keyword arguments passed down from such an engine no longer raise a
TypeError.
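For illustration, a minimal sketch of the intended use (MyPrivacyEngine
and fancy_knob are hypothetical names, not part of this diff; it assumes
_prepare_optimizer accepts **kwargs, as its call site in the diff implies):

    from opacus import PrivacyEngine

    class MyPrivacyEngine(PrivacyEngine):
        # Hypothetical subclass: inject one extra keyword argument into
        # the optimizer that _prepare_optimizer builds. Since the optimizer
        # classes now accept **kwargs, the extra argument flows through
        # instead of raising a TypeError (see the optimizer-side sketch
        # after the diff).
        def _prepare_optimizer(self, *args, **kwargs):
            kwargs.setdefault("fancy_knob", 0.5)  # illustrative parameter
            return super()._prepare_optimizer(*args, **kwargs)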
Fix prior diff: D67456352

Reviewed By: HuanyuZhang

Differential Revision: D67953655

fbshipit-source-id: 70aef7571e012a370d6a0fd04948eccee06c9a0d
---
 opacus/optimizers/adaclipoptimizer.py                    | 1 +
 opacus/optimizers/ddp_perlayeroptimizer.py               | 2 ++
 opacus/optimizers/ddpoptimizer.py                        | 2 ++
 opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py | 2 ++
 opacus/optimizers/optimizer.py                           | 1 +
 opacus/optimizers/optimizer_fast_gradient_clipping.py    | 2 ++
 opacus/optimizers/perlayeroptimizer.py                   | 2 ++
 opacus/privacy_engine.py                                 | 1 +
 8 files changed, 13 insertions(+)

diff --git a/opacus/optimizers/adaclipoptimizer.py b/opacus/optimizers/adaclipoptimizer.py
index 7144f06b..9498613d 100644
--- a/opacus/optimizers/adaclipoptimizer.py
+++ b/opacus/optimizers/adaclipoptimizer.py
@@ -53,6 +53,7 @@ def __init__(
         loss_reduction: str = "mean",
         generator=None,
         secure_mode: bool = False,
+        **kwargs,
     ):
         super().__init__(
             optimizer,
diff --git a/opacus/optimizers/ddp_perlayeroptimizer.py b/opacus/optimizers/ddp_perlayeroptimizer.py
index c9b9bdfa..30a50633 100644
--- a/opacus/optimizers/ddp_perlayeroptimizer.py
+++ b/opacus/optimizers/ddp_perlayeroptimizer.py
@@ -48,6 +48,7 @@ def __init__(
         loss_reduction: str = "mean",
         generator=None,
         secure_mode: bool = False,
+        **kwargs,
     ):
         self.rank = torch.distributed.get_rank()
         self.world_size = torch.distributed.get_world_size()
@@ -79,6 +80,7 @@ def __init__(
         loss_reduction: str = "mean",
         generator=None,
         secure_mode: bool = False,
+        **kwargs,
     ):
         self.rank = torch.distributed.get_rank()
         self.world_size = torch.distributed.get_world_size()
diff --git a/opacus/optimizers/ddpoptimizer.py b/opacus/optimizers/ddpoptimizer.py
index 1b4e472d..06048a82 100644
--- a/opacus/optimizers/ddpoptimizer.py
+++ b/opacus/optimizers/ddpoptimizer.py
@@ -38,6 +38,7 @@ def __init__(
         loss_reduction: str = "mean",
         generator=None,
         secure_mode: bool = False,
+        **kwargs,
     ):
         super().__init__(
             optimizer,
@@ -47,6 +48,7 @@ def __init__(
             loss_reduction=loss_reduction,
             generator=generator,
             secure_mode=secure_mode,
+            **kwargs,
         )
         self.rank = torch.distributed.get_rank()
         self.world_size = torch.distributed.get_world_size()
diff --git a/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py b/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py
index b2245303..9442380f 100644
--- a/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py
+++ b/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py
@@ -38,6 +38,7 @@ def __init__(
         loss_reduction: str = "mean",
         generator=None,
         secure_mode: bool = False,
+        **kwargs,
     ):
         super().__init__(
             optimizer,
@@ -47,6 +48,7 @@ def __init__(
             loss_reduction=loss_reduction,
             generator=generator,
             secure_mode=secure_mode,
+            **kwargs,
         )
         self.rank = torch.distributed.get_rank()
         self.world_size = torch.distributed.get_world_size()
diff --git a/opacus/optimizers/optimizer.py b/opacus/optimizers/optimizer.py
index 7a22eeec..58b4c990 100644
--- a/opacus/optimizers/optimizer.py
+++ b/opacus/optimizers/optimizer.py
@@ -205,6 +205,7 @@ def __init__(
         loss_reduction: str = "mean",
         generator=None,
         secure_mode: bool = False,
+        **kwargs,
     ):
         """

diff --git a/opacus/optimizers/optimizer_fast_gradient_clipping.py b/opacus/optimizers/optimizer_fast_gradient_clipping.py
index 21489779..a5a7425e 100644
--- a/opacus/optimizers/optimizer_fast_gradient_clipping.py
+++ b/opacus/optimizers/optimizer_fast_gradient_clipping.py
@@ -63,6 +63,7 @@ def __init__(
         loss_reduction: str = "mean",
         generator=None,
         secure_mode: bool = False,
+        **kwargs,
     ):
         """

@@ -91,6 +92,7 @@ def __init__(
             loss_reduction=loss_reduction,
             generator=generator,
             secure_mode=secure_mode,
+            **kwargs,
         )

     @property
diff --git a/opacus/optimizers/perlayeroptimizer.py b/opacus/optimizers/perlayeroptimizer.py
index 6d0029bf..6ebc9724 100644
--- a/opacus/optimizers/perlayeroptimizer.py
+++ b/opacus/optimizers/perlayeroptimizer.py
@@ -39,6 +39,7 @@ def __init__(
         loss_reduction: str = "mean",
         generator=None,
         secure_mode: bool = False,
+        **kwargs,
     ):
         assert len(max_grad_norm) == len(params(optimizer))
         self.max_grad_norms = max_grad_norm
@@ -51,6 +52,7 @@ def __init__(
             loss_reduction=loss_reduction,
             generator=generator,
             secure_mode=secure_mode,
+            **kwargs,
         )

     def clip_and_accumulate(self):
diff --git a/opacus/privacy_engine.py b/opacus/privacy_engine.py
index bdddafe4..558c8f8e 100644
--- a/opacus/privacy_engine.py
+++ b/opacus/privacy_engine.py
@@ -136,6 +136,7 @@ def _prepare_optimizer(
             loss_reduction=loss_reduction,
             generator=generator,
             secure_mode=self.secure_mode,
+            **kwargs,
         )

     def _prepare_data_loader(
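For completeness, the optimizer-side counterpart (a minimal sketch;
MyDPOptimizer and fancy_knob are hypothetical names, not part of this
diff). Because every __init__ above now accepts and forwards **kwargs, a
custom optimizer anywhere in the hierarchy can declare and consume an
extra parameter:

    from opacus.optimizers import DPOptimizer

    class MyDPOptimizer(DPOptimizer):
        # Hypothetical subclass with one extra constructor argument; the
        # remaining keywords (noise_multiplier, max_grad_norm, ...) pass
        # through **kwargs to the base DPOptimizer unchanged.
        def __init__(self, optimizer, *, fancy_knob: float = 0.0, **kwargs):
            super().__init__(optimizer, **kwargs)
            self.fancy_knob = fancy_knob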