
Commit dac9877

Accounting for 2C sensitivity when doing microbatches
PiperOrigin-RevId: 494792995
1 parent 2040f08 commit dac9877

5 files changed: +26, -6 lines changed
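
Why 2C: every file below scales the Gaussian noise standard deviation by a sensitivity multiplier of 2.0 whenever num_microbatches > 1. One way to read the repeated comment ("For microbatching version, the sensitivity is 2*l2_norm_clip") is as a triangle-inequality bound: when a microbatch averages several examples, changing one example can replace one clipped microbatch gradient (norm at most C = l2_norm_clip) with a different clipped gradient (also norm at most C), so the summed gradient can move by up to 2C; with per-example clipping, removing an example changes the sum by at most C. A small numeric check of the worst case, independent of the library:

import numpy as np

# Worst case for the 2C bound: two clipped microbatch gradients pointing in
# opposite directions. Swapping one for the other moves the sum by 2C.
l2_norm_clip = 1.0
g_before = np.array([l2_norm_clip, 0.0])    # clipped gradient on one dataset
g_after = np.array([-l2_norm_clip, 0.0])    # clipped gradient on a neighboring dataset
print(np.linalg.norm(g_before - g_after))   # 2.0 == 2 * l2_norm_clip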

tensorflow_privacy/privacy/keras_models/dp_keras_model.py

Lines changed: 4 additions & 1 deletion
@@ -82,6 +82,9 @@ def __init__(
     super().__init__(*args, **kwargs)
     self._l2_norm_clip = l2_norm_clip
     self._noise_multiplier = noise_multiplier
+    # For microbatching version, the sensitivity is 2*l2_norm_clip.
+    self._sensitivity_multiplier = 2.0 if (num_microbatches is not None and
+                                           num_microbatches > 1) else 1.0

     # Given that `num_microbatches` was added as an argument after the fact,
     # this check helps detect unintended calls to the earlier API.
@@ -109,7 +112,7 @@ def _process_per_example_grads(self, grads):

   def _reduce_per_example_grads(self, stacked_grads):
     summed_grads = tf.reduce_sum(input_tensor=stacked_grads, axis=0)
-    noise_stddev = self._l2_norm_clip * self._noise_multiplier
+    noise_stddev = self._l2_norm_clip * self._sensitivity_multiplier * self._noise_multiplier
     noise = tf.random.normal(
         tf.shape(input=summed_grads), stddev=noise_stddev)
     noised_grads = summed_grads + noise
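
To see the change in isolation: the reduction above sums the already-clipped per-microbatch gradients and adds Gaussian noise whose standard deviation now carries the sensitivity multiplier. A minimal, self-contained sketch of that step (illustrative, not the library's exact code; the function name and shapes are assumptions):

import tensorflow as tf

def reduce_per_example_grads(stacked_grads, l2_norm_clip, noise_multiplier,
                             num_microbatches):
  """Sums clipped gradients and adds noise scaled for the 2C sensitivity."""
  # With more than one example per microbatch the sensitivity is 2*l2_norm_clip.
  sensitivity_multiplier = 2.0 if (num_microbatches is not None and
                                   num_microbatches > 1) else 1.0
  summed_grads = tf.reduce_sum(input_tensor=stacked_grads, axis=0)
  noise_stddev = l2_norm_clip * sensitivity_multiplier * noise_multiplier
  noise = tf.random.normal(tf.shape(input=summed_grads), stddev=noise_stddev)
  return summed_grads + noise  # Any batch normalization happens downstream.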

tensorflow_privacy/privacy/optimizers/dp_optimizer.py

Lines changed: 6 additions & 1 deletion
@@ -340,8 +340,13 @@ def __init__(
       self._num_microbatches = num_microbatches
       self._base_optimizer_class = cls

+      # For microbatching version, the sensitivity is 2*l2_norm_clip.
+      sensitivity_multiplier = 2.0 if (num_microbatches is not None and
+                                       num_microbatches > 1) else 1.0
+
       dp_sum_query = gaussian_query.GaussianSumQuery(
-          l2_norm_clip, l2_norm_clip * noise_multiplier)
+          l2_norm_clip,
+          sensitivity_multiplier * l2_norm_clip * noise_multiplier)

       super(DPGaussianOptimizerClass,
             self).__init__(dp_sum_query, num_microbatches, unroll_microbatches,
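
The legacy optimizer expresses the same change through its DP query: the clip stays l2_norm_clip, while the query's noise standard deviation picks up the multiplier. A hedged sketch using the gaussian_query module the patched file already imports (parameter values are illustrative):

from tensorflow_privacy.privacy.dp_query import gaussian_query

l2_norm_clip = 1.0
noise_multiplier = 1.1
num_microbatches = 4

sensitivity_multiplier = 2.0 if (num_microbatches is not None and
                                 num_microbatches > 1) else 1.0

# Records are clipped to l2_norm_clip; the summed result gets Gaussian noise
# with stddev = sensitivity_multiplier * l2_norm_clip * noise_multiplier.
dp_sum_query = gaussian_query.GaussianSumQuery(
    l2_norm_clip, sensitivity_multiplier * l2_norm_clip * noise_multiplier)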

tensorflow_privacy/privacy/optimizers/dp_optimizer_keras.py

Lines changed: 5 additions & 1 deletion
@@ -459,8 +459,12 @@ def return_gaussian_query_optimizer(
     *args: These will be passed on to the base class `__init__` method.
     **kwargs: These will be passed on to the base class `__init__` method.
   """
+  # For microbatching version, the sensitivity is 2*l2_norm_clip.
+  sensitivity_multiplier = 2.0 if (num_microbatches is not None and
+                                   num_microbatches > 1) else 1.0
+
   dp_sum_query = gaussian_query.GaussianSumQuery(
-      l2_norm_clip, l2_norm_clip * noise_multiplier)
+      l2_norm_clip, sensitivity_multiplier * l2_norm_clip * noise_multiplier)
   return cls(
       dp_sum_query=dp_sum_query,
       num_microbatches=num_microbatches,
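
From the caller's side nothing in the API changes; only the effective noise does. A usage sketch, assuming the DPKerasSGDOptimizer wrapper exported by this module (hyperparameter values are illustrative):

from tensorflow_privacy.privacy.optimizers.dp_optimizer_keras import (
    DPKerasSGDOptimizer)

# With num_microbatches > 1 the optimizer now draws noise with
# stddev = 2 * l2_norm_clip * noise_multiplier rather than
# l2_norm_clip * noise_multiplier.
optimizer = DPKerasSGDOptimizer(
    l2_norm_clip=1.0,
    noise_multiplier=1.1,
    num_microbatches=4,   # > 1, so the 2C sensitivity applies
    learning_rate=0.15)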

tensorflow_privacy/privacy/optimizers/dp_optimizer_keras_sparse.py

Lines changed: 7 additions & 2 deletions
@@ -185,13 +185,18 @@ def __init__(
     self._num_microbatches = num_microbatches
     self._was_dp_gradients_called = False
     self._noise_stddev = None
+    # For microbatching version, the sensitivity is 2*l2_norm_clip.
+    self._sensitivity_multiplier = 2.0 if (num_microbatches is not None and
+                                           num_microbatches > 1) else 1.0
+
     if self._num_microbatches is not None:
       # The loss/gradients is the mean over the microbatches so we
       # divide the noise by num_microbatches too to obtain the correct
       # normalized noise. If _num_microbatches is not set, the noise stddev
       # will be set later when the loss is given.
-      self._noise_stddev = (self._l2_norm_clip * self._noise_multiplier /
-                            self._num_microbatches)
+      self._noise_stddev = (
+          self._l2_norm_clip * self._noise_multiplier *
+          self._sensitivity_multiplier / self._num_microbatches)

   def _generate_noise(self, g):
     """Returns noise to be added to `g`."""

tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized.py

Lines changed: 4 additions & 1 deletion
@@ -104,6 +104,9 @@ def __init__(
     self._noise_multiplier = noise_multiplier
     self._num_microbatches = num_microbatches
     self._was_compute_gradients_called = False
+    # For microbatching version, the sensitivity is 2*l2_norm_clip.
+    self._sensitivity_multiplier = 2.0 if (num_microbatches is not None and
+                                           num_microbatches > 1) else 1.0

   def compute_gradients(self,
                         loss,
@@ -166,7 +169,7 @@ def process_microbatch(microbatch_loss):

     def reduce_noise_normalize_batch(stacked_grads):
       summed_grads = tf.reduce_sum(input_tensor=stacked_grads, axis=0)
-      noise_stddev = self._l2_norm_clip * self._noise_multiplier
+      noise_stddev = self._l2_norm_clip * self._noise_multiplier * self._sensitivity_multiplier
       noise = tf.random.normal(
           tf.shape(input=summed_grads), stddev=noise_stddev)
       noised_grads = summed_grads + noise
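
Note that the condition guards the non-microbatched paths: with num_microbatches of None or 1 the multiplier stays 1.0, so the previous noise scale is preserved. A quick check of that condition (hypothetical helper name, mirroring the expression used in every file above):

def sensitivity_multiplier(num_microbatches):
  return 2.0 if (num_microbatches is not None and num_microbatches > 1) else 1.0

assert sensitivity_multiplier(None) == 1.0  # per-example clipping: sensitivity C
assert sensitivity_multiplier(1) == 1.0     # one example per microbatch: still C
assert sensitivity_multiplier(8) == 2.0     # real microbatches: sensitivity 2C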
