Parallelize BrendelBethgeAttack #604

Open · wants to merge 1 commit into master
foolbox/attacks/brendel_bethge.py (129 changes: 62 additions & 67 deletions)
@@ -19,7 +19,7 @@
 from ..criteria import Misclassification, TargetedMisclassification
 from .base import raise_if_kwargs
 from ..distances import l0, l1, l2, linf
-
+from joblib import Parallel, delayed
 
 try:
     from numba.experimental import jitclass  # type: ignore

@@ -489,87 +489,82 @@ def logits_diff_and_grads(x) -> Tuple[Any, Any]:
         rate_normalization = np.prod(x.shape) * (max_ - min_)
         original_shape = x.shape
         _best_advs = best_advs.numpy()
 
-        for step in range(1, self.steps + 1):
-            if converged.all():
-                break  # pragma: no cover
+        with Parallel(n_jobs=10, backend='threading') as parallel:
+            for step in range(1, self.steps + 1):
+                if converged.all():
+                    break  # pragma: no cover
 
-            # get logits and local boundary geometry
-            # TODO: only perform forward pass on non-converged samples
-            logits_diffs, _boundary = logits_diff_and_grads(x)
+                # get logits and local boundary geometry
+                # TODO: only perform forward pass on non-converged samples
+                logits_diffs, _boundary = logits_diff_and_grads(x)
 
-            # record optimal adversarials
-            distances = self.norms(originals - x)
-            source_norms = self.norms(originals - best_advs)
+                # record optimal adversarials
+                distances = self.norms(originals - x)
+                source_norms = self.norms(originals - best_advs)
 
-            closer = distances < source_norms
-            is_advs = logits_diffs < 0
-            closer = closer.logical_and(ep.from_numpy(x, is_advs))
+                closer = distances < source_norms
+                is_advs = logits_diffs < 0
+                closer = closer.logical_and(ep.from_numpy(x, is_advs))
 
-            x_np_flatten = x.numpy().reshape((N, -1))
+                x_np_flatten = x.numpy().reshape((N, -1))
 
-            if closer.any():
-                _best_advs = best_advs.numpy().copy()
-                _closer = closer.numpy().flatten()
-                for idx in np.arange(N)[_closer]:
-                    _best_advs[idx] = x_np_flatten[idx].reshape(original_shape[1:])
+                if closer.any():
+                    _best_advs = best_advs.numpy().copy()
+                    _closer = closer.numpy().flatten()
+                    for idx in np.arange(N)[_closer]:
+                        _best_advs[idx] = x_np_flatten[idx].reshape(original_shape[1:])
 
-            best_advs = ep.from_numpy(x, _best_advs)
+                best_advs = ep.from_numpy(x, _best_advs)
 
-            # denoise estimate of boundary using a short history of the boundary
-            if step == 1:
-                boundary = _boundary
-            else:
-                boundary = (1 - self.momentum) * _boundary + self.momentum * boundary
+                # denoise estimate of boundary using a short history of the boundary
+                if step == 1:
+                    boundary = _boundary
+                else:
+                    boundary = (1 - self.momentum) * _boundary + self.momentum * boundary
 
-            # learning rate adaptation
-            if (step + 1) % lr_reduction_interval == 0:
-                lrs *= self.lr_decay
+                # learning rate adaptation
+                if (step + 1) % lr_reduction_interval == 0:
+                    lrs *= self.lr_decay
 
-            # compute optimal step within trust region depending on metric
-            x = x.reshape((N, -1))
-            region = lrs * rate_normalization
+                # compute optimal step within trust region depending on metric
+                x = x.reshape((N, -1))
+                region = lrs * rate_normalization
 
-            # we aim to slight overshoot over the boundary to stay within the adversarial region
-            corr_logits_diffs = np.where(
-                -logits_diffs < 0,
-                -self.overshoot * logits_diffs,
-                -(2 - self.overshoot) * logits_diffs,
-            )
+                # we aim to slight overshoot over the boundary to stay within the adversarial region
+                corr_logits_diffs = np.where(
+                    -logits_diffs < 0,
+                    -self.overshoot * logits_diffs,
+                    -(2 - self.overshoot) * logits_diffs,
+                )
 
-            # employ solver to find optimal step within trust region
-            # for each sample
-            deltas, k = [], 0
-
-            for sample in range(N):
-                if converged[sample]:
-                    # don't perform optimisation on converged samples
-                    deltas.append(
-                        np.zeros_like(x0_np_flatten[sample])
-                    )  # pragma: no cover
-                else:
-                    _x0 = x0_np_flatten[sample]
-                    _x = x_np_flatten[sample]
-                    _b = boundary[k].flatten()
-                    _c = corr_logits_diffs[k]
-                    r = region[sample]
-
-                    delta = self._optimizer.solve(  # type: ignore
-                        _x0, _x, _b, bounds[0], bounds[1], _c, r
-                    )
-                    deltas.append(delta)
-
-                k += 1  # idx of masked sample
+                # employ solver to find optimal step within trust region
+                # for each sample
+                def optimum(sample):
+                    if converged[sample]:
+                        return np.zeros_like(x0_np_flatten[sample])
+                    else:
+                        _x0 = x0_np_flatten[sample]
+                        _x = x_np_flatten[sample]
+                        k = sample - converged.sum()
+                        _b = boundary[k].flatten()
+                        _c = corr_logits_diffs[k]
+                        r = region[sample]
+                        return self._optimizer.solve(  # type: ignore
+                            _x0, _x, _b, bounds[0], bounds[1], _c, r
+                        )
+
+                deltas = parallel(delayed(optimum)(sample) for sample in range(N))
 
-            deltas = np.stack(deltas)
-            deltas = ep.from_numpy(x, deltas.astype(np.float32))  # type: ignore
+                deltas = np.stack(deltas)
+                deltas = ep.from_numpy(x, deltas.astype(np.float32))  # type: ignore
 
-            # add step to current perturbation
-            x = (x + ep.astensor(deltas)).reshape(original_shape)
+                # add step to current perturbation
+                x = (x + ep.astensor(deltas)).reshape(original_shape)
 
-            tb.probability("converged", converged, step)
-            tb.histogram("norms", source_norms, step)
-            tb.histogram("candidates/distances", distances, step)
+                tb.probability("converged", converged, step)
+                tb.histogram("norms", source_norms, step)
+                tb.histogram("candidates/distances", distances, step)
 
         tb.close()
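
The substance of the change is the second hunk: the sequential per-sample loop around `self._optimizer.solve` becomes an `optimum(sample)` function dispatched through joblib, with a single `Parallel` pool opened outside the step loop and reused on every iteration. Below is a minimal, self-contained sketch of that joblib pattern; `slow_solve`, `samples`, and the loop sizes are hypothetical stand-ins, not part of this PR.

```python
import numpy as np
from joblib import Parallel, delayed


def slow_solve(row: np.ndarray) -> np.ndarray:
    # hypothetical stand-in for the expensive per-sample
    # trust-region solve (self._optimizer.solve in the diff)
    return 2.0 * row


samples = np.random.rand(8, 16)

# opening the pool once, outside the loop, avoids re-spawning
# workers on every step of the outer optimisation loop
with Parallel(n_jobs=4, backend="threading") as parallel:
    for step in range(3):
        deltas = parallel(
            delayed(slow_solve)(samples[i]) for i in range(len(samples))
        )
        deltas = np.stack(deltas)  # joblib returns a list of results
```

One caveat: the `threading` backend shares memory and skips pickling, but under the GIL it only pays off when the per-sample work releases the GIL (e.g., in compiled numba code, which this file uses via `jitclass`); for pure-Python solvers, joblib's process-based `loky` backend is the usual alternative. Note also that `n_jobs=10` is hard-coded in the diff rather than exposed as a parameter.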
