141 commits
d699b14
Add YOLOv3 with ShapeShifter
dxoigmn Apr 17, 2023
44b3be5
Move mart.attack.callbacks to mart.callbacks
dxoigmn Apr 18, 2023
25a81ef
Disable default EarlyStopping
dxoigmn Apr 18, 2023
0c6bad4
Add PerturbationVisualizer callback
dxoigmn Apr 18, 2023
150e862
Add train end perturbation
dxoigmn Apr 18, 2023
def4095
Log learning rate
dxoigmn Apr 18, 2023
f57edd0
style
dxoigmn Apr 18, 2023
0580214
Fix max_steps
dxoigmn Apr 18, 2023
ae20b98
bugfix
dxoigmn Apr 18, 2023
3fe80a8
Enable configuration of loss from command line
dxoigmn Apr 18, 2023
551e8d7
Limit val/test batches
dxoigmn Apr 18, 2023
8ca64fd
Optimize composer parameters
dxoigmn Apr 18, 2023
40932c2
Don't use variable interpolation on YOLOv3 sequences
dxoigmn Apr 18, 2023
407b803
Don't output training metrics in YOLOv3 model
dxoigmn Apr 18, 2023
c19e814
Update optimizer parameters
dxoigmn Apr 18, 2023
b5cc6e9
style
dxoigmn Apr 18, 2023
916ab77
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Apr 18, 2023
7de1dab
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Apr 18, 2023
07ef40e
Add TV loss
dxoigmn Apr 19, 2023
a686a05
Always minimize noobj loss
dxoigmn Apr 19, 2023
b09221d
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Apr 19, 2023
ae34adc
Update warp
dxoigmn Apr 19, 2023
d84230b
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Apr 19, 2023
763be22
Add drop params
dxoigmn Apr 19, 2023
2971a3f
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Apr 19, 2023
cbedc4c
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Apr 19, 2023
025452d
bugfix
dxoigmn Apr 19, 2023
bffb0b6
cleanup
dxoigmn Apr 20, 2023
b10065b
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Apr 20, 2023
3a5977d
style
dxoigmn Apr 20, 2023
495339e
Merge branch 'shapeshifter_yolov3' of github.com:IntelLabs/MART into …
dxoigmn Apr 20, 2023
f46cc19
Add adversarial losses
dxoigmn Apr 20, 2023
37261d0
Turn off BatchNorm buffer updating in freeze
dxoigmn Apr 20, 2023
6434f4f
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Apr 20, 2023
55d7016
Add weights to mart.nn.Sum
dxoigmn Apr 21, 2023
34a67cd
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Apr 21, 2023
3027caa
Add OverrideMode callback
dxoigmn Apr 23, 2023
7d51487
Add target-specific losses
dxoigmn Apr 23, 2023
21aa038
Turn off gradient modifier
dxoigmn Apr 23, 2023
5ac28cd
Merge branch 'shapeshifter_yolov3' of github.com:IntelLabs/MART into …
dxoigmn Apr 23, 2023
f0ac583
Use default transform
dxoigmn Apr 24, 2023
e5dc9bb
Use CocoDetection datamodule
dxoigmn Apr 24, 2023
d214e17
Replace LitModular weights_fpath's with load_state_dict
dxoigmn Apr 24, 2023
9b98b11
Cleanup sequences
dxoigmn Apr 24, 2023
597947f
Get rid of torch.nested
dxoigmn Apr 24, 2023
f9eb95d
Return mask of correct size even when empty
dxoigmn Apr 24, 2023
6424ef9
Cleanup
dxoigmn Apr 24, 2023
a30ada2
Use default COCO collate function
dxoigmn Apr 24, 2023
91cbd16
Add Underlay
dxoigmn Apr 24, 2023
2d50694
Revert "Use default COCO collate function"
dxoigmn Apr 24, 2023
7b6557c
Merge Underlay/Overlay into Composite
dxoigmn Apr 24, 2023
326017d
Cleanup COCO datamodule config
dxoigmn Apr 24, 2023
053e8ce
Switch to persons-only COCO and overlay persons on top of perturbation
dxoigmn Apr 24, 2023
685dc08
Fix YoloNetV3 to produce preds and logits
dxoigmn Apr 24, 2023
88d46e2
Add score thresholding to losses
dxoigmn Apr 24, 2023
bcdb8c7
style
dxoigmn Apr 24, 2023
53bb29e
bugfix
dxoigmn Apr 24, 2023
2aaeeb2
Turn off random erasing
dxoigmn Apr 24, 2023
5d03868
Only apply color jitter during training
dxoigmn Apr 24, 2023
9cbfd3b
Add Image attack initializer
dxoigmn Apr 24, 2023
494a47f
PackTarget -> PackBoxesAndLabels
dxoigmn Apr 27, 2023
bbb73f1
Add CreateBackgroundMask transform
dxoigmn Apr 27, 2023
f410dc9
Pass all targets in yolo_collate_fn
dxoigmn Apr 27, 2023
4133885
Bugfix CreateBackgroundMask
dxoigmn Apr 27, 2023
5730a3d
CreateBackgroundMask -> CreateBackgroundMaskFromCOCOMasks
dxoigmn Apr 27, 2023
40b567c
Resize bg_masks
dxoigmn Apr 27, 2023
ec6fdbd
Add CreateBackgroundMaskFromImage
dxoigmn Apr 27, 2023
9704ff8
Pad bg_masks
dxoigmn Apr 27, 2023
fb5c49b
Add LoadTensors transform
dxoigmn Apr 27, 2023
3aba51c
Remove bg_mask in favor of perturbable_mask
dxoigmn Apr 27, 2023
a570490
Add support for batch warping
dxoigmn Apr 27, 2023
a5a5ca8
Implement proper Composite for all cases
dxoigmn Apr 27, 2023
45639a1
Image initializer scale to 0-1
dxoigmn Apr 27, 2023
592e27f
Add override mode
dxoigmn Apr 28, 2023
349c56c
Decrease score threshold in loss
dxoigmn Apr 28, 2023
8a8f73a
Normalize losses by batch size
dxoigmn Apr 28, 2023
e52c92f
Move override mode into callback
dxoigmn Apr 28, 2023
ae4d22f
style
dxoigmn Apr 28, 2023
9c5e715
bugfix
dxoigmn Apr 28, 2023
9cb5321
bugfix freeze
dxoigmn Apr 28, 2023
b6d5d65
Add alpha-aware ColorJitter
dxoigmn Apr 28, 2023
f6a174e
Batch ColorJitter
dxoigmn Apr 28, 2023
622698b
Update RandomAffine parameters
dxoigmn Apr 28, 2023
ffb6755
bugfix
dxoigmn Apr 28, 2023
6f11f2b
Apply warp only in training mode
dxoigmn Apr 28, 2023
7628f8f
Normalize individual losses
dxoigmn May 3, 2023
8c3c078
Merge branch 'freeze_callback' into shapeshifter_yolov3
dxoigmn May 5, 2023
7ebe5bc
Add image initializer
dxoigmn May 5, 2023
92d9965
Add scale parameter
dxoigmn May 5, 2023
b585684
Merge branch 'image_initializer' into shapeshifter_yolov3
dxoigmn May 5, 2023
5b64e4f
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn May 5, 2023
8d1a6f8
Merge branch 'shapeshifter' into image_initializer
dxoigmn May 5, 2023
56147ca
Merge branch 'image_initializer' into shapeshifter_yolov3
dxoigmn May 5, 2023
0d95bd1
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn May 5, 2023
1d607b2
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn May 5, 2023
cd43c24
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn May 5, 2023
abc9bcc
Merge branch 'add_load_state_dict_to_litmodular' into shapeshifter_yo…
dxoigmn May 5, 2023
2aa676f
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn May 22, 2023
eb3172d
Fix sequences
dxoigmn May 22, 2023
f42c153
fix config
dxoigmn May 23, 2023
9b6d386
fix configs
dxoigmn May 23, 2023
1a8fa04
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn May 23, 2023
5aa8c47
Fix callbacks
dxoigmn May 23, 2023
6289048
remove total variation
dxoigmn May 23, 2023
80415be
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Jun 1, 2023
4ff8b3b
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Jun 2, 2023
b861f38
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Jun 2, 2023
382cd47
Add YOLOv3 dependency
dxoigmn Jun 2, 2023
8c55b47
Merge branch 'main' into freeze_callback
dxoigmn Jun 8, 2023
99a7669
Use attrgetter
dxoigmn Jun 9, 2023
588068c
Better implementation of ModelParamsNoGrad
dxoigmn Jun 9, 2023
3832d22
Better implementation of AttackInEvalMode
dxoigmn Jun 9, 2023
a9348df
Log which params will have gradients disabled
dxoigmn Jun 9, 2023
9c955df
Remove Freeze callback
dxoigmn Jun 9, 2023
d278aba
bugfix
dxoigmn Jun 9, 2023
55a6161
comments
dxoigmn Jun 9, 2023
830e765
comments
dxoigmn Jun 9, 2023
be8ae5d
comments
dxoigmn Jun 9, 2023
04069b9
Even better AttackInEvalMode
dxoigmn Jun 9, 2023
113d483
Fix type
dxoigmn Jun 9, 2023
3dbdfd4
Even better ModelParamsNoGrad
dxoigmn Jun 9, 2023
48577ad
more lenient
dxoigmn Jun 9, 2023
54c6ab9
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Jun 9, 2023
ca7fd35
Merge branch 'freeze_callback' into shapeshifter_yolov3
dxoigmn Jun 9, 2023
cc0e057
Manually merge general_visualizer
dxoigmn Jun 9, 2023
2e9545d
Use attrgetter
dxoigmn Jun 9, 2023
10accf5
Use attrgetter
dxoigmn Jun 9, 2023
d8fe8a0
Restore image_visualizer config
dxoigmn Jun 9, 2023
a2eac45
Merge branch 'general_visualizer' into shapeshifter_yolov3
dxoigmn Jun 9, 2023
e348a8b
bugfix configs
dxoigmn Jun 9, 2023
3f86d8f
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Jun 9, 2023
353c281
ImageVisualizer consumes outputs
dxoigmn Jun 10, 2023
3d04ef7
Update example modules to run in eval mode
dxoigmn Jun 12, 2023
77c2350
Only log and run in fit stage
dxoigmn Jun 12, 2023
b5f7643
Merge branch 'freeze_callback' into shapeshifter_yolov3
dxoigmn Jun 12, 2023
fcc85d4
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Jun 15, 2023
76c88f9
Revert "ImageVisualizer consumes outputs"
dxoigmn Jun 15, 2023
3f2b8f8
Merge branch 'better_sequentialdict3' into shapeshifter_yolov3
dxoigmn Jun 15, 2023
1192acb
Set weights in forward
dxoigmn Jun 15, 2023
331f684
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Jun 22, 2023
31b41e9
Merge branch 'shapeshifter' into shapeshifter_yolov3
dxoigmn Jun 23, 2023
48 changes: 36 additions & 12 deletions mart/callbacks/eval_mode.py
@@ -4,23 +4,47 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #

+from __future__ import annotations
+
 from pytorch_lightning.callbacks import Callback

+from mart import utils
+
+logger = utils.get_pylogger(__name__)
+
 __all__ = ["AttackInEvalMode"]


 class AttackInEvalMode(Callback):
     """Switch the model into eval mode during attack."""

-    def __init__(self):
-        self.training_mode_status = None
-
-    def on_train_start(self, trainer, model):
-        self.training_mode_status = model.training
-        model.train(False)
-
-    def on_train_end(self, trainer, model):
-        assert self.training_mode_status is not None
-
-        # Resume the previous training status of the model.
-        model.train(self.training_mode_status)
+    def __init__(self, module_classes: type | list[type]):
+        # FIXME: convert strings to classes using hydra.utils.get_class? This will clean up some verbosity in configuration but will require importing hydra in this callback.
+        if isinstance(module_classes, type):
+            module_classes = [module_classes]
+
+        self.module_classes = tuple(module_classes)
+
+    def setup(self, trainer, pl_module, stage):
+        if stage != "fit":
+            return
+
+        # Log to the console so the user can visually see which modules will be in eval mode during training.
+        for name, module in pl_module.named_modules():
+            if isinstance(module, self.module_classes):
+                logger.info(
+                    f"Setting eval mode for {name} ({module.__class__.__module__}.{module.__class__.__name__})"
+                )
+
+    def on_train_epoch_start(self, trainer, pl_module):
+        # We must use on_train_epoch_start because PL will set pl_module to train mode right before this callback.
+        # See: https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#hooks
+        for name, module in pl_module.named_modules():
+            if isinstance(module, self.module_classes):
+                module.eval()
+
+    def on_train_epoch_end(self, trainer, pl_module):
+        # FIXME: Why is this necessary?
+        for name, module in pl_module.named_modules():
+            if isinstance(module, self.module_classes):
+                module.train()
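
For context, a minimal usage sketch of the reworked callback (not part of this diff; the Trainer settings are placeholders):

import torch
import pytorch_lightning as pl
from mart.callbacks import AttackInEvalMode

# Keep normalization and dropout layers frozen while the attack optimizes the
# perturbation: running statistics are not updated and dropout is disabled.
callback = AttackInEvalMode(module_classes=[torch.nn.BatchNorm2d, torch.nn.Dropout])
trainer = pl.Trainer(max_steps=100, callbacks=[callback])
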
34 changes: 28 additions & 6 deletions mart/callbacks/no_grad_mode.py
@@ -4,8 +4,15 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #

+from __future__ import annotations
+
+import torch
 from pytorch_lightning.callbacks import Callback

+from mart import utils
+
+logger = utils.get_pylogger(__name__)
+
 __all__ = ["ModelParamsNoGrad"]

@@ -15,10 +22,25 @@ class ModelParamsNoGrad(Callback):

     This callback should not change the result. Don't use unless an attack runs faster.
     """

-    def on_train_start(self, trainer, model):
-        for param in model.parameters():
-            param.requires_grad_(False)
+    def __init__(self, module_names: str | list[str] = None):
+        if isinstance(module_names, str):
+            module_names = [module_names]
+
+        self.module_names = module_names
+
+    def setup(self, trainer, pl_module, stage):
+        if stage != "fit":
+            return
+
+        # We use setup, and not on_train_start, so that mart.optim.OptimizerFactory can ignore parameters with no gradients.
+        # See: https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#hooks
+        for name, param in pl_module.named_parameters():
+            if any(name.startswith(module_name) for module_name in self.module_names):
+                logger.info(f"Disabling gradient for {name}")
+                param.requires_grad_(False)

-    def on_train_end(self, trainer, model):
-        for param in model.parameters():
-            param.requires_grad_(True)
+    def teardown(self, trainer, pl_module, stage):
+        for name, param in pl_module.named_parameters():
+            if any(name.startswith(module_name) for module_name in self.module_names):
+                # FIXME: Why is this necessary?
+                param.requires_grad_(True)
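
A sketch of the new prefix-based interface ("model.backbone" is a hypothetical parameter-name prefix, not a module from this PR):

import pytorch_lightning as pl
from mart.callbacks import ModelParamsNoGrad

# Disable gradients for every parameter whose name starts with "model.backbone",
# so the attack only differentiates through the perturbation.
callback = ModelParamsNoGrad(module_names=["model.backbone"])
trainer = pl.Trainer(max_steps=100, callbacks=[callback])
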
47 changes: 23 additions & 24 deletions mart/callbacks/visualizer.py
@@ -4,38 +4,37 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #

-import os
+from operator import attrgetter

 from pytorch_lightning.callbacks import Callback
-from torchvision.transforms import ToPILImage

-__all__ = ["PerturbedImageVisualizer"]
+__all__ = ["ImageVisualizer"]


-class PerturbedImageVisualizer(Callback):
-    """Save adversarial images as files."""
-
-    def __init__(self, folder):
-        super().__init__()
-
-        # FIXME: This should use the Trainer's logging directory.
-        self.folder = folder
-        self.convert = ToPILImage()
-
-        if not os.path.isdir(self.folder):
-            os.makedirs(self.folder)
-
-    def on_train_batch_end(self, trainer, model, outputs, batch, batch_idx):
-        # Save input and target for on_train_end
-        self.input = batch["input"]
-        self.target = batch["target"]
-
-    def on_train_end(self, trainer, model):
-        # FIXME: We should really just save this to outputs instead of recomputing adv_input
-        adv_input = model(input=self.input, target=self.target)
-
-        for img, tgt in zip(adv_input, self.target):
-            fname = tgt["file_name"]
-            fpath = os.path.join(self.folder, fname)
-            im = self.convert(img / 255)
-            im.save(fpath)
+class ImageVisualizer(Callback):
+    def __init__(self, frequency: int = 100, **tag_paths):
+        self.frequency = frequency
+        self.tag_paths = tag_paths
+
+    def log_image(self, trainer, tag, image):
+        # Add image to each logger
+        for logger in trainer.loggers:
+            # FIXME: Should we just use isinstance(logger.experiment, SummaryWriter)?
+            if not hasattr(logger.experiment, "add_image"):
+                continue
+
+            logger.experiment.add_image(tag, image, global_step=trainer.global_step)
+
+    def log_images(self, trainer, pl_module):
+        for tag, path in self.tag_paths.items():
+            image = attrgetter(path)(pl_module)
+            self.log_image(trainer, tag, image)
+
+    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
+        if batch_idx % self.frequency != 0:
+            return
+
+        self.log_images(trainer, pl_module)
+
+    def on_train_end(self, trainer, pl_module):
+        self.log_images(trainer, pl_module)
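
For reference, a sketch of the new tag-to-attribute-path interface; the path "model.perturber.perturbation" is illustrative only:

from mart.callbacks import ImageVisualizer

# Every 100 training batches, fetch pl_module.model.perturber.perturbation via
# attrgetter and log it under the tag "perturbation" on any logger whose
# experiment exposes add_image (e.g. TensorBoard's SummaryWriter).
visualizer = ImageVisualizer(frequency=100, perturbation="model.perturber.perturbation")
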
9 changes: 9 additions & 0 deletions mart/configs/callbacks/attack_in_eval_mode.yaml
@@ -1,2 +1,11 @@
 attack_in_eval_mode:
   _target_: mart.callbacks.AttackInEvalMode
+  module_classes: ???
+  # - _target_: hydra.utils.get_class
+  #   path: mart.models.LitModular
+  # - _target_: hydra.utils.get_class
+  #   path: torch.nn.BatchNorm2d
+  # - _target_: hydra.utils.get_class
+  #   path: torch.nn.Dropout
+  # - _target_: hydra.utils.get_class
+  #   path: torch.nn.SyncBatchNorm
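
The commented entries rely on hydra.utils.get_class, which resolves a dotted path string to the class object at instantiation time. A quick sketch of what Hydra does with each entry:

from hydra.utils import get_class

# Equivalent to one of the commented entries above.
bn_class = get_class("torch.nn.BatchNorm2d")
assert bn_class.__name__ == "BatchNorm2d"
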
3 changes: 2 additions & 1 deletion mart/configs/callbacks/no_grad_mode.yaml
@@ -1,2 +1,3 @@
-attack_in_eval_mode:
+no_grad_mode:
   _target_: mart.callbacks.ModelParamsNoGrad
+  module_names: ???
4 changes: 4 additions & 0 deletions mart/configs/callbacks/perturbation_visualizer.yaml
@@ -0,0 +1,4 @@
+perturbation_visualizer:
+  _target_: mart.callbacks.ImageVisualizer
+  frequency: 100
+  perturbation: ???
2 changes: 1 addition & 1 deletion mart/configs/datamodule/coco.yaml
@@ -1,5 +1,5 @@
 defaults:
-  - default.yaml
+  - default

 train_dataset:
   _target_: mart.datamodules.coco.CocoDetection
29 changes: 29 additions & 0 deletions mart/configs/datamodule/coco_yolov3.yaml
@@ -0,0 +1,29 @@
+defaults:
+  - coco
+
+num_workers: 1
+
+train_dataset:
+  transforms:
+    transforms:
+      - _target_: torchvision.transforms.ToTensor
+      - _target_: mart.transforms.ConvertCocoPolysToMask
+      - _target_: mart.transforms.PadToSquare
+        fill: 0.5
+      - _target_: mart.transforms.Resize
+        size: [416, 416]
+      - _target_: mart.transforms.ConvertBoxesToCXCYHW
+      - _target_: mart.transforms.RemapLabels
+      - _target_: mart.transforms.PackBoxesAndLabels
+        num_classes: 80
+      - _target_: mart.transforms.ConvertInstanceSegmentationToPerturbable
+
+val_dataset:
+  transforms: ${..train_dataset.transforms}
+
+test_dataset:
+  transforms: ${..val_dataset.transforms}
+
+collate_fn:
+  _target_: hydra.utils.get_method
+  path: mart.datamodules.coco.yolo_collate_fn
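
Roughly, Hydra instantiates the train_dataset transform chain above into a composed pipeline. A sketch, assuming the outer wrapper behaves like torchvision's Compose and that the mart.transforms classes accept the keyword arguments shown in the config (signatures not verified here):

from torchvision.transforms import Compose, ToTensor

import mart.transforms as T

train_transforms = Compose(
    [
        ToTensor(),  # PIL image -> float tensor in [0, 1]
        T.ConvertCocoPolysToMask(),  # COCO polygon annotations -> instance masks
        T.PadToSquare(fill=0.5),  # pad with gray to a square canvas
        T.Resize(size=[416, 416]),  # YOLOv3's expected input resolution
        T.ConvertBoxesToCXCYHW(),  # corner boxes -> center/size boxes
        T.RemapLabels(),  # remap COCO category ids
        T.PackBoxesAndLabels(num_classes=80),  # pack targets for the YOLO head
        T.ConvertInstanceSegmentationToPerturbable(),  # masks -> perturbable_mask
    ]
)
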
34 changes: 34 additions & 0 deletions mart/configs/experiment/COCO_YOLOv3.yaml
@@ -0,0 +1,34 @@
+# @package _global_
+
+defaults:
+  - override /datamodule: coco_yolov3
+  - override /model: yolov3
+  - override /metric: average_precision
+  - override /optimization: super_convergence
+
+task_name: "COCO_YOLOv3"
+tags: ["evaluation"]
+
+optimized_metric: "test_metrics/map"
+
+trainer:
+  # 117,266 training images, 6 epochs, batch_size=16: 117266 * 6 / 16 = 43,974.75
+  max_steps: 43975
+  # FIXME: "nms_kernel" not implemented for 'BFloat16', torch.ops.torchvision.nms().
+  precision: 32
+
+datamodule:
+  num_workers: 32
+  ims_per_batch: 16
+
+model:
+  load_state_dict:
+    yolov3: ${paths.data_dir}/yolov3_original.pt
+
+  # yolov3 model does not produce preds/targets in training sequence
+  training_metrics: null
+
+optimizer:
+  lr: 0.001
+  momentum: 0.9
+  weight_decay: 0.0005
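
Presumably this experiment is launched through MART's Hydra entry point, along the lines of python -m mart experiment=COCO_YOLOv3; the exact command is assumed from MART's usual CLI and is not shown in this diff.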