
Commit

Merge pull request #35 from aimagelab/dev
Big merge from dev
loribonna authored Mar 14, 2024
2 parents ad4d390 + d8de400 commit 9efb344
Showing 183 changed files with 75,458 additions and 1,994 deletions.
19 changes: 19 additions & 0 deletions .github/workflows/autopep8.yml
@@ -0,0 +1,19 @@
# This action works with pull requests and pushes
name: Continuous Integration

on:
pull_request:
push:
branches:
- master

jobs:
build:
runs-on: ubuntu-latest

steps:
- name: autopep8
id: autopep8
uses: peter-evans/autopep8@v2
with:
args: --recursive --in-place --aggressive --max-line-length=200 --ignore=E402 .
36 changes: 36 additions & 0 deletions .github/workflows/documentation.yml
@@ -0,0 +1,36 @@
name: documentation

on:
pull_request:
branches:
- release/wiki
push:
branches:
- release/wiki

permissions:
contents: write

jobs:
docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v3
with:
python-version: "3.10"
- name: Install dependencies
run: |
pip install -r docs/requirements.txt -r requirements.txt
pip install quadprog==0.1.11
- name: Sphinx build
run: |
sphinx-build -j auto docs _build
- name: Deploy to GitHub Pages
uses: peaceiris/actions-gh-pages@v3
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/release/wiki' }}
with:
publish_branch: gh-pages
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: _build
force_orphan: true
6 changes: 6 additions & 0 deletions .gitignore
@@ -2,6 +2,7 @@
*.gif
*.pdf
data/
data
checkpoints/
*.png
*.pt
@@ -90,3 +91,8 @@ dmypy.json
.pytype/
cython_debug/
wandb
logs
**/_build/
_autosummary
generated
val_permutations
173 changes: 110 additions & 63 deletions README.md

Large diffs are not rendered by default.

60 changes: 8 additions & 52 deletions backbone/EfficientNet.py
@@ -155,9 +155,7 @@ def round_filters(filters, global_params):
multiplier = global_params.width_coefficient
if not multiplier:
return filters
# TODO: modify the params names.
# maybe the names (width_divisor,min_width)
# are more suitable than (depth_divisor,min_depth).

divisor = global_params.depth_divisor
min_depth = global_params.min_depth
filters *= multiplier
@@ -246,13 +244,6 @@ def get_same_padding_conv2d(image_size=None):
else:
return partial(Conv2dStaticSamePadding, image_size=image_size)

# Parameters for the entire model (stem, all blocks, and head)
# GlobalParams = collections.namedtuple('GlobalParams', [
# 'width_coefficient', 'depth_coefficient', 'image_size', 'dropout_rate',
# 'num_classes', 'batch_norm_momentum', 'batch_norm_epsilon',
# 'drop_connect_rate', 'depth_divisor', 'min_depth', 'include_top'])


GlobalParams = collections.namedtuple('GlobalParams', [
'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate', 'data_format',
'num_classes', 'width_coefficient', 'depth_coefficient', 'depth_divisor',
@@ -261,11 +252,6 @@ def get_same_padding_conv2d(image_size=None):
'blocks_args', 'image_size', 'drop_connect_rate', 'include_top'
])

# Parameters for an individual model block
# BlockArgs = collections.namedtuple('BlockArgs', [
# 'num_repeat', 'kernel_size', 'stride', 'expand_ratio',
# 'input_filters', 'output_filters', 'se_ratio', 'id_skip'])

BlockArgs = collections.namedtuple('BlockArgs', [
'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
'expand_ratio', 'id_skip', 'strides', 'se_ratio', 'conv_type', 'fused_conv',
@@ -965,47 +951,17 @@ def _change_in_channels(self, in_channels):
out_channels = round_filters(32, self._global_params)
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)

# TODO: remove this if no longer needed
# def get_params(self, discard_classifier=False) -> torch.Tensor:
# """
# Returns all the parameters concatenated in a single tensor.
# :return: parameters tensor (??)
# """
# params = []
# for pp in list(self.parameters() if not discard_classifier else self._features.parameters()):
# params.append(pp.view(-1))
# return torch.cat(params)

# def set_params(self, new_params: torch.Tensor) -> None:
# """
# Sets the parameters to a given value.
# :param new_params: concatenated values to be set (??)
# """
# assert new_params.size() == self.get_params().size()
# progress = 0
# for pp in list(self.parameters()):
# cand_params = new_params[progress: progress +
# torch.tensor(pp.size()).prod()].view(pp.size())
# progress += torch.tensor(pp.size()).prod()
# pp.data = cand_params

# def get_grads(self, discard_classifier=False) -> torch.Tensor:
# """
# Returns all the gradients concatenated in a single tensor.
# :return: gradients tensor (??)
# """
# grads = []
# for pp in list(self.parameters() if not discard_classifier else self._features.parameters()):
# grads.append(pp.grad.view(-1))
# return torch.cat(grads)


def mammoth_efficientnet(nclasses: int, model_name: str, pretrained=False):
"""
Instantiates an EfficientNet network.
:param nclasses: number of output classes
:param nf: number of filters
:return: ResNet network
Args:
nclasses: number of output classes
model_name: name of the EfficientNet variant to instantiate
Returns:
EfficientNet network
"""
print(model_name)
if not pretrained:
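
For orientation, a minimal usage sketch of the factory above follows; the import path backbone.EfficientNet, the model name "efficientnet-b0", and the 224x224 input resolution are assumptions, not guaranteed by this diff.

```python
import torch

from backbone.EfficientNet import mammoth_efficientnet

# Hypothetical call: 10 output classes, randomly initialized weights.
net = mammoth_efficientnet(nclasses=10, model_name="efficientnet-b0", pretrained=False)

x = torch.randn(2, 3, 224, 224)  # assumed input resolution for a b0-style model
logits = net(x)                  # default forward pass returns the logits
print(logits.shape)              # expected: torch.Size([2, 10])
```
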
19 changes: 15 additions & 4 deletions backbone/MNISTMLP.py
@@ -18,8 +18,10 @@ class MNISTMLP(MammothBackbone):
def __init__(self, input_size: int, output_size: int) -> None:
"""
Instantiates the layers of the network.
:param input_size: the size of the input data
:param output_size: the size of the output
Args:
input_size: the size of the input data
output_size: the size of the output
"""
super(MNISTMLP, self).__init__()

@@ -48,8 +50,12 @@ def reset_parameters(self) -> None:
def forward(self, x: torch.Tensor, returnt='out') -> torch.Tensor:
"""
Compute a forward pass.
:param x: input tensor (batch_size, input_size)
:return: output tensor (output_size)
Args:
x: input tensor (batch_size, input_size)
Returns:
output tensor (output_size)
"""
x = x.view(-1, num_flat_features(x))

@@ -66,3 +72,8 @@ def forward(self, x: torch.Tensor, returnt='out') -> torch.Tensor:
return (out, feats)

raise NotImplementedError("Unknown return type")

def to(self, device):
super().to(device)
self.device = device
return self
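
As a quick sanity check of the updated interface, here is a minimal usage sketch; the import path backbone.MNISTMLP and the flattened 784-dimensional MNIST input are assumptions based on the signatures shown above.

```python
import torch

from backbone.MNISTMLP import MNISTMLP

# Hypothetical setup: 28x28 MNIST digits flattened to 784 features, 10 classes.
net = MNISTMLP(input_size=784, output_size=10)
net = net.to(torch.device("cpu"))  # the overridden to() also records self.device

x = torch.randn(4, 784)  # batch of 4 flattened images
out = net(x)             # default returnt='out' returns the logits
print(out.shape)         # expected: torch.Size([4, 10])
```
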
16 changes: 11 additions & 5 deletions backbone/MNISTMLP_PNN.py
@@ -23,9 +23,11 @@ def __init__(self, input_size: int, output_size: int,
old_cols: List[AlphaModule] = None) -> None:
"""
Instantiates the layers of the network.
:param input_size: the size of the input data
:param output_size: the size of the output
:param old_cols: a list of all the old columns
Args:
input_size: the size of the input data
output_size: the size of the output
old_cols: a list of all the old columns
"""
super(MNISTMLP_PNN, self).__init__()

@@ -84,8 +86,12 @@ def reset_parameters(self) -> None:
def forward(self, x: torch.Tensor, returnt='out') -> torch.Tensor:
"""
Compute a forward pass.
:param x: input tensor (batch_size, input_size)
:return: output tensor (output_size)
Args:
x: input tensor (batch_size, input_size)
Returns:
output tensor (output_size)
"""
x = x.view(-1, num_flat_features(x))
if len(self.old_cols) > 0:
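
The old_cols argument is what wires the progressive columns together. The sketch below assumes previously built columns can be passed directly, which is how the constructor signature reads; the exact AlphaModule type expected for old_cols is not shown in this diff.

```python
from backbone.MNISTMLP_PNN import MNISTMLP_PNN

# Hypothetical progressive setup: the column for the second task receives the
# column trained on the first task through old_cols (lateral connections).
col_task1 = MNISTMLP_PNN(input_size=784, output_size=10)
col_task2 = MNISTMLP_PNN(input_size=784, output_size=10, old_cols=[col_task1])
```
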