Merge pull request #234 from neuronets/pre-commit-ci-update-config
[pre-commit.ci] pre-commit autoupdate
satra committed Feb 6, 2023
2 parents 18c2473 + 46d9e20 commit affbd76
Showing 8 changed files with 2 additions and 14 deletions.
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -12,15 +12,15 @@ repos:
   - id: end-of-file-fixer
   - id: trailing-whitespace
 - repo: https://github.com/psf/black
-  rev: 22.12.0
+  rev: 23.1.0
   hooks:
   - id: black
 - repo: https://github.com/PyCQA/flake8
   rev: 6.0.0
   hooks:
   - id: flake8
 - repo: https://github.com/PyCQA/isort
-  rev: 5.11.4
+  rev: 5.12.0
   hooks:
   - id: isort
     exclude: ^(nobrainer/_version\.py|versioneer\.py)$
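
Note: the blank-line deletions in the seven Python files below all follow from the black bump. Black 23.1.0 introduced the 2023 stable style, which, among other changes, removes empty lines at the beginning of a function body; the isort bump (5.11.4 → 5.12.0) required no source changes here. A minimal before/after sketch of the black behavior, using a hypothetical function rather than code from this repository:

# Accepted by black 22.12.0:
def scale(x):

    return 2 * x

# Rewritten by black 23.1.0 (the blank line after the signature is removed):
def scale(x):
    return 2 * x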
1 change: 0 additions & 1 deletion nobrainer/losses.py
@@ -218,7 +218,6 @@ def __init__(self, reduction=ReductionV2.AUTO, name="wasserstein"):


 def gradient_penalty(gradients, real_pred, gp_weight=10, epsilon_weight=0.001):
-
     gradients_squared = tf.square(gradients)
     gradients_sqr_sum = tf.reduce_sum(
         gradients_squared, axis=tf.range(1, tf.rank(gradients_squared))
3 changes: 0 additions & 3 deletions nobrainer/models/progressiveae.py
@@ -206,7 +206,6 @@ def add_resolution(self):
         self.build([images_shape, alpha_shape])

     def call(self, inputs):
-
         images, alpha = inputs

         x = self.Head_Conv(images)
@@ -261,7 +260,6 @@ def __init__(
         self.build([(None, latent_size), (1,)])

     def update_res(self):
-
         self.current_resolution += 1
         self.current_width = 2**self.current_resolution

@@ -363,7 +361,6 @@ def add_resolution(self):
         self.build([(None, self.latent_size), (1,)])

     def call(self, inputs):
-
         latents, alpha = inputs

         x = self.make_Dbase(latents)
4 changes: 0 additions & 4 deletions nobrainer/models/progressivegan.py
@@ -159,7 +159,6 @@ def _make_generator_block(self, nf, kernel_size=4, name=""):
         return models.Sequential(block_layers, name=name)

     def generator_head(self, x, y, alpha):
-
         x = self.Upsampling()(x)
         x = self.HeadConv1(x)

@@ -171,7 +170,6 @@ def generator_head(self, x, y, alpha):
         return output

     def add_resolution(self):
-
         self.current_resolution += 1
         self.resolution_blocks.append(self.highest_resolution_block)
         self.highest_resolution_block = self._make_generator_block(
@@ -281,7 +279,6 @@ def _nf(self, stage):
         return min(int(self.fmap_base / (2.0 ** (stage))), self.fmap_max)

     def discriminator_base(self, x, y, alpha):
-
         x = self.AveragePooling()(x)
         x = self.BaseConv(x)

@@ -357,7 +354,6 @@ def add_resolution(self):
         self.build([images_shape, alpha_shape])

     def call(self, inputs):
-
         images, alpha = inputs

         # To bring to the right number of filters
1 change: 0 additions & 1 deletion nobrainer/prediction.py
@@ -186,7 +186,6 @@ def predict_from_array(
     progbar = tf.keras.utils.Progbar(n_batches)
     progbar.update(0)
     for j in range(0, n_blocks, batch_size):
-
         this_x = features[j : j + batch_size]
         s = StreamingStats()
         for n in range(n_samples):
1 change: 0 additions & 1 deletion nobrainer/tests/prediction_test.py
@@ -12,7 +12,6 @@


 def test_predict(tmp_path):
-
     x = np.ones((4, 4, 4))
     img = nib.Nifti1Image(x, affine=np.eye(4))
     path = str(tmp_path / "features.nii.gz")
1 change: 0 additions & 1 deletion nobrainer/training.py
@@ -206,7 +206,6 @@ def compile(self, optimizer, loss_fn):
         self.loss_fn = compile_utils.LossesContainer(loss_fn)

     def train_step(self, images):
-
         if isinstance(images, tuple):
             images = images[0]

1 change: 0 additions & 1 deletion nobrainer/validation.py
@@ -132,7 +132,6 @@ def validate_from_filepaths(
     None
     """
     for filepath in filepaths:
-
         outputs, dice = validate_from_filepath(
             filepath=filepath,
             predictor=predictor,
