
Commit

Merge pull request #101 from jameschapman19/dev
Fixes a few bugs introduced earlier. Testing looks done.
jameschapman19 committed Nov 17, 2021
2 parents b1318ba + 686f76a commit f773dba
Showing 28 changed files with 674 additions and 707 deletions.
9 changes: 3 additions & 6 deletions .github/workflows/python-package.yml
@@ -5,9 +5,9 @@ name: Python package

on:
push:
branches: [ main ]
branches: [ main, dev ]
pull_request:
branches: [ main ]
branches: [ main, dev ]

jobs:
build:
@@ -41,11 +41,8 @@ jobs:
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Run Test
run: pytest --cov-report=xml --cov=cca_zoo ./cca_zoo/test/
run: pytest --doctest-modules --cov-report=xml --cov=cca_zoo ./cca_zoo/test/
- name: "Upload coverage to Codecov"
uses: codecov/codecov-action@v1
with:
fail_ci_if_error: true
- name: Run doctests
run: |
pytest --doctest-modules --ignore=$MODULE_NAME/tests $MODULE_NAME
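With --doctest-modules added to the main pytest invocation, the separate "Run doctests" step becomes redundant: pytest now also collects and runs examples embedded in module docstrings as part of the coverage run. A hypothetical sketch (not taken from the repository) of the kind of docstring example this picks up:

    def scale(x: float, factor: float = 2.0) -> float:
        """Multiply x by factor.

        >>> scale(3.0)
        6.0
        """
        return x * factor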
52 changes: 26 additions & 26 deletions cca_zoo/data/simulated.py
@@ -10,16 +10,16 @@


def generate_covariance_data(
n: int,
view_features: List[int],
latent_dims: int = 1,
view_sparsity: List[Union[int, float]] = None,
correlation: Union[List[float], float] = 1,
structure: Union[str, List[str]] = None,
sigma: List[float] = None,
decay: float = 0.5,
positive=None,
random_state: Union[int, np.random.RandomState] = None,
n: int,
view_features: List[int],
latent_dims: int = 1,
view_sparsity: List[Union[int, float]] = None,
correlation: Union[List[float], float] = 1,
structure: Union[str, List[str]] = None,
sigma: List[float] = None,
decay: float = 0.5,
positive=None,
random_state: Union[int, np.random.RandomState] = None,
):
"""
Function to generate CCA dataset with defined population correlations
@@ -58,7 +58,7 @@ def generate_covariance_data(
covs = []
true_features = []
for view_p, sparsity, view_structure, view_positive, view_sigma in zip(
view_features, view_sparsity, structure, positive, sigma
view_features, view_sparsity, structure, positive, sigma
):
# Covariance Bit
if view_structure == "identity":
@@ -88,8 +88,8 @@
).T
random_state.shuffle(mask)
while (
np.sum(np.unique(mask, axis=1, return_counts=True)[1] > 1) > 0
or np.sum(np.sum(mask, axis=0) == 0) > 0
np.sum(np.unique(mask, axis=1, return_counts=True)[1] > 1) > 0
or np.sum(np.sum(mask, axis=0) == 0) > 0
):
random_state.shuffle(mask)
weights = weights * mask
@@ -113,12 +113,12 @@
# Cross Bit
cross += covs[i] @ A @ covs[j]
cov[
splits[i]: splits[i] + view_features[i],
splits[j]: splits[j] + view_features[j],
splits[i] : splits[i] + view_features[i],
splits[j] : splits[j] + view_features[j],
] = cross
cov[
splits[j]: splits[j] + view_features[j],
splits[i]: splits[i] + view_features[i],
splits[j] : splits[j] + view_features[j],
splits[i] : splits[i] + view_features[i],
] = cross.T

X = np.zeros((n, sum(view_features)))
@@ -133,12 +133,12 @@


def generate_simple_data(
n: int,
view_features: List[int],
view_sparsity: List[int] = None,
eps: float = 0,
transform=True,
random_state=None,
n: int,
view_features: List[int],
view_sparsity: List[int] = None,
eps: float = 0,
transform=True,
random_state=None,
):
"""
Simple latent variable model to generate data with one latent factor
@@ -200,9 +200,9 @@ def _gaussian(x, mu, sig, dn):
:param dn:
"""
return (
np.exp(-np.power(x - mu, 2.0) / (2 * np.power(sig, 2.0)))
* dn
/ (np.sqrt(2 * np.pi) * sig)
np.exp(-np.power(x - mu, 2.0) / (2 * np.power(sig, 2.0)))
* dn
/ (np.sqrt(2 * np.pi) * sig)
)


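A rough usage sketch of generate_covariance_data, built only from the signature shown above; the parameter values are illustrative, and the (views, true_features) unpacking is an assumption based on the surrounding code rather than something shown in this diff:

    import numpy as np
    from cca_zoo.data.simulated import generate_covariance_data

    # Two views with 10 and 12 features, two latent dimensions with
    # population correlations 0.9 and 0.5. The return unpacking below
    # (a pair of views plus the true weight vectors) is assumed.
    (X, Y), true_features = generate_covariance_data(
        n=500,
        view_features=[10, 12],
        latent_dims=2,
        correlation=[0.9, 0.5],
        structure="identity",
        random_state=0,
    )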
4 changes: 2 additions & 2 deletions cca_zoo/data/toy.py
@@ -16,7 +16,7 @@ class Split_MNIST_Dataset(Dataset):
"""

def __init__(
self, mnist_type: str = "MNIST", train: bool = True, flatten: bool = True
self, mnist_type: str = "MNIST", train: bool = True, flatten: bool = True
):
"""
@@ -72,7 +72,7 @@ class Noisy_MNIST_Dataset(Dataset):
"""

def __init__(
self, mnist_type: str = "MNIST", train: bool = True, flatten: bool = True
self, mnist_type: str = "MNIST", train: bool = True, flatten: bool = True
):
"""
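For context, a minimal sketch of constructing one of these torch Datasets and batching it with a standard DataLoader. The constructor arguments follow the signature above; what __getitem__ returns is not shown in this diff, so the sketch stops at building the loader:

    from torch.utils.data import DataLoader
    from cca_zoo.data.toy import Split_MNIST_Dataset

    # flatten=True is assumed to yield vectorised image halves rather than 2D images
    dataset = Split_MNIST_Dataset(mnist_type="MNIST", train=True, flatten=True)
    loader = DataLoader(dataset, batch_size=64, shuffle=True)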
62 changes: 31 additions & 31 deletions cca_zoo/deepmodels/architectures.py
@@ -31,11 +31,11 @@ def forward(self, x):

class Encoder(BaseEncoder):
def __init__(
self,
latent_dims: int,
variational: bool = False,
feature_size: int = 784,
layer_sizes: Iterable = None,
self,
latent_dims: int,
variational: bool = False,
feature_size: int = 784,
layer_sizes: Iterable = None,
):
super(Encoder, self).__init__(latent_dims, variational=variational)
if layer_sizes is None:
@@ -78,11 +78,11 @@ def forward(self, x):

class Decoder(BaseDecoder):
def __init__(
self,
latent_dims: int,
feature_size: int = 784,
layer_sizes: list = None,
norm_output: bool = False,
self,
latent_dims: int,
feature_size: int = 784,
layer_sizes: list = None,
norm_output: bool = False,
):
super(Decoder, self).__init__(latent_dims)
if layer_sizes is None:
@@ -126,14 +126,14 @@ def forward(self, x):

class CNNEncoder(BaseEncoder):
def __init__(
self,
latent_dims: int,
variational: bool = False,
feature_size: Iterable = (28, 28),
channels: list = None,
kernel_sizes: list = None,
stride: list = None,
padding: list = None,
self,
latent_dims: int,
variational: bool = False,
feature_size: Iterable = (28, 28),
channels: list = None,
kernel_sizes: list = None,
stride: list = None,
padding: list = None,
):
super(CNNEncoder, self).__init__(latent_dims, variational=variational)
if channels is None:
@@ -199,14 +199,14 @@ def forward(self, x):

class CNNDecoder(BaseDecoder):
def __init__(
self,
latent_dims: int,
feature_size: Iterable = (28, 28),
channels: list = None,
kernel_sizes=None,
strides=None,
paddings=None,
norm_output: bool = False,
self,
latent_dims: int,
feature_size: Iterable = (28, 28),
channels: list = None,
kernel_sizes=None,
strides=None,
paddings=None,
norm_output: bool = False,
):
super(CNNDecoder, self).__init__(latent_dims)
if channels is None:
@@ -229,7 +229,7 @@ def __init__(
# Loop backward through decoding layers in order to work out the dimensions at each layer - in particular the first
# linear layer needs to know B*current_size*current_size*channels
for l_id, (channel, kernel, stride, padding) in reversed(
list(enumerate(zip(channels, kernel_sizes, strides, paddings)))
list(enumerate(zip(channels, kernel_sizes, strides, paddings)))
):
conv_layers.append(
torch.nn.Sequential(
@@ -302,10 +302,10 @@ def forward(self, x):
# BrainNetCNN Network for fitting Gold-MSI on LSD dataset
class BrainNetEncoder(BaseEncoder):
def __init__(
self,
latent_dims: int,
variational: bool = False,
feature_size: Tuple[int] = (200, ...),
self,
latent_dims: int,
variational: bool = False,
feature_size: Tuple[int] = (200, ...),
):
super(BrainNetEncoder, self).__init__(latent_dims, variational=variational)
_check_feature_size(feature_size)
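A minimal sketch of building the fully connected and convolutional encoders/decoders using only the constructor arguments visible above; the explicit layer sizes and channel/kernel settings are illustrative values, not library defaults:

    from cca_zoo.deepmodels.architectures import Encoder, Decoder, CNNEncoder

    # fully connected encoder/decoder pair for flattened 28x28 inputs
    encoder = Encoder(latent_dims=2, feature_size=784, layer_sizes=[256, 128])
    decoder = Decoder(latent_dims=2, feature_size=784, layer_sizes=[128, 256])

    # convolutional encoder for 28x28 images; channels/kernels/strides/paddings
    # are illustrative and assumed to be accepted as equal-length lists
    cnn_encoder = CNNEncoder(
        latent_dims=2,
        feature_size=(28, 28),
        channels=[3, 3],
        kernel_sizes=[3, 3],
        stride=[1, 1],
        padding=[1, 1],
    )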
12 changes: 6 additions & 6 deletions cca_zoo/deepmodels/dcca.py
@@ -17,12 +17,12 @@ class DCCA(_DCCA_base):
"""

def __init__(
self,
latent_dims: int,
objective=objectives.MCCA,
encoders=None,
r: float = 0,
eps: float = 1e-3,
self,
latent_dims: int,
objective=objectives.MCCA,
encoders=None,
r: float = 0,
eps: float = 1e-3,
):
"""
Constructor class for DCCA
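A minimal construction sketch for DCCA with two of the encoders from architectures.py, following the signature above; the r value is illustrative and the training loop is omitted:

    from cca_zoo.deepmodels import objectives
    from cca_zoo.deepmodels.architectures import Encoder
    from cca_zoo.deepmodels.dcca import DCCA

    encoder_1 = Encoder(latent_dims=2, feature_size=784)
    encoder_2 = Encoder(latent_dims=2, feature_size=784)
    dcca = DCCA(
        latent_dims=2,
        objective=objectives.MCCA,
        encoders=[encoder_1, encoder_2],
        r=1e-3,  # small ridge on the covariance blocks (illustrative; default is 0)
        eps=1e-3,
    )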
8 changes: 4 additions & 4 deletions cca_zoo/deepmodels/dcca_barlow_twins.py
@@ -17,10 +17,10 @@ class BarlowTwins(DCCA):
"""

def __init__(
self,
latent_dims: int,
encoders: Iterable[BaseEncoder] = [Encoder, Encoder],
lam=1,
self,
latent_dims: int,
encoders: Iterable[BaseEncoder] = [Encoder, Encoder],
lam=1,
):
"""
Constructor class for Barlow Twins
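A corresponding sketch for BarlowTwins, which subclasses DCCA and presumably weights the off-diagonal (redundancy) term of its loss with lam; encoders are passed as instances here, whereas the default in the signature above is the Encoder class itself:

    from cca_zoo.deepmodels.architectures import Encoder
    from cca_zoo.deepmodels.dcca_barlow_twins import BarlowTwins

    barlow_twins = BarlowTwins(
        latent_dims=2,
        encoders=[Encoder(latent_dims=2), Encoder(latent_dims=2)],
        lam=1,
    )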
18 changes: 9 additions & 9 deletions cca_zoo/deepmodels/dcca_noi.py
@@ -16,14 +16,14 @@ class DCCA_NOI(DCCA):
"""

def __init__(
self,
latent_dims: int,
N: int,
encoders=None,
r: float = 0,
rho: float = 0.2,
eps: float = 1e-9,
shared_target: bool = False,
self,
latent_dims: int,
N: int,
encoders=None,
r: float = 0,
rho: float = 0.2,
eps: float = 1e-9,
shared_target: bool = False,
):
"""
Constructor class for DCCA
@@ -58,7 +58,7 @@ def forward(self, *args):
z = []
# Users architecture + final linear layer
for i, (encoder, linear_layer) in enumerate(
zip(self.encoders, self.linear_layers)
zip(self.encoders, self.linear_layers)
):
z.append(linear_layer(encoder(args[i])))
return z
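A construction sketch for DCCA_NOI following the signature above; N is assumed to be the total number of training samples, and rho/eps are left at the defaults shown:

    from cca_zoo.deepmodels.architectures import Encoder
    from cca_zoo.deepmodels.dcca_noi import DCCA_NOI

    dcca_noi = DCCA_NOI(
        latent_dims=2,
        N=1000,  # assumed: total number of training samples
        encoders=[Encoder(latent_dims=2), Encoder(latent_dims=2)],
        rho=0.2,
    )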
18 changes: 9 additions & 9 deletions cca_zoo/deepmodels/dcca_sdl.py
@@ -15,15 +15,15 @@ class DCCA_SDL(DCCA_NOI):
"""

def __init__(
self,
latent_dims: int,
N: int,
encoders=None,
r: float = 0,
rho: float = 0.2,
eps: float = 1e-3,
shared_target: bool = False,
lam=0.5,
self,
latent_dims: int,
N: int,
encoders=None,
r: float = 0,
rho: float = 0.2,
eps: float = 1e-3,
shared_target: bool = False,
lam=0.5,
):
"""
Constructor class for DCCA
16 changes: 8 additions & 8 deletions cca_zoo/deepmodels/dccae.py
@@ -17,14 +17,14 @@ class DCCAE(_DCCA_base):
"""

def __init__(
self,
latent_dims: int,
objective=objectives.MCCA,
encoders=None,
decoders=None,
r: float = 0,
eps: float = 1e-3,
lam=0.5,
self,
latent_dims: int,
objective=objectives.MCCA,
encoders=None,
decoders=None,
r: float = 0,
eps: float = 1e-3,
lam=0.5,
):
"""
:param latent_dims: # latent dimensions
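For DCCAE the sketch adds decoders, with lam presumably trading off the correlation objective against reconstruction error (0.5 is the default shown above); the values are illustrative:

    from cca_zoo.deepmodels import objectives
    from cca_zoo.deepmodels.architectures import Encoder, Decoder
    from cca_zoo.deepmodels.dccae import DCCAE

    dccae = DCCAE(
        latent_dims=2,
        objective=objectives.MCCA,
        encoders=[Encoder(latent_dims=2), Encoder(latent_dims=2)],
        decoders=[Decoder(latent_dims=2), Decoder(latent_dims=2)],
        lam=0.5,
    )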
2 changes: 1 addition & 1 deletion cca_zoo/deepmodels/dtcca.py
@@ -20,7 +20,7 @@ class DTCCA(DCCA):
"""

def __init__(
self, latent_dims: int, encoders=None, r: float = 0, eps: float = 1e-3
self, latent_dims: int, encoders=None, r: float = 0, eps: float = 1e-3
):
"""
10 changes: 5 additions & 5 deletions cca_zoo/deepmodels/dvcca.py
@@ -23,11 +23,11 @@ class DVCCA(_DCCA_base):
"""

def __init__(
self,
latent_dims: int,
encoders=None,
decoders=None,
private_encoders: Iterable[BaseEncoder] = None,
self,
latent_dims: int,
encoders=None,
decoders=None,
private_encoders: Iterable[BaseEncoder] = None,
):
"""
:param latent_dims: # latent dimensions
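DVCCA additionally accepts private per-view encoders. A minimal construction sketch under the assumption of variational encoders (one per view) and one decoder per view; the expected list lengths are not shown in this diff:

    from cca_zoo.deepmodels.architectures import Encoder, Decoder
    from cca_zoo.deepmodels.dvcca import DVCCA

    dvcca = DVCCA(
        latent_dims=2,
        encoders=[
            Encoder(latent_dims=2, variational=True),
            Encoder(latent_dims=2, variational=True),
        ],
        decoders=[Decoder(latent_dims=2), Decoder(latent_dims=2)],
        # private per-view encoders are optional (default None in the signature above)
        private_encoders=None,
    )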
4 changes: 2 additions & 2 deletions cca_zoo/deepmodels/objectives.py
@@ -199,10 +199,10 @@ def loss(self, H1, H2):

SigmaHat12 = (1.0 / (n - 1)) * H1bar.T @ H2bar
SigmaHat11 = (1 - self.r) * (
1.0 / (n - 1)
1.0 / (n - 1)
) * H1bar.T @ H1bar + self.r * torch.eye(o1, device=H1.device)
SigmaHat22 = (1 - self.r) * (
1.0 / (n - 1)
1.0 / (n - 1)
) * H2bar.T @ H2bar + self.r * torch.eye(o2, device=H2.device)

SigmaHat11RootInv = torch.linalg.inv(
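The loss above shrinks each within-view covariance towards the identity with weight r, i.e. SigmaHat11 = (1 - r)/(n - 1) * H1bar.T @ H1bar + r * I, before the inverse-root terms (SigmaHat11RootInv) are formed. A small NumPy sketch of that estimator, outside the torch pipeline; the helper name is mine, not the library's, and H1bar/H2bar are presumed to be column-centred:

    import numpy as np

    def regularised_covariance(H, r=0.0):
        """Mirror of the SigmaHat11/SigmaHat22 shrinkage terms above."""
        n, o = H.shape
        H_bar = H - H.mean(axis=0, keepdims=True)  # centre each latent dimension
        return (1 - r) * (H_bar.T @ H_bar) / (n - 1) + r * np.eye(o)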

