Commit

Almost Completed!
take2rohit committed Jan 15, 2022
1 parent 597c8d3 commit 9bb22c4
Showing 21 changed files with 141 additions and 46 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -9,6 +9,7 @@ wandb/
protoruns/
**/__pycache__/
**/**/__pycache__/
**/**/**/__pycache__/
logs/
*.npy
centroids
@@ -17,3 +18,5 @@ model/
OfficeHomeDataset_10072016.zip
dalib/domainbed/__pycache__/*
degaa
**/tensorboard/
san/
54 changes: 54 additions & 0 deletions common/vision/datasets/Concatenate/__init__.py
@@ -0,0 +1,54 @@
import bisect
import warnings

from torch.utils.data import Dataset, IterableDataset
# from torch.utils.data.dataset import ConcatDataset


class ConcatenateDataset(Dataset):
    r"""Dataset as a concatenation of multiple datasets.

    This class is useful to assemble different existing datasets. Unlike
    torch's ConcatDataset, __getitem__ also returns the domain_index of the
    constituent dataset that the sample was drawn from.

    Arguments:
        datasets (sequence): List of datasets to be concatenated
    """

    @staticmethod
    def cumsum(sequence):
        # Running totals of dataset lengths: [len(d0), len(d0) + len(d1), ...]
        r, s = [], 0
        for e in sequence:
            length = len(e)
            r.append(length + s)
            s += length
        return r

    def __init__(self, datasets) -> None:
        super(ConcatenateDataset, self).__init__()
        # Cannot verify that datasets is Sized
        assert len(datasets) > 0, 'datasets should not be an empty iterable'  # type: ignore
        self.datasets = list(datasets)
        for d in self.datasets:
            assert not isinstance(d, IterableDataset), \
                "ConcatenateDataset does not support IterableDataset"
        self.cumulative_sizes = self.cumsum(self.datasets)

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        # Support negative indices, mirroring list semantics.
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx = len(self) + idx
        # Binary-search the cumulative sizes to find the source dataset.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        # Return the sample together with the domain index of its source dataset.
        return self.datasets[dataset_idx][sample_idx], self.datasets[dataset_idx].domain_index

    @property
    def cummulative_sizes(self):
        warnings.warn("cummulative_sizes attribute is renamed to "
                      "cumulative_sizes", DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
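A minimal usage sketch (not from the repo): ConcatenateDataset expects each constituent dataset to expose a domain_index attribute, so the toy datasets below are given one by hand.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Toy datasets standing in for two domains; domain_index is set manually here.
source = TensorDataset(torch.randn(8, 3), torch.zeros(8))
target = TensorDataset(torch.randn(4, 3), torch.ones(4))
source.domain_index, target.domain_index = 0, 1

combined = ConcatenateDataset([source, target])
(x, y), domain = combined[10]   # index 10 falls in `target`, so domain == 1
loader = DataLoader(combined, batch_size=4, shuffle=True)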
Binary file removed dalib/__pycache__/__init__.cpython-37.pyc
Binary file removed dalib/domainbed/__pycache__/__init__.cpython-37.pyc
Binary file removed dalib/domainbed/__pycache__/datasets.cpython-37.pyc
Binary file removed dalib/domainbed/__pycache__/networks.cpython-37.pyc
8 changes: 2 additions & 6 deletions dalib/domainbed/algorithms_proto.py
@@ -44,6 +44,7 @@ class Proto(nn.Module):
"""

def __init__(self, input_shape, num_classes, num_domains, hparams, use_relu=True):
# def __init__(self, hparams, use_relu=True):
super(Proto, self).__init__()

self.hparams = hparams
@@ -60,19 +61,14 @@ def __init__(self, input_shape, num_classes, num_domains, hparams, use_relu=True):
        # weight_decay=self.hparams['weight_decay']
        # )

        # initializing constants
        self.nd = num_domains
        self.nc = num_classes

        # initializing architecture parameters
-        featurizer = networks.Featurizer(input_shape, self.hparams)
+        featurizer = networks.Featurizer(hparams["input_shape"], self.hparams)
        self.ft_output_size = featurizer.n_outputs
        self.proto_size = int(self.ft_output_size * 0.25)
        self.feat_size = int(self.ft_output_size)

        # initializing hyperparameters
        self.proto_frac = hparams["proto_train_frac"]
        self.epochs = hparams["n_steps"]
        self.proto_epochs = hparams["n_steps_proto"]

        # self.kernel_type = "gaussian"
2 changes: 2 additions & 0 deletions dalib/domainbed/hparams_registry.py
@@ -27,6 +27,7 @@ def _hparams(algorithm, dataset, random_state):
    hparams["dataset"] = (dataset, dataset)
    hparams["domains_per_iter"] = (4, 4)
    hparams["data_parallel"] = (True, True)
    hparams["input_shape"] = ((3, 224, 224,), (3, 224, 224,))

    if dataset in RESNET_DATASETS:
        hparams["lr"] = (1e-4, 10 ** random_state.uniform(-5.5, -3.5))
@@ -44,6 +45,7 @@ def _hparams(algorithm, dataset, random_state):
    hparams["weight_decay"] = (1e-4, 10 ** random_state.uniform(-4, -4))

    hparams["class_balanced"] = (False, False)
    hparams["num_proto_extraction_points"] = (500, 500)

    if algorithm in ["DANN", "CDANN"]:
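Each entry registered above is a (default, random_search) pair. A short sketch of how such a registry can be consumed — resolve_hparams is an illustrative helper, not a function from this repo:

import numpy as np

def resolve_hparams(hparams, random=False):
    # Index 0 of each pair is the fixed default; index 1 is the draw
    # used during random hyperparameter search.
    return {name: pair[1] if random else pair[0] for name, pair in hparams.items()}

random_state = np.random.RandomState(0)
hparams = {
    "lr": (1e-4, 10 ** random_state.uniform(-5.5, -3.5)),
    "num_proto_extraction_points": (500, 500),
}
defaults = resolve_hparams(hparams)               # {'lr': 0.0001, 'num_proto_extraction_points': 500}
sampled = resolve_hparams(hparams, random=True)   # lr drawn log-uniformly from 1e-5.5 to 1e-3.5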
Binary file removed dalib/domainbed/lib/__pycache__/misc.cpython-37.pyc
1 change: 1 addition & 0 deletions scripts/adapt.sh
@@ -0,0 +1 @@
CUDA_VISIBLE_DEVICES=1 python src/degaa.py --output_dir ./adapt/run1 --tensorboard --source Ar,Pr --target Cl,Rw --batch_size 32
3 changes: 3 additions & 0 deletions scripts/embeddings.sh
@@ -0,0 +1,3 @@
# CUDA_VISIBLE_DEVICES=0 python src/embeddings_old.py --output_dir ./protoruns/run4 --batch_size 48 --num_proto_steps 100000 --tensorboard --resume 60000

CUDA_VISIBLE_DEVICES=1 python src/embeddings_old.py --output_dir ./protoruns/run5 --batch_size 32 --hparams '{"mixup":1}' --num_proto_steps 1000000 --tensorboard --resume 400100 --checkpoint_freq 10000
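The --hparams flag passes hyperparameter overrides as a JSON string. A sketch of the usual parsing pattern — an assumption about how the script consumes the flag, not code taken from this repo:

import argparse
import json

parser = argparse.ArgumentParser()
parser.add_argument("--hparams", type=str, default=None,
                    help='JSON dict of overrides, e.g. \'{"mixup": 1}\'')
args = parser.parse_args(["--hparams", '{"mixup": 1}'])

hparams = {"mixup": 0, "lr": 1e-4}            # registry defaults (illustrative values)
if args.hparams:
    hparams.update(json.loads(args.hparams))  # command-line overrides win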
8 changes: 4 additions & 4 deletions scripts/source_only.sh
@@ -2,10 +2,10 @@
# python image_source_final.py --output san --gpu_id 0 --dset office --max_epoch 50 --s 0 --net vit

#office-home
-python image_source_final.py --output weights --gpu_id 0 --dset office-home --max_epoch 50 --s 0 --net resnet50
-python image_source_final.py --output weights --gpu_id 0 --dset office-home --max_epoch 50 --s 1 --net resnet50
-python image_source_final.py --output weights --gpu_id 1 --dset office-home --max_epoch 50 --s 2 --net resnet50
-python image_source_final.py --output weights --gpu_id 0 --dset office-home --max_epoch 50 --s 3 --net resnet50
+python image_source_final.py --output weights --gpu_id 2 --batch_size 128 --dataset OfficeHome --max_epoch 50 --source Ar,Pr --target Cl,Rw --wandb 0
+# python image_source_final.py --output weights --gpu_id 0 --dataset OfficeHome --max_epoch 50 --source "Ar,Pr" --target "Cl,Rw"
+# python image_source_final.py --output weights --gpu_id 1 --dataset OfficeHome --max_epoch 50 --source "Ar,Pr" --target "Cl,Rw"
+# python image_source_final.py --output weights --gpu_id 0 --dataset OfficeHome --max_epoch 50 --source "Ar,Pr" --target "Cl,Rw"

#pacs
# python image_source_final.py --output san --gpu_id 0 --dset pacs --max_epoch 50 --s 0 --net vit
