-
Notifications
You must be signed in to change notification settings - Fork 3
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
18 changed files
with
2,646 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,140 @@ | ||
#!/bin/bash
# Fine-tune a ResNet-50 + global-pooling head on ImageNet-100 (448px run).
# Resumes automatically from the newest *.pth.tar checkpoint in $modeldir.
#SBATCH --job-name=m_v_mpn
#SBATCH --time=12:00:00
#SBATCH --ntasks-per-node=12
#SBATCH --gres=gpu:4
#SBATCH --mem=20g

# Initialise conda in this non-interactive shell, then activate the env.
eval "$(command conda 'shell.bash' 'hook' 2> /dev/null)"
#module load pytorch/1.9.0-py39-cuda112 torchvision cvxpylayers
conda activate convnext
cd /scratch2/rah025 || exit 1

set -e
:<<!
*****************Instruction*****************
Here you can easily create a model by selecting
an arbitrary backbone model and global method.
You can fine-tune it on your own datasets by
using a pre-trained model.
Modify the following settings as you wish !
*********************************************
!

#***************Backbone model****************
#Our code provides some mainstream architectures:
#alexnet
#vgg family:vgg11, vgg11_bn, vgg13, vgg13_bn,
#           vgg16, vgg16_bn, vgg19_bn, vgg19
#resnet family: resnet18, resnet34, resnet50,
#               resnet101, resnet152
#mpncovresnet: mpncovresnet50, mpncovresnet101
#inceptionv3
#You can also add your own network in src/network
arch=resnet50
#*********************************************

#***************global method*****************
#Our code provides some global methods at the end
#of the network:
#GAvP (global average pooling),
#MPNCOV (matrix power normalized cov pooling),
#BCNN (bilinear pooling)
#CBP (compact bilinear pooling)
#...
#You can also add your own method in src/representation
image_representation=GAvP_CONVNEXT
# short description of method
description=reproduce
#*********************************************

#*******************Dataset*******************
#Choose the dataset folder
benchmark=imagenet100
datadir=dataset
dataset="$datadir/$benchmark"
num_classes=100
#*********************************************

#****************Hyper-parameters*************

# Freeze the layers before a certain layer.
freeze_layer=0
# Batch size
batchsize=50
# The number of total epochs for training
epoch=100
# The initial learning rate,
# decreased by the step method at these epochs.
#lr=1.2e-6
lr=1.2e-4
lr_method=step
lr_params=(15 30)
# log method
# description: lr = logspace(params1, params2, #epoch)
#lr_method=log
#lr_params=(-1.1 -5.0)
weight_decay=1e-4
classifier_factor=5
#*********************************************

echo "Start finetuning!"
modeldir="Results/Finetune-$benchmark-$arch-$image_representation-$description-lr$lr-bs$batchsize-448"
# mkdir -p creates Results/ and the run directory in one idempotent step.
mkdir -p "$modeldir"
#cp finetune_mpncovvggd16.sh "$modeldir"

# Arguments shared by the fresh-start and the resume invocation.
# NOTE: --benchmark was previously passed twice with the same value; once is enough.
common_args=(
  "$dataset"
  --benchmark "$benchmark"
  --pretrained
  -a "$arch"
  -p 10
  --epochs "$epoch"
  --lr "$lr"
  --lr-method "$lr_method"
  --lr-params "${lr_params[@]}"
  --weight-decay "$weight_decay"
  -j 8
  -b "$batchsize"
  --num-classes "$num_classes"
  --representation "$image_representation"
  --freezed-layer "$freeze_layer"
  --classifier-factor "$classifier_factor"
  --modeldir "$modeldir"
)

# Pick the newest checkpoint if any exist; empty string means a fresh start.
# (The old '[ ! -e $modeldir/*.pth.tar ]' broke when the glob matched >1 file.)
checkpointfile=$(ls -rt "$modeldir"/*.pth.tar 2>/dev/null | tail -1 || true)

if [[ -z "$checkpointfile" ]]; then
  python main.py "${common_args[@]}"
else
  python main.py "${common_args[@]}" --resume "$checkpointfile"
fi
echo "Done!"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,138 @@ | ||
#!/bin/bash
# Train a ResNet-50 + global-average-pooling head on ImageNet-100 from scratch.
# Resumes automatically from the newest *.pth.tar checkpoint in $modeldir.
#SBATCH --job-name=gap
#SBATCH --time=24:00:00
#SBATCH --ntasks-per-node=12
#SBATCH --gres=gpu:4
#SBATCH --mem=20g

# Initialise conda in this non-interactive shell, then activate the env.
eval "$(command conda 'shell.bash' 'hook' 2> /dev/null)"
#module load pytorch/1.9.0-py39-cuda112 torchvision cvxpylayers
conda activate convnext
cd /scratch2/rah025 || exit 1

set -e
:<<!
*****************Instruction*****************
Here you can easily create a model by selecting
an arbitrary backbone model and global method.
You can fine-tune it on your own datasets by
using a pre-trained model.
Modify the following settings as you wish !
*********************************************
!

#***************Backbone model****************
#Our code provides some mainstream architectures:
#alexnet
#vgg family:vgg11, vgg11_bn, vgg13, vgg13_bn,
#           vgg16, vgg16_bn, vgg19_bn, vgg19
#resnet family: resnet18, resnet34, resnet50,
#               resnet101, resnet152
#mpncovresnet: mpncovresnet50, mpncovresnet101
#inceptionv3
#You can also add your own network in src/network
arch=resnet50
#*********************************************

#***************global method*****************
#Our code provides some global methods at the end
#of the network:
#GAvP (global average pooling),
#MPNCOV (matrix power normalized cov pooling),
#BCNN (bilinear pooling)
#CBP (compact bilinear pooling)
#...
#You can also add your own method in src/representation
image_representation=GAvP_CONVNEXT
# short description of method
description=reproduce
#*********************************************

#*******************Dataset*******************
#Choose the dataset folder
benchmark=imagenet100
datadir=dataset
dataset="$datadir/$benchmark"
num_classes=100
#*********************************************

#****************Hyper-parameters*************

# Freeze the layers before a certain layer.
freeze_layer=0
# Batch size
batchsize=50
# The number of total epochs for training
epoch=100
# The initial learning rate,
# decreased by the step method at these epochs.
#lr=1.2e-6
lr=0.01
lr_method=step
lr_params=(15 30 45)
# log method
# description: lr = logspace(params1, params2, #epoch)
#lr_method=log
#lr_params=(-1.1 -5.0)
weight_decay=1e-4
classifier_factor=1
#*********************************************

echo "Start finetuning!"
modeldir="Results/Finetune-$benchmark-$arch-$image_representation-$description-lr$lr-bs$batchsize-scratch"
# mkdir -p creates Results/ and the run directory in one idempotent step.
mkdir -p "$modeldir"
#cp finetune_mpncovvggd16.sh "$modeldir"

# Arguments shared by the fresh-start and the resume invocation.
# No --pretrained here: this run trains from scratch.
# NOTE: --benchmark was previously passed twice with the same value; once is enough.
common_args=(
  "$dataset"
  --benchmark "$benchmark"
  -a "$arch"
  -p 10
  --epochs "$epoch"
  --lr "$lr"
  --lr-method "$lr_method"
  --lr-params "${lr_params[@]}"
  --weight-decay "$weight_decay"
  -j 8
  -b "$batchsize"
  --num-classes "$num_classes"
  --representation "$image_representation"
  --freezed-layer "$freeze_layer"
  --classifier-factor "$classifier_factor"
  --modeldir "$modeldir"
)

# Pick the newest checkpoint if any exist; empty string means a fresh start.
# (The old '[ ! -e $modeldir/*.pth.tar ]' broke when the glob matched >1 file.)
checkpointfile=$(ls -rt "$modeldir"/*.pth.tar 2>/dev/null | tail -1 || true)

if [[ -z "$checkpointfile" ]]; then
  python main.py "${common_args[@]}"
else
  python main.py "${common_args[@]}" --resume "$checkpointfile"
fi
echo "Done!"
Oops, something went wrong.