
test problem #6

Closed · liuyingbin123 opened this issue Feb 8, 2023 · 9 comments

@liuyingbin123

AttributeError: Can't pickle local object 'DeepWBNet.__init__.<locals>.<lambda>'

Running test.py raises this error saying the class cannot be serialized.
Also, which Python version are you using?
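
For context, this is standard Python pickling behavior rather than anything specific to this repository: an object whose attributes include a lambda created inside __init__ cannot be pickled, and the spawn-based launcher used by the Lightning trainer has to pickle the model. A minimal sketch with illustrative names, not the repository's actual code:

import pickle

def double(x):
    return x * 2

class WithLambda:
    def __init__(self):
        # a local lambda stored on the instance -> unpicklable
        self.act = lambda x: x * 2

class WithFunction:
    def __init__(self):
        # a reference to a module-level function -> picklable
        self.act = double

try:
    pickle.dumps(WithLambda())
except AttributeError as e:
    print(e)  # Can't pickle local object 'WithLambda.__init__.<locals>.<lambda>'

pickle.dumps(WithFunction())  # works

Replacing each lambda attribute with a module-level function (or another picklable callable) keeps the object serializable.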

@onpix
Owner

onpix commented Feb 8, 2023

Could you provide the full error message? My Python version is 3.8.

@liuyingbin123
Author

Command: HYDRA_FULL_ERROR=1 python src/test.py checkpoint_path=trained_on_ours.ckpt backend=cuda

Error output:
Global seed set to 233
src/test.py:15: UserWarning:
The version_base parameter is not specified.
Please specify a compatability version level, or None.
Will assume defaults for version 1.1
@hydra.main(config_path='config', config_name="config")
/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/hydra/_internal/defaults_list.py:251: UserWarning: In 'config': Defaults list is missing _self_. See https://hydra.cc/docs/1.2/upgrades/1.0_to_1.1/default_composition_order for more information
warnings.warn(msg, UserWarning)
/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/hydra/_internal/hydra.py:119: UserWarning: Future Hydra versions will no longer change working directory at job runtime by default.
See https://hydra.cc/docs/1.2/upgrades/1.1_to_1.2/changes_to_job_working_dir/ for more information.
ret = run_job(
Check runtime config: use "/data/lyb/code/LCDPNet/src/config/runtime/lcdpnet.default.yaml" as template.
Running config: {'aug': {'crop': False, 'downsample': [512, 512], 'h-flip': True, 'v-flip': True}, 'train_ds': {'class': 'img_dataset', 'name': 'lcdp_data.train', 'input': ['/data/lyb/data/lowlight/LCDP/data/input/'], 'GT': ['/data/lyb/data/lowlight/LCDP/data/gt/']}, 'test_ds': {'class': 'img_dataset', 'name': 'lcdp_data.test', 'input': ['/data/lyb/data/lowlight/LCDP/data/test-input/'], 'GT': ['/data/lyb/data/lowlight/LCDP/data/test-gt/']}, 'valid_ds': {'class': 'img_dataset', 'name': 'lcdp_data.valid', 'input': ['/data/lyb/data/lowlight/LCDP/data/valid-input/'], 'GT': ['/data/lyb/data/lowlight/LCDP/data/valid-gt/']}, 'runtime': {'bilateral_upsample_net': {'hist_unet': {'n_bins': 8, 'hist_as_guide': False, 'channel_nums': [8, 16, 32, 64, 128], 'encoder_use_hist': False, 'guide_feature_from_hist': True, 'region_num': 2, 'use_gray_hist': False, 'conv_type': 'drconv', 'down_ratio': 2, 'hist_conv_trainable': False, 'drconv_position': [0, 1]}, 'modelname': 'bilateral_upsample_net', 'predict_illumination': False, 'loss': {'mse': 1.0, 'cos': 0.1, 'ltv': 0.1}, 'luma_bins': 8, 'channel_multiplier': 1, 'spatial_bin': 16, 'batch_norm': True, 'low_resolution': 256, 'coeffs_type': 'matrix', 'conv_type': 'conv', 'backbone': 'hist-unet', 'illu_map_power': False}, 'hist_unet': {'n_bins': 8, 'hist_as_guide': False, 'channel_nums': False, 'encoder_use_hist': False, 'guide_feature_from_hist': False, 'region_num': 8, 'use_gray_hist': False, 'conv_type': 'drconv', 'down_ratio': 2, 'hist_conv_trainable': False, 'drconv_position': [1, 1]}, 'modelname': 'lcdpnet', 'use_wavelet': False, 'use_attn_map': False, 'use_non_local': False, 'how_to_fuse': 'cnn-weights', 'backbone': 'bilateral_upsample_net', 'conv_type': 'conv', 'backbone_out_illu': True, 'illumap_channel': 3, 'share_weights': True, 'n_bins': 8, 'hist_as_guide': False, 'loss': {'ltv': 0, 'cos': 0, 'weighted_loss': 0, 'tvloss1': 0, 'tvloss2': 0, 'tvloss1_new': 0.01, 'tvloss2_new': 0.01, 'l1_loss': 1.0, 'ssim_loss': 0, 'psnr_loss': 0, 'illumap_loss': 0, 'hist_loss': 0, 'inter_hist_loss': 0, 'vgg_loss': 0, 'cos2': 0.5}}, 'project': 'default_proj', 'name': 'default_name', 'comment': False, 'debug': False, 'val_debug_step_nums': 2, 'gpu': -1, 'backend': 'cuda', 'runtime_precision': 16, 'amp_backend': 'native', 'amp_level': 'O1', 'dataloader_num_worker': 5, 'mode': 'train', 'logger': 'tb', 'num_epoch': 1000, 'valid_every': 10, 'savemodel_every': 4, 'log_every': 100, 'batchsize': 1, 'valid_batchsize': 1, 'lr': 0.0001, 'checkpoint_path': 'trained_on_ours.ckpt', 'checkpoint_monitor': 'loss', 'resume_training': True, 'monitor_mode': 'min', 'early_stop': False, 'valid_ratio': 0.1, 'flags': {}}
ERR: import thop failed, skip. error msg:
No module named 'thop'
Lightning automatically upgraded your loaded checkpoint from v1.5.4 to v1.9.0. To apply the upgrade to your files permanently, run python -m pytorch_lightning.utilities.upgrade_checkpoint --file trained_on_ours.ckpt
[ WARN ] Use Conv in HistGuidedDRDoubleConv[0] instead of DRconv.
[ WARN ] Use Conv in HistGuidedDRDoubleConv[0] instead of DRconv.
[ WARN ] Use Conv in HistGuidedDRDoubleConv[0] instead of DRconv.
[ WARN ] Use Conv in HistGuidedDRDoubleConv[0] instead of DRconv.
[[ WARN ]] Using HistUNet in BilateralUpsampleNet as backbone
Running initialization for BaseModel
DeepWBNet(
(illu_net): BilateralUpsampleNet(
(guide): GuideNet(
(conv1): ConvBlock(
(conv): Conv2d(3, 16, kernel_size=(1, 1), stride=(1, 1))
(activation): ReLU()
(bn): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(conv2): ConvBlock(
(conv): Conv2d(16, 1, kernel_size=(1, 1), stride=(1, 1))
(activation): Sigmoid()
)
)
(slice): SliceNode()
(coeffs): LowResHistUNet(
(maxpool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(hist_conv): Conv2d(8, 8, kernel_size=(2, 2), stride=(2, 2), bias=False)
(inc): DoubleConv(
(double_conv): Sequential(
(0): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(inplace=True)
(3): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(4): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(inplace=True)
)
)
(down1): Down(
(maxpool_conv): Sequential(
(0): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(1): DoubleConv(
(double_conv): Sequential(
(0): Conv2d(8, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(inplace=True)
(3): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(4): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(inplace=True)
)
)
)
)
(down2): Down(
(maxpool_conv): Sequential(
(0): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(1): DoubleConv(
(double_conv): Sequential(
(0): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(inplace=True)
(3): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(4): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(inplace=True)
)
)
)
)
(down3): Down(
(maxpool_conv): Sequential(
(0): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(1): DoubleConv(
(double_conv): Sequential(
(0): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(inplace=True)
(3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(inplace=True)
)
)
)
)
(down4): Down(
(maxpool_conv): Sequential(
(0): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(1): DoubleConv(
(double_conv): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(inplace=True)
(3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(inplace=True)
)
)
)
)
(up1): Up(
(up): Upsample(scale_factor=2.0, mode=bilinear)
(conv): HistGuidedDRDoubleConv(
(conv1): Conv2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(inter1): Sequential(
(0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): ReLU(inplace=True)
)
(conv2): DRConv2d(
(conv_kernel): Sequential(
(0): AdaptiveAvgPool2d(output_size=(3, 3))
(1): Conv2d(64, 4, kernel_size=(1, 1), stride=(1, 1))
(2): Sigmoid()
(3): Conv2d(4, 4096, kernel_size=(1, 1), stride=(1, 1), groups=2)
)
(conv_guide): Conv2d(24, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(corr): Correlation(xcorr_fast)
)
(inter2): Sequential(
(0): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): ReLU(inplace=True)
)
)
)
(up2): Up(
(up): Upsample(scale_factor=2.0, mode=bilinear)
(conv): HistGuidedDRDoubleConv(
(conv1): Conv2d(64, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(inter1): Sequential(
(0): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): ReLU(inplace=True)
)
(conv2): DRConv2d(
(conv_kernel): Sequential(
(0): AdaptiveAvgPool2d(output_size=(3, 3))
(1): Conv2d(32, 4, kernel_size=(1, 1), stride=(1, 1))
(2): Sigmoid()
(3): Conv2d(4, 1024, kernel_size=(1, 1), stride=(1, 1), groups=2)
)
(conv_guide): Conv2d(24, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(corr): Correlation(xcorr_fast)
)
(inter2): Sequential(
(0): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): ReLU(inplace=True)
)
)
)
(up3): Up(
(up): Upsample(scale_factor=2.0, mode=bilinear)
(conv): HistGuidedDRDoubleConv(
(conv1): Conv2d(32, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(inter1): Sequential(
(0): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): ReLU(inplace=True)
)
(conv2): DRConv2d(
(conv_kernel): Sequential(
(0): AdaptiveAvgPool2d(output_size=(3, 3))
(1): Conv2d(16, 4, kernel_size=(1, 1), stride=(1, 1))
(2): Sigmoid()
(3): Conv2d(4, 256, kernel_size=(1, 1), stride=(1, 1), groups=2)
)
(conv_guide): Conv2d(24, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(corr): Correlation(xcorr_fast)
)
(inter2): Sequential(
(0): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): ReLU(inplace=True)
)
)
)
(up4): Up(
(up): Upsample(scale_factor=2.0, mode=bilinear)
(conv): HistGuidedDRDoubleConv(
(conv1): Conv2d(16, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(inter1): Sequential(
(0): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): ReLU(inplace=True)
)
(conv2): DRConv2d(
(conv_kernel): Sequential(
(0): AdaptiveAvgPool2d(output_size=(3, 3))
(1): Conv2d(8, 4, kernel_size=(1, 1), stride=(1, 1))
(2): Sigmoid()
(3): Conv2d(4, 128, kernel_size=(1, 1), stride=(1, 1), groups=2)
)
(conv_guide): Conv2d(24, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(corr): Correlation(xcorr_fast)
)
(inter2): Sequential(
(0): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): ReLU(inplace=True)
)
)
)
(outc): OutConv(
(conv): Conv2d(8, 96, kernel_size=(1, 1), stride=(1, 1))
)
)
(apply_coeffs): ApplyCoeffs()
)
(out_net): Sequential(
(0): Conv2d(9, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace=True)
(2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace=True)
(4): NONLocalBlock2D(
(g): Sequential(
(0): Conv2d(32, 16, kernel_size=(1, 1), stride=(1, 1))
(1): UpsamplingBilinear2d(size=[16, 16], mode=bilinear)
)
(W): Conv2d(16, 32, kernel_size=(1, 1), stride=(1, 1))
(theta): Conv2d(32, 16, kernel_size=(1, 1), stride=(1, 1))
(phi): Sequential(
(0): Conv2d(32, 16, kernel_size=(1, 1), stride=(1, 1))
(1): UpsamplingBilinear2d(size=[16, 16], mode=bilinear)
)
)
(5): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1))
(6): ReLU(inplace=True)
(7): Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1))
(8): NONLocalBlock2D(
(g): Sequential(
(0): Conv2d(3, 1, kernel_size=(1, 1), stride=(1, 1))
(1): UpsamplingBilinear2d(size=[16, 16], mode=bilinear)
)
(W): Conv2d(1, 3, kernel_size=(1, 1), stride=(1, 1))
(theta): Conv2d(3, 1, kernel_size=(1, 1), stride=(1, 1))
(phi): Sequential(
(0): Conv2d(3, 1, kernel_size=(1, 1), stride=(1, 1))
(1): UpsamplingBilinear2d(size=[16, 16], mode=bilinear)
)
)
)
)
[ WARN ] Result directory "lcdpnet__trained_on_ours.ckpt@lcdp_data.test" exists. Press ENTER to overwrite or input suffix to create a new one:

New name: lcdpnet__trained_on_ours.ckpt@lcdp_data.test.
[ WARN ] Overwrite result_dir: lcdpnet__trained_on_ours.ckpt@lcdp_data.test
TEST - Result save path:
test_result/lcdpnet__trained_on_ours.ckpt@lcdp_data.test
Loading model from: trained_on_ours.ckpt
Dataset augmentation:
[ToPILImage(), Downsample([512, 512]), RandomHorizontalFlip(p=0.5), RandomVerticalFlip(p=0.5), ToTensor()]
/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/accelerator_connector.py:467: LightningDeprecationWarning: Setting Trainer(gpus=-1) is deprecated in v1.7 and will be removed in v2.0. Please use Trainer(accelerator='gpu', devices=-1) instead.
rank_zero_deprecation(
Using 16bit None Automatic Mixed Precision (AMP)
GPU available: True (cuda), used: True
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
HPU available: False, using: 0 HPUs
Error executing job with overrides: ['checkpoint_path=trained_on_ours.ckpt', 'backend=cuda']
Traceback (most recent call last):
File "src/test.py", line 46, in
main()
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/hydra/main.py", line 90, in decorated_main
_run_hydra(
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/hydra/_internal/utils.py", line 394, in _run_hydra
_run_app(
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/hydra/_internal/utils.py", line 457, in _run_app
run_and_report(
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/hydra/_internal/utils.py", line 222, in run_and_report
raise ex
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/hydra/_internal/utils.py", line 219, in run_and_report
return func()
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/hydra/_internal/utils.py", line 458, in
lambda: hydra.run(
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/hydra/_internal/hydra.py", line 132, in run
_ = ret.return_value
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/hydra/core/utils.py", line 260, in return_value
raise self._return_value
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/hydra/core/utils.py", line 186, in run_job
ret.return_value = task_function(task_cfg)
File "src/test.py", line 39, in main
trainer.test(model, datamodule)
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 785, in test
return call._call_and_handle_interrupt(
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/pytorch_lightning/trainer/call.py", line 36, in _call_and_handle_interrupt
return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/pytorch_lightning/strategies/launchers/multiprocessing.py", line 113, in launch
mp.start_processes(
File "/opt/anaconda3/envs/rvm/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 189, in start_processes
process.start()
File "/opt/anaconda3/envs/rvm/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "/opt/anaconda3/envs/rvm/lib/python3.8/multiprocessing/context.py", line 284, in _Popen
return Popen(process_obj)
File "/opt/anaconda3/envs/rvm/lib/python3.8/multiprocessing/popen_spawn_posix.py", line 32, in init
super().init(process_obj)
File "/opt/anaconda3/envs/rvm/lib/python3.8/multiprocessing/popen_fork.py", line 19, in init
self._launch(process_obj)
File "/opt/anaconda3/envs/rvm/lib/python3.8/multiprocessing/popen_spawn_posix.py", line 47, in _launch
reduction.dump(process_obj, fp)
File "/opt/anaconda3/envs/rvm/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'cv2.TonemapReinhard' object

After I replaced the few lambdas in the model with functions, I now get the same error as reported in a previous issue (the log above).
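
For reference, cv2 tonemapper objects are not picklable either, so swapping the lambdas for functions only moves the failure to the cv2.TonemapReinhard attribute. One possible workaround, sketched here with illustrative names rather than the repository's actual code, is to keep only picklable state on the object and build the tonemapper lazily:

import cv2

class TonemapWrapper:
    """Keeps only picklable state; the cv2 object is rebuilt on demand."""

    def __init__(self, gamma=1.0):
        self.gamma = gamma
        self._tonemap = None  # never pickled as a live cv2 object

    def _get_tonemap(self):
        if self._tonemap is None:
            self._tonemap = cv2.createTonemapReinhard(gamma=self.gamma)
        return self._tonemap

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tonemap"] = None  # drop the unpicklable cv2 handle
        return state

    def __call__(self, hdr_image):
        # hdr_image: float32 image; returns the tonemapped result
        return self._get_tonemap().process(hdr_image)

Depending on the Lightning version and strategy, restricting the run to a single device may also avoid the spawn launcher, and therefore the pickling step, altogether.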

@liuyingbin123
Author

Alternatively, could you share the version numbers of Python, torch, and the packages in requirements.txt?

@onpix
Owner

onpix commented Feb 8, 2023

I just tested it: running the unmodified code in my environment works fine. Perhaps the problem comes from your modifications? I am currently using Python 3.8.8, and the torch-related environment is:

pytorch-lightning                  1.7.6
torch                              1.12.1
torchmetrics                       0.9.2
torchvision                        0.13.1

The versions I used when releasing this code were a bit older, but both setups run fine.

@onpix
Owner

onpix commented Feb 8, 2023

If the error is caused by modifying the code, I suspect the modified model class no longer matches the checkpoint.

@liuyingbin123
Author

No, it also failed before any modification; the class containing lambdas cannot be serialized:

(The output is identical to the log above: the same warnings, config dump, model summary, and traceback, except that the final exception is:)
AttributeError: Can't pickle local object 'DeepWBNet.__init__.<locals>.<lambda>'
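
The traceback above also shows where the pickling is triggered: pytorch_lightning's multiprocessing launcher starts workers with the spawn start method, and spawn pickles everything the child process needs, including the model, so any unpicklable attribute (a lambda or a cv2 object) fails at process start rather than inside the model code. A standalone sketch of the same mechanism, independent of this repository:

import multiprocessing as mp

class Model:
    def __init__(self):
        self.tf = lambda x: x  # unpicklable local object

def worker(model):
    print("got", model)

if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    p = ctx.Process(target=worker, args=(Model(),))
    try:
        # start() pickles the process object and its args before launching
        p.start()
    except AttributeError as e:
        print(e)  # Can't pickle local object 'Model.__init__.<locals>.<lambda>'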

@liuyingbin123
Author

Could you also provide the version numbers in requirements.txt, so I can make sure my environment matches yours?

@onpix
Owner

onpix commented Feb 8, 2023

requirements.txt has been updated. I also uploaded env.yaml, which can be installed directly with conda.

@liuyingbin123
Author

Thanks.

@onpix onpix closed this as completed Feb 13, 2023