You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
AssertionError Traceback (most recent call last)
/Users/gaopeng/Desktop/tensorflow/project/optnet/denoising/main.py in ()
229
230 if __name__=='__main__':
--> 231 main()
global main = <function main at 0x114ba7510>
/Users/gaopeng/Desktop/tensorflow/project/optnet/denoising/main.py in main()
109 elif args.model == 'optnet':
110 if args.learnD:
--> 111 model = models.OptNet_LearnD(nFeatures, args)
model = undefined
global models.OptNet_LearnD = <class 'models.OptNet_LearnD'>
nFeatures = 100
args = Namespace(Dpenalty=0.01, batchSz=150, cuda=False, eps=0.0001, learnD=True, model='optnet', nEpoch=50, no_cuda=False, save='work/optnet.eps=0.0001.learnD.0.01', testBatchSz=100, testPct=0.1, tvInit=False)
112 else:
113 model = models.OptNet(nFeatures, args)
/usr/local/lib/python3.6/site-packages/torch/cuda/__init__.py in __new__(cls=<class 'torch.cuda.FloatTensor'>, *args=(torch.Size([199, 199]),), **kwargs={})
275
276 def __new__(cls, *args, **kwargs):
--> 277 _lazy_init()
global _lazy_init = <function _lazy_init at 0x110ab5378>
278 # We need this method only for lazy init, so we can remove it
279 del _CudaBase.__new__
/usr/local/lib/python3.6/site-packages/torch/cuda/__init__.py in _lazy_init()
87 raise RuntimeError(
88 "Cannot re-initialize CUDA in forked subprocess. " + msg)
---> 89 _check_driver()
global _check_driver = <function _check_driver at 0x110ab52f0>
90 assert torch._C._cuda_init()
91 assert torch._C._cuda_sparse_init()
/usr/local/lib/python3.6/site-packages/torch/cuda/__init__.py in _check_driver()
54 def _check_driver():
55 if not hasattr(torch._C, '_cuda_isDriverSufficient'):
---> 56 raise AssertionError("Torch not compiled with CUDA enabled")
global AssertionError = undefined
57 if not torch._C._cuda_isDriverSufficient():
58 if torch._C._cuda_getDriverVersion() == 0:
AssertionError: Torch not compiled with CUDA enabled
Hi, it's great to hear you've been looking into our OptNet project and that you are exploring the code. I think I forgot to add some checks around some of the cuda() calls, like the one in the trace you sent. Please send in a PR if you add them!
AssertionError Traceback (most recent call last)
/Users/gaopeng/Desktop/tensorflow/project/optnet/denoising/main.py in ()
229
230 if __name__=='__main__':
--> 231 main()
global main = <function main at 0x114ba7510>
/Users/gaopeng/Desktop/tensorflow/project/optnet/denoising/main.py in main()
109 elif args.model == 'optnet':
110 if args.learnD:
--> 111 model = models.OptNet_LearnD(nFeatures, args)
model = undefined
global models.OptNet_LearnD = <class 'models.OptNet_LearnD'>
nFeatures = 100
args = Namespace(Dpenalty=0.01, batchSz=150, cuda=False, eps=0.0001, learnD=True, model='optnet', nEpoch=50, no_cuda=False, save='work/optnet.eps=0.0001.learnD.0.01', testBatchSz=100, testPct=0.1, tvInit=False)
112 else:
113 model = models.OptNet(nFeatures, args)
/Users/gaopeng/Desktop/tensorflow/project/optnet/denoising/models.py in __init__(self=OptNet_LearnD (
), nFeatures=100, args=Namespace(Dpenalty=0.01, batchSz=150, cuda=False....01', testBatchSz=100, testPct=0.1, tvInit=False))
113
114 # self.fc1 = nn.Linear(nFeatures, nHidden)
--> 115 self.M = Variable(torch.tril(torch.ones(nHidden, nHidden)).cuda())
self.M = undefined
global Variable = <class 'torch.autograd.variable.Variable'>
global torch.tril =
global torch.ones =
nHidden = 199
nHidden.cuda = undefined
116
117 Q = 1e-8*torch.eye(nHidden)
/usr/local/lib/python3.6/site-packages/torch/utils.py in cuda(self=
1 0 0 ... 0 0 0
1... 1 1
[torch.FloatTensor of size 199x199]
, device=-1, async=False)
63 else:
64 new_type = getattr(torch.cuda, self.__class__.__name__)
---> 65 return new_type(self.size()).copy_(self, async)
new_type = <class 'torch.cuda.FloatTensor'>
self.size.copy_ = undefined
self =
1 0 0 ... 0 0 0
1 1 0 ... 0 0 0
1 1 1 ... 0 0 0
... ⋱ ...
1 1 1 ... 1 0 0
1 1 1 ... 1 1 0
1 1 1 ... 1 1 1
[torch.FloatTensor of size 199x199]
/usr/local/lib/python3.6/site-packages/torch/cuda/__init__.py in __new__(cls=<class 'torch.cuda.FloatTensor'>, *args=(torch.Size([199, 199]),), **kwargs={})
275
276 def __new__(cls, *args, **kwargs):
--> 277 _lazy_init()
global _lazy_init = <function _lazy_init at 0x110ab5378>
278 # We need this method only for lazy init, so we can remove it
279 del _CudaBase.__new__
/usr/local/lib/python3.6/site-packages/torch/cuda/__init__.py in _lazy_init()
87 raise RuntimeError(
88 "Cannot re-initialize CUDA in forked subprocess. " + msg)
---> 89 _check_driver()
global _check_driver = <function _check_driver at 0x110ab52f0>
90 assert torch._C._cuda_init()
91 assert torch._C._cuda_sparse_init()
/usr/local/lib/python3.6/site-packages/torch/cuda/__init__.py in _check_driver()
54 def _check_driver():
55 if not hasattr(torch._C, '_cuda_isDriverSufficient'):
---> 56 raise AssertionError("Torch not compiled with CUDA enabled")
global AssertionError = undefined
57 if not torch._C._cuda_isDriverSufficient():
58 if torch._C._cuda_getDriverVersion() == 0:
AssertionError: Torch not compiled with CUDA enabled
The text was updated successfully, but these errors were encountered: