Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

how to run the cpu version #2

Closed
gaopeng-eugene opened this issue Apr 1, 2017 · 1 comment
Closed

how to run the cpu version #2

gaopeng-eugene opened this issue Apr 1, 2017 · 1 comment

Comments

@gaopeng-eugene
Copy link

/usr/local/lib/python3.6/site-packages/IPython/core/ultratb.py(1268)__call__()
-> self.handler((etype, evalue, etb))
(Pdb)


AssertionError Traceback (most recent call last)
/Users/gaopeng/Desktop/tensorflow/project/optnet/denoising/main.py in ()
229
230 if __name__=='__main__':
--> 231 main()
global main = <function main at 0x114ba7510>

/Users/gaopeng/Desktop/tensorflow/project/optnet/denoising/main.py in main()
109 elif args.model == 'optnet':
110 if args.learnD:
--> 111 model = models.OptNet_LearnD(nFeatures, args)
model = undefined
global models.OptNet_LearnD = <class 'models.OptNet_LearnD'>
nFeatures = 100
args = Namespace(Dpenalty=0.01, batchSz=150, cuda=False, eps=0.0001, learnD=True, model='optnet', nEpoch=50, no_cuda=False, save='work/optnet.eps=0.0001.learnD.0.01', testBatchSz=100, testPct=0.1, tvInit=False)
112 else:
113 model = models.OptNet(nFeatures, args)

/Users/gaopeng/Desktop/tensorflow/project/optnet/denoising/models.py in __init__(self=OptNet_LearnD (
), nFeatures=100, args=Namespace(Dpenalty=0.01, batchSz=150, cuda=False....01', testBatchSz=100, testPct=0.1, tvInit=False))
113
114 # self.fc1 = nn.Linear(nFeatures, nHidden)
--> 115 self.M = Variable(torch.tril(torch.ones(nHidden, nHidden)).cuda())
self.M = undefined
global Variable = <class 'torch.autograd.variable.Variable'>
global torch.tril =
global torch.ones =
nHidden = 199
nHidden.cuda = undefined
116
117 Q = 1e-8*torch.eye(nHidden)

/usr/local/lib/python3.6/site-packages/torch/utils.py in cuda(self=
1 0 0 ... 0 0 0
1... 1 1
[torch.FloatTensor of size 199x199]
, device=-1, async=False)
63 else:
64 new_type = getattr(torch.cuda, self.__class__.__name__)
---> 65 return new_type(self.size()).copy_(self, async)
new_type = <class 'torch.cuda.FloatTensor'>
self.size.copy_ = undefined
self =
1 0 0 ... 0 0 0
1 1 0 ... 0 0 0
1 1 1 ... 0 0 0
... ⋱ ...
1 1 1 ... 1 0 0
1 1 1 ... 1 1 0
1 1 1 ... 1 1 1
[torch.FloatTensor of size 199x199]

    async = False
 66 
 67 

/usr/local/lib/python3.6/site-packages/torch/cuda/init.py in __new__(cls=<class 'torch.cuda.FloatTensor'>, *args=(torch.Size([199, 199]),), **kwargs={})
275
276 def __new__(cls, *args, **kwargs):
--> 277 _lazy_init()
global _lazy_init = <function _lazy_init at 0x110ab5378>
278 # We need this method only for lazy init, so we can remove it
279 del _CudaBase.__new__

/usr/local/lib/python3.6/site-packages/torch/cuda/init.py in _lazy_init()
87 raise RuntimeError(
88 "Cannot re-initialize CUDA in forked subprocess. " + msg)
---> 89 _check_driver()
global _check_driver = <function _check_driver at 0x110ab52f0>
90 assert torch._C._cuda_init()
91 assert torch._C._cuda_sparse_init()

/usr/local/lib/python3.6/site-packages/torch/cuda/init.py in _check_driver()
54 def _check_driver():
55 if not hasattr(torch._C, '_cuda_isDriverSufficient'):
---> 56 raise AssertionError("Torch not compiled with CUDA enabled")
global AssertionError = undefined
57 if not torch._C._cuda_isDriverSufficient():
58 if torch._C._cuda_getDriverVersion() == 0:

AssertionError: Torch not compiled with CUDA enabled

/usr/local/lib/python3.6/site-packages/IPython/core/ultratb.py(1269)__call__()
-> try:

@bamos
Copy link
Member

bamos commented Apr 1, 2017

Hi, it's great to hear you've been looking into our OptNet project and that you are exploring the code. I think I forgot to add some checks around some of the cuda() calls, like the one in the trace you sent. Please send in a PR if you add them!

-Brandon.

@bamos bamos closed this as completed Apr 1, 2017
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

2 participants