improve CUDA VRAM monitoring
extra check that device==cuda before getting VRAM stats
lstein committed Sep 11, 2022
1 parent bfb2781 commit 839e30e
Showing 2 changed files with 12 additions and 7 deletions.
ldm/generate.py: 7 additions & 5 deletions
@@ -357,12 +357,14 @@ def process_image(image,seed):
         print(
             f'>> {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
         )
-        print(
-            f'>> Max VRAM used for this generation:',
-            '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
-        )
+        if torch.cuda.is_available() and self.device.type == 'cuda':
+            print(
+                f'>> Max VRAM used for this generation:',
+                '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
+                'Current VRAM utilization:'
+                '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
+            )
 
         if self.session_peakmem:
             self.session_peakmem = max(
                 self.session_peakmem, torch.cuda.max_memory_allocated()
             )
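Why the guard checks two things: torch.cuda.is_available() says a CUDA device exists on the machine, while self.device.type == 'cuda' says this generator is actually using it. A user on a CUDA-capable box who forced CPU generation would pass the first test but not the second, and the torch.cuda memory counters would just report zeros for such a run. A minimal sketch of the same pattern as a standalone helper (the name print_vram_stats and the function boundary are illustrative, not part of this commit):

import torch

def print_vram_stats(device: torch.device) -> None:
    # Stats are only meaningful when the work actually ran on CUDA;
    # on CPU or MPS the torch.cuda counters have nothing to report.
    if not (torch.cuda.is_available() and device.type == 'cuda'):
        return
    peak    = torch.cuda.max_memory_allocated() / 1e9  # high-water mark, GB
    current = torch.cuda.memory_allocated() / 1e9      # held by live tensors now, GB
    print('>> Max VRAM used for this generation: %4.2fG. '
          'Current VRAM utilization: %4.2fG' % (peak, current))

The two figures answer different questions: max_memory_allocated() is the peak since the process started (or since the last torch.cuda.reset_peak_memory_stats() call), while memory_allocated() is what live tensors hold at the moment of the call, which is why the printout can show a large peak next to a much smaller current figure.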
scripts/dream.py: 5 additions & 2 deletions
@@ -130,8 +130,11 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile):
             command = get_next_command(infile)
         except EOFError:
             done = True
-            break
-
+            continue
+        except KeyboardInterrupt:
+            done = True
+            continue
+
         # skip empty lines
         if not command.strip():
             continue
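Beyond adding Ctrl-C handling, the switch from break to done = True; continue gives the loop a single point of termination: every exit path re-enters the while not done: test, so anything that runs after the loop happens the same way no matter how the session ended. A stripped-down, hypothetical reduction of the pattern (the real main_loop carries much more state):

def main_loop(get_next_command) -> None:
    done = False
    while not done:  # the one place the loop can end
        try:
            command = get_next_command()
        except EOFError:           # input file or piped stdin exhausted
            done = True
            continue
        except KeyboardInterrupt:  # Ctrl-C at the prompt exits cleanly
            done = True
            continue

        # skip empty lines
        if not command.strip():
            continue
        print(f'would process: {command!r}')

The builtin input() raises both EOFError and KeyboardInterrupt, so main_loop(input) is enough to try the pattern at a terminal.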
