
Commit

Add a command line argument to enable backend:cudaMallocAsync
comfyanonymous committed Jul 17, 2023
1 parent 3a150ba commit 1679abd
Showing 3 changed files with 11 additions and 2 deletions.
comfy/cli_args.py: 1 addition, 0 deletions
@@ -40,6 +40,7 @@ def __call__(self, parser, namespace, values, option_string=None):
 parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
 parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.")
 parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
+parser.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync.")
 parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.")
 
 fp_group = parser.add_mutually_exclusive_group()
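Note: because the flag uses action="store_true", it defaults to False and flips to True only when --cuda-malloc is passed. A minimal standalone argparse sketch of that behaviour (illustrative only, not ComfyUI code):

import argparse

parser = argparse.ArgumentParser()
# Same shape as the new flag: absent -> False, passed -> True.
parser.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync.")

args = parser.parse_args(["--cuda-malloc"])
print(args.cuda_malloc)  # True

ComfyUI would then be launched with something like: python main.py --cuda-malloc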
comfy/model_management.py: 1 addition, 1 deletion
@@ -204,7 +204,7 @@ def is_nvidia():
 def get_torch_device_name(device):
     if hasattr(device, 'type'):
         if device.type == "cuda":
-            return "{} {}".format(device, torch.cuda.get_device_name(device))
+            return "{} {} : {}".format(device, torch.cuda.get_device_name(device), torch.cuda.get_allocator_backend())
         else:
             return "{}".format(device.type)
     else:
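For reference, a small sketch of what the appended field reports, assuming a CUDA build of PyTorch recent enough to expose torch.cuda.get_allocator_backend():

import torch

if torch.cuda.is_available():
    device = torch.device("cuda:0")
    # "native" with the default caching allocator, "cudaMallocAsync" when
    # PYTORCH_CUDA_ALLOC_CONF selects the async backend.
    print("{} {} : {}".format(device,
                              torch.cuda.get_device_name(device),
                              torch.cuda.get_allocator_backend()))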
main.py: 9 additions, 1 deletion
@@ -51,7 +51,6 @@ def execute_script(script_path):
 import gc
 
 from comfy.cli_args import args
-import comfy.utils
 
 if os.name == "nt":
     import logging
@@ -62,7 +61,16 @@ def execute_script(script_path):
     os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
     print("Set cuda device to:", args.cuda_device)
 
+if args.cuda_malloc:
+    env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', None)
+    if env_var is None:
+        env_var = "backend:cudaMallocAsync"
+    else:
+        env_var += ",backend:cudaMallocAsync"
+
+    os.environ['PYTORCH_CUDA_ALLOC_CONF'] = env_var
 
+import comfy.utils
 import yaml
 
 import execution
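The ordering here is the point of the change: PYTORCH_CUDA_ALLOC_CONF is read when PyTorch initializes its CUDA allocator, so the variable has to be set before anything imports torch, which is why import comfy.utils moves below the new block. A self-contained sketch of the same pattern (standalone example, not ComfyUI code):

import os

# Append rather than overwrite, so any allocator options the user already
# put in PYTORCH_CUDA_ALLOC_CONF are preserved.
env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', None)
if env_var is None:
    env_var = "backend:cudaMallocAsync"
else:
    env_var += ",backend:cudaMallocAsync"
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = env_var

import torch  # must come after the environment variable is set

if torch.cuda.is_available():
    print(torch.cuda.get_allocator_backend())  # expected: "cudaMallocAsync"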
