-
Notifications
You must be signed in to change notification settings - Fork 562
Minor cleanups #2581
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Minor cleanups #2581
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -215,7 +215,7 @@ def _pre_fork_setup(num_devices): | |
| return PreForkConfig(dev_kind=dev_kind, num_devices=num_devices) | ||
|
|
||
|
|
||
| def _setup_gpu_worker(index, gindex, pf_cfg): | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. IMHO when there are symmetrical APIs (gpu, tpu, cpu) it is better to have a consistent interface, even though one instance of such APIs does not actually use an argument.
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'm not sure what consistency you're referring to here. All gpu, tpu, cpu consistently don't use that argument.
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Right. That argument contains the configuration info parsed before the fork.
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'll keep it removed if that's fine by you. Thanks for the review Davide! |
||
| def _setup_gpu_worker(index, gindex): | ||
| os.environ[xenv.MP_DEVICE] = 'GPU:{}'.format( | ||
| _get_mp_device_ordinal(index, gindex)) | ||
| os.environ[xenv.LOCAL_WORKER] = '{}:{}'.format(_LOCAL_WORKER, gindex) | ||
|
|
@@ -228,7 +228,7 @@ def _setup_gpu_worker(index, gindex, pf_cfg): | |
| os.environ.pop(xenv.GPU_NUM_DEVICES, None) | ||
|
|
||
|
|
||
| def _setup_cpu_worker(index, gindex, pf_cfg): | ||
| def _setup_cpu_worker(index, gindex): | ||
| task_no = 0 | ||
| dev_index = _get_mp_device_ordinal(index, gindex) | ||
| os.environ[xenv.MP_DEVICE] = 'CPU:{}'.format(dev_index) | ||
|
|
@@ -254,7 +254,7 @@ def _wants_tpu_env_config(index, gindex): | |
| return gindex == 0 | ||
|
|
||
|
|
||
| def _setup_tpu_worker(index, gindex, pf_cfg, tpu_env_config): | ||
| def _setup_tpu_worker(index, gindex, tpu_env_config): | ||
| os.environ[xenv.MP_DEVICE] = 'TPU:{}'.format( | ||
| _get_mp_device_ordinal(index, gindex)) | ||
| if xenv.LOCAL_WORKER not in os.environ: | ||
|
|
@@ -282,8 +282,7 @@ def _prepare_env_for_index(index, pf_cfg): | |
| os.environ[xenv.LOCAL_ORDINAL] = str(index) | ||
|
|
||
| if pf_cfg.dev_kind == 'TPU': | ||
| _setup_tpu_worker(index, gindex, pf_cfg, | ||
| os.environ.get(xenv.TPU_CONFIG, None)) | ||
| _setup_tpu_worker(index, gindex, os.environ.get(xenv.TPU_CONFIG, None)) | ||
| if xenv.HOST_ORDINAL in os.environ: | ||
| # If xenv.HOST_ORDINAL is set, we are in a sea-of-devices TPU setup, where | ||
| # each host has local TPU devices, but not interconnected with the fast TPU | ||
|
|
@@ -301,9 +300,9 @@ def _prepare_env_for_index(index, pf_cfg): | |
| # to 0 in all hosts. | ||
| _setup_torch_distributed() | ||
| elif pf_cfg.dev_kind == 'GPU': | ||
| _setup_gpu_worker(index, gindex, pf_cfg) | ||
| _setup_gpu_worker(index, gindex) | ||
| elif pf_cfg.dev_kind == 'CPU': | ||
| _setup_cpu_worker(index, gindex, pf_cfg) | ||
| _setup_cpu_worker(index, gindex) | ||
| _setup_torch_distributed() | ||
| return gindex | ||
|
|
||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.