Add correct __all__ for torch.distributed and torch.cuda submodules
[ghstack-poisoned]
anjali411 committed Sep 27, 2022
1 parent b21a632 commit 4c1cb7f
Showing 22 changed files with 31 additions and 152 deletions.
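
For context on the pattern applied across these files: once a module defines `__all__`, `from module import *` exports only the listed names, and API-auditing tools can treat every other top-level name (for example typing helpers such as `Any` or `Dict` imported at the top of the file) as non-public. A minimal sketch of the effect, using a hypothetical module rather than the PyTorch sources:

# mymodule.py -- minimal sketch of the pattern this commit applies (hypothetical module)
from typing import Any, Optional   # helpers that would otherwise look public

__all__ = ["autocast"]             # only this name is part of the public API

class autocast:
    def __init__(self, enabled: bool = True) -> None:
        self.enabled = enabled

# from mymodule import *   ->  brings in only `autocast`
# dir(mymodule)            ->  still lists Any and Optional, which is why modules
#                              without __all__ needed entries in the allowlist below
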
143 changes: 1 addition & 142 deletions test/allowlist_for_publicAPI.json
@@ -268,20 +268,6 @@
"torch.backends": [
"contextmanager"
],
"torch.cpu.amp.autocast_mode": [
"Any"
],
"torch.cuda": [
"Any",
"Device",
"Dict",
"List",
"Optional",
"Set",
"Tuple",
"Union",
"classproperty"
],
"torch.cuda.comm": [
"broadcast",
"broadcast_coalesced",
@@ -290,12 +276,6 @@
"scatter",
"gather"
],
"torch.cuda.amp.autocast_mode": [
"Any"
],
"torch.cuda.amp.common": [
"find_spec"
],
"torch.cuda.nccl": [
"init_rank",
"is_available",
@@ -418,47 +398,7 @@
"torch.distributed.optim.utils": [
"Type"
],
"torch.distributed.pipeline.sync.checkpoint": [
"Checkpoint",
"Checkpointing",
"Context",
"Function",
"Recompute",
"ThreadLocal",
"checkpoint",
"enable_checkpointing",
"enable_recomputing",
"restore_rng_states",
"save_rng_states"
],
"torch.distributed.pipeline.sync.copy": [
"Context",
"Copy",
"Wait"
],
"torch.distributed.pipeline.sync.dependency": [
"Fork",
"Join",
"fork",
"join"
],
"torch.distributed.pipeline.sync.microbatch": [
"Batch",
"NoChunk",
"check",
"gather",
"scatter"
],
"torch.distributed.pipeline.sync.phony": [
"get_phony"
],
"torch.distributed.pipeline.sync.pipe": [
"BalanceError",
"PipeSequential",
"Pipeline",
"WithDevice"
],
"torch.distributed.pipeline.sync.pipeline": [
"Pipeline"
],
"torch.distributed.pipeline.sync.skip.layout": [
Expand All @@ -482,25 +422,6 @@
"current_skip_tracker",
"use_skip_tracker"
],
"torch.distributed.pipeline.sync.stream": [
"CPUStreamType",
"as_cuda",
"current_stream",
"default_stream",
"get_device",
"is_cuda",
"new_stream",
"record_stream",
"use_device",
"use_stream",
"wait_stream"
],
"torch.distributed.pipeline.sync.worker": [
"Task",
"create_workers",
"spawn_workers",
"worker"
],
"torch.distributed.remote_device": [
"Optional",
"Union"
@@ -521,69 +442,7 @@
"urlunparse"
],
"torch.distributed.rpc": [
"Any",
"Dict",
"Future",
"Generator",
"Generic",
"GenericWithOneTypeVar",
"PyRRef",
"RemoteProfilerManager",
"RpcAgent",
"RpcBackendOptions",
"Set",
"Store",
"TensorPipeAgent",
"Tuple",
"TypeVar",
"WorkerInfo",
"enable_gil_profiling",
"get_rpc_timeout",
"method",
"timedelta",
"urlparse"
],
"torch.distributed.rpc.api": [
"Any",
"Dict",
"Future",
"Generic",
"GenericWithOneTypeVar",
"PyRRef",
"PythonUDF",
"RPCExecMode",
"RemoteProfilerManager",
"Set",
"TypeVar",
"WorkerInfo",
"get_rpc_timeout",
"method"
],
"torch.distributed.rpc.backend_registry": [
"Dict",
"List",
"Set",
"Tuple"
],
"torch.distributed.rpc.constants": [
"timedelta"
],
"torch.distributed.rpc.internal": [
"Enum"
],
"torch.distributed.rpc.options": [
"DeviceType",
"Dict",
"List",
"Optional",
"Union"
],
"torch.distributions.utils": [
"Any",
"Dict",
"Number",
"is_tensor_like",
"update_wrapper"
],
"torch.fft": [
"Tensor",
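
The entries removed from the allowlist above existed only because those modules lacked an `__all__`: the file records names that look public (no leading underscore) but are really incidental imports such as `Any` or `Dict`. With `__all__` declared, a public-API check can rely on the module itself. A hypothetical sketch of such a check, with names and logic assumed rather than copied from PyTorch's actual test:

import importlib
import json

def unintended_public_names(module_name: str, allowlist: dict) -> set:
    """Names that look public in dir(module) but are neither declared in
    __all__ nor tolerated by the allowlist entry for that module."""
    mod = importlib.import_module(module_name)
    looks_public = {n for n in dir(mod) if not n.startswith("_")}
    declared = set(getattr(mod, "__all__", []))
    tolerated = set(allowlist.get(module_name, []))
    return looks_public - declared - tolerated

# Example usage (behavior of the real test is an assumption):
# with open("test/allowlist_for_publicAPI.json") as f:
#     allowlist = json.load(f)
# assert not unintended_public_names("torch.cuda.amp.common", allowlist)
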
2 changes: 2 additions & 0 deletions torch/cpu/amp/autocast_mode.py
@@ -1,6 +1,8 @@
import torch
from typing import Any

__all__ = ["autocast"]

class autocast(torch.amp.autocast_mode.autocast):
r"""
See :class:`torch.autocast`.
2 changes: 1 addition & 1 deletion torch/cuda/__init__.py
@@ -834,7 +834,7 @@ def dtype(self):
'IntStorage', 'IntTensor',
'LongStorage', 'LongTensor',
'ShortStorage', 'ShortTensor',
-'CUDAGraph', 'CudaError', 'DeferredCudaCallError', 'Device', 'Event', 'ExternalStream', 'OutOfMemoryError',
+'CUDAGraph', 'CudaError', 'DeferredCudaCallError', 'Event', 'ExternalStream', 'OutOfMemoryError',
'Stream', 'StreamContext', 'amp', 'caching_allocator_alloc', 'caching_allocator_delete', 'can_device_access_peer',
'check_error', 'cudaStatus', 'cudart', 'current_blas_handle', 'current_device', 'current_stream', 'default_generators',
'default_stream', 'device', 'device_count', 'device_of', 'empty_cache', 'get_arch_list', 'get_device_capability',
1 change: 1 addition & 0 deletions torch/cuda/amp/autocast_mode.py
@@ -9,6 +9,7 @@
from torch._six import string_classes
from typing import Any

__all__ = ["autocast", "custom_fwd", "custom_bwd"]

class autocast(torch.amp.autocast_mode.autocast):
r"""
1 change: 1 addition & 0 deletions torch/cuda/amp/common.py
@@ -1,6 +1,7 @@
import torch
from importlib.util import find_spec

__all__ = ["amp_definitely_not_available"]

def amp_definitely_not_available():
return not (torch.cuda.is_available() or find_spec('torch_xla'))
1 change: 1 addition & 0 deletions torch/distributed/__init__.py
@@ -4,6 +4,7 @@

import torch

__all__ = ["is_available"]

def is_available() -> bool:
"""
4 changes: 3 additions & 1 deletion torch/distributed/pipeline/sync/checkpoint.py
@@ -47,7 +47,9 @@
from .microbatch import Batch
from .phony import get_phony

__all__ = ["is_checkpointing", "is_recomputing"]
__all__ = ["Function", "checkpoint", "Checkpointing", "ThreadLocal", "enable_checkpointing",
"enable_recomputing", "is_checkpointing", "is_recomputing", "Context", "save_rng_states",
"restore_rng_states", "Checkpoint", "Recompute"]


Tensors = Sequence[Tensor]
2 changes: 1 addition & 1 deletion torch/distributed/pipeline/sync/copy.py
@@ -15,7 +15,7 @@

from .stream import AbstractStream, current_stream, get_device, record_stream, use_stream, wait_stream

-__all__: List[str] = []
+__all__: List[str] = ["Context", "Copy", "Wait"]


Tensors = Sequence[Tensor]
2 changes: 1 addition & 1 deletion torch/distributed/pipeline/sync/dependency.py
@@ -12,7 +12,7 @@

from .phony import get_phony

-__all__: List[str] = []
+__all__: List[str] = ["fork", "Fork", "join", "Join"]


def fork(input: Tensor) -> Tuple[Tensor, Tensor]:
2 changes: 1 addition & 1 deletion torch/distributed/pipeline/sync/microbatch.py
@@ -12,7 +12,7 @@
from torch import Tensor
import torch.cuda.comm

-__all__: List[str] = []
+__all__: List[str] = ["NoChunk", "Batch", "check", "scatter", "gather"]


Tensors = Sequence[Tensor]
2 changes: 1 addition & 1 deletion torch/distributed/pipeline/sync/phony.py
@@ -12,7 +12,7 @@

from .stream import default_stream, use_stream

-__all__: List[str] = []
+__all__: List[str] = ["get_phony"]


_phonies: Dict[Tuple[torch.device, bool], Tensor] = {}
2 changes: 1 addition & 1 deletion torch/distributed/pipeline/sync/pipe.py
@@ -21,7 +21,7 @@
from .skip.skippable import verify_skippables
from .stream import AbstractStream, new_stream

__all__ = ["Pipe"]
__all__ = ["Pipe", "BalanceError", "PipeSequential", "WithDevice"]


Device = Union[torch.device, int, str]
2 changes: 1 addition & 1 deletion torch/distributed/pipeline/sync/pipeline.py
@@ -23,7 +23,7 @@
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers

-__all__: List[str] = []
+__all__: List[str] = ["Pipeline"]


Tensors = Sequence[Tensor]
4 changes: 3 additions & 1 deletion torch/distributed/pipeline/sync/stream.py
@@ -12,7 +12,9 @@

import torch

-__all__: List[str] = []
+__all__: List[str] = ["CPUStreamType", "new_stream", "current_stream", "default_stream",
+                      "use_device", "use_stream", "get_device", "wait_stream", "record_stream",
+                      "is_cuda", "as_cuda"]


class CPUStreamType:
2 changes: 2 additions & 0 deletions torch/distributed/pipeline/sync/utils.py
@@ -1,6 +1,8 @@
from torch import nn
from typing import List

__all__ = ["partition_model"]

def partition_model(
module: nn.Sequential,
balance: List[int],
2 changes: 1 addition & 1 deletion torch/distributed/pipeline/sync/worker.py
@@ -17,7 +17,7 @@
from .microbatch import Batch
from .stream import AbstractStream, use_device, use_stream

-__all__: List[str] = []
+__all__: List[str] = ["Task", "worker", "create_workers", "spawn_workers"]


ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
1 change: 1 addition & 0 deletions torch/distributed/rpc/__init__.py
@@ -9,6 +9,7 @@
import torch
import torch.distributed as dist

__all__ = ["is_available"]

logger = logging.getLogger(__name__)

2 changes: 2 additions & 0 deletions torch/distributed/rpc/backend_registry.py
@@ -11,6 +11,8 @@
from . import api
from . import constants as rpc_constants

__all__ = ["backend_registered", "register_backend", "construct_rpc_backend_options", "init_backend"]

BackendValue = collections.namedtuple(
"BackendValue", ["construct_rpc_backend_options_handler", "init_backend_handler"]
)
2 changes: 2 additions & 0 deletions torch/distributed/rpc/constants.py
@@ -20,3 +20,5 @@
DEFAULT_PROCESS_GROUP_TIMEOUT: timedelta = timedelta(milliseconds=2 ** 31 - 1)
# Value indicating that timeout is not set for RPC call, and the default should be used.
UNSET_RPC_TIMEOUT: float = _UNSET_RPC_TIMEOUT

__all__ = []
1 change: 1 addition & 0 deletions torch/distributed/rpc/internal.py
@@ -11,6 +11,7 @@
import torch.distributed as dist
from torch._C._distributed_rpc import _get_current_rpc_agent

__all__ = ["RPCExecMode", "serialize", "deserialize"]

# Thread local tensor tables to store tensors while pickling torch.Tensor
# objects
1 change: 1 addition & 0 deletions torch/distributed/rpc/options.py
@@ -7,6 +7,7 @@

DeviceType = Union[int, str, torch.device]

__all__ = ["TensorPipeRpcBackendOptions"]

def _to_device(device: DeviceType) -> torch.device:
device = torch.device(device)
2 changes: 2 additions & 0 deletions torch/distributions/utils.py
@@ -7,6 +7,8 @@

euler_constant = 0.57721566490153286060 # Euler Mascheroni Constant

__all__ = ["broadcast_all", "logits_to_probs", "clamp_probs", "probs_to_logits", "lazy_property",
"tril_matrix_to_vec", "vec_to_tril_matrix"]

def broadcast_all(*values):
r"""