Add __all__ to torch.{autograd, fx, cuda} submodules
ghstack-source-id: 52ecf64e1f2ffdbb341ec30dae856e395b701fa2
Pull Request resolved: #85343
anjali411 committed Sep 27, 2022
1 parent a03c07c commit f9eadcf
Showing 11 changed files with 21 additions and 95 deletions.
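
For context: `__all__` is Python's standard way to declare a module's public interface. `from module import *` binds only the listed names, and API-surface tooling treats everything else as private. That is why the helper imports enumerated in `test/allowlist_for_publicAPI.json` below can simply be deleted: once each submodule declares `__all__`, names such as `Any`, `List`, or `warn` no longer look public and need no allowlist entry. A minimal sketch of the mechanism (the module and its names are hypothetical, not part of this PR):

```python
# mymod.py -- hypothetical module illustrating the effect of __all__
from typing import Any, List      # helper imports; should not look public

__all__ = ["public_fn"]           # the module's declared public surface

def public_fn(xs: List[Any]) -> int:
    return len(xs)

def _private_helper() -> None:
    pass

# Elsewhere, `from mymod import *` binds only public_fn; Any, List and
# _private_helper are not re-exported, so an API-surface check no longer
# needs an allowlist entry to ignore them.
```
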
91 changes: 0 additions & 91 deletions test/allowlist_for_publicAPI.json
@@ -265,57 +265,6 @@
"set_grad_enabled",
"variable"
],
"torch.autograd.function": [
"Any",
"List",
"Optional",
"OrderedDict",
"with_metaclass"
],
"torch.autograd.functional": [
"List",
"Tuple"
],
"torch.autograd.graph": [
"Any",
"Callable"
],
"torch.autograd.profiler": [
"Any",
"ContextDecorator",
"DeviceType",
"Dict",
"Future",
"List",
"Optional",
"ProfilerActivity",
"ProfilerConfig",
"ProfilerState",
"kineto_available",
"warn"
],
"torch.autograd.profiler_legacy": [
"DeviceType",
"EventList",
"FunctionEvent",
"ProfilerConfig",
"ProfilerState",
"warn"
],
"torch.autograd.profiler_util": [
"DeviceType",
"Dict",
"List",
"Optional",
"Tuple",
"attrgetter",
"defaultdict",
"namedtuple"
],
"torch.autograd.variable": [
"ImperativeEngine",
"with_metaclass"
],
"torch.backends": [
"contextmanager"
],
@@ -353,10 +302,6 @@
"unique_id",
"version"
],
"torch.cuda.profiler": [
"check_error",
"cudart"
],
"torch.distributed": [
"AllToAllOptions",
"AllreduceCoalescedOptions",
@@ -704,42 +649,6 @@
"hashable",
"isvar"
],
"torch.fx.graph_module": [
"Any",
"Dict",
"Graph",
"Importer",
"List",
"Optional",
"PackageExporter",
"PackageImporter",
"Path",
"PythonCode",
"Set",
"Type",
"Union",
"compatibility"
],
"torch.fx.operator_schemas": [
"Any",
"Callable",
"Dict",
"List",
"NamedTuple",
"OpOverload",
"OpOverloadPacket",
"Optional",
"Tuple",
"cast",
"compatibility"
],
"torch.fx.passes.graph_drawer": [
"Any",
"Dict",
"TensorMetadata",
"chain",
"compatibility"
],
"torch.fx.proxy": [
"assert_fn"
],
3 changes: 3 additions & 0 deletions torch/autograd/function.py
@@ -8,6 +8,9 @@
from collections import OrderedDict
from typing import Any, List, Optional

__all__ = ["FunctionCtx", "BackwardCFunction", "FunctionMeta", "Function", "once_differentiable", "traceable",
"InplaceFunction", "NestedIOFunction"]

# Formerly known as: _ContextMethodMixin
class FunctionCtx(object):

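
`Function` and its support names (`FunctionCtx`, `once_differentiable`, and so on) are the intended public API of this file; the typing imports it pulls in are implementation detail. For reference, the canonical way `Function` is used (standard, documented usage, not code from this commit):

```python
import torch
from torch.autograd import Function

class Square(Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)      # stash inputs needed for backward
        return x * x

    @staticmethod
    def backward(ctx, grad_out):
        (x,) = ctx.saved_tensors
        return 2 * x * grad_out       # d(x*x)/dx = 2x

x = torch.randn(4, requires_grad=True)
Square.apply(x).sum().backward()
print(x.grad)                         # equals 2 * x
```
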
2 changes: 2 additions & 0 deletions torch/autograd/functional.py
@@ -3,6 +3,8 @@
from . import forward_ad as fwAD
from torch._vmap_internals import _vmap

__all__ = ["vjp", "jvp", "jacobian", "hessian", "hvp", "vhp"]

# Utility functions

def _as_tuple_nocheck(x):
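
A quick sanity check one might run against this change (not part of the PR) is that every name declared in `__all__` resolves to a real attribute and that the typing helpers no longer read as public:

```python
import torch.autograd.functional as F

# Every declared public name should exist on the module.
assert all(hasattr(F, name) for name in F.__all__)

# The typing helpers imported inside the module are no longer advertised.
assert "List" not in F.__all__ and "Tuple" not in F.__all__
```
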
1 change: 1 addition & 0 deletions torch/autograd/graph.py
@@ -1,6 +1,7 @@
import torch
from typing import Callable, Any

__all__ = ["saved_tensors_hooks", "save_on_cpu"]

class saved_tensors_hooks():
"""Context-manager that sets a pair of pack / unpack hooks for saved tensors.
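
`saved_tensors_hooks` and `save_on_cpu` are the two context managers this module exposes, so they are exactly what `__all__` lists. A small usage sketch with identity hooks (real hooks would typically offload or compress the saved tensors):

```python
import torch

def pack(x):
    return x        # e.g. move to CPU or compress here

def unpack(x):
    return x        # inverse of pack

a = torch.ones(5, requires_grad=True)
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
    y = (a * a).sum()
y.backward()        # unpack() runs when the saved tensor is needed
```
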
9 changes: 5 additions & 4 deletions torch/autograd/profiler.py
@@ -30,14 +30,16 @@
)
from torch.futures import Future

__all__ = ["profile", "record_function", "emit_itt", "emit_nvtx", "load_nvprof", "EnforceUnique",
"parse_nvprof_trace", "kineto_step", "EventList", "FunctionEvent", "MemRecordsAcc"]

try:
# Available in Python >= 3.2
from contextlib import ContextDecorator
from contextlib import ContextDecorator as _ContextDecorator
except ImportError:
import functools

class ContextDecorator(object): # type: ignore[no-redef]
class _ContextDecorator(object): # type: ignore[no-redef]

def __enter__(self):
raise NotImplementedError
@@ -53,7 +55,6 @@ def wrapped(*args, **kwargs):

return wrapped


class profile(object):
"""Context manager that manages autograd profiler state and holds a summary of results.
Under the hood it just records events of functions being executed in C++ and
@@ -440,7 +441,7 @@ def createFunctionEventForMemoryEvents(evt):
return function_events


class record_function(ContextDecorator):
class record_function(_ContextDecorator):
"""Context manager/function decorator that adds a label to a block of
Python code (or function) when running autograd profiler. It is
useful when tracing the code profile.
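
Besides adding `__all__`, this file renames the `contextlib.ContextDecorator` compatibility shim to `_ContextDecorator`, keeping it out of the public surface; `record_function` still subclasses it and keeps its dual context-manager/decorator behavior. A brief usage sketch (documented API; the label string is arbitrary):

```python
import torch
from torch.autograd.profiler import profile, record_function

with profile() as prof:
    with record_function("my_block"):   # label a region for the profiler
        y = torch.ones(8) + torch.ones(8)

print(prof.key_averages().table(sort_by="cpu_time_total"))
```
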
1 change: 1 addition & 0 deletions torch/autograd/profiler_legacy.py
@@ -13,6 +13,7 @@
import itertools
from warnings import warn

__all__ = ["profile"]

class profile(object):
"""DEPRECATED: use torch.profiler instead"""
2 changes: 2 additions & 0 deletions torch/autograd/profiler_util.py
@@ -10,6 +10,8 @@
import bisect
import math

__all__ = ["EventList", "FormattedTimesMixin", "Interval", "Kernel", "FunctionEvent", "FunctionEventAvg",
"StringTable", "MemRecordsAcc"]

class EventList(list):
"""A list of Events (for pretty printing)"""
1 change: 1 addition & 0 deletions torch/autograd/variable.py
@@ -1,6 +1,7 @@
import torch
from torch._six import with_metaclass

__all__ = ["VariableMeta", "Variable"]

class VariableMeta(type):
def __instancecheck__(cls, other):
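
`Variable` survives only for backward compatibility (it has been merged into `Tensor`), and `VariableMeta` supplies its `isinstance` hook; those two are now the declared public names, while `with_metaclass` and `ImperativeEngine` stay private. A tiny illustration of the compatibility behavior (assumes current PyTorch semantics):

```python
import torch
from torch.autograd import Variable

t = torch.ones(3, requires_grad=True)
assert isinstance(t, Variable)   # every Tensor satisfies the Variable check
v = Variable(torch.ones(3))      # legacy spelling; simply yields a Tensor
assert isinstance(v, torch.Tensor)
```
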
1 change: 1 addition & 0 deletions torch/cuda/profiler.py
@@ -2,6 +2,7 @@
import contextlib
from . import cudart, check_error

__all__ = ["init", "start", "stop", "profile"]

DEFAULT_FLAGS = [
"gpustarttimestamp",
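
`torch.cuda.profiler` keeps `cudart` and `check_error` as internal imports and exports only its wrapper functions. A hedged usage sketch, assuming a CUDA-enabled build with an external profiler (e.g. Nsight or nvprof) attached to record the range:

```python
import torch
import torch.cuda.profiler as cuda_profiler

if torch.cuda.is_available():
    x = torch.randn(1024, device="cuda")
    # profile() wraps the CUDA profiler start/stop calls around the block.
    with cuda_profiler.profile():
        (x * 2).sum().item()
```
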
2 changes: 2 additions & 0 deletions torch/fx/graph_module.py
@@ -16,6 +16,8 @@
import os
import warnings

__all__ = ["reduce_graph_module", "reduce_package_graph_module", "reduce_deploy_graph_module", "GraphModule"]

# Normal exec loses the source code, however we can work with
# the linecache module to recover it.
# Using _exec_with_source will add it to our local cache
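
On the FX side, `GraphModule` remains the public entry point, while packaging and typing helpers (`PackageImporter`, `PythonCode`, `Dict`, and friends) drop out of the advertised surface. A minimal sketch of how a `GraphModule` is normally obtained (standard `torch.fx` usage, not code from this commit):

```python
import torch
import torch.fx as fx

class AddOne(torch.nn.Module):
    def forward(self, x):
        return x + 1

gm = fx.symbolic_trace(AddOne())   # produces a torch.fx.GraphModule
assert isinstance(gm, fx.GraphModule)
print(gm.code)                     # generated Python source for the traced graph
```
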
3 changes: 3 additions & 0 deletions torch/fx/operator_schemas.py
@@ -13,6 +13,9 @@
if TYPE_CHECKING:
from .node import Argument

__all__ = ["ArgsKwargsPair", "check_for_mutable_operation", "get_signature_for_torch_op", "create_type_hint",
"type_matches", "normalize_function", "normalize_module"]

@compatibility(is_backward_compatible=False)
class ArgsKwargsPair(NamedTuple):
"""
