Skip to content

Commit

Permalink
Update on "Make C10_NODISCARD macro more portable for nvcc+clang."
Browse files Browse the repository at this point in the history
Make C10_NODISCARD macro more portable for nvcc+clang.

Fixes #13118.

Signed-off-by: Edward Z. Yang <ezyang@fb.com>

gh-metadata: pytorch pytorch 20324 gh/ezyang/125/head
  • Loading branch information
ezyang committed May 16, 2019
2 parents bfb1c69 + 09f22d1 commit 53630ff
Show file tree
Hide file tree
Showing 496 changed files with 12,959 additions and 8,230 deletions.
1 change: 0 additions & 1 deletion .circleci/cimodel/data/caffe2_build_data.py
Expand Up @@ -11,7 +11,6 @@
(Ver("gcc", "4.9"), [X("py2")]),
]),
(Ver("ubuntu", "16.04"), [
(Ver("cuda", "8.0"), [X("py2")]),
(Ver("cuda", "9.0"), [
# TODO make explicit that this is a "secret TensorRT build"
# (see https://github.com/pytorch/pytorch/pull/17323#discussion_r259446749)
Expand Down
14 changes: 11 additions & 3 deletions .circleci/cimodel/data/pytorch_build_data.py
Expand Up @@ -8,7 +8,7 @@
(None, [
X("2.7.9"),
X("2.7"),
X("3.5"),
("3.5", [("important", [X(True)])]),
X("nightly"),
]),
("gcc", [
Expand All @@ -28,7 +28,6 @@
("5", [X("3.6")]),
]),
("cuda", [
("8", [X("3.6")]),
("9", [
# Note there are magic strings here
# https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L21
Expand All @@ -37,7 +36,7 @@
# and
# https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L153
# (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453144)
X("2.7"),
("2.7", [("important", [X(True)])]),
X("3.6"),
]),
("9.2", [X("3.6")]),
Expand Down Expand Up @@ -136,6 +135,7 @@ def child_constructor(self):
next_nodes = {
"xla": XlaConfigNode,
"namedtensor": NamedTensorConfigNode,
"important": ImportantConfigNode,
}
return next_nodes[experimental_feature]

Expand All @@ -156,6 +156,14 @@ def init2(self, node_name):
self.props["is_namedtensor"] = node_name


class ImportantConfigNode(TreeConfigNode):
    """Tree node marking a build configuration as 'important'.

    Plugs into the experimental-feature dispatch table (see the
    ``next_nodes`` mapping) under the "important" key.
    """

    def modify_label(self, label):
        """Return the label prefixed so important configs are visible."""
        return "IMPORTANT={}".format(label)

    def init2(self, node_name):
        """Record the node's value under the ``is_important`` prop."""
        self.props["is_important"] = node_name
class XenialCompilerConfigNode(TreeConfigNode):

def init2(self, node_name):
Expand Down
13 changes: 10 additions & 3 deletions .circleci/cimodel/data/pytorch_build_definitions.py
Expand Up @@ -25,7 +25,8 @@ def __init__(self,
gpu_resource=None,
dependent_tests=None,
parent_build=None,
is_namedtensor=False):
is_namedtensor=False,
is_important=False):

self.distro = distro
self.pyver = pyver
Expand All @@ -37,6 +38,7 @@ def __init__(self,
# (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453608)
self.is_xla = is_xla
self.is_namedtensor = is_namedtensor
self.is_important = is_important

self.restrict_phases = restrict_phases
self.gpu_resource = gpu_resource
Expand All @@ -46,7 +48,10 @@ def __init__(self,
# TODO: Eliminate the special casing for docker paths
# In the short term, we *will* need to support special casing as docker images are merged for caffe2 and pytorch
def get_parms(self, for_docker):
leading = ["pytorch"]
leading = []
if self.is_important and not for_docker:
leading.append("AAA")
leading.append("pytorch")
if self.is_xla and not for_docker:
leading.append("xla")
if self.is_namedtensor and not for_docker:
Expand Down Expand Up @@ -225,6 +230,7 @@ def instantiate_configs():

is_xla = fc.find_prop("is_xla") or False
is_namedtensor = fc.find_prop("is_namedtensor") or False
is_important = fc.find_prop("is_important") or False

gpu_resource = None
if cuda_version and cuda_version != "10":
Expand All @@ -239,9 +245,10 @@ def instantiate_configs():
restrict_phases,
gpu_resource,
is_namedtensor=is_namedtensor,
is_important=is_important,
)

if cuda_version == "8":
if cuda_version == "9" and python_version == "3.6":
c.dependent_tests = gen_dependent_configs(c)

config_list.append(c)
Expand Down

0 comments on commit 53630ff

Please sign in to comment.