Pass NVIDIA_TF32_OVERRIDE to internal (#43646) #44796

Merged
merged 1 commit into from Aug 2, 2022
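This change forwards the NVIDIA_TF32_OVERRIDE environment variable from the test driver into the required_envs handed to the spawned trainer processes, so TF32 behaviour in the collective API tests can be controlled from the calling environment; the remaining hunks re-wrap existing calls without changing behaviour. A minimal sketch of the forwarding pattern, with one caveat, follows the diff.
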
117 changes: 54 additions & 63 deletions python/paddle/fluid/tests/unittests/test_collective_api_base.py
@@ -32,6 +32,7 @@


class TestCollectiveAPIRunnerBase(object):

def get_model(self, train_prog, startup_prog, rank, indata=None):
raise NotImplementedError(
"get model should be implemented by child class.")
@@ -91,6 +92,7 @@ def runtime_main(test_class, col_type):


class TestDistBase(unittest.TestCase):

def setUp(self):
self._port_set = set()
self._trainers = 2
@@ -104,6 +106,7 @@ def tearDown(self):
self.temp_dir.cleanup()

def _find_free_port(self):

def __free_port():
with closing(socket.socket(socket.AF_INET,
socket.SOCK_STREAM)) as s:
@@ -168,17 +171,15 @@ def _run_cluster(self, model_file, envs):
tr0_pipe = open(path0, "w")
tr1_pipe = open(path1, "w")
#print(tr0_cmd)
tr0_proc = subprocess.Popen(
tr0_cmd.strip().split(),
stdout=subprocess.PIPE,
stderr=tr0_pipe,
env=env0)

tr1_proc = subprocess.Popen(
tr0_cmd.strip().split(),
stdout=subprocess.PIPE,
stderr=tr1_pipe,
env=env1)
tr0_proc = subprocess.Popen(tr0_cmd.strip().split(),
stdout=subprocess.PIPE,
stderr=tr0_pipe,
env=env0)

tr1_proc = subprocess.Popen(tr0_cmd.strip().split(),
stdout=subprocess.PIPE,
stderr=tr1_pipe,
env=env1)

tr0_out, tr0_err = tr0_proc.communicate()
tr1_out, tr1_err = tr1_proc.communicate()
@@ -220,8 +221,14 @@ def check_with_place(self,
required_envs["GLOG_v"] = "3"
required_envs["GLOG_logtostderr"] = "1"
required_envs["GLOO_LOG_LEVEL"] = "TRACE"
tr0_out, tr1_out, pid0, pid1 = self._run_cluster(model_file,
required_envs)

if os.getenv('NVIDIA_TF32_OVERRIDE', '') is not None:
required_envs['NVIDIA_TF32_OVERRIDE'] = os.getenv(
'NVIDIA_TF32_OVERRIDE', '')

tr0_out, tr1_out, pid0, pid1 = self._run_cluster(
model_file, required_envs)

np.random.seed(pid0)
input1 = np.random.random((10, 1000))
np.random.seed(pid1)
@@ -248,36 +255,33 @@ def check_with_place(self,
elif col_type == "allreduce":
need_result = input1 + input2
self.assertTrue(
np.allclose(
tr0_out, need_result, rtol=1e-05, atol=1e-05))
np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05))
self.assertTrue(
np.allclose(
tr1_out, need_result, rtol=1e-05, atol=1e-05))
np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05))
elif col_type == "parallel_embedding":
result_data = tr0_out[0]
np.random.seed(2020)
need_result = np.random.rand(12, 8)
for i in range(result_data.shape[0]):
for j in range(result_data.shape[1]):
data = result_data[i][j]
assert np.allclose(
tr0_out[1][i][j], need_result[data], atol=1e-08)
assert np.allclose(tr0_out[1][i][j],
need_result[data],
atol=1e-08)
elif col_type == "row_parallel_linear":
result_data = tr0_out[0]
np.random.seed(2020)
weight = np.random.rand(1000, 16)
need_result = np.matmul(input1, weight)
self.assertTrue(
np.allclose(
result_data, need_result, rtol=1e-05, atol=1e-05))
np.allclose(result_data, need_result, rtol=1e-05, atol=1e-05))
elif col_type == "column_parallel_linear":
result_data = tr0_out[0]
np.random.seed(2020)
weight = np.random.rand(1000, 16)
need_result = np.matmul(input1, weight)
self.assertTrue(
np.allclose(
result_data, need_result, rtol=1e-05, atol=1e-05))
np.allclose(result_data, need_result, rtol=1e-05, atol=1e-05))
elif col_type == "alltoall":
need_result1 = np.vstack((input1[0:input1.shape[0] // 2, :],
input2[0:input2.shape[0] // 2, :]))
@@ -286,16 +290,13 @@ def check_with_place(self,
tr0_out = np.vstack(tr0_out)
tr1_out = np.vstack(tr1_out)
self.assertTrue(
np.allclose(
tr0_out, need_result1, rtol=1e-05, atol=1e-05))
np.allclose(tr0_out, need_result1, rtol=1e-05, atol=1e-05))
self.assertTrue(
np.allclose(
tr1_out, need_result2, rtol=1e-05, atol=1e-05))
np.allclose(tr1_out, need_result2, rtol=1e-05, atol=1e-05))
elif col_type == "sendrecv":
result_data = tr1_out[0]
self.assertTrue(
np.allclose(
input1, result_data, rtol=1e-05, atol=1e-05))
np.allclose(input1, result_data, rtol=1e-05, atol=1e-05))
elif col_type == "global_gather":
in_feat = 2
n_expert = 2
@@ -372,15 +373,13 @@ def check_with_place(self,
if result1 == []:
output1 = np.array([])
else:
output1 = np.concatenate(
result1, axis=0).reshape(
sum(local_expert_count1), in_feat)
output1 = np.concatenate(result1, axis=0).reshape(
sum(local_expert_count1), in_feat)
if result2 == []:
output2 = np.array([])
else:
output2 = np.concatenate(
result2, axis=0).reshape(
sum(local_expert_count2), in_feat)
output2 = np.concatenate(result2, axis=0).reshape(
sum(local_expert_count2), in_feat)

if tr0_out[0] is None or tr0_out[0].shape[0] == 0:
tr0_out[0] = np.array([])
@@ -389,24 +388,20 @@ def check_with_place(self,
tr1_out[0] = np.array([])

self.assertTrue(
np.allclose(
tr0_out[0], output1, rtol=1e-05, atol=1e-05))
np.allclose(tr0_out[0], output1, rtol=1e-05, atol=1e-05))
self.assertTrue(
np.allclose(
tr1_out[0], output2, rtol=1e-05, atol=1e-05))
np.allclose(tr1_out[0], output2, rtol=1e-05, atol=1e-05))
if static_mode == 0:
self.assertTrue(
np.allclose(
tr0_out[1],
2 * local_input_buf1,
rtol=1e-05,
atol=1e-05))
np.allclose(tr0_out[1],
2 * local_input_buf1,
rtol=1e-05,
atol=1e-05))
self.assertTrue(
np.allclose(
tr1_out[1],
2 * local_input_buf2,
rtol=1e-05,
atol=1e-05))
np.allclose(tr1_out[1],
2 * local_input_buf2,
rtol=1e-05,
atol=1e-05))

elif col_type == "global_scatter":
np.random.seed(pid0)
@@ -460,23 +455,19 @@ def check_with_place(self,
tr1_out[0] = np.array([])

self.assertTrue(
np.allclose(
tr0_out[0], output1, rtol=1e-05, atol=1e-05))
np.allclose(tr0_out[0], output1, rtol=1e-05, atol=1e-05))
self.assertTrue(
np.allclose(
tr1_out[0], output2, rtol=1e-05, atol=1e-05))
np.allclose(tr1_out[0], output2, rtol=1e-05, atol=1e-05))
if static_mode == 0:
self.assertTrue(
np.allclose(
tr0_out[1],
2 * local_input_buf1,
rtol=1e-05,
atol=1e-05))
np.allclose(tr0_out[1],
2 * local_input_buf1,
rtol=1e-05,
atol=1e-05))
self.assertTrue(
np.allclose(
tr1_out[1],
2 * local_input_buf2,
rtol=1e-05,
atol=1e-05))
np.allclose(tr1_out[1],
2 * local_input_buf2,
rtol=1e-05,
atol=1e-05))
else:
pass
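
For reference, below is a minimal standalone sketch of the forwarding pattern added to check_with_place above; build_trainer_env is an illustrative helper name, not part of the Paddle test harness. One caveat: os.getenv('NVIDIA_TF32_OVERRIDE', '') never returns None because a default value is supplied, so the guard in the merged hunk always passes and forwards an empty string when the variable is unset; the sketch checks the unset case explicitly.

import os
import subprocess
import sys


def build_trainer_env(required_envs):
    """Forward NVIDIA_TF32_OVERRIDE from the driver process to a trainer env.

    NVIDIA_TF32_OVERRIDE=0 makes cuBLAS/cuDNN fall back to FP32 for operations
    that would otherwise use TF32, keeping the spawned trainers numerically
    consistent with the references the allclose checks compare against.
    """
    value = os.getenv('NVIDIA_TF32_OVERRIDE')
    if value is not None:  # forward only when the variable is actually set
        required_envs['NVIDIA_TF32_OVERRIDE'] = value
    return required_envs


# Usage sketch: merge the forwarded variables into a trainer's environment
# before launching it, mirroring how env0/env1 are built in _run_cluster.
env0 = dict(os.environ)
env0.update(build_trainer_env({'GLOG_v': '3'}))
proc = subprocess.Popen(
    [sys.executable, '-c',
     'import os; print(os.getenv("NVIDIA_TF32_OVERRIDE"))'],
    stdout=subprocess.PIPE,
    env=env0)
out, _ = proc.communicate()
print(out.decode().strip())  # the forwarded value, or "None" when unset

Exporting NVIDIA_TF32_OVERRIDE=0 before running the test driver then reaches both trainers, so the matmul-based cases such as row_parallel_linear are evaluated in FP32 against their references under the rtol=1e-05/atol=1e-05 tolerances used above.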