@@ -39,7 +39,7 @@
 from tensorflow.python.ops import resource_variable_ops
 from tensorflow.python.ops import state_ops
 from tensorflow.python.ops import variables
-from tensorflow.python.platform import build_info
+from tensorflow.python.platform import sysconfig
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.util.tf_export import keras_export
 
@@ -650,7 +650,7 @@ def gpu_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major,
   # (6 * units)
   bias = array_ops.split(K.flatten(bias), 6)
 
-  if build_info.build_info['is_cuda_build']:
+  if sysconfig.get_build_info()['is_cuda_build']:
     # Note that the gate order for CuDNN is different from the canonical format.
     # canonical format is [z, r, h], whereas CuDNN is [r, z, h]. The swap need
     # to be done for kernel, recurrent_kernel, input_bias, recurrent_bias.
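For context, the comment above describes the gate-block reordering the CuDNN path performs. The following is a minimal NumPy sketch, not the actual gpu_gru code; the helper name and the (input_dim, 3 * units) kernel shape are illustrative assumptions. It moves the three concatenated gate blocks from the canonical order [z, r, h] into the CuDNN order [r, z, h].

```python
import numpy as np

def canonical_to_cudnn_gru_kernel(kernel):
    """Reorder GRU gate blocks from canonical [z, r, h] to CuDNN [r, z, h].

    `kernel` is assumed to have shape (input_dim, 3 * units), with the three
    gate blocks concatenated along the last axis.
    """
    z, r, h = np.split(kernel, 3, axis=-1)      # canonical gate order
    return np.concatenate([r, z, h], axis=-1)   # CuDNN gate order

# Hypothetical example: input_dim=4, units=2 -> kernel of shape (4, 6).
kernel = np.arange(24, dtype=np.float32).reshape(4, 6)
print(canonical_to_cudnn_gru_kernel(kernel).shape)  # (4, 6)
```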
@@ -1454,7 +1454,7 @@ def gpu_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask,
   # so that mathematically it is same as the canonical LSTM implementation.
   full_bias = array_ops.concat((array_ops.zeros_like(bias), bias), 0)
 
-  if build_info.build_info['is_rocm_build']:
+  if sysconfig.get_build_info()['is_rocm_build']:
     # ROCm MIOpen's weight sequence for LSTM is different from both canonical
     # and Cudnn format
     # MIOpen: [i, f, o, c] Cudnn/Canonical: [i, f, c, o]
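Both conditionals now read the build flags through the public build-info accessor on `sysconfig` rather than the internal `build_info` module attribute. A minimal usage sketch, assuming a TensorFlow release recent enough to expose `get_build_info()` under `tf.sysconfig`:

```python
import tensorflow as tf

# tf.sysconfig.get_build_info() returns a dict describing how the installed
# TensorFlow binary was built; the keys used in the diff above are part of it.
build = tf.sysconfig.get_build_info()

print(build['is_cuda_build'])  # True only for CUDA-enabled builds
print(build['is_rocm_build'])  # True only for ROCm-enabled builds
```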