diff --git a/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py b/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
index baa7392eb4f..7468fdce01d 100755
--- a/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
+++ b/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from collections import defaultdict
-from turtle import Turtle
 from typing import Dict
 from typing import List
 from typing import Tuple
diff --git a/paddlespeech/s2t/training/gradclip.py b/paddlespeech/s2t/training/gradclip.py
index be6fcf5899f..06587c749b5 100644
--- a/paddlespeech/s2t/training/gradclip.py
+++ b/paddlespeech/s2t/training/gradclip.py
@@ -56,15 +56,15 @@ def _dygraph_clip(self, params_grads):
         if len(sum_square_list) == 0:
             return params_grads
 
-        global_norm_var = layers.concat(sum_square_list)
+        global_norm_var = paddle.concat(sum_square_list)
         global_norm_var = paddle.sum(global_norm_var)
         global_norm_var = paddle.sqrt(global_norm_var)
         # debug log
         logger.debug(f"Grad Global Norm: {float(global_norm_var)}!!!!")
-        max_global_norm = layers.fill_constant(
-            shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm)
+        max_global_norm = paddle.full(
+            shape=[1], dtype=global_norm_var.dtype, fill_value=self.clip_norm)
         clip_var = paddle.divide(
             x=max_global_norm,
             y=paddle.maximum(x=global_norm_var, y=max_global_norm))