diff --git a/_typos.toml b/_typos.toml
index 7ca57f18a41..48c85a9e435 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -36,16 +36,7 @@ setted = "setted"
 shoule = "shoule"
 similary = "similary"
 simplier = "simplier"
-skiped = "skiped"
 softwares = "softwares"
-specail = "specail"
 sperated = "sperated"
-splited = "splited"
 splitted = "splitted"
-stirng = "stirng"
-stradegy = "stradegy"
-subract = "subract"
-sucess = "sucess"
-sucessor = "sucessor"
-sucessors = "sucessors"
 szie = "szie"
diff --git a/ci_scripts/check_api_parameters.py b/ci_scripts/check_api_parameters.py
index c6662f86903..c3c20945d99 100644
--- a/ci_scripts/check_api_parameters.py
+++ b/ci_scripts/check_api_parameters.py
@@ -184,7 +184,7 @@ def check_api_parameters(rstfiles, apiinfo):
     """check function's parameters same as its origin definition.
 
     TODO:
-        1. All the documents of classes are skiped now. As
-        (1) there ars many class methods in documents, may break the scripts.
+        1. All the documents of classes are skipped now. As
+        (1) there are many class methods in documents, which may break the scripts.
         (2) parameters of Class should be checked with its `__init__` method.
         2. Some COMPLICATED annotations may break the scripts.
diff --git a/docs/api/gen_doc.py b/docs/api/gen_doc.py
index 16722564727..0c510dd8164 100755
--- a/docs/api/gen_doc.py
+++ b/docs/api/gen_doc.py
@@ -262,8 +262,8 @@ def parse_module_file(mod):
         return
    logger.debug("parsing %s:%s", mod_name, src_file)
    if len(mod_name) >= 6 and mod_name[:6] == "paddle":
-        fn_splited = os.path.splitext(src_file)
-        if len(fn_splited) > 1 and fn_splited[1].lower() == ".py":
+        fn_split = os.path.splitext(src_file)
+        if len(fn_split) > 1 and fn_split[1].lower() == ".py":
            mod_ast = ast.parse(open(src_file, "r").read())
            for node in mod_ast.body:
                short_names = []
diff --git a/docs/design/memory/memory_optimization.md b/docs/design/memory/memory_optimization.md
index faefed25a9f..3ea071e1dfa 100644
--- a/docs/design/memory/memory_optimization.md
+++ b/docs/design/memory/memory_optimization.md
@@ -3,7 +3,7 @@
 
 ## Problem
 
-In a lecture from Andrew Ng, he attributes the recent sucess of AI due to a combination of these:
+In a lecture, Andrew Ng attributes the recent success of AI to a combination of these:
 
 - Availability of Big Data
 - Supercomputing power to process this Big Data over very large neural networks
@@ -74,7 +74,7 @@ A simple way to perform data-flow analysis of programs is to set up dataflow equ
 - Flow Graph Terminology
 
-A flow graph node has out-edges that lead to sucessor nodes, and in-edges that come from predecessor nodes. The set *pred[n]* is all the predecessors of node n, and *succ[n]* is the set of sucessors.
+A flow graph node has out-edges that lead to successor nodes, and in-edges that come from predecessor nodes. The set *pred[n]* is all the predecessors of node n, and *succ[n]* is the set of successors.
 
 In former control flow graph, the out-edges of node 5 are 5 --> 6 and 5 --> 2, and *succ[5]* = {2, 6}. The in-edges of 2 are 5 --> 2 and 1 --> 2, and *pred[2]* = {1, 5}.
 
 - Uses and Defs
@@ -147,7 +147,7 @@ We can transfer all the operators and variables in ProgramDesc to build a control flow graph.
 ```python
 class ControlFlowGraph:
     def __init__(self, Program):
-        self._sucessors = defaultdict(set)
-        self._presucessors = defaultdict(set)
+        self._successors = defaultdict(set)
+        self._presuccessors = defaultdict(set)
         self._uses = defaultdict(set)
         self._defs = defaultdict(set)
diff --git a/docs/design/modules/python_api.md b/docs/design/modules/python_api.md
index 19d4ce330b3..fffbf85968e 100644
--- a/docs/design/modules/python_api.md
+++ b/docs/design/modules/python_api.md
@@ -116,7 +116,7 @@ class Operator:
                 block,    # Block
                 type,     # string
                 inputs,   # dict
-                outputs,# dict
+                outputs,  # dict
                 attrs     # dict
             ):
         self.desc = core.NewOpDesc(block.desc, type, inputs, outputs, attrs)
diff --git a/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md b/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md
index c28310924ba..ff7851210bc 100644
--- a/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md
+++ b/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md
@@ -242,7 +242,7 @@
 | before paddle2.0 | pytorch | numpy | tensorflow | after paddle2.0 |
 | :------------- | :----- | :------ | :--------- | :--------------- |
 | elementwise_add | add | add | add | add |
-| elementwise_sub | sub | subtract | subtract | subract |
+| elementwise_sub | sub | subtract | subtract | subtract |
 | elementwise_mul | mul | multiply | multiply | multiply |
 | elementwise_div | div | divide | divide | divide |
 | elementwise_min | min | minimum | minimum | minimum |
diff --git "a/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md" "b/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md"
index b6bdcde327f..7ee28e7b285 100644
--- "a/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md"
+++ "b/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md"
@@ -105,13 +105,13 @@ else:
 ```python
 if self._nranks > 1:
     dist.init_parallel_env()
-    stradegy = fluid.dygraph.parallel.ParallelStrategy()
-    stradegy.nranks = ParallelEnv().nranks
-    stradegy.local_rank = ParallelEnv().local_rank
-    stradegy.trainer_endpoints = ParallelEnv().trainer_endpoints
-    stradegy.current_endpoint = ParallelEnv().current_endpoint
+    strategy = fluid.dygraph.parallel.ParallelStrategy()
+    strategy.nranks = ParallelEnv().nranks
+    strategy.local_rank = ParallelEnv().local_rank
+    strategy.trainer_endpoints = ParallelEnv().trainer_endpoints
+    strategy.current_endpoint = ParallelEnv().current_endpoint
     self.ddp_model = fluid.dygraph.parallel.DataParallel(
-        self.model.network, stradegy)
+        self.model.network, strategy)
 ```
 
 In this way, the distributed environment and the distributed model have already been constructed internally within paddle.Model.
diff --git a/docs/guides/advanced/auto_type_promotion_en.md b/docs/guides/advanced/auto_type_promotion_en.md
index a5ec6426e20..0338fad46d0 100644
--- a/docs/guides/advanced/auto_type_promotion_en.md
+++ b/docs/guides/advanced/auto_type_promotion_en.md
@@ -230,7 +230,7 @@ Number | API | Tensor-to-Tensor | Tensor-to-Scalar |
 31 | huber_loss | Common Rule | - |
 32 | mse_loss | Common Rule | - |
 
-There are two specail rules in this table above:
+There are two special rules in the table above:
 
 - Divide Rule: For divide API, it will not return dtype smaller than float. Such as int32 / Scalar returns float32.
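Since the last hunk touches the description of the Divide Rule, a quick empirical check of that rule may be useful. This is a minimal sketch assuming a recent Paddle build with automatic type promotion enabled; the tensor values are arbitrary:

```python
import paddle

# An integer tensor divided by a Python scalar should not keep an integer
# dtype: per the Divide Rule quoted above, the result is promoted to float.
x = paddle.to_tensor([4, 9], dtype="int32")
y = x / 2
print(y.dtype)  # expected: paddle.float32
```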
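Beyond the renames, the memory_optimization.md hunks sit in a design doc whose central idea is liveness analysis over the flow-graph terminology quoted above (*succ[n]*, *pred[n]*, uses, defs). As a self-contained sketch of how those pieces fit together, the fragment below iterates the standard liveness dataflow equations to a fixed point. The `add_edge` and `solve` helpers and the `_predecessors`, `_live_in`, and `_live_out` names are illustrative assumptions, not Paddle's actual implementation:

```python
from collections import defaultdict


class ControlFlowGraph:
    """Minimal CFG with the corrected attribute spellings."""

    def __init__(self):
        self._successors = defaultdict(set)
        self._predecessors = defaultdict(set)
        self._uses = defaultdict(set)   # variables read by each node
        self._defs = defaultdict(set)   # variables written by each node
        self._live_in = defaultdict(set)
        self._live_out = defaultdict(set)

    def add_edge(self, src, dst):
        self._successors[src].add(dst)
        self._predecessors[dst].add(src)

    def solve(self, nodes):
        """Fixed-point iteration of the liveness equations:
        live_out[n] = union of live_in[s] for s in succ[n]
        live_in[n]  = uses[n] | (live_out[n] - defs[n])
        """
        changed = True
        while changed:
            changed = False
            for n in reversed(nodes):  # reverse order converges faster
                live_out = set()
                for s in self._successors[n]:
                    live_out |= self._live_in[s]
                live_in = self._uses[n] | (live_out - self._defs[n])
                if (live_in, live_out) != (self._live_in[n], self._live_out[n]):
                    self._live_in[n], self._live_out[n] = live_in, live_out
                    changed = True


# Tiny example: node 1 defines x and node 2 uses it, so x is live on 1 --> 2.
graph = ControlFlowGraph()
graph.add_edge(1, 2)
graph._defs[1].add("x")
graph._uses[2].add("x")
graph.solve([1, 2])
print(graph._live_out[1])  # {'x'}
```

Once the fixed point is reached, a memory optimizer in the spirit of that design doc can reuse the buffer of any variable that no longer appears in a node's live-out set.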