From 60d4798e2099775d61f399756912031a8224800d Mon Sep 17 00:00:00 2001 From: root Date: Tue, 4 Nov 2025 05:38:02 +0000 Subject: [PATCH] fix E-List --- _typos.toml | 12 ------------ ci_scripts/check_api_docs_en.py | 2 +- ci_scripts/check_api_docs_en.sh | 2 +- docs/design/dist_train/README.md | 2 +- docs/design/memory/memory_optimization.md | 2 +- docs/design/mkldnn/inplace/inplace.md | 2 +- docs/design/phi/kernel_migrate_cn.md | 2 +- docs/design/phi/kernel_migrate_en.md | 2 +- .../custom_device_docs/custom_device_example_en.md | 2 +- .../06_distributed_training/model_parallel_cn.rst | 2 +- docs/guides/advanced/customize_cn.ipynb | 4 ++-- docs/guides/advanced/layer_and_model_en.md | 2 +- .../torch.nn.functional.avg_pool1d.md | 2 +- .../torch.nn.functional.avg_pool2d.md | 2 +- .../torch.nn.functional.avg_pool3d.md | 2 +- .../convert_from_pytorch/nlp_fast_explore_cn.md | 2 +- docs/practices/nlp/seq2seq_with_attention.ipynb | 12 ++++++------ docs/practices/quick_start/high_level_api.ipynb | 4 ++-- 18 files changed, 24 insertions(+), 36 deletions(-) diff --git a/_typos.toml b/_typos.toml index 92ea00cff15..d5a7d483e93 100644 --- a/_typos.toml +++ b/_typos.toml @@ -26,9 +26,6 @@ Nervana = "Nervana" # These words need to be fixed Creenshot = "Creenshot" -Embeddding = "Embeddding" -Embeding = "Embeding" -Engish = "Engish" Learing = "Learing" Moible = "Moible" Operaton = "Operaton" @@ -63,15 +60,6 @@ dimention = "dimention" dimentions = "dimentions" dirrectories = "dirrectories" disucssion = "disucssion" -egde = "egde" -enviornment = "enviornment" -erros = "erros" -evalute = "evalute" -exampels = "exampels" -exection = "exection" -exlusive = "exlusive" -exmaple = "exmaple" -exsits = "exsits" feeded = "feeded" flaot = "flaot" fliters = "fliters" diff --git a/ci_scripts/check_api_docs_en.py b/ci_scripts/check_api_docs_en.py index ed7bb411b4d..468a495f68f 100644 --- a/ci_scripts/check_api_docs_en.py +++ b/ci_scripts/check_api_docs_en.py @@ -124,6 +124,6 @@ def 
check_system_message_in_doc(doc_file): if error_files: print("error files: ", error_files) print( - "ERROR: these docs exsits System Message: WARNING/ERROR, please check and fix them" + "ERROR: these docs exists System Message: WARNING/ERROR, please check and fix them" ) sys.exit(1) diff --git a/ci_scripts/check_api_docs_en.sh b/ci_scripts/check_api_docs_en.sh index a0e161c3825..e1578177344 100644 --- a/ci_scripts/check_api_docs_en.sh +++ b/ci_scripts/check_api_docs_en.sh @@ -13,7 +13,7 @@ function check_system_message(){ fi } -echo "RUN Engish API Docs Checks" +echo "RUN English API Docs Checks" jsonfn=$1 output_path=$2 need_check_api_py_files="${3}" diff --git a/docs/design/dist_train/README.md b/docs/design/dist_train/README.md index fe1283e51d5..0392fe87de0 100644 --- a/docs/design/dist_train/README.md +++ b/docs/design/dist_train/README.md @@ -48,7 +48,7 @@ The training process of asynchronous training can be: 2. Trainer gets all parameters back from pserver. ### Note: -There are also some conditions that need to consider. For exmaple: +There are also some conditions that need to consider. For example: 1. If trainer needs to wait for the pserver to apply it's gradient and then get back the parameters back. 1. If we need a lock between parameter update and parameter fetch. diff --git a/docs/design/memory/memory_optimization.md b/docs/design/memory/memory_optimization.md index fee0f493b6f..0526e4bc84c 100644 --- a/docs/design/memory/memory_optimization.md +++ b/docs/design/memory/memory_optimization.md @@ -60,7 +60,7 @@ We can leran these techniques from compilers. There are mainly two stages to mak #### Control Flow Graph -To perform analysis on a program, it is often useful to make a control flow graph. A [control flow graph](https://en.wikipedia.org/wiki/Control_flow_graph) (CFG) in computer science is a representation, using graph notation, of all paths that might be traversed through a program during its execution. 
Each statement in the program is a node in the flow graph; if statemment x can be followed by statement y, there is an egde from x to y. +To perform analysis on a program, it is often useful to make a control flow graph. A [control flow graph](https://en.wikipedia.org/wiki/Control_flow_graph) (CFG) in computer science is a representation, using graph notation, of all paths that might be traversed through a program during its execution. Each statement in the program is a node in the flow graph; if statement x can be followed by statement y, there is an edge from x to y. Following is the flow graph for a simple loop. diff --git a/docs/design/mkldnn/inplace/inplace.md b/docs/design/mkldnn/inplace/inplace.md index 5e4f0ae7669..10967c26de4 100644 --- a/docs/design/mkldnn/inplace/inplace.md +++ b/docs/design/mkldnn/inplace/inplace.md @@ -94,4 +94,4 @@ replace this original name in all of next op instances. \* oneDNN gelu kernel is able to perform in-place execution, but currently gelu op does not support in-place execution. -\*\* sum kernel is using oneDNN sum primitive that does not provide in-place exection, so in-place computation is done faked through external buffer. So it was not added into oneDNN inplace pass. +\*\* sum kernel is using oneDNN sum primitive that does not provide in-place execution, so in-place computation is done faked through external buffer. So it was not added into oneDNN inplace pass. 
diff --git a/docs/design/phi/kernel_migrate_cn.md b/docs/design/phi/kernel_migrate_cn.md index 5ad778a4be3..fb80ccc33b0 100644 --- a/docs/design/phi/kernel_migrate_cn.md +++ b/docs/design/phi/kernel_migrate_cn.md @@ -159,7 +159,7 @@ void LogSoftmaxKernel(const Context& dev_ctx, | `auto* ptr = out->mutbale_data()` | `auto* ptr = out->data()` | | `out->mutbale_data(dims, place)` | `out->Resize(dims); dev_ctx.template Alloc(out)` | | `out->mutbale_data(place, dtype)` | `dev_ctx.Alloc(out, dtype)` | -| `platform::erros::XXX` | `phi::erros::XXX` | +| `platform::errors::XXX` | `phi::errors::XXX` | | `platform::float16/bfloat16/complex64/complex128` | `dtype::float16/bfloat16/complex64/complex128` | | `framework::Eigen***` | `Eigen***` | | `platform::XXXPlace` | `phi::XXXPlace` | diff --git a/docs/design/phi/kernel_migrate_en.md b/docs/design/phi/kernel_migrate_en.md index e5d6987a2d6..26c653e0cdc 100644 --- a/docs/design/phi/kernel_migrate_en.md +++ b/docs/design/phi/kernel_migrate_en.md @@ -159,7 +159,7 @@ Secondly, it is necessary to replace some of the types or functions that were on | `auto* ptr = out->mutbale_data()` | `auto* ptr = out->data()` | | `out->mutbale_data(dims, place)` | `out->Resize(dims); dev_ctx.template Alloc(out)` | | `out->mutbale_data(place, dtype)` | `dev_ctx.Alloc(out, dtype)` | -| `platform::erros::XXX` | `phi::erros::XXX` | +| `platform::errors::XXX` | `phi::errors::XXX` | | `platform::float16/bfloat16/complex64/complex128` | `dtype::float16/bfloat16/complex64/complex128` | | `framework::Eigen***` | `Eigen***` | | `platform::XXXPlace` | `phi::XXXPlace` | diff --git a/docs/dev_guides/custom_device_docs/custom_device_example_en.md b/docs/dev_guides/custom_device_docs/custom_device_example_en.md index a0a46416af5..3d222e3d0fe 100644 --- a/docs/dev_guides/custom_device_docs/custom_device_example_en.md +++ b/docs/dev_guides/custom_device_docs/custom_device_example_en.md @@ -10,7 +10,7 @@ In this section we will walk through the steps required to 
extend a fake hardwar **InitPlugin** -As a custom runtime entry function, InitPlugin is required to be implemented by the plug-in. The parameter in InitPlugin should also be checked, device information should be filled in, and the runtime API should be registered. In the initialization, PaddlePaddle loads the plug-in and invokes InitPlugin to initialize it, and register runtime (The whole process can be done automatically by the framework, only if the dynamic-link library is in site-packages/paddle-plugins/ or the designated directory of the enviornment variable of CUSTOM_DEVICE_ROOT). +As a custom runtime entry function, InitPlugin is required to be implemented by the plug-in. The parameter in InitPlugin should also be checked, device information should be filled in, and the runtime API should be registered. In the initialization, PaddlePaddle loads the plug-in and invokes InitPlugin to initialize it, and register runtime (The whole process can be done automatically by the framework, only if the dynamic-link library is in site-packages/paddle-plugins/ or the designated directory of the environment variable of CUSTOM_DEVICE_ROOT). 
Example: diff --git a/docs/guides/06_distributed_training/model_parallel_cn.rst b/docs/guides/06_distributed_training/model_parallel_cn.rst index 985b0f4aa93..01b60faddd1 100644 --- a/docs/guides/06_distributed_training/model_parallel_cn.rst +++ b/docs/guides/06_distributed_training/model_parallel_cn.rst @@ -29,7 +29,7 @@ 对于 Embedding 操作,可以将其理解为一种查找表操作。即,将输入看做索引,将 Embedding 参数看做查找表,根据该索引查表得到相应的输出,如下图(a)所示。当采用模型并行时,Embedding 的参数被均匀切分到多个卡上。假设 Embedding 参数的维度为 N*D,并采用 K 张卡执行模型并行,那么模型并行模式下每张卡上的 Embedding 参数的维度为 N//K*D。当参数的维度 N 不能被卡数 K 整除时,最后一张卡的参数维度值为(N//K+N%K)*D。以下图(b)为例,Embedding 参数的维度为 8*D,采用 2 张卡执行模型并行,那么每张卡上 Embedding 参数的维度为 4*D。 -为了便于说明,以下我们均假设 Embedding 的参数维度值 D 可以被模型并行的卡数 D 整除。此时,每张卡上 Embedding 参数的索引值为[0, N/K),逻辑索引值为[k*N/K, (k+1)*N/K),其中 k 表示卡序号,0<=k\"]]] * num_of_exampels_to_evaluate)\n", + "word = np.array([[cn_vocab[\"\"]]] * num_of_examples_to_evaluate)\n", "word = paddle.to_tensor(word)\n", "\n", - "hidden = paddle.zeros([num_of_exampels_to_evaluate, 1, hidden_size])\n", - "cell = paddle.zeros([num_of_exampels_to_evaluate, 1, hidden_size])\n", + "hidden = paddle.zeros([num_of_examples_to_evaluate, 1, hidden_size])\n", + "cell = paddle.zeros([num_of_examples_to_evaluate, 1, hidden_size])\n", "\n", "decoded_sent = []\n", "for i in range(MAX_LEN + 2):\n", @@ -693,7 +693,7 @@ " word = paddle.unsqueeze(word, axis=-1)\n", "\n", "results = np.stack(decoded_sent, axis=1)\n", - "for i in range(num_of_exampels_to_evaluate):\n", + "for i in range(num_of_examples_to_evaluate):\n", " en_input = \" \".join(filtered_pairs[indices[i]][0])\n", " ground_truth_translate = \"\".join(filtered_pairs[indices[i]][1])\n", " model_translate = \"\"\n", diff --git a/docs/practices/quick_start/high_level_api.ipynb b/docs/practices/quick_start/high_level_api.ipynb index 2a384912b3e..a79e551dbdb 100755 --- a/docs/practices/quick_start/high_level_api.ipynb +++ b/docs/practices/quick_start/high_level_api.ipynb @@ -941,8 +941,8 @@ " def on_epoch_end(self, epoch, logs=None) 
每轮训练结束后,`Model.fit`接口中调用 \n", " def on_train_batch_begin(self, step, logs=None) 单个Batch训练开始前,`Model.fit`和`Model.train_batch`接口中调用\n", " def on_train_batch_end(self, step, logs=None) 单个Batch训练结束后,`Model.fit`和`Model.train_batch`接口中调用\n", - " def on_eval_batch_begin(self, step, logs=None) 单个Batch评估开始前,`Model.evalute`和`Model.eval_batch`接口中调用\n", - " def on_eval_batch_end(self, step, logs=None) 单个Batch评估结束后,`Model.evalute`和`Model.eval_batch`接口中调用\n", + " def on_eval_batch_begin(self, step, logs=None) 单个Batch评估开始前,`Model.evaluate`和`Model.eval_batch`接口中调用\n", + " def on_eval_batch_end(self, step, logs=None) 单个Batch评估结束后,`Model.evaluate`和`Model.eval_batch`接口中调用\n", " def on_test_batch_begin(self, step, logs=None) 单个Batch预测测试开始前,`Model.predict`和`Model.test_batch`接口中调用\n", " def on_test_batch_end(self, step, logs=None) 单个Batch预测测试结束后,`Model.predict`和`Model.test_batch`接口中调用\n", " \"\"\"\n",