From 420439d10837010e1e94f08306b454fbab070de4 Mon Sep 17 00:00:00 2001 From: LiaoYFBH <2273398935@qq.com> Date: Sun, 9 Nov 2025 14:31:17 +0800 Subject: [PATCH] Fix(typo): Resolve typos S-1, S-3, S-6, S-8, S-9 (#7543) --- _typos.toml | 5 ----- docs/api/paddle/optimizer/lr/CyclicLR_cn.rst | 2 +- docs/design/memory/memory_optimization.md | 2 +- .../quantization/training_quantization_model_format.md | 2 +- .../op_optimization/kernel_primitive_api/model_example_en.md | 2 +- docs/guides/jit/grammar_list_en.md | 2 +- docs/guides/model_convert/convert_with_x2paddle_cn.md | 4 ++-- 7 files changed, 7 insertions(+), 12 deletions(-) diff --git a/_typos.toml b/_typos.toml index ec74897c8b6..450fdc4e956 100644 --- a/_typos.toml +++ b/_typos.toml @@ -27,17 +27,14 @@ Nervana = "Nervana" datas = "datas" # These words need to be fixed -Creenshot = "Creenshot" Learing = "Learing" Moible = "Moible" Operaton = "Operaton" Optimizaing = "Optimizaing" Optimzier = "Optimzier" Setment = "Setment" -Similarily = "Similarily" Simle = "Simle" Sovler = "Sovler" -Successed = "Successed" desgin = "desgin" desginated = "desginated" desigin = "desigin" @@ -95,9 +92,7 @@ overrided = "overrided" overwrited = "overwrited" porcess = "porcess" processer = "processer" -sacle = "sacle" samle = "samle" -satifies = "satifies" schedual = "schedual" secenarios = "secenarios" sematic = "sematic" diff --git a/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst b/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst index ba3c1daa086..67b9b2b74a0 100644 --- a/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst +++ b/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst @@ -24,7 +24,7 @@ CyclicLR - **step_size_down** (int,可选) - 学习率从最大学习率下降到初始学习率所需步数。若未指定,则其值默认等于 ``step_size_up`` 。 - **mode** (str,可选) - 可以是 triangular、triangular2 或者 exp_range,对应策略已在上文描述,当 scale_fn 被指定时时,该参数将被忽略。默认值为 triangular。 - **exp_gamma** (float,可选) - exp_range 缩放函数中的常量。默认值为 1.0。 - - **sacle_fn** (function,可选) - 一个有且仅有单个参数的函数,且对于任意的输入 x,都必须满足 0 ≤ scale_fn(x) ≤ 
1;如果该参数被指定,则会忽略 mode 参数。默认值为 ``False`` 。 + - **scale_fn** (function,可选) - 一个有且仅有单个参数的函数,且对于任意的输入 x,都必须满足 0 ≤ scale_fn(x) ≤ 1;如果该参数被指定,则会忽略 mode 参数。默认值为 ``False`` 。 - **scale_mode** (str,可选) - cycle 或者 iterations,表示缩放函数使用 cycle 数或 iterations 数作为输入。默认值为 cycle。 - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 diff --git a/docs/design/memory/memory_optimization.md b/docs/design/memory/memory_optimization.md index 0526e4bc84c..ba5f7c14efb 100644 --- a/docs/design/memory/memory_optimization.md +++ b/docs/design/memory/memory_optimization.md @@ -197,7 +197,7 @@ After op1, we can process variable b and variable c; After op2, we can process v #### memory sharing policy -A memory pool will be mantained in the stage of memory optimization. Each operator node will be scanned to determine memory optimization is done or not. If an operator satifies the requirement, following policy will be taken to handle input/output variables. +A memory pool will be maintained in the stage of memory optimization. Each operator node will be scanned to determine memory optimization is done or not. If an operator satisfies the requirement, following policy will be taken to handle input/output variables. 
``` if op.support_inplace(): diff --git a/docs/design/quantization/training_quantization_model_format.md b/docs/design/quantization/training_quantization_model_format.md index 90784c6abdf..48488d1fcd0 100644 --- a/docs/design/quantization/training_quantization_model_format.md +++ b/docs/design/quantization/training_quantization_model_format.md @@ -26,7 +26,7 @@ fake_quantize_abs_max { ### 1.2 静态量化 -与动态量化不同,静态量化的量化 scale 是在量化训练时通过**窗口滑动平均**或者**窗口绝对值最大值**等方法计算求得的。静态量化主要通过`fake_quantize_moving_average_abs_max`op 或者`fake_quantize_range_abs_max`op 实现,它们利用输入的量化 scale 将输入 tensor 量化到-127~127 值域范围内。`fake_quantize_moving_average_abs_max`op 和`fake_quantize_range_abs_max`op 的输入和输出格式都是一样的,不同点在于 op 内部计算量化 scale 时使用的策略不同。`fake_quantize_moving_average_abs_max`op 使用一个窗口内绝对值最大值的滑动平均值作为量化 sacle,而`fake_quantize_range_abs_max`op 使用一个窗口内绝对值最大值的最大值作为量化 sacle。下面以`fake_quantize_moving_average_abs_max`op 为例,对其进行整体描述: +与动态量化不同,静态量化的量化 scale 是在量化训练时通过**窗口滑动平均**或者**窗口绝对值最大值**等方法计算求得的。静态量化主要通过`fake_quantize_moving_average_abs_max`op 或者`fake_quantize_range_abs_max`op 实现,它们利用输入的量化 scale 将输入 tensor 量化到-127~127 值域范围内。`fake_quantize_moving_average_abs_max`op 和`fake_quantize_range_abs_max`op 的输入和输出格式都是一样的,不同点在于 op 内部计算量化 scale 时使用的策略不同。`fake_quantize_moving_average_abs_max`op 使用一个窗口内绝对值最大值的滑动平均值作为量化 scale,而`fake_quantize_range_abs_max`op 使用一个窗口内绝对值最大值的最大值作为量化 scale。下面以`fake_quantize_moving_average_abs_max`op 为例,对其进行整体描述: ``` fake_quantize_moving_average_abs_max { diff --git a/docs/dev_guides/op_optimization/kernel_primitive_api/model_example_en.md b/docs/dev_guides/op_optimization/kernel_primitive_api/model_example_en.md index 4573f6c1521..46ee4c8cd48 100644 --- a/docs/dev_guides/op_optimization/kernel_primitive_api/model_example_en.md +++ b/docs/dev_guides/op_optimization/kernel_primitive_api/model_example_en.md @@ -41,7 +41,7 @@ nohup python tools/train.py \ -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml \ -o Global.device=xpu > ResNet50_xpu2.log & ``` -+ 5. Creenshot is as follows:
++ 5. Screenshot is as follows:
![Model](./images/example_model.png) ### XPU2 Kernel Primitive API Model List diff --git a/docs/guides/jit/grammar_list_en.md b/docs/guides/jit/grammar_list_en.md index 48d719ce5ea..524e10fe39e 100644 --- a/docs/guides/jit/grammar_list_en.md +++ b/docs/guides/jit/grammar_list_en.md @@ -265,7 +265,7 @@ def sort_list(x, y): - Don't support get shape after a reshape operators. You may get a -1 in shape value. -For example, `x = reshape(x, shape=shape_tensor)` , then use `x.shape[0]` to do other operation. Due to the difference between dynamic and static graph, it is okay in dynamic but it will fail in static graph. The reason is that APIs return computation result in dynamic graph mode, so x.shape has deterministic value after calling reshape . However, static graph doesn’t have the value shape_tensor during building network, so PaddlePaddle doesn’t know the value of x.shape after calling reshape. PaddlePaddle static graph will set -1 to represent unknown shape value for each dimension of x.shape in this case, not the expected value. Similarily, calling the shape of the output tensor of those APIs which change the shape, such as expend, cannot be converted into static graph properly. +For example, `x = reshape(x, shape=shape_tensor)` , then use `x.shape[0]` to do other operation. Due to the difference between dynamic and static graph, it is okay in dynamic but it will fail in static graph. The reason is that APIs return computation result in dynamic graph mode, so x.shape has deterministic value after calling reshape . However, static graph doesn’t have the value shape_tensor during building network, so PaddlePaddle doesn’t know the value of x.shape after calling reshape. PaddlePaddle static graph will set -1 to represent unknown shape value for each dimension of x.shape in this case, not the expected value. Similarly, calling the shape of the output tensor of those APIs which change the shape, such as expand, cannot be converted into static graph properly. 
#### examples : diff --git a/docs/guides/model_convert/convert_with_x2paddle_cn.md b/docs/guides/model_convert/convert_with_x2paddle_cn.md index 58e2d17f2f2..93c270f3385 100644 --- a/docs/guides/model_convert/convert_with_x2paddle_cn.md +++ b/docs/guides/model_convert/convert_with_x2paddle_cn.md @@ -332,14 +332,14 @@ try: if relative_diff >= 1e-05: is_successd = False if is_successd: - f.write("Dygraph Successed\n") + f.write("Dygraph Succeeded\n") else: f.write("!!!!!Dygraph Failed\n") except: f.write("!!!!!Failed\n") ``` -最终比较结果写在 result.txt 当中,若显示 Dygraph Successed 表示成功,验证通过后,则可使用 [Paddle Inference](https://www.paddlepaddle.org.cn/inference/product_introduction/inference_intro.html) 部署该模型。 +最终比较结果写在 result.txt 当中,若显示 Dygraph Succeeded 表示成功,验证通过后,则可使用 [Paddle Inference](https://www.paddlepaddle.org.cn/inference/product_introduction/inference_intro.html) 部署该模型。 ## 三、迁移 ONNX 模型示例