7 changes: 0 additions & 7 deletions _typos.toml
@@ -35,13 +35,6 @@ Optimzier = "Optimzier"
Setment = "Setment"
Simle = "Simle"
Sovler = "Sovler"
inferface = "inferface"
infor = "infor"
instert = "instert"
instrinsics = "instrinsics"
interchangable = "interchangable"
intializers = "intializers"
intput = "intput"
libary = "libary"
matrics = "matrics"
metrices = "metrices"
72 changes: 38 additions & 34 deletions ci_scripts/CAPItools/utils_helper.py
@@ -219,7 +219,7 @@ def decode(self):
# avoid parsing empty functions
self.init_func = self.class_name

self.functions_infor = []
self.functions_info = []
# Note: there may also be functions under private in the future
# Note: constructors defined inside functions may be parsed incorrectly and need to be checked later
self.class_function_number = len(self.class_dict["methods"]["public"])
@@ -267,7 +267,7 @@ def decode(self):
"param_intro"
][param_name]

self.functions_infor.append(
self.functions_info.append(
{
"name": function_name,
"doxygen": funcs_intro,
@@ -322,46 +322,48 @@ def create_and_write_file_cn(self, save_dir, language):
)
f.write(class_function_head_text)

for fun_infor in self.functions_infor:
if fun_infor["template"] == "":
for fun_info in self.functions_info:
if fun_info["template"] == "":
fun_name_and_intro_text = ""
else:
fun_name_and_intro_text = f"{fun_infor['template']}\n"
fun_name_and_intro_text = f"{fun_info['template']}\n"
fun_name_and_intro_text += (
f"{fun_infor['name']}\n"
f"{fun_info['name']}\n"
+ "'''''''''''\n"
+ f"{fun_infor['doxygen']}\n"
+ f"{fun_info['doxygen']}\n"
+ "\n"
)
f.write(fun_name_and_intro_text)

if fun_infor["note"] != "":
if fun_info["note"] != "":
fun_note_text = (
"..note::\n" + f"\t{fun_infor['note']}\n" + "\n"
"..note::\n" + f"\t{fun_info['note']}\n" + "\n"
)
f.write(fun_note_text)

if len(fun_infor["parameter"]) != 0:
if len(fun_info["parameter"]) != 0:
parameters_text = "**参数**\n" + "'''''''''''\n"
f.write(parameters_text)
for param in fun_infor["parameter"].keys():
for param in fun_info["parameter"].keys():
param_text = f"\t- **{param}**"
if fun_infor["parameter"][param]["type"] != "":
param_text += f" ({fun_infor['parameter'][param]['type']})"
if fun_infor["parameter"][param]["intro"] != "":
param_text += f" - {fun_infor['parameter'][param]['intro']}"
if fun_info["parameter"][param]["type"] != "":
param_text += (
f" ({fun_info['parameter'][param]['type']})"
)
if fun_info["parameter"][param]["intro"] != "":
param_text += f" - {fun_info['parameter'][param]['intro']}"
param_text += "\n"
f.write(param_text)
f.write("\n")

if (
fun_infor["returns"] != ""
and "void" not in fun_infor["returns"]
fun_info["returns"] != ""
and "void" not in fun_info["returns"]
):
fun_return_text = (
"**返回**\n"
+ "'''''''''''\n"
+ f"{fun_infor['returns']}\n"
+ f"{fun_info['returns']}\n"
+ "\n"
)
f.write(fun_return_text)
@@ -399,46 +401,48 @@ def create_and_write_file_en(self, save_dir, language):
)
f.write(class_function_head_text)

for fun_infor in self.functions_infor:
if fun_infor["template"] == "":
for fun_info in self.functions_info:
if fun_info["template"] == "":
fun_name_and_intro_text = ""
else:
fun_name_and_intro_text = f"{fun_infor['template']}\n"
fun_name_and_intro_text = f"{fun_info['template']}\n"
fun_name_and_intro_text += (
f"{fun_infor['name']}\n"
f"{fun_info['name']}\n"
+ "'''''''''''\n"
+ f"{fun_infor['doxygen']}\n"
+ f"{fun_info['doxygen']}\n"
+ "\n"
)
f.write(fun_name_and_intro_text)

if fun_infor["note"] != "":
if fun_info["note"] != "":
fun_note_text = (
"..note::\n" + f"\t{fun_infor['note']}\n" + "\n"
"..note::\n" + f"\t{fun_info['note']}\n" + "\n"
)
f.write(fun_note_text)

if len(fun_infor["parameter"]) != 0:
if len(fun_info["parameter"]) != 0:
parameters_text = "**Parameters**\n" + "'''''''''''\n"
f.write(parameters_text)
for param in fun_infor["parameter"].keys():
for param in fun_info["parameter"].keys():
param_text = f"\t- **{param}**"
if fun_infor["parameter"][param]["type"] != "":
param_text += f" ({fun_infor['parameter'][param]['type']})"
if fun_infor["parameter"][param]["intro"] != "":
param_text += f" - {fun_infor['parameter'][param]['intro']}"
if fun_info["parameter"][param]["type"] != "":
param_text += (
f" ({fun_info['parameter'][param]['type']})"
)
if fun_info["parameter"][param]["intro"] != "":
param_text += f" - {fun_info['parameter'][param]['intro']}"
param_text += "\n"
f.write(param_text)
f.write("\n")

if (
fun_infor["returns"] != ""
and "void" not in fun_infor["returns"]
fun_info["returns"] != ""
and "void" not in fun_info["returns"]
):
fun_return_text = (
"**Returns**\n"
+ "'''''''''''\n"
+ f"{fun_infor['returns']}\n"
+ f"{fun_info['returns']}\n"
+ "\n"
)
f.write(fun_return_text)
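For orientation, the writer loops above read a fixed set of keys from each `functions_info` entry; a hypothetical example of that shape (illustrative values only, not taken from the real parser output) is shown below.

```python
# Hypothetical sketch of a single self.functions_info entry, inferred only from the
# fields read by create_and_write_file_cn/_en above; the real output of decode() may differ.
example_entry = {
    "name": "Forward",
    "doxygen": "Computes the forward pass of the layer.",
    "note": "",                                  # empty notes are skipped by the writer
    "template": "template <typename T>",         # may be an empty string (handled specially by the writer)
    "parameter": {
        "input": {"type": "const Tensor&", "intro": "the input tensor"},
    },
    "returns": "Tensor, the result of the computation",  # empty or "void" results are skipped
}
```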
4 changes: 2 additions & 2 deletions docs/api/copy_codes_from_en_doc.py
@@ -237,7 +237,7 @@ def find_codeblock_needed(cf_info):
return None


def instert_codes_into_cn_rst_if_need(cnrstfilename):
def insert_codes_into_cn_rst_if_need(cnrstfilename):
"""
Analyse the cn rst file and, if needed, extract code-blocks from the en docstring.
"""
@@ -290,7 +290,7 @@ def instert_codes_into_cn_rst_if_need(cnrstfilename):


def filter_all_files(
rootdir, ext="_cn.rst", action=instert_codes_into_cn_rst_if_need
rootdir, ext="_cn.rst", action=insert_codes_into_cn_rst_if_need
):
"""
find all the _en.html files, and do the action.
2 changes: 1 addition & 1 deletion docs/design/data_type/float16.md
@@ -93,7 +93,7 @@ To support the above features, two fundamental conversion functions are provided
float16 float_to_half_rn(float f); // convert to half precision in round-to-nearest-even mode
float half_to_float(float16 h);
```
which provide one-to-one conversion between float32 and float16. These two functions use different conversion routines depending on the current hardware. CUDA/ARM instrinsics will be used when the corresponding hardware is available. If the hardware or compiler level does not support float32 to float16 conversion, software emulation will be performed to do the conversion.
which provide one-to-one conversion between float32 and float16. These two functions use different conversion routines depending on the current hardware. CUDA/ARM intrinsics will be used when the corresponding hardware is available. If the hardware or compiler level does not support float32 to float16 conversion, software emulation will be performed to do the conversion.
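A quick way to observe this narrowing and widening behaviour from Python, without going through the C++ functions above, is numpy's IEEE 754 half-precision type; the snippet below is an illustration only and is not Paddle's implementation.

```python
# Illustration only: numpy's float16 uses the same IEEE 754 half-precision format,
# so it can be used to observe the narrowing/widening behaviour described above.
import numpy as np

f32 = np.float32(0.1)          # 0.1 is not exactly representable in binary floating point
f16 = f32.astype(np.float16)   # narrowing conversion, round-to-nearest-even
back = f16.astype(np.float32)  # widening conversion is exact: every float16 value is also a float32

print(f32 == back)             # False: precision was lost in the narrowing step
```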

## float16 inference
In Fluid, a neural network is represented as a protobuf message called [ProgramDesc](https://github.com/PaddlePaddle/docs/blob/develop/docs/design/concepts/program.md), whose Python wrapper is a [Program](https://github.com/PaddlePaddle/docs/blob/develop/docs/design/modules/python_api.md#program). The basic structure of a program is some nested [blocks](https://github.com/PaddlePaddle/docs/blob/develop/docs/design/modules/python_api.md#block), where each block consists of some [variable](https://github.com/PaddlePaddle/docs/blob/develop/docs/design/modules/python_api.md#variable) definitions and a sequence of [operators](https://github.com/PaddlePaddle/docs/blob/develop/docs/design/modules/python_api.md#operator). An [executor](https://github.com/PaddlePaddle/docs/blob/develop/docs/design/concepts/executor.md) will run a given program desc by executing the sequence of operators in the entrance block of the program one by one.
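As a concrete, heavily simplified illustration of that Program/Block/Operator/Executor relationship, a minimal sketch using the legacy `paddle.fluid` Python API might look like the following (the layer and helper names here are assumptions and may differ across Paddle versions):

```python
# A minimal sketch, assuming the legacy paddle.fluid Python API; it only illustrates
# the Program -> Block -> Operator -> Executor relationship described above.
import numpy as np
import paddle.fluid as fluid

x = fluid.layers.data(name="x", shape=[4], dtype="float32")  # adds a variable to the entrance block
y = fluid.layers.fc(input=x, size=2)                         # appends an fc operator to the same block

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())                     # run the parameter-initialization ops once

out, = exe.run(fluid.default_main_program(),
               feed={"x": np.random.rand(1, 4).astype("float32")},
               fetch_list=[y])                                # executes the block's operators one by one
```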
2 changes: 1 addition & 1 deletion docs/design/modules/infer_var_type.md
@@ -10,7 +10,7 @@ The variable type will be constant at runtime. Every variable's type can either

## Proposed Solution

The `InferVarType` is a compile-time function which is registered to each operator. The inferface of that function is:
The `InferVarType` is a compile-time function which is registered to each operator. The interface of that function is:


```c++
2 changes: 1 addition & 1 deletion docs/design/others/graph.md
@@ -56,7 +56,7 @@ For each parameter, like W and b created by `layer.fc`, marked as double circles

## Block and Graph

The words block and graph are interchangable in the design of PaddlePaddle. A [Block](https://github.com/PaddlePaddle/Paddle/pull/3708) is a metaphor of the code and local variables in a pair of curly braces in programming languages, where operators are like statements or instructions. A graph of operators and variables is a representation of the block.
The words block and graph are interchangeable in the design of PaddlePaddle. A [Block](https://github.com/PaddlePaddle/Paddle/pull/3708) is a metaphor of the code and local variables in a pair of curly braces in programming languages, where operators are like statements or instructions. A graph of operators and variables is a representation of the block.

A Block keeps operators in an array `BlockDesc::ops`

@@ -24,7 +24,7 @@ PyTorch has more parameters, as follows:
### Conversion examples
#### output_ratio: target output ratio
```python
# Assume the intput has width=7, height=7,
# Assume the input has width=7, height=7,
# output_ratio = 0.75, so the target output has width = int(7*0.75) = 5, height = int(7*0.75) = 5
# Pytorch version
torch.nn.FractionalMaxPool2d(2, output_ratio=[0.75, 0.75], return_indices=True)
@@ -24,7 +24,7 @@ PyTorch has more parameters, as follows:
### Conversion examples
#### output_ratio: target output ratio
```python
# Assume the intput has depth=7, width=7, height=7,
# Assume the input has depth=7, width=7, height=7,
# output_ratio = 0.75, so the target output has depth = int(7*0.75) = 5, width = int(7*0.75) = 5, height = int(7*0.75) = 5
# Pytorch version
torch.nn.FractionalMaxPool3d(2, output_ratio=[0.75, 0.75, 0.75], return_indices=True)
@@ -25,7 +25,7 @@ PyTorch has more parameters, as follows:
### Conversion examples
#### output_ratio: target output ratio
```python
# Assume the intput has width=7, height=7,
# Assume the input has width=7, height=7,
# output_ratio = 0.75, so the target output has width = int(7*0.75) = 5, height = int(7*0.75) = 5
# Pytorch version
torch.nn.functional.fractional_max_pool2d(input, 2, output_ratio=[0.75, 0.75], return_indices=True)
@@ -25,7 +25,7 @@ PyTorch has more parameters, as follows:
### Conversion examples
#### output_ratio: target output ratio
```python
# Assume the intput has depth=7, width=7, height=7,
# Assume the input has depth=7, width=7, height=7,
# output_ratio = 0.75, so the target output has depth = int(7*0.75) = 5, width = int(7*0.75) = 5, height = int(7*0.75) = 5
# Pytorch version
torch.nn.functional.fractional_max_pool3d(input, 2, output_ratio=[0.75, 0.75, 0.75], return_indices=True)