Skip to content

Commit

Permalink
doc: add operation definition documents
Browse files Browse the repository at this point in the history
  • Loading branch information
wuxun-zhang authored and TaoLv committed Oct 14, 2022
1 parent c02bb43 commit d19d0f7
Show file tree
Hide file tree
Showing 101 changed files with 6,202 additions and 1 deletion.
101 changes: 101 additions & 0 deletions doc/DoxygenLayout.xml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,107 @@
<tab type="user" title="SYCL Programming" url="@ref dev_guide_sycl_programming"/>
<tab type="user" title="OPs and Patterns" url="@ref dev_guide_ops_and_patterns"/>
</tab>
<tab type="usergroup" title="Supported Operations">
<tab type="user" title="Add" url="@ref dev_guide_op_add"/>
<tab type="user" title="AbsBackprop" url="@ref dev_guide_op_absbackprop"/>
<tab type="user" title="AvgPool" url="@ref dev_guide_op_avgpool"/>
<tab type="user" title="AvgPoolBackprop" url="@ref dev_guide_op_avgpoolbackprop"/>
<tab type="user" title="BatchNormForwardTraining" url="@ref dev_guide_op_batchnormforwardtraining"/>
<tab type="user" title="BatchNormInference" url="@ref dev_guide_op_batchnorminference"/>
<tab type="user" title="BatchNormTrainingBackprop" url="@ref dev_guide_op_batchnormtrainingbackprop"/>
<tab type="user" title="BiasAdd" url="@ref dev_guide_op_biasadd"/>
<tab type="user" title="BiasAddBackprop" url="@ref dev_guide_op_biasaddbackprop"/>
<tab type="user" title="Clamp" url="@ref dev_guide_op_clamp"/>
<tab type="user" title="ClampBackprop" url="@ref dev_guide_op_clampbackprop"/>
<tab type="user" title="Concat" url="@ref dev_guide_op_concat"/>
<tab type="user" title="Convolution" url="@ref dev_guide_op_convolution"/>
<tab type="user" title="ConvolutionBackpropData" url="@ref dev_guide_op_convolutionbackpropdata"/>
<tab type="user" title="ConvolutionBackpropFilters" url="@ref dev_guide_op_convolutionbackpropfilters"/>
<tab type="user" title="ConvTranspose" url="@ref dev_guide_op_convtranspose"/>
<tab type="user" title="ConvTransposeBackpropData" url="@ref dev_guide_op_convtransposebackpropdata"/>
<tab type="user" title="ConvTransposeBackpropFilters" url="@ref dev_guide_op_convtransposebackpropfilters"/>
<tab type="user" title="Dequantize" url="@ref dev_guide_op_dequantize"/>
<tab type="user" title="Divide" url="@ref dev_guide_op_divide"/>
<tab type="user" title="DynamicDequantize" url="@ref dev_guide_op_dynamicdequantize"/>
<tab type="user" title="DynamicQuantize" url="@ref dev_guide_op_dynamicquantize"/>
<tab type="user" title="DynamicReshape" url="@ref dev_guide_op_dynamicreshape"/>
<tab type="user" title="DynamicTranspose" url="@ref dev_guide_op_dynamictranspose"/>
<tab type="user" title="Elu" url="@ref dev_guide_op_elu"/>
<tab type="user" title="EluBackprop" url="@ref dev_guide_op_elubackprop"/>
<tab type="user" title="End" url="@ref dev_guide_op_end"/>
<tab type="user" title="Equal" url="@ref dev_guide_op_equal"/>
<tab type="user" title="Erf" url="@ref dev_guide_op_erf"/>
<tab type="user" title="Exp" url="@ref dev_guide_op_exp"/>
<tab type="user" title="GELU" url="@ref dev_guide_op_gelu"/>
<tab type="user" title="GELUBackprop" url="@ref dev_guide_op_gelubackprop"/>
<tab type="user" title="Greater" url="@ref dev_guide_op_greater"/>
<tab type="user" title="GreaterEqual" url="@ref dev_guide_op_greaterequal"/>
<tab type="user" title="HardSwish" url="@ref dev_guide_op_hardswish"/>
<tab type="user" title="HardSwishBackprop" url="@ref dev_guide_op_hardswishbackprop"/>
<tab type="user" title="Index" url="@ref dev_guide_op_index"/>
<tab type="user" title="Interpolate" url="@ref dev_guide_op_interpolate"/>
<tab type="user" title="InterpolateBackprop" url="@ref dev_guide_op_interpolatebackprop"/>
<tab type="user" title="LayerNorm" url="@ref dev_guide_op_layernorm"/>
<tab type="user" title="LayerNormBackprop" url="@ref dev_guide_op_layernormbackprop"/>
<tab type="user" title="LeakyReLU" url="@ref dev_guide_op_leakyrelu"/>
<tab type="user" title="Less" url="@ref dev_guide_op_less"/>
<tab type="user" title="LessEqual" url="@ref dev_guide_op_lessequal"/>
<tab type="user" title="Log" url="@ref dev_guide_op_log"/>
<tab type="user" title="LogicalAnd" url="@ref dev_guide_op_logicaland"/>
<tab type="user" title="LogicalOr" url="@ref dev_guide_op_logicalor"/>
<tab type="user" title="LogicalNot" url="@ref dev_guide_op_logicalnot"/>
<tab type="user" title="LogicalXor" url="@ref dev_guide_op_logicalxor"/>
<tab type="user" title="LogSoftmax" url="@ref dev_guide_op_logsoftmax"/>
<tab type="user" title="LogSoftmaxBackprop" url="@ref dev_guide_op_logsoftmaxbackprop"/>
<tab type="user" title="MatMul" url="@ref dev_guide_op_matmul"/>
<tab type="user" title="Maximum" url="@ref dev_guide_op_maximum"/>
<tab type="user" title="MaxPool" url="@ref dev_guide_op_maxpool"/>
<tab type="user" title="MaxPoolBackprop" url="@ref dev_guide_op_maxpoolbackprop"/>
<tab type="user" title="Minimum" url="@ref dev_guide_op_minimum"/>
<tab type="user" title="Mish" url="@ref dev_guide_op_mish"/>
<tab type="user" title="MishBackprop" url="@ref dev_guide_op_mishbackprop"/>
<tab type="user" title="Multiply" url="@ref dev_guide_op_multiply"/>
<tab type="user" title="Negative" url="@ref dev_guide_op_negative"/>
<tab type="user" title="NotEqual" url="@ref dev_guide_op_notequal"/>
<tab type="user" title="Pow" url="@ref dev_guide_op_pow"/>
<tab type="user" title="PowBackprop" url="@ref dev_guide_op_powbackprop"/>
<tab type="user" title="PowBackpropExponent" url="@ref dev_guide_op_powbackpropexponent"/>
<tab type="user" title="PReLU" url="@ref dev_guide_op_prelu"/>
<tab type="user" title="PReLUBackprop" url="@ref dev_guide_op_prelubackprop"/>
<tab type="user" title="Quantize" url="@ref dev_guide_op_quantize"/>
<tab type="user" title="Reciprocal" url="@ref dev_guide_op_reciprocal"/>
<tab type="user" title="ReduceL1" url="@ref dev_guide_op_reducel1"/>
<tab type="user" title="ReduceL2" url="@ref dev_guide_op_reducel2"/>
<tab type="user" title="ReduceMax" url="@ref dev_guide_op_reducemax"/>
<tab type="user" title="ReduceMean" url="@ref dev_guide_op_reducemean"/>
<tab type="user" title="ReduceMin" url="@ref dev_guide_op_reducemin"/>
<tab type="user" title="ReduceProd" url="@ref dev_guide_op_reduceprod"/>
<tab type="user" title="ReduceSum" url="@ref dev_guide_op_reducesum"/>
<tab type="user" title="ReLU" url="@ref dev_guide_op_relu"/>
<tab type="user" title="ReLUBackprop" url="@ref dev_guide_op_relubackprop"/>
<tab type="user" title="Reorder" url="@ref dev_guide_op_reorder"/>
<tab type="user" title="Round" url="@ref dev_guide_op_round"/>
<tab type="user" title="Rsqrt" url="@ref dev_guide_op_rsqrt"/>
<tab type="user" title="Select" url="@ref dev_guide_op_select"/>
<tab type="user" title="Sigmoid" url="@ref dev_guide_op_sigmoid"/>
<tab type="user" title="SigmoidBackprop" url="@ref dev_guide_op_sigmoidbackprop"/>
<tab type="user" title="Sign" url="@ref dev_guide_op_sign"/>
<tab type="user" title="SoftMax" url="@ref dev_guide_op_softmax"/>
<tab type="user" title="SoftMaxBackprop" url="@ref dev_guide_op_softmaxbackprop"/>
<tab type="user" title="SoftPlus" url="@ref dev_guide_op_softplus"/>
<tab type="user" title="SoftPlusBackprop" url="@ref dev_guide_op_softplusbackprop"/>
<tab type="user" title="Sqrt" url="@ref dev_guide_op_sqrt"/>
<tab type="user" title="SqrtBackprop" url="@ref dev_guide_op_sqrtbackprop"/>
<tab type="user" title="Square" url="@ref dev_guide_op_square"/>
<tab type="user" title="SquaredDifference" url="@ref dev_guide_op_squareddifference"/>
<tab type="user" title="StaticReshape" url="@ref dev_guide_op_staticreshape"/>
<tab type="user" title="StaticTranspose" url="@ref dev_guide_op_statictranspose"/>
<tab type="user" title="Subtract" url="@ref dev_guide_op_subtract"/>
<tab type="user" title="Tanh" url="@ref dev_guide_op_tanh"/>
<tab type="user" title="TanhBackprop" url="@ref dev_guide_op_tanhbackprop"/>
<tab type="user" title="TypeCast" url="@ref dev_guide_op_typecast"/>
<tab type="user" title="Wildcard" url="@ref dev_guide_op_wildcard"/>
</tab>
<tab type="usergroup" title="Performance Profiling">
<tab type="user" title="Verbose Mode" url="@ref dev_guide_verbose_mode"/>
<tab type="user" title="Graph Serialization" url="@ref dev_guide_graph_serialization"/>
Expand Down
37 changes: 37 additions & 0 deletions doc/operations/AbsBackprop.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
# AbsBackprop {#dev_guide_op_absbackprop}

**Versioned name**: *AbsBackprop-1*

**Category**: *Arithmetic*

**Short description**: *AbsBackprop* computes the gradient of the Abs operation:

\f$ds = \begin{cases}
dd & \text{if } s>0 \\
-dd & \text{if } s<0 \\
0 & \text{if } s=0
\end{cases} \f$

## Inputs

* **1**: ``input_forward`` - original input tensor of Abs op. **Required.**

* **Type**: T

* **2**: ``output_delta`` - the gradient tensor with respect to the output.
**Required.**

* **Type**: T

## Outputs

* **1**: ``input_delta`` - the gradient tensor with respect to the input of
Abs.

* **Type**: T

**Types**:

* **T**: f32, f16, bf16.
* **Note**: Inputs and outputs have the same data type denoted by *T*. For
  example, if the input is an f32 tensor, then all other tensors have the f32
  data type.
57 changes: 57 additions & 0 deletions doc/operations/Add.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
# Add {#dev_guide_op_add}

**Versioned name**: *Add-1*

**Category**: *Arithmetic*

**Short description**: *Add* performs element-wise addition operation with two
given tensors applying multi-directional broadcast rules.

## Detailed description

Before performing the arithmetic operation, *input_1* and *input_2* are broadcasted
if their shapes are different and the ``auto_broadcast`` attribute is
not ``none``. Broadcasting is performed according to the ``auto_broadcast`` value.

After broadcasting *Add* does the following with *input_1* and *input_2*:

\f$output_i = input\_1_{i}+input\_2_{i}\f$

## Attributes

* *auto_broadcast*

* **Description**: specifies rules used for auto-broadcasting of input
tensors.
* **Range of values**:

* *none* - no auto-broadcasting is allowed, all input shapes should match
* *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting.
Description is available in
[ONNX docs](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).

* **Type**: string
* **Default value**: *numpy*
* **Required**: *no*

## Inputs

* **1**: ``input_1`` - the first input tensor. **Required.**

* **Type**: T

* **2**: ``input_2`` - the second input tensor. **Required.**

* **Type**: T

## Outputs

* **1**: ``output`` - the output tensor of element-wise addition operation.

* **Type**: T

**Types**:

* **T**: f32, f16, bf16.
* **Note**: Inputs and outputs have the same data type denoted by *T*. For
  example, if the input is an f32 tensor, then all other tensors have the f32
  data type.
115 changes: 115 additions & 0 deletions doc/operations/AvgPool.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
# AvgPool {#dev_guide_op_avgpool}

**Versioned name**: *AvgPool-1*

**Category**: *Pooling*

**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/pooling.html)

**Detailed description**: [Reference](http://cs231n.github.io/convolutional-networks/#pool)

## Attributes

* *strides*

* **Description**: *strides* is a distance (in pixels) to slide the window on
the feature map over the `(z, y, x)` axes for 3D poolings and `(y, x)` axes
    for 2D poolings. For example, *strides* equal `(4, 2, 1)` means sliding the
    window 4 pixels at a time over the depth dimension, 2 over the height
    dimension, and 1 over the width dimension.
  * **Range of values**: Non-negative s64 values.
* **Type**: s64[]
* **Required**: *yes*

* *pads_begin*

* **Description**: *pads_begin* is a number of pixels to add to the beginning
along each axis. For example, *pads_begin* equal `(1, 2)` means adding 1
pixel to the top of the input and 2 to the left of the input.
* **Range of values**: Non-negative s64 values.
* **Type**: s64[]
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

* *pads_end*

* **Description**: *pads_end* is a number of pixels to add to the ending along
each axis. For example, *pads_end* equal `(1, 2)` means adding 1 pixel to the
bottom of the input and 2 to the right of the input.
* **Range of values**: Non-negative s64 values.
* **Type**: s64[]
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

* *kernel*

* **Description**: *kernel* is a size of each filter. For example, *kernel*
equal `(2, 3)` means that each filter has height equal to 2 and width equal
to 3.
* **Range of values**: positive s64 values.
* **Type**: s64[]
* **Required**: *yes*

* *exclude_pad*

* **Description**: *exclude_pad* is a type of pooling strategy for values in
the padding area. For example, if *exclude_pad* is *true*, zero-values in
the padding are not used.
* **Range of values**: True or False
* **Type**: bool
* **Required**: *yes*

* *rounding_type*

* **Description**: *rounding_type* is a type of rounding to be applied.
* **Range of values**:

* *ceil*
* *floor*

* **Type**: string
* **Default value**: *floor*
* **Required**: *no*

* *auto_pad*

  * **Description**: *auto_pad* specifies how the padding is calculated. Possible values:

* *none (not specified)*: use explicit padding values.
    * *same_upper (same_lower)* - the input is padded to match the output size.
      In case of an odd padding value, the extra padding is added at the end (at
      the beginning).
* *valid* - do not use padding.

* **Type**: string
* **Default value**: *none*
* **Required**: *no*
* **Note**: *pads_begin* and *pads_end* attributes are ignored when *auto_pad*
is specified.

* *data_format*

* **Description**: *data_format* denotes the data format of the input and
output data.
* **Range of values**: *NXC* or *NCX* (X means HW for 2D, DHW for 3D)
* **Type**: string
* **Default value**: *NXC*
* **Required**: *no*

## Inputs

* **1**: ``input`` - input tensor. **Required.**

* **Type**: T

## Outputs

* **1**: ``output`` - the result tensor.

* **Type**: T

**Types**:

* **T**: f32, f16, bf16.
* **Note**: Inputs and outputs have the same data type denoted by *T*. For
  example, if the input is an f32 tensor, then all other tensors have the f32
  data type.
Loading

0 comments on commit d19d0f7

Please sign in to comment.