From cabfc945b8522e1cba3cd2244fb5c730e6989602 Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Wed, 18 Dec 2024 19:08:59 -0800 Subject: [PATCH 1/6] Wave 3 skeleton --- index.bs | 1407 ++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 1249 insertions(+), 158 deletions(-) diff --git a/index.bs b/index.bs index 113fcd4f..3b3d3926 100644 --- a/index.bs +++ b/index.bs @@ -871,7 +871,7 @@ dictionary MLComputeResult { interface MLContext { Promise compute( MLGraph graph, MLNamedArrayBufferViews inputs, MLNamedArrayBufferViews outputs); - + MLOpSupportLimits opSupportLimits(); }; @@ -2565,6 +2565,9 @@ partial interface MLGraphBuilder { MLOperand equal(MLOperand a, MLOperand b, optional MLOperatorOptions options = {}); + MLOperand notEqual(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); MLOperand greater(MLOperand a, MLOperand b, optional MLOperatorOptions options = {}); @@ -2578,6 +2581,15 @@ partial interface MLGraphBuilder { MLOperand b, optional MLOperatorOptions options = {}); MLOperand logicalNot(MLOperand a, optional MLOperatorOptions options = {}); + MLOperand logicalAnd(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); + MLOperand logicalOr(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); + MLOperand logicalXor(MLOperand a, + MLOperand b, + optional MLOperatorOptions options = {}); }; dictionary MLLogicalNotSupportLimits { @@ -2587,11 +2599,15 @@ dictionary MLLogicalNotSupportLimits { partial dictionary MLOpSupportLimits { MLBinarySupportLimits equal; + MLBinarySupportLimits notEqual; MLBinarySupportLimits greater; MLBinarySupportLimits greaterOrEqual; MLBinarySupportLimits lesser; MLBinarySupportLimits lesserOrEqual; MLLogicalNotSupportLimits logicalNot; + MLLogicalNotSupportLimits logicalAnd; + MLLogicalNotSupportLimits logicalOr; + MLLogicalNotSupportLimits logicalXor; }; @@ -2616,6 +2632,8 @@ partial dictionary MLOpSupportLimits {
: equal :: Support limits for operator {{MLGraphBuilder/equal()}}. + : notEqual + :: Support limits for operator {{MLGraphBuilder/notEqual()}}. : greater :: Support limits for operator {{MLGraphBuilder/greater()}}. : greaterOrEqual @@ -2626,16 +2644,26 @@ partial dictionary MLOpSupportLimits { :: Support limits for operator {{MLGraphBuilder/lesserOrEqual()}}. : logicalNot :: Support limits for operator {{MLGraphBuilder/logicalNot()}}. + : logicalAnd + :: Support limits for operator {{MLGraphBuilder/logicalAnd()}}. + : logicalOr + :: Support limits for operator {{MLGraphBuilder/logicalOr()}}. + : logicalXor + :: Support limits for operator {{MLGraphBuilder/logicalXor()}}.
**Operation types:** - *equal*: Compare if the values of the two input tensors are equal, element-wise. + - *notEqual*: Compare if the values of the two input tensors are not equal, element-wise. - *greater*: Compare if the values of the first input tensor is greater, element-wise. - *greaterOrEqual*: Compare if the values of the first input tensor is greater or equal, element-wise. - *lesser*: Compare if the values of the first input tensor is lesser, element-wise. - *lesserOrEqual*: Compare if the values of the first input tensor is lesser or equal, element-wise. - *logicalNot*: Invert the values of the input tensor to values 0 or 1, element-wise. Specifically, when the input value is non-zero, invert it to 0. Conversely, for a zero input value, invert it to 1. + - *logicalAnd*: Compute the logical *and* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalOr*: Compute the logical *or* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalXor*: Compute the logical *exclusive-or* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1.
@@ -2646,7 +2674,7 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les To create element-wise logical operation given [=string=] |op|, {{MLOperand}} |a|, an optional {{MLOperand}} |b|, and {{MLOperatorOptions}} |options|, run the following steps: - 1. [=Assert=]: |op| is one of "equal", "greater", "greaterOrEqual", "lesser", "lesserOrEqual", "logicalNot". + 1. [=Assert=]: |op| is one of "equal", "notEqual", "greater", "greaterOrEqual", "lesser", "lesserOrEqual", "logicalNot", "logicalAnd", "logicalOr", "logicalXor". 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If |op| is "logicalNot": 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |a| returns false, then [=exception/throw=] a {{TypeError}}. @@ -2677,6 +2705,12 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les 1. Return |output|.
+ The notEqual(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "notEqual", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. + +
The greater(|a|, |b|, |options|) method steps are: 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "greater", |a|, |b|, and |options|. @@ -2711,6 +2745,27 @@ Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/les 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. 1. Return |output|.
+ +
+ The logicalAnd(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "logicalAnd", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
+ +
+ The logicalOr(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "logicalOr", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
+ +
+ The logicalXor(|a|, |b|, |options|) method steps are: + 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-logical-op | create element-wise logical operation=] given "logicalXor", |a|, |b|, and |options|. + 1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error. + 1. Return |output|. +
### Element-wise unary operations ### {#api-mlgraphbuilder-unary} @@ -2919,6 +2974,196 @@ partial dictionary MLOpSupportLimits { + + +### dequantizeLinear ### {#api-mlgraphbuilder-dequantizelinear} +!!!Select the values from the trueValue or the falseValue tensor depending on the corresponding values of the condition tensor, where non-zero is true and zero is false. The condition tensor is often the output of one of the element-wise logical operations. + +The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors. + + + +
+ **Arguments:** + - condition: an {{MLOperand}}. The condition tensor. + - trueValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to true. + - falseValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to false. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from either the trueValue or the falseValue tensor. +
+ +{{MLWhereSupportLimits}} has the following members: +
+ : condition + :: {{MLSupportLimits}} for condition operand. + : trueValue + :: {{MLSupportLimits}} for trueValue operand. + : falseValue + :: {{MLSupportLimits}} for falseValue operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/where()}}: +
+ : where + :: Support limits for operator {{MLGraphBuilder/where()}}. +
+ + +
+ + The where(|condition|, |trueValue|, |falseValue|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |condition|, |trueValue|, and |falseValue| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |condition|'s [=MLOperand/dataType=] is not equal to {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. + 1. If |trueValue|'s [=MLOperand/dataType=] is not equal to |falseValue|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. + 1. Let |outputShape| be the result of [=bidirectionally broadcasting=] |trueValue|'s [=MLOperand/shape=] and |falseValue|'s [=MLOperand/shape=]. + 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. + 1. Set |outputShape| to the result of [=bidirectionally broadcasting=] |condition|'s [=MLOperand/shape=] and |outputShape|. + 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. + 1. Let |descriptor| be the result of [=creating an MLOperandDescriptor=] given |trueValue|'s [=MLOperand/dataType=] and |outputShape|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |descriptor|. + 1. Let |operator| be an [=operator=] for the "where" operation, given |condition|, |trueValue|, |falseValue|, and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |condition|, |trueValue| and |falseValue|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + +
+    function where(builder, condition, trueValue, falseValue) {
+      const c = builder.clamp(condition, {'minValue': 0, 'maxValue': 1});
+      builder.add(
+        builder.mul(trueValue, builder.cast(c, trueValue.dataType)),
+        builder.mul(
+          falseValue, builder.cast(builder.logicalNot(c), falseValue.dataType)));
+    }
+    
+
+
+ + +### quantizeLinear ### {#api-mlgraphbuilder-quantizelinear} +!!!Select the values from the trueValue or the falseValue tensor depending on the corresponding values of the condition tensor, where non-zero is true and zero is false. The condition tensor is often the output of one of the element-wise logical operations. + +The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors. + + + +
+ **Arguments:** + - condition: an {{MLOperand}}. The condition tensor. + - trueValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to true. + - falseValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to false. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from either the trueValue or the falseValue tensor. +
+ +{{MLWhereSupportLimits}} has the following members: +
+ : condition + :: {{MLSupportLimits}} for condition operand. + : trueValue + :: {{MLSupportLimits}} for trueValue operand. + : falseValue + :: {{MLSupportLimits}} for falseValue operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/where()}}: +
+ : where + :: Support limits for operator {{MLGraphBuilder/where()}}. +
+ + +
+ + The where(|condition|, |trueValue|, |falseValue|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |condition|, |trueValue|, and |falseValue| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |condition|'s [=MLOperand/dataType=] is not equal to {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. + 1. If |trueValue|'s [=MLOperand/dataType=] is not equal to |falseValue|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. + 1. Let |outputShape| be the result of [=bidirectionally broadcasting=] |trueValue|'s [=MLOperand/shape=] and |falseValue|'s [=MLOperand/shape=]. + 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. + 1. Set |outputShape| to the result of [=bidirectionally broadcasting=] |condition|'s [=MLOperand/shape=] and |outputShape|. + 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. + 1. Let |descriptor| be the result of [=creating an MLOperandDescriptor=] given |trueValue|'s [=MLOperand/dataType=] and |outputShape|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |descriptor|. + 1. Let |operator| be an [=operator=] for the "where" operation, given |condition|, |trueValue|, |falseValue|, and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |condition|, |trueValue| and |falseValue|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + +
+    function where(builder, condition, trueValue, falseValue) {
+      const c = builder.clamp(condition, {'minValue': 0, 'maxValue': 1});
+      builder.add(
+        builder.mul(trueValue, builder.cast(c, trueValue.dataType)),
+        builder.mul(
+          falseValue, builder.cast(builder.logicalNot(c), falseValue.dataType)));
+    }
+    
+
+
+ + ### elu ### {#api-mlgraphbuilder-elu} Calculate the exponential linear unit function (ELU) on the input tensor element-wise. The calculation follows the expression `max(0, x) + alpha * (exp(min(0, x)) - 1)`. @@ -3039,6 +3284,7 @@ partial dictionary MLOpSupportLimits { 1. Return |output|. + ### gather ### {#api-mlgraphbuilder-gather} Gather values of the input tensor along an axis according to the indices. -
+{{MLGatherOptions}} has the following members: +
+ : axis + :: + The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. +
+ +
**Arguments:** - - input: an {{MLOperand}}. The input tensor. - - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. - **Returns:** - - an {{MLOperand}}. The output tensor of the same shape as *input*. + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gelu()}}: -
- : gelu - :: Support limits for operator {{MLGraphBuilder/gelu()}}. +{{MLGatherSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. + : output + :: {{MLSupportLimits}} for output operand.
-
+{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +
+ : gather + :: Support limits for operator {{MLGraphBuilder/gather()}}. +
+ +
+ The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+ +
- The gelu(|input|, |options|) method steps are: + The gather(|input|, |indices|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. + 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. + 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. + 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. + 1. Let |dimCount| be zero. + 1. Let |rankOutput| be zero. + 1. Let |shapeOutput| be an empty list. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is equal to |axis| then [=iteration/break=]. + 1. Set |shapeOutput|[|dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeIndices|: + 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |rankOutput| + |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. + 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. + 1. 
Increment |dimCount| by one. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. 1. *Make graph connections:* - 1. Let |output| be the result of [=copying an MLOperand=] given |input|. - 1. Let |operator| be an [=operator=] for the "gelu" operation given |options|. + 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. + 1. Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|.
-
-
- - The behavior of this operation can be [EMULATED] - -
-    function gelu(builder, input) {
-      return builder.mul(
-        builder.mul(input, builder.constant(input.dataType, 0.5)),
-        builder.add(
-          builder.constant(input.dataType, 1),
-          builder.erf(builder.div(
-            input, builder.sqrt(builder.constant(input.dataType, 2))))));
-    }
-    
-
-
+
+
+ + Examples of how gather works in different slicing schemes. + +
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    const input = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
 
-### gemm ### {#api-mlgraphbuilder-gemm}
-Calculate the [general matrix multiplication of the Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3). The calculation follows the expression `alpha * A * B + beta * C`, where `A` is a 2-D tensor with shape *[M, K]* or *[K, M]*, `B` is a 2-D tensor with shape *[K, N]* or *[N, K]*, and `C` is [=unidirectionally broadcastable=] to the shape *[M, N]*. `A` and `B` may optionally be transposed prior to the calculation.
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
+
+    // axis = 0 (default)
+    // indices of shape [2]:
+    //   [3,1]
+    // output of shape [2,3]:
+    //   [[30, 31, 32],
+    //    [10, 11, 12]]
+    const output1 = builder.gather(input, indices1);
+
+    // axis = 1
+    // indices of shape [3]:
+    //   [2,1,1]
+    // output of shape [4,3]:
+    //   [[ 2,  1,  1],
+    //    [12, 11, 11],
+    //    [22, 21, 21],
+    //    [32, 31, 31]]
+    const output2 = builder.gather(input, indices2, {axis: 1});
+
+    // axis = 1
+    // indices of shape [2,2]:
+    //   [[0, 1],
+    //    [1, 2]]
+    // output of shape [4,2,2]:
+    //   [[[ 0,  1], [ 1,  2]],
+    //    [[10, 11], [11, 12]],
+    //    [[20, 21], [21, 22]],
+    //    [[30, 31], [31, 32]]]
+    const output3 = builder.gather(input, indices3, {axis: 1});
+  
+
+
+### gatherNd ### {#api-mlgraphbuilder-gathernd} +!!!Gather values of the input tensor along an axis according to the indices. -{{MLGemmOptions}} has the following members: -
- : c - :: - The third input tensor. It is either a scalar, or of the shape that is [=unidirectionally broadcastable=] to the shape *[M, N]*. When it is not specified, the computation is done as if *c* is a scalar 0.0. - - : alpha - :: - A multiplier for the first input. - - : beta - :: - A multiplier for the third input {{MLGemmOptions/c}}. - - : aTranspose - :: - Indicates if the first input should be transposed prior to calculating the output. - - : bTranspose +{{MLGatherOptions}} has the following members: +
+ : axis :: - Indicates if the second input should be transposed prior to calculating the output. + The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor.
-
+
**Arguments:** - - a: an {{MLOperand}}. The first input 2-D tensor with shape *[M, K]* if *aTranspose* is false, or *[K, M]* if *aTranspose* is true. - - b: an {{MLOperand}}. The second input 2-D tensor with shape *[K, N]* if *bTranspose* is false, or *[N, K]* if *bTranspose* is true. - - options: an optional {{MLGemmOptions}}. The optional parameters of the operation. + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output 2-D tensor of shape *[M, N]* that contains the calculated product of all the inputs. + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
-{{MLGemmSupportLimits}} has the following members: -
- : a - :: {{MLSupportLimits}} for a operand. - : b - :: {{MLSupportLimits}} for b operand. - : c - :: {{MLSupportLimits}} for c operand. +{{MLGatherSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. : output :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gemm()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}:
- : gemm - :: Support limits for operator {{MLGraphBuilder/gemm()}}. + : gather + :: Support limits for operator {{MLGraphBuilder/gather()}}.
+
+ The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+
- The gemm(|a|, |b|, |options|) method steps are: + The gather(|input|, |indices|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |a| and |b| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |a|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. - 1. If |b|'s [=MLOperand/dataType=] is not equal to |a|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. - 1. If |a|'s [=MLOperand/rank=] is not 2 or |b|'s [=MLOperand/rank=] is not 2, then [=exception/throw=] a {{TypeError}}. - 1. Set |options|.{{MLGemmOptions/alpha}} to the result of [=casting=] |options|.{{MLGemmOptions/alpha}} to |a|'s [=MLOperand/dataType=]. - 1. Set |options|.{{MLGemmOptions/beta}} to the result of [=casting=] |options|.{{MLGemmOptions/beta}} to |a|'s [=MLOperand/dataType=]. - 1. Let |shapeA| be a [=list/clone=] of |a|'s [=MLOperand/shape=]. - 1. Let |shapeB| be a [=list/clone=] of |b|'s [=MLOperand/shape=]. - 1. If |options|.{{MLGemmOptions/aTranspose}} is true, then reverse the order of the items in |shapeA|. - 1. If |options|.{{MLGemmOptions/bTranspose}} is true, then reverse the order of the items in |shapeB|. - 1. If |shapeA|[1] is not equal to |shapeB|[0], then [=exception/throw=] a {{TypeError}}. - 1. If |options|.{{MLGemmOptions/c}} [=map/exists=]: - 1. If it is not [=unidirectionally broadcastable=] to the shape « |shapeA|[0], |shapeB|[1] », then [=exception/throw=] a {{TypeError}}. - 1. If its [=MLOperand/dataType=] is not equal to |a|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. - 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |a|'s [=MLOperand/dataType=] and « |shapeA|[0], |shapeB|[1] ». + 1. 
If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. + 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. + 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. + 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. + 1. Let |dimCount| be zero. + 1. Let |rankOutput| be zero. + 1. Let |shapeOutput| be an empty list. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is equal to |axis| then [=iteration/break=]. + 1. Set |shapeOutput|[|dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeIndices|: + 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |rankOutput| + |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. + 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. + 1. Increment |dimCount| by one. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. 1. *Make graph connections:* - 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. - 1. Let |operator| be an [=operator=] for the "gemm" operation, given |options|. + 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. + 1. Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. 1. 
Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/inputs=] to |a| and |b|. - 1. If |options|.{{MLGemmOptions/c}} [=map/exists=], then add it to |operator|'s [=operator/inputs=]. + 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|.
-
+
- The behavior of this operation can be [EMULATED] + Examples of how gather works in different slicing schemes.
-    function gemm(builder, a, b, options) {
-      if (options.aTranspose)
-        a = builder.transpose(a);
-
-      if (options.bTranspose)
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    const input = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
+
+    // axis = 0 (default)
+    // indices of shape [2]:
+    //   [3,1]
+    // output of shape [2,3]:
+    //   [[30, 31, 32],
+    //    [10, 11, 12]]
+    const output1 = builder.gather(input, indices1);
+
+    // axis = 1
+    // indices of shape [3]:
+    //   [2,1,1]
+    // output of shape [4,3]:
+    //   [[ 2,  1,  1],
+    //    [12, 11, 11],
+    //    [22, 21, 21],
+    //    [32, 31, 31]]
+    const output2 = builder.gather(input, indices2, {axis: 1});
+
+    // axis = 1
+    // indices of shape [2,2]:
+    //   [[0, 1],
+    //    [1, 2]]
+    // output of shape [4,2,2]:
+    //   [[[ 0,  1], [ 1,  2]],
+    //    [[10, 11], [11, 12]],
+    //    [[20, 21], [21, 22]],
+    //    [[30, 31], [31, 32]]]
+    const output3 = builder.gather(input, indices3, {axis: 1});
+  
+
+
+ +### gelu ### {#api-mlgraphbuilder-gelu-method} +Compute the gaussian error linear unit function (GELU) of the input tensor. The calculation follows the expression `0.5 * x * (1 + erf(x / sqrt(2)))`. + + + +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** + - an {{MLOperand}}. The output tensor of the same shape as *input*. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gelu()}}: +
+ : gelu + :: Support limits for operator {{MLGraphBuilder/gelu()}}. +
+ +
+ + The gelu(|input|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. + 1. *Make graph connections:* + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "gelu" operation given |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + +
+    function gelu(builder, input) {
+      return builder.mul(
+        builder.mul(input, builder.constant(input.dataType, 0.5)),
+        builder.add(
+          builder.constant(input.dataType, 1),
+          builder.erf(builder.div(
+            input, builder.sqrt(builder.constant(input.dataType, 2))))));
+    }
+    
+
+
+ +### gemm ### {#api-mlgraphbuilder-gemm} +Calculate the [general matrix multiplication of the Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3). The calculation follows the expression `alpha * A * B + beta * C`, where `A` is a 2-D tensor with shape *[M, K]* or *[K, M]*, `B` is a 2-D tensor with shape *[K, N]* or *[N, K]*, and `C` is [=unidirectionally broadcastable=] to the shape *[M, N]*. `A` and `B` may optionally be transposed prior to the calculation. + + + +{{MLGemmOptions}} has the following members: +
+ : c + :: + The third input tensor. It is either a scalar, or of the shape that is [=unidirectionally broadcastable=] to the shape *[M, N]*. When it is not specified, the computation is done as if *c* is a scalar 0.0. + + : alpha + :: + A multiplier for the first input. + + : beta + :: + A multiplier for the third input {{MLGemmOptions/c}}. + + : aTranspose + :: + Indicates if the first input should be transposed prior to calculating the output. + + : bTranspose + :: + Indicates if the second input should be transposed prior to calculating the output. +
+ +
+ **Arguments:** + - a: an {{MLOperand}}. The first input 2-D tensor with shape *[M, K]* if *aTranspose* is false, or *[K, M]* if *aTranspose* is true. + - b: an {{MLOperand}}. The second input 2-D tensor with shape *[K, N]* if *bTranspose* is false, or *[N, K]* if *bTranspose* is true. + - options: an optional {{MLGemmOptions}}. The optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output 2-D tensor of shape *[M, N]* that contains the calculated product of all the inputs. +
+ +{{MLGemmSupportLimits}} has the following members: +
+ : a + :: {{MLSupportLimits}} for a operand. + : b + :: {{MLSupportLimits}} for b operand. + : c + :: {{MLSupportLimits}} for c operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gemm()}}: +
+ : gemm + :: Support limits for operator {{MLGraphBuilder/gemm()}}. +
+ +
+ + The gemm(|a|, |b|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |a| and |b| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |a|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. + 1. If |b|'s [=MLOperand/dataType=] is not equal to |a|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. + 1. If |a|'s [=MLOperand/rank=] is not 2 or |b|'s [=MLOperand/rank=] is not 2, then [=exception/throw=] a {{TypeError}}. + 1. Set |options|.{{MLGemmOptions/alpha}} to the result of [=casting=] |options|.{{MLGemmOptions/alpha}} to |a|'s [=MLOperand/dataType=]. + 1. Set |options|.{{MLGemmOptions/beta}} to the result of [=casting=] |options|.{{MLGemmOptions/beta}} to |a|'s [=MLOperand/dataType=]. + 1. Let |shapeA| be a [=list/clone=] of |a|'s [=MLOperand/shape=]. + 1. Let |shapeB| be a [=list/clone=] of |b|'s [=MLOperand/shape=]. + 1. If |options|.{{MLGemmOptions/aTranspose}} is true, then reverse the order of the items in |shapeA|. + 1. If |options|.{{MLGemmOptions/bTranspose}} is true, then reverse the order of the items in |shapeB|. + 1. If |shapeA|[1] is not equal to |shapeB|[0], then [=exception/throw=] a {{TypeError}}. + 1. If |options|.{{MLGemmOptions/c}} [=map/exists=]: + 1. If it is not [=unidirectionally broadcastable=] to the shape « |shapeA|[0], |shapeB|[1] », then [=exception/throw=] a {{TypeError}}. + 1. If its [=MLOperand/dataType=] is not equal to |a|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |a|'s [=MLOperand/dataType=] and « |shapeA|[0], |shapeB|[1] ». + 1. *Make graph connections:* + 1. 
Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. + 1. Let |operator| be an [=operator=] for the "gemm" operation, given |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |a| and |b|. + 1. If |options|.{{MLGemmOptions/c}} [=map/exists=], then add it to |operator|'s [=operator/inputs=]. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + +
+    function gemm(builder, a, b, options) {
+      if (options.aTranspose)
+        a = builder.transpose(a);
+
+      if (options.bTranspose)
         b = builder.transpose(b);
 
       let ab = builder.matmul(
@@ -5982,74 +6538,447 @@ partial dictionary MLOpSupportLimits {
     1. Return |output|.
 
-### reshape ### {#api-mlgraphbuilder-reshape-method} -Alter the shape of a tensor to a new shape. Reshape does not copy or change the content of the tensor. It just changes the tensor's logical shape for the subsequent operations. +### reshape ### {#api-mlgraphbuilder-reshape-method} +Alter the shape of a tensor to a new shape. Reshape does not copy or change the content of the tensor. It just changes the tensor's logical shape for the subsequent operations. + +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - newShape: [=sequence=]<{{unsigned long}}>. The shape of the output tensor. + The number of elements implied by *newShape* must be the same as the + number of elements in the input tensor. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output tensor. The values of the output + tensor are the same as values of the input tensor. The shape of the output + tensor is specified by the *newShape* argument. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/reshape()}}: +
+ : reshape + :: Support limits for operator {{MLGraphBuilder/reshape()}}. +
+ +
+ + The reshape(|input|, |newShape|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. Let |outputShape| be an empty array of {{unsigned long}}. + 1. If |newShape|'s [=list/size=] is 0, set |outputShape| to an empty [=/list=] for a scalar. + 1. If any [=list/item=] in |newShape| is not a [=valid dimension=], then [=exception/throw=] a {{TypeError}}. + 1. Let |inputElementCount| be the product of all elements in |input|'s [=MLOperand/shape=]. Empty dimensions yield an |inputElementCount| of 1. + 1. If product of all values in |newShape| is not equal to |inputElementCount|, then [=exception/throw=] a {{TypeError}}. + 1. Let |desc| be a copy of |input|.{{MLOperand/[[descriptor]]}}. + 1. Set |desc|.{{MLOperandDescriptor/shape}} to |newShape|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. + 1. Let |operator| be an [=operator=] for the "reshape" operation, given |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ + +### scatterElements ### {#api-mlgraphbuilder-scatterelements} +!!!Scatter values from the updates tensor into a copy of the input tensor along an axis according to the indices, the reverse of gatherElements. + + +{{MLGatherOptions}} has the following members: +
+ : axis + :: + The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. +
+ +{{MLGatherSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +
+ : gather + :: Support limits for operator {{MLGraphBuilder/gather()}}. +
+ +
+ The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+ +
+ + The gather(|input|, |indices|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. + 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. + 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. + 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. + 1. Let |dimCount| be zero. + 1. Let |rankOutput| be zero. + 1. Let |shapeOutput| be an empty list. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is equal to |axis| then [=iteration/break=]. + 1. Set |shapeOutput|[|dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeIndices|: + 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |rankOutput| + |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. + 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. + 1. Increment |dimCount| by one. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. + 1. 
Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + Examples of how gather works in different slicing schemes. + +
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    const input = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
+
+    // axis = 0 (default)
+    // indices of shape [2]:
+    //   [3,1]
+    // output of shape [2,3]:
+    //   [[30, 31, 32],
+    //    [10, 11, 12]]
+    const output1 = builder.gather(input, indices1);
+
+    // axis = 1
+    // indices of shape [3]:
+    //   [2,1,1]
+    // output of shape [4,3]:
+    //   [[ 2,  1,  1],
+    //    [12, 11, 11],
+    //    [22, 21, 21],
+    //    [32, 31, 31]]
+    const output2 = builder.gather(input, indices2, {axis: 1});
+
+    // axis = 1
+    // indices of shape [2,2]:
+    //   [[0, 1],
+    //    [1, 2]]
+    // output of shape [4,2,2]:
+    //   [[[ 0,  1], [ 1,  2]],
+    //    [[10, 11], [11, 12]],
+    //    [[20, 21], [21, 22]],
+    //    [[30, 31], [31, 32]]]
+    const output3 = builder.gather(input, indices3, {axis: 1});
+  
+
+
+ + +### scatterNd ### {#api-mlgraphbuilder-scatternd} +!!!Scatter slices of values from the updates tensor into a copy of the input tensor according to N-dimensional indices, the reverse of gatherNd. + + +{{MLGatherOptions}} has the following members: +
+ : axis + :: + The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. +
+ +{{MLGatherSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +
+ : gather + :: Support limits for operator {{MLGraphBuilder/gather()}}. +
+ +
+ The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+ +
+ + The gather(|input|, |indices|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. + 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. + 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. + 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. + 1. Let |dimCount| be zero. + 1. Let |rankOutput| be zero. + 1. Let |shapeOutput| be an empty list. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is equal to |axis| then [=iteration/break=]. + 1. Set |shapeOutput|[|dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeIndices|: + 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |rankOutput| + |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. + 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. + 1. Increment |dimCount| by one. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. + 1. 
Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + Examples of how gather works in different slicing schemes. + +
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    const input = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
+
+    // axis = 0 (default)
+    // indices of shape [2]:
+    //   [3,1]
+    // output of shape [2,3]:
+    //   [[30, 31, 32],
+    //    [10, 11, 12]]
+    const output1 = builder.gather(input, indices1);
+
+    // axis = 1
+    // indices of shape [3]:
+    //   [2,1,1]
+    // output of shape [4,3]:
+    //   [[ 2,  1,  1],
+    //    [12, 11, 11],
+    //    [22, 21, 21],
+    //    [32, 31, 31]]
+    const output2 = builder.gather(input, indices2, {axis: 1});
+
+    // axis = 1
+    // indices of shape [2,2]:
+    //   [[0, 1],
+    //    [1, 2]]
+    // output of shape [4,2,2]:
+    //   [[[ 0,  1], [ 1,  2]],
+    //    [[10, 11], [11, 12]],
+    //    [[20, 21], [21, 22]],
+    //    [[30, 31], [31, 32]]]
+    const output3 = builder.gather(input, indices3, {axis: 1});
+  
+
+
+ + +### sigmoid ### {#api-mlgraphbuilder-sigmoid-method} +Compute the sigmoid function of the input tensor. The calculation follows the expression `1 / (exp(-x) + 1)`. -
+ +
**Arguments:** - input: an {{MLOperand}}. The input tensor. - - newShape: [=sequence=]<{{unsigned long}}>. The shape of the output tensor. - The number of elements implied by *newShape* must be the same as the - number of elements in the input tensor. - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output tensor. The values of the output - tensor are the same as values of the input tensor. The shape of the output - tensor is specified by the *newShape* argument. + **Returns:** + - an {{MLOperand}}. The output tensor of the same shape as *input*.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/reshape()}}: +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/sigmoid()}}:
- : reshape - :: Support limits for operator {{MLGraphBuilder/reshape()}}. + : sigmoid + :: Support limits for operator {{MLGraphBuilder/sigmoid()}}.
- The reshape(|input|, |newShape|, |options|) method steps are: + The sigmoid(|input|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - 1. Let |outputShape| be an empty array of {{unsigned long}}. - 1. If |newShape|'s [=list/size=] is 0, set |outputShape| to an empty [=/list=] for a scalar. - 1. If any [=list/item=] in |newShape| is not a [=valid dimension=], then [=exception/throw=] a {{TypeError}}. - 1. Let |inputElementCount| be the product of all elements in |input|'s [=MLOperand/shape=]. Empty dimensions yield an |inputElementCount| of 1. - 1. If product of all values in |newShape| is not equal to |inputElementCount|, then [=exception/throw=] a {{TypeError}}. - 1. Let |desc| be a copy of |input|.{{MLOperand/[[descriptor]]}}. - 1. Set |desc|.{{MLOperandDescriptor/shape}} to |newShape|. + 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. 1. *Make graph connections:* - 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |desc|. - 1. Let |operator| be an [=operator=] for the "reshape" operation, given |options|. + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "sigmoid" operation, given |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/input=] to |input|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|.
+
+
+ + The behavior of this operation can be [EMULATED] + +
+    function sigmoid(builder, input) {
+      return builder.div(
+        builder.constant(input.dataType, 1),
+        builder.add(
+          builder.exp(builder.neg(input)), builder.constant(input.dataType, 1)));
+    }
+    
+
+
+ + +### sign ### {#api-mlgraphbuilder-sign} +!!!Compute the sign of each element of the input tensor, yielding -1, 0, or 1 per element. The calculation follows the expression `(x > 0) - (x < 0)`. -### sigmoid ### {#api-mlgraphbuilder-sigmoid-method} -Compute the sigmoid function of the input tensor. The calculation follows the expression `1 / (exp(-x) + 1)`. -
+
**Arguments:** - input: an {{MLOperand}}. The input tensor. - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. @@ -6058,22 +6987,22 @@ partial dictionary MLOpSupportLimits { - an {{MLOperand}}. The output tensor of the same shape as *input*.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/sigmoid()}}: +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/softplus()}}:
- : sigmoid - :: Support limits for operator {{MLGraphBuilder/sigmoid()}}. + : softplus + :: Support limits for operator {{MLGraphBuilder/softplus()}}.
- The sigmoid(|input|, |options|) method steps are: + The softplus(|input|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. 1. *Make graph connections:* 1. Let |output| be the result of [=copying an MLOperand=] given |input|. - 1. Let |operator| be an [=operator=] for the "sigmoid" operation, given |options|. + 1. Let |operator| be an [=operator=] for the "softplus" operation and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/input=] to |input|. 1. Set |operator|'s [=operator/output=] to |output|. @@ -6086,16 +7015,15 @@ partial dictionary MLOpSupportLimits { The behavior of this operation can be [EMULATED]
-    function sigmoid(builder, input) {
-      return builder.div(
-        builder.constant(input.dataType, 1),
-        builder.add(
-          builder.exp(builder.neg(input)), builder.constant(input.dataType, 1)));
+    function softplus(builder, input) {
+      return builder.log(
+        builder.add(builder.exp(input), builder.constant(input.dataType, 1)));
     }
     
+ ### slice ### {#api-mlgraphbuilder-slice} Produce a slice of the input tensor. + +{{MLGatherOptions}} has the following members: +
+ : axis + :: + The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. +
+ +{{MLGatherSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +
+ : gather + :: Support limits for operator {{MLGraphBuilder/gather()}}. +
+ +
+ The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. +
+ +
+ + The gather(|input|, |indices|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. + 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. + 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. + 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. + 1. Let |dimCount| be zero. + 1. Let |rankOutput| be zero. + 1. Let |shapeOutput| be an empty list. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is equal to |axis| then [=iteration/break=]. + 1. Set |shapeOutput|[|dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeIndices|: + 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. + 1. Increment |dimCount| by one. + 1. Set |rankOutput| to |rankOutput| + |dimCount|. + 1. Let |dimCount| be zero. + 1. [=list/For each=] |size| of |shapeInput|: + 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. + 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. + 1. Increment |dimCount| by one. + 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. *Make graph connections:* + 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. + 1. 
Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + Examples of how gather works in different slicing schemes. + +
+    // input of shape [4,3]:
+    //   [[ 0,  1,  2],
+    //    [10, 11, 12],
+    //    [20, 21, 22],
+    //    [30, 31, 32]]
+    const input = builder.constant(
+      {shape: [4, 3]},
+      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
+
+    const indices1 = builder.constant(
+      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
+
+    const indices2 = builder.constant(
+      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
+
+    const indices3 = builder.constant(
+      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
+
+    // axis = 0 (default)
+    // indices of shape [2]:
+    //   [3,1]
+    // output of shape [2,3]:
+    //   [[30, 31, 32],
+    //    [10, 11, 12]]
+    const output1 = builder.gather(input, indices1);
+
+    // axis = 1
+    // indices of shape [3]:
+    //   [2,1,1]
+    // output of shape [4,3]:
+    //   [[ 2,  1,  1],
+    //    [12, 11, 11],
+    //    [22, 21, 21],
+    //    [32, 31, 31]]
+    const output2 = builder.gather(input, indices2, {axis: 1});
+
+    // axis = 1
+    // indices of shape [2,2]:
+    //   [[0, 1],
+    //    [1, 2]]
+    // output of shape [4,2,2]:
+    //   [[[ 0,  1], [ 1,  2]],
+    //    [[10, 11], [11, 12]],
+    //    [[20, 21], [21, 22]],
+    //    [[30, 31], [31, 32]]]
+    const output3 = builder.gather(input, indices3, {axis: 1});
+  
+
+
+ ### transpose ### {#api-mlgraphbuilder-transpose} Permute the dimensions of the input tensor according to the *permutation* argument. + +{{MLConvTranspose2dOptions}} has the following members: +
+ : exclusive + :: + Whether to include or exclude the current value in the output, meaning inclusive prefix-sum addition (see https://en.wikipedia.org/wiki/Prefix_sum) or exclusive prefix-sum addition. Given input [1,2,3,4], inclusive addition would yield an output of [1,3,6,10] whereas exclusive would yield [0,1,3,6]. The default is inclusive. + + : reversed + :: + Whether to reverse the summation direction along the active axis to instead start from the high coordinate to low coordinate. Given input *[1,2,3,4]*, inclusive forward addition would yield an output of *[1,3,6,10]* whereas backward summation would yield *[10,9,7,4]*. The default is forward. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - options: an {{MLCumulativeSumOptions}}. Specifies the optional parameters of the operation. + + **Returns:** + - an {{MLOperand}}. The output tensor of the same shape as *input*. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/cumulativeSum()}}: +
+ : cumulativeSum + :: Support limits for operator {{MLGraphBuilder/cumulativeSum()}}. +
+ +
+ + The cumulativeSum(|input|, |options|) method steps are: + + 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. + 1. *Make graph connections:* + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "softplus" operation and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
+ +
+
+ + The behavior of this operation can be [EMULATED] + +
+    function softplus(builder, input) {
+      return builder.log(
+        builder.add(builder.exp(input), builder.constant(input.dataType, 1)));
+    }
+    
+
+
+ + ### Element-wise binary operations ### {#api-mlgraphbuilder-binary} Compute the element-wise binary addition, subtraction, multiplication, division, power, maximum and minimum of the two input tensors. @@ -3443,24 +3520,14 @@ partial dictionary MLOpSupportLimits { ### gatherElements ### {#api-mlgraphbuilder-gatherelements} !!!Gather values of the input tensor along an axis according to the indices. @@ -3598,57 +3665,30 @@ partial dictionary MLOpSupportLimits { ### gatherNd ### {#api-mlgraphbuilder-gathernd} !!!Gather values of the input tensor along an axis according to the indices. -{{MLGatherOptions}} has the following members: -
- : axis - :: - The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. -
-
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + - options: an optional {{MLOperatorOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
-{{MLGatherSupportLimits}} has the following members: -
- : input - :: {{MLSupportLimits}} for input operand. - : indices - :: {{MLSupportLimits}} for indices operand. - : output - :: {{MLSupportLimits}} for output operand. -
- -{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherNd()}}:
- : gather - :: Support limits for operator {{MLGraphBuilder/gather()}}. + : gatherNd + :: Support limits for operator {{MLGraphBuilder/gatherNd()}}.
From 9d0d2319eccf49ed1c994c8facb653f8625fc579 Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Wed, 15 Jan 2025 21:09:08 -0800 Subject: [PATCH 3/6] Update more operators --- index.bs | 641 +++++++++++++++++++++++++++---------------------------- 1 file changed, 316 insertions(+), 325 deletions(-) diff --git a/index.bs b/index.bs index 7b0c2312..b7e8a253 100644 --- a/index.bs +++ b/index.bs @@ -2919,7 +2919,9 @@ partial dictionary MLOpSupportLimits { 1. Return |output|.
-### cumulativeSum ### {#api-mlgraphbuilder-sign} + +### cumulativeSum ### {#api-mlgraphbuilder-cumulativesum} +!!! Compute the accumulated sum of a series of values along the given axis, either including or excluding the current value. -{{MLConvTranspose2dOptions}} has the following members: + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/cumulativeSum()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=][=/any rank|N=]
*output*[=/same type as|same as=] {{input}}[=/same rank as|same as=] {{input}}
+ +{{MLCumulativeSumOptions}} has the following members:
: exclusive :: - Whether to include or exclude the current value in the output, meaning inclusive presum addition (see https://en.wikipedia.org/wiki/Prefix_sum) or exclusive post-sum addition. Given input [1,2,3,4], inclusive addition would yield an output of [1,3,6,10] whereas exclusive would yield [0,1,3,6]. The default is inclusive. + Whether to include or exclude the current value in the output, meaning inclusive presum addition (see https://en.wikipedia.org/wiki/Prefix_sum) or exclusive post-sum addition. Given input *[1,2,3,4]*, inclusive addition would yield an output of *[1,3,6,10]* whereas exclusive would yield *[0,1,3,6]*. The default is inclusive. : reversed :: @@ -2953,6 +2977,7 @@ partial dictionary MLOpSupportLimits {
**Arguments:** - input: an {{MLOperand}}. The input tensor. + - axis: an {{unsigned long}} scalar. The dimension the reduction will be performed on. - options: an {{MLCumulativeSumOptions}}. Specifies the optional parameters of the operation. **Returns:** @@ -2969,32 +2994,19 @@ partial dictionary MLOpSupportLimits { The cumulativeSum(|input|, |options|) method steps are: - 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-cumulativesum)), then [=exception/throw=] a {{TypeError}}. + 1. If |axis| is greater than or equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. 1. *Make graph connections:* 1. Let |output| be the result of [=copying an MLOperand=] given |input|. - 1. Let |operator| be an [=operator=] for the "softplus" operation and |options|. + 1. Let |operator| be an [=operator=] for the "cumulativeSum" operation and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/input=] to |input|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|. -
-
- - The behavior of this operation can be [EMULATED] - -
-    function softplus(builder, input) {
-      return builder.log(
-        builder.add(builder.exp(input), builder.constant(input.dataType, 1)));
-    }
-    
-
-
- ### Element-wise binary operations ### {#api-mlgraphbuilder-binary} Compute the element-wise binary addition, subtraction, multiplication, division, power, maximum and minimum of the two input tensors. @@ -3416,6 +3428,7 @@ partial interface MLGraphBuilder { MLOperand neg(MLOperand input, optional MLOperatorOptions options = {}); MLOperand reciprocal(MLOperand input, optional MLOperatorOptions options = {}); MLOperand sin(MLOperand input, optional MLOperatorOptions options = {}); + MLOperand sign(MLOperand input, optional MLOperatorOptions options = {}); MLOperand sqrt(MLOperand input, optional MLOperatorOptions options = {}); MLOperand tan(MLOperand input, optional MLOperatorOptions options = {}); }; @@ -3432,12 +3445,13 @@ partial dictionary MLOpSupportLimits { MLSingleInputSupportLimits neg; MLSingleInputSupportLimits reciprocal; MLSingleInputSupportLimits sin; + MLSingleInputSupportLimits sign; MLSingleInputSupportLimits sqrt; MLSingleInputSupportLimits tan; }; -
+
**Arguments:** - input: an {{MLOperand}}. The input tensor. - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. @@ -3447,7 +3461,7 @@ partial dictionary MLOpSupportLimits { tensor is the same as the shape of input tensor.
- +
@@ -3494,6 +3508,8 @@ partial dictionary MLOpSupportLimits { : sin :: Support limits for operator {{MLGraphBuilder/sin()}}. : sqrt + :: Support limits for operator {{MLGraphBuilder/sign()}}. + : sqrt :: Support limits for operator {{MLGraphBuilder/sqrt()}}. : tan :: Support limits for operator {{MLGraphBuilder/tan()}}. @@ -3512,6 +3528,7 @@ partial dictionary MLOpSupportLimits { - *neg*: Compute the numerical negative value of the input tensor, element-wise. - *reciprocal*: Compute the reciprocal of the input tensor, element-wise. - *sin*: Compute the sine of the input tensor, element-wise. + - *sign*: Compute the sign (-1, 0, 1) of the input tensor, element-wise, returning 1 if > 0, -1 if < 0, and 0 otherwise. - *sqrt*: Compute the square root of the input tensor, element-wise. - *tan*: Compute the tangent of the input tensor, element-wise. @@ -3520,7 +3537,7 @@ partial dictionary MLOpSupportLimits { To create element-wise unary operation given [=string=] |op|, {{MLOperand}} |input|, optional [=/list=] |allowedDataTypes|, and |options|, run the following steps: - 1. [=Assert=]: |op| is one of "abs", "ceil", "cos", "erf", "exp", "floor", "identity", "log", "neg", "reciprocal", "sin", "sqrt", "tan". + 1. [=Assert=]: |op| is one of "abs", "ceil", "cos", "erf", "exp", "floor", "identity", "log", "neg", "reciprocal", "sin", "sign", "sqrt", "tan". 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |allowedDataTypes| is given and it does not [=list/contain=] |input|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. @@ -3614,6 +3631,13 @@ partial dictionary MLOpSupportLimits { 1. Return |output|. +
+  The sign(|input|, |options|) method steps are:
+    1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-unary-op | create element-wise unary operation=] given "sign", |input|, « {{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}}, {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"int8"}} », and |options|.
+    1. If that [=exception/throws=] an error, then re-[=exception/throw=] the error.
+    1. Return |output|.
+
The sqrt(|input|, |options|) method steps are: 1. Let |output| be the result of running the [=MLGraphBuilder/element-wise-unary-op | create element-wise unary operation=] given "sqrt", |input|, « {{MLOperandDataType/"float32"}}, {{MLOperandDataType/"float16"}} », and |options|. @@ -3629,65 +3653,88 @@ partial dictionary MLOpSupportLimits {
+
+
+ + The behavior of the {{MLGraphBuilder/sign()}} operation can be [EMULATED] + +
+    function sign(builder, input, options) {
+      let zero = builder.constant(input.dataType, 0);
+      let positiveOne = builder.constant(input.dataType, 1);
+      let negativeOne = builder.constant(input.dataType, -1);
+
+      return builder.where(
+        builder.greater(input, zero),
+        positiveOne,
+        builder.where(
+          builder.lesser(input, zero),
+          negativeOne,
+          zero));
+    }
+    
+
+
### dequantizeLinear ### {#api-mlgraphbuilder-dequantizelinear} -!!!Select the values from the trueValue or the falseValue tensor depending on the corresponding values of the condition tensor, where non-zero is true and zero is false. The condition tensor is often the output of one of the element-wise logical operations. +!!! +Dequantizes an integer tensor to floating point space using the scale and zero-point bias. The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors. -
+
**Arguments:** - - condition: an {{MLOperand}}. The condition tensor. - - trueValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to true. - - falseValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to false. + - input: an {{MLOperand}}. The condition tensor. + - scale: an {{MLOperand}}. ---- + - zeroPoint: an {{MLOperand}}. ---- - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from either the trueValue or the falseValue tensor.
-{{MLWhereSupportLimits}} has the following members: -
- : condition - :: {{MLSupportLimits}} for condition operand. - : trueValue - :: {{MLSupportLimits}} for trueValue operand. - : falseValue - :: {{MLSupportLimits}} for falseValue operand. +{{MLQuantizationSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : scale + :: {{MLSupportLimits}} for scale operand. + : zeroPoint + :: {{MLSupportLimits}} for zeroPoint operand. : output :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/where()}}: +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/dequantizeLinear()}}:
- : where - :: Support limits for operator {{MLGraphBuilder/where()}}. + : dequantizeLinear + :: Support limits for operator {{MLGraphBuilder/dequantizeLinear()}}.
- The where(|condition|, |trueValue|, |falseValue|, |options|) method steps are: + The where(|input|, |scale|, |zeroPoint|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |condition|, |trueValue|, and |falseValue| returns false, then [=exception/throw=] a {{TypeError}}. @@ -3713,6 +3760,7 @@ partial dictionary MLOpSupportLimits { The behavior of this operation can be [EMULATED]
+    ----
     function where(builder, condition, trueValue, falseValue) {
       const c = builder.clamp(condition, {'minValue': 0, 'maxValue': 1});
       builder.add(
@@ -3726,62 +3774,63 @@ partial dictionary MLOpSupportLimits {
 
 
 ### quantizeLinear ### {#api-mlgraphbuilder-quantizelinear}
-!!!Select the values from the trueValue or the falseValue tensor depending on the corresponding values of the condition tensor, where non-zero is true and zero is false. The condition tensor is often the output of one of the element-wise logical operations.
+!!!
+Quantizes a floating point tensor to integer point space using the scale and zero-point bias.
 
 The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors.
 
 
 
-
+
   **Arguments:**
-    - condition: an {{MLOperand}}. The condition tensor.
-    - trueValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to true.
-    - falseValue: an {{MLOperand}}. The tensor from which the value is selected when the condition of the corresponding element is set to false.
+    - input: an {{MLOperand}}. The input tensor.
+    - scale: an {{MLOperand}}. !!!
+    - zeroPoint: an {{MLOperand}}. !!!
     - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation.
 
-   **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from either the trueValue or the falseValue tensor.
+   **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from ------.
-{{MLWhereSupportLimits}} has the following members: -
- : condition - :: {{MLSupportLimits}} for condition operand. - : trueValue - :: {{MLSupportLimits}} for trueValue operand. - : falseValue - :: {{MLSupportLimits}} for falseValue operand. +{{MLQuantizationSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : scale + :: {{MLSupportLimits}} for scale operand. + : zeroPoint + :: {{MLSupportLimits}} for zeroPoint operand. : output :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/where()}}: +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/quantizeLinear()}}:
- : where - :: Support limits for operator {{MLGraphBuilder/where()}}. + : quantizeLinear + :: Support limits for operator {{MLGraphBuilder/quantizeLinear()}}.
- The where(|condition|, |trueValue|, |falseValue|, |options|) method steps are: + The quantizeLinear(|input|, |scale|, |zeroPoint|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |condition|, |trueValue|, and |falseValue| returns false, then [=exception/throw=] a {{TypeError}}. @@ -3807,6 +3856,7 @@ partial dictionary MLOpSupportLimits { The behavior of this operation can be [EMULATED]
+    ------
     function where(builder, condition, trueValue, falseValue) {
       const c = builder.clamp(condition, {'minValue': 0, 'maxValue': 1});
       builder.add(
@@ -4164,7 +4214,9 @@ partial dictionary MLOpSupportLimits {
 
### gatherElements ### {#api-mlgraphbuilder-gatherelements} -!!!Gather values of the input tensor along an axis according to the indices. +!!! +Gather values of the input tensor along an axis according to the indices. + + +{{MLLayerNormalizationOptions}} has the following members: +
+ : axes + :: + The indices to the input dimensions to reverse. When this member is not present, it is treated as if all dimensions are reversed. If explicitly passed as empty, no dimensions are reversed. +
+ +
+ **Arguments:** + - input: an {{MLOperand}}. The input tensor. + - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + + **Returns:** + - an {{MLOperand}}. The output tensor of the same shape as *input*. +
+ +
Constraints for element-wise unary options
+ + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/reverse()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=][=/any rank|N=]
*output*[=/same type as|same as=] {{input}}[=/same rank as|same as=] {{input}}
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/reverse()}}: +
+ : reverse + :: Support limits for operator {{MLGraphBuilder/reverse()}}. +
+ +
+ + The reverse(|input|, |options|) method steps are: + + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + 1. If |input|'s [=MLOperand/dataType=] is not one of its [=/allowed data types=] (according to [this table](#constraints-reverse)), then [=exception/throw=] a {{TypeError}}. + 1. *Make graph connections:* + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "reverse" operation and |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. +
### scatterElements ### {#api-mlgraphbuilder-scatterelements} -!!!Scatter values of the input tensor along an axis according to the indices. +!!! +Scatter values of the input tensor along an axis according to the indices. + -{{MLGatherOptions}} has the following members: -
+{{MLScatterOptions}} has the following members: +
: axis :: - The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. + The axis along which the scattered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor.
-
+
**Arguments:** - - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + - input: an {{MLOperand}}. The input N-D tensor from which the values are scattered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to scatter. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - updates: an {{MLOperand}}. !!!! + - options: an optional {{MLScatterOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
-{{MLGatherSupportLimits}} has the following members: -
+{{MLScatterSupportLimits}} has the following members: +
: input :: {{MLSupportLimits}} for input operand. : indices @@ -7903,26 +8038,26 @@ partial dictionary MLOpSupportLimits { :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterElements()}}:
- : gather - :: Support limits for operator {{MLGraphBuilder/gather()}}. + : scatterElements + :: Support limits for operator {{MLGraphBuilder/scatterElements()}}.
- The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/scatterElements(input, indices, options)/indices}} parameter to {{MLGraphBuilder/scatterElements()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The gather(|input|, |indices|, |options|) method steps are: + The scatterElements(|input|, |indices|, |updates|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. - 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. Let |axis| be |options|.{{MLScatterOptions/axis}}. 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. 1. Let |dimCount| be zero. 1. Let |rankOutput| be zero. @@ -7945,7 +8080,7 @@ partial dictionary MLOpSupportLimits { 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. - 1. Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Let |operator| be an [=operator=] for the "scatterElements" operation, given |input|, |indices|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. 1. Set |operator|'s [=operator/output=] to |output|. @@ -7955,7 +8090,7 @@ partial dictionary MLOpSupportLimits {
- Examples of how gather works in different slicing schemes. + Examples of how scatterElements works in different slicing schemes.
     // input of shape [4,3]:
@@ -7982,7 +8117,7 @@ partial dictionary MLOpSupportLimits {
     // output of shape [2,3]:
     //   [[30, 31, 32],
     //    [10, 11, 12]]
-    const output1 = builder.gather(input, indices1);
+    const output1 = builder.scatterElements(input, indices1, updates);
 
     // axis = 1
     // indices of shape [3]:
@@ -7992,7 +8127,7 @@ partial dictionary MLOpSupportLimits {
     //    [12, 11, 11],
     //    [22, 21, 21],
     //    [32, 31, 31]]
-    const output2 = builder.gather(input, indices2, {axis: 1});
+    const output2 = builder.scatterElements(input, indices2, updates, {axis: 1});
 
     // axis = 1
     // indices of shape [2,2]:
@@ -8003,54 +8138,57 @@ partial dictionary MLOpSupportLimits {
     //    [[10, 11], [11, 12]],
     //    [[20, 21], [21, 22]],
     //    [[30, 31], [31, 32]]]
-    const output3 = builder.gather(input, indices3, {axis: 1});
+    const output3 = builder.scatterElements(input, indices3, updates, {axis: 1});
   
### scatterNd ### {#api-mlgraphbuilder-scatternd} -!!!Scatter values of the input tensor along an axis according to the indices. +!!! +Scatter values of the input tensor along an axis according to the indices. + -{{MLGatherOptions}} has the following members: -
+{{MLScatterOptions}} has the following members: +
: axis :: - The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. + The axis along which the scattered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor.
-
+
**Arguments:** - - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. + - input: an {{MLOperand}}. The input N-D tensor from which the values are scattered. + - indices: an {{MLOperand}}. The indices N-D tensor of the input values to scatter. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. + - options: an optional {{MLScatterOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
-{{MLGatherSupportLimits}} has the following members: -
+{{MLScatterSupportLimits}} has the following members: +
: input :: {{MLSupportLimits}} for input operand. : indices @@ -8059,26 +8197,26 @@ partial dictionary MLOpSupportLimits { :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterNd()}}:
- : gather - :: Support limits for operator {{MLGraphBuilder/gather()}}. + : scatterNd + :: Support limits for operator {{MLGraphBuilder/scatterNd()}}.
- The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/scatterNd(input, indices, options)/indices}} parameter to {{MLGraphBuilder/scatterNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The gather(|input|, |indices|, |options|) method steps are: + The scatterNd(|input|, |indices|, |updates|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. - 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. + 1. Let |axis| be |options|.{{MLScatterOptions/axis}}. 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. 1. Let |dimCount| be zero. 1. Let |rankOutput| be zero. @@ -8101,7 +8239,7 @@ partial dictionary MLOpSupportLimits { 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. 1. *Make graph connections:* 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. - 1. Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Let |operator| be an [=operator=] for the "scatterNd" operation, given |input|, |indices|, and |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. 1. Set |operator|'s [=operator/output=] to |output|. @@ -8111,7 +8249,7 @@ partial dictionary MLOpSupportLimits {
- Examples of how gather works in different slicing schemes. + Examples of how scatterNd works in different slicing schemes.
     // input of shape [4,3]:
@@ -8138,7 +8276,7 @@ partial dictionary MLOpSupportLimits {
     // output of shape [2,3]:
     //   [[30, 31, 32],
     //    [10, 11, 12]]
-    const output1 = builder.gather(input, indices1);
+    const output1 = builder.scatterNd(input, indices1, updates);
 
     // axis = 1
     // indices of shape [3]:
@@ -8148,7 +8286,7 @@ partial dictionary MLOpSupportLimits {
     //    [12, 11, 11],
     //    [22, 21, 21],
     //    [32, 31, 31]]
-    const output2 = builder.gather(input, indices2, {axis: 1});
+    const output2 = builder.scatterNd(input, indices2, updates, {axis: 1});
 
     // axis = 1
     // indices of shape [2,2]:
@@ -8159,7 +8297,7 @@ partial dictionary MLOpSupportLimits {
     //    [[10, 11], [11, 12]],
     //    [[20, 21], [21, 22]],
     //    [[30, 31], [31, 32]]]
-    const output3 = builder.gather(input, indices3, {axis: 1});
+    const output3 = builder.scatterNd(input, indices3, updates, {axis: 1});
   
@@ -8245,86 +8383,42 @@ partial dictionary MLOpSupportLimits {
- -### sign ### {#api-mlgraphbuilder-sign} -!!!Compute the softplus function of the input tensor. The calculation follows the expression `ln(1 + exp(x))`. - - - -
- **Arguments:** - - input: an {{MLOperand}}. The input tensor. - - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. - - **Returns:** - - an {{MLOperand}}. The output tensor of the same shape as *input*. -
- -{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/softplus()}}: -
- : softplus - :: Support limits for operator {{MLGraphBuilder/softplus()}}. -
- -
- - The softplus(|input|, |options|) method steps are: - - 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |input|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"float32"}} or {{MLOperandDataType/"float16"}}, then [=exception/throw=] a {{TypeError}}. - 1. *Make graph connections:* - 1. Let |output| be the result of [=copying an MLOperand=] given |input|. - 1. Let |operator| be an [=operator=] for the "softplus" operation and |options|. - 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/input=] to |input|. - 1. Set |operator|'s [=operator/output=] to |output|. - 1. Return |output|. -
- -
-
- - The behavior of this operation can be [EMULATED] - -
-    function softplus(builder, input) {
-      return builder.log(
-        builder.add(builder.exp(input), builder.constant(input.dataType, 1)));
-    }
-    
-
-
- - ### slice ### {#api-mlgraphbuilder-slice} Produce a slice of the input tensor. + +{{MLSliceOptions}} has the following members: +
+    : strides
+    ::
+        The stride to step over each input along each axis.
+        The length of the strides array must equal the [=MLOperand/rank=] of the input tensor.
+        The default is an array of length [=MLOperand/rank=] consisting of all 1's.
+        e.g. [1,1,1] for a 3-D tensor.
+        Strides must be greater than zero.
+
+
**Arguments:** - input: an {{MLOperand}}. The input tensor. - starts: [=sequence=]<{{unsigned long}}>. The starting index to slice of each input dimension, of length N where N is the [=MLOperand/rank=] of the input tensor. For each dimension *d* of *input*, *starts[d]* indicates the starting index to slice in that dimension. The starting index must be in the range [0, input size - 1] in that dimension. - sizes: [=sequence=]<{{unsigned long}}>. The number of elements to slice of each input dimension, of length N where N is the [=MLOperand/rank=] of the input tensor. For each dimension *d* of *input*, *sizes[d]* indicates the number of elements to slice in that dimension. The size must not be 0 and must satisfy the constraint `starting index + size <= input size` in that dimension. - - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. + - options: an {{MLSliceOptions}}. Specifies the optional parameters of the operation. **Returns:** an {{MLOperand}}. The output tensor of the same rank as the input tensor with tensor values stripped to the specified starting and ending indices in each dimension.
@@ -8371,6 +8465,7 @@ partial dictionary MLOpSupportLimits { 1. If |starts|[|index|] is greater than or equal to |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}. 1. If |starts|[|index|] + |sizes|[|index|] is greater than |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}. + 1. !!!!----- 1. *Make graph connections:* 1. Let |output| be the result of [=copying an MLOperand=] given |input|. 1. Let |operator| be an [=operator=] for the "slice" operation, given |starts|, |sizes|, and |options|. @@ -8847,160 +8942,56 @@ partial dictionary MLOpSupportLimits {
### tile ### {#api-mlgraphbuilder-tile} -!!!Gather values of the input tensor along an axis according to the indices. - -{{MLGatherOptions}} has the following members: -
- : axis - :: - The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. -
- -
+
    **Arguments:**
-    - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered.
-    - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension.
-    - options: an optional {{MLGatherOptions}}. The optional parameters of the operation.
+    - input: an {{MLOperand}}. The input N-D tensor.
+    - repetitions: A count for each dimension of how many times to repeat that dimension. The length of the repetitions list must match the input rank, using 1's for any axis that should retain the same size.
+    - options: an optional {{MLOperatorOptions}}. The optional parameters of the operation.

-    **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
+    **Returns:** an {{MLOperand}}. The tiled N-D tensor, where the size of each output dimension is the size of the corresponding input dimension multiplied by its repetition count.
-{{MLGatherSupportLimits}} has the following members: -
- : input - :: {{MLSupportLimits}} for input operand. - : indices - :: {{MLSupportLimits}} for indices operand. - : output - :: {{MLSupportLimits}} for output operand. -
- -{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/tile()}}:
- : gather - :: Support limits for operator {{MLGraphBuilder/gather()}}. + : tile + :: Support limits for operator {{MLGraphBuilder/tile()}}.
-
- The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. -
-
- The gather(|input|, |indices|, |options|) method steps are: + The tile(|input|, |repetitions|, |options|) method steps are: - 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input| and |indices| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |indices|'s [=MLOperand/dataType=] is not {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, then [=exception/throw=] a {{TypeError}}. - 1. Let |shapeInput| be |input|'s [=MLOperand/shape=] and |rankInput| be |shapeInput|'s [=MLOperand/rank=]. - 1. Let |shapeIndices| be |indices|'s [=MLOperand/shape=]. - 1. Let |axis| be |options|.{{MLGatherOptions/axis}}. - 1. If |axis| is greater than or equal to |rankInput|, then [=exception/throw=] a {{TypeError}}. - 1. Let |dimCount| be zero. - 1. Let |rankOutput| be zero. - 1. Let |shapeOutput| be an empty list. - 1. [=list/For each=] |size| of |shapeInput|: - 1. If |dimCount| is equal to |axis| then [=iteration/break=]. - 1. Set |shapeOutput|[|dimCount|] to |size|. - 1. Increment |dimCount| by one. - 1. Set |rankOutput| to |dimCount|. - 1. Let |dimCount| be zero. - 1. [=list/For each=] |size| of |shapeIndices|: - 1. Set |shapeOutput|[|rankOutput| + |dimCount|] to |size|. - 1. Increment |dimCount| by one. - 1. Set |rankOutput| to |rankOutput| + |dimCount|. - 1. Let |dimCount| be zero. - 1. [=list/For each=] |size| of |shapeInput|: - 1. If |dimCount| is less than or equal to |axis| then [=iteration/continue=]. - 1. Set |shapeOutput|[|rankOutput| + |dimCount| - |axis| - 1] to |size|. - 1. Increment |dimCount| by one. - 1. Let |desc| be the result of [=creating an MLOperandDescriptor=] given |input|'s [=MLOperand/dataType=] and |shapeOutput|. + 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. + 1. 
If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. + !!!1. If |options|.{{MLTransposeOptions/permutation}} does not [=map/exist=], let |options|.{{MLTransposeOptions/permutation}} be the reversed sequence of all indices for |input|'s [=MLOperand/shape=]. + 1. Otherwise if |options|.{{MLTransposeOptions/permutation}} [=map/exists=]: + 1. If its [=list/size=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. If its values are not in [=the range=] 0 to |input|'s [=MLOperand/rank=] exclusive, then [=exception/throw=] a {{TypeError}}. + 1. If it contains duplicate values, then [=exception/throw=] a {{TypeError}}. 1. *Make graph connections:* - 1. Let |output| be the result of [=creating an MLOperand=] given |desc|. - 1. Let |operator| be an [=operator=] for the "gather" operation, given |input|, |indices|, and |options|. + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "transpose" operation, given |options|. 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/inputs=] to |input| and |indices|. + 1. Set |operator|'s [=operator/input=] to |input|. 1. Set |operator|'s [=operator/output=] to |output|. 1. Return |output|.
-
-
- - Examples of how gather works in different slicing schemes. - -
-    // input of shape [4,3]:
-    //   [[ 0,  1,  2],
-    //    [10, 11, 12],
-    //    [20, 21, 22],
-    //    [30, 31, 32]]
-    const input = builder.constant(
-      {shape: [4, 3]},
-      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
-
-    const indices1 = builder.constant(
-      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
-
-    const indices2 = builder.constant(
-      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
-
-    const indices3 = builder.constant(
-      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
-
-    // axis = 0 (default)
-    // indices of shape [2]:
-    //   [3,1]
-    // output of shape [2,3]:
-    //   [[30, 31, 32],
-    //    [10, 11, 12]]
-    const output1 = builder.gather(input, indices1);
-
-    // axis = 1
-    // indices of shape [3]:
-    //   [2,1,1]
-    // output of shape [4,3]:
-    //   [[ 2,  1,  1],
-    //    [12, 11, 11],
-    //    [22, 21, 21],
-    //    [32, 31, 31]]
-    const output2 = builder.gather(input, indices2, {axis: 1});
-
-    // axis = 1
-    // indices of shape [2,2]:
-    //   [[0, 1],
-    //    [1, 2]]
-    // output of shape [4,2,2]:
-    //   [[[ 0,  1], [ 1,  2]],
-    //    [[10, 11], [11, 12]],
-    //    [[20, 21], [21, 22]],
-    //    [[30, 31], [31, 32]]]
-    const output3 = builder.gather(input, indices3, {axis: 1});
-  
-
-
- ### transpose ### {#api-mlgraphbuilder-transpose} Permute the dimensions of the input tensor according to the *permutation* argument. - +
@@ -3307,12 +3306,12 @@ partial dictionary MLOpSupportLimits { - *lesserOrEqual*: Compare if the values of the first input tensor is lesser or equal, element-wise. - *logicalNot*: Invert the values of the input tensor to values 0 or 1, element-wise. Specifically, when the input value is non-zero, invert it to 0. Conversely, for a zero input value, invert it to 1. - *logicalAnd*: Compute the logical *and* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. - - *logicalOr*: Compute the logical *and* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. - - *logicalXor*: Compute the logical *and* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalOr*: Compute the logical *or* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1. + - *logicalXor*: Compute the logical *xor* operator, element-wise, treating any non-zero value as true and returning elements of 0 or 1.
-Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/lesserOrEqual()}} can each be implemented in terms of operations {{MLGraphBuilder/logicalNot()}}, {{MLGraphBuilder/lesser()}}, and {{MLGraphBuilder/greater()}} in other words `builder.greaterOrEqual(a, b)` is `builder.logicalNot(builder.lesser(a, b))`, they are specifically defined to handle NaN cases and for performance reason to avoid double comparisons. +Although operations {{MLGraphBuilder/greaterOrEqual()}} and {{MLGraphBuilder/lesserOrEqual()}} can each be implemented in terms of operations {{MLGraphBuilder/logicalNot()}}, {{MLGraphBuilder/lesser()}}, and {{MLGraphBuilder/greater()}} (in other words `builder.greaterOrEqual(a, b)` is `builder.logicalNot(builder.lesser(a, b))`), they are specifically defined to handle NaN cases and for performance reason to avoid double comparisons.
@@ -3678,10 +3677,9 @@ partial dictionary MLOpSupportLimits { ### dequantizeLinear ### {#api-mlgraphbuilder-dequantizelinear} -!!! -Dequantizes an integer tensor to floating point space using the scale and zero-point bias. +Dequantizes an integer tensor to floating point space using the scale and zero-point bias, where `output = (input - zeroPoint) * scale`. -The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors. +The operation will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The input tensors must be [=bidirectionally broadcastable=]. The [=MLOperand/rank=] of the output tensor is the maximum [=MLOperand/rank=] of the input tensors. For each dimension of the output tensor, its size is the maximum size along that dimension of the input tensors, and each dimension must be blockwise compatible with the output (e.g. given an input shape [12], scales of the following shapes are blockwise compatible {[1], [3], [4], [6], [12]} as they are all multiples of the input dimensions, but a shape of [5] would not be). -
+
**Arguments:** - input: an {{MLOperand}}. The condition tensor. - - scale: an {{MLOperand}}. !!! - - zeroPoint: an {{MLOperand}}. !!! + - scale: an {{MLOperand}}. The scale tensor to multiply each input value by after adjusting by the zero point. + - zeroPoint: an {{MLOperand}}. The zero point tensor to subtract from each input value. - options: an {{MLOperatorOptions}}. Specifies the optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from ------. + **Returns:** an {{MLOperand}}. The output tensor that contains the values computed from the quantization.
{{MLQuantizationSupportLimits}} has the following members: @@ -3830,24 +3806,11 @@ partial dictionary MLOpSupportLimits {
- The quantizeLinear(|input|, |scale|, |zeroPoint|, |options|) method steps are: + The where(|input|, |scale|, |zeroPoint|, |options|) method steps are: 1. If [=this=].{{MLGraphBuilder/[[hasBuilt]]}} is true, then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. - 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |condition|, |trueValue|, and |falseValue| returns false, then [=exception/throw=] a {{TypeError}}. - 1. If |condition|'s [=MLOperand/dataType=] is not equal to {{MLOperandDataType/"uint8"}}, then [=exception/throw=] a {{TypeError}}. - 1. If |trueValue|'s [=MLOperand/dataType=] is not equal to |falseValue|'s [=MLOperand/dataType=], then [=exception/throw=] a {{TypeError}}. - 1. Let |outputShape| be the result of [=bidirectionally broadcasting=] |trueValue|'s [=MLOperand/shape=] and |falseValue|'s [=MLOperand/shape=]. - 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. - 1. Set |outputShape| to the result of [=bidirectionally broadcasting=] |condition|'s [=MLOperand/shape=] and |outputShape]. - 1. If that returns failure, then [=exception/throw=] a {{TypeError}}. - 1. Let |descriptor| be the result of [=creating an MLOperandDescriptor=] given |trueValue|'s [=MLOperand/dataType=] and |outputShape|. - 1. *Make graph connections:* - 1. Let |output| be the result of [=creating an MLOperand=] given [=this=] and |descriptor|. - 1. Let |operator| be an [=operator=] for the "where" operation, given |condition|, |trueValue|, |falseValue|, and |options|. - 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. - 1. Set |operator|'s [=operator/inputs=] to |condition|, |trueValue| and |falseValue|. - 1. Set |operator|'s [=operator/output=] to |output|. - 1. Return |output|. + 1. If [=MLGraphBuilder/validating operand=] with [=this=] and any of |input|, |scale|, and |zeroPoint| returns false, then [=exception/throw=] a {{TypeError}}. + TODO: Add validation for scale and zero point shape.
@@ -3856,14 +3819,7 @@ partial dictionary MLOpSupportLimits { The behavior of this operation can be [EMULATED]
-    ------
-    function where(builder, condition, trueValue, falseValue) {
-      const c = builder.clamp(condition, {'minValue': 0, 'maxValue': 1});
-      builder.add(
-        builder.mul(trueValue, builder.cast(c, trueValue.dataType)),
-        builder.mul(
-          falseValue, builder.cast(builder.logicalNot(c), falseValue.dataType)));
-    }
+    TODO:
     
@@ -4214,7 +4170,6 @@ partial dictionary MLOpSupportLimits { ### gatherElements ### {#api-mlgraphbuilder-gatherelements} -!!! Gather values of the input tensor along an axis according to the indices. {{MLLayerNormalizationOptions}} has the following members: -
+
: axes :: The indices to the input dimensions to reverse. When this member is not present, it is treated as if all dimensions are reversed. If explicitly passed as empty, no dimensions are reversed. @@ -7985,7 +7874,6 @@ partial dictionary MLOpSupportLimits { ### scatterElements ### {#api-mlgraphbuilder-scatterelements} -!!! Scatter values of the input tensor along an axis according to the indices. -{{MLGatherOptions}} has the following members: -
- : axis - :: - The axis along which the gathered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. -
- -
+
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - options: an optional {{MLGatherOptions}}. The optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input*.
{{MLGatherSupportLimits}} has the following members: @@ -4210,14 +4264,40 @@ partial dictionary MLOpSupportLimits { :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gather()}}: +
Constraints for {{MLGraphBuilder/cumulativeSum()}}
+ + + + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/gatherElements()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=]> 1
{{indices}}{{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}}*input*'s [=MLOperand/rank=]
*output*[=/same type as|same as=] {{input}}*input*'s [=MLOperand/rank=]
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherElements()}}:
- : gather - :: Support limits for operator {{MLGraphBuilder/gather()}}. + : gatherElements + :: Support limits for operator {{MLGraphBuilder/gatherElements()}}.
- The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/gatherElements(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherElements()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
@@ -4230,60 +4310,15 @@ partial dictionary MLOpSupportLimits {
- Examples of how gather works in different slicing schemes. + Examples of how gatherElements works in different slicing schemes.
-    // input of shape [4,3]:
-    //   [[ 0,  1,  2],
-    //    [10, 11, 12],
-    //    [20, 21, 22],
-    //    [30, 31, 32]]
-    const input = builder.constant(
-      {shape: [4, 3]},
-      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
-
-    const indices1 = builder.constant(
-      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
-
-    const indices2 = builder.constant(
-      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
-
-    const indices3 = builder.constant(
-      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
-
-    // axis = 0 (default)
-    // indices of shape [2]:
-    //   [3,1]
-    // output of shape [2,3]:
-    //   [[30, 31, 32],
-    //    [10, 11, 12]]
-    const output1 = builder.gather(input, indices1);
-
-    // axis = 1
-    // indices of shape [3]:
-    //   [2,1,1]
-    // output of shape [4,3]:
-    //   [[ 2,  1,  1],
-    //    [12, 11, 11],
-    //    [22, 21, 21],
-    //    [32, 31, 31]]
-    const output2 = builder.gather(input, indices2, {axis: 1});
-
-    // axis = 1
-    // indices of shape [2,2]:
-    //   [[0, 1],
-    //    [1, 2]]
-    // output of shape [4,2,2]:
-    //   [[[ 0,  1], [ 1,  2]],
-    //    [[10, 11], [11, 12]],
-    //    [[20, 21], [21, 22]],
-    //    [[30, 31], [31, 32]]]
-    const output3 = builder.gather(input, indices3, {axis: 1});
+    TODO:
   
-### gatherNd ### {#api-mlgraphbuilder-gathernd} +### gatherND ### {#api-mlgraphbuilder-gathernd} Gather values of the input tensor along an axis according to the indices. -
+
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor from which the values are gathered. - indices: an {{MLOperand}}. The indices N-D tensor of the input values to gather. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension. - options: an optional {{MLOperatorOptions}}. The optional parameters of the operation. - **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1. + **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the *input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1.
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherNd()}}: + + + + + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/gatherND()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=]> 1
{{indices}}{{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}}> 1
*output*[=/same type as|same as=] {{input}}*input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/gatherND()}}:
- : gatherNd - :: Support limits for operator {{MLGraphBuilder/gatherNd()}}. + : gatherND + :: Support limits for operator {{MLGraphBuilder/gatherND()}}.
- The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/gatherND(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gatherND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The gather(|input|, |indices|, |options|) method steps are: + The gatherND(|input|, |indices|, |options|) method steps are: TODO:
@@ -4327,55 +4388,10 @@ partial dictionary MLOpSupportLimits {
- Examples of how gather works in different slicing schemes. + Examples of how gatherND works in different slicing schemes.
-    // input of shape [4,3]:
-    //   [[ 0,  1,  2],
-    //    [10, 11, 12],
-    //    [20, 21, 22],
-    //    [30, 31, 32]]
-    const input = builder.constant(
-      {shape: [4, 3]},
-      new Float32Array([0, 1, 2, 10, 11, 12, 20, 21, 22, 30, 31, 32]));
-
-    const indices1 = builder.constant(
-      {dataType: 'uint32', shape: [2]}, new Uint32Array([3, 1]));
-
-    const indices2 = builder.constant(
-      {dataType: 'uint32', shape: [3]}, new Uint32Array([2, 1, 1]));
-
-    const indices3 = builder.constant(
-      {dataType: 'uint32', shape: [2, 2]}, new Uint32Array([0, 1, 1, 2]));
-
-    // axis = 0 (default)
-    // indices of shape [2]:
-    //   [3,1]
-    // output of shape [2,3]:
-    //   [[30, 31, 32],
-    //    [10, 11, 12]]
-    const output1 = builder.gather(input, indices1);
-
-    // axis = 1
-    // indices of shape [3]:
-    //   [2,1,1]
-    // output of shape [4,3]:
-    //   [[ 2,  1,  1],
-    //    [12, 11, 11],
-    //    [22, 21, 21],
-    //    [32, 31, 31]]
-    const output2 = builder.gather(input, indices2, {axis: 1});
-
-    // axis = 1
-    // indices of shape [2,2]:
-    //   [[0, 1],
-    //    [1, 2]]
-    // output of shape [4,2,2]:
-    //   [[[ 0,  1], [ 1,  2]],
-    //    [[10, 11], [11, 12]],
-    //    [[20, 21], [21, 22]],
-    //    [[30, 31], [31, 32]]]
-    const output3 = builder.gather(input, indices3, {axis: 1});
+    TODO:
   
@@ -7814,7 +7830,7 @@ partial dictionary MLOpSupportLimits { }; -{{MLLayerNormalizationOptions}} has the following members: +{{MLReverseOptions}} has the following members:
: axes :: @@ -7874,7 +7890,7 @@ partial dictionary MLOpSupportLimits {
### scatterElements ### {#api-mlgraphbuilder-scatterelements} -Scatter values of the input tensor along an axis according to the indices. +Scatter values from the updates tensor along an axis according to the indices in place of the input tensor. -{{MLScatterOptions}} has the following members: -
- : axis - :: - The axis along which the scattered values are obtained. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. -
- -
+
    **Arguments:**
    - input: an {{MLOperand}}. The input N-D tensor from which the values are scattered.
-    - indices: an {{MLOperand}}. The indices N-D tensor of the input values to scatter. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension.
+    - indices: an {{MLOperand}}. TODO: Elaborate on indices coordinate order. The indices N-D tensor of the input values to scatter. The values must be of type {{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}} or {{MLOperandDataType/"int64"}}, and must be in the range -N (inclusive) to N (exclusive) where N is the size of the input dimension indexed by *options.axis*, and a negative index means indexing from the end of the dimension.
+    - updates: an {{MLOperand}}. New values to replace atop the input.
    - options: an optional {{MLScatterOptions}}. The optional parameters of the operation.

-    **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
+    **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to *input*'s [=MLOperand/rank=].
{{MLScatterSupportLimits}} has the following members: @@ -8003,23 +8041,56 @@ partial dictionary MLOpSupportLimits { :: {{MLSupportLimits}} for input operand. : indices :: {{MLSupportLimits}} for indices operand. + : updates + :: {{MLSupportLimits}} for updates operand. : output :: {{MLSupportLimits}} for output operand.
-{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterNd()}}: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/scatterND()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=]> 1
{{indices}}{{MLOperandDataType/"int32"}}, {{MLOperandDataType/"uint32"}}, {{MLOperandDataType/"int64"}}> 1
{{updates}}[=/same type as|same as=] {{input}}*input*'s [=MLOperand/rank=] + *indices*'s [=MLOperand/rank=] - *indices*'s [=MLOperand/shape=][-1] - 1
*output*[=/same type as|same as=] {{input}}> 1
+ +{{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/scatterND()}}:
- : scatterNd - :: Support limits for operator {{MLGraphBuilder/scatterNd()}}. + : scatterND + :: Support limits for operator {{MLGraphBuilder/scatterND()}}.
- The {{MLGraphBuilder/scatterNd(input, indices, options)/indices}} parameter to {{MLGraphBuilder/scatterNd()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index. + The {{MLGraphBuilder/scatterND(input, indices, options)/indices}} parameter to {{MLGraphBuilder/scatterND()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
- The scatterNd(|input|, |indices|, |updates|, |options|) method steps are: + The scatterND(|input|, |indices|, |updates|, |options|) method steps are: TODO:
@@ -8027,7 +8098,7 @@ partial dictionary MLOpSupportLimits {
- Examples of how scatterNd works in different slicing schemes. + Examples of how scatterND works in different slicing schemes.
     TODO:
@@ -8191,6 +8262,8 @@ partial dictionary MLOpSupportLimits {
     1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}.
     1. If any of |sizes|'s [=list/items=] are 0, then [=exception/throw=] a {{TypeError}}.
     1. If |starts|'s [=list/size=] and |sizes|'s [=list/size=] are not both equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}.
+    1. If |options|.{{MLSliceOptions/strides}} [=map/exists=]:
+        1. If |options|.{{MLSliceOptions/strides}}'s [=list/size=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}.
     1. [=list/For each=] |index| in [=the range=] 0 to |input|'s [=MLOperand/rank=], exclusive:
         1. If |sizes|[|index|] is 0, then [=exception/throw=] a {{TypeError}}.
 
@@ -8198,7 +8271,8 @@ partial dictionary MLOpSupportLimits {
 
         1. If |starts|[|index|] is greater than or equal to |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}.
         1. If |starts|[|index|] + |sizes|[|index|] is greater than |input|'s [=MLOperand/shape=][|index|], then [=exception/throw=] a {{TypeError}}.
-        1. TODO: Validate steps.
+        1. If |options|.{{MLSliceOptions/strides}} [=map/exists=]:
+            1. If |options|.{{MLSliceOptions/strides}}[|index|] is less than 1, then [=exception/throw=] a {{TypeError}}.
     1. *Make graph connections:*
         1. Let |output| be the result of [=copying an MLOperand=] given |input|.
         1. Let |operator| be an [=operator=] for the "slice" operation, given |starts|, |sizes|, and |options|.
@@ -8675,7 +8749,7 @@ partial dictionary MLOpSupportLimits {
 
### tile ### {#api-mlgraphbuilder-tile} -Repeat a tensor the number of times along each dimension. +Repeat a tensor the given number of times along each dimension. -
+
**Arguments:** - input: an {{MLOperand}}. The input N-D tensor. - - repetitions: A count per each dimension of how many times to repeat that dimension. The repetitions count must match the input rank, using 1's for any axis that should retain the same size. + - repetitions: A count per dimension of how many times to repeat that dimension. The |repetitions| [=list/size=] must match the |input|'s [=MLOperand/rank=], using 1's for any axis that should retain the same size. - options: an optional {{MLOperatorOptions}}. The optional parameters of the operation. **Returns:** an {{MLOperand}}. The tiled N-D tensor.
+ + + + + + + + + + + + + + + + + + + +
Constraints for {{MLGraphBuilder/tile()}}
operand[=/allowed data types=][=/allowed ranks=]
{{input}}[=/any data type|any=][=/any rank|N=]
*output*[=/same type as|same as=] {{input}}[=/same rank as|same as=] {{input}}
+ {{MLOpSupportLimits}} has the following members for {{MLGraphBuilder/tile()}}:
: tile @@ -8710,7 +8805,18 @@ partial dictionary MLOpSupportLimits { 1. If [=this=] [=MLGraphBuilder/can not build=], then [=exception/throw=] an "{{InvalidStateError}}" {{DOMException}}. 1. If [=MLGraphBuilder/validating operand=] with [=this=] and |input| returns false, then [=exception/throw=] a {{TypeError}}. - TODO: + 1. If |repetitions|'s [=list/size=] is not equal to |input|'s [=MLOperand/rank=], then [=exception/throw=] a {{TypeError}}. + 1. If |repetitions|'s values contain 0's, then [=exception/throw=] a {{TypeError}}. + + Issue(391): If 0-size dimensions are allowed, revise these steps. + + 1. *Make graph connections:* + 1. Let |output| be the result of [=copying an MLOperand=] given |input|. + 1. Let |operator| be an [=operator=] for the "tile" operation, given |options|. + 1. Set |output|.{{MLOperand/[[operator]]}} to |operator|. + 1. Set |operator|'s [=operator/input=] to |input|. + 1. Set |operator|'s [=operator/output=] to |output|. + 1. Return |output|. ### transpose ### {#api-mlgraphbuilder-transpose} From 4da4febdded3f8c22ef72fcc85e67a1013c4a1df Mon Sep 17 00:00:00 2001 From: Dwayne Robinson Date: Thu, 16 Jan 2025 19:24:18 -0800 Subject: [PATCH 6/6] Rank typo for Q and DQ --- index.bs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/index.bs b/index.bs index a9707d4f..b3ec9d12 100644 --- a/index.bs +++ b/index.bs @@ -3659,9 +3659,9 @@ partial dictionary MLOpSupportLimits {
     function sign(builder, input, options) {
-      let zero = builder.constant(input.dataType, 0);
-      let positiveOne = builder.constant(input.dataType, 1);
-      let negativeOne = builder.constant(input.dataType, -1);
+      const zero = builder.constant(input.dataType, 0);
+      const positiveOne = builder.constant(input.dataType, 1);
+      const negativeOne = builder.constant(input.dataType, -1);
 
       return builder.where(
         builder.greater(input, zero),
@@ -3738,7 +3738,7 @@ partial dictionary MLOpSupportLimits {
   
     *output*
     [=/same type as|same as=] {{scale}}
-    4
+    [=/same rank as|same as=] {{input}}
   
 
 
@@ -3843,7 +3843,7 @@ partial dictionary MLOpSupportLimits {
   
     *output*
     [=/same type as|same as=] {{zeroPoint}}
-    4
+    [=/same rank as|same as=] {{input}}