diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 6ef033dec..4006ff745 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -175,11 +175,12 @@ * [nn.gemm](framework/operators/neural-network/nn.gemm.md) * [nn.grid\_sample](framework/operators/neural-network/nn.grid\_sample.md) * [nn.col2im](framework/operators/neural-network/nn.col2im.md) - * [nn.conv_transpose](framework/operators/neural-network/nn.conv\_transpose.md) + * [nn.conv\_transpose](framework/operators/neural-network/nn.conv\_transpose.md) * [nn.conv](framework/operators/neural-network/nn.conv.md) * [nn.depth_to_space](framework/operators/neural-network/nn.depth_to_space.md) * [nn.space_to_depth](framework/operators/neural-network/nn.space_to_depth.md) * [nn.max\_pool](framework/operators/neural-network/nn.max\_pool.md) + * [nn.deform\_conv](framework/operators/neural-network/nn.deform\_conv_.md) * [Machine Learning](framework/operators/machine-learning/README.md) * [Tree Ensemble Classifier](framework/operators/machine-learning/tree-ensemble-classifier/README.md) * [tree\_ensemble\_classifier.predict](framework/operators/machine-learning/tree-ensemble-classifier/tree\_ensemble\_classifier.predict.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index 2a6bc7ea2..fb2da75a6 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -48,6 +48,7 @@ You can see below the list of current supported ONNX Operators: | [ConvTranspose](operators/neural-network/nn.conv\_transpose_.md) | :white\_check\_mark: | | [Conv](operators/neural-network/nn.conv.md) | :white\_check\_mark: | | [MaxPool](operators/neural-network/nn.max\_pool.md) | :white\_check\_mark: | +| [DeformConv](operators/neural-network/nn.deform\_conv_.md) | :white\_check\_mark: | | [Sinh](operators/tensor/tensor.sinh.md) | :white\_check\_mark: | | [Asinh](operators/tensor/tensor.asinh.md) | :white\_check\_mark: | | [Atanh](operators/tensor/tensor.atanh.md) | :white\_check\_mark: | diff --git a/docs/framework/operators/neural-network/nn.deform_conv.md b/docs/framework/operators/neural-network/nn.deform_conv.md new file mode 100644 index 000000000..6a6718645 --- /dev/null +++ b/docs/framework/operators/neural-network/nn.deform_conv.md @@ -0,0 +1,152 @@ +# NNTrait::deform_conv + +```rust + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor +``` + +Performs deformable convolution as described in https://arxiv.org/abs/1703.06211 and https://arxiv.org/abs/1811.11168. This operator specification supports the 2-D case. + +## Args + + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + +* `X`(`@Tensor`) - Input data tensor. For 2D image data, it has shape (N, C, H, W) where N is the batch size, C is the number of input channels, and H and W are the height and width. +* `W`(`@Tensor`) - Weight tensor that will be used in the convolutions. It has shape (oC, C/group, kH, kW), where oC is the number of output channels and kH and kW are the kernel height and width. +* `offset`(`@Tensor`) - Offset tensor denoting the offset for the sampling locations in the convolution kernel. 
It has shape (N, offset_group * kH * kW * 2, oH, oW) for 2D data +* `B`(`Option>`) - Default is a tensor of zeros, optional 1D bias of length oC to be added to the convolution. +* `mask`(`Option>`) - Default is a tensor of ones, the mask tensor to be applied to each position in the convolution kernel. It has shape (N, offset_group * kH * kW, oH, oW) for 2D data. +* `dilations`(`Option>`) - Default is 1 along each axis, dilation value along each spatial axis of the kernel. +* `group`(`usize`) - Default is 1, number of groups the input and output channels, C and oC, are divided into. +* `kernel_shape`(`Option>`) - Shape of the convolution kernel. If not present, it is inferred from the shape of input W. +* `offset_group`(`Option`) - Default is 1, number of groups of offset. C must be divisible by offset_group. +* `pads`(`Option>`) - Default is 0 along each axis, padding for the beginning and end along each spatial axis. The values represent the number of pixels added to the beginning and end of the corresponding axis and can take any nonnegative value. +* `strides`(`Option>`) - Default is 1 along each axis, stride along each spatial axis. + +## Returns + +A `Tensor` output tensor that contains the result of convolution. + +## Examples + +```rust +fn example_deform_conv() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + let mut X = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + let mut W = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 
0, sign: false }); + data.append(FP16x16 { mag: 6553, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + let mut offset = TensorTrait::new(shape.span(), data.span()); + + + return NNTrait::deform_conv( + @X, + @W, + @offset, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + Option::None, + Option::Some(array![0, 0, 0, 0].span()), + Option::None, + ); +} + +>>> [ + [ + [ + [9.5, 11.9], + [20.0, 24.0], + ] + ] + ] + +```` \ No newline at end of file diff --git a/nodegen/node/deform_conv.py b/nodegen/node/deform_conv.py new file mode 100644 index 000000000..abb101dc7 --- /dev/null +++ b/nodegen/node/deform_conv.py @@ -0,0 +1,463 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait + +import numpy as np + +def deform_conv_implementation( # type: ignore + X, + W, + offset, + B=None, + mask=None, + dilations=None, + group=None, + kernel_shape=None, + offset_group=None, + pads=None, + strides=None, +): + if dilations is None: + dilations = [1 for s in X.shape[2:]] + if kernel_shape is None: + kernel_shape = W.shape[2:] + if pads is None: + pads = [0 for s in X.shape[2:]] * 2 + if strides is None: + strides = [1 for s in X.shape[2:]] + if group is None: + group = 1 + if offset_group is None: + offset_group = 1 + + n, ic = X.shape[:2] + oc = W.shape[0] + output_shape = offset.shape[2:] + + if ic != W.shape[1] * group or oc % group != 0: + raise ValueError( + f"Shape inconsistencies, X.shape={X.shape}, W.shape={W.shape}, group={group}." + ) + ics_per_group, ocs_per_group = W.shape[1], oc // group + + if ic % offset_group != 0: + raise ValueError("Number of input channels must be divisible by offset_group.") + ics_per_offset_group = ic // offset_group + + if offset_group * np.prod(kernel_shape) * len(kernel_shape) != offset.shape[1]: + raise ValueError( + f"Offset shape {offset.shape} is inconsistent with offset_group {offset_group} " + f"and kernel shape {kernel_shape}." + ) + offset = offset.reshape( + (n, offset_group, *kernel_shape, len(kernel_shape), *output_shape) + ) + + if mask is None: + mask = np.ones((n, offset_group * np.prod(kernel_shape), *output_shape)) + mask = mask.reshape((n, offset_group, *kernel_shape, *output_shape)) + + from onnx.reference.ops._op_list import GridSample + + if len(X.shape) == 4: + ih, iw = X.shape[2:] + oh, ow = offset.shape[-2:] + kh, kw = kernel_shape + sth, stw = strides + dh, dw = dilations + kh_new, kw_new = (kh - 1) * dh + 1, (kw - 1) * dw + 1 + + if oh != int(((ih - kh_new + pads[0] + pads[2]) / sth) + 1) or ow != int( + ((iw - kw_new + pads[1] + pads[3]) / stw) + 1 + ): + raise RuntimeError( + "Padding, dilation, stride, and kernel shape incompatible with output shape." 
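                # Expected spatial output follows the usual convolution formula checked above:
                #   o = (i - ((k - 1) * d + 1) + pad_begin + pad_end) / stride + 1.
                # For the docs example (3x3 input, 2x2 kernel, stride 1, dilation 1, no padding)
                # this gives oh = ow = 2, which matches the (1, 8, 2, 2) offset tensor used there.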
+ ) + + bh, bw = -pads[0], -pads[1] + + res = np.zeros((n, oc, oh, ow), dtype=X.dtype) + if B is not None: + res[:, :, :, :] = B.reshape((1, -1, 1, 1)) + + kernel_pos_w, kernel_pos_h = np.meshgrid( + np.arange(0, kw_new, dw), np.arange(0, kh_new, dh) + ) + + kernel_pos_wrt_first_elem = np.stack( + (kernel_pos_h, kernel_pos_w), axis=2 + ) + + for batch_idx in range(n): + for oc_idx in range(oc): + for ic_idx in range(ic): + # Group convolution logic + if ic_idx // ics_per_group != oc_idx // ocs_per_group: + # Input channel and output channel don't belong to same group + continue + + # Offset group logic + offset_group_idx = ic_idx // ics_per_offset_group + + for i in range(oh): + h_coord = bh + sth * i + for j in range(ow): + w_coord = bw + stw * j + + kernel = np.copy(kernel_pos_wrt_first_elem).astype(float) + kernel[:, :, 0] += ( + h_coord + + offset[batch_idx, offset_group_idx, :, :, 0, i, j] + ) + kernel[:, :, 1] += ( + w_coord + + offset[batch_idx, offset_group_idx, :, :, 1, i, j] + ) + + kernel[:, :, 0] = kernel[:, :, 0] / (ih - 1) * 2 - 1 + kernel[:, :, 1] = kernel[:, :, 1] / (iw - 1) * 2 - 1 + + kernel = np.expand_dims(kernel, 0) + + kernel = np.flip( + kernel, 3 + ) + + grid_sample_output = GridSample.eval( + X[batch_idx : batch_idx + 1, ic_idx : ic_idx + 1], + kernel, + align_corners=1, + ) + + conv_value = np.multiply( + grid_sample_output, + W[oc_idx, ic_idx % ics_per_group, :, :], + ) + conv_value = np.multiply( + conv_value, + mask[batch_idx, offset_group_idx, :, :, i, j], + ) + res[batch_idx, oc_idx, i, j] += np.sum(conv_value) + + return res + raise RuntimeError( + f"The convolution for X.shape={X.shape}, W.shape={W.shape}, " + f"kernel_shape={kernel_shape} is not implemented yet." + ) + + + +def deform_conv_implementation( # type: ignore + X, + W, + offset, + B=None, + mask=None, + dilations=None, + group=None, + kernel_shape=None, + offset_group=None, + pads=None, + strides=None, +): + if dilations is None: + dilations = [1 for s in X.shape[2:]] + if kernel_shape is None: + kernel_shape = W.shape[2:] + if pads is None: + pads = [0 for s in X.shape[2:]] * 2 + if strides is None: + strides = [1 for s in X.shape[2:]] + if group is None: + group = 1 + if offset_group is None: + offset_group = 1 + + n, ic = X.shape[:2] + oc = W.shape[0] + output_shape = offset.shape[2:] + + if ic != W.shape[1] * group or oc % group != 0: + raise ValueError( + f"Shape inconsistencies, X.shape={X.shape}, W.shape={W.shape}, group={group}." + ) + ics_per_group, ocs_per_group = W.shape[1], oc // group + + if ic % offset_group != 0: + raise ValueError("Number of input channels must be divisible by offset_group.") + ics_per_offset_group = ic // offset_group + + if offset_group * np.prod(kernel_shape) * len(kernel_shape) != offset.shape[1]: + raise ValueError( + f"Offset shape {offset.shape} is inconsistent with offset_group {offset_group} " + f"and kernel shape {kernel_shape}." 
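        # Note: `offset` arrives as (N, offset_group * kH * kW * 2, oH, oW) and is reshaped
        # just below to (N, offset_group, kH, kW, 2, oH, oW), so the per-kernel-position
        # (dh, dw) displacements can be indexed directly in the inner loops.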
+ ) + offset = offset.reshape( + (n, offset_group, *kernel_shape, len(kernel_shape), *output_shape) + ) + + if mask is None: + mask = np.ones((n, offset_group * np.prod(kernel_shape), *output_shape)) + mask = mask.reshape((n, offset_group, *kernel_shape, *output_shape)) + + from onnx.reference.ops._op_list import GridSample + + if len(X.shape) == 4: + ih, iw = X.shape[2:] + oh, ow = offset.shape[-2:] + kh, kw = kernel_shape + sth, stw = strides + dh, dw = dilations + kh_new, kw_new = (kh - 1) * dh + 1, (kw - 1) * dw + 1 + + if oh != int(((ih - kh_new + pads[0] + pads[2]) / sth) + 1) or ow != int( + ((iw - kw_new + pads[1] + pads[3]) / stw) + 1 + ): + raise RuntimeError( + "Padding, dilation, stride, and kernel shape incompatible with output shape." + ) + + bh, bw = -pads[0], -pads[1] + + res = np.zeros((n, oc, oh, ow), dtype=X.dtype) + if B is not None: + res[:, :, :, :] = B.reshape((1, -1, 1, 1)) + + kernel_pos_w, kernel_pos_h = np.meshgrid( + np.arange(0, kw_new, dw), np.arange(0, kh_new, dh) + ) + + kernel_pos_wrt_first_elem = np.stack( + (kernel_pos_h, kernel_pos_w), axis=2 + ) + + for batch_idx in range(n): + for oc_idx in range(oc): + for ic_idx in range(ic): + # Group convolution logic + if ic_idx // ics_per_group != oc_idx // ocs_per_group: + # Input channel and output channel don't belong to same group + continue + + # Offset group logic + offset_group_idx = ic_idx // ics_per_offset_group + + for i in range(oh): + h_coord = bh + sth * i + for j in range(ow): + w_coord = bw + stw * j + + kernel = np.copy(kernel_pos_wrt_first_elem).astype(float) + kernel[:, :, 0] += ( + h_coord + + offset[batch_idx, offset_group_idx, :, :, 0, i, j] + ) + kernel[:, :, 1] += ( + w_coord + + offset[batch_idx, offset_group_idx, :, :, 1, i, j] + ) + + kernel[:, :, 0] = kernel[:, :, 0] / (ih - 1) * 2 - 1 + kernel[:, :, 1] = kernel[:, :, 1] / (iw - 1) * 2 - 1 + + kernel = np.expand_dims(kernel, 0) + + kernel = np.flip( + kernel, 3 + ) + + grid_sample_output = GridSample.eval( + X[batch_idx : batch_idx + 1, ic_idx : ic_idx + 1], + kernel, + align_corners=1, + ) + + conv_value = np.multiply( + grid_sample_output, + W[oc_idx, ic_idx % ics_per_group, :, :], + ) + conv_value = np.multiply( + conv_value, + mask[batch_idx, offset_group_idx, :, :, i, j], + ) + res[batch_idx, oc_idx, i, j] += np.sum(conv_value) + + return res + raise RuntimeError( + f"The convolution for X.shape={X.shape}, W.shape={W.shape}, " + f"kernel_shape={kernel_shape} is not implemented yet." 
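    # Only the 2-D (N, C, H, W) case is handled by this reference implementation; other
    # ranks fall through to this error, matching the Cairo operator, which panics with
    # 'not supported yet!' outside the 4-D branch.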
+ ) + + +class Deform_conv(RunAll): + + @staticmethod + def export_deform_conv_without_padding() -> None: + x = np.arange(9).astype(np.float32) + x.shape = (1, 1, 3, 3) + w = np.ones((1, 1, 2, 2), dtype=np.float32) + + # Convolution without padding + offset = np.zeros((1, 8, 2, 2), dtype=np.float32) + offset[ + 0, 0, 0, 0 + ] = 0.5 + offset[ + 0, 5, 0, 1 + ] = -0.1 + + + + y = deform_conv_implementation(x, w, offset, kernel_shape=[2, 2]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + offset = Tensor(Dtype.FP16x16, offset.shape, to_fp(offset.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "deform_conv" + func_sig = "NNTrait::deform_conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "@input_2," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w, offset], y, func_sig, name, Trait.NN) + + @staticmethod + def export_deform_conv_with_padding() -> None: + x = np.arange(9).astype(np.float32) + x.shape = (1, 1, 3, 3) + w = np.ones((1, 1, 2, 2), dtype=np.float32) + + # Convolution with padding + offset = np.zeros((1, 8, 4, 4), dtype=np.float32) + offset[ + 0, 0, 0, 0 + ] = 0.5 + offset[ + 0, 5, 1, 2 + ] = -0.1 + + + + y = deform_conv_implementation(x, w, offset, kernel_shape=[2, 2], pads=[1, 1, 1, 1]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + offset = Tensor(Dtype.FP16x16, offset.shape, to_fp(offset.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "deform_conv_with_padding" + func_sig = "NNTrait::deform_conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "@input_2," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "Option::None," + func_sig += "Option::Some(array![1, 1, 1, 1].span())," + func_sig += "Option::None)" + make_test( + [x, w, offset], y, func_sig, name, Trait.NN) + + @staticmethod + def export_deform_conv_with_mask_bias() -> None: + x = np.arange(9).astype(np.float32) + x.shape = (1, 1, 3, 3) + w = np.ones((1, 1, 2, 2), dtype=np.float32) + + b = np.ones((1,), dtype=np.float32) + + offset = np.zeros((1, 8, 2, 2), dtype=np.float32) + offset[ + 0, 0, 0, 0 + ] = 0.5 + offset[ + 0, 5, 0, 1 + ] = -0.1 + + mask = np.ones((1, 4, 2, 2), dtype=np.float32) + mask[0, 2, 1, 1] = 0.2 + + y = deform_conv_implementation(x, w, offset, mask=mask, B=b, kernel_shape=[2, 2]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + offset = Tensor(Dtype.FP16x16, offset.shape, to_fp(offset.flatten(), FixedImpl.FP16x16)) + b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16)) + mask = Tensor(Dtype.FP16x16, mask.shape, to_fp(mask.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "deform_conv_with_mask_bias" + func_sig = "NNTrait::deform_conv(" + func_sig += "@input_0," + 
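        # The generated call is positional: X, W, offset, B, mask, dilations, group,
        # kernel_shape, offset_group, pads, strides. B is an Option<Span<T>>, hence
        # `input_3.data`, while the mask is passed as a full tensor (`input_4`).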
func_sig += "@input_1," + func_sig += "@input_2," + func_sig += "Option::Some(input_3.data)," + func_sig += "Option::Some(input_4)," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w, offset, b, mask], y, func_sig, name, Trait.NN) + + + @staticmethod + def export_deform_conv_with_multiple_offset_groups() -> None: + x = np.zeros((1, 2, 3, 3), dtype=np.float32) + x[0, 0] = np.reshape(np.arange(9).astype(np.float32), (3, 3)) + x[0, 1] = np.reshape(np.arange(8, -1, -1).astype(np.float32), (3, 3)) + x.shape = (1, 2, 3, 3) + w = np.ones((1, 2, 2, 2), dtype=np.float32) + + offset = np.zeros((1, 16, 2, 2), dtype=np.float32) + offset[ + 0, 0, 0, 0 + ] = 0.5 + offset[ + 0, 13, 0, 1 + ] = ( + -0.1 + ) + + + y = deform_conv_implementation(x, w, offset, offset_group=2, kernel_shape=[2, 2]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + offset = Tensor(Dtype.FP16x16, offset.shape, to_fp(offset.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "deform_conv_with_multiple_offset_groups" + func_sig = "NNTrait::deform_conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "@input_2," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "Option::Some(2)," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w, offset], y, func_sig, name, Trait.NN) + + + + \ No newline at end of file diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index e3b7b5a7b..7227870b8 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -1425,4 +1425,169 @@ trait NNTrait { strides: Option>, output_len: usize, ) -> (Tensor, Option>); + /// # NNTrait::deform_conv + /// + /// ```rust + /// fn deform_conv( + /// X: @Tensor, + /// W: @Tensor, + /// offset: @Tensor, + /// B: Option>, + /// mask: Option>, + /// dilations: Option>, + /// group: Option, + /// kernel_shape: Option>, + /// offset_group: Option, + /// pads: Option>, + /// strides: Option>, + /// ) -> Tensor + /// ``` + /// + /// Performs deformable convolution as described in https://arxiv.org/abs/1703.06211 and https://arxiv.org/abs/1811.11168. This operator specification supports the 2-D case. + /// + /// ## Args + /// + /// X: @Tensor, + /// W: @Tensor, + /// offset: @Tensor, + /// B: Option>, + /// mask: Option>, + /// dilations: Option>, + /// group: Option, + /// kernel_shape: Option>, + /// offset_group: Option, + /// pads: Option>, + /// strides: Option>, + /// + /// * `X`(`@Tensor`) - Input data tensor. For 2D image data, it has shape (N, C, H, W) where N is the batch size, C is the number of input channels, and H and W are the height and width. + /// * `W`(`@Tensor`) - Weight tensor that will be used in the convolutions. It has shape (oC, C/group, kH, kW), where oC is the number of output channels and kH and kW are the kernel height and width. + /// * `offset`(`@Tensor`) - Offset tensor denoting the offset for the sampling locations in the convolution kernel. 
It has shape (N, offset_group * kH * kW * 2, oH, oW) for 2D data + /// * `B`(`Option>`) - Default is a tensor of zeros, optional 1D bias of length oC to be added to the convolution. + /// * `mask`(`Option>`) - Default is a tensor of ones, the mask tensor to be applied to each position in the convolution kernel. It has shape (N, offset_group * kH * kW, oH, oW) for 2D data. + /// * `dilations`(`Option>`) - Default is 1 along each axis, dilation value along each spatial axis of the kernel. + /// * `group`(`usize`) - Default is 1, number of groups the input and output channels, C and oC, are divided into. + /// * `kernel_shape`(`Option>`) - Shape of the convolution kernel. If not present, it is inferred from the shape of input W. + /// * `offset_group`(`Option`) - Default is 1, number of groups of offset. C must be divisible by offset_group. + /// * `pads`(`Option>`) - Default is 0 along each axis, padding for the beginning and end along each spatial axis. The values represent the number of pixels added to the beginning and end of the corresponding axis and can take any nonnegative value. + /// * `strides`(`Option>`) - Default is 1 along each axis, stride along each spatial axis. + /// + /// ## Returns + /// + /// A `Tensor` output tensor that contains the result of convolution. + /// + /// ## Examples + /// + /// ```rust + /// fn example_deform_conv() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(1); + /// shape.append(3); + /// shape.append(3); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 131072, sign: false }); + /// data.append(FP16x16 { mag: 196608, sign: false }); + /// data.append(FP16x16 { mag: 262144, sign: false }); + /// data.append(FP16x16 { mag: 327680, sign: false }); + /// data.append(FP16x16 { mag: 393216, sign: false }); + /// data.append(FP16x16 { mag: 458752, sign: false }); + /// data.append(FP16x16 { mag: 524288, sign: false }); + /// let mut X = TensorTrait::new(shape.span(), data.span()); + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(1); + /// shape.append(2); + /// shape.append(2); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// let mut W = TensorTrait::new(shape.span(), data.span()); + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(8); + /// shape.append(2); + /// shape.append(2); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 32768, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { 
mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 6553, sign: true }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// let mut offset = TensorTrait::new(shape.span(), data.span()); + /// + /// + /// return NNTrait::deform_conv( + /// @X, + /// @W, + /// @offset, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::Some(array![2, 2].span()), + /// Option::None, + /// Option::Some(array![0, 0, 0, 0].span()), + /// Option::None, + /// ); + /// } + /// + /// >>> [ + /// [ + /// [ + /// [9.5, 11.9], + /// [20.0, 24.0], + /// ] + /// ] + /// ] + /// + /// ```` + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor; } diff --git a/src/operators/nn/functional.cairo b/src/operators/nn/functional.cairo index f02570148..50dbebb38 100644 --- a/src/operators/nn/functional.cairo +++ b/src/operators/nn/functional.cairo @@ -18,3 +18,4 @@ mod space_to_depth; mod conv; mod max_pool; mod common_pool; +mod deform_conv; diff --git a/src/operators/nn/functional/conv.cairo b/src/operators/nn/functional/conv.cairo index 0dcea51f2..851c15f88 100644 --- a/src/operators/nn/functional/conv.cairo +++ b/src/operators/nn/functional/conv.cairo @@ -7,6 +7,7 @@ use orion::operators::tensor::core::{stride}; use orion::operators::nn::AUTO_PAD; +/// Cf: NNTrait::conv docstring fn conv< T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +Add, +Mul, +AddEq, >( diff --git a/src/operators/nn/functional/deform_conv.cairo b/src/operators/nn/functional/deform_conv.cairo new file mode 100644 index 000000000..bb04b11c8 --- /dev/null +++ b/src/operators/nn/functional/deform_conv.cairo @@ -0,0 +1,548 @@ +use core::array::ArrayTrait; +use orion::numbers::NumberTrait; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; +use orion::operators::vec::{NullableVec, NullableVecImpl}; +use orion::operators::tensor::core::{stride}; +use core::debug::PrintTrait; +use core::traits::Into; +use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number}; + + +use orion::operators::nn::functional::grid_sample::{grid_sample}; + + +/// Cf: NNTrait::deform_conv docstring +fn deform_conv< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +Add, + +Mul, + +Sub, + +Div, + +AddEq, + +PrintTrait, + +PartialOrd, + +PartialEq, + +TryInto, + +Into, + +Rem, + +Neg, + +SubEq, + +Mul>, +>( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, +) -> Tensor { + assert((*X).shape.len() >= 3, 'X 
must have at least 3 dim'); + assert((*W).shape.len() >= 3, 'X must have at least 3 dim'); + + let dilations = match dilations { + Option::Some(dilations) => dilations, + Option::None => { + let mut dilations = ArrayTrait::new(); + let mut i = 2; + while i != (*X).shape.len() { + dilations.append(1); + i += 1; + }; + dilations.span() + }, + }; + let kernel_shape = match kernel_shape { + Option::Some(kernel_shape) => kernel_shape, + Option::None => { + let mut kernel_shape = ArrayTrait::new(); + let mut i = 2; + while i != (*W).shape.len() { + kernel_shape.append(*(*W).shape.at(i)); + i += 1; + }; + kernel_shape.span() + }, + }; + let pads = match pads { + Option::Some(pads) => pads, + Option::None => { + let mut pads = ArrayTrait::new(); + let mut i = 2; + while i != (*X).shape.len() { + pads.append(0); + pads.append(0); + i += 1; + }; + pads.span() + }, + }; + let strides = match strides { + Option::Some(strides) => strides, + Option::None => { + let mut strides = ArrayTrait::new(); + let mut i = 2; + while i != (*X).shape.len() { + strides.append(1); + i += 1; + }; + strides.span() + }, + }; + let group = match group { + Option::Some(group) => group, + Option::None => { 1 }, + }; + + let offset_group = match offset_group { + Option::Some(offset_group) => offset_group, + Option::None => { 1 }, + }; + + let n = *(*X).shape.at(0); + let ic = *(*X).shape.at(1); + let oc = *(*W).shape.at(0); + let output_shape = SpanTrait::slice((*offset).shape, 2, (*offset).shape.len() - 2); + + assert(ic == *(*W).shape.at(1) * group, 'shape inconsistencies'); + assert(oc % group == 0, 'shape inconsistencies'); + + let ics_per_group = *(*W).shape.at(1); + let ocs_per_group = oc / group; + + assert(ic % offset_group == 0, 'offset_group inconsistencies'); + + let ics_per_offset_group = ic / offset_group; + + assert( + offset_group * prod(kernel_shape, 0) * kernel_shape.len() == *(*offset).shape.at(1), + 'offset_group inconsistencies' + ); + + let mut offset_shape = array![n.into(), offset_group.into()]; + offset_shape.append_span(span_U32_to_span_I32(kernel_shape.clone())); + offset_shape.append(kernel_shape.len().into()); + offset_shape.append_span(span_U32_to_span_I32(output_shape.clone())); + + let offset = offset.reshape(offset_shape.span(), false); + + let mask = match mask { + Option::Some(mask) => mask, + Option::None => { + let mut mask = ArrayTrait::::new(); + let mask_end = n * offset_group * prod(kernel_shape, 0) * prod(output_shape, 0); + let mut i = 0; + while i != mask_end { + mask.append(NumberTrait::::one()); + i += 1; + }; + let mut mask_shape = array![n, offset_group * prod(kernel_shape, 0)]; + mask_shape.append_span(output_shape); + TensorTrait::new(mask_shape.span(), mask.span()) + }, + }; + + let mut mask_shape = array![n.into(), offset_group.into()]; + mask_shape.append_span(span_U32_to_span_I32(kernel_shape.clone())); + mask_shape.append_span(span_U32_to_span_I32(output_shape.clone())); + let mask = mask.reshape(mask_shape.span(), false); + + if (*X).shape.len() == 4 { + let ih: T = NumberTrait::new_unscaled((*(*X).shape.at(2)).into(), false); + let iw: T = NumberTrait::new_unscaled((*(*X).shape.at(3)).into(), false); + + let x_stride = stride((*X).shape); + let w_stride = stride((*W).shape); + let offset_stride = stride(offset.shape); + let mask_stride = stride(mask.shape); + + let mut x_subset_shape = array![1, 1]; + x_subset_shape.append_span(SpanTrait::slice(*(X).shape, 2, (*(X).shape).len() - 2)); + let x_subset_shape = x_subset_shape.span(); + + let mut w_subset_shape = array![1, 
1]; + w_subset_shape.append_span(SpanTrait::slice(*(W).shape, 2, (*(W).shape).len() - 2)); + let w_subset_shape = w_subset_shape.span(); + + let oh = *offset.shape.at(offset_shape.len() - 2); + let ow = *offset.shape.at(offset_shape.len() - 1); + + let kh = *kernel_shape.at(0); + let kw = *kernel_shape.at(1); + + let sth: T = NumberTrait::new_unscaled((*strides.at(0)).into(), false); + let stw: T = NumberTrait::new_unscaled((*strides.at(1)).into(), false); + + let dh = *dilations.at(0); + let dw = *dilations.at(1); + + let kh_new = (kh - 1) * dh + 1; + let kw_new = (kw - 1) * dw + 1; + + let bh: T = NumberTrait::new_unscaled((*pads.at(0)).into(), true); + let bw: T = NumberTrait::new_unscaled((*pads.at(1)).into(), true); + + assert( + oh == (((*(*X).shape.at(2) - kh_new + *pads.at(0) + *pads.at(2)) / *strides.at(0)) + 1), + 'incompatible shapes' + ); + assert( + ow == (((*(*X).shape.at(3) - kw_new + *pads.at(1) + *pads.at(3)) / *strides.at(1)) + 1), + 'incompatible shapes' + ); + + let mut res = NullableVecImpl::new(); + let res_shape = array![n, oc, oh, ow].span(); + let res_stride = stride(res_shape); + res.set(n * *res_stride.at(0) - 1, NumberTrait::zero()); + + match B { + Option::Some(B) => { + let mut i = 0; + while i != n { + let mut j = 0; + while j != oc { + let b_j = *B.at(j); + let mut k = 0; + while k != oh { + let mut l = 0; + while l != ow { + res + .set( + i * *res_stride.at(0) + + j * *res_stride.at(1) + + k * *res_stride.at(2) + + l, + b_j + ); + l += 1; + }; + k += 1; + }; + j += 1; + }; + i += 1; + }; + }, + Option::None => {}, + } + + let (kernel_pos_w, kernel_pos_h) = meshgrid(arange(0, kw_new, dw), arange(0, kh_new, dh)); + let kernel_pos_wrt_first_elem = stack(kernel_pos_h, kernel_pos_w); + + let dh: T = NumberTrait::new_unscaled(dh.into(), false); + let dw: T = NumberTrait::new_unscaled(dw.into(), false); + + let kh_new: T = NumberTrait::new_unscaled(kh_new.into(), false); + let kw_new: T = NumberTrait::new_unscaled(kw_new.into(), false); + + // dimension of kernel_pos_wrt_first_elem is ks0 x ks1 + let ks0 = NumberTrait::ceil(kh_new / dh).try_into().unwrap(); + let ks1 = NumberTrait::ceil(kw_new / dw).try_into().unwrap(); + + let one: T = NumberTrait::one(); + let two: T = NumberTrait::one() + NumberTrait::one(); + + let mut batch_idx = 0; + while batch_idx != n { + let mut oc_idx = 0; + while oc_idx != oc { + let mut ic_idx = 0; + while ic_idx != ic { + if (ic_idx / ics_per_group) == (oc_idx / ocs_per_group) { + let offset_group_idx = ic_idx / ics_per_offset_group; + + let mut i = 0; + while i != oh { + let index = NumberTrait::new_unscaled(i.into(), false); + let h_coord = bh + sth * index; + let mut j = 0; + while j != ow { + let jndex = NumberTrait::new_unscaled(j.into(), false); + let w_coord = bw + stw * jndex; + + let mut kernel = copy_to_vec(kernel_pos_wrt_first_elem); + let mut mask_subset = ArrayTrait::new(); + let mut kernel_test = ArrayTrait::new(); + let mut offset_TEST = ArrayTrait::new(); + + let mut hi = 0; + while hi != ks0 { + let mut wi = 0; + while wi != ks1 { + let elem1 = h_coord + + *offset + .data + .at( + batch_idx * *offset_stride.at(0) + + offset_group_idx * *offset_stride.at(1) + + hi * *offset_stride.at(2) + + wi * *offset_stride.at(3) + + 0 * *offset_stride.at(4) + + i * *offset_stride.at(5) + + j + ); + let elem2 = w_coord + + *offset + .data + .at( + batch_idx * *offset_stride.at(0) + + offset_group_idx * *offset_stride.at(1) + + hi * *offset_stride.at(2) + + wi * *offset_stride.at(3) + + 1 * *offset_stride.at(4) + + i * 
*offset_stride.at(5) + + j + ); + + mask_subset + .append( + *mask + .data + .at( + batch_idx * *mask_stride.at(0) + + offset_group_idx * *mask_stride.at(1) + + hi * *mask_stride.at(2) + + wi * *mask_stride.at(3) + + i * *mask_stride.at(4) + + j + ) + ); + kernel_test.append(kernel.at(hi * (ks1 * 2) + wi * 2)); + offset_TEST + .append( + *offset + .data + .at( + batch_idx * *offset_stride.at(0) + + offset_group_idx + * *offset_stride.at(1) + + hi * *offset_stride.at(2) + + wi * *offset_stride.at(3) + + 0 * *offset_stride.at(4) + + i * *offset_stride.at(5) + + j + ) + ); + kernel + .set( + hi * (ks1 * 2) + wi * 2, + (kernel.at(hi * (ks1 * 2) + wi * 2) + elem1) + / (ih - one) + * two + - one + ); + kernel + .set( + hi * (ks1 * 2) + wi * 2 + 1, + (kernel.at(hi * (ks1 * 2) + wi * 2 + 1) + elem2) + / (iw - one) + * two + - one + ); + wi += 1; + }; + hi += 1; + }; + let kernel = flip_mod_2(ref kernel); + + let subset_x = TensorTrait::new( + x_subset_shape, + SpanTrait::slice( + (*X).data, + batch_idx * *x_stride.at(0) + ic_idx * *x_stride.at(1), + *x_stride.at(1) + ) + ); + let subset_w = TensorTrait::new( + w_subset_shape, + SpanTrait::slice( + (*W).data, + oc_idx * *w_stride.at(0) + + (ic_idx % ics_per_group) * *w_stride.at(1), + *w_stride.at(1) + ) + ); + let mask_subset = TensorTrait::new( + array![1, 1, ks0, ks1].span(), mask_subset.span() + ); + let kernel = TensorTrait::new( + array![1, ks0, ks1, 2].span(), kernel + ); + + let grid_sample_output = grid_sample( + @subset_x, @kernel, Option::Some(1), Option::None, Option::None + ); + + // broadcasted multiply + let conv_value = (grid_sample_output * subset_w); + let conv_value = (conv_value * mask_subset); + + res + .set( + batch_idx * *res_stride.at(0) + + oc_idx * *res_stride.at(1) + + i * *res_stride.at(2) + + j, + res + .at( + batch_idx * *res_stride.at(0) + + oc_idx * *res_stride.at(1) + + i * *res_stride.at(2) + + j + ) + + sum(conv_value.data, 0) + ); + j += 1; + }; + i += 1; + }; + } + ic_idx += 1; + }; + oc_idx += 1; + }; + batch_idx += 1; + }; + + let mut res_data = ArrayTrait::new(); + let mut i = 0; + while i != res.len() { + res_data.append(res.at(i)); + i += 1; + }; + return TensorTrait::new(res_shape, res_data.span()); + } + + panic(array!['not supported yet!']) +} + + +fn meshgrid(x: Span, y: Span) -> (Span, Span) { + let mut xv = ArrayTrait::new(); + let mut yv = ArrayTrait::new(); + + let mut i = 0; + while i != y.len() { + + xv.append_span(x); + let mut j = 0; + while j != x.len() { + yv.append(*y.at(i)); + j += 1; + }; + i += 1; + }; + return (xv.span(), yv.span()); +} + +fn stack(x: Span, y: Span) -> Span { + let mut stack = ArrayTrait::new(); + + let mut i = 0; + while i != x.len() { + stack.append(*x.at(i)); + stack.append(*y.at(i)); + i += 1; + }; + + return stack.span(); +} + + +fn flip_mod_2, impl TCopy: Copy, +NumberTrait>( + ref x: NullableVec +) -> Span { + let mut i = 0; + let mut res = ArrayTrait::new(); + while i != x.len / 2 { + res.append(x.at(i * 2 + 1)); + res.append(x.at(i * 2)); + i += 1; + }; + + return res.span(); +} + +fn copy_to_vec< + T, MAG, +Drop, +Copy, +NumberTrait, +TryInto, +Into, +>( + x: Span +) -> NullableVec { + let mut res = NullableVecImpl::new(); + + let mut i = 0; + while i != x.len() { + res.set(i, NumberTrait::new_unscaled((*x.at(i)).into(), false)); + i += 1; + }; + + return res; +} + +// return a span of len ceil((end - start) / step) +fn arange(start: usize, end: usize, step: usize) -> Span { + let mut arr = ArrayTrait::new(); + let mut i = start; + while i != end { + 
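    // Collects start, start + step, ...; the `!=` exit test assumes (end - start)
    // is an exact multiple of step, so that `i` lands exactly on `end`.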
arr.append(i); + i += step; + }; + return arr.span(); +} + + +fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( + a: Span, start: usize +) -> T { + assert(a.len() > start, 'wrong input dim'); + let mut prod = NumberTrait::one(); + let mut i = start; + while i != a.len() { + prod = prod * (*a.at(i)); + i += 1; + }; + return prod; +} + + + +fn sum, +Copy, +NumberTrait, +TensorTrait, +AddEq,>( + a: Span, start: usize +) -> T { + assert(a.len() > start, 'wrong input dim'); + let mut sum = NumberTrait::zero(); + let mut i = start; + while i != a.len() { + sum += (*a.at(i)); + i += 1; + }; + return sum; +} + + +fn span_U32_to_span_I32( + mut x: Span +) -> Span { + let mut res = ArrayTrait::new(); + + loop { + match x.pop_front() { + Option::Some(v) => { + res.append((*v).into()); + }, + Option::None => { break; } + }; + }; + + return res.span(); +} \ No newline at end of file diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 12a991b01..05faad2ba 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -3,13 +3,13 @@ use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16; use orion::operators::tensor::implementations::tensor_fp16x16::{ - FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorAdd + FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorAdd, FP16x16TensorMul }; use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ FP16x16WImpl, FP16x16WTryIntoFP16x16, FP16x16W, FP16x16IntoFP16x16W }; use orion::operators::tensor::implementations::tensor_fp16x16wide::{ - FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd + FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd, FP16x16WTensorMul }; use orion::operators::nn::AUTO_PAD; @@ -165,7 +165,23 @@ impl FP16x16NN of NNTrait { pads, storage_order, strides, - output_len + output_len) + } + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::deform_conv::deform_conv( + X, W, offset, B, mask, dilations, group, kernel_shape, offset_group, pads, strides, ) } } diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index 0664981d4..a1ca177dd 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -3,7 +3,7 @@ use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::numbers::fixed_point::implementations::fp32x32::core::{FP32x32, FP32x32Impl}; use orion::operators::tensor::implementations::tensor_fp32x32::{ - FP32x32Tensor, FP32x32TensorDiv, FP32x32TensorAdd + FP32x32Tensor, FP32x32TensorDiv, FP32x32TensorAdd, FP32x32TensorMul }; use orion::operators::nn::AUTO_PAD; @@ -159,7 +159,23 @@ impl FP32x32NN of NNTrait { pads, storage_order, strides, - output_len + output_len) + } + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::deform_conv::deform_conv( + X, W, offset, B, mask, dilations, group, kernel_shape, offset_group, pads, strides, ) } } diff --git 
a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index e60148d30..6d6770551 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -3,7 +3,7 @@ use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x64Impl}; use orion::operators::tensor::implementations::tensor_fp64x64::{ - FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd + FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd, FP64x64TensorMul }; use orion::operators::nn::AUTO_PAD; @@ -159,7 +159,23 @@ impl FP64x64NN of NNTrait { pads, storage_order, strides, - output_len + output_len) + } + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::deform_conv::deform_conv( + X, W, offset, B, mask, dilations, group, kernel_shape, offset_group, pads, strides, ) } } diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index 38d9bed74..924b16d34 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -3,7 +3,7 @@ use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23; use orion::operators::tensor::implementations::tensor_fp8x23::{ - FP8x23Tensor, FP8x23TensorDiv, FP8x23TensorAdd + FP8x23Tensor, FP8x23TensorDiv, FP8x23TensorAdd, FP8x23TensorMul }; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ FP8x23WImpl, FP8x23WTryIntoFP8x23, FP8x23W, FP8x23IntoFP8x23W @@ -161,7 +161,23 @@ impl FP8x23NN of NNTrait { pads, storage_order, strides, - output_len + output_len) + } + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::deform_conv::deform_conv( + X, W, offset, B, mask, dilations, group, kernel_shape, offset_group, pads, strides, ) } } diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index b427eadb7..973dfb552 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -4,6 +4,7 @@ use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i32::{I32Tensor, I32TensorAdd}; use orion::operators::nn::AUTO_PAD; + impl I32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { functional::relu::relu(*tensor) @@ -145,4 +146,19 @@ impl I32NN of NNTrait { ) -> (Tensor, Option>) { panic(array!['not supported!']) } + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + panic(array!['not supported!']) + } } diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index 18398eca2..d48e398df 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -4,6 +4,7 @@ use orion::operators::nn::functional; use 
orion::operators::tensor::implementations::tensor_i8::{I8Tensor, I8TensorAdd}; use orion::operators::nn::AUTO_PAD; + impl I8NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { functional::relu::relu(*tensor) @@ -145,4 +146,19 @@ impl I8NN of NNTrait { ) -> (Tensor, Option>) { panic(array!['not supported!']) } + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + panic(array!['not supported!']) + } } diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index a76f0528b..504a8199b 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -4,6 +4,7 @@ use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_u32::{U32Tensor, U32TensorAdd}; use orion::operators::nn::AUTO_PAD; + impl U32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { functional::relu::relu(*tensor) @@ -145,4 +146,19 @@ impl U32NN of NNTrait { ) -> (Tensor, Option>) { panic(array!['not supported!']) } + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + panic(array!['not supported!']) + } } diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 665c20106..95c1c87a3 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1001,3 +1001,7 @@ mod argmax_negative_axis_keepdims; mod argmax_negative_axis_keepdims_select_last_index; mod argmax_no_keepdims; mod argmax_no_keepdims_select_last_index; +mod deform_conv_with_padding; +mod deform_conv_with_mask_bias; +mod deform_conv_with_multiple_offset_groups; +mod deform_conv; diff --git a/tests/nodes/deform_conv.cairo b/tests/nodes/deform_conv.cairo new file mode 100644 index 000000000..4568731a7 --- /dev/null +++ b/tests/nodes/deform_conv.cairo @@ -0,0 +1,36 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::operators::nn::FP16x16NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_deform_conv() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::deform_conv( + @input_0, + @input_1, + @input_2, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/deform_conv/input_0.cairo b/tests/nodes/deform_conv/input_0.cairo new file mode 100644 index 000000000..5208e1993 --- /dev/null +++ b/tests/nodes/deform_conv/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + 
data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv/input_1.cairo b/tests/nodes/deform_conv/input_1.cairo new file mode 100644 index 000000000..fd236fd56 --- /dev/null +++ b/tests/nodes/deform_conv/input_1.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv/input_2.cairo b/tests/nodes/deform_conv/input_2.cairo new file mode 100644 index 000000000..f292a30c7 --- /dev/null +++ b/tests/nodes/deform_conv/input_2.cairo @@ -0,0 +1,47 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 6553, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv/output_0.cairo 
b/tests/nodes/deform_conv/output_0.cairo new file mode 100644 index 000000000..0405486ac --- /dev/null +++ b/tests/nodes/deform_conv/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 622592, sign: false }); + data.append(FP16x16 { mag: 779878, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_mask_bias.cairo b/tests/nodes/deform_conv_with_mask_bias.cairo new file mode 100644 index 000000000..2627fa720 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias.cairo @@ -0,0 +1,40 @@ +mod input_0; +mod input_1; +mod input_2; +mod input_3; +mod input_4; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::operators::nn::FP16x16NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_deform_conv_with_mask_bias() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let input_3 = input_3::input_3(); + let input_4 = input_4::input_4(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::deform_conv( + @input_0, + @input_1, + @input_2, + Option::Some(input_3.data), + Option::Some(input_4), + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/deform_conv_with_mask_bias/input_0.cairo b/tests/nodes/deform_conv_with_mask_bias/input_0.cairo new file mode 100644 index 000000000..5208e1993 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_mask_bias/input_1.cairo b/tests/nodes/deform_conv_with_mask_bias/input_1.cairo new file mode 100644 index 000000000..fd236fd56 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias/input_1.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; 
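// input_1 below is the (1, 1, 2, 2) all-ones weight tensor; a magnitude of 65536 encodes 1.0 in FP16x16.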
+ +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_mask_bias/input_2.cairo b/tests/nodes/deform_conv_with_mask_bias/input_2.cairo new file mode 100644 index 000000000..f292a30c7 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias/input_2.cairo @@ -0,0 +1,47 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 6553, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_mask_bias/input_3.cairo b/tests/nodes/deform_conv_with_mask_bias/input_3.cairo new file mode 100644 index 000000000..805491b57 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias/input_3.cairo @@ -0,0 +1,13 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_3() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_mask_bias/input_4.cairo b/tests/nodes/deform_conv_with_mask_bias/input_4.cairo new file mode 100644 index 000000000..13381e7f3 --- /dev/null +++ 
b/tests/nodes/deform_conv_with_mask_bias/input_4.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_4() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(4); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_mask_bias/output_0.cairo b/tests/nodes/deform_conv_with_mask_bias/output_0.cairo new file mode 100644 index 000000000..4e2ac6dc2 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 688128, sign: false }); + data.append(FP16x16 { mag: 845414, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1271398, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_multiple_offset_groups.cairo b/tests/nodes/deform_conv_with_multiple_offset_groups.cairo new file mode 100644 index 000000000..91840d1c4 --- /dev/null +++ b/tests/nodes/deform_conv_with_multiple_offset_groups.cairo @@ -0,0 +1,36 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::operators::nn::FP16x16NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_deform_conv_with_multiple_offset_groups() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::deform_conv( + @input_0, + @input_1, + @input_2, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + Option::Some(2), + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/deform_conv_with_multiple_offset_groups/input_0.cairo b/tests/nodes/deform_conv_with_multiple_offset_groups/input_0.cairo new file mode 100644 index 000000000..e628eeb6d --- /dev/null +++ 
b/tests/nodes/deform_conv_with_multiple_offset_groups/input_0.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_multiple_offset_groups/input_1.cairo b/tests/nodes/deform_conv_with_multiple_offset_groups/input_1.cairo new file mode 100644 index 000000000..a6c0269ea --- /dev/null +++ b/tests/nodes/deform_conv_with_multiple_offset_groups/input_1.cairo @@ -0,0 +1,23 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_multiple_offset_groups/input_2.cairo b/tests/nodes/deform_conv_with_multiple_offset_groups/input_2.cairo new file mode 100644 index 000000000..1c59d0824 --- /dev/null +++ b/tests/nodes/deform_conv_with_multiple_offset_groups/input_2.cairo @@ -0,0 +1,79 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(16); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + 
data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 6553, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_multiple_offset_groups/output_0.cairo b/tests/nodes/deform_conv_with_multiple_offset_groups/output_0.cairo new file mode 100644 index 000000000..9483db7b7 --- /dev/null +++ b/tests/nodes/deform_conv_with_multiple_offset_groups/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = 
ArrayTrait::new();
+    data.append(FP16x16 { mag: 2195456, sign: false });
+    data.append(FP16x16 { mag: 2103705, sign: false });
+    data.append(FP16x16 { mag: 2097152, sign: false });
+    data.append(FP16x16 { mag: 2097152, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/deform_conv_with_padding.cairo b/tests/nodes/deform_conv_with_padding.cairo
new file mode 100644
index 000000000..553b33a28
--- /dev/null
+++ b/tests/nodes/deform_conv_with_padding.cairo
@@ -0,0 +1,36 @@
+mod input_0;
+mod input_1;
+mod input_2;
+mod output_0;
+
+
+use orion::numbers::FixedTrait;
+use orion::operators::nn::NNTrait;
+use orion::operators::nn::FP16x16NN;
+use orion::utils::{assert_eq, assert_seq_eq};
+use orion::operators::tensor::FP16x16TensorPartialEq;
+
+#[test]
+#[available_gas(2000000000)]
+fn test_deform_conv_with_padding() {
+    let input_0 = input_0::input_0();
+    let input_1 = input_1::input_1();
+    let input_2 = input_2::input_2();
+    let z_0 = output_0::output_0();
+
+    let y_0 = NNTrait::deform_conv(
+        @input_0,
+        @input_1,
+        @input_2,
+        Option::None,
+        Option::None,
+        Option::None,
+        Option::None,
+        Option::Some(array![2, 2].span()),
+        Option::None,
+        Option::Some(array![1, 1, 1, 1].span()),
+        Option::None
+    );
+
+    assert_eq(y_0, z_0);
+}
diff --git a/tests/nodes/deform_conv_with_padding/input_0.cairo b/tests/nodes/deform_conv_with_padding/input_0.cairo
new file mode 100644
index 000000000..5208e1993
--- /dev/null
+++ b/tests/nodes/deform_conv_with_padding/input_0.cairo
@@ -0,0 +1,24 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
+use orion::numbers::{FixedTrait, FP16x16};
+
+fn input_0() -> Tensor<FP16x16> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(1);
+    shape.append(1);
+    shape.append(3);
+    shape.append(3);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP16x16 { mag: 0, sign: false });
+    data.append(FP16x16 { mag: 65536, sign: false });
+    data.append(FP16x16 { mag: 131072, sign: false });
+    data.append(FP16x16 { mag: 196608, sign: false });
+    data.append(FP16x16 { mag: 262144, sign: false });
+    data.append(FP16x16 { mag: 327680, sign: false });
+    data.append(FP16x16 { mag: 393216, sign: false });
+    data.append(FP16x16 { mag: 458752, sign: false });
+    data.append(FP16x16 { mag: 524288, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/deform_conv_with_padding/input_1.cairo b/tests/nodes/deform_conv_with_padding/input_1.cairo
new file mode 100644
index 000000000..fd236fd56
--- /dev/null
+++ b/tests/nodes/deform_conv_with_padding/input_1.cairo
@@ -0,0 +1,19 @@
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
+use orion::numbers::{FixedTrait, FP16x16};
+
+fn input_1() -> Tensor<FP16x16> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(1);
+    shape.append(1);
+    shape.append(2);
+    shape.append(2);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP16x16 { mag: 65536, sign: false });
+    data.append(FP16x16 { mag: 65536, sign: false });
+    data.append(FP16x16 { mag: 65536, sign: false });
+    data.append(FP16x16 { mag: 65536, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
diff --git a/tests/nodes/deform_conv_with_padding/input_2.cairo b/tests/nodes/deform_conv_with_padding/input_2.cairo
new file mode 100644
index 000000000..1c854d398
--- /dev/null
+++
b/tests/nodes/deform_conv_with_padding/input_2.cairo @@ -0,0 +1,143 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + 
data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 6553, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_padding/output_0.cairo b/tests/nodes/deform_conv_with_padding/output_0.cairo new file mode 100644 index 000000000..a5444d5fd --- /dev/null +++ b/tests/nodes/deform_conv_with_padding/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use 
orion::numbers::{FixedTrait, FP16x16};
+
+fn output_0() -> Tensor<FP16x16> {
+    let mut shape = ArrayTrait::<usize>::new();
+    shape.append(1);
+    shape.append(1);
+    shape.append(4);
+    shape.append(4);
+
+    let mut data = ArrayTrait::new();
+    data.append(FP16x16 { mag: 0, sign: false });
+    data.append(FP16x16 { mag: 65536, sign: false });
+    data.append(FP16x16 { mag: 196608, sign: false });
+    data.append(FP16x16 { mag: 131072, sign: false });
+    data.append(FP16x16 { mag: 196608, sign: false });
+    data.append(FP16x16 { mag: 524288, sign: false });
+    data.append(FP16x16 { mag: 779878, sign: false });
+    data.append(FP16x16 { mag: 458752, sign: false });
+    data.append(FP16x16 { mag: 589824, sign: false });
+    data.append(FP16x16 { mag: 1310720, sign: false });
+    data.append(FP16x16 { mag: 1572864, sign: false });
+    data.append(FP16x16 { mag: 851968, sign: false });
+    data.append(FP16x16 { mag: 393216, sign: false });
+    data.append(FP16x16 { mag: 851968, sign: false });
+    data.append(FP16x16 { mag: 983040, sign: false });
+    data.append(FP16x16 { mag: 524288, sign: false });
+    TensorTrait::new(shape.span(), data.span())
+}
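All of the fixture values in these test files are Orion `FP16x16` fixed-point numbers: `mag` holds the magnitude scaled by 2^16 (so `mag: 65536` is 1.0) and `sign: true` marks a negative value. The snippet below is a minimal sketch, not part of the diff itself, that rebuilds the three non-trivial constants appearing in the test vectors and notes their decimal meaning; the function and variable names are illustrative only.

```rust
use orion::numbers::FP16x16;

// Illustrative decoding of the FP16x16 constants used in the fixtures above,
// assuming the Q16.16 layout: value = mag / 65536, negated when sign is true.
fn fixture_constants() -> (FP16x16, FP16x16, FP16x16) {
    // The all-ones 2x2 kernels (input_1.cairo): 65536 / 65536 = 1.0
    let one = FP16x16 { mag: 65536, sign: false };
    // The single non-zero sampling offset (input_2.cairo): 32768 / 65536 = 0.5
    let half = FP16x16 { mag: 32768, sign: false };
    // The negative offset entry: 6553 / 65536 is about 0.1, with sign = true, i.e. roughly -0.1
    let minus_tenth = FP16x16 { mag: 6553, sign: true };
    (one, half, minus_tenth)
}
```

Under the same reading, an expected output entry such as `mag: 779878` in `tests/nodes/deform_conv/output_0.cairo` decodes to roughly 11.9.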