Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: add size #417

Open
wants to merge 1 commit into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions docs/SUMMARY.md
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@
* [tensor.and](framework/operators/tensor/tensor.and.md)
* [tensor.where](framework/operators/tensor/tensor.where.md)
* [tensor.round](framework/operators/tensor/tensor.round.md)
* [tensor.size](framework/operators/tensor/tensor.size.md)
* [Neural Network](framework/operators/neural-network/README.md)
* [nn.relu](framework/operators/neural-network/nn.relu.md)
* [nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md)
Expand Down
3 changes: 2 additions & 1 deletion docs/framework/compatibility.md
Original file line number Diff line number Diff line change
Expand Up @@ -72,5 +72,6 @@ You can see below the list of current supported ONNX Operators:
| [Round](operators/tensor/tensor.round.md) | :white\_check\_mark: |
| [MaxInTensor](operators/tensor/tensor.max\_in\_tensor.md) | :white\_check\_mark: |
| [Max](operators/tensor/tensor.max.md) | :white\_check\_mark: |
| [Size](operators/tensor/tensor.size.md) | :white\_check\_mark: |

Current Operators support: **61/156 (39%)**
Current Operators support: **62/156 (40%)**
1 change: 1 addition & 0 deletions docs/framework/operators/tensor/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ use orion::operators::tensor::TensorTrait;
| [`tensor.identity`](tensor.identity.md) | Return a Tensor with the same shape and contents as input. |
| [`tensor.where`](tensor.where.md) | Return elements chosen from x or y depending on condition. |
| [`tensor.round`](tensor.round.md) | Computes the round value of all elements in the input tensor. |
| [`tensor.size`](tensor.size.md) | Returns the size of a tensor. |

## Arithmetic Operations

Expand Down
33 changes: 33 additions & 0 deletions docs/framework/operators/tensor/tensor.size.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# tensor.size

```rust
fn size(self: @Tensor<T>) -> Tensor<T>;
```

Takes a tensor as input and outputs a one-element tensor whose value equals the total number of elements of the input tensor.

## Args

* `self`(`@Tensor<T>`) - An input tensor.

## Returns

A new one-element `Tensor<T>` containing the total number of elements of the input tensor.

## Example

```rust
use array::{ArrayTrait, SpanTrait};

use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};

fn size_example() -> Tensor<FP8x23> {
let tensor = TensorTrait::<FP8x23>::new(
    shape: array![2, 3].span(),
    data: array![1, 2, 3, 4, 5, 6].span(),
);

return tensor.size();
}
>>> [6]
```
70 changes: 70 additions & 0 deletions nodegen/node/size.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_node, make_test, to_fp, Tensor, Dtype, FixedImpl


def _make_size_case(x, dtype, name, fixed_impl=None):
    """Build one Size test case.

    The Size operator maps a tensor to a one-element tensor holding its total
    element count, so the expected output is always ``[x.size]`` in the same
    numeric dtype as the input.

    Args:
        x: input numpy array (already cast to the intended numpy dtype).
        dtype: the Orion ``Dtype`` tag for both input and output tensors.
        name: test case name, used for the generated node and test files.
        fixed_impl: ``FixedImpl`` variant for fixed-point dtypes; ``None`` for
            integer dtypes, in which case raw values are used without scaling.
    """
    # Expected output: total element count, in the input's own dtype.
    y = np.array([x.size]).astype(x.dtype)

    if fixed_impl is not None:
        # Fixed-point dtypes: values must be converted to raw fixed-point form.
        x_t = Tensor(dtype, x.shape, to_fp(x.flatten(), fixed_impl))
        y_t = Tensor(dtype, y.shape, to_fp(y.flatten(), fixed_impl))
    else:
        # Integer dtypes: values are used as-is.
        x_t = Tensor(dtype, x.shape, x.flatten())
        y_t = Tensor(dtype, y.shape, y.flatten())

    make_node([x_t], [y_t], name)
    make_test([x_t], y_t, "input_0.size()", name)


class Size(RunAll):
    """Node generator for the `size` operator: one case per supported dtype."""

    @staticmethod
    def size_fp8x23():
        x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
        _make_size_case(x, Dtype.FP8x23, "size_fp8x23", FixedImpl.FP8x23)

    @staticmethod
    def size_fp16x16():
        x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
        _make_size_case(x, Dtype.FP16x16, "size_fp16x16", FixedImpl.FP16x16)

    @staticmethod
    def size_i8():
        x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int8)
        _make_size_case(x, Dtype.I8, "size_i8")

    @staticmethod
    def size_i32():
        x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32)
        _make_size_case(x, Dtype.I32, "size_i32")

    @staticmethod
    def size_u32():
        x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.uint32)
        _make_size_case(x, Dtype.U32, "size_u32")
23 changes: 17 additions & 6 deletions src/numbers.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ mod signed_integer;

use orion::numbers::signed_integer::integer_trait::IntegerTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::fixed_point::utils;

// Common methods from Fixed Point and Signed Integers.
trait NumberTrait<T, MAG> {
Expand Down Expand Up @@ -1128,7 +1129,9 @@ impl I8Number of NumberTrait<i8, u8> {
}

fn from_felt(val: felt252) -> i8 {
panic(array!['not supported!'])
let mag = integer::u8_try_from_felt252(utils::felt_abs(val)).unwrap();
let sign = utils::felt_sign(val);
i8 { mag, sign }
}

fn ceil(self: i8) -> i8 {
Expand Down Expand Up @@ -1320,7 +1323,9 @@ impl i16Number of NumberTrait<i16, u16> {
}

fn from_felt(val: felt252) -> i16 {
panic(array!['not supported!'])
let mag = integer::u16_try_from_felt252(utils::felt_abs(val)).unwrap();
let sign = utils::felt_sign(val);
i16 { mag, sign }
}

fn ceil(self: i16) -> i16 {
Expand Down Expand Up @@ -1512,7 +1517,9 @@ impl i32Number of NumberTrait<i32, u32> {
}

fn from_felt(val: felt252) -> i32 {
panic(array!['not supported!'])
let mag = integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap();
let sign = utils::felt_sign(val);
i32 { mag, sign }
}

fn ceil(self: i32) -> i32 {
Expand Down Expand Up @@ -1704,7 +1711,9 @@ impl i64Number of NumberTrait<i64, u64> {
}

fn from_felt(val: felt252) -> i64 {
panic(array!['not supported!'])
let mag = integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
let sign = utils::felt_sign(val);
i64 { mag, sign }
}

fn ceil(self: i64) -> i64 {
Expand Down Expand Up @@ -1896,7 +1905,9 @@ impl i128Number of NumberTrait<i128, u128> {
}

fn from_felt(val: felt252) -> i128 {
panic(array!['not supported!'])
let mag = integer::u128_try_from_felt252(utils::felt_abs(val)).unwrap();
let sign = utils::felt_sign(val);
i128 { mag, sign }
}

fn ceil(self: i128) -> i128 {
Expand Down Expand Up @@ -2086,7 +2097,7 @@ impl u32Number of NumberTrait<u32, u32> {
}

fn from_felt(val: felt252) -> u32 {
panic(array!['not supported!'])
integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap()
}

fn ceil(self: u32) -> u32 {
Expand Down
53 changes: 53 additions & 0 deletions src/operators/tensor/core.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,8 @@ impl TensorSerde<T, impl TSerde: Serde<T>, impl TDrop: Drop<T>> of Serde<Tensor<
/// identity - Return a Tensor with the same shape and contents as input.
/// where - Return elements chosen from x or y depending on condition.
/// round - Computes the round value of all elements in the input tensor.
/// size - Returns the size of a tensor.
///
trait TensorTrait<T> {
/// # tensor.new
///
Expand Down Expand Up @@ -3041,6 +3043,41 @@ trait TensorTrait<T> {
/// ```
///
fn round(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.size
///
/// ```rust
/// fn size(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Takes a tensor as input and outputs a one-element tensor whose value equals the total number of elements of the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - An input tensor.
///
/// ## Returns
///
/// A new one-element `Tensor<T>` containing the total number of elements of the input tensor.
///
/// ## Example
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
///
/// fn size_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
///     shape: array![2, 3].span(),
///     data: array![1, 2, 3, 4, 5, 6].span(),
/// );
///
/// return tensor.size();
/// }
/// >>> [6]
/// ```
///
fn size(self: @Tensor<T>) -> Tensor<T>;
}

/// Cf: TensorTrait::new docstring
Expand Down Expand Up @@ -3582,3 +3619,19 @@ fn clip<
fn identity<T>(self: @Tensor<T>) -> Tensor<T> {
Tensor::<T> { shape: *self.shape, data: *self.data }
}

/// Cf: TensorTrait::size docstring
///
/// Returns a one-element tensor (shape `[1]`) whose single value is the total
/// number of elements of `self`, encoded in the tensor's own numeric type `T`.
fn size<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TDrop: Drop<T>,
>
(
    self: @Tensor<T>
) -> Tensor<T> {

    // Convert the usize element count into a `T` value:
    //   usize -> felt252 (`.into()`) -> T (`from_felt`) -> raw magnitude (`mag`)
    //   -> value (`new_unscaled` with sign = false, i.e. non-negative).
    // The detour through `from_felt`/`mag` presumably exists because no direct
    // usize -> MAG conversion bound is available on this generic function —
    // TODO(review): confirm, and note `from_felt` panics if the count does not
    // fit in MAG (see the `u*_try_from_felt252(...).unwrap()` impls).
    let value = NumberTrait::new_unscaled(NumberTrait::mag(NumberTrait::<T, MAG>::from_felt(len_from_shape(*self.shape).into())), false);
    Tensor::<T> { shape: array![1].span(), data: array![value].span() }

}
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_fp16x16.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,10 @@ impl FP16x16Tensor of TensorTrait<FP16x16> {
math::round::round(*self)
}

// Returns the total element count as a one-element tensor; delegates to core::size.
fn size(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
    core::size(self)
}

}

/// Implements addition for `Tensor<FP16x16>` using the `Add` trait.
Expand Down
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_fp16x16wide.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,10 @@ impl FP16x16WTensor of TensorTrait<FP16x16W> {
fn round(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::round::round(*self)
}

// Returns the total element count as a one-element tensor; delegates to core::size.
fn size(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
    core::size(self)
}
}

/// Implements addition for `Tensor<FP16x16W>` using the `Add` trait.
Expand Down
6 changes: 5 additions & 1 deletion src/operators/tensor/implementations/tensor_fp32x32.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,11 @@ impl FP32x32Tensor of TensorTrait<FP32x32> {

fn round(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::round::round(*self)
}
}

// Returns the total element count as a one-element tensor; delegates to core::size.
fn size(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
    core::size(self)
}
}

/// Implements addition for `Tensor<FP32x32>` using the `Add` trait.
Expand Down
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_fp64x64.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,10 @@ impl FP64x64Tensor of TensorTrait<FP64x64> {
fn round(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::round::round(*self)
}

// Returns the total element count as a one-element tensor; delegates to core::size.
fn size(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
    core::size(self)
}
}

/// Implements addition for `Tensor<FP64x64>` using the `Add` trait.
Expand Down
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_fp8x23.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -257,6 +257,10 @@ impl FP8x23Tensor of TensorTrait<FP8x23> {
fn round(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::round::round(*self)
}

// Returns the total element count as a one-element tensor; delegates to core::size.
fn size(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
    core::size(self)
}
}

/// Implements addition for `Tensor<FP8x23>` using the `Add` trait.
Expand Down
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_fp8x23wide.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,10 @@ impl FP8x23WTensor of TensorTrait<FP8x23W> {
math::round::round(*self)
}

// Returns the total element count as a one-element tensor; delegates to core::size.
fn size(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
    core::size(self)
}

}

/// Implements addition for `Tensor<FP8x23W>` using the `Add` trait.
Expand Down
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_i32.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -256,6 +256,10 @@ impl I32Tensor of TensorTrait<i32> {
fn round(self: @Tensor<i32>) -> Tensor<i32> {
math::round::round(*self)
}

// Returns the total element count as a one-element tensor; delegates to core::size.
fn size(self: @Tensor<i32>) -> Tensor<i32> {
    core::size(self)
}
}

/// Implements addition for `Tensor<i32>` using the `Add` trait.
Expand Down
6 changes: 5 additions & 1 deletion src/operators/tensor/implementations/tensor_i8.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,11 @@ impl I8Tensor of TensorTrait<i8> {

fn round(self: @Tensor<i8>) -> Tensor<i8> {
math::round::round(*self)
}
}

// Returns the total element count as a one-element tensor; delegates to core::size.
// NOTE(review): the count is encoded as an i8, so it silently assumes the tensor
// has at most 127 elements — confirm against core::size / from_felt behavior.
fn size(self: @Tensor<i8>) -> Tensor<i8> {
    core::size(self)
}
}

/// Implements addition for `Tensor<i8>` using the `Add` trait.
Expand Down
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_u32.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -249,6 +249,10 @@ impl U32Tensor of TensorTrait<u32> {
fn round(self: @Tensor<u32>) -> Tensor<u32> {
math::round::round(*self)
}

// Returns the total element count as a one-element tensor; delegates to core::size.
fn size(self: @Tensor<u32>) -> Tensor<u32> {
    core::size(self)
}
}

/// Implements addition for `Tensor<u32>` using the `Add` trait.
Expand Down
7 changes: 6 additions & 1 deletion tests/nodes.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -515,4 +515,9 @@ mod max_i8_broadcast_two_tensors;
mod max_u32_three_tensors;
mod max_u32_broadcast_three_tensors;
mod max_u32_two_tensors;
mod max_u32_broadcast_two_tensors;
mod max_u32_broadcast_two_tensors;
mod size_fp16x16;
mod size_fp8x23;
mod size_i32;
mod size_i8;
mod size_u32;
Loading