Update OS to v0.13.3 #449

Closed
wants to merge 17 commits into from
2 changes: 1 addition & 1 deletion cairo-lang
Submodule cairo-lang updated 39 files
+2 −2 README.md
+2 −0 src/starkware/cairo/common/BUILD
+36 −0 src/starkware/cairo/common/copy_indices.cairo
+28 −0 src/starkware/cairo/common/log2_ceil.cairo
+1 −0 src/starkware/cairo/lang/BUILD
+1 −1 src/starkware/cairo/lang/VERSION
+1 −1 src/starkware/cairo/lang/ide/vscode-cairo/package.json
+6 −3 src/starkware/starknet/business_logic/execution/execute_entry_point.py
+1 −0 src/starkware/starknet/business_logic/fact_state/BUILD
+112 −8 src/starkware/starknet/business_logic/fact_state/state.py
+3 −3 src/starkware/starknet/business_logic/state/state_api_objects.py
+9 −6 src/starkware/starknet/business_logic/transaction/deprecated_objects.py
+6 −2 src/starkware/starknet/core/aggregator/BUILD
+169 −125 src/starkware/starknet/core/aggregator/aggregator_test.py
+159 −37 src/starkware/starknet/core/aggregator/output_parser.py
+2 −2 src/starkware/starknet/core/aggregator/program_hash.json
+6 −4 src/starkware/starknet/core/aggregator/utils.py
+2 −2 src/starkware/starknet/core/os/BUILD
+26 −0 src/starkware/starknet/core/os/data_availability/BUILD
+432 −0 src/starkware/starknet/core/os/data_availability/compression.cairo
+288 −0 src/starkware/starknet/core/os/data_availability/compression.py
+42 −12 src/starkware/starknet/core/os/output.cairo
+1 −1 src/starkware/starknet/core/os/program_hash.json
+54 −28 src/starkware/starknet/core/os/state/output.cairo
+1 −1 src/starkware/starknet/definitions/BUILD
+6 −1 src/starkware/starknet/definitions/chain_ids.py
+2 −0 src/starkware/starknet/definitions/constants.py
+1 −0 src/starkware/starknet/definitions/error_codes.py
+6 −7 src/starkware/starknet/definitions/fields.py
+37 −59 src/starkware/starknet/definitions/general_config.py
+4 −5 src/starkware/starknet/definitions/general_config.yml
+11 −0 src/starkware/starknet/definitions/overridable_versioned_constants.py
+1 −1 src/starkware/starknet/definitions/versioned_constants.json
+20 −0 src/starkware/starknet/solidity/IStarknetMessaging.sol
+2 −1 src/starkware/starknet/solidity/Starknet.sol
+1 −1 src/starkware/starknet/solidity/StarknetGovernance.sol
+23 −11 src/starkware/starknet/solidity/StarknetMessaging.sol
+1 −1 src/starkware/starknet/solidity/StarknetOperator.sol
+8 −2 src/starkware/starknet/solidity/StarknetState.sol
1 change: 1 addition & 0 deletions crates/bin/prove_block/src/rpc_utils.rs
@@ -215,6 +215,7 @@ pub(crate) fn get_starknet_version(block_with_txs: &BlockWithTxs) -> blockifier:
"0.13.1.1" => blockifier::versioned_constants::StarknetVersion::V0_13_1_1,
"0.13.2" => blockifier::versioned_constants::StarknetVersion::V0_13_2,
"0.13.2.1" => blockifier::versioned_constants::StarknetVersion::Latest,
"0.13.3" => blockifier::versioned_constants::StarknetVersion::Latest,
other => {
unimplemented!("Unsupported Starknet version: {}", other)
}
3 changes: 2 additions & 1 deletion crates/bin/prove_block/tests/prove_block.rs
@@ -49,7 +49,7 @@ const DEFAULT_COMPILED_OS: &[u8] = include_bytes!("../../../../build/os_latest.j
#[case::inconsistent_cairo0_class_hash_1(204936)]
#[case::no_possible_convertion_1(155007)]
#[case::no_possible_convertion_2(155029)]
#[case::reference_pie_with_full_output_enabled(173404)]
// #[case::reference_pie_with_full_output_enabled(173404)]
#[case::inconsistent_cairo0_class_hash_2(159674)]
#[case::inconsistent_cairo0_class_hash_3(164180)]
#[case::key_not_in_proof_0(155087)]
@@ -72,6 +72,7 @@ const DEFAULT_COMPILED_OS: &[u8] = include_bytes!("../../../../build/os_latest.j
#[case::memory_invalid_signature(216914)]
#[case::diff_assert_values(218624)]
#[case::could_nt_compute_operand_op1(204337)]
#[case::os_v0_13_3(320000)]
#[ignore = "Requires a running Pathfinder node"]
#[tokio::test(flavor = "multi_thread")]
async fn test_prove_selected_blocks(#[case] block_number: u64) {
25 changes: 14 additions & 11 deletions crates/starknet-os/src/config.rs
@@ -32,9 +32,9 @@ pub const COMPILED_CLASS_HASH_COMMITMENT_TREE_HEIGHT: usize = 251;
pub const CONTRACT_STATES_COMMITMENT_TREE_HEIGHT: usize = 251;
pub const DEFAULT_INNER_TREE_HEIGHT: u64 = 64;
// TODO: update with relevant address
pub const DEFAULT_FEE_TOKEN_ADDR: &str = "482bc27fc5627bf974a72b65c43aa8a0464a70aab91ad8379b56a4f17a84c3";
pub const DEFAULT_DEPRECATED_FEE_TOKEN_ADDR: &str = "482bc27fc5627bf974a72b65c43aa8a0464a70aab91ad8379b56a4f17a84c3";
pub const SEQUENCER_ADDR_0_13_2: &str = "0x795488c127693ffb36733cc054f9e2be39241a794a4877dc8fc1dbe52750488";
pub const DEFAULT_FEE_TOKEN_ADDR: &str = "7ce4aa542d72a82662cda96b147da9b041ecf8c61f67ef657f3bbb852fc698f";
pub const DEFAULT_DEPRECATED_FEE_TOKEN_ADDR: &str = "5195ba458d98a8d5a390afa87e199566e473d1124c07a3c57bf19813255ac41";
pub const SEQUENCER_ADDR_0_13_2: &str = "0x31c641e041f8d25997985b0efe68d0c5ce89d418ca9a127ae043aebed6851c5";
pub const CONTRACT_ADDRESS_BITS: usize = 251;
pub const CONTRACT_CLASS_LEAF_VERSION: &[u8] = "CONTRACT_CLASS_LEAF_V0".as_bytes();

@@ -67,8 +67,9 @@ const fn default_use_kzg_da() -> bool {
pub struct StarknetGeneralConfig {
pub starknet_os_config: StarknetOsConfig,
pub gas_price_bounds: GasPriceBounds,
pub invoke_tx_max_n_steps: u32,
pub validate_max_n_steps: u32,
// pub invoke_tx_max_n_steps: u32,
// pub validate_max_n_steps: u32,
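// Replaces the commented-out per-transaction step limits above.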
pub validate_max_n_steps_override: u32,
pub default_eth_price_in_fri: u128,
pub sequencer_address: ContractAddress,
pub enforce_l1_handler_fee: bool,
@@ -92,8 +93,9 @@ impl Default for StarknetGeneralConfig {
min_wei_l1_data_gas_price: 100000,
min_wei_l1_gas_price: 10000000000,
},
invoke_tx_max_n_steps: MAX_STEPS_PER_TX,
validate_max_n_steps: MAX_STEPS_PER_TX,
// invoke_tx_max_n_steps: MAX_STEPS_PER_TX,
// validate_max_n_steps: MAX_STEPS_PER_TX,
validate_max_n_steps_override: MAX_STEPS_PER_TX,
default_eth_price_in_fri: 1_000_000_000_000_000_000_000,
sequencer_address: contract_address!(SEQUENCER_ADDR_0_13_2),
enforce_l1_handler_fee: true,
@@ -114,8 +116,8 @@ impl StarknetGeneralConfig {

pub fn empty_block_context(&self) -> BlockContext {
let mut versioned_constants = VersionedConstants::default();
versioned_constants.invoke_tx_max_n_steps = self.invoke_tx_max_n_steps;
versioned_constants.validate_max_n_steps = self.validate_max_n_steps;
// versioned_constants.invoke_tx_max_n_steps = self.invoke_tx_max_n_steps;
// versioned_constants.validate_max_n_steps = self.validate_max_n_steps;
versioned_constants.max_recursion_depth = 50;

let block_info = BlockInfo {
@@ -177,9 +179,10 @@ mod tests {

assert!(conf.enforce_l1_handler_fee);

assert_eq!(1000000, conf.invoke_tx_max_n_steps);
// assert_eq!(1000000, conf.invoke_tx_max_n_steps);
assert_eq!(1000000000000000000000, conf.default_eth_price_in_fri);
assert_eq!(1000000, conf.validate_max_n_steps);
// assert_eq!(1000000, conf.validate_max_n_steps);
assert_eq!(1000000, conf.validate_max_n_steps_override);

assert_eq!(expected_seq_addr, conf.sequencer_address);
}
274 changes: 274 additions & 0 deletions crates/starknet-os/src/hints/compression.rs
@@ -0,0 +1,274 @@
use std::collections::HashMap;

use cairo_vm::hint_processor::builtin_hint_processor::hint_utils::{get_integer_from_var_name, get_ptr_from_var_name};
use cairo_vm::hint_processor::hint_processor_definition::HintReference;
use cairo_vm::hint_processor::hint_processor_utils::felt_to_usize;
use cairo_vm::serde::deserialize_program::ApTracking;
use cairo_vm::types::exec_scope::ExecutionScopes;
use cairo_vm::types::relocatable::MaybeRelocatable;
use cairo_vm::vm::errors::hint_errors::HintError;
use cairo_vm::vm::vm_core::VirtualMachine;
use cairo_vm::Felt252;
use indoc::indoc;

use crate::hints::vars;
use crate::utils::get_constant;

const COMPRESSION_VERSION: u8 = 0;
const MAX_N_BITS: usize = 251;
const N_UNIQUE_VALUE_BUCKETS: usize = 6;
const TOTAL_N_BUCKETS: usize = N_UNIQUE_VALUE_BUCKETS + 1;
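// Felts are grouped into buckets by bit length; each unique value is stored once and
// repeated values are encoded as pointers into the chained unique-value buckets.
// The extra bucket index (TOTAL_N_BUCKETS - 1) marks an element that is a repeat.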

#[derive(Debug, Clone)]
struct UniqueValueBucket {
n_bits: Felt252,
value_to_index: HashMap<Felt252, usize>,
}

impl UniqueValueBucket {
fn new(n_bits: Felt252) -> Self {
Self { n_bits, value_to_index: HashMap::new() }
}

fn add(&mut self, value: &Felt252) {
if !self.value_to_index.contains_key(value) {
let next_index = self.value_to_index.len();
self.value_to_index.insert(*value, next_index);
}
}

fn get_index(&self, value: &Felt252) -> Option<usize> {
self.value_to_index.get(value).copied()
}

fn pack_in_felts(&self) -> Vec<&Felt252> {
let mut values: Vec<&Felt252> = self.value_to_index.keys().collect();
values.sort_by_key(|&v| self.value_to_index[v]);
values
}
}

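/// Accumulates the values to be compressed, splitting them into unique-value buckets
/// (one per supported bit length) and a list of back-references for repeated values.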
struct CompressionSet {
buckets: Vec<UniqueValueBucket>,
sorted_buckets: Vec<(usize, UniqueValueBucket)>,
repeating_value_locations: Vec<(usize, usize)>,
bucket_index_per_elm: Vec<usize>,
finalized: bool,
}

impl CompressionSet {
fn new(n_bits_per_bucket: Vec<Felt252>) -> Self {
let buckets: Vec<UniqueValueBucket> =
n_bits_per_bucket.iter().map(|&n_bits| UniqueValueBucket::new(n_bits)).collect();

let mut indexed_buckets: Vec<(usize, UniqueValueBucket)> = Vec::new();
for (index, bucket) in buckets.iter().enumerate() {
indexed_buckets.push((index, bucket.clone()));
}
indexed_buckets.sort_by(|a, b| a.1.n_bits.cmp(&b.1.n_bits));

CompressionSet {
buckets,
sorted_buckets: indexed_buckets,
repeating_value_locations: Vec::new(),
bucket_index_per_elm: Vec::new(),
finalized: false,
}
}

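/// Adds `values` to the set, assigning each value to the smallest bucket that can hold it
/// and recording repeated values as references to their first occurrence.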
fn update(&mut self, values: Vec<Felt252>) {
assert!(!self.finalized, "Cannot add values after finalizing.");
let buckets_len = self.buckets.len();
for value in values.iter() {
for (bucket_index, bucket) in self.sorted_buckets.iter_mut() {
if Felt252::from(value.bits()) <= bucket.n_bits {
if bucket.value_to_index.contains_key(value) {
// Repeated value; add the location of the first added copy.
if let Some(index) = bucket.get_index(value) {
self.repeating_value_locations.push((*bucket_index, index));
self.bucket_index_per_elm.push(buckets_len);
}
} else {
// First appearance of this value.
bucket.add(value);
self.bucket_index_per_elm.push(*bucket_index);
}
// `sorted_buckets` is ordered by ascending `n_bits`, so the value was placed in the
// smallest bucket that fits it; do not add it to the wider buckets as well.
break;
}
}
}
}

fn finalize(&mut self) {
self.finalized = true;
}

pub fn get_bucket_index_per_elm(&self) -> Vec<usize> {
assert!(self.finalized, "Cannot get bucket_index_per_elm before finalizing.");
self.bucket_index_per_elm.clone()
}

pub fn get_unique_value_bucket_lengths(&self) -> Vec<usize> {
// Return the lengths in the original bucket order (the order of `n_bits_per_bucket`),
// matching the order used for the packed unique values and the bucket offsets.
let mut lengths = vec![0; self.buckets.len()];
for (index, bucket) in self.sorted_buckets.iter() {
lengths[*index] = bucket.value_to_index.len();
}
lengths
}

pub fn get_repeating_value_bucket_length(&self) -> usize {
self.repeating_value_locations.len()
}

pub fn pack_unique_values(&self) -> Vec<Felt252> {
assert!(self.finalized, "Cannot pack before finalizing.");
// Chain the unique values of each bucket, in the original bucket order, into a single
// vector. Note that `update` fills the buckets held in `sorted_buckets`, not the
// untouched copies in `buckets`.
let mut by_index: Vec<&(usize, UniqueValueBucket)> = self.sorted_buckets.iter().collect();
by_index.sort_by_key(|entry| entry.0);
by_index.into_iter().flat_map(|(_, bucket)| bucket.pack_in_felts()).cloned().collect()
}

/// Returns a list of pointers corresponding to the repeating values.
/// The pointers point to the chained unique value buckets.
pub fn get_repeating_value_pointers(&self) -> Vec<usize> {
assert!(self.finalized, "Cannot get pointers before finalizing.");

let unique_value_bucket_lengths = self.get_unique_value_bucket_lengths();
let bucket_offsets = get_bucket_offsets(unique_value_bucket_lengths);

let mut pointers = Vec::new();
for (bucket_index, index_in_bucket) in self.repeating_value_locations.iter() {
pointers.push(bucket_offsets[*bucket_index] + index_in_bucket);
}

pointers
}
}

/// Packs the given elements into a single felt, little-endian in base `elm_bound`.
fn pack_in_felt(elms: Vec<usize>, elm_bound: usize) -> Felt252 {
// Accumulate in the field with a running power of `elm_bound`: `elm_bound.pow(i)` can
// easily overflow `usize`, so the packing must not be done in machine integers.
let mut res = Felt252::ZERO;
let mut factor = Felt252::ONE;
for &elm in elms.iter() {
assert!(elm < elm_bound, "Element out of bound.");
res += Felt252::from(elm) * factor;
factor = factor * Felt252::from(elm_bound);
}
// The caller passes at most `get_n_elms_per_felt(elm_bound)` elements, so the result fits in a felt.
res
}

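/// Packs `elms` (each strictly below `elm_bound`) into felts, filling each felt with
/// `get_n_elms_per_felt(elm_bound)` elements.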
fn pack_in_felts(elms: Vec<usize>, elm_bound: usize) -> Vec<Felt252> {
assert!(elms.iter().all(|&elm| elm < elm_bound), "Element out of bound.");

elms.chunks(get_n_elms_per_felt(elm_bound)).map(|chunk| pack_in_felt(chunk.to_vec(), elm_bound)).collect()
}

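/// Returns the starting offset of each bucket inside the chained unique-value list.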
fn get_bucket_offsets(bucket_lengths: Vec<usize>) -> Vec<usize> {
let mut offsets = Vec::new();
let mut sum = 0;
for length in bucket_lengths {
offsets.push(sum);
sum += length;
}
offsets
}

/// Returns ceil(log2(x)), i.e. the bit length of `x - 1`.
fn log2_ceil(x: usize) -> usize {
assert!(x > 0);
(usize::BITS - (x - 1).leading_zeros()) as usize
}

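/// Returns how many elements bounded by `elm_bound` fit into a single (MAX_N_BITS-bit) felt.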
fn get_n_elms_per_felt(elm_bound: usize) -> usize {
if elm_bound <= 1 {
return MAX_N_BITS;
}
// Computing `2usize.pow(MAX_N_BITS as u32)` would overflow, so compare bit lengths instead.
let n_bits = log2_ceil(elm_bound);
if n_bits > MAX_N_BITS {
return 1;
}
MAX_N_BITS / n_bits
}

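/// Rust counterpart of `compress` from cairo-lang's `compression.py` (see the COMPRESS hint
/// below). The output layout is: packed header (version, data length, unique-value bucket
/// lengths, repeated-value count) | chained unique values | packed repeated-value pointers |
/// packed bucket index per element.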
fn compression(
data: Vec<Felt252>,
data_size: usize,
constants: &HashMap<String, Felt252>,
) -> Result<Vec<Felt252>, HintError> {
let n_bits_per_bucket = vec![
Felt252::from(252),
Felt252::from(125),
Felt252::from(83),
Felt252::from(62),
Felt252::from(31),
Felt252::from(15),
];
let header_elm_n_bits = felt_to_usize(get_constant(vars::constants::HEADER_ELM_N_BITS, constants)?)?;
let header_elm_bound = 1usize << header_elm_n_bits;

assert!(data_size < header_elm_bound, "Data length exceeds the header element bound");

let mut compression_set = CompressionSet::new(n_bits_per_bucket);
compression_set.update(data);
compression_set.finalize();

let bucket_index_per_elm = compression_set.get_bucket_index_per_elm();

let unique_value_bucket_lengths = compression_set.get_unique_value_bucket_lengths();
let n_unique_values = unique_value_bucket_lengths.iter().sum::<usize>();

let mut header = vec![COMPRESSION_VERSION as usize, data_size];
header.extend(unique_value_bucket_lengths.iter().cloned());
header.push(compression_set.get_repeating_value_bucket_length());

let packed_header = vec![pack_in_felt(header, header_elm_bound)];

let packed_repeating_value_pointers =
pack_in_felts(compression_set.get_repeating_value_pointers(), n_unique_values);

let packed_bucket_index_per_elm = pack_in_felts(bucket_index_per_elm, TOTAL_N_BUCKETS);

let compressed_data = packed_header
.into_iter()
.chain(compression_set.pack_unique_values().into_iter())
.chain(packed_repeating_value_pointers.into_iter())
.chain(packed_bucket_index_per_elm.into_iter())
.collect::<Vec<Felt252>>();

Ok(compressed_data)
}

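/// Hint replacing the Python `compress` call: reads the felts in `[ids.data_start, ids.data_end)`
/// from memory and writes the compressed form to `ids.compressed_dst`.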
pub const COMPRESS: &str = indoc! {r#"from starkware.starknet.core.os.data_availability.compression import compress
data = memory.get_range_as_ints(addr=ids.data_start, size=ids.data_end - ids.data_start)
segments.write_arg(ids.compressed_dst, compress(data))"#};

pub fn compress(
vm: &mut VirtualMachine,
_exec_scopes: &mut ExecutionScopes,
ids_data: &HashMap<String, HintReference>,
ap_tracking: &ApTracking,
constants: &HashMap<String, Felt252>,
) -> Result<(), HintError> {
let data_start = get_ptr_from_var_name(vars::ids::DATA_START, vm, ids_data, ap_tracking)?;
let data_end = get_ptr_from_var_name(vars::ids::DATA_END, vm, ids_data, ap_tracking)?;
let data_size = (data_end - data_start)?;

let compressed_dst = get_ptr_from_var_name(vars::ids::COMPRESSED_DST, vm, ids_data, ap_tracking)?;

let data: Vec<Felt252> = vm.get_integer_range(data_start, data_size)?.into_iter().map(|s| *s).collect();
let compress_result = compression(data, data_size, constants)?
.into_iter()
.map(MaybeRelocatable::Int)
.collect::<Vec<MaybeRelocatable>>();

vm.write_arg(compressed_dst, &compress_result)?;

Ok(())
}

pub const SET_DECOMPRESSED_DST: &str = indoc! {r#"memory[ids.decompressed_dst] = ids.packed_felt % ids.elm_bound"#};

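/// Implements the hint above: writes `packed_felt % elm_bound` (the next element of a packed
/// felt) into `ids.decompressed_dst`.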
pub fn set_decompressed_dst(
vm: &mut VirtualMachine,
_exec_scopes: &mut ExecutionScopes,
ids_data: &HashMap<String, HintReference>,
ap_tracking: &ApTracking,
_constants: &HashMap<String, Felt252>,
) -> Result<(), HintError> {
let decompressed_dst = get_ptr_from_var_name(vars::ids::DECOMPRESSED_DST, vm, ids_data, ap_tracking)?;

let packed_felt = get_integer_from_var_name(vars::ids::PACKED_FELT, vm, ids_data, ap_tracking)?.to_biguint();
let elm_bound = get_integer_from_var_name(vars::ids::ELM_BOUND, vm, ids_data, ap_tracking)?.to_biguint();

vm.insert_value(decompressed_dst, Felt252::from(packed_felt % elm_bound))?;
Ok(())
}
5 changes: 5 additions & 0 deletions crates/starknet-os/src/hints/mod.rs
@@ -35,6 +35,7 @@ mod bls_field;
mod bls_utils;
pub mod builtins;
mod compiled_class;
mod compression;
mod deprecated_compiled_class;
mod execute_transactions;
pub mod execution;
@@ -174,7 +175,9 @@ fn hints<PCS>() -> HashMap<String, HintImpl> where
hints.insert(os::SET_AP_TO_PREV_BLOCK_HASH.into(), os::set_ap_to_prev_block_hash);
hints.insert(kzg::STORE_DA_SEGMENT.into(), kzg::store_da_segment::<PCS>);
hints.insert(output::SET_STATE_UPDATES_START.into(), output::set_state_updates_start);
hints.insert(output::SET_COMPRESSED_START.into(), output::set_compressed_start);
hints.insert(output::SET_TREE_STRUCTURE.into(), output::set_tree_structure);
hints.insert(output::SET_N_UPDATES_SMALL.into(), output::set_n_updates_small);
hints.insert(patricia::ASSERT_CASE_IS_RIGHT.into(), patricia::assert_case_is_right);
hints.insert(patricia::BUILD_DESCENT_MAP.into(), patricia::build_descent_map);
hints.insert(patricia::HEIGHT_IS_ZERO_OR_LEN_NODE_PREIMAGE_IS_TWO.into(), patricia::height_is_zero_or_len_node_preimage_is_two);
@@ -252,6 +255,8 @@ fn hints<PCS>() -> HashMap<String, HintImpl> where
hints.insert(compiled_class::SET_AP_TO_SEGMENT_HASH.into(), compiled_class::set_ap_to_segment_hash);
hints.insert(secp::READ_EC_POINT_ADDRESS.into(), secp::read_ec_point_from_address);
hints.insert(execute_transactions::SHA2_FINALIZE.into(), execute_transactions::sha2_finalize);
hints.insert(compression::COMPRESS.into(), compression::compress);
hints.insert(compression::SET_DECOMPRESSED_DST.into(), compression::set_decompressed_dst);
hints
}
