feat: add ability to config profiling on/off #1867

Merged · 1 commit · Apr 18, 2024
21 changes: 11 additions & 10 deletions Cargo.lock


5 changes: 5 additions & 0 deletions config/default_config.json
@@ -69,6 +69,11 @@
"privacy": "TemporaryValue",
"value": false
},
"collect_profiling_metrics": {
"description": "If true, collect profiling metrics for the node.",
"privacy": "Public",
"value": false
},
"monitoring_gateway.collect_metrics": {
"description": "If true, collect and return metrics in the monitoring gateway.",
"pointer_target": "collect_metrics",
7 changes: 7 additions & 0 deletions crates/papyrus_common/src/metrics.rs
@@ -1,3 +1,5 @@
use std::sync::OnceLock;

/// The central marker is the first block number that doesn't exist yet.
pub const PAPYRUS_CENTRAL_BLOCK_MARKER: &str = "papyrus_central_block_marker";

@@ -30,3 +32,8 @@ pub const PAPYRUS_NUM_ACTIVE_INBOUND_SESSIONS: &str = "papyrus_num_active_inboun

/// The number of active sessions this peer has in which it requests data.
pub const PAPYRUS_NUM_ACTIVE_OUTBOUND_SESSIONS: &str = "papyrus_num_active_outbound_sessions";

// TODO: consider making this value non static and add a way to change this while the app is
// running. e.g via a monitoring endpoint.
/// Global variable set by the main config to enable collecting profiling metrics.
pub static COLLECT_PROFILING_METRICS: OnceLock<bool> = OnceLock::new();
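
The new global builds on `std::sync::OnceLock`, which gives set-once, read-many semantics: the first `set` wins, and readers that run before any `set` can fall back to a default. A minimal standalone sketch of that pattern (the names below are illustrative, not part of this PR):

```rust
use std::sync::OnceLock;

// Illustrative stand-in for COLLECT_PROFILING_METRICS.
static PROFILING_ENABLED: OnceLock<bool> = OnceLock::new();

fn main() {
    // Set exactly once at startup, e.g. from a parsed config value.
    PROFILING_ENABLED
        .set(true)
        .expect("value should only be set once");

    // Readers fall back to `false` when the value was never set, which is
    // the same guard the latency_histogram macro expansion relies on.
    assert!(*PROFILING_ENABLED.get().unwrap_or(&false));
}
```
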
15 changes: 10 additions & 5 deletions crates/papyrus_node/src/config/mod.rs
@@ -16,18 +16,15 @@ use clap::{arg, value_parser, Arg, ArgMatches, Command};
use itertools::{chain, Itertools};
use lazy_static::lazy_static;
use papyrus_base_layer::ethereum_base_layer_contract::EthereumBaseLayerConfig;
#[cfg(not(feature = "rpc"))]
use papyrus_config::dumping::ser_param;
use papyrus_config::dumping::{
append_sub_config_name,
ser_optional_sub_config,
ser_param,
ser_pointer_target_param,
SerializeConfig,
};
use papyrus_config::loading::load_and_process_config;
#[cfg(not(feature = "rpc"))]
use papyrus_config::ParamPrivacyInput;
use papyrus_config::{ConfigError, ParamPath, SerializedParam};
use papyrus_config::{ConfigError, ParamPath, ParamPrivacyInput, SerializedParam};
use papyrus_monitoring_gateway::MonitoringGatewayConfig;
use papyrus_network::NetworkConfig;
use papyrus_p2p_sync::{P2PSync, P2PSyncConfig};
Expand Down Expand Up @@ -67,6 +64,7 @@ pub struct NodeConfig {
pub p2p_sync: Option<P2PSyncConfig>,
// TODO(shahak): Make network non-optional once it's developed enough.
pub network: Option<NetworkConfig>,
pub collect_profiling_metrics: bool,
}

// Default configuration values.
@@ -82,6 +80,7 @@ impl Default for NodeConfig {
sync: Some(SyncConfig::default()),
p2p_sync: None,
network: None,
collect_profiling_metrics: false,
}
}
}
@@ -98,6 +97,12 @@ impl SerializeConfig for NodeConfig {
ser_optional_sub_config(&self.sync, "sync"),
ser_optional_sub_config(&self.p2p_sync, "p2p_sync"),
ser_optional_sub_config(&self.network, "network"),
BTreeMap::from_iter([ser_param(
"collect_profiling_metrics",
&self.collect_profiling_metrics,
"If true, collect profiling metrics for the node.",
ParamPrivacyInput::Public,
)]),
];
#[cfg(feature = "rpc")]
sub_configs.push(append_sub_config_name(self.rpc.dump(), "rpc"));
@@ -79,6 +79,11 @@ expression: dumped_default_config
"value": "https://alpha-mainnet.starknet.io/",
"privacy": "Public"
},
"collect_profiling_metrics": {
"description": "If true, collect profiling metrics for the node.",
"value": false,
"privacy": "Public"
},
"monitoring_gateway.collect_metrics": {
"description": "If true, collect and return metrics in the monitoring gateway.",
"value": false,
5 changes: 5 additions & 0 deletions crates/papyrus_node/src/main.rs
@@ -11,6 +11,7 @@ use futures::channel::mpsc::Sender;
use futures::future::BoxFuture;
use futures::FutureExt;
use papyrus_base_layer::ethereum_base_layer_contract::EthereumBaseLayerConfig;
use papyrus_common::metrics::COLLECT_PROFILING_METRICS;
use papyrus_common::pending_classes::PendingClasses;
use papyrus_common::BlockHashAndNumber;
use papyrus_config::presentation::get_config_presentation;
@@ -297,6 +298,10 @@ async fn main() -> anyhow::Result<()> {
exit(1);
}

COLLECT_PROFILING_METRICS
.set(config.collect_profiling_metrics)
.expect("This should be the first and only time we set this value.");

info!("Booting up.");
let res = run_threads(config.clone()).await;
if config.p2p_sync.is_some_and(|c| c.stop_sync_at_block_number.is_some()) {
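
Beyond the macro, any crate that already depends on `papyrus_common` can read the same flag to guard ad-hoc profiling work once `main` has seeded it. A hedged sketch of such a call site (the function below is hypothetical, not part of this PR):

```rust
use papyrus_common::metrics::COLLECT_PROFILING_METRICS;

fn maybe_collect_profiling_data() {
    // `get` returns None if main never set the flag (e.g. in unit tests),
    // so default to `false` and skip the profiling-only work.
    if *COLLECT_PROFILING_METRICS.get().unwrap_or(&false) {
        // ... gather and record profiling-only data here ...
    }
}
```
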
1 change: 1 addition & 0 deletions crates/papyrus_proc_macros/Cargo.toml
@@ -13,6 +13,7 @@ quote = "1.0.26"
[dev-dependencies]
metrics.workspace = true
metrics-exporter-prometheus.workspace = true
papyrus_common = { path = "../papyrus_common", version = "0.4.0-dev.1" }
prometheus-parse.workspace = true
test_utils = { path = "../test_utils" }

41 changes: 36 additions & 5 deletions crates/papyrus_proc_macros/src/lib.rs
@@ -1,6 +1,8 @@
use std::str::FromStr;

use proc_macro::TokenStream;
use quote::{quote, ToTokens};
use syn::{parse_macro_input, ExprLit, ItemFn, ItemTrait, LitStr, Meta, TraitItem};
use syn::{parse_macro_input, ExprLit, ItemFn, ItemTrait, LitBool, LitStr, Meta, TraitItem};

/// This macro is a wrapper around the "rpc" macro supplied by the jsonrpsee library that generates
/// a server and client traits from a given trait definition. The wrapper gets a version id and
@@ -98,30 +100,59 @@ pub fn versioned_rpc(attr: TokenStream, input: TokenStream) -> TokenStream {
}

/// This macro will emit a histogram metric with the given name and the latency of the function.
/// The macro also receives a boolean for whether it will be emitted only when
/// profiling is activated or at all times.
///
/// # Example
/// Given this code:
///
/// ```rust,ignore
/// #[latency_histogram("metric_name")]
/// #[latency_histogram("metric_name", false)]
/// fn foo() {
/// // Some code ...
/// }
/// ```
/// Every call to foo will update the histogram metric with the name “metric_name” with the time it
/// took to execute foo.
/// The metric will be emitted regardless of the value of the profiling configuration,
/// since the config value is false.
#[proc_macro_attribute]
pub fn latency_histogram(attr: TokenStream, input: TokenStream) -> TokenStream {
let mut input_fn = parse_macro_input!(input as ItemFn);
let metric_name = parse_macro_input!(attr as ExprLit);
let parts = attr
.to_string()
.split(',')
.map(|s| {
TokenStream::from_str(s)
.expect("Expecting metric name and bool (is for profiling only)")
})
.collect::<Vec<_>>();
let metric_name_as_tokenstream = parts
.first()
.expect("attribute should include metric name and controll with config boolean")
.clone();
// TODO: consider naming the input value instead of providing a bool
// TODO: consider adding support for metrics levels (e.g. debug, info, warn, error) instead of
// boolean
let controll_with_config_as_tokenstream = parts
.get(1)
.expect("attribute should include metric name and controll with config boolean")
.clone();
let metric_name = parse_macro_input!(metric_name_as_tokenstream as ExprLit);
let controll_with_config = parse_macro_input!(controll_with_config_as_tokenstream as LitBool);
let origin_block = &mut input_fn.block;

// Create a new block with the metric update.
let expanded_block = quote! {
{
let start_function_time=std::time::Instant::now();
let mut start_function_time = None;
if !#controll_with_config || (#controll_with_config && *(papyrus_common::metrics::COLLECT_PROFILING_METRICS.get().unwrap_or(&false))) {
start_function_time=Some(std::time::Instant::now());
}
let return_value=#origin_block;
metrics::histogram!(#metric_name, start_function_time.elapsed().as_secs_f64());
if let Some(start_time) = start_function_time {
metrics::histogram!(#metric_name, start_time.elapsed().as_secs_f64());
}
return_value
}
};
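
For reference, based on the `quote!` block above, a function annotated with `#[latency_histogram("bar_histogram", true)]` should expand to roughly the following (a hand-written approximation of the generated code, not its literal output):

```rust
fn bar() -> usize {
    // With the second macro argument set to `true`, the timer only starts
    // when the global profiling flag was set to true at startup.
    let mut start_function_time = None;
    if !true || (true && *(papyrus_common::metrics::COLLECT_PROFILING_METRICS.get().unwrap_or(&false))) {
        start_function_time = Some(std::time::Instant::now());
    }
    let return_value = {
        // original function body
        1000
    };
    // The histogram is only emitted when the timer was actually started.
    if let Some(start_time) = start_function_time {
        metrics::histogram!("bar_histogram", start_time.elapsed().as_secs_f64());
    }
    return_value
}
```

With `false` as the second argument, the `!#controll_with_config` branch makes the condition always true, so the timer always starts and the metric keeps its previous always-on behavior.
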
14 changes: 13 additions & 1 deletion crates/papyrus_proc_macros/tests/latency_histogram.rs
@@ -1,19 +1,31 @@
use metrics_exporter_prometheus::PrometheusBuilder;
use papyrus_common::metrics::COLLECT_PROFILING_METRICS;
use papyrus_proc_macros::latency_histogram;
use prometheus_parse::Value::Untyped;
use test_utils::prometheus_is_contained;

#[test]
fn latency_histogram_test() {
#[latency_histogram("foo_histogram")]
COLLECT_PROFILING_METRICS.set(false).unwrap();

#[latency_histogram("foo_histogram", false)]
fn foo() -> usize {
#[allow(clippy::let_and_return)]
let start_function_time = 1000;
start_function_time
}

#[latency_histogram("bar_histogram", true)]
fn bar() -> usize {
#[allow(clippy::let_and_return)]
let start_function_time = 1000;
start_function_time
}

let handle = PrometheusBuilder::new().install_recorder().unwrap();

assert!(handle.render().is_empty());
assert_eq!(bar(), 1000);
assert!(handle.render().is_empty());
assert_eq!(foo(), 1000);
assert_eq!(
6 changes: 3 additions & 3 deletions crates/papyrus_storage/Cargo.toml
@@ -8,7 +8,7 @@ description = "A storage implementation for a Starknet node."

[features]
testing = ["tempfile"]
document_calls = ["lazy_static", "papyrus_common"]
document_calls = ["lazy_static"]

[[bin]]
name = "dump_declared_classes"
@@ -17,7 +17,7 @@ path = "src/bin/dump_declared_classes.rs"

[[bin]]
name = "storage_benchmark"
required-features = ["clap", "papyrus_common", "statistical"]
required-features = ["clap", "statistical"]
path = "src/bin/storage_benchmark.rs"

[dependencies]
@@ -35,7 +35,7 @@ memmap2.workspace = true
metrics.workspace = true
num-bigint.workspace = true
page_size.workspace = true
papyrus_common = { path = "../papyrus_common", version = "0.4.0-dev.2", optional = true }
papyrus_common = { path = "../papyrus_common", version = "0.4.0-dev.2" }
papyrus_config = { path = "../papyrus_config", version = "0.4.0-dev.2" }
papyrus_proc_macros = { path = "../papyrus_proc_macros", version = "0.4.0-dev.2" }
parity-scale-codec.workspace = true
2 changes: 1 addition & 1 deletion crates/papyrus_storage/src/body/mod.rs
@@ -315,7 +315,7 @@ impl<'env, Mode: TransactionKind> StorageTxn<'env, Mode> {
}

impl<'env> BodyStorageWriter for StorageTxn<'env, RW> {
#[latency_histogram("storage_append_body_latency_seconds")]
#[latency_histogram("storage_append_body_latency_seconds", false)]
fn append_body(self, block_number: BlockNumber, block_body: BlockBody) -> StorageResult<Self> {
let markers_table = self.open_table(&self.tables.markers)?;
update_marker(&self.txn, &markers_table, block_number)?;
2 changes: 1 addition & 1 deletion crates/papyrus_storage/src/class.rs
@@ -155,7 +155,7 @@ impl<'env, Mode: TransactionKind> ClassStorageReader for StorageTxn<'env, Mode>
}

impl<'env> ClassStorageWriter for StorageTxn<'env, RW> {
#[latency_histogram("storage_append_classes_latency_seconds")]
#[latency_histogram("storage_append_classes_latency_seconds", false)]
fn append_classes(
self,
block_number: BlockNumber,
2 changes: 1 addition & 1 deletion crates/papyrus_storage/src/compiled_class.rs
@@ -83,7 +83,7 @@ impl<'env, Mode: TransactionKind> CasmStorageReader for StorageTxn<'env, Mode> {
}

impl<'env> CasmStorageWriter for StorageTxn<'env, RW> {
#[latency_histogram("storage_append_casm_latency_seconds")]
#[latency_histogram("storage_append_casm_latency_seconds", false)]
fn append_casm(self, class_hash: &ClassHash, casm: &CasmContractClass) -> StorageResult<Self> {
let casm_table = self.open_table(&self.tables.casms)?;
let markers_table = self.open_table(&self.tables.markers)?;
2 changes: 1 addition & 1 deletion crates/papyrus_storage/src/state/mod.rs
@@ -427,7 +427,7 @@ impl<'env, Mode: TransactionKind> StateReader<'env, Mode> {
}

impl<'env> StateStorageWriter for StorageTxn<'env, RW> {
#[latency_histogram("storage_append_thin_state_diff_latency_seconds")]
#[latency_histogram("storage_append_thin_state_diff_latency_seconds", false)]
fn append_state_diff(
self,
block_number: BlockNumber,
6 changes: 3 additions & 3 deletions crates/papyrus_sync/src/lib.rs
@@ -383,7 +383,7 @@ impl<
}
}

#[latency_histogram("sync_store_block_latency_seconds")]
#[latency_histogram("sync_store_block_latency_seconds", false)]
#[instrument(skip(self, block), level = "debug", fields(block_hash = %block.header.block_hash), err)]
fn store_block(
&mut self,
@@ -424,7 +424,7 @@ impl<
Ok(())
}

#[latency_histogram("sync_store_state_diff_latency_seconds")]
#[latency_histogram("sync_store_state_diff_latency_seconds", false)]
#[instrument(skip(self, state_diff, deployed_contract_class_definitions), level = "debug", err)]
fn store_state_diff(
&mut self,
@@ -471,7 +471,7 @@ impl<
Ok(())
}

#[latency_histogram("sync_store_compiled_class_latency_seconds")]
#[latency_histogram("sync_store_compiled_class_latency_seconds", false)]
#[instrument(skip(self, compiled_class), level = "debug", err)]
fn store_compiled_class(
&mut self,