diff --git a/Cargo.lock b/Cargo.lock index a388f42a93e05..688c99c2473cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2150,20 +2150,26 @@ dependencies = [ "aptos-protos 1.3.1", "aptos-transaction-filter", "async-trait", + "build_html", "clap 4.5.21", + "dashmap", "futures", "jemallocator", + "lazy_static", "once_cell", "prost 0.13.4", + "rand 0.7.3", "redis", "serde", "serde_json", "tokio", + "tokio-scoped", "tokio-stream", "tonic 0.12.3", "tonic-reflection", "tracing", "uuid", + "warp", ] [[package]] @@ -2172,11 +2178,26 @@ version = "1.0.0" dependencies = [ "anyhow", "aptos-indexer-grpc-server-framework", + "aptos-indexer-grpc-utils", + "aptos-protos 1.3.1", "async-trait", + "build_html", "clap 4.5.21", + "dashmap", + "futures", "jemallocator", + "once_cell", + "prost 0.13.4", + "rand 0.7.3", "serde", "tokio", + "tokio-scoped", + "tokio-stream", + "tonic 0.12.3", + "tonic-reflection", + "tracing", + "uuid", + "warp", ] [[package]] @@ -2316,15 +2337,23 @@ dependencies = [ "anyhow", "aptos-config", "aptos-indexer-grpc-server-framework", + "aptos-indexer-grpc-utils", "aptos-protos 1.3.1", "async-trait", + "build_html", "clap 4.5.21", + "dashmap", + "futures", "jemallocator", + "prost 0.13.4", + "rand 0.7.3", "serde", + "serde_json", "tokio", "tokio-scoped", "tonic 0.12.3", "tracing", + "warp", ] [[package]] @@ -2389,6 +2418,8 @@ dependencies = [ "async-trait", "backoff", "base64 0.13.1", + "build_html", + "bytesize", "chrono", "cloud-storage", "dashmap", @@ -2404,10 +2435,12 @@ dependencies = [ "serde", "serde_json", "tokio", + "tokio-stream", "tokio-util 0.7.10", "tonic 0.12.3", "tracing", "url", + "warp", ] [[package]] @@ -5902,6 +5935,12 @@ dependencies = [ "serde", ] +[[package]] +name = "build_html" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225eb82ce9e70dcc0cfa6e404d0f353326b6e163bf500ec4711cec317d11935c" + [[package]] name = "bulletproofs" version = "4.0.0" @@ -6000,6 +6039,12 @@ dependencies = [ "serde", ] +[[package]] +name = "bytesize" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" + [[package]] name = "bzip2-sys" version = "0.1.11+1.0.8" @@ -6679,16 +6724,6 @@ dependencies = [ "libc", ] -[[package]] -name = "core-foundation" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -12067,7 +12102,7 @@ dependencies = [ "openssl-probe", "openssl-sys", "schannel", - "security-framework 2.9.2", + "security-framework", "security-framework-sys", "tempfile", ] @@ -14868,7 +14903,7 @@ dependencies = [ "openssl-probe", "rustls-pemfile 1.0.4", "schannel", - "security-framework 2.9.2", + "security-framework", ] [[package]] @@ -14881,19 +14916,20 @@ dependencies = [ "rustls-pemfile 2.1.1", "rustls-pki-types", "schannel", - "security-framework 2.9.2", + "security-framework", ] [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" dependencies = [ "openssl-probe", + "rustls-pemfile 2.1.1", "rustls-pki-types", "schannel", - "security-framework 3.2.0", + 
"security-framework", ] [[package]] @@ -15139,20 +15175,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags 1.3.2", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" -dependencies = [ - "bitflags 2.6.0", - "core-foundation 0.10.0", + "core-foundation", "core-foundation-sys", "libc", "security-framework-sys", @@ -16221,7 +16244,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", - "core-foundation 0.9.4", + "core-foundation", "system-configuration-sys", ] @@ -16935,7 +16958,7 @@ dependencies = [ "percent-encoding", "pin-project 1.1.3", "prost 0.13.4", - "rustls-native-certs 0.8.1", + "rustls-native-certs 0.8.0", "rustls-pemfile 2.1.1", "socket2 0.5.5", "tokio", diff --git a/Cargo.toml b/Cargo.toml index d70df5de06172..20e516bc1edf1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -519,9 +519,11 @@ blst = "0.3.11" # The __private_bench feature exposes the Fp12 type which we need to implement a multi-threaded multi-pairing. blstrs = { version = "0.7.1", features = ["serde", "__private_bench"] } bollard = "0.15" +build_html = "2.5.0" bulletproofs = { version = "4.0.0" } byteorder = "1.4.3" bytes = { version = "1.4.0", features = ["serde"] } +bytesize = { version = "1.3.0" } camino = { version = "1.1.6" } chrono = { version = "0.4.19", features = ["clock", "serde"] } cfg-if = "1.0.0" @@ -705,6 +707,8 @@ prometheus-parse = "0.2.4" proptest = "1.4.0" proptest-derive = "0.4.0" prost = { version = "0.13.4", features = ["no-recursion-limit"] } +prost-types = "0.13.3" +quanta = "0.10.1" quick_cache = "0.5.1" quick-junit = "0.5.0" quote = "1.0.18" diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/Cargo.toml index 533ac675464a0..e4d3614ae2edf 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/Cargo.toml +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/Cargo.toml @@ -15,10 +15,25 @@ rust-version = { workspace = true } [dependencies] anyhow = { workspace = true } aptos-indexer-grpc-server-framework = { workspace = true } +aptos-indexer-grpc-utils = { workspace = true } +aptos-protos = { workspace = true } async-trait = { workspace = true } +build_html = { workspace = true } clap = { workspace = true } +dashmap = { workspace = true } +futures = { workspace = true } +once_cell = { workspace = true } +prost = { workspace = true } +rand = { workspace = true } serde = { workspace = true } tokio = { workspace = true } +tokio-scoped = { workspace = true } +tokio-stream = { workspace = true } +tonic = { workspace = true } +tonic-reflection = { workspace = true } +tracing = { workspace = true } +uuid = { workspace = true } +warp = { workspace = true } [target.'cfg(unix)'.dependencies] jemallocator = { workspace = true } diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/config.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/config.rs index 7f81be313ec48..414a6b61f745c 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/config.rs +++ 
b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/config.rs
@@ -1,21 +1,416 @@
 // Copyright © Aptos Foundation
 // SPDX-License-Identifier: Apache-2.0
 
-use anyhow::Result;
+use crate::{
+    connection_manager::ConnectionManager,
+    historical_data_service::HistoricalDataService,
+    live_data_service::LiveDataService,
+    service::{DataServiceWrapper, DataServiceWrapperWrapper},
+};
+use anyhow::{bail, Result};
 use aptos_indexer_grpc_server_framework::RunnableConfig;
+use aptos_indexer_grpc_utils::{
+    config::IndexerGrpcFileStoreConfig,
+    status_page::{get_throughput_from_samples, render_status_page, Tab},
+};
+use aptos_protos::{
+    indexer::v1::FILE_DESCRIPTOR_SET as INDEXER_V1_FILE_DESCRIPTOR_SET,
+    transaction::v1::FILE_DESCRIPTOR_SET as TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET,
+    util::timestamp::FILE_DESCRIPTOR_SET as UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET,
+};
+use build_html::{
+    Container, ContainerType, HtmlContainer, HtmlElement, HtmlTag, Table, TableCell, TableCellType,
+    TableRow,
+};
+use once_cell::sync::OnceCell;
 use serde::{Deserialize, Serialize};
+use std::{net::SocketAddr, sync::Arc, time::Duration};
+use tokio::task::JoinHandle;
+use tonic::{codec::CompressionEncoding, transport::Server};
+use tracing::info;
+use warp::{reply::Response, Rejection};
+
+pub(crate) static LIVE_DATA_SERVICE: OnceCell<LiveDataService<'static>> = OnceCell::new();
+pub(crate) static HISTORICAL_DATA_SERVICE: OnceCell<HistoricalDataService> = OnceCell::new();
+
+pub(crate) const MAX_MESSAGE_SIZE: usize = 256 * (1 << 20);
+
+// HTTP2 ping interval and timeout.
+// This can help the server garbage collect dead connections.
+// tonic server: https://docs.rs/tonic/latest/tonic/transport/server/struct.Server.html#method.http2_keepalive_interval
+const HTTP2_PING_INTERVAL_DURATION: std::time::Duration = std::time::Duration::from_secs(60);
+const HTTP2_PING_TIMEOUT_DURATION: std::time::Duration = std::time::Duration::from_secs(10);
+
+const DEFAULT_MAX_RESPONSE_CHANNEL_SIZE: usize = 3;
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+#[serde(deny_unknown_fields)]
+pub struct TlsConfig {
+    /// The address for the TLS GRPC server to listen on.
+    pub data_service_grpc_listen_address: SocketAddr,
+    pub cert_path: String,
+    pub key_path: String,
+}
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+#[serde(deny_unknown_fields)]
+pub struct NonTlsConfig {
+    /// The address for the non-TLS GRPC server to listen on.
+    pub data_service_grpc_listen_address: SocketAddr,
+}
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+#[serde(deny_unknown_fields)]
+pub struct LiveDataServiceConfig {
+    pub enabled: bool,
+    #[serde(default = "LiveDataServiceConfig::default_num_slots")]
+    pub num_slots: usize,
+    #[serde(default = "LiveDataServiceConfig::default_size_limit_bytes")]
+    pub size_limit_bytes: usize,
+}
+
+impl LiveDataServiceConfig {
+    fn default_num_slots() -> usize {
+        5_000_000
+    }
+
+    fn default_size_limit_bytes() -> usize {
+        10_000_000_000
+    }
+}
 
 #[derive(Clone, Debug, Deserialize, Serialize)]
 #[serde(deny_unknown_fields)]
-pub struct IndexerGrpcDataServiceConfig {}
+pub struct HistoricalDataServiceConfig {
+    pub enabled: bool,
+    pub file_store_config: IndexerGrpcFileStoreConfig,
+}
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+#[serde(deny_unknown_fields)]
+pub struct IndexerGrpcDataServiceConfig {
+    pub chain_id: u64,
+    /// If given, we will run a server that uses TLS.
+    pub data_service_grpc_tls_config: Option<TlsConfig>,
+    /// If given, we will run a server that does not use TLS.
+    pub data_service_grpc_non_tls_config: Option<NonTlsConfig>,
+    /// The size of the response channel, i.e. the number of responses that can be buffered.
+    #[serde(default = "IndexerGrpcDataServiceConfig::default_data_service_response_channel_size")]
+    pub data_service_response_channel_size: usize,
+
+    pub live_data_service_config: LiveDataServiceConfig,
+    pub historical_data_service_config: HistoricalDataServiceConfig,
+    pub grpc_manager_addresses: Vec<String>,
+    pub self_advertised_address: String,
+}
+
+impl IndexerGrpcDataServiceConfig {
+    pub const fn default_data_service_response_channel_size() -> usize {
+        DEFAULT_MAX_RESPONSE_CHANNEL_SIZE
+    }
+
+    async fn create_live_data_service(
+        &self,
+        tasks: &mut Vec<JoinHandle<Result<()>>>,
+    ) -> Option<DataServiceWrapper> {
+        if !self.live_data_service_config.enabled {
+            return None;
+        }
+        let connection_manager = Arc::new(
+            ConnectionManager::new(
+                self.chain_id,
+                self.grpc_manager_addresses.clone(),
+                self.self_advertised_address.clone(),
+                /*is_live_data_service=*/ true,
+            )
+            .await,
+        );
+        let (handler_tx, handler_rx) = tokio::sync::mpsc::channel(10);
+        let service = DataServiceWrapper::new(
+            connection_manager.clone(),
+            handler_tx,
+            self.data_service_response_channel_size,
+            /*is_live_data_service=*/ true,
+        );
+
+        let connection_manager_clone = connection_manager.clone();
+        tasks.push(tokio::task::spawn(async move {
+            connection_manager_clone.start().await;
+            Ok(())
+        }));
+
+        let chain_id = self.chain_id;
+        let config = self.live_data_service_config.clone();
+        tasks.push(tokio::task::spawn_blocking(move || {
+            LIVE_DATA_SERVICE
+                .get_or_init(|| LiveDataService::new(chain_id, config, connection_manager))
+                .run(handler_rx);
+            Ok(())
+        }));
+
+        Some(service)
+    }
+
+    async fn create_historical_data_service(
+        &self,
+        tasks: &mut Vec<JoinHandle<Result<()>>>,
+    ) -> Option<DataServiceWrapper> {
+        if !self.historical_data_service_config.enabled {
+            return None;
+        }
+        let connection_manager = Arc::new(
+            ConnectionManager::new(
+                self.chain_id,
+                self.grpc_manager_addresses.clone(),
+                self.self_advertised_address.clone(),
+                /*is_live_data_service=*/ false,
+            )
+            .await,
+        );
+        let (handler_tx, handler_rx) = tokio::sync::mpsc::channel(10);
+        let service = DataServiceWrapper::new(
+            connection_manager.clone(),
+            handler_tx,
+            self.data_service_response_channel_size,
+            /*is_live_data_service=*/ false,
+        );
+
+        let connection_manager_clone = connection_manager.clone();
+        tasks.push(tokio::task::spawn(async move {
+            connection_manager_clone.start().await;
+            Ok(())
+        }));
+
+        let chain_id = self.chain_id;
+        let config = self.historical_data_service_config.clone();
+        tasks.push(tokio::task::spawn_blocking(move || {
+            HISTORICAL_DATA_SERVICE
+                .get_or_init(|| HistoricalDataService::new(chain_id, config, connection_manager))
+                .run(handler_rx);
+            Ok(())
+        }));
+
+        Some(service)
+    }
+}
 
 #[async_trait::async_trait]
 impl RunnableConfig for IndexerGrpcDataServiceConfig {
+    fn validate(&self) -> Result<()> {
+        if self.data_service_grpc_non_tls_config.is_none()
+            && self.data_service_grpc_tls_config.is_none()
+        {
+            bail!("At least one of data_service_grpc_non_tls_config and data_service_grpc_tls_config must be set");
+        }
+        Ok(())
+    }
+
     async fn run(&self) -> Result<()> {
+        let reflection_service = tonic_reflection::server::Builder::configure()
+            // Note: It is critical that the file descriptor set is registered for every
+            // file that the top level API proto depends on recursively. If you don't,
+            // compilation will still succeed but reflection will fail at runtime.
+            //
+            // TODO: Add a test for this / something in build.rs, this is a big footgun.
+ .register_encoded_file_descriptor_set(INDEXER_V1_FILE_DESCRIPTOR_SET) + .register_encoded_file_descriptor_set(TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET) + .register_encoded_file_descriptor_set(UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET) + .build_v1alpha() + .map_err(|e| anyhow::anyhow!("Failed to build reflection service: {}", e))? + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Gzip); + + let mut tasks = vec![]; + + let live_data_service = self.create_live_data_service(&mut tasks).await; + let historical_data_service = self.create_historical_data_service(&mut tasks).await; + + let wrapper = Arc::new(DataServiceWrapperWrapper::new( + live_data_service, + historical_data_service, + )); + let wrapper_service_raw = + aptos_protos::indexer::v1::raw_data_server::RawDataServer::from_arc(wrapper.clone()) + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Gzip) + .max_decoding_message_size(MAX_MESSAGE_SIZE) + .max_encoding_message_size(MAX_MESSAGE_SIZE); + let wrapper_service = + aptos_protos::indexer::v1::data_service_server::DataServiceServer::from_arc(wrapper) + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Gzip) + .max_decoding_message_size(MAX_MESSAGE_SIZE) + .max_encoding_message_size(MAX_MESSAGE_SIZE); + let wrapper_service_raw_clone = wrapper_service_raw.clone(); + let wrapper_service_clone = wrapper_service.clone(); + let reflection_service_clone = reflection_service.clone(); + + if let Some(config) = &self.data_service_grpc_non_tls_config { + let listen_address = config.data_service_grpc_listen_address; + info!( + grpc_address = listen_address.to_string().as_str(), + "[data service] starting gRPC server with non-TLS." + ); + tasks.push(tokio::spawn(async move { + Server::builder() + .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) + .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) + .add_service(wrapper_service_clone) + .add_service(wrapper_service_raw_clone) + .add_service(reflection_service_clone) + .serve(listen_address) + .await + .map_err(|e| anyhow::anyhow!(e)) + })); + } + if let Some(config) = &self.data_service_grpc_tls_config { + let listen_address = config.data_service_grpc_listen_address; + let cert = tokio::fs::read(config.cert_path.clone()).await?; + let key = tokio::fs::read(config.key_path.clone()).await?; + let identity = tonic::transport::Identity::from_pem(cert, key); + info!( + grpc_address = listen_address.to_string().as_str(), + "[Data Service] Starting gRPC server with TLS." + ); + tasks.push(tokio::spawn(async move { + Server::builder() + .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) + .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) + .tls_config(tonic::transport::ServerTlsConfig::new().identity(identity))? + .add_service(wrapper_service) + .add_service(wrapper_service_raw) + .add_service(reflection_service) + .serve(listen_address) + .await + .map_err(|e| anyhow::anyhow!(e)) + })); + } + + futures::future::try_join_all(tasks).await?; Ok(()) } fn get_server_name(&self) -> String { "indexer_grpc_data_service_v2".to_string() } + + async fn status_page(&self) -> Result { + let mut tabs = vec![]; + // TODO(grao): Add something real. 
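+        // One tab is rendered per enabled service below; each tab shows that service's
+        // ConnectionManager state (known latest version and active streams).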
+ let overview_tab_content = HtmlElement::new(HtmlTag::Div).with_raw("Welcome!").into(); + tabs.push(Tab::new("Overview", overview_tab_content)); + if let Some(live_data_service) = LIVE_DATA_SERVICE.get() { + let connection_manager_info = + render_connection_manager_info(live_data_service.get_connection_manager()); + let cache_info = render_cache_info(); + let content = HtmlElement::new(HtmlTag::Div) + .with_container(connection_manager_info) + .with_container(cache_info) + .into(); + tabs.push(Tab::new("LiveDataService", content)); + } + + if let Some(historical_data_service) = HISTORICAL_DATA_SERVICE.get() { + let connection_manager_info = + render_connection_manager_info(historical_data_service.get_connection_manager()); + let file_store_info = render_file_store_info(); + let content = HtmlElement::new(HtmlTag::Div) + .with_container(connection_manager_info) + .with_container(file_store_info) + .into(); + tabs.push(Tab::new("HistoricalDataService", content)); + } + + render_status_page(tabs) + } +} + +fn render_connection_manager_info(connection_manager: &ConnectionManager) -> Container { + let known_latest_version = connection_manager.known_latest_version(); + let active_streams = connection_manager.get_active_streams(); + let active_streams_table = active_streams.into_iter().fold( + Table::new() + .with_attributes([("style", "width: 100%; border: 5px solid black;")]) + .with_thead_attributes([("style", "background-color: lightcoral; color: white;")]) + .with_custom_header_row( + TableRow::new() + .with_cell(TableCell::new(TableCellType::Header).with_raw("Id")) + .with_cell(TableCell::new(TableCellType::Header).with_raw("Current Version")) + .with_cell(TableCell::new(TableCellType::Header).with_raw("End Version")) + .with_cell( + TableCell::new(TableCellType::Header).with_raw("Past 10s throughput"), + ) + .with_cell( + TableCell::new(TableCellType::Header).with_raw("Past 60s throughput"), + ) + .with_cell( + TableCell::new(TableCellType::Header).with_raw("Past 10min throughput"), + ), + ), + |table, active_stream| { + table.with_custom_body_row( + TableRow::new() + .with_cell(TableCell::new(TableCellType::Data).with_raw(active_stream.id())) + .with_cell(TableCell::new(TableCellType::Data).with_raw(format!( + "{:?}", + active_stream + .progress.as_ref() + .map(|progress| { + progress.samples.last().map(|sample| sample.version) + }) + .flatten() + ))) + .with_cell( + TableCell::new(TableCellType::Data).with_raw(active_stream.end_version()), + ) + .with_cell(TableCell::new(TableCellType::Data).with_raw( + get_throughput_from_samples( + active_stream.progress.as_ref(), + Duration::from_secs(10), + ), + )) + .with_cell(TableCell::new(TableCellType::Data).with_raw( + get_throughput_from_samples( + active_stream.progress.as_ref(), + Duration::from_secs(60), + ), + )) + .with_cell(TableCell::new(TableCellType::Data).with_raw( + get_throughput_from_samples( + active_stream.progress.as_ref(), + Duration::from_secs(600), + ), + )), + ) + }, + ); + + Container::new(ContainerType::Section) + .with_paragraph_attr( + "Connection Manager", + [("style", "font-size: 24px; font-weight: bold;")], + ) + .with_paragraph(format!("Known latest version: {known_latest_version}.")) + .with_paragraph_attr( + "Active Streams", + [("style", "font-size: 16px; font-weight: bold;")], + ) + .with_table(active_streams_table) +} + +fn render_cache_info() -> Container { + Container::new(ContainerType::Section).with_paragraph_attr( + "In Memory Cache", + [("style", "font-size: 24px; font-weight: bold;")], + ) +} + +fn 
render_file_store_info() -> Container { + Container::new(ContainerType::Section).with_paragraph_attr( + "File Store", + [("style", "font-size: 24px; font-weight: bold;")], + ) } diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/connection_manager.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/connection_manager.rs new file mode 100644 index 0000000000000..54f456c806fa6 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/connection_manager.rs @@ -0,0 +1,298 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::config::{LIVE_DATA_SERVICE, MAX_MESSAGE_SIZE}; +use aptos_indexer_grpc_utils::{system_time_to_proto, timestamp_now_proto}; +use aptos_protos::indexer::v1::{ + grpc_manager_client::GrpcManagerClient, service_info::Info, ActiveStream, HeartbeatRequest, + HistoricalDataServiceInfo, LiveDataServiceInfo, ServiceInfo, StreamInfo, StreamProgress, + StreamProgressSampleProto, +}; +use dashmap::DashMap; +use rand::prelude::*; +use std::{ + collections::VecDeque, + sync::atomic::{AtomicU64, Ordering}, + time::{Duration, SystemTime}, +}; +use tonic::{codec::CompressionEncoding, transport::channel::Channel}; +use tracing::{info, warn}; + +pub static MAX_HEARTBEAT_RETRIES: usize = 3; + +static OLD_PROGRESS_SAMPLING_RATE: Duration = Duration::from_secs(60); +static RECENT_PROGRESS_SAMPLING_RATE: Duration = Duration::from_secs(1); +static MAX_RECENT_SAMPLES_TO_KEEP: usize = 60; +static MAX_OLD_SAMPLES_TO_KEEP: usize = 60; + +#[derive(Default)] +struct StreamProgressSamples { + old_samples: VecDeque, + recent_samples: VecDeque, +} + +impl StreamProgressSamples { + fn new() -> Self { + Default::default() + } + + fn to_proto(&self) -> Vec { + self.old_samples + .iter() + .chain(self.recent_samples.iter()) + .map(|sample| StreamProgressSampleProto { + timestamp: Some(system_time_to_proto(sample.timestamp)), + version: sample.version, + size_bytes: sample.size_bytes, + }) + .collect() + } + + fn maybe_add_sample(&mut self, version: u64, size_bytes: u64) { + let now = SystemTime::now(); + let sample = StreamProgressSample { + timestamp: now, + version, + size_bytes, + }; + + if Self::accept_sample(&self.recent_samples, &sample, RECENT_PROGRESS_SAMPLING_RATE) { + self.recent_samples.push_back(sample); + if self.recent_samples.len() > MAX_RECENT_SAMPLES_TO_KEEP { + let sample = self.recent_samples.pop_front().unwrap(); + if Self::accept_sample(&self.old_samples, &sample, OLD_PROGRESS_SAMPLING_RATE) { + self.old_samples.push_back(sample); + if self.old_samples.len() > MAX_OLD_SAMPLES_TO_KEEP { + self.old_samples.pop_front(); + } + } + } + } + } + + fn accept_sample( + samples: &VecDeque, + sample: &StreamProgressSample, + sampling_rate: Duration, + ) -> bool { + if let Some(last_sample) = samples.back() { + if let Ok(delta) = sample.timestamp.duration_since(last_sample.timestamp) { + if delta >= sampling_rate { + return true; + } + } + } else { + return true; + }; + + return false; + } +} + +struct StreamProgressSample { + timestamp: SystemTime, + version: u64, + size_bytes: u64, +} + +pub(crate) struct ConnectionManager { + chain_id: u64, + grpc_manager_connections: DashMap>, + self_advertised_address: String, + known_latest_version: AtomicU64, + active_streams: DashMap, + is_live_data_service: bool, +} + +impl ConnectionManager { + pub(crate) async fn new( + chain_id: u64, + grpc_manager_addresses: Vec, + self_advertised_address: String, + is_live_data_service: bool, + ) -> Self { + let grpc_manager_connections = 
DashMap::new(); + grpc_manager_addresses.into_iter().for_each(|address| { + grpc_manager_connections + .insert(address.clone(), Self::create_client_from_address(&address)); + }); + let res = Self { + chain_id, + grpc_manager_connections, + self_advertised_address, + known_latest_version: AtomicU64::new(0), + active_streams: DashMap::new(), + is_live_data_service, + }; + + // Keep fetching latest version until it is available. + while res.known_latest_version.load(Ordering::SeqCst) == 0 { + for entry in res.grpc_manager_connections.iter() { + let address = entry.key(); + if let Err(e) = res.heartbeat(address).await { + warn!("Error during heartbeat: {e}."); + } + } + tokio::time::sleep(Duration::from_secs(1)).await; + } + + res + } + + pub(crate) async fn start(&self) { + loop { + for entry in self.grpc_manager_connections.iter() { + let address = entry.key(); + let mut retries = 0; + loop { + let result = self.heartbeat(address).await; + if result.is_ok() { + break; + } + retries += 1; + if retries > MAX_HEARTBEAT_RETRIES { + warn!("Failed to send heartbeat to GrpcManager at {address}, last error: {result:?}."); + } + } + continue; + } + + tokio::time::sleep(Duration::from_secs(1)).await; + } + } + + pub(crate) fn chain_id(&self) -> u64 { + self.chain_id + } + + pub(crate) fn get_grpc_manager_client_for_request(&self) -> GrpcManagerClient { + let mut rng = thread_rng(); + self.grpc_manager_connections + .iter() + .choose(&mut rng) + .map(|kv| kv.value().clone()) + .unwrap() + } + + pub(crate) fn known_latest_version(&self) -> u64 { + self.known_latest_version.load(Ordering::SeqCst) + } + + pub(crate) fn update_known_latest_version(&self, version: u64) { + self.known_latest_version + .fetch_max(version, Ordering::SeqCst); + } + + pub(crate) fn insert_active_stream( + &self, + id: &String, + start_version: u64, + end_version: Option, + ) { + self.active_streams.insert( + id.clone(), + ( + ActiveStream { + id: Some(id.clone()), + start_time: Some(timestamp_now_proto()), + start_version, + end_version, + progress: None, + }, + StreamProgressSamples::new(), + ), + ); + } + + pub(crate) fn remove_active_stream(&self, id: &String) { + self.active_streams.remove(id); + } + + pub(crate) fn update_stream_progress(&self, id: &str, version: u64, size_bytes: u64) { + self.active_streams + .get_mut(id) + .unwrap() + .1 + .maybe_add_sample(version, size_bytes); + } + + pub(crate) fn get_active_streams(&self) -> Vec { + self.active_streams + .iter() + .map(|entry| { + let (active_stream, samples) = entry.value(); + let mut active_stream = active_stream.clone(); + active_stream.progress = Some(StreamProgress { + samples: samples.to_proto(), + }); + active_stream + }) + .collect() + } + + async fn heartbeat(&self, address: &str) -> Result<(), tonic::Status> { + info!("Sending heartbeat to GrpcManager {address}."); + let timestamp = Some(timestamp_now_proto()); + let known_latest_version = Some(self.known_latest_version()); + let stream_info = Some(StreamInfo { + active_streams: self.get_active_streams(), + }); + + let info = if self.is_live_data_service { + let min_servable_version = match LIVE_DATA_SERVICE.get() { + Some(svc) => Some(svc.get_min_servable_version().await), + None => None, + }; + Some(Info::LiveDataServiceInfo(LiveDataServiceInfo { + chain_id: self.chain_id, + timestamp, + known_latest_version, + stream_info, + min_servable_version, + })) + } else { + Some(Info::HistoricalDataServiceInfo(HistoricalDataServiceInfo { + chain_id: self.chain_id, + timestamp, + known_latest_version, + 
stream_info, + })) + }; + let service_info = ServiceInfo { + address: Some(self.self_advertised_address.clone()), + info, + }; + let request = HeartbeatRequest { + service_info: Some(service_info), + }; + let response = self + .grpc_manager_connections + .get(address) + // TODO(grao): Consider to not use unwrap here. + .unwrap() + .clone() + .heartbeat(request) + .await? + .into_inner(); + if let Some(known_latest_version) = response.known_latest_version { + info!("Received known_latest_version ({known_latest_version}) from GrpcManager {address}."); + self.update_known_latest_version(known_latest_version); + } else { + warn!("HeartbeatResponse doesn't contain known_latest_version, GrpcManager address: {address}"); + } + + Ok(()) + } + + fn create_client_from_address(address: &str) -> GrpcManagerClient { + info!("Creating GrpcManagerClient for {address}."); + let channel = Channel::from_shared(address.to_string()) + .expect("Bad address.") + .connect_lazy(); + GrpcManagerClient::new(channel) + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd) + .max_decoding_message_size(MAX_MESSAGE_SIZE) + .max_encoding_message_size(MAX_MESSAGE_SIZE) + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/historical_data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/historical_data_service.rs new file mode 100644 index 0000000000000..641a3bf93c28f --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/historical_data_service.rs @@ -0,0 +1,174 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{config::HistoricalDataServiceConfig, connection_manager::ConnectionManager}; +use aptos_indexer_grpc_utils::file_store_operator_v2::FileStoreReader; +use aptos_protos::indexer::v1::{GetTransactionsRequest, TransactionsResponse}; +use futures::executor::block_on; +use std::{ + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; +use tokio::sync::mpsc::{channel, Receiver, Sender}; +use tonic::{Request, Status}; +use tracing::info; +use uuid::Uuid; + +pub struct HistoricalDataService { + chain_id: u64, + connection_manager: Arc, + file_store_reader: Arc, +} + +impl HistoricalDataService { + pub fn new( + chain_id: u64, + config: HistoricalDataServiceConfig, + connection_manager: Arc, + ) -> Self { + let file_store = block_on(config.file_store_config.create_filestore()); + let file_store_reader = Arc::new(block_on(FileStoreReader::new(chain_id, file_store))); + Self { + chain_id, + connection_manager: connection_manager.clone(), + file_store_reader, + } + } + + pub fn run( + &self, + mut handler_rx: Receiver<( + Request, + Sender>, + )>, + ) { + info!("Running HistoricalDataService..."); + tokio_scoped::scope(|scope| { + while let Some((request, response_sender)) = handler_rx.blocking_recv() { + // TODO(grao): Store request metadata. 
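+                // Each request is validated here and then served on its own scoped task,
+                // so a slow stream does not block this handler loop.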
+ let request = request.into_inner(); + let id = Uuid::new_v4().to_string(); + info!("Received request: {request:?}."); + + if request.starting_version.is_none() { + let err = Err(Status::invalid_argument("Must provide starting_version.")); + info!("Client error: {err:?}."); + let _ = response_sender.blocking_send(err); + continue; + } + let starting_version = request.starting_version.unwrap(); + + let max_num_transactions_per_batch = if let Some(batch_size) = request.batch_size { + batch_size as usize + } else { + 10000 + }; + + let ending_version = request + .transactions_count + .map(|count| starting_version + count); + + scope.spawn(async move { + self.start_streaming( + id, + starting_version, + ending_version, + max_num_transactions_per_batch, + response_sender, + ) + .await + }); + } + }); + } + + pub(crate) fn get_connection_manager(&self) -> &ConnectionManager { + &self.connection_manager + } + + async fn start_streaming( + &self, + id: String, + starting_version: u64, + ending_version: Option, + max_num_transactions_per_batch: usize, + response_sender: tokio::sync::mpsc::Sender>, + ) { + info!(stream_id = id, "Start streaming, starting_version: {starting_version}, ending_version: {ending_version:?}."); + self.connection_manager + .insert_active_stream(&id, starting_version, ending_version); + let mut next_version = starting_version; + let ending_version = ending_version.unwrap_or(u64::MAX); + let mut size_bytes = 0; + 'out: loop { + self.connection_manager + .update_stream_progress(&id, next_version, size_bytes); + if next_version >= ending_version { + break; + } + + if !self.file_store_reader.can_serve(next_version).await { + info!(stream_id = id, "next_version {next_version} is larger or equal than file store version, terminate the stream."); + break; + } + + // TODO(grao): Pick a better channel size here, and consider doing parallel fetching + // inside the `get_transaction_batch` call based on the channel size. + let (tx, mut rx) = channel(1); + + let file_store_reader = self.file_store_reader.clone(); + tokio::spawn(async move { + file_store_reader + .get_transaction_batch( + next_version, + /*retries=*/ 3, + /*max_files=*/ None, + tx, + ) + .await; + }); + + let mut close_to_latest = false; + while let Some((transactions, batch_size_bytes)) = rx.recv().await { + next_version += transactions.len() as u64; + size_bytes += batch_size_bytes as u64; + let timestamp = transactions.first().unwrap().timestamp.unwrap(); + let timestamp_since_epoch = + Duration::new(timestamp.seconds as u64, timestamp.nanos as u32); + let now_since_epoch = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + let delta = now_since_epoch.saturating_sub(timestamp_since_epoch); + + if delta < Duration::from_secs(60) { + close_to_latest = true; + } + let responses = transactions + .chunks(max_num_transactions_per_batch) + .into_iter() + .map(|chunk| TransactionsResponse { + transactions: chunk.to_vec(), + chain_id: Some(self.chain_id), + }); + for response in responses { + if let Err(_) = response_sender.send(Ok(response)).await { + // NOTE: We are not recalculating the version and size_bytes for the stream + // progress since nobody cares about the accurate if client has dropped the + // connection. + info!(stream_id = id, "Client dropped."); + break 'out; + } + } + } + if close_to_latest { + info!( + stream_id = id, + "Stream is approaching to the latest transactions, terminate." 
+ ); + break; + } + } + + self.connection_manager + .update_stream_progress(&id, next_version, size_bytes); + self.connection_manager.remove_active_stream(&id); + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/lib.rs index 36d88a8c1b06b..23591fc9503a0 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/lib.rs @@ -2,3 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 pub mod config; +pub(crate) mod connection_manager; +pub(crate) mod historical_data_service; +pub(crate) mod live_data_service; +pub(crate) mod service; diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/live_data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/live_data_service.rs new file mode 100644 index 0000000000000..77e481405a862 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/live_data_service.rs @@ -0,0 +1,456 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{config::LiveDataServiceConfig, connection_manager::ConnectionManager}; +use aptos_protos::{ + indexer::v1::{GetTransactionsRequest, TransactionsResponse}, + transaction::v1::Transaction, +}; +use futures::future::{BoxFuture, FutureExt, Shared}; +use prost::Message; +use std::{sync::Arc, time::Duration}; +use tokio::sync::{ + mpsc::{Receiver, Sender}, + RwLock, +}; +use tonic::{Request, Status}; +use tracing::{info, trace}; +use uuid::Uuid; + +static MAX_BYTES_PER_BATCH: usize = 20 * (1 << 20); + +struct DataClient { + connection_manager: Arc, +} + +impl DataClient { + fn new(connection_manager: Arc) -> Self { + Self { connection_manager } + } + + async fn fetch_transactions(&self, starting_version: u64) -> Vec { + trace!("Fetching transactions from GrpcManager, start_version: {starting_version}."); + + let request = GetTransactionsRequest { + starting_version: Some(starting_version), + transactions_count: None, + batch_size: None, + }; + loop { + let mut client = self + .connection_manager + .get_grpc_manager_client_for_request(); + let response = client.get_transactions(request).await; + if let Ok(response) = response { + return response.into_inner().transactions; + } + // TODO(grao): Error handling. 
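+            // On failure, fall through and retry; each attempt asks the ConnectionManager
+            // for a (randomly chosen) GrpcManager client.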
+ } + } +} + +type FetchTask<'a> = Shared>; + +struct FetchManager<'a> { + data_manager: Arc>, + data_client: Arc, + fetching_latest_data_task: RwLock>>, +} + +impl<'a> FetchManager<'a> { + fn new( + data_manager: Arc>, + connection_manager: Arc, + ) -> Self { + Self { + data_manager, + data_client: Arc::new(DataClient::new(connection_manager)), + fetching_latest_data_task: RwLock::new(None), + } + } + + async fn fetch_past_data(&self, version: u64) -> usize { + Self::fetch_and_update_cache(self.data_client.clone(), self.data_manager.clone(), version) + .await + } + + async fn fetch_and_update_cache( + data_client: Arc, + data_manager: Arc>, + version: u64, + ) -> usize { + let transactions = data_client.fetch_transactions(version).await; + let len = transactions.len(); + + if len > 0 { + data_manager + .write() + .await + .update_data(version, transactions); + } + + len + } + + async fn fetch_latest_data(&'a self) -> usize { + let version = self.data_manager.read().await.end_version; + info!("Fetching latest data starting from version {version}."); + loop { + let num_transactions = Self::fetch_and_update_cache( + self.data_client.clone(), + self.data_manager.clone(), + version, + ) + .await; + if num_transactions != 0 { + info!("Finished fetching latest data, got {num_transactions} num_transactions starting from version {version}."); + return num_transactions; + } + tokio::time::sleep(Duration::from_millis(200)).await; + } + } + + async fn continuously_fetch_latest_data(&'a self) { + loop { + let task = self.fetch_latest_data().boxed().shared(); + *self.fetching_latest_data_task.write().await = Some(task.clone()); + let _ = task.await; + } + } +} + +// TODO(grao): Naive implementation for now. This can be replaced by a more performant +// implementation in the future. +struct DataManager { + start_version: u64, + end_version: u64, + data: Vec>>, + + size_limit_bytes: usize, + eviction_target: usize, + total_size: usize, + num_slots: usize, +} + +impl DataManager { + fn new(end_version: u64, num_slots: usize, size_limit_bytes: usize) -> Self { + Self { + start_version: end_version.saturating_sub(num_slots as u64), + end_version, + data: vec![None; num_slots], + size_limit_bytes, + eviction_target: size_limit_bytes, + total_size: 0, + num_slots, + } + } + + fn update_data(&mut self, start_version: u64, transactions: Vec) { + let end_version = start_version + transactions.len() as u64; + + trace!( + "Updating data for {} transactions in range [{start_version}, {end_version}).", + transactions.len(), + ); + if start_version > self.end_version { + // TODO(grao): unexpected + return; + } + + if end_version <= self.start_version { + // TODO(grao): Log and counter. 
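+            // The whole batch is older than the oldest version still held in the cache
+            // (it falls entirely before the ring buffer window), so there is nothing to store.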
+ return; + } + + let num_to_skip = self.start_version.saturating_sub(start_version); + let start_version = start_version.max(self.start_version); + + let mut size_increased = 0; + let mut size_decreased = 0; + + for (i, transaction) in transactions + .into_iter() + .enumerate() + .skip(num_to_skip as usize) + { + let version = start_version + i as u64; + let slot_index = version as usize % self.num_slots; + if let Some(transaction) = self.data[slot_index].take() { + size_decreased += transaction.encoded_len(); + } + size_increased += transaction.encoded_len(); + self.data[version as usize % self.num_slots] = Some(Box::new(transaction)); + } + + if end_version > self.end_version { + self.end_version = end_version; + if self.start_version + (self.num_slots as u64) < end_version { + self.start_version = end_version - self.num_slots as u64; + } + } + + self.total_size += size_increased; + self.total_size -= size_decreased; + + if self.total_size >= self.size_limit_bytes { + while self.total_size >= self.eviction_target { + if let Some(transaction) = + self.data[self.start_version as usize % self.num_slots].take() + { + self.total_size -= transaction.encoded_len(); + drop(transaction); + } + self.start_version += 1; + } + } + } +} + +pub struct InMemoryCache<'a> { + data_manager: Arc>, + fetch_manager: Arc>, +} + +impl<'a> InMemoryCache<'a> { + pub fn new( + connection_manager: Arc, + known_latest_version: u64, + num_slots: usize, + size_limit_bytes: usize, + ) -> Self { + let data_manager = Arc::new(RwLock::new(DataManager::new( + known_latest_version + 1, + num_slots, + size_limit_bytes, + ))); + let fetch_manager = Arc::new(FetchManager::new(data_manager.clone(), connection_manager)); + Self { + data_manager, + fetch_manager, + } + } + + async fn get_data( + &'a self, + starting_version: u64, + ending_version: u64, + max_num_transactions_per_batch: usize, + max_bytes_per_batch: usize, + ) -> Option<(Vec, usize)> { + while starting_version >= self.data_manager.read().await.end_version { + trace!("Reached head, wait..."); + let num_transactions = self + .fetch_manager + .fetching_latest_data_task + .read() + .await + .as_ref() + .unwrap() + .clone() + .await; + + trace!("Done waiting, got {num_transactions} transactions at head."); + } + + loop { + let data_manager = self.data_manager.read().await; + + trace!("Getting data from cache, requested_version: {starting_version}, oldest available version: {}.", data_manager.start_version); + if starting_version < data_manager.start_version { + return None; + } + + let start_index = starting_version as usize % data_manager.num_slots; + + if data_manager.data[start_index].is_none() { + drop(data_manager); + self.fetch_manager.fetch_past_data(starting_version).await; + continue; + } + + let mut total_bytes = 0; + let mut version = starting_version; + let ending_version = ending_version.min(data_manager.end_version); + + if let Some(_) = data_manager.data[version as usize % data_manager.num_slots].as_ref() { + let mut result = Vec::new(); + while version < ending_version + && total_bytes < max_bytes_per_batch + && result.len() < max_num_transactions_per_batch + { + if let Some(transaction) = + data_manager.data[version as usize % data_manager.num_slots].as_ref() + { + // NOTE: We allow 1 more txn beyond the size limit here, for simplicity. 
+ total_bytes += transaction.encoded_len(); + result.push(transaction.as_ref().clone()); + version += 1; + } else { + break; + } + } + trace!("Data was sent from cache, last version: {}.", version - 1); + return Some((result, total_bytes)); + } else { + unreachable!("Data cannot be None."); + } + } + } +} + +pub struct LiveDataService<'a> { + chain_id: u64, + in_memory_cache: InMemoryCache<'a>, + connection_manager: Arc, +} + +impl<'a> LiveDataService<'a> { + pub fn new( + chain_id: u64, + config: LiveDataServiceConfig, + connection_manager: Arc, + ) -> Self { + let known_latest_version = connection_manager.known_latest_version(); + Self { + chain_id, + connection_manager: connection_manager.clone(), + in_memory_cache: InMemoryCache::new( + connection_manager, + known_latest_version, + config.num_slots, + config.size_limit_bytes, + ), + } + } + + pub fn run( + &'a self, + mut handler_rx: Receiver<( + Request, + Sender>, + )>, + ) { + info!("Running LiveDataService..."); + tokio_scoped::scope(|scope| { + scope.spawn(async move { + let _ = self + .in_memory_cache + .fetch_manager + .continuously_fetch_latest_data() + .await; + }); + while let Some((request, response_sender)) = handler_rx.blocking_recv() { + // TODO(grao): Store request metadata. + let request = request.into_inner(); + let id = Uuid::new_v4().to_string(); + let known_latest_version = self.get_known_latest_version(); + let starting_version = request.starting_version.unwrap_or(known_latest_version); + + info!("Received request: {request:?}."); + if starting_version > known_latest_version + 10000 { + let err = Err(Status::failed_precondition( + "starting_version cannot be set to a far future version.", + )); + info!("Client error: {err:?}."); + let _ = response_sender.blocking_send(err); + continue; + } + + let max_num_transactions_per_batch = if let Some(batch_size) = request.batch_size { + batch_size as usize + } else { + 10000 + }; + + let ending_version = request + .transactions_count + .map(|count| starting_version + count); + + scope.spawn(async move { + self.start_streaming( + id, + starting_version, + ending_version, + max_num_transactions_per_batch, + MAX_BYTES_PER_BATCH, + response_sender, + ) + .await + }); + } + }); + } + + pub(crate) fn get_connection_manager(&self) -> &ConnectionManager { + &self.connection_manager + } + + pub(crate) async fn get_min_servable_version(&self) -> u64 { + self.in_memory_cache.data_manager.read().await.start_version + } + + async fn start_streaming( + &'a self, + id: String, + starting_version: u64, + ending_version: Option, + max_num_transactions_per_batch: usize, + max_bytes_per_batch: usize, + response_sender: tokio::sync::mpsc::Sender>, + ) { + info!(stream_id = id, "Start streaming, starting_version: {starting_version}, ending_version: {ending_version:?}."); + self.connection_manager + .insert_active_stream(&id, starting_version, ending_version); + let mut next_version = starting_version; + let mut size_bytes = 0; + let ending_version = ending_version.unwrap_or(u64::MAX); + loop { + if next_version >= ending_version { + break; + } + self.connection_manager + .update_stream_progress(&id, next_version, size_bytes); + let known_latest_version = self.get_known_latest_version(); + if next_version > known_latest_version { + info!(stream_id = id, "next_version {next_version} is larger than known_latest_version {known_latest_version}"); + tokio::time::sleep(Duration::from_millis(100)).await; + continue; + } + + if let Some((transactions, batch_size_bytes)) = self + .in_memory_cache + 
.get_data( + next_version, + ending_version, + max_num_transactions_per_batch, + max_bytes_per_batch, + ) + .await + { + next_version += transactions.len() as u64; + size_bytes += batch_size_bytes as u64; + let response = TransactionsResponse { + transactions, + chain_id: Some(self.chain_id), + }; + if let Err(_) = response_sender.send(Ok(response)).await { + info!(stream_id = id, "Client dropped."); + break; + } + } else { + let err = Err(Status::not_found("Requested data is too old.")); + info!(stream_id = id, "Client error: {err:?}."); + let _ = response_sender.send(err).await; + break; + } + } + + self.connection_manager + .update_stream_progress(&id, next_version, size_bytes); + self.connection_manager.remove_active_stream(&id); + } + + fn get_known_latest_version(&self) -> u64 { + self.connection_manager.known_latest_version() + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/service.rs new file mode 100644 index 0000000000000..ac98414e7c545 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service-v2/src/service.rs @@ -0,0 +1,200 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{config::LIVE_DATA_SERVICE, connection_manager::ConnectionManager}; +use anyhow::Result; +use aptos_indexer_grpc_utils::timestamp_now_proto; +use aptos_protos::indexer::v1::{ + data_service_server::DataService, ping_data_service_response::Info, raw_data_server::RawData, + GetTransactionsRequest, HistoricalDataServiceInfo, LiveDataServiceInfo, PingDataServiceRequest, + PingDataServiceResponse, StreamInfo, TransactionsResponse, +}; +use futures::{Stream, StreamExt}; +use std::{pin::Pin, sync::Arc}; +use tokio::sync::mpsc::{channel, Sender}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{Request, Response, Status}; + +type ResponseStream = Pin> + Send>>; + +// Note: For now we still allow starting both services together, so people don't have to rely on +// GrpcManager for routing. +pub struct DataServiceWrapperWrapper { + live_data_service: Option, + historical_data_service: Option, +} + +impl DataServiceWrapperWrapper { + pub fn new( + live_data_service: Option, + historical_data_service: Option, + ) -> Self { + Self { + live_data_service, + historical_data_service, + } + } +} + +#[tonic::async_trait] +impl DataService for DataServiceWrapperWrapper { + type GetTransactionsStream = ResponseStream; + + async fn get_transactions( + &self, + req: Request, + ) -> Result, Status> { + if let Some(live_data_service) = self.live_data_service.as_ref() { + if let Some(historical_data_service) = self.historical_data_service.as_ref() { + let request = req.into_inner(); + let mut stream = live_data_service + .get_transactions(Request::new(request.clone())) + .await? 
+ .into_inner(); + let peekable = std::pin::pin!(stream.as_mut().peekable()); + if let Some(Ok(_)) = peekable.peek().await { + return live_data_service + .get_transactions(Request::new(request.clone())) + .await; + } + + historical_data_service + .get_transactions(Request::new(request.clone())) + .await + } else { + live_data_service.get_transactions(req).await + } + } else if let Some(historical_data_service) = self.historical_data_service.as_ref() { + historical_data_service.get_transactions(req).await + } else { + unreachable!("Must have at least one of the data services enabled."); + } + } + + async fn ping( + &self, + req: Request, + ) -> Result, Status> { + let request = req.get_ref(); + if request.ping_live_data_service { + if let Some(live_data_service) = self.live_data_service.as_ref() { + live_data_service.ping(req).await + } else { + Err(Status::not_found("LiveDataService is not enabled.")) + } + } else if let Some(historical_data_service) = self.historical_data_service.as_ref() { + historical_data_service.ping(req).await + } else { + Err(Status::not_found("HistoricalDataService is not enabled.")) + } + } +} + +#[tonic::async_trait] +impl RawData for DataServiceWrapperWrapper { + type GetTransactionsStream = ResponseStream; + + async fn get_transactions( + &self, + req: Request, + ) -> Result, Status> { + DataService::get_transactions(self, req).await + } +} + +pub struct DataServiceWrapper { + connection_manager: Arc, + handler_tx: Sender<( + Request, + Sender>, + )>, + pub data_service_response_channel_size: usize, + is_live_data_service: bool, +} + +impl DataServiceWrapper { + pub fn new( + connection_manager: Arc, + handler_tx: Sender<( + Request, + Sender>, + )>, + data_service_response_channel_size: usize, + is_live_data_service: bool, + ) -> Self { + Self { + connection_manager, + handler_tx, + data_service_response_channel_size, + is_live_data_service, + } + } +} + +#[tonic::async_trait] +impl DataService for DataServiceWrapper { + type GetTransactionsStream = ResponseStream; + + async fn get_transactions( + &self, + req: Request, + ) -> Result, Status> { + let (tx, rx) = channel(self.data_service_response_channel_size); + self.handler_tx.send((req, tx)).await.unwrap(); + + let output_stream = ReceiverStream::new(rx); + let response = Response::new(Box::pin(output_stream) as Self::GetTransactionsStream); + + Ok(response) + } + + async fn ping( + &self, + req: Request, + ) -> Result, Status> { + let request = req.into_inner(); + if request.ping_live_data_service != self.is_live_data_service { + if request.ping_live_data_service { + return Err(Status::not_found("LiveDataService is not enabled.")); + } else { + return Err(Status::not_found("HistoricalDataService is not enabled.")); + } + } + + let known_latest_version = request.known_latest_version(); + self.connection_manager + .update_known_latest_version(known_latest_version); + let stream_info = StreamInfo { + active_streams: self.connection_manager.get_active_streams(), + }; + + let response = if self.is_live_data_service { + let min_servable_version = match LIVE_DATA_SERVICE.get() { + Some(svc) => Some(svc.get_min_servable_version().await), + None => None, + }; + let info = LiveDataServiceInfo { + chain_id: self.connection_manager.chain_id(), + timestamp: Some(timestamp_now_proto()), + known_latest_version: Some(known_latest_version), + stream_info: Some(stream_info), + min_servable_version, + }; + PingDataServiceResponse { + info: Some(Info::LiveDataServiceInfo(info)), + } + } else { + let info = 
HistoricalDataServiceInfo { + chain_id: self.connection_manager.chain_id(), + timestamp: Some(timestamp_now_proto()), + known_latest_version: Some(known_latest_version), + stream_info: Some(stream_info), + }; + PingDataServiceResponse { + info: Some(Info::HistoricalDataServiceInfo(info)), + } + }; + + Ok(Response::new(response)) + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/connection_manager.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/connection_manager.rs new file mode 100644 index 0000000000000..e6947a7922a41 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/connection_manager.rs @@ -0,0 +1,289 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::config::{LIVE_DATA_SERVICE, MAX_MESSAGE_SIZE}; +use aptos_indexer_grpc_utils::{system_time_to_proto, timestamp_now_proto}; +use aptos_protos::indexer::v1::{ + grpc_manager_client::GrpcManagerClient, service_info::Info, ActiveStream, HeartbeatRequest, + HistoricalDataServiceInfo, LiveDataServiceInfo, ServiceInfo, StreamInfo, StreamProgress, + StreamProgressSampleProto, +}; +use dashmap::DashMap; +use rand::prelude::*; +use std::{ + collections::VecDeque, + sync::atomic::{AtomicU64, Ordering}, + time::{Duration, SystemTime}, +}; +use tonic::{codec::CompressionEncoding, transport::channel::Channel}; +use tracing::{info, warn}; + +pub static MAX_HEARTBEAT_RETRIES: usize = 3; + +static OLD_PROGRESS_SAMPLING_RATE: Duration = Duration::from_secs(60); +static RECENT_PROGRESS_SAMPLING_RATE: Duration = Duration::from_secs(1); +static MAX_RECENT_SAMPLES_TO_KEEP: usize = 60; +static MAX_OLD_SAMPLES_TO_KEEP: usize = 60; + +#[derive(Default)] +struct StreamProgressSamples { + old_samples: VecDeque, + recent_samples: VecDeque, +} + +impl StreamProgressSamples { + fn new() -> Self { + Default::default() + } + + fn to_proto(&self) -> Vec { + self.old_samples + .iter() + .chain(self.recent_samples.iter()) + .map(|sample| StreamProgressSampleProto { + timestamp: Some(system_time_to_proto(sample.timestamp)), + version: sample.version, + size_bytes: sample.size_bytes, + }) + .collect() + } + + fn maybe_add_sample(&mut self, version: u64, size_bytes: u64) { + let now = SystemTime::now(); + let sample = StreamProgressSample { + timestamp: now, + version, + size_bytes, + }; + + if Self::accept_sample(&self.recent_samples, &sample, RECENT_PROGRESS_SAMPLING_RATE) { + self.recent_samples.push_back(sample); + if self.recent_samples.len() > MAX_RECENT_SAMPLES_TO_KEEP { + let sample = self.recent_samples.pop_front().unwrap(); + if Self::accept_sample(&self.old_samples, &sample, OLD_PROGRESS_SAMPLING_RATE) { + self.old_samples.push_back(sample); + if self.old_samples.len() > MAX_OLD_SAMPLES_TO_KEEP { + self.old_samples.pop_front(); + } + } + } + } + } + + fn accept_sample( + samples: &VecDeque, + sample: &StreamProgressSample, + sampling_rate: Duration, + ) -> bool { + if let Some(last_sample) = samples.back() { + if let Ok(delta) = sample.timestamp.duration_since(last_sample.timestamp) { + if delta >= sampling_rate { + return true; + } + } + } else { + return true; + }; + + return false; + } +} + +struct StreamProgressSample { + timestamp: SystemTime, + version: u64, + size_bytes: u64, +} + +pub(crate) struct ConnectionManager { + grpc_manager_connections: DashMap>, + self_advertised_address: String, + known_latest_version: AtomicU64, + active_streams: DashMap, + is_live_data_service: bool, +} + +impl ConnectionManager { + pub(crate) async fn new( + grpc_manager_addresses: 
Vec, + self_advertised_address: String, + is_live_data_service: bool, + ) -> Self { + let grpc_manager_connections = DashMap::new(); + grpc_manager_addresses.into_iter().for_each(|address| { + grpc_manager_connections + .insert(address.clone(), Self::create_client_from_address(&address)); + }); + let res = Self { + grpc_manager_connections, + self_advertised_address, + known_latest_version: AtomicU64::new(0), + active_streams: DashMap::new(), + is_live_data_service, + }; + + // Keep fetching latest version until it is available. + while res.known_latest_version.load(Ordering::SeqCst) == 0 { + for entry in res.grpc_manager_connections.iter() { + let address = entry.key(); + if let Err(e) = res.heartbeat(address).await { + warn!("Error during heartbeat: {e}."); + } + } + tokio::time::sleep(Duration::from_secs(1)).await; + } + + res + } + + pub(crate) async fn start(&self) { + loop { + for entry in self.grpc_manager_connections.iter() { + let address = entry.key(); + let mut retries = 0; + loop { + let result = self.heartbeat(address).await; + if result.is_ok() { + break; + } + retries += 1; + if retries > MAX_HEARTBEAT_RETRIES { + warn!("Failed to send heartbeat to GrpcManager at {address}, last error: {result:?}."); + } + } + continue; + } + + tokio::time::sleep(Duration::from_secs(1)).await; + } + } + + pub(crate) fn get_grpc_manager_client_for_request(&self) -> GrpcManagerClient { + let mut rng = thread_rng(); + self.grpc_manager_connections + .iter() + .choose(&mut rng) + .map(|kv| kv.value().clone()) + .unwrap() + } + + pub(crate) fn known_latest_version(&self) -> u64 { + self.known_latest_version.load(Ordering::SeqCst) + } + + pub(crate) fn update_known_latest_version(&self, version: u64) { + self.known_latest_version + .fetch_max(version, Ordering::SeqCst); + } + + pub(crate) fn insert_active_stream( + &self, + id: &String, + start_version: u64, + end_version: Option, + ) { + self.active_streams.insert( + id.clone(), + ( + ActiveStream { + id: Some(id.clone()), + start_time: Some(timestamp_now_proto()), + start_version, + end_version, + progress: None, + }, + StreamProgressSamples::new(), + ), + ); + } + + pub(crate) fn remove_active_stream(&self, id: &String) { + self.active_streams.remove(id); + } + + pub(crate) fn update_stream_progress(&self, id: &str, version: u64, size_bytes: u64) { + self.active_streams + .get_mut(id) + .unwrap() + .1 + .maybe_add_sample(version, size_bytes); + } + + pub(crate) fn get_active_streams(&self) -> Vec { + self.active_streams + .iter() + .map(|entry| { + let (active_stream, samples) = entry.value(); + let mut active_stream = active_stream.clone(); + active_stream.progress = Some(StreamProgress { + samples: samples.to_proto(), + }); + active_stream + }) + .collect() + } + + async fn heartbeat(&self, address: &str) -> Result<(), tonic::Status> { + info!("Sending heartbeat to GrpcManager {address}."); + let timestamp = Some(timestamp_now_proto()); + let known_latest_version = Some(self.known_latest_version()); + let stream_info = Some(StreamInfo { + active_streams: self.get_active_streams(), + }); + + let info = if self.is_live_data_service { + let min_servable_version = match LIVE_DATA_SERVICE.get() { + Some(svc) => Some(svc.get_min_servable_version().await), + None => None, + }; + Some(Info::LiveDataServiceInfo(LiveDataServiceInfo { + timestamp, + known_latest_version, + stream_info, + min_servable_version, + })) + } else { + Some(Info::HistoricalDataServiceInfo(HistoricalDataServiceInfo { + timestamp, + known_latest_version, + stream_info, + })) + }; 
+ let service_info = ServiceInfo { + address: Some(self.self_advertised_address.clone()), + info, + }; + let request = HeartbeatRequest { + service_info: Some(service_info), + }; + let response = self + .grpc_manager_connections + .get(address) + // TODO(grao): Consider to not use unwrap here. + .unwrap() + .clone() + .heartbeat(request) + .await? + .into_inner(); + if let Some(known_latest_version) = response.known_latest_version { + info!("Received known_latest_version ({known_latest_version}) from GrpcManager {address}."); + self.update_known_latest_version(known_latest_version); + } else { + warn!("HeartbeatResponse doesn't contain known_latest_version, GrpcManager address: {address}"); + } + + Ok(()) + } + + fn create_client_from_address(address: &str) -> GrpcManagerClient { + info!("Creating GrpcManagerClient for {address}."); + let channel = Channel::from_shared(address.to_string()) + .expect("Bad address.") + .connect_lazy(); + GrpcManagerClient::new(channel) + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd) + .max_decoding_message_size(MAX_MESSAGE_SIZE) + .max_encoding_message_size(MAX_MESSAGE_SIZE) + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/historical_data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/historical_data_service.rs new file mode 100644 index 0000000000000..641a3bf93c28f --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/historical_data_service.rs @@ -0,0 +1,174 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{config::HistoricalDataServiceConfig, connection_manager::ConnectionManager}; +use aptos_indexer_grpc_utils::file_store_operator_v2::FileStoreReader; +use aptos_protos::indexer::v1::{GetTransactionsRequest, TransactionsResponse}; +use futures::executor::block_on; +use std::{ + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; +use tokio::sync::mpsc::{channel, Receiver, Sender}; +use tonic::{Request, Status}; +use tracing::info; +use uuid::Uuid; + +pub struct HistoricalDataService { + chain_id: u64, + connection_manager: Arc, + file_store_reader: Arc, +} + +impl HistoricalDataService { + pub fn new( + chain_id: u64, + config: HistoricalDataServiceConfig, + connection_manager: Arc, + ) -> Self { + let file_store = block_on(config.file_store_config.create_filestore()); + let file_store_reader = Arc::new(block_on(FileStoreReader::new(chain_id, file_store))); + Self { + chain_id, + connection_manager: connection_manager.clone(), + file_store_reader, + } + } + + pub fn run( + &self, + mut handler_rx: Receiver<( + Request, + Sender>, + )>, + ) { + info!("Running HistoricalDataService..."); + tokio_scoped::scope(|scope| { + while let Some((request, response_sender)) = handler_rx.blocking_recv() { + // TODO(grao): Store request metadata. 
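+                // Each request is served on its own scoped task: it gets a fresh UUID as the
+                // stream id, must carry an explicit starting_version, and is streamed in chunks
+                // of at most `batch_size` (default 10000) transactions until transactions_count
+                // is exhausted, the stream reaches data not yet in the file store, or it gets
+                // within a minute of the chain head.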
+ let request = request.into_inner(); + let id = Uuid::new_v4().to_string(); + info!("Received request: {request:?}."); + + if request.starting_version.is_none() { + let err = Err(Status::invalid_argument("Must provide starting_version.")); + info!("Client error: {err:?}."); + let _ = response_sender.blocking_send(err); + continue; + } + let starting_version = request.starting_version.unwrap(); + + let max_num_transactions_per_batch = if let Some(batch_size) = request.batch_size { + batch_size as usize + } else { + 10000 + }; + + let ending_version = request + .transactions_count + .map(|count| starting_version + count); + + scope.spawn(async move { + self.start_streaming( + id, + starting_version, + ending_version, + max_num_transactions_per_batch, + response_sender, + ) + .await + }); + } + }); + } + + pub(crate) fn get_connection_manager(&self) -> &ConnectionManager { + &self.connection_manager + } + + async fn start_streaming( + &self, + id: String, + starting_version: u64, + ending_version: Option, + max_num_transactions_per_batch: usize, + response_sender: tokio::sync::mpsc::Sender>, + ) { + info!(stream_id = id, "Start streaming, starting_version: {starting_version}, ending_version: {ending_version:?}."); + self.connection_manager + .insert_active_stream(&id, starting_version, ending_version); + let mut next_version = starting_version; + let ending_version = ending_version.unwrap_or(u64::MAX); + let mut size_bytes = 0; + 'out: loop { + self.connection_manager + .update_stream_progress(&id, next_version, size_bytes); + if next_version >= ending_version { + break; + } + + if !self.file_store_reader.can_serve(next_version).await { + info!(stream_id = id, "next_version {next_version} is larger or equal than file store version, terminate the stream."); + break; + } + + // TODO(grao): Pick a better channel size here, and consider doing parallel fetching + // inside the `get_transaction_batch` call based on the channel size. + let (tx, mut rx) = channel(1); + + let file_store_reader = self.file_store_reader.clone(); + tokio::spawn(async move { + file_store_reader + .get_transaction_batch( + next_version, + /*retries=*/ 3, + /*max_files=*/ None, + tx, + ) + .await; + }); + + let mut close_to_latest = false; + while let Some((transactions, batch_size_bytes)) = rx.recv().await { + next_version += transactions.len() as u64; + size_bytes += batch_size_bytes as u64; + let timestamp = transactions.first().unwrap().timestamp.unwrap(); + let timestamp_since_epoch = + Duration::new(timestamp.seconds as u64, timestamp.nanos as u32); + let now_since_epoch = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + let delta = now_since_epoch.saturating_sub(timestamp_since_epoch); + + if delta < Duration::from_secs(60) { + close_to_latest = true; + } + let responses = transactions + .chunks(max_num_transactions_per_batch) + .into_iter() + .map(|chunk| TransactionsResponse { + transactions: chunk.to_vec(), + chain_id: Some(self.chain_id), + }); + for response in responses { + if let Err(_) = response_sender.send(Ok(response)).await { + // NOTE: We are not recalculating the version and size_bytes for the stream + // progress since nobody cares about the accurate if client has dropped the + // connection. + info!(stream_id = id, "Client dropped."); + break 'out; + } + } + } + if close_to_latest { + info!( + stream_id = id, + "Stream is approaching to the latest transactions, terminate." 
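+                        // Within roughly 60s of real time the historical service stops serving;
+                        // presumably the client is expected to reconnect and be routed to the
+                        // live data service for the head of the chain.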
+ ); + break; + } + } + + self.connection_manager + .update_stream_progress(&id, next_version, size_bytes); + self.connection_manager.remove_active_stream(&id); + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/live_data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/live_data_service.rs new file mode 100644 index 0000000000000..77e481405a862 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/live_data_service.rs @@ -0,0 +1,456 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{config::LiveDataServiceConfig, connection_manager::ConnectionManager}; +use aptos_protos::{ + indexer::v1::{GetTransactionsRequest, TransactionsResponse}, + transaction::v1::Transaction, +}; +use futures::future::{BoxFuture, FutureExt, Shared}; +use prost::Message; +use std::{sync::Arc, time::Duration}; +use tokio::sync::{ + mpsc::{Receiver, Sender}, + RwLock, +}; +use tonic::{Request, Status}; +use tracing::{info, trace}; +use uuid::Uuid; + +static MAX_BYTES_PER_BATCH: usize = 20 * (1 << 20); + +struct DataClient { + connection_manager: Arc, +} + +impl DataClient { + fn new(connection_manager: Arc) -> Self { + Self { connection_manager } + } + + async fn fetch_transactions(&self, starting_version: u64) -> Vec { + trace!("Fetching transactions from GrpcManager, start_version: {starting_version}."); + + let request = GetTransactionsRequest { + starting_version: Some(starting_version), + transactions_count: None, + batch_size: None, + }; + loop { + let mut client = self + .connection_manager + .get_grpc_manager_client_for_request(); + let response = client.get_transactions(request).await; + if let Ok(response) = response { + return response.into_inner().transactions; + } + // TODO(grao): Error handling. + } + } +} + +type FetchTask<'a> = Shared>; + +struct FetchManager<'a> { + data_manager: Arc>, + data_client: Arc, + fetching_latest_data_task: RwLock>>, +} + +impl<'a> FetchManager<'a> { + fn new( + data_manager: Arc>, + connection_manager: Arc, + ) -> Self { + Self { + data_manager, + data_client: Arc::new(DataClient::new(connection_manager)), + fetching_latest_data_task: RwLock::new(None), + } + } + + async fn fetch_past_data(&self, version: u64) -> usize { + Self::fetch_and_update_cache(self.data_client.clone(), self.data_manager.clone(), version) + .await + } + + async fn fetch_and_update_cache( + data_client: Arc, + data_manager: Arc>, + version: u64, + ) -> usize { + let transactions = data_client.fetch_transactions(version).await; + let len = transactions.len(); + + if len > 0 { + data_manager + .write() + .await + .update_data(version, transactions); + } + + len + } + + async fn fetch_latest_data(&'a self) -> usize { + let version = self.data_manager.read().await.end_version; + info!("Fetching latest data starting from version {version}."); + loop { + let num_transactions = Self::fetch_and_update_cache( + self.data_client.clone(), + self.data_manager.clone(), + version, + ) + .await; + if num_transactions != 0 { + info!("Finished fetching latest data, got {num_transactions} num_transactions starting from version {version}."); + return num_transactions; + } + tokio::time::sleep(Duration::from_millis(200)).await; + } + } + + async fn continuously_fetch_latest_data(&'a self) { + loop { + let task = self.fetch_latest_data().boxed().shared(); + *self.fetching_latest_data_task.write().await = Some(task.clone()); + let _ = task.await; + } + } +} + +// TODO(grao): Naive implementation for now. 
This can be replaced by a more performant +// implementation in the future. +struct DataManager { + start_version: u64, + end_version: u64, + data: Vec>>, + + size_limit_bytes: usize, + eviction_target: usize, + total_size: usize, + num_slots: usize, +} + +impl DataManager { + fn new(end_version: u64, num_slots: usize, size_limit_bytes: usize) -> Self { + Self { + start_version: end_version.saturating_sub(num_slots as u64), + end_version, + data: vec![None; num_slots], + size_limit_bytes, + eviction_target: size_limit_bytes, + total_size: 0, + num_slots, + } + } + + fn update_data(&mut self, start_version: u64, transactions: Vec) { + let end_version = start_version + transactions.len() as u64; + + trace!( + "Updating data for {} transactions in range [{start_version}, {end_version}).", + transactions.len(), + ); + if start_version > self.end_version { + // TODO(grao): unexpected + return; + } + + if end_version <= self.start_version { + // TODO(grao): Log and counter. + return; + } + + let num_to_skip = self.start_version.saturating_sub(start_version); + let start_version = start_version.max(self.start_version); + + let mut size_increased = 0; + let mut size_decreased = 0; + + for (i, transaction) in transactions + .into_iter() + .enumerate() + .skip(num_to_skip as usize) + { + let version = start_version + i as u64; + let slot_index = version as usize % self.num_slots; + if let Some(transaction) = self.data[slot_index].take() { + size_decreased += transaction.encoded_len(); + } + size_increased += transaction.encoded_len(); + self.data[version as usize % self.num_slots] = Some(Box::new(transaction)); + } + + if end_version > self.end_version { + self.end_version = end_version; + if self.start_version + (self.num_slots as u64) < end_version { + self.start_version = end_version - self.num_slots as u64; + } + } + + self.total_size += size_increased; + self.total_size -= size_decreased; + + if self.total_size >= self.size_limit_bytes { + while self.total_size >= self.eviction_target { + if let Some(transaction) = + self.data[self.start_version as usize % self.num_slots].take() + { + self.total_size -= transaction.encoded_len(); + drop(transaction); + } + self.start_version += 1; + } + } + } +} + +pub struct InMemoryCache<'a> { + data_manager: Arc>, + fetch_manager: Arc>, +} + +impl<'a> InMemoryCache<'a> { + pub fn new( + connection_manager: Arc, + known_latest_version: u64, + num_slots: usize, + size_limit_bytes: usize, + ) -> Self { + let data_manager = Arc::new(RwLock::new(DataManager::new( + known_latest_version + 1, + num_slots, + size_limit_bytes, + ))); + let fetch_manager = Arc::new(FetchManager::new(data_manager.clone(), connection_manager)); + Self { + data_manager, + fetch_manager, + } + } + + async fn get_data( + &'a self, + starting_version: u64, + ending_version: u64, + max_num_transactions_per_batch: usize, + max_bytes_per_batch: usize, + ) -> Option<(Vec, usize)> { + while starting_version >= self.data_manager.read().await.end_version { + trace!("Reached head, wait..."); + let num_transactions = self + .fetch_manager + .fetching_latest_data_task + .read() + .await + .as_ref() + .unwrap() + .clone() + .await; + + trace!("Done waiting, got {num_transactions} transactions at head."); + } + + loop { + let data_manager = self.data_manager.read().await; + + trace!("Getting data from cache, requested_version: {starting_version}, oldest available version: {}.", data_manager.start_version); + if starting_version < data_manager.start_version { + return None; + } + + let start_index = 
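+                // Slots form a fixed ring indexed by `version % num_slots` (e.g. with
+                // num_slots = 10_000, version 123_456 lands in slot 3_456), so the range
+                // checks above and the None check below guard against reading a slot that
+                // has been evicted or currently holds a different version.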
starting_version as usize % data_manager.num_slots; + + if data_manager.data[start_index].is_none() { + drop(data_manager); + self.fetch_manager.fetch_past_data(starting_version).await; + continue; + } + + let mut total_bytes = 0; + let mut version = starting_version; + let ending_version = ending_version.min(data_manager.end_version); + + if let Some(_) = data_manager.data[version as usize % data_manager.num_slots].as_ref() { + let mut result = Vec::new(); + while version < ending_version + && total_bytes < max_bytes_per_batch + && result.len() < max_num_transactions_per_batch + { + if let Some(transaction) = + data_manager.data[version as usize % data_manager.num_slots].as_ref() + { + // NOTE: We allow 1 more txn beyond the size limit here, for simplicity. + total_bytes += transaction.encoded_len(); + result.push(transaction.as_ref().clone()); + version += 1; + } else { + break; + } + } + trace!("Data was sent from cache, last version: {}.", version - 1); + return Some((result, total_bytes)); + } else { + unreachable!("Data cannot be None."); + } + } + } +} + +pub struct LiveDataService<'a> { + chain_id: u64, + in_memory_cache: InMemoryCache<'a>, + connection_manager: Arc, +} + +impl<'a> LiveDataService<'a> { + pub fn new( + chain_id: u64, + config: LiveDataServiceConfig, + connection_manager: Arc, + ) -> Self { + let known_latest_version = connection_manager.known_latest_version(); + Self { + chain_id, + connection_manager: connection_manager.clone(), + in_memory_cache: InMemoryCache::new( + connection_manager, + known_latest_version, + config.num_slots, + config.size_limit_bytes, + ), + } + } + + pub fn run( + &'a self, + mut handler_rx: Receiver<( + Request, + Sender>, + )>, + ) { + info!("Running LiveDataService..."); + tokio_scoped::scope(|scope| { + scope.spawn(async move { + let _ = self + .in_memory_cache + .fetch_manager + .continuously_fetch_latest_data() + .await; + }); + while let Some((request, response_sender)) = handler_rx.blocking_recv() { + // TODO(grao): Store request metadata. 
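+                // Unlike the historical service, a missing starting_version defaults to the
+                // latest version known to the ConnectionManager, and requests starting more
+                // than 10000 versions beyond it are rejected with failed_precondition.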
+ let request = request.into_inner(); + let id = Uuid::new_v4().to_string(); + let known_latest_version = self.get_known_latest_version(); + let starting_version = request.starting_version.unwrap_or(known_latest_version); + + info!("Received request: {request:?}."); + if starting_version > known_latest_version + 10000 { + let err = Err(Status::failed_precondition( + "starting_version cannot be set to a far future version.", + )); + info!("Client error: {err:?}."); + let _ = response_sender.blocking_send(err); + continue; + } + + let max_num_transactions_per_batch = if let Some(batch_size) = request.batch_size { + batch_size as usize + } else { + 10000 + }; + + let ending_version = request + .transactions_count + .map(|count| starting_version + count); + + scope.spawn(async move { + self.start_streaming( + id, + starting_version, + ending_version, + max_num_transactions_per_batch, + MAX_BYTES_PER_BATCH, + response_sender, + ) + .await + }); + } + }); + } + + pub(crate) fn get_connection_manager(&self) -> &ConnectionManager { + &self.connection_manager + } + + pub(crate) async fn get_min_servable_version(&self) -> u64 { + self.in_memory_cache.data_manager.read().await.start_version + } + + async fn start_streaming( + &'a self, + id: String, + starting_version: u64, + ending_version: Option, + max_num_transactions_per_batch: usize, + max_bytes_per_batch: usize, + response_sender: tokio::sync::mpsc::Sender>, + ) { + info!(stream_id = id, "Start streaming, starting_version: {starting_version}, ending_version: {ending_version:?}."); + self.connection_manager + .insert_active_stream(&id, starting_version, ending_version); + let mut next_version = starting_version; + let mut size_bytes = 0; + let ending_version = ending_version.unwrap_or(u64::MAX); + loop { + if next_version >= ending_version { + break; + } + self.connection_manager + .update_stream_progress(&id, next_version, size_bytes); + let known_latest_version = self.get_known_latest_version(); + if next_version > known_latest_version { + info!(stream_id = id, "next_version {next_version} is larger than known_latest_version {known_latest_version}"); + tokio::time::sleep(Duration::from_millis(100)).await; + continue; + } + + if let Some((transactions, batch_size_bytes)) = self + .in_memory_cache + .get_data( + next_version, + ending_version, + max_num_transactions_per_batch, + max_bytes_per_batch, + ) + .await + { + next_version += transactions.len() as u64; + size_bytes += batch_size_bytes as u64; + let response = TransactionsResponse { + transactions, + chain_id: Some(self.chain_id), + }; + if let Err(_) = response_sender.send(Ok(response)).await { + info!(stream_id = id, "Client dropped."); + break; + } + } else { + let err = Err(Status::not_found("Requested data is too old.")); + info!(stream_id = id, "Client error: {err:?}."); + let _ = response_sender.send(err).await; + break; + } + } + + self.connection_manager + .update_stream_progress(&id, next_version, size_bytes); + self.connection_manager.remove_active_stream(&id); + } + + fn get_known_latest_version(&self) -> u64 { + self.connection_manager.known_latest_version() + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs index 94cdf25b686d9..388e76aa3b534 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs @@ -2,16 +2,26 @@ // SPDX-License-Identifier: 
Apache-2.0 use crate::{counters::CHANNEL_SIZE, stream_coordinator::IndexerStreamCoordinator, ServiceContext}; -use aptos_indexer_grpc_utils::counters::{log_grpc_step_fullnode, IndexerGrpcStep}; +use aptos_indexer_grpc_utils::{ + counters::{log_grpc_step_fullnode, IndexerGrpcStep}, + timestamp_now_proto, +}; use aptos_logger::{error, info}; use aptos_moving_average::MovingAverage; -use aptos_protos::internal::fullnode::v1::{ - fullnode_data_server::FullnodeData, stream_status::StatusType, transactions_from_node_response, - GetTransactionsFromNodeRequest, PingFullnodeRequest, PingFullnodeResponse, StreamStatus, - TransactionsFromNodeResponse, +use aptos_protos::{ + indexer::v1::FullnodeInfo, + internal::fullnode::v1::{ + fullnode_data_server::FullnodeData, stream_status::StatusType, + transactions_from_node_response, GetTransactionsFromNodeRequest, PingFullnodeRequest, + PingFullnodeResponse, StreamStatus, TransactionsFromNodeResponse, + }, + util::timestamp::Timestamp, }; use futures::Stream; -use std::pin::Pin; +use std::{ + pin::Pin, + time::{SystemTime, UNIX_EPOCH}, +}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status}; @@ -162,7 +172,19 @@ impl FullnodeData for FullnodeDataService { &self, _request: Request, ) -> Result, Status> { - unimplemented!() + let timestamp = timestamp_now_proto(); + + let info = FullnodeInfo { + timestamp: Some(timestamp), + known_latest_version: self + .service_context + .context + .db + .get_synced_version() + .map_err(|e| Status::internal(format!("{e}")))?, + }; + let response = PingFullnodeResponse { info: Some(info) }; + Ok(Response::new(response)) } } diff --git a/ecosystem/indexer-grpc/indexer-grpc-manager/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-manager/Cargo.toml index 29cdcff29faf7..d58627a7098f0 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-manager/Cargo.toml +++ b/ecosystem/indexer-grpc/indexer-grpc-manager/Cargo.toml @@ -15,14 +15,22 @@ rust-version = { workspace = true } [dependencies] anyhow = { workspace = true } aptos-indexer-grpc-server-framework = { workspace = true } +aptos-indexer-grpc-utils = { workspace = true } aptos-protos = { workspace = true } async-trait = { workspace = true } +build_html = { workspace = true } clap = { workspace = true } +dashmap = { workspace = true } +futures = { workspace = true } +prost = { workspace = true } +rand = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } tokio = { workspace = true } tokio-scoped = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } +warp = { workspace = true } [dev-dependencies] aptos-config = { workspace = true } diff --git a/ecosystem/indexer-grpc/indexer-grpc-manager/src/config.rs b/ecosystem/indexer-grpc/indexer-grpc-manager/src/config.rs index 591388a8c06df..36f428a4dec75 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-manager/src/config.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-manager/src/config.rs @@ -1,12 +1,29 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::grpc_manager::GrpcManager; +use crate::{data_manager::DataManager, grpc_manager::GrpcManager}; use anyhow::Result; use aptos_indexer_grpc_server_framework::RunnableConfig; +use aptos_indexer_grpc_utils::{ + config::IndexerGrpcFileStoreConfig, + status_page::{get_throughput_from_samples, render_status_page, Tab}, +}; +use aptos_protos::{ + indexer::v1::{FullnodeInfo, HistoricalDataServiceInfo, LiveDataServiceInfo, StreamInfo}, + 
util::timestamp::Timestamp, +}; +use build_html::{ + Container, ContainerType, HtmlContainer, HtmlElement, HtmlTag, Table, TableCell, TableCellType, + TableRow, +}; use serde::{Deserialize, Serialize}; -use std::net::SocketAddr; +use std::{ + collections::{HashMap, VecDeque}, + net::SocketAddr, + time::Duration, +}; use tokio::sync::OnceCell; +use warp::{reply::Response, Rejection}; static GRPC_MANAGER: OnceCell = OnceCell::const_new(); @@ -15,11 +32,30 @@ pub(crate) struct ServiceConfig { pub(crate) listen_address: SocketAddr, } +#[derive(Clone, Debug, Deserialize, Serialize)] +pub(crate) struct CacheConfig { + pub(crate) max_cache_size: usize, + pub(crate) target_cache_size: usize, +} + #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct IndexerGrpcManagerConfig { pub(crate) chain_id: u64, pub(crate) service_config: ServiceConfig, + #[serde(default = "default_cache_config")] + pub(crate) cache_config: CacheConfig, + pub(crate) file_store_config: IndexerGrpcFileStoreConfig, + pub(crate) self_advertised_address: String, + pub(crate) grpc_manager_addresses: Vec, + pub(crate) fullnode_addresses: Vec, +} + +const fn default_cache_config() -> CacheConfig { + CacheConfig { + max_cache_size: 5 * (1 << 30), + target_cache_size: 4 * (1 << 30), + } } #[async_trait::async_trait] @@ -34,4 +70,396 @@ impl RunnableConfig for IndexerGrpcManagerConfig { fn get_server_name(&self) -> String { "grpc_manager".to_string() } + + async fn status_page(&self) -> Result { + let mut tabs = vec![]; + + if let Some(grpc_manager) = GRPC_MANAGER.get() { + let data_manager = grpc_manager.get_data_manager(); + tabs.push(render_overview_tab(data_manager).await); + let metadata_manager = grpc_manager.get_metadata_manager(); + tabs.push(render_fullnode_tab(metadata_manager.get_fullnodes_info())); + let live_data_services_info = metadata_manager.get_live_data_services_info(); + let historical_data_services_info = + metadata_manager.get_historical_data_services_info(); + tabs.push(render_live_data_service_tab(&live_data_services_info)); + tabs.push(render_historical_data_service_tab( + &historical_data_services_info, + )); + tabs.push(render_stream_tab( + &live_data_services_info, + &historical_data_services_info, + )); + } + + render_status_page(tabs) + } +} + +fn render_fullnode_tab(fullnodes_info: HashMap>) -> Tab { + let overview = Container::new(ContainerType::Section) + .with_paragraph_attr( + "Connected Fullnodes", + [("style", "font-size: 24px; font-weight: bold;")], + ) + .with_table( + fullnodes_info.into_iter().fold( + Table::new() + .with_attributes([("style", "width: 100%; border: 5px solid black;")]) + .with_thead_attributes([( + "style", + "background-color: lightcoral; color: white;", + )]) + .with_custom_header_row( + TableRow::new() + .with_cell(TableCell::new(TableCellType::Header).with_raw("Id")) + .with_cell( + TableCell::new(TableCellType::Header) + .with_raw("Last Ping/Heartbeat Time"), + ) + .with_cell( + TableCell::new(TableCellType::Header) + .with_raw("Known Latest Version"), + ), + ), + |table, fullnode_info| { + let last_sample = fullnode_info.1.back(); + let (timestamp, known_latest_version) = if let Some(last_sample) = last_sample { + ( + format!("{:?}", last_sample.timestamp.unwrap()), + format!("{}", last_sample.known_latest_version()), + ) + } else { + ("No data point.".to_string(), "No data point.".to_string()) + }; + table.with_custom_body_row( + TableRow::new() + .with_cell( + TableCell::new(TableCellType::Data).with_raw(fullnode_info.0), + ) + 
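+                            // One row per connected fullnode: the address it registered under,
+                            // the timestamp of its most recent ping sample, and the latest
+                            // version it reported.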
.with_cell(TableCell::new(TableCellType::Data).with_raw(timestamp)) + .with_cell( + TableCell::new(TableCellType::Data).with_raw(known_latest_version), + ), + ) + }, + ), + ); + let content = HtmlElement::new(HtmlTag::Div) + .with_container(overview) + .into(); + + Tab::new("Fullnodes", content) +} + +fn render_live_data_service_tab( + data_services_info: &HashMap>, +) -> Tab { + let column_names = [ + "Id", + "Last Ping/Heartbeat Time", + "Known Latest Version", + "Min Servable Version", + "# of Connected Streams", + ]; + + let rows = data_services_info + .into_iter() + .map(|entry| { + let id = entry.0.clone(); + let last_sample = entry.1.back(); + let (timestamp, known_latest_version, min_servable_version, num_connected_streams) = + if let Some(last_sample) = last_sample { + ( + format!("{:?}", last_sample.timestamp.unwrap()), + format!("{}", last_sample.known_latest_version()), + format!("{:?}", last_sample.min_servable_version), + format!( + "{}", + last_sample + .stream_info + .as_ref() + .map(|stream_info| stream_info.active_streams.len()) + .unwrap_or_default() + ), + ) + } else { + ( + "No data point.".to_string(), + "No data point.".to_string(), + "No data point.".to_string(), + "No data point.".to_string(), + ) + }; + + [ + id, + timestamp, + known_latest_version, + min_servable_version, + num_connected_streams, + ] + }) + .collect(); + + render_data_service_tab("LiveDataServices", column_names, rows) +} + +fn render_historical_data_service_tab( + data_services_info: &HashMap>, +) -> Tab { + let column_names = [ + "Id", + "Last Ping/Heartbeat Time", + "Known Latest Version", + "# of Connected Streams", + ]; + + let rows = data_services_info + .into_iter() + .map(|entry| { + let id = entry.0.clone(); + let last_sample = entry.1.back(); + let (timestamp, known_latest_version, num_connected_streams) = + if let Some(last_sample) = last_sample { + ( + format!("{:?}", last_sample.timestamp.unwrap()), + format!("{}", last_sample.known_latest_version()), + format!( + "{}", + last_sample + .stream_info + .as_ref() + .map(|stream_info| stream_info.active_streams.len()) + .unwrap_or_default() + ), + ) + } else { + ( + "No data point.".to_string(), + "No data point.".to_string(), + "No data point.".to_string(), + ) + }; + + [id, timestamp, known_latest_version, num_connected_streams] + }) + .collect(); + + render_data_service_tab("HistoricalDataServices", column_names, rows) +} + +fn render_data_service_tab( + tab_name: &str, + column_names: [&str; N], + rows: Vec<[String; N]>, +) -> Tab { + let overview = Container::new(ContainerType::Section) + .with_paragraph_attr( + format!("Connected {tab_name}"), + [("style", "font-size: 24px; font-weight: bold;")], + ) + .with_table( + rows.iter().fold( + Table::new() + .with_attributes([("style", "width: 100%; border: 5px solid black;")]) + .with_thead_attributes([( + "style", + "background-color: lightcoral; color: white;", + )]) + .with_custom_header_row(column_names.into_iter().fold( + TableRow::new(), + |row, column_name| { + row.with_cell( + TableCell::new(TableCellType::Header).with_raw(column_name), + ) + }, + )), + |table, row| { + table.with_custom_body_row(row.into_iter().fold(TableRow::new(), |r, cell| { + r.with_cell(TableCell::new(TableCellType::Data).with_raw(cell)) + })) + }, + ), + ); + let content = HtmlElement::new(HtmlTag::Div) + .with_container(overview) + .into(); + + Tab::new(tab_name, content) +} + +fn render_live_data_service_streams( + data_service_info: &HashMap>, +) -> Table { + let streams = data_service_info + 
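+        // Keep only the newest heartbeat sample per data service instance; its StreamInfo is
+        // expanded below into one table row per active stream, with throughput over the last
+        // 10s/60s/10min derived from the progress samples.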
.into_iter() + .filter_map(|entry| { + entry + .1 + .back() + .cloned() + .map(|sample| { + sample.stream_info.map(|stream_info| { + let data_service_instance = entry.0.clone(); + ( + data_service_instance, + sample.timestamp.unwrap(), + stream_info, + ) + }) + }) + .flatten() + }) + .collect(); + + render_stream_table(streams) +} + +fn render_historical_data_service_streams( + data_service_info: &HashMap>, +) -> Table { + let streams = data_service_info + .into_iter() + .filter_map(|entry| { + entry + .1 + .back() + .cloned() + .map(|sample| { + sample.stream_info.map(|stream_info| { + let data_service_instance = entry.0.clone(); + ( + data_service_instance, + sample.timestamp.unwrap(), + stream_info, + ) + }) + }) + .flatten() + }) + .collect(); + + render_stream_table(streams) +} + +fn render_stream_table(streams: Vec<(String, Timestamp, StreamInfo)>) -> Table { + streams.into_iter().fold( + Table::new() + .with_attributes([("style", "width: 100%; border: 5px solid black;")]) + .with_thead_attributes([("style", "background-color: lightcoral; color: white;")]) + .with_custom_header_row( + TableRow::new() + .with_cell(TableCell::new(TableCellType::Header).with_raw("Stream Id")) + .with_cell(TableCell::new(TableCellType::Header).with_raw("Timestamp")) + .with_cell(TableCell::new(TableCellType::Header).with_raw("Current Version")) + .with_cell(TableCell::new(TableCellType::Header).with_raw("End Version")) + .with_cell( + TableCell::new(TableCellType::Header).with_raw("Data Service Instance"), + ) + .with_cell( + TableCell::new(TableCellType::Header).with_raw("Past 10s throughput"), + ) + .with_cell( + TableCell::new(TableCellType::Header).with_raw("Past 60s throughput"), + ) + .with_cell( + TableCell::new(TableCellType::Header).with_raw("Past 10min throughput"), + ), + ), + |mut table, stream| { + let data_service_instance = stream.0; + let timestamp = format!("{:?}", stream.1); + stream.2.active_streams.iter().for_each(|active_stream| { + table.add_custom_body_row( + TableRow::new() + .with_cell(TableCell::new(TableCellType::Data).with_raw(active_stream.id())) + .with_cell(TableCell::new(TableCellType::Data).with_raw(×tamp)) + .with_cell(TableCell::new(TableCellType::Data).with_raw(format!( + "{:?}", + active_stream + .progress + .as_ref() + .map(|progress| { + progress.samples.last().map(|sample| sample.version) + }) + .flatten() + ))) + .with_cell( + TableCell::new(TableCellType::Data) + .with_raw(active_stream.end_version()), + ) + .with_cell( + TableCell::new(TableCellType::Data) + .with_raw(data_service_instance.as_str()), + ) + .with_cell(TableCell::new(TableCellType::Data).with_raw( + get_throughput_from_samples( + active_stream.progress.as_ref(), + Duration::from_secs(10), + ), + )) + .with_cell(TableCell::new(TableCellType::Data).with_raw( + get_throughput_from_samples( + active_stream.progress.as_ref(), + Duration::from_secs(60), + ), + )) + .with_cell(TableCell::new(TableCellType::Data).with_raw( + get_throughput_from_samples( + active_stream.progress.as_ref(), + Duration::from_secs(600), + ), + )), + ) + }); + table + }, + ) +} + +fn render_stream_tab( + live_data_services_info: &HashMap>, + historical_data_services_info: &HashMap>, +) -> Tab { + let overview = Container::new(ContainerType::Section) + .with_paragraph_attr( + format!("Connected Streams"), + [("style", "font-size: 24px; font-weight: bold;")], + ) + .with_paragraph_attr( + format!("LiveDataService Streams"), + [("style", "font-size: 18px; font-weight: bold;")], + ) + 
.with_table(render_live_data_service_streams(live_data_services_info)) + .with_paragraph_attr( + format!("HistoricalDataService Streams"), + [("style", "font-size: 18px; font-weight: bold;")], + ) + .with_table(render_historical_data_service_streams( + historical_data_services_info, + )); + let content = HtmlElement::new(HtmlTag::Div) + .with_container(overview) + .into(); + + Tab::new("Streams", content) +} + +async fn render_overview_tab(data_manager: &DataManager) -> Tab { + let overview = Container::new(ContainerType::Section) + .with_paragraph_attr( + format!("Cache Stats"), + [("style", "font-size: 24px; font-weight: bold;")], + ) + .with_paragraph_attr( + data_manager.cache_stats().await, + [("style", "font-size: 16px;")], + ); + + let content = HtmlElement::new(HtmlTag::Div) + .with_container(overview) + .into(); + + Tab::new("Overview", content) } diff --git a/ecosystem/indexer-grpc/indexer-grpc-manager/src/data_manager.rs b/ecosystem/indexer-grpc/indexer-grpc-manager/src/data_manager.rs new file mode 100644 index 0000000000000..33635a830c5c9 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-manager/src/data_manager.rs @@ -0,0 +1,320 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{config::CacheConfig, metadata_manager::MetadataManager}; +use anyhow::{bail, ensure, Result}; +use aptos_indexer_grpc_utils::{ + config::IndexerGrpcFileStoreConfig, file_store_operator_v2::FileStoreReader, +}; +use aptos_protos::{ + internal::fullnode::v1::{ + transactions_from_node_response::Response, GetTransactionsFromNodeRequest, + }, + transaction::v1::Transaction, +}; +use futures::StreamExt; +use prost::Message; +use std::{ + collections::VecDeque, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; +use tokio::sync::{mpsc::channel, RwLock}; +use tracing::{debug, error, info, trace, warn}; + +struct Cache { + start_version: u64, + file_store_version: AtomicU64, + transactions: VecDeque, + cache_size: usize, + + max_cache_size: usize, + target_cache_size: usize, +} + +impl Cache { + fn new(cache_config: CacheConfig, file_store_version: u64) -> Self { + Self { + start_version: file_store_version, + file_store_version: AtomicU64::new(file_store_version), + transactions: VecDeque::new(), + cache_size: 0, + max_cache_size: cache_config.max_cache_size, + target_cache_size: cache_config.target_cache_size, + } + } + + fn maybe_evict(&mut self) -> bool { + if self.cache_size <= self.max_cache_size { + return true; + } + + while self.start_version < self.file_store_version.load(Ordering::SeqCst) + && self.cache_size > self.target_cache_size + { + let transaction = self.transactions.pop_front().unwrap(); + self.cache_size -= transaction.encoded_len(); + self.start_version += 1; + } + + self.cache_size <= self.max_cache_size + } + + fn put_transactions(&mut self, transactions: Vec) { + self.cache_size += transactions + .iter() + .map(|transaction| transaction.encoded_len()) + .sum::(); + self.transactions.extend(transactions); + } + + fn get_transactions( + &self, + start_version: u64, + max_size_bytes: usize, + update_file_store_version: bool, + ) -> Vec { + if !update_file_store_version { + trace!( + "Requesting version {start_version} from cache, update_file_store_version = {update_file_store_version}.", + ); + trace!( + "Current data range in cache: [{}, {}).", + self.start_version, + self.start_version + self.transactions.len() as u64 + ); + } + if start_version < self.start_version { + return vec![]; + } + + let mut transactions 
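+            // Copy transactions starting at the requested offset until the byte budget is
+            // exceeded (allowing one transaction of overshoot). When update_file_store_version
+            // is set this call is the uploader draining the cache, and the number of drained
+            // transactions advances file_store_version below.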
= vec![]; + let mut size_bytes = 0; + for transaction in self + .transactions + .iter() + .skip((start_version - self.start_version) as usize) + { + size_bytes += transaction.encoded_len(); + transactions.push(transaction.clone()); + if size_bytes > max_size_bytes { + // Note: We choose to not pop the last transaction here, so the size could be + // slightly larger than the `max_size_bytes`. This is fine. + break; + } + } + if update_file_store_version { + self.file_store_version + .fetch_add(transactions.len() as u64, Ordering::SeqCst); + } else { + trace!( + "Returned {} transactions from Cache, total {size_bytes} bytes.", + transactions.len() + ); + } + transactions + } +} + +pub(crate) struct DataManager { + cache: RwLock, + file_store_reader: FileStoreReader, + metadata_manager: Arc, +} + +impl DataManager { + pub(crate) async fn new( + chain_id: u64, + file_store_config: IndexerGrpcFileStoreConfig, + cache_config: CacheConfig, + metadata_manager: Arc, + ) -> Self { + let file_store = file_store_config.create_filestore().await; + let file_store_reader = FileStoreReader::new(chain_id, file_store).await; + let file_store_version = file_store_reader.get_latest_version().await.unwrap(); + Self { + cache: RwLock::new(Cache::new(cache_config, file_store_version)), + file_store_reader, + metadata_manager, + } + } + + pub(crate) async fn start(&self) { + info!("Starting DataManager loop."); + + 'out: loop { + let mut fullnode_client = self.metadata_manager.get_fullnode_for_request(); + let cache = self.cache.read().await; + let request = GetTransactionsFromNodeRequest { + starting_version: Some(cache.start_version + cache.transactions.len() as u64), + transactions_count: Some(100000), + }; + drop(cache); + + debug!( + "Requesting transactions from fullnodes, starting_version: {}.", + request.starting_version.unwrap() + ); + let response = fullnode_client.get_transactions_from_node(request).await; + if response.is_err() { + warn!( + "Error when getting transactions from fullnode: {}", + response.err().unwrap() + ); + tokio::time::sleep(Duration::from_millis(100)).await; + continue; + } + + let mut response = response.unwrap().into_inner(); + while let Some(response_item) = response.next().await { + loop { + if self.cache.write().await.maybe_evict() { + break; + } + let cache = self.cache.read().await; + warn!("Filestore is lagging behind, cache is full [{}, {}), known_latest_version ({}).", + cache.start_version, + cache.start_version + cache.transactions.len() as u64, + self.metadata_manager.get_known_latest_version()); + tokio::time::sleep(Duration::from_millis(100)).await; + } + match response_item { + Ok(r) => { + if let Some(response) = r.response { + match response { + Response::Data(data) => { + self.cache.write().await.put_transactions(data.transactions); + }, + Response::Status(_) => continue, + } + } else { + warn!("Error when getting transactions from fullnode: no data."); + continue 'out; + } + }, + Err(e) => { + warn!("Error when getting transactions from fullnode: {}", e); + continue 'out; + }, + } + } + } + } + + pub(crate) fn lagging(&self, cache_next_version: u64) -> bool { + // TODO(grao): Need a better way, we can use the information in the metadata_manager. 
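+        // Heuristic: the manager is lagging when its cache tail is more than 20000 versions
+        // behind the newest version reported by any fullnode (e.g. cache ends at 1_000_000
+        // while a fullnode reports 1_050_000). In that case get_transactions() below falls
+        // back to fetching directly from a fullnode instead of waiting for the cache.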
+ cache_next_version + 20000 < self.metadata_manager.get_known_latest_version() + } + + pub(crate) async fn get_transactions( + &self, + start_version: u64, + max_size: usize, + ) -> Result> { + let cache = self.cache.read().await; + let cache_start_version = cache.start_version; + let cache_next_version = cache_start_version + cache.transactions.len() as u64; + drop(cache); + + if start_version >= cache_start_version { + if start_version >= cache_next_version { + // If lagging, try to fetch the data from FN. + if self.lagging(cache_next_version) { + debug!("GrpcManager is lagging, getting data from FN, requested_version: {start_version}, cache_next_version: {cache_next_version}."); + let request = GetTransactionsFromNodeRequest { + starting_version: Some(cache_next_version), + transactions_count: Some(5000), + }; + + let mut fullnode_client = self.metadata_manager.get_fullnode_for_request(); + let response = fullnode_client.get_transactions_from_node(request).await?; + let mut response = response.into_inner(); + while let Some(Ok(response_item)) = response.next().await { + if let Some(response) = response_item.response { + match response { + Response::Data(data) => { + return Ok(data.transactions); + }, + Response::Status(_) => continue, + } + } + } + } + + // Let client side to retry. + return Ok(vec![]); + } + // NOTE: We are not holding the read lock for cache here. Therefore it's possible that + // the start_version becomes older than the cache.start_version. In that case the + // following function will return empty return, and let the client to retry. + return Ok(self + .get_transactions_from_cache( + start_version, + max_size, + /*update_file_store_version=*/ false, + ) + .await); + } + + let (tx, mut rx) = channel(1); + self.file_store_reader + .get_transaction_batch( + start_version, + /*retries=*/ 3, + /*max_files=*/ Some(1), + tx, + ) + .await; + + if let Some((transactions, _)) = rx.recv().await { + debug!( + "Transactions returned from filestore: [{start_version}, {}).", + transactions.last().unwrap().version + ); + let first_version = transactions.first().unwrap().version; + ensure!( + first_version == start_version, + "Version doesn't match, something is wrong." + ); + Ok(transactions) + } else { + let error_msg = "Failed to fetch transactions from filestore, either filestore is not available, or data is corrupted."; + // TODO(grao): Consider downgrade this to warn! if this happens too frequently when + // filestore is unavailable. 
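+            // Reaching this branch means the request is older than anything in the cache and
+            // the file store fetch returned nothing, so the error is propagated to the caller
+            // rather than retried here.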
+ error!(error_msg); + bail!(error_msg); + } + } + + pub(crate) async fn get_transactions_from_cache( + &self, + start_version: u64, + max_size: usize, + update_file_store_version: bool, + ) -> Vec { + self.cache + .read() + .await + .get_transactions(start_version, max_size, update_file_store_version) + } + + pub(crate) async fn get_file_store_version(&self) -> u64 { + self.file_store_reader.get_latest_version().await.unwrap() + } + + pub(crate) async fn cache_stats(&self) -> String { + let cache = self.cache.read().await; + let len = cache.transactions.len() as u64; + format!( + "cache version: [{}, {}), # of txns: {}, file store version: {}, cache size: {}", + cache.start_version, + cache.start_version + len, + len, + cache.file_store_version.load(Ordering::SeqCst), + cache.cache_size + ) + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-manager/src/file_store_uploader.rs b/ecosystem/indexer-grpc/indexer-grpc-manager/src/file_store_uploader.rs new file mode 100644 index 0000000000000..235505b870f6b --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-manager/src/file_store_uploader.rs @@ -0,0 +1,221 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::data_manager::DataManager; +use anyhow::Result; +use aptos_indexer_grpc_utils::{ + compression_util::{FileEntry, StorageFormat}, + config::IndexerGrpcFileStoreConfig, + file_store_operator_v2::{ + BatchMetadata, FileStoreMetadata, FileStoreOperatorV2, FileStoreReader, IFileStore, + METADATA_FILE_NAME, + }, +}; +use aptos_protos::transaction::v1::Transaction; +use std::{path::PathBuf, sync::Arc, time::Duration}; +use tokio::{sync::mpsc::channel, time::Instant}; +use tracing::info; + +const NUM_TXNS_PER_FOLDER: u64 = 100000; +const MAX_SIZE_PER_FILE: usize = 20 * (1 << 20); +const MAX_NUM_FOLDERS_TO_CHECK_FOR_RECOVERY: usize = 5; + +pub(crate) struct FileStoreUploader { + chain_id: u64, + reader: FileStoreReader, + // TODO(grao): Change to IFileStoreReader when the trait_upcasting feature is in stable Rust. + writer: Arc, + + last_batch_metadata_update_time: Option, + last_metadata_update_time: Instant, +} + +impl FileStoreUploader { + pub(crate) async fn new( + chain_id: u64, + file_store_config: IndexerGrpcFileStoreConfig, + ) -> Result { + let file_store = file_store_config.create_filestore().await; + if !file_store.is_initialized().await { + info!( + chain_id = chain_id, + "FileStore is not initialized, initializing..." + ); + info!("Transactions per folder: {NUM_TXNS_PER_FOLDER}."); + let metadata = FileStoreMetadata { + chain_id, + num_transactions_per_folder: NUM_TXNS_PER_FOLDER, + version: 0, + }; + let raw_data = serde_json::to_vec(&metadata).unwrap(); + file_store + .save_raw_file(PathBuf::from(METADATA_FILE_NAME), raw_data) + .await + .unwrap_or_else(|e| panic!("Failed to initialize FileStore: {e:?}.")); + } + + let reader = FileStoreReader::new(chain_id, file_store.clone()).await; + // NOTE: We cannot change NUM_TXNS_PER_FOLDER without backfilling the data, put a check + // here to make sure we don't change it accidentally. 
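+        // The folder size is read back from the metadata file written on first initialization
+        // above, so a store created with a different NUM_TXNS_PER_FOLDER fails fast here
+        // instead of being silently mixed with the new layout.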
+ assert_eq!( + reader + .get_file_store_metadata() + .await + .unwrap() + .num_transactions_per_folder, + NUM_TXNS_PER_FOLDER + ); + + Ok(Self { + chain_id, + reader, + writer: file_store, + last_batch_metadata_update_time: None, + last_metadata_update_time: Instant::now(), + }) + } + + async fn recover(&self) -> Result<(u64, BatchMetadata)> { + let mut version = self + .reader + .get_latest_version() + .await + .expect("Latest version must exist."); + let mut num_folders_checked = 0; + let mut buffered_batch_metadata_to_recover = BatchMetadata::default(); + while let Some(batch_metadata) = self.reader.get_batch_metadata(version).await { + let batch_last_version = batch_metadata.files.last().unwrap().1; + version = batch_last_version; + if version % NUM_TXNS_PER_FOLDER != 0 { + buffered_batch_metadata_to_recover = batch_metadata; + break; + } + num_folders_checked += 1; + if num_folders_checked >= MAX_NUM_FOLDERS_TO_CHECK_FOR_RECOVERY { + panic!( + "File store metadata is way behind batch metadata, data might be corrupted." + ); + } + } + + self.update_file_store_metadata(version).await?; + + Ok((version, buffered_batch_metadata_to_recover)) + } + + pub(crate) async fn start(&mut self, data_manager: Arc) -> Result<()> { + let (version, batch_metadata) = self.recover().await?; + + let mut file_store_operator = FileStoreOperatorV2::new( + MAX_SIZE_PER_FILE, + NUM_TXNS_PER_FOLDER, + version, + batch_metadata, + ) + .await; + tokio_scoped::scope(|s| { + let (tx, mut rx) = channel(5); + s.spawn(async move { + while let Some((transactions, batch_metadata, end_batch)) = rx.recv().await { + self.do_upload(transactions, batch_metadata, end_batch) + .await + .unwrap(); + } + }); + s.spawn(async move { + loop { + let transactions = data_manager + .get_transactions_from_cache( + file_store_operator.version(), + MAX_SIZE_PER_FILE, + /*update_file_store_version=*/ true, + ) + .await; + let len = transactions.len(); + for transaction in transactions { + file_store_operator + .buffer_and_maybe_dump_transactions_to_file(transaction, tx.clone()) + .await + .unwrap(); + } + if len == 0 { + tokio::time::sleep(Duration::from_millis(100)).await; + } + } + }); + }); + + Ok(()) + } + + async fn do_upload( + &mut self, + transactions: Vec, + batch_metadata: BatchMetadata, + end_batch: bool, + ) -> Result<()> { + let first_version = transactions.first().unwrap().version; + let last_version = transactions.last().unwrap().version; + let data_file = + FileEntry::from_transactions(transactions, StorageFormat::Lz4CompressedProto); + let path = self.reader.get_path_for_version(first_version); + + info!("Dumping transactions [{first_version}, {last_version}] to file {path:?}."); + + self.writer + .save_raw_file(path, data_file.into_inner()) + .await?; + + let mut update_batch_metadata = false; + let max_update_frequency = self.writer.max_update_frequency(); + if self.last_batch_metadata_update_time.is_none() + || Instant::now() - self.last_batch_metadata_update_time.unwrap() + >= max_update_frequency + { + update_batch_metadata = true; + } else if end_batch { + update_batch_metadata = true; + tokio::time::sleep_until( + self.last_batch_metadata_update_time.unwrap() + max_update_frequency, + ) + .await; + } + + if update_batch_metadata { + let batch_metadata_path = self.reader.get_path_for_batch_metadata(first_version); + self.writer + .save_raw_file( + batch_metadata_path, + serde_json::to_vec(&batch_metadata).map_err(anyhow::Error::msg)?, + ) + .await?; + + if end_batch { + self.last_batch_metadata_update_time = None; + 
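+            // Finishing a batch clears the timer so the next batch's first metadata write is
+            // not throttled; mid-batch writes are rate limited to the store's
+            // max_update_frequency (the sleep_until above enforces the same spacing before an
+            // end-of-batch flush).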
} else { + self.last_batch_metadata_update_time = Some(Instant::now()); + } + + if Instant::now() - self.last_metadata_update_time >= max_update_frequency { + self.update_file_store_metadata(last_version + 1).await?; + self.last_metadata_update_time = Instant::now(); + } + } + + Ok(()) + } + + /// Updates the file store metadata. + async fn update_file_store_metadata(&self, version: u64) -> Result<()> { + let metadata = FileStoreMetadata { + chain_id: self.chain_id, + num_transactions_per_folder: NUM_TXNS_PER_FOLDER, + version, + }; + + let raw_data = serde_json::to_vec(&metadata).map_err(anyhow::Error::msg)?; + self.writer + .save_raw_file(PathBuf::from(METADATA_FILE_NAME), raw_data) + .await + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-manager/src/grpc_manager.rs b/ecosystem/indexer-grpc/indexer-grpc-manager/src/grpc_manager.rs index 9d04ab15abf04..014e2d80fe41c 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-manager/src/grpc_manager.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-manager/src/grpc_manager.rs @@ -3,11 +3,15 @@ use crate::{ config::{IndexerGrpcManagerConfig, ServiceConfig}, + data_manager::DataManager, + file_store_uploader::FileStoreUploader, + metadata_manager::MetadataManager, service::GrpcManagerService, }; use anyhow::Result; use aptos_protos::indexer::v1::grpc_manager_server::GrpcManagerServer; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; +use tokio::sync::Mutex; use tonic::{codec::CompressionEncoding, transport::Server}; use tracing::info; @@ -16,25 +20,88 @@ const HTTP2_PING_TIMEOUT_DURATION: Duration = Duration::from_secs(10); pub(crate) struct GrpcManager { chain_id: u64, + filestore_uploader: Mutex, + metadata_manager: Arc, + data_manager: Arc, } impl GrpcManager { pub(crate) async fn new(config: &IndexerGrpcManagerConfig) -> Self { let chain_id = config.chain_id; + let filestore_uploader = Mutex::new( + FileStoreUploader::new(chain_id, config.file_store_config.clone()) + .await + .expect(&format!( + "Failed to create filestore uploader, config: {:?}.", + config.file_store_config + )), + ); - Self { chain_id } + info!( + chain_id = chain_id, + "FilestoreUploader is created, config: {:?}.", config.file_store_config + ); + + let metadata_manager = Arc::new(MetadataManager::new( + chain_id, + config.self_advertised_address.clone(), + config.grpc_manager_addresses.clone(), + config.fullnode_addresses.clone(), + )); + + info!( + self_advertised_address = config.self_advertised_address, + "MetadataManager is created, grpc_manager_addresses: {:?}, fullnode_addresses: {:?}.", + config.grpc_manager_addresses, + config.fullnode_addresses + ); + + let data_manager = Arc::new( + DataManager::new( + chain_id, + config.file_store_config.clone(), + config.cache_config.clone(), + metadata_manager.clone(), + ) + .await, + ); + + info!("DataManager is created."); + + Self { + chain_id, + filestore_uploader, + metadata_manager, + data_manager, + } } pub(crate) fn start(&self, service_config: &ServiceConfig) -> Result<()> { - let service = GrpcManagerServer::new(GrpcManagerService::new(self.chain_id)) - .send_compressed(CompressionEncoding::Zstd) - .accept_compressed(CompressionEncoding::Zstd); + let service = GrpcManagerServer::new(GrpcManagerService::new( + self.chain_id, + self.metadata_manager.clone(), + self.data_manager.clone(), + )) + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd); let server = Server::builder() .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) 
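+            // Server-initiated HTTP/2 keepalive pings keep long-lived connections from being
+            // torn down while idle; the timeout below closes a connection whose ping goes
+            // unacknowledged.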
.http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) .add_service(service); tokio_scoped::scope(|s| { + s.spawn(async move { + self.metadata_manager.start().await.unwrap(); + }); + s.spawn(async move { self.data_manager.start().await }); + s.spawn(async move { + self.filestore_uploader + .lock() + .await + .start(self.data_manager.clone()) + .await + .unwrap(); + }); s.spawn(async move { info!("Starting GrpcManager at {}.", service_config.listen_address); server.serve(service_config.listen_address).await.unwrap(); @@ -43,4 +110,12 @@ impl GrpcManager { Ok(()) } + + pub(crate) fn get_metadata_manager(&self) -> &MetadataManager { + &self.metadata_manager + } + + pub(crate) fn get_data_manager(&self) -> &DataManager { + &self.data_manager + } } diff --git a/ecosystem/indexer-grpc/indexer-grpc-manager/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-manager/src/lib.rs index 789d6b0bc6d93..d51fb4b19550c 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-manager/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-manager/src/lib.rs @@ -2,7 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 pub mod config; +pub mod data_manager; +pub mod file_store_uploader; mod grpc_manager; +pub mod metadata_manager; mod service; #[cfg(test)] mod test; diff --git a/ecosystem/indexer-grpc/indexer-grpc-manager/src/metadata_manager.rs b/ecosystem/indexer-grpc/indexer-grpc-manager/src/metadata_manager.rs new file mode 100644 index 0000000000000..9dd3cbc1d4544 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-manager/src/metadata_manager.rs @@ -0,0 +1,409 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::{bail, Result}; +use aptos_indexer_grpc_utils::timestamp_now_proto; +use aptos_protos::{ + indexer::v1::{ + data_service_client::DataServiceClient, grpc_manager_client::GrpcManagerClient, + service_info::Info, FullnodeInfo, GrpcManagerInfo, HeartbeatRequest, + HistoricalDataServiceInfo, LiveDataServiceInfo, PingDataServiceRequest, ServiceInfo, + }, + internal::fullnode::v1::{fullnode_data_client::FullnodeDataClient, PingFullnodeRequest}, + util::timestamp::Timestamp, +}; +use dashmap::DashMap; +use rand::prelude::*; +use std::{ + collections::{HashMap, VecDeque}, + sync::atomic::{AtomicU64, Ordering}, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; +use tonic::transport::channel::Channel; +use tracing::trace; + +const MAX_NUM_OF_STATES_TO_KEEP: usize = 100; + +struct Peer { + client: GrpcManagerClient, + recent_states: VecDeque, +} + +impl Peer { + fn new(address: String) -> Self { + let channel = Channel::from_shared(address) + .expect("Bad address.") + .connect_lazy(); + let client = GrpcManagerClient::new(channel); + Self { + client, + recent_states: VecDeque::new(), + } + } +} + +struct Fullnode { + client: FullnodeDataClient, + recent_states: VecDeque, +} + +impl Fullnode { + fn new(address: String) -> Self { + let channel = Channel::from_shared(address) + .expect("Bad address.") + .connect_lazy(); + let client = FullnodeDataClient::new(channel); + Self { + client, + recent_states: VecDeque::new(), + } + } +} + +struct LiveDataService { + client: DataServiceClient, + recent_states: VecDeque, +} + +impl LiveDataService { + fn new(address: String) -> Self { + let channel = Channel::from_shared(address) + .expect("Bad address.") + .connect_lazy(); + let client = DataServiceClient::new(channel); + Self { + client, + recent_states: VecDeque::new(), + } + } +} + +struct HistoricalDataService { + client: DataServiceClient, + recent_states: VecDeque, +} + +impl 
HistoricalDataService { + fn new(address: String) -> Self { + let channel = Channel::from_shared(address) + .expect("Bad address.") + .connect_lazy(); + let client = DataServiceClient::new(channel); + Self { + client, + recent_states: VecDeque::new(), + } + } +} + +pub(crate) struct MetadataManager { + chain_id: u64, + self_advertised_address: String, + grpc_managers: DashMap, + fullnodes: DashMap, + live_data_services: DashMap, + historical_data_services: DashMap, + known_latest_version: AtomicU64, +} + +impl MetadataManager { + pub(crate) fn new( + chain_id: u64, + self_advertised_address: String, + grpc_manager_addresses: Vec, + fullnode_addresses: Vec, + ) -> Self { + let grpc_managers = DashMap::new(); + for address in grpc_manager_addresses { + grpc_managers.insert(address.clone(), Peer::new(address)); + } + let fullnodes = DashMap::new(); + for address in fullnode_addresses { + fullnodes.insert(address.clone(), Fullnode::new(address)); + } + Self { + chain_id, + self_advertised_address, + grpc_managers, + fullnodes, + live_data_services: DashMap::new(), + historical_data_services: DashMap::new(), + known_latest_version: AtomicU64::new(0), + } + } + + fn need_ping(latest_state_timestamp: Timestamp, threshold: Duration) -> bool { + let latest_state_timestamp_since_epoch = Duration::new( + latest_state_timestamp.seconds as u64, + latest_state_timestamp.nanos as u32, + ); + let now_since_epoch = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + let staleness = now_since_epoch.saturating_sub(latest_state_timestamp_since_epoch); + + staleness >= threshold + } + + pub(crate) async fn start(&self) -> Result<()> { + loop { + tokio_scoped::scope(|s| { + for kv in &self.grpc_managers { + let grpc_manager = kv.value(); + let client = grpc_manager.client.clone(); + s.spawn(async move { + let _ = self.heartbeat(client).await; + }); + } + + for kv in &self.fullnodes { + let (address, fullnode) = kv.pair(); + let need_ping = fullnode.recent_states.back().map_or(true, |s| { + Self::need_ping(s.timestamp.unwrap_or_default(), Duration::from_secs(5)) + }); + if need_ping { + let address = address.clone(); + let client = fullnode.client.clone(); + s.spawn(async move { + let _ = self.ping_fullnode(address, client).await; + }); + } + } + + for kv in &self.live_data_services { + let (address, live_data_service) = kv.pair(); + let need_ping = live_data_service.recent_states.back().map_or(true, |s| { + Self::need_ping(s.timestamp.unwrap_or_default(), Duration::from_secs(5)) + }); + if need_ping { + let address = address.clone(); + let client = live_data_service.client.clone(); + s.spawn(async move { + let _ = self.ping_live_data_service(address, client).await; + }); + } + } + + for kv in &self.historical_data_services { + let (address, historical_data_service) = kv.pair(); + let need_ping = + historical_data_service + .recent_states + .back() + .map_or(true, |s| { + Self::need_ping( + s.timestamp.unwrap_or_default(), + Duration::from_secs(5), + ) + }); + if need_ping { + let address = address.clone(); + let client = historical_data_service.client.clone(); + s.spawn(async move { + let _ = self.ping_historical_data_service(address, client).await; + }); + } + } + }); + + tokio::time::sleep(Duration::from_secs(1)).await; + } + } + + pub(crate) fn handle_heartbeat(&self, address: String, info: Info) -> Result<()> { + match info { + Info::LiveDataServiceInfo(info) => self.handle_live_data_service_info(address, info), + Info::HistoricalDataServiceInfo(info) => { + 
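+                // Each handler below appends the reported state to that peer's recent_states
+                // deque (registering the peer on first contact and capping the deque at
+                // MAX_NUM_OF_STATES_TO_KEEP); fullnode reports also advance
+                // known_latest_version.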
+                self.handle_historical_data_service_info(address, info)
+            },
+            Info::FullnodeInfo(info) => self.handle_fullnode_info(address, info),
+            Info::GrpcManagerInfo(info) => self.handle_grpc_manager_info(address, info),
+        }
+    }
+
+    pub(crate) fn get_fullnode_for_request(&self) -> FullnodeDataClient<Channel> {
+        let mut rng = thread_rng();
+        // TODO(grao): Filter out bad FNs.
+        self.fullnodes
+            .iter()
+            .choose(&mut rng)
+            .map(|kv| kv.value().client.clone())
+            .unwrap()
+    }
+
+    pub(crate) fn get_fullnodes_info(&self) -> HashMap<String, VecDeque<FullnodeInfo>> {
+        self.fullnodes
+            .iter()
+            .map(|entry| (entry.key().clone(), entry.value().recent_states.clone()))
+            .collect()
+    }
+
+    pub(crate) fn get_live_data_services_info(
+        &self,
+    ) -> HashMap<String, VecDeque<LiveDataServiceInfo>> {
+        self.live_data_services
+            .iter()
+            .map(|entry| (entry.key().clone(), entry.value().recent_states.clone()))
+            .collect()
+    }
+
+    pub(crate) fn get_historical_data_services_info(
+        &self,
+    ) -> HashMap<String, VecDeque<HistoricalDataServiceInfo>> {
+        self.historical_data_services
+            .iter()
+            .map(|entry| (entry.key().clone(), entry.value().recent_states.clone()))
+            .collect()
+    }
+
+    pub(crate) fn get_known_latest_version(&self) -> u64 {
+        self.known_latest_version.load(Ordering::SeqCst)
+    }
+
+    fn update_known_latest_version(&self, version: u64) {
+        self.known_latest_version
+            .fetch_max(version, Ordering::SeqCst);
+    }
+
+    async fn heartbeat(&self, mut client: GrpcManagerClient<Channel>) -> Result<()> {
+        let grpc_manager_info = GrpcManagerInfo {
+            chain_id: self.chain_id,
+            timestamp: Some(timestamp_now_proto()),
+            known_latest_version: Some(self.get_known_latest_version()),
+            master_address: None,
+        };
+        let service_info = ServiceInfo {
+            address: Some(self.self_advertised_address.clone()),
+            info: Some(Info::GrpcManagerInfo(grpc_manager_info)),
+        };
+        let request = HeartbeatRequest {
+            service_info: Some(service_info),
+        };
+        let _ = client.heartbeat(request).await?;
+
+        Ok(())
+    }
+
+    async fn ping_fullnode(
+        &self,
+        address: String,
+        mut client: FullnodeDataClient<Channel>,
+    ) -> Result<()> {
+        trace!("Pinging fullnode {address}.");
+        let request = PingFullnodeRequest {};
+        let response = client.ping(request).await?;
+        if let Some(info) = response.into_inner().info {
+            self.handle_fullnode_info(address, info)
+        } else {
+            bail!("Bad response.")
+        }
+    }
+
+    async fn ping_live_data_service(
+        &self,
+        address: String,
+        mut client: DataServiceClient<Channel>,
+    ) -> Result<()> {
+        let request = PingDataServiceRequest {
+            known_latest_version: Some(self.get_known_latest_version()),
+            ping_live_data_service: true,
+        };
+        let response = client.ping(request).await?;
+        if let Some(info) = response.into_inner().info {
+            match info {
+                aptos_protos::indexer::v1::ping_data_service_response::Info::LiveDataServiceInfo(info) => {
+                    self.handle_live_data_service_info(address, info)
+                },
+                _ => bail!("Bad response."),
+            }
+        } else {
+            bail!("Bad response.")
+        }
+    }
+
+    async fn ping_historical_data_service(
+        &self,
+        address: String,
+        mut client: DataServiceClient<Channel>,
+    ) -> Result<()> {
+        let request = PingDataServiceRequest {
+            known_latest_version: Some(self.get_known_latest_version()),
+            ping_live_data_service: false,
+        };
+        let response = client.ping(request).await?;
+        if let Some(info) = response.into_inner().info {
+            match info {
+                aptos_protos::indexer::v1::ping_data_service_response::Info::HistoricalDataServiceInfo(info) => {
+                    self.handle_historical_data_service_info(address, info)
+                },
+                _ => bail!("Bad response."),
+            }
+        } else {
+            bail!("Bad response.")
+        }
+    }
+
+    fn handle_live_data_service_info(
+        &self,
+        address: String,
+        info: LiveDataServiceInfo,
+    ) -> Result<()> {
+        let mut entry = self
+            .live_data_services
+            .entry(address.clone())
+            .or_insert(LiveDataService::new(address));
+        entry.value_mut().recent_states.push_back(info);
+        if entry.value().recent_states.len() > MAX_NUM_OF_STATES_TO_KEEP {
+            entry.value_mut().recent_states.pop_front();
+        }
+
+        Ok(())
+    }
+
+    fn handle_historical_data_service_info(
+        &self,
+        address: String,
+        info: HistoricalDataServiceInfo,
+    ) -> Result<()> {
+        let mut entry = self
+            .historical_data_services
+            .entry(address.clone())
+            .or_insert(HistoricalDataService::new(address));
+        entry.value_mut().recent_states.push_back(info);
+        if entry.value().recent_states.len() > MAX_NUM_OF_STATES_TO_KEEP {
+            entry.value_mut().recent_states.pop_front();
+        }
+
+        Ok(())
+    }
+
+    fn handle_fullnode_info(&self, address: String, info: FullnodeInfo) -> Result<()> {
+        let mut entry = self
+            .fullnodes
+            .entry(address.clone())
+            .or_insert(Fullnode::new(address.clone()));
+        entry.value_mut().recent_states.push_back(info);
+        if let Some(known_latest_version) = info.known_latest_version {
+            trace!(
+                "Received known_latest_version ({known_latest_version}) from fullnode {address}."
+            );
+            self.update_known_latest_version(known_latest_version);
+        }
+        if entry.value().recent_states.len() > MAX_NUM_OF_STATES_TO_KEEP {
+            entry.value_mut().recent_states.pop_front();
+        }
+
+        Ok(())
+    }
+
+    fn handle_grpc_manager_info(&self, address: String, info: GrpcManagerInfo) -> Result<()> {
+        let mut entry = self
+            .grpc_managers
+            .entry(address.clone())
+            .or_insert(Peer::new(address));
+        entry.value_mut().recent_states.push_back(info);
+        if entry.value().recent_states.len() > MAX_NUM_OF_STATES_TO_KEEP {
+            entry.value_mut().recent_states.pop_front();
+        }
+
+        Ok(())
+    }
+}
diff --git a/ecosystem/indexer-grpc/indexer-grpc-manager/src/service.rs b/ecosystem/indexer-grpc/indexer-grpc-manager/src/service.rs
index d50eca972d6df..d4b77f1c16148 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-manager/src/service.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-manager/src/service.rs
@@ -1,39 +1,111 @@
 // Copyright (c) Aptos Foundation
 // SPDX-License-Identifier: Apache-2.0

+use crate::{data_manager::DataManager, metadata_manager::MetadataManager};
 use aptos_protos::indexer::v1::{
     grpc_manager_server::GrpcManager, service_info::Info, GetDataServiceForRequestRequest,
     GetDataServiceForRequestResponse, GetTransactionsRequest, HeartbeatRequest, HeartbeatResponse,
     TransactionsResponse,
 };
+use rand::{thread_rng, Rng};
+use std::sync::Arc;
 use tonic::{Request, Response, Status};

+const MAX_BATCH_SIZE: usize = 5 * (1 << 20);
+
 pub struct GrpcManagerService {
     chain_id: u64,
+    metadata_manager: Arc<MetadataManager>,
+    data_manager: Arc<DataManager>,
 }

 impl GrpcManagerService {
-    pub(crate) fn new(chain_id: u64) -> Self {
-        Self { chain_id }
+    pub(crate) fn new(
+        chain_id: u64,
+        metadata_manager: Arc<MetadataManager>,
+        data_manager: Arc<DataManager>,
+    ) -> Self {
+        Self {
+            chain_id,
+            metadata_manager,
+            data_manager,
+        }
+    }

     async fn handle_heartbeat(
         &self,
-        _address: String,
-        _info: Info,
+        address: String,
+        info: Info,
     ) -> anyhow::Result<Response<HeartbeatResponse>> {
-        // TODO(grao): Implement.
-        todo!()
+        self.metadata_manager.handle_heartbeat(address, info)?;
+
+        Ok(Response::new(HeartbeatResponse {
+            known_latest_version: Some(self.metadata_manager.get_known_latest_version()),
+        }))
+    }
+
+    fn pick_data_service_from_candidate(candidates: Vec<(String, usize)>) -> Option<String> {
+        if candidates.is_empty() {
+            return None;
+        }
+
+        // TODO(grao): This is a magic number, consider a different algorithm here.
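+        // Weighted random pick below: each candidate's weight is its spare capacity
+        // (`capacity - num_active_streams`), so less loaded data services are favored.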
+        let capacity = std::cmp::max(candidates.iter().map(|c| c.1).max().unwrap() + 2, 20);
+
+        let total_capacity: usize = candidates.iter().map(|c| capacity - c.1).sum();
+
+        let mut rng = thread_rng();
+        let pick = rng.gen_range(0, total_capacity);
+
+        let mut cumulative_weight = 0;
+        for candidate in candidates {
+            cumulative_weight += capacity - candidate.1;
+            if pick < cumulative_weight {
+                return Some(candidate.0);
+            }
+        }
+
+        unreachable!();
    }

-    fn pick_live_data_service(&self, _starting_version: u64) -> Option<String> {
-        // TODO(grao): Implement.
-        todo!()
+    fn pick_live_data_service(&self, starting_version: u64) -> Option<String> {
+        let mut candidates = vec![];
+        for candidate in self.metadata_manager.get_live_data_services_info() {
+            if let Some(info) = candidate.1.back().as_ref() {
+                // TODO(grao): Handle the case when the requested starting version is beyond the
+                // latest version.
+                if info.min_servable_version.is_none()
+                    || starting_version < info.min_servable_version.unwrap()
+                {
+                    continue;
+                }
+                // TODO(grao): Validate the data at the metadata manager side to make sure
+                // stream_info is always available.
+                let num_active_streams = info.stream_info.as_ref().unwrap().active_streams.len();
+                candidates.push((candidate.0, num_active_streams));
+            }
+        }
+
+        Self::pick_data_service_from_candidate(candidates)
    }

-    async fn pick_historical_data_service(&self, _starting_version: u64) -> Option<String> {
-        // TODO(grao): Implement.
-        todo!()
+    async fn pick_historical_data_service(&self, starting_version: u64) -> Option<String> {
+        let file_store_version = self.data_manager.get_file_store_version().await;
+        if starting_version >= file_store_version {
+            return None;
+        }
+
+        let mut candidates = vec![];
+        for candidate in self.metadata_manager.get_historical_data_services_info() {
+            if let Some(info) = candidate.1.back().as_ref() {
+                // TODO(grao): Validate the data at the metadata manager side to make sure
+                // stream_info is always available.
+                let num_active_streams = info.stream_info.as_ref().unwrap().active_streams.len();
+                candidates.push((candidate.0, num_active_streams));
+            }
+        }
+
+        Self::pick_data_service_from_candidate(candidates)
    }
 }

@@ -50,7 +122,7 @@ impl GrpcManager for GrpcManagerService {
                return self
                    .handle_heartbeat(address, info)
                    .await
-                    .map_err(|e| Status::internal(format!("Error handling heartbeat: {e}")));
+                    .map_err(|e| Status::internal(&format!("Error handling heartbeat: {e}")));
            }
        }
    }
@@ -62,9 +134,12 @@ async fn get_transactions(
        &self,
        request: Request<GetTransactionsRequest>,
    ) -> Result<Response<TransactionsResponse>, Status> {
-        let _request = request.into_inner();
-        let transactions = vec![];
-        // TODO(grao): Implement.
+        let request = request.into_inner();
+        let transactions = self
+            .data_manager
+            .get_transactions(request.starting_version(), MAX_BATCH_SIZE)
+            .await
+            .map_err(|e| Status::internal(format!("{e}")))?;

        Ok(Response::new(TransactionsResponse {
            transactions,
diff --git a/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs
index ce4a68249e82a..80197d83d2e7f 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs
@@ -13,7 +13,7 @@ use std::convert::Infallible;
 use std::{fs::File, io::Read, panic::PanicInfo, path::PathBuf, process};
 use tracing::error;
 use tracing_subscriber::EnvFilter;
-use warp::{http::Response, Filter};
+use warp::{http::Response, reply::Reply, Filter};

 /// ServerArgs bootstraps a server with all common pieces. And then triggers the run method for
 /// the specific service.
@@ -45,8 +45,9 @@ where
 {
     let health_port = config.health_check_port;
     // Start liveness and readiness probes.
+    let config_clone = config.clone();
     let task_handler = tokio::spawn(async move {
-        register_probes_and_metrics_handler(health_port).await;
+        register_probes_and_metrics_handler(config_clone, health_port).await;
         anyhow::Ok(())
     });
     let main_task_handler =
@@ -71,7 +72,7 @@ where
     }
 }

-#[derive(Deserialize, Debug, Serialize)]
+#[derive(Deserialize, Clone, Debug, Serialize)]
 pub struct GenericConfig<T> {
     // Shared configuration among all services.
     pub health_check_port: u16,
@@ -96,11 +97,15 @@ where
     fn get_server_name(&self) -> String {
         self.server_config.get_server_name()
     }
+
+    async fn status_page(&self) -> Result<warp::reply::Response, warp::Rejection> {
+        self.server_config.status_page().await
+    }
 }

 /// RunnableConfig is a trait that all services must implement for their configuration.
 #[async_trait::async_trait]
-pub trait RunnableConfig: DeserializeOwned + Send + Sync + 'static {
+pub trait RunnableConfig: Clone + DeserializeOwned + Send + Sync + 'static {
     // Validate the config.
     fn validate(&self) -> Result<()> {
         Ok(())
@@ -111,6 +116,10 @@ pub trait RunnableConfig: DeserializeOwned + Send + Sync + 'static {
     // Get the server name.
     fn get_server_name(&self) -> String;
+
+    async fn status_page(&self) -> Result<warp::reply::Response, warp::Rejection> {
+        Ok("Status page is not found.".into_response())
+    }
 }

 /// Parse a yaml file into a struct.
@@ -181,7 +190,10 @@ pub fn setup_logging(make_writer: Option Box }
 }

 /// Register readiness and liveness probes and set up metrics endpoint.
-async fn register_probes_and_metrics_handler(port: u16) {
+async fn register_probes_and_metrics_handler<C>(config: GenericConfig<C>, port: u16)
+where
+    C: RunnableConfig,
+{
     let readiness = warp::path("readiness")
         .map(move || warp::reply::with_status("ready", warp::http::StatusCode::OK));

@@ -201,6 +213,11 @@ async fn register_probes_and_metrics_handler(port: u16) {
             .body(encode_buffer)
     });

+    let status_endpoint = warp::path::end().and_then(move || {
+        let config = config.clone();
+        async move { config.status_page().await }
+    });
+
     if cfg!(target_os = "linux") {
         #[cfg(target_os = "linux")]
         let profilez = warp::path("profilez").and_then(|| async move {
@@ -228,11 +245,16 @@
             })
         });
         #[cfg(target_os = "linux")]
-        warp::serve(readiness.or(metrics_endpoint).or(profilez))
-            .run(([0, 0, 0, 0], port))
-            .await;
+        warp::serve(
+            readiness
+                .or(metrics_endpoint)
+                .or(status_endpoint)
+                .or(profilez),
+        )
+        .run(([0, 0, 0, 0], port))
+        .await;
     } else {
-        warp::serve(readiness.or(metrics_endpoint))
+        warp::serve(readiness.or(metrics_endpoint).or(status_endpoint))
             .run(([0, 0, 0, 0], port))
             .await;
     }
diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml
index 2c0516ddda804..bb541f3f279db 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml
+++ b/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml
@@ -19,6 +19,8 @@ aptos-protos = { workspace = true }
 async-trait = { workspace = true }
 backoff = { workspace = true }
 base64 = { workspace = true }
+build_html = { workspace = true }
+bytesize = { workspace = true }
 chrono = { workspace = true }
 cloud-storage = { workspace = true }
 dashmap = { workspace = true }
@@ -34,7 +36,9 @@ ripemd = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
 tokio = { workspace = true }
+tokio-stream = { workspace = true }
 tokio-util = { workspace = true }
 tonic = { workspace = true }
 tracing = { workspace = true }
 url = { workspace = true }
+warp = { workspace = true }
diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs
index b304c7f3e0999..4d0684b94313b 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs
@@ -380,14 +380,18 @@ impl CacheOperator {
         let start_time = std::time::Instant::now();
         let mut transactions = vec![];
         for encoded_transaction in encoded_transactions {
+            if encoded_transaction.is_empty() {
+                break;
+            }
             let cache_entry: CacheEntry = CacheEntry::new(encoded_transaction, self.storage_format);
             let transaction = cache_entry.into_transaction();
             transactions.push(transaction);
         }
+        /*
         ensure!(
             transactions.len() == transaction_count as usize,
             "Failed to get all transactions from cache."
-        );
+        );*/
         let decoding_duration = start_time.elapsed().as_secs_f64();
         Ok((transactions, io_duration, decoding_duration))
     }
diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs
index 07f528e6df124..826cbfb6f54a5 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs
@@ -65,6 +65,7 @@ impl FileStoreMetadata {
     }
 }

+#[derive(Debug)]
 pub enum CacheEntry {
     Lz4CompressionProto(Vec<u8>),
     // Only used for legacy cache entry.
@@ -147,7 +148,9 @@ impl CacheEntry {
                 decompressor
                     .read_to_end(&mut decompressed)
                     .expect("Lz4 decompression failed.");
-                Transaction::decode(decompressed.as_slice()).expect("proto deserialization failed.")
+                let res = Transaction::decode(decompressed.as_slice())
+                    .expect("proto deserialization failed.");
+                res
             },
             CacheEntry::Base64UncompressedProto(bytes) => {
                 let bytes: Vec<u8> = base64::decode(bytes).expect("base64 decoding failed.");
@@ -197,13 +200,14 @@ impl FileEntry {
             .first()
             .expect("Cannot build empty file")
             .version;
+        /*
         let transactions_count = transactions.len();
         if transactions_count % FILE_ENTRY_TRANSACTION_COUNT as usize != 0 {
             panic!("The number of transactions to upload has to be a multiple of FILE_ENTRY_TRANSACTION_COUNT.")
         }
         if starting_version % FILE_ENTRY_TRANSACTION_COUNT != 0 {
             panic!("Starting version has to be a multiple of FILE_ENTRY_TRANSACTION_COUNT.")
-        }
+        }*/
         match storage_format {
             StorageFormat::Lz4CompressedProto => {
                 let t = TransactionsInStorage {
diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/config.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/config.rs
index 56e32a33ab591..5a5e791b53e34 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/config.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/config.rs
@@ -4,6 +4,7 @@ use serde::{Deserialize, Serialize};
 /// Common configuration for Indexer GRPC Store.
 use std::path::PathBuf;
+use std::sync::Arc;

 #[derive(Serialize, Deserialize, Debug, Clone)]
 pub struct GcsFileStore {
@@ -43,6 +44,26 @@ impl Default for IndexerGrpcFileStoreConfig {
     }
 }

 impl IndexerGrpcFileStoreConfig {
+    pub async fn create_filestore(self) -> Arc<dyn IFileStore> {
+        match self {
+            IndexerGrpcFileStoreConfig::GcsFileStore(gcs_file_store) => Arc::new(
+                crate::file_store_operator_v2::gcs::GcsFileStore::new(
+                    gcs_file_store.gcs_file_store_bucket_name,
+                    gcs_file_store.gcs_file_store_bucket_sub_dir,
+                    gcs_file_store
+                        .gcs_file_store_service_account_key_path
+                        .clone(),
+                )
+                .await,
+            ),
+            IndexerGrpcFileStoreConfig::LocalFileStore(local_file_store) => {
+                Arc::new(crate::file_store_operator_v2::local::LocalFileStore::new(
+                    local_file_store.local_file_store_path,
+                ))
+            },
+        }
+    }
+
     pub fn create(&self) -> Box<dyn FileStoreOperator> {
         match self {
             IndexerGrpcFileStoreConfig::GcsFileStore(gcs_file_store) => {
diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/gcs.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/gcs.rs
new file mode 100644
index 0000000000000..a57dd31f60268
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/gcs.rs
@@ -0,0 +1,135 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::file_store_operator_v2::{IFileStoreReader, IFileStoreWriter};
+use anyhow::{bail, Result};
+use cloud_storage::{Bucket, ListRequest, Object};
+use futures::StreamExt;
+use std::{env, path::PathBuf};
+use tokio::time::Duration;
+use tracing::{info, trace};
+
+const JSON_FILE_TYPE: &str = "application/json";
+// The environment variable to set the service account path.
+const SERVICE_ACCOUNT_ENV_VAR: &str = "SERVICE_ACCOUNT";
+
+pub struct GcsFileStore {
+    bucket_name: String,
+    bucket_sub_dir: Option<PathBuf>,
+}
+
+impl GcsFileStore {
+    pub async fn new(
+        bucket_name: String,
+        bucket_sub_dir: Option<PathBuf>,
+        service_account_path: String,
+    ) -> Self {
+        env::set_var(SERVICE_ACCOUNT_ENV_VAR, service_account_path);
+
+        info!(
+            bucket_name = bucket_name,
+            "Verifying the bucket exists for GcsFileStore."
+        );
+
+        Bucket::read(&bucket_name)
+            .await
+            .expect("Failed to read bucket.");
+
+        info!(
+            bucket_name = bucket_name,
+            "Bucket exists, GcsFileStore is created."
+        );
+        Self {
+            bucket_name,
+            bucket_sub_dir,
+        }
+    }
+
+    fn get_path(&self, file_path: PathBuf) -> String {
+        if let Some(sub_dir) = &self.bucket_sub_dir {
+            let mut path = sub_dir.clone();
+            path.push(file_path);
+            path.to_string_lossy().into_owned()
+        } else {
+            file_path.to_string_lossy().into_owned()
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl IFileStoreReader for GcsFileStore {
+    fn tag(&self) -> &str {
+        "GCS"
+    }
+
+    async fn is_initialized(&self) -> bool {
+        let request = ListRequest {
+            max_results: Some(1),
+            prefix: self
+                .bucket_sub_dir
+                .clone()
+                .map(|p| p.to_string_lossy().into_owned()),
+            ..Default::default()
+        };
+
+        let response = Object::list(&self.bucket_name, request)
+            .await
+            .unwrap()
+            .boxed()
+            .next()
+            .await
+            .unwrap()
+            .unwrap();
+
+        !response.prefixes.is_empty() || !response.items.is_empty()
+    }
+
+    async fn get_raw_file(&self, file_path: PathBuf) -> Result<Option<Vec<u8>>> {
+        let path = self.get_path(file_path);
+        trace!(
+            "Downloading object at {}/{}.",
+            self.bucket_name,
+            path.as_str()
+        );
+        match Object::download(&self.bucket_name, path.as_str()).await {
+            Ok(file) => Ok(Some(file)),
+            Err(cloud_storage::Error::Other(err)) => {
+                if err.contains("No such object: ") {
+                    Ok(None)
+                } else {
+                    bail!("[Indexer File] Error happens when downloading file at {path:?}. {err}",);
+                }
+            },
+            Err(err) => {
+                bail!("[Indexer File] Error happens when downloading file at {path:?}. {err}");
+            },
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl IFileStoreWriter for GcsFileStore {
+    async fn save_raw_file(&self, file_path: PathBuf, data: Vec<u8>) -> Result<()> {
+        let path = self.get_path(file_path);
+        trace!(
+            "Uploading object to {}/{}.",
+            self.bucket_name,
+            path.as_str()
+        );
+        Object::create(
+            self.bucket_name.as_str(),
+            data,
+            path.as_str(),
+            JSON_FILE_TYPE,
+        )
+        .await
+        .map_err(anyhow::Error::msg)?;
+
+        Ok(())
+    }
+
+    fn max_update_frequency(&self) -> Duration {
+        // NOTE: GCS has rate limiting on per object update rate at once per second.
+        Duration::from_secs_f32(1.5)
+    }
+}
diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/local.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/local.rs
new file mode 100644
index 0000000000000..f398f3f1334d4
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/local.rs
@@ -0,0 +1,74 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::file_store_operator_v2::{IFileStoreReader, IFileStoreWriter};
+use anyhow::{bail, Result};
+use std::path::PathBuf;
+use tokio::time::Duration;
+use tracing::info;
+
+#[derive(Clone)]
+pub struct LocalFileStore {
+    path: PathBuf,
+}
+
+impl LocalFileStore {
+    pub fn new(path: PathBuf) -> Self {
+        info!(
+            path = path.to_str().unwrap(),
+            "Verifying the path exists for LocalFileStore."
+        );
+        if !path.exists() {
+            panic!("LocalFileStore path does not exist.");
+        }
+        Self { path }
+    }
+}
+
+#[async_trait::async_trait]
+impl IFileStoreReader for LocalFileStore {
+    fn tag(&self) -> &str {
+        "LOCAL"
+    }
+
+    async fn is_initialized(&self) -> bool {
+        tokio::fs::read_dir(&self.path)
+            .await
+            .unwrap()
+            .next_entry()
+            .await
+            .unwrap()
+            .is_some()
+    }
+
+    async fn get_raw_file(&self, file_path: PathBuf) -> Result<Option<Vec<u8>>> {
+        let file_path = self.path.join(file_path);
+        match tokio::fs::read(&file_path).await {
+            Ok(file) => Ok(Some(file)),
+            Err(err) => {
+                if err.kind() == std::io::ErrorKind::NotFound {
+                    Ok(None)
+                } else {
+                    bail!("[Indexer File] Error happens when getting file at {file_path:?}. {err}");
+                }
+            },
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl IFileStoreWriter for LocalFileStore {
+    async fn save_raw_file(&self, file_path: PathBuf, data: Vec<u8>) -> Result<()> {
+        let file_path = self.path.join(file_path);
+        if let Some(parent) = file_path.parent() {
+            tokio::fs::create_dir_all(parent).await?;
+        }
+        tokio::fs::write(file_path, data)
+            .await
+            .map_err(anyhow::Error::msg)
+    }
+
+    fn max_update_frequency(&self) -> Duration {
+        Duration::from_secs(0)
+    }
+}
diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/mod.rs
new file mode 100644
index 0000000000000..f73eb795e6643
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/mod.rs
@@ -0,0 +1,326 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod gcs;
+pub mod local;
+
+use crate::{
+    compression_util::{FileEntry, StorageFormat},
+    counters::TRANSACTION_STORE_FETCH_RETRIES,
+};
+use anyhow::Result;
+use aptos_protos::transaction::v1::Transaction;
+use prost::Message;
+use serde::{Deserialize, Serialize};
+use std::{
+    path::PathBuf,
+    sync::{
+        atomic::{AtomicU64, Ordering},
+        Arc,
+    },
+};
+use tokio::{sync::mpsc::Sender, time::Duration};
+use tracing::{error, trace};
+
+pub const METADATA_FILE_NAME: &str = "metadata.json";
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct FileStoreMetadata {
+    pub chain_id: u64,
+    pub num_transactions_per_folder: u64,
+    pub version: u64,
+}
+
+#[derive(Serialize, Deserialize, Default, Clone)]
+pub struct BatchMetadata {
+    // "[first_version, last_version), size_bytes"
+    pub files: Vec<(u64, u64, usize)>,
+}
+
+#[async_trait::async_trait]
+pub trait IFileStoreReader: Sync + Send {
+    /// The tag of the store, for logging.
+    fn tag(&self) -> &str;
+
+    async fn is_initialized(&self) -> bool;
+
+    async fn get_raw_file(&self, file_path: PathBuf) -> Result<Option<Vec<u8>>>;
+}
+
+#[async_trait::async_trait]
+pub trait IFileStoreWriter: Sync + Send {
+    async fn save_raw_file(&self, file_path: PathBuf, data: Vec<u8>) -> Result<()>;
+
+    fn max_update_frequency(&self) -> Duration;
+}
+
+#[async_trait::async_trait]
+pub trait IFileStore: IFileStoreReader + IFileStoreWriter {}
+
+impl<T> IFileStore for T where T: IFileStoreReader + IFileStoreWriter {}
+
+pub struct FileStoreReader {
+    chain_id: u64,
+    // TODO(grao): Change to IFileStoreReader when the trait_upcasting feature is in stable Rust.
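+    // The store handle and the per-folder transaction count are fixed at construction time;
+    // `cached_file_store_version` remembers the highest version seen in the metadata so that
+    // `can_serve` can usually answer without re-reading the metadata file.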
+    reader: Arc<dyn IFileStore>,
+    num_transactions_per_folder: u64,
+    cached_file_store_version: AtomicU64,
+}
+
+impl FileStoreReader {
+    pub async fn new(chain_id: u64, reader: Arc<dyn IFileStore>) -> Self {
+        assert!(reader.is_initialized().await);
+
+        let mut myself = Self {
+            chain_id,
+            reader,
+            num_transactions_per_folder: 0,
+            cached_file_store_version: AtomicU64::new(0),
+        };
+
+        let metadata = Self::get_file_store_metadata(&myself)
+            .await
+            .expect("Failed to fetch num_transactions_per_folder.");
+
+        assert!(chain_id == metadata.chain_id);
+
+        myself.num_transactions_per_folder = metadata.num_transactions_per_folder;
+
+        myself
+    }
+
+    pub fn get_path_for_version(&self, version: u64) -> PathBuf {
+        let mut buf = self.get_folder_name(version);
+        buf.push(format!("{}", version));
+        buf
+    }
+
+    pub fn get_path_for_batch_metadata(&self, version: u64) -> PathBuf {
+        let folder = self.get_folder_name(version);
+        let mut batch_metadata_path = PathBuf::new();
+        batch_metadata_path.push(folder);
+        batch_metadata_path.push(METADATA_FILE_NAME);
+        batch_metadata_path
+    }
+
+    pub async fn get_transaction_batch(
+        &self,
+        version: u64,
+        retries: u8,
+        max_files: Option<usize>,
+        tx: Sender<(Vec<Transaction>, usize)>,
+    ) {
+        trace!(
+            "Getting transactions from file store, version: {version}, max_files: {max_files:?}."
+        );
+        let batch_metadata = self.get_batch_metadata(version).await;
+        if batch_metadata.is_none() {
+            // TODO(grao): This is unexpected, should only happen when data is corrupted. Consider
+            // make it panic!.
+            error!("Failed to get the batch metadata, unable to serve the request.");
+            return;
+        }
+
+        let batch_metadata = batch_metadata.unwrap();
+
+        let mut file_index = None;
+        for (i, (file_store_version, _, _)) in batch_metadata.files.iter().enumerate().rev() {
+            if *file_store_version <= version {
+                file_index = Some(i);
+                break;
+            }
+        }
+
+        let file_index =
+            file_index.unwrap_or_else(|| panic!("Must find file_index for version: {version}."));
+        let mut end_file_index = batch_metadata.files.len();
+        if let Some(max_files) = max_files {
+            end_file_index = end_file_index.min(file_index.saturating_add(max_files));
+        }
+
+        for i in file_index..end_file_index {
+            let current_version = batch_metadata.files[i].0;
+            let mut size_bytes = batch_metadata.files[i].2;
+            let transactions = self
+                .get_transaction_file_at_version(current_version, retries)
+                .await;
+            if let Ok(mut transactions) = transactions {
+                let num_to_skip = version.saturating_sub(current_version) as usize;
+                let result = if num_to_skip > 0 {
+                    let transactions_to_return = transactions.split_off(num_to_skip);
+                    for transaction in transactions {
+                        size_bytes -= transaction.encoded_len();
+                    }
+                    (transactions_to_return, size_bytes)
+                } else {
+                    (transactions, size_bytes)
+                };
+                trace!("Got {} transactions from file store to send, size: {size_bytes}, first_version: {:?}", result.0.len(), result.0.first().map(|t| t.version));
+                if tx.send(result).await.is_err() {
+                    break;
+                }
+            } else {
+                error!("Got error from file store: {:?}.", transactions);
+                break;
+            }
+        }
+    }
+
+    /// Returns file store metadata, or None if not found.
+    pub async fn get_file_store_metadata(&self) -> Option<FileStoreMetadata> {
+        self.reader
+            .get_raw_file(PathBuf::from(METADATA_FILE_NAME))
+            .await
+            .expect("Failed to get file store metadata.")
+            .map(|data| serde_json::from_slice(&data).expect("Metadata JSON is invalid."))
+    }
+
+    /// Returns the batch metadata for the batch that includes the provided version, or None if not
+    /// found.
+    pub async fn get_batch_metadata(&self, version: u64) -> Option<BatchMetadata> {
+        self.reader
+            .get_raw_file(self.get_path_for_batch_metadata(version))
+            .await
+            .expect("Failed to get batch metadata.")
+            .map(|data| serde_json::from_slice(&data).expect("Batch metadata JSON is invalid."))
+    }
+
+    /// Returns the latest_version (next_version) that is going to be processed by file store, or
+    /// None if the metadata file doesn't exist.
+    pub async fn get_latest_version(&self) -> Option<u64> {
+        let metadata = self.get_file_store_metadata().await;
+        let latest_version = metadata.map(|metadata| {
+            if metadata.chain_id != self.chain_id {
+                panic!("Wrong chain_id.");
+            }
+            metadata.version
+        });
+
+        if let Some(version) = latest_version {
+            self.cached_file_store_version
+                .fetch_max(version, Ordering::SeqCst);
+        }
+
+        latest_version
+    }
+
+    pub async fn can_serve(&self, version: u64) -> bool {
+        if self.cached_file_store_version.load(Ordering::SeqCst) > version {
+            return true;
+        }
+
+        self.get_latest_version().await.unwrap() > version
+    }
+
+    fn get_folder_name(&self, version: u64) -> PathBuf {
+        let mut buf = PathBuf::new();
+        buf.push(format!("{}", version / self.num_transactions_per_folder));
+        buf
+    }
+
+    async fn get_transaction_file_at_version(
+        &self,
+        version: u64,
+        retries: u8,
+    ) -> Result<Vec<Transaction>> {
+        let mut retries = retries;
+        let bytes = loop {
+            let path = self.get_path_for_version(version);
+            match self.reader.get_raw_file(path.clone()).await {
+                Ok(bytes) => break bytes.unwrap_or_else(|| panic!("File should exist: {path:?}.")),
+                Err(err) => {
+                    TRANSACTION_STORE_FETCH_RETRIES
+                        .with_label_values(&[self.reader.tag()])
+                        .inc_by(1);
+
+                    if retries == 0 {
+                        return Err(err);
+                    }
+                    retries -= 1;
+                    tokio::time::sleep(std::time::Duration::from_millis(10)).await;
+                },
+            }
+        };
+
+        let transactions_in_storage = tokio::task::spawn_blocking(move || {
+            FileEntry::new(bytes, StorageFormat::Lz4CompressedProto).into_transactions_in_storage()
+        })
+        .await?;
+
+        Ok(transactions_in_storage.transactions)
+    }
+}
+
+pub struct FileStoreOperatorV2 {
+    max_size_per_file: usize,
+    num_txns_per_folder: u64,
+
+    buffer: Vec<Transaction>,
+    buffer_size: usize,
+    buffer_batch_metadata: BatchMetadata,
+    version: u64,
+}
+
+impl FileStoreOperatorV2 {
+    pub async fn new(
+        max_size_per_file: usize,
+        num_txns_per_folder: u64,
+        version: u64,
+        batch_metadata: BatchMetadata,
+    ) -> Self {
+        Self {
+            max_size_per_file,
+            num_txns_per_folder,
+            buffer: vec![],
+            buffer_size: 0,
+            buffer_batch_metadata: batch_metadata,
+            version,
+        }
+    }
+
+    pub fn version(&self) -> u64 {
+        self.version
+    }
+
+    pub async fn buffer_and_maybe_dump_transactions_to_file(
+        &mut self,
+        transaction: Transaction,
+        tx: Sender<(Vec<Transaction>, BatchMetadata, bool)>,
+    ) -> Result<()> {
+        let end_batch = (transaction.version + 1) % self.num_txns_per_folder == 0;
+        let size = transaction.encoded_len();
+        self.buffer.push(transaction);
+        self.buffer_size += size;
+        self.version += 1;
+        if self.buffer_size >= self.max_size_per_file || end_batch {
+            self.dump_transactions_to_file(end_batch, tx).await?;
+        }
+
+        Ok(())
+    }
+
+    async fn dump_transactions_to_file(
+        &mut self,
+        end_batch: bool,
+        tx: Sender<(Vec<Transaction>, BatchMetadata, bool)>,
+    ) -> Result<()> {
+        let transactions = std::mem::take(&mut self.buffer);
+        let first_version = transactions.first().unwrap().version;
+        self.buffer_batch_metadata.files.push((
+            first_version,
+            first_version + transactions.len() as u64,
+            self.buffer_size,
+        ));
+        self.buffer_size = 0;
+
+        tx.send((transactions,
self.buffer_batch_metadata.clone(), end_batch)) + .await + .map_err(anyhow::Error::msg)?; + + if end_batch { + self.buffer_batch_metadata = BatchMetadata::default(); + } + + Ok(()) + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs index d76fdc5108225..d6d45b38e1baf 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs @@ -7,7 +7,9 @@ pub mod config; pub mod constants; pub mod counters; pub mod file_store_operator; +pub mod file_store_operator_v2; pub mod in_memory_cache; +pub mod status_page; pub mod types; use anyhow::{Context, Result}; @@ -17,7 +19,7 @@ use aptos_protos::{ util::timestamp::Timestamp, }; use prost::Message; -use std::time::Duration; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tonic::codec::CompressionEncoding; use url::Url; @@ -95,9 +97,21 @@ pub async fn create_data_service_grpc_client( Ok(client) } +pub fn timestamp_now_proto() -> Timestamp { + system_time_to_proto(SystemTime::now()) +} + +pub fn system_time_to_proto(system_time: SystemTime) -> Timestamp { + let ts = system_time.duration_since(UNIX_EPOCH).unwrap(); + Timestamp { + seconds: ts.as_secs() as i64, + nanos: ts.subsec_nanos() as i32, + } +} + pub fn time_diff_since_pb_timestamp_in_secs(timestamp: &Timestamp) -> f64 { - let current_timestamp = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) + let current_timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) .expect("SystemTime before UNIX EPOCH!") .as_secs_f64(); let transaction_time = timestamp.seconds as f64 + timestamp.nanos as f64 * 1e-9; diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/status_page/html.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/status_page/html.rs new file mode 100644 index 0000000000000..169078018a96b --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/status_page/html.rs @@ -0,0 +1,61 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +const STYLE: &str = r#" +#nav-bar { + background-color: #333; + overflow: hidden; + margin-bottom: 20px; + padding: 10px 0; +} + +#nav-bar ul { + list-style-type: none; + margin: 0; + padding: 0; + display: flex; + justify-content: center; +} + +.tab { + display: inline; + padding: 14px 20px; + cursor: pointer; + color: white; + text-align: center; + text-decoration: none; + background-color: #333; + border: 1px solid #444; + transition: background-color 0.3s ease; +} + +.tab:hover { + background-color: #575757; +} + +.tab.active { + background-color: #007bff; + border-color: #0056b3; + font-weight: bold; +} + +tbody tr:nth-child(odd) { + background-color: #ff33cc; +} + +tbody tr:nth-child(even) { + background-color: #e495e4; +} + +"#; + +const SCRIPT: &str = r#" +function showTab(index) { + let tabs = document.querySelectorAll('[id^="tab-"]'); + let navItems = document.querySelectorAll('.tab'); + tabs.forEach(tab => tab.style.display = 'none'); + navItems.forEach(item => item.classList.remove('active')); + document.getElementById('tab-' + index).style.display = 'block'; + navItems[index].classList.add('active'); +} +"#; diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/status_page/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/status_page/mod.rs new file mode 100644 index 0000000000000..0e28902023bb2 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/status_page/mod.rs @@ -0,0 +1,117 @@ +// Copyright © Aptos Foundation +// 
SPDX-License-Identifier: Apache-2.0
+
+use crate::timestamp_to_unixtime;
+use aptos_protos::indexer::v1::StreamProgress;
+use build_html::{Html, HtmlChild, HtmlContainer, HtmlElement, HtmlPage, HtmlTag};
+use std::time::{Duration, SystemTime};
+use warp::{
+    reply::{html, Reply, Response},
+    Rejection,
+};
+
+include!("html.rs");
+
+pub struct Tab {
+    name: String,
+    content: HtmlChild,
+}
+
+impl Tab {
+    pub fn new(name: &str, content: HtmlChild) -> Self {
+        Self {
+            name: name.to_string(),
+            content,
+        }
+    }
+}
+
+pub fn render_status_page(tabs: Vec<Tab>) -> Result<Response, Rejection> {
+    let tab_names = tabs.iter().map(|tab| tab.name.clone()).collect::<Vec<_>>();
+    let tab_contents = tabs.into_iter().map(|tab| tab.content).collect::<Vec<_>>();
+
+    let nav_bar = HtmlElement::new(HtmlTag::Div)
+        .with_attribute("id", "nav-bar")
+        .with_child(
+            tab_names
+                .into_iter()
+                .enumerate()
+                .fold(
+                    HtmlElement::new(HtmlTag::UnorderedList),
+                    |ul, (i, tab_name)| {
+                        ul.with_child(
+                            HtmlElement::new(HtmlTag::ListElement)
+                                .with_attribute("onclick", format!("showTab({i})"))
+                                .with_attribute("class", if i == 0 { "tab active" } else { "tab" })
+                                .with_child(tab_name.into())
+                                .into(),
+                        )
+                    },
+                )
+                .into(),
+        );
+
+    let content = tab_contents.into_iter().enumerate().fold(
+        HtmlElement::new(HtmlTag::Div),
+        |div, (i, tab_content)| {
+            div.with_child(
+                HtmlElement::new(HtmlTag::Div)
+                    .with_attribute("id", format!("tab-{i}"))
+                    .with_attribute(
+                        "style",
+                        if i == 0 {
+                            "display: block;"
+                        } else {
+                            "display: none;"
+                        },
+                    )
+                    .with_child(tab_content)
+                    .into(),
+            )
+        },
+    );
+
+    let page = HtmlPage::new()
+        .with_title("Status")
+        .with_style(STYLE)
+        .with_script_literal(SCRIPT)
+        .with_raw(nav_bar)
+        .with_raw(content)
+        .to_html_string();
+
+    Ok(html(page).into_response())
+}
+
+pub fn get_throughput_from_samples(
+    progress: Option<&StreamProgress>,
+    duration: Duration,
+) -> String {
+    if let Some(progress) = progress {
+        let now = SystemTime::now()
+            .duration_since(SystemTime::UNIX_EPOCH)
+            .unwrap()
+            .as_secs_f64();
+        let index = progress.samples.partition_point(|p| {
+            let diff = now - timestamp_to_unixtime(p.timestamp.as_ref().unwrap());
+            diff > duration.as_secs_f64()
+        });
+
+        // Need 2 sample points for calculation.
+        // TODO(grao): Consider doing interpolation here.
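+        // Throughput is computed between the oldest sample still inside `duration` and the
+        // newest sample: (version delta) / (time delta) for tps, and (size delta) / (time delta)
+        // for bytes per second.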
+ if index + 1 < progress.samples.len() { + let sample_a = progress.samples[index]; + let sample_b = progress.samples.last().unwrap(); + let time_diff = timestamp_to_unixtime(sample_b.timestamp.as_ref().unwrap()) + - timestamp_to_unixtime(sample_a.timestamp.as_ref().unwrap()); + let tps = (sample_b.version - sample_a.version) as f64 / time_diff; + let bps = (sample_b.size_bytes - sample_a.size_bytes) as f64 / time_diff; + return format!( + "{} tps, {} / s", + tps as u64, + bytesize::to_string(bps as u64, /*si_prefix=*/ false) + ); + } + } + + "No data".to_string() +} diff --git a/protos/proto/aptos/indexer/v1/grpc.proto b/protos/proto/aptos/indexer/v1/grpc.proto index 442f1e31fbddc..ecf23b97cfa61 100644 --- a/protos/proto/aptos/indexer/v1/grpc.proto +++ b/protos/proto/aptos/indexer/v1/grpc.proto @@ -33,7 +33,7 @@ message StreamInfo { } message LiveDataServiceInfo { - optional uint64 chain_id = 1; + uint64 chain_id = 1; optional aptos.util.timestamp.Timestamp timestamp = 2; optional uint64 known_latest_version = 3; optional StreamInfo stream_info = 4; @@ -42,20 +42,20 @@ message LiveDataServiceInfo { } message HistoricalDataServiceInfo { - optional uint64 chain_id = 1; + uint64 chain_id = 1; optional aptos.util.timestamp.Timestamp timestamp = 2; optional uint64 known_latest_version = 3; optional StreamInfo stream_info = 4; } message FullnodeInfo { - optional uint64 chain_id = 1; + uint64 chain_id = 1; optional aptos.util.timestamp.Timestamp timestamp = 2; optional uint64 known_latest_version = 3; } message GrpcManagerInfo { - optional uint64 chain_id = 1; + uint64 chain_id = 1; optional aptos.util.timestamp.Timestamp timestamp = 2; optional uint64 known_latest_version = 3; optional string master_address = 4; diff --git a/protos/rust/src/pb/aptos.indexer.v1.rs b/protos/rust/src/pb/aptos.indexer.v1.rs index f8bba237f4fdd..a93716f759202 100644 --- a/protos/rust/src/pb/aptos.indexer.v1.rs +++ b/protos/rust/src/pb/aptos.indexer.v1.rs @@ -4,7 +4,6 @@ // @generated // This file is @generated by prost-build. /// This is for storage only. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionsInStorage { /// Required; transactions data. @@ -14,7 +13,6 @@ pub struct TransactionsInStorage { #[prost(uint64, optional, tag="2")] pub starting_version: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GetTransactionsRequest { /// Required; start version of current stream. @@ -30,7 +28,6 @@ pub struct GetTransactionsRequest { pub batch_size: ::core::option::Option, } /// TransactionsResponse is a batch of transactions. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionsResponse { /// Required; transactions data. 
@@ -40,7 +37,6 @@ pub struct TransactionsResponse { #[prost(uint64, optional, tag="2")] pub chain_id: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct StreamProgressSampleProto { #[prost(message, optional, tag="1")] @@ -50,13 +46,11 @@ pub struct StreamProgressSampleProto { #[prost(uint64, tag="3")] pub size_bytes: u64, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamProgress { #[prost(message, repeated, tag="1")] pub samples: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ActiveStream { #[prost(string, optional, tag="1")] @@ -70,17 +64,15 @@ pub struct ActiveStream { #[prost(message, optional, tag="5")] pub progress: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamInfo { #[prost(message, repeated, tag="1")] pub active_streams: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LiveDataServiceInfo { - #[prost(uint64, optional, tag="1")] - pub chain_id: ::core::option::Option, + #[prost(uint64, tag="1")] + pub chain_id: u64, #[prost(message, optional, tag="2")] pub timestamp: ::core::option::Option, #[prost(uint64, optional, tag="3")] @@ -91,11 +83,10 @@ pub struct LiveDataServiceInfo { #[prost(uint64, optional, tag="5")] pub min_servable_version: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HistoricalDataServiceInfo { - #[prost(uint64, optional, tag="1")] - pub chain_id: ::core::option::Option, + #[prost(uint64, tag="1")] + pub chain_id: u64, #[prost(message, optional, tag="2")] pub timestamp: ::core::option::Option, #[prost(uint64, optional, tag="3")] @@ -103,21 +94,19 @@ pub struct HistoricalDataServiceInfo { #[prost(message, optional, tag="4")] pub stream_info: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct FullnodeInfo { - #[prost(uint64, optional, tag="1")] - pub chain_id: ::core::option::Option, + #[prost(uint64, tag="1")] + pub chain_id: u64, #[prost(message, optional, tag="2")] pub timestamp: ::core::option::Option, #[prost(uint64, optional, tag="3")] pub known_latest_version: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GrpcManagerInfo { - #[prost(uint64, optional, tag="1")] - pub chain_id: ::core::option::Option, + #[prost(uint64, tag="1")] + pub chain_id: u64, #[prost(message, optional, tag="2")] pub timestamp: ::core::option::Option, #[prost(uint64, optional, tag="3")] @@ -125,7 +114,6 @@ pub struct GrpcManagerInfo { #[prost(string, optional, tag="4")] pub master_address: ::core::option::Option<::prost::alloc::string::String>, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ServiceInfo { #[prost(string, optional, tag="1")] @@ -135,8 +123,7 @@ pub struct ServiceInfo { } /// Nested message and enum types in `ServiceInfo`. 
pub mod service_info { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Info { #[prost(message, tag="2")] LiveDataServiceInfo(super::LiveDataServiceInfo), @@ -148,19 +135,16 @@ pub mod service_info { GrpcManagerInfo(super::GrpcManagerInfo), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HeartbeatRequest { #[prost(message, optional, tag="1")] pub service_info: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct HeartbeatResponse { #[prost(uint64, optional, tag="1")] pub known_latest_version: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PingDataServiceRequest { #[prost(uint64, optional, tag="1")] @@ -169,7 +153,6 @@ pub struct PingDataServiceRequest { #[prost(bool, tag="2")] pub ping_live_data_service: bool, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PingDataServiceResponse { #[prost(oneof="ping_data_service_response::Info", tags="1, 2")] @@ -177,8 +160,7 @@ pub struct PingDataServiceResponse { } /// Nested message and enum types in `PingDataServiceResponse`. pub mod ping_data_service_response { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Info { #[prost(message, tag="1")] LiveDataServiceInfo(super::LiveDataServiceInfo), @@ -186,13 +168,11 @@ pub mod ping_data_service_response { HistoricalDataServiceInfo(super::HistoricalDataServiceInfo), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GetDataServiceForRequestRequest { #[prost(message, optional, tag="1")] pub user_request: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetDataServiceForRequestResponse { #[prost(string, tag="1")] @@ -349,7 +329,7 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, 0x29, 0x16, 0x2c, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x29, 0x37, 0x3d, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x29, 0x3e, 0x52, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, 0x0a, 0x84, 0x3b, 0x0a, 0x1b, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, 0x6e, 0x64, 0x65, + 0x33, 0x0a, 0x84, 0x3a, 0x0a, 0x1b, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78, @@ -399,327 +379,319 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x73, 0x22, 0x88, 0x03, 0x0a, 0x13, 0x4c, 0x69, 0x76, 0x65, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1e, 0x0a, - 0x08, 
0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, - 0x00, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x48, 0x01, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x88, 0x01, - 0x01, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, - 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, - 0x02, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x03, 0x52, 0x0a, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, - 0x6d, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x48, 0x04, 0x52, 0x12, 0x6d, 0x69, - 0x6e, 0x53, 0x65, 0x72, 0x76, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x88, 0x01, 0x01, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, - 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x17, - 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6d, 0x69, 0x6e, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x22, 0xbe, 0x02, 0x0a, 0x19, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1e, + 0x72, 0x65, 0x61, 0x6d, 0x73, 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x76, 0x65, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a, + 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x70, + 0x74, 0x6f, 0x73, 0x2e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, + 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x12, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x88, 0x01, 0x01, 0x12, 0x42, 
0x0a, 0x0b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, + 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x02, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x49, 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, 0x6d, 0x69, 0x6e, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x04, 0x48, 0x03, 0x52, 0x12, 0x6d, 0x69, 0x6e, 0x53, 0x65, 0x72, 0x76, + 0x61, 0x62, 0x6c, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x17, 0x0a, 0x15, + 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6d, 0x69, 0x6e, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xac, + 0x02, 0x0a, 0x19, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a, 0x08, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x70, 0x74, + 0x6f, 0x73, 0x2e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x12, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, + 0x01, 0x01, 0x12, 0x42, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x02, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, + 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0x0a, + 0x0c, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0xcb, 0x01, + 0x0a, 0x0c, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, - 0x48, 0x00, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x42, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 
0x2e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x48, 0x01, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x88, - 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, - 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, - 0x48, 0x02, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x0b, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x03, 0x52, 0x0a, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x42, 0x0b, 0x0a, - 0x09, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, - 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x22, 0xdd, 0x01, 0x0a, 0x0c, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x1e, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x88, - 0x01, 0x01, 0x12, 0x42, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x75, 0x74, - 0x69, 0x6c, 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, - 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x48, 0x02, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, - 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x0b, 0x0a, - 0x09, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, - 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x9f, 0x02, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1e, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, - 0x2e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, 
0x6b, 0x6e, 0x6f, - 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, 0x02, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, - 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, - 0x12, 0x2a, 0x0a, 0x0e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0d, 0x6d, 0x61, 0x73, 0x74, - 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x88, 0x01, 0x01, 0x42, 0x0b, 0x0a, 0x09, - 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x22, 0xa6, 0x03, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x88, - 0x01, 0x01, 0x12, 0x5c, 0x0a, 0x16, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x13, 0x6c, 0x69, 0x76, - 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x12, 0x6e, 0x0a, 0x1c, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x19, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, - 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x12, 0x45, 0x0a, 0x0d, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x6e, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0c, 0x66, 0x75, 0x6c, 0x6c, 0x6e, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4f, 0x0a, 0x11, 0x67, 0x72, 0x70, 0x63, 0x5f, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x06, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, - 0x42, 
0x0a, 0x0a, 0x08, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x6a, 0x0a, 0x10, - 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x63, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x72, - 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, + 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, + 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x12, 0x6b, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x88, 0x01, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, - 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x9d, 0x01, - 0x0a, 0x16, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, - 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, - 0x33, 0x0a, 0x16, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x13, 0x70, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, - 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xef, 0x01, - 0x0a, 0x17, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x16, 0x6c, 0x69, 0x76, - 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, - 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x74, 0x6f, - 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x76, - 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x48, 0x00, 0x52, 0x13, 0x6c, 0x69, 
0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x6e, 0x0a, 0x1c, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x6e, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, + 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8d, 0x02, 0x0a, 0x0f, + 0x47, 0x72, 0x70, 0x63, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, + 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x88, 0x01, 0x01, 0x12, 0x35, + 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x12, + 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x2a, 0x0a, 0x0e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, + 0x0d, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x88, 0x01, + 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0xa6, 0x03, 0x0a, 0x0b, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x5c, 0x0a, 0x16, 0x6c, 0x69, + 0x76, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x74, + 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x48, 0x00, 0x52, 0x13, 0x6c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x6e, 0x0a, 0x1c, 0x68, 0x69, 0x73, 0x74, + 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 
0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x19, 0x68, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x45, 0x0a, 0x0d, 0x66, 0x75, 0x6c, 0x6c, + 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, + 0x00, 0x52, 0x0c, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x4f, 0x0a, 0x11, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x70, 0x74, + 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, + 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x42, 0x06, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x22, 0x6a, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x42, + 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x22, 0x63, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, + 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x17, 0x0a, 0x15, + 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x9d, 0x01, 0x0a, 0x16, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, + 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x16, 0x70, 0x69, 0x6e, 0x67, 0x5f, + 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x70, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x17, 0x0a, 0x15, + 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 
0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xef, 0x01, 0x0a, 0x17, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x5c, 0x0a, 0x16, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, + 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x13, 0x6c, 0x69, 0x76, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x6e, 0x0a, 0x1c, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, + 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x48, 0x00, 0x52, 0x19, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x06, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x84, 0x01, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x0c, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, + 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x75, + 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, + 0x0d, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x54, + 0x0a, 0x20, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x12, 0x64, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x32, 0xcc, 0x02, 0x0a, 0x0b, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, + 0x74, 0x12, 0x22, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, + 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, + 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x0f, 0x47, 0x65, + 0x74, 0x54, 
0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x19, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x06, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, - 0x84, 0x01, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x0c, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, - 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x54, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, - 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x61, 0x74, 0x61, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x32, 0xcc, 0x02, 0x0a, - 0x0b, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x09, - 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x22, 0x2e, 0x61, 0x70, 0x74, 0x6f, - 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, - 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, + 0x2e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x81, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x2e, 0x61, + 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x6f, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x32, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x32, 0xd1, 0x01, 
0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x28, 0x2e, 0x61, 0x70, + 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x65, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x63, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x26, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xd1, 0x01, 0x0a, 0x0b, - 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x04, 0x50, - 0x69, 0x6e, 0x67, 0x12, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, - 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, + 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x83, 0x01, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 
0x0f, 0x47, 0x65, 0x74, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x61, 0x70, - 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, - 0x83, 0x01, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x09, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xa2, 0x02, 0x03, 0x41, 0x49, 0x58, 0xaa, 0x02, 0x10, 0x41, 0x70, - 0x74, 0x6f, 0x73, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, - 0x10, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x5c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x5c, 0x56, - 0x31, 0xe2, 0x02, 0x1c, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x5c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, - 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0xea, 0x02, 0x12, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x3a, 0x3a, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, - 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x4a, 0xf8, 0x1b, 0x0a, 0x06, 0x12, 0x04, 0x03, 0x00, 0x6f, 0x01, - 0x0a, 0x4e, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x03, 0x00, 0x12, 0x32, 0x44, 0x20, 0x43, 0x6f, 0x70, - 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0xc2, 0xa9, 0x20, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x20, - 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x53, 0x50, 0x44, 0x58, - 0x2d, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x2d, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x3a, 0x20, 0x41, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2d, 0x32, 0x2e, 0x30, 0x0a, - 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x05, 0x00, 0x19, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, - 0x12, 0x03, 0x07, 0x00, 0x29, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x08, 0x00, 0x30, - 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x02, 0x12, 0x03, 0x09, 0x00, 0x2e, 0x0a, 0x0a, 0x0a, 0x02, 0x04, - 0x00, 0x12, 0x04, 0x0b, 0x00, 0x0f, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, - 0x0b, 0x08, 0x21, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0c, 0x02, 0x38, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x04, 0x12, 0x03, 0x0c, 0x02, 0x0a, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x0c, 0x0b, 0x29, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0c, 0x2a, 0x33, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, - 0x02, 0x00, 0x03, 0x12, 0x03, 0x0c, 0x36, 0x37, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, - 0x12, 0x03, 0x0d, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, - 0x0d, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0d, 0x09, - 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x0d, 0x13, 0x14, 0x0a, - 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x0e, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x0e, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, - 0x02, 0x02, 0x01, 0x12, 0x03, 0x0e, 0x09, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, - 0x03, 0x12, 0x03, 0x0e, 0x16, 0x17, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x11, 
0x00, - 0x13, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x11, 0x08, 0x16, 0x0a, 0x0b, - 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x12, 0x02, 0x31, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x01, 0x02, 0x00, 0x04, 0x12, 0x03, 0x12, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, - 0x00, 0x06, 0x12, 0x03, 0x12, 0x0b, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, - 0x12, 0x03, 0x12, 0x25, 0x2c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, - 0x12, 0x2f, 0x30, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x15, 0x00, 0x1c, 0x01, 0x0a, - 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x15, 0x08, 0x14, 0x0a, 0x0b, 0x0a, 0x04, 0x04, - 0x02, 0x02, 0x00, 0x12, 0x03, 0x16, 0x02, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, - 0x04, 0x12, 0x03, 0x16, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x05, 0x12, - 0x03, 0x16, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x16, - 0x12, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x16, 0x17, 0x18, - 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x17, 0x02, 0x39, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x02, 0x02, 0x01, 0x04, 0x12, 0x03, 0x17, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x02, 0x02, 0x01, 0x06, 0x12, 0x03, 0x17, 0x0b, 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, - 0x01, 0x01, 0x12, 0x03, 0x17, 0x2a, 0x34, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x03, - 0x12, 0x03, 0x17, 0x37, 0x38, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x02, 0x12, 0x03, 0x18, - 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x05, 0x12, 0x03, 0x18, 0x02, 0x08, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x01, 0x12, 0x03, 0x18, 0x09, 0x16, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x03, 0x12, 0x03, 0x18, 0x19, 0x1a, 0x0a, 0x0b, 0x0a, 0x04, - 0x04, 0x02, 0x02, 0x03, 0x12, 0x03, 0x19, 0x02, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, - 0x03, 0x04, 0x12, 0x03, 0x19, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x03, 0x05, - 0x12, 0x03, 0x19, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x03, 0x01, 0x12, 0x03, - 0x19, 0x12, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x03, 0x03, 0x12, 0x03, 0x19, 0x20, - 0x21, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x04, 0x12, 0x03, 0x1b, 0x02, 0x27, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x02, 0x02, 0x04, 0x04, 0x12, 0x03, 0x1b, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x02, 0x02, 0x04, 0x06, 0x12, 0x03, 0x1b, 0x0b, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, - 0x02, 0x04, 0x01, 0x12, 0x03, 0x1b, 0x1a, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x04, - 0x03, 0x12, 0x03, 0x1b, 0x25, 0x26, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x1e, 0x00, - 0x20, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x03, 0x01, 0x12, 0x03, 0x1e, 0x08, 0x12, 0x0a, 0x0b, - 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, 0x03, 0x1f, 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x03, 0x02, 0x00, 0x04, 0x12, 0x03, 0x1f, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, - 0x00, 0x06, 0x12, 0x03, 0x1f, 0x0b, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x01, - 0x12, 0x03, 0x1f, 0x18, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x03, 0x12, 0x03, - 0x1f, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x04, 0x12, 0x04, 0x22, 0x00, 0x29, 0x01, 0x0a, - 0x0a, 0x0a, 0x03, 0x04, 0x04, 0x01, 0x12, 0x03, 0x22, 0x08, 0x1b, 0x0a, 0x0b, 0x0a, 0x04, 0x04, - 0x04, 0x02, 0x00, 0x12, 0x03, 0x23, 0x02, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, - 0x04, 0x12, 0x03, 
0x23, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x05, 0x12, - 0x03, 0x23, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x01, 0x12, 0x03, 0x23, - 0x12, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x03, 0x12, 0x03, 0x23, 0x1d, 0x1e, - 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x01, 0x12, 0x03, 0x24, 0x02, 0x38, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x04, 0x02, 0x01, 0x04, 0x12, 0x03, 0x24, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x04, 0x02, 0x01, 0x06, 0x12, 0x03, 0x24, 0x0b, 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, - 0x01, 0x01, 0x12, 0x03, 0x24, 0x2a, 0x33, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x03, - 0x12, 0x03, 0x24, 0x36, 0x37, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x02, 0x12, 0x03, 0x25, - 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x04, 0x12, 0x03, 0x25, 0x02, 0x0a, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x05, 0x12, 0x03, 0x25, 0x0b, 0x11, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x25, 0x12, 0x26, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x04, 0x02, 0x02, 0x03, 0x12, 0x03, 0x25, 0x29, 0x2a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, - 0x02, 0x03, 0x12, 0x03, 0x26, 0x02, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x04, - 0x12, 0x03, 0x26, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x06, 0x12, 0x03, - 0x26, 0x0b, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x01, 0x12, 0x03, 0x26, 0x16, - 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x03, 0x12, 0x03, 0x26, 0x24, 0x25, 0x0a, - 0x60, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x04, 0x12, 0x03, 0x28, 0x02, 0x2b, 0x1a, 0x53, 0x20, 0x49, - 0x66, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x2c, 0x20, 0x69, - 0x74, 0x20, 0x6d, 0x65, 0x61, 0x6e, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x61, 0x74, 0x61, - 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, - 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x20, 0x61, 0x6e, 0x79, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x20, 0x79, 0x65, 0x74, 0x2e, - 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x04, 0x04, 0x12, 0x03, 0x28, 0x02, 0x0a, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x04, 0x05, 0x12, 0x03, 0x28, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x04, 0x02, 0x04, 0x01, 0x12, 0x03, 0x28, 0x12, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x04, 0x02, 0x04, 0x03, 0x12, 0x03, 0x28, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x05, 0x12, - 0x04, 0x2b, 0x00, 0x30, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x05, 0x01, 0x12, 0x03, 0x2b, 0x08, - 0x21, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x00, 0x12, 0x03, 0x2c, 0x02, 0x1f, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x04, 0x12, 0x03, 0x2c, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x05, 0x02, 0x00, 0x05, 0x12, 0x03, 0x2c, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, - 0x02, 0x00, 0x01, 0x12, 0x03, 0x2c, 0x12, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, - 0x03, 0x12, 0x03, 0x2c, 0x1d, 0x1e, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x01, 0x12, 0x03, - 0x2d, 0x02, 0x38, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x04, 0x12, 0x03, 0x2d, 0x02, - 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x06, 0x12, 0x03, 0x2d, 0x0b, 0x29, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x01, 0x12, 0x03, 0x2d, 0x2a, 0x33, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x05, 0x02, 0x01, 0x03, 0x12, 0x03, 0x2d, 0x36, 0x37, 0x0a, 0x0b, 0x0a, 0x04, 0x04, - 0x05, 0x02, 0x02, 0x12, 0x03, 0x2e, 0x02, 
0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, - 0x04, 0x12, 0x03, 0x2e, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x05, 0x12, - 0x03, 0x2e, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x01, 0x12, 0x03, 0x2e, - 0x12, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x03, 0x12, 0x03, 0x2e, 0x29, 0x2a, - 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x03, 0x12, 0x03, 0x2f, 0x02, 0x26, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x05, 0x02, 0x03, 0x04, 0x12, 0x03, 0x2f, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x05, 0x02, 0x03, 0x06, 0x12, 0x03, 0x2f, 0x0b, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, - 0x03, 0x01, 0x12, 0x03, 0x2f, 0x16, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x03, 0x03, - 0x12, 0x03, 0x2f, 0x24, 0x25, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x06, 0x12, 0x04, 0x32, 0x00, 0x36, - 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x06, 0x01, 0x12, 0x03, 0x32, 0x08, 0x14, 0x0a, 0x0b, 0x0a, - 0x04, 0x04, 0x06, 0x02, 0x00, 0x12, 0x03, 0x33, 0x02, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, - 0x02, 0x00, 0x04, 0x12, 0x03, 0x33, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, - 0x05, 0x12, 0x03, 0x33, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x01, 0x12, - 0x03, 0x33, 0x12, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x03, 0x12, 0x03, 0x33, - 0x1d, 0x1e, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x01, 0x12, 0x03, 0x34, 0x02, 0x38, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x04, 0x12, 0x03, 0x34, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x06, 0x02, 0x01, 0x06, 0x12, 0x03, 0x34, 0x0b, 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x06, 0x02, 0x01, 0x01, 0x12, 0x03, 0x34, 0x2a, 0x33, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, - 0x01, 0x03, 0x12, 0x03, 0x34, 0x36, 0x37, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x02, 0x12, - 0x03, 0x35, 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x04, 0x12, 0x03, 0x35, - 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x05, 0x12, 0x03, 0x35, 0x0b, 0x11, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x01, 0x12, 0x03, 0x35, 0x12, 0x26, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x03, 0x12, 0x03, 0x35, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, 0x02, - 0x04, 0x07, 0x12, 0x04, 0x38, 0x00, 0x3d, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x07, 0x01, 0x12, - 0x03, 0x38, 0x08, 0x17, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x00, 0x12, 0x03, 0x39, 0x02, - 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x04, 0x12, 0x03, 0x39, 0x02, 0x0a, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x05, 0x12, 0x03, 0x39, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x07, 0x02, 0x00, 0x01, 0x12, 0x03, 0x39, 0x12, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x07, 0x02, 0x00, 0x03, 0x12, 0x03, 0x39, 0x1d, 0x1e, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x07, 0x02, + 0x42, 0x09, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xa2, 0x02, 0x03, + 0x41, 0x49, 0x58, 0xaa, 0x02, 0x10, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x10, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x5c, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1c, 0x41, 0x70, 0x74, 0x6f, + 0x73, 0x5c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x41, 0x70, 0x74, 0x6f, 0x73, + 0x3a, 0x3a, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x4a, 0xc0, 0x1b, + 0x0a, 0x06, 0x12, 0x04, 0x03, 0x00, 0x6f, 0x01, 0x0a, 0x4e, 0x0a, 
0x01, 0x0c, 0x12, 0x03, 0x03, + 0x00, 0x12, 0x32, 0x44, 0x20, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0xc2, + 0xa9, 0x20, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x0a, 0x20, 0x53, 0x50, 0x44, 0x58, 0x2d, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, + 0x2d, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x3a, 0x20, 0x41, 0x70, 0x61, + 0x63, 0x68, 0x65, 0x2d, 0x32, 0x2e, 0x30, 0x0a, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x05, + 0x00, 0x19, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x07, 0x00, 0x29, 0x0a, 0x09, 0x0a, + 0x02, 0x03, 0x01, 0x12, 0x03, 0x08, 0x00, 0x30, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x02, 0x12, 0x03, + 0x09, 0x00, 0x2e, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x0b, 0x00, 0x0f, 0x01, 0x0a, + 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x0b, 0x08, 0x21, 0x0a, 0x0b, 0x0a, 0x04, 0x04, + 0x00, 0x02, 0x00, 0x12, 0x03, 0x0c, 0x02, 0x38, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, + 0x04, 0x12, 0x03, 0x0c, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, 0x12, + 0x03, 0x0c, 0x0b, 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0c, + 0x2a, 0x33, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0c, 0x36, 0x37, + 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0d, 0x02, 0x15, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x0d, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0d, 0x09, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x01, 0x03, 0x12, 0x03, 0x0d, 0x13, 0x14, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, + 0x03, 0x0e, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x0e, + 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x0e, 0x09, 0x13, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x0e, 0x16, 0x17, 0x0a, 0x0a, + 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x11, 0x00, 0x13, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, + 0x01, 0x12, 0x03, 0x11, 0x08, 0x16, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, + 0x12, 0x02, 0x31, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x04, 0x12, 0x03, 0x12, 0x02, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x12, 0x0b, 0x24, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x12, 0x25, 0x2c, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x12, 0x2f, 0x30, 0x0a, 0x0a, 0x0a, 0x02, 0x04, + 0x02, 0x12, 0x04, 0x15, 0x00, 0x1c, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, + 0x15, 0x08, 0x14, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x16, 0x02, 0x19, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x04, 0x12, 0x03, 0x16, 0x02, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x05, 0x12, 0x03, 0x16, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x16, 0x12, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, + 0x02, 0x00, 0x03, 0x12, 0x03, 0x16, 0x17, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x01, + 0x12, 0x03, 0x17, 0x02, 0x39, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x04, 0x12, 0x03, + 0x17, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x06, 0x12, 0x03, 0x17, 0x0b, + 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x01, 0x12, 0x03, 0x17, 0x2a, 0x34, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x03, 0x12, 0x03, 0x17, 0x37, 0x38, 0x0a, 0x0b, 
0x0a, + 0x04, 0x04, 0x02, 0x02, 0x02, 0x12, 0x03, 0x18, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, + 0x02, 0x02, 0x05, 0x12, 0x03, 0x18, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, + 0x01, 0x12, 0x03, 0x18, 0x09, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x03, 0x12, + 0x03, 0x18, 0x19, 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x03, 0x12, 0x03, 0x19, 0x02, + 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x03, 0x04, 0x12, 0x03, 0x19, 0x02, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x03, 0x05, 0x12, 0x03, 0x19, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x02, 0x02, 0x03, 0x01, 0x12, 0x03, 0x19, 0x12, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x02, 0x02, 0x03, 0x03, 0x12, 0x03, 0x19, 0x20, 0x21, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, + 0x04, 0x12, 0x03, 0x1b, 0x02, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x04, 0x04, 0x12, + 0x03, 0x1b, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x04, 0x06, 0x12, 0x03, 0x1b, + 0x0b, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x04, 0x01, 0x12, 0x03, 0x1b, 0x1a, 0x22, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x04, 0x03, 0x12, 0x03, 0x1b, 0x25, 0x26, 0x0a, 0x0a, + 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x1e, 0x00, 0x20, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x03, + 0x01, 0x12, 0x03, 0x1e, 0x08, 0x12, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, 0x03, + 0x1f, 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x04, 0x12, 0x03, 0x1f, 0x02, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x06, 0x12, 0x03, 0x1f, 0x0b, 0x17, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x1f, 0x18, 0x26, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x03, 0x02, 0x00, 0x03, 0x12, 0x03, 0x1f, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, + 0x04, 0x12, 0x04, 0x22, 0x00, 0x29, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x04, 0x01, 0x12, 0x03, + 0x22, 0x08, 0x1b, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x00, 0x12, 0x03, 0x23, 0x02, 0x16, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x05, 0x12, 0x03, 0x23, 0x02, 0x08, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x01, 0x12, 0x03, 0x23, 0x09, 0x11, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x04, 0x02, 0x00, 0x03, 0x12, 0x03, 0x23, 0x14, 0x15, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, + 0x02, 0x01, 0x12, 0x03, 0x24, 0x02, 0x38, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x04, + 0x12, 0x03, 0x24, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x06, 0x12, 0x03, + 0x24, 0x0b, 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x01, 0x12, 0x03, 0x24, 0x2a, + 0x33, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x03, 0x12, 0x03, 0x24, 0x36, 0x37, 0x0a, + 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x02, 0x12, 0x03, 0x25, 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x04, 0x02, 0x02, 0x04, 0x12, 0x03, 0x25, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, + 0x02, 0x02, 0x05, 0x12, 0x03, 0x25, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, + 0x01, 0x12, 0x03, 0x25, 0x12, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x03, 0x12, + 0x03, 0x25, 0x29, 0x2a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x03, 0x12, 0x03, 0x26, 0x02, + 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x04, 0x12, 0x03, 0x26, 0x02, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x06, 0x12, 0x03, 0x26, 0x0b, 0x15, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x04, 0x02, 0x03, 0x01, 0x12, 0x03, 0x26, 0x16, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x04, 0x02, 0x03, 0x03, 0x12, 0x03, 0x26, 0x24, 0x25, 0x0a, 0x60, 0x0a, 0x04, 0x04, 0x04, 0x02, + 0x04, 0x12, 0x03, 
0x28, 0x02, 0x2b, 0x1a, 0x53, 0x20, 0x49, 0x66, 0x20, 0x6e, 0x6f, 0x74, 0x20, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x6d, 0x65, 0x61, 0x6e, + 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x20, 0x61, 0x6e, 0x79, + 0x74, 0x68, 0x69, 0x6e, 0x67, 0x20, 0x79, 0x65, 0x74, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x04, 0x02, 0x04, 0x04, 0x12, 0x03, 0x28, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, + 0x04, 0x05, 0x12, 0x03, 0x28, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x04, 0x01, + 0x12, 0x03, 0x28, 0x12, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x04, 0x03, 0x12, 0x03, + 0x28, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x05, 0x12, 0x04, 0x2b, 0x00, 0x30, 0x01, 0x0a, + 0x0a, 0x0a, 0x03, 0x04, 0x05, 0x01, 0x12, 0x03, 0x2b, 0x08, 0x21, 0x0a, 0x0b, 0x0a, 0x04, 0x04, + 0x05, 0x02, 0x00, 0x12, 0x03, 0x2c, 0x02, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, + 0x05, 0x12, 0x03, 0x2c, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x01, 0x12, + 0x03, 0x2c, 0x09, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x03, 0x12, 0x03, 0x2c, + 0x14, 0x15, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x01, 0x12, 0x03, 0x2d, 0x02, 0x38, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x04, 0x12, 0x03, 0x2d, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x05, 0x02, 0x01, 0x06, 0x12, 0x03, 0x2d, 0x0b, 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x05, 0x02, 0x01, 0x01, 0x12, 0x03, 0x2d, 0x2a, 0x33, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, + 0x01, 0x03, 0x12, 0x03, 0x2d, 0x36, 0x37, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x02, 0x12, + 0x03, 0x2e, 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x04, 0x12, 0x03, 0x2e, + 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x05, 0x12, 0x03, 0x2e, 0x0b, 0x11, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x01, 0x12, 0x03, 0x2e, 0x12, 0x26, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x03, 0x12, 0x03, 0x2e, 0x29, 0x2a, 0x0a, 0x0b, 0x0a, 0x04, + 0x04, 0x05, 0x02, 0x03, 0x12, 0x03, 0x2f, 0x02, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, + 0x03, 0x04, 0x12, 0x03, 0x2f, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x03, 0x06, + 0x12, 0x03, 0x2f, 0x0b, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x03, 0x01, 0x12, 0x03, + 0x2f, 0x16, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x03, 0x03, 0x12, 0x03, 0x2f, 0x24, + 0x25, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x06, 0x12, 0x04, 0x32, 0x00, 0x36, 0x01, 0x0a, 0x0a, 0x0a, + 0x03, 0x04, 0x06, 0x01, 0x12, 0x03, 0x32, 0x08, 0x14, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x06, 0x02, + 0x00, 0x12, 0x03, 0x33, 0x02, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x05, 0x12, + 0x03, 0x33, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x01, 0x12, 0x03, 0x33, + 0x09, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x03, 0x12, 0x03, 0x33, 0x14, 0x15, + 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x01, 0x12, 0x03, 0x34, 0x02, 0x38, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x06, 0x02, 0x01, 0x04, 0x12, 0x03, 0x34, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x06, 0x02, 0x01, 0x06, 0x12, 0x03, 0x34, 0x0b, 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, + 0x01, 0x01, 0x12, 0x03, 0x34, 0x2a, 0x33, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x03, + 0x12, 0x03, 0x34, 0x36, 0x37, 0x0a, 0x0b, 
0x0a, 0x04, 0x04, 0x06, 0x02, 0x02, 0x12, 0x03, 0x35, + 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x04, 0x12, 0x03, 0x35, 0x02, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x05, 0x12, 0x03, 0x35, 0x0b, 0x11, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x01, 0x12, 0x03, 0x35, 0x12, 0x26, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x06, 0x02, 0x02, 0x03, 0x12, 0x03, 0x35, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x07, + 0x12, 0x04, 0x38, 0x00, 0x3d, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x07, 0x01, 0x12, 0x03, 0x38, + 0x08, 0x17, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x00, 0x12, 0x03, 0x39, 0x02, 0x16, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x05, 0x12, 0x03, 0x39, 0x02, 0x08, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x07, 0x02, 0x00, 0x01, 0x12, 0x03, 0x39, 0x09, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x07, 0x02, 0x00, 0x03, 0x12, 0x03, 0x39, 0x14, 0x15, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x01, 0x12, 0x03, 0x3a, 0x02, 0x38, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x01, 0x04, 0x12, 0x03, 0x3a, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x01, 0x06, 0x12, 0x03, 0x3a, 0x0b, 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x01, 0x01, 0x12, 0x03, 0x3a, 0x2a, 0x33, diff --git a/protos/rust/src/pb/aptos.indexer.v1.serde.rs b/protos/rust/src/pb/aptos.indexer.v1.serde.rs index 1099bc3379d93..d5769f68ce408 100644 --- a/protos/rust/src/pb/aptos.indexer.v1.serde.rs +++ b/protos/rust/src/pb/aptos.indexer.v1.serde.rs @@ -176,7 +176,7 @@ impl serde::Serialize for FullnodeInfo { { use serde::ser::SerializeStruct; let mut len = 0; - if self.chain_id.is_some() { + if self.chain_id != 0 { len += 1; } if self.timestamp.is_some() { @@ -186,8 +186,8 @@ impl serde::Serialize for FullnodeInfo { len += 1; } let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.FullnodeInfo", len)?; - if let Some(v) = self.chain_id.as_ref() { - struct_ser.serialize_field("chainId", ToString::to_string(&v).as_str())?; + if self.chain_id != 0 { + struct_ser.serialize_field("chainId", ToString::to_string(&self.chain_id).as_str())?; } if let Some(v) = self.timestamp.as_ref() { struct_ser.serialize_field("timestamp", v)?; @@ -270,7 +270,7 @@ impl<'de> serde::Deserialize<'de> for FullnodeInfo { return Err(serde::de::Error::duplicate_field("chainId")); } chain_id__ = - map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) ; } GeneratedField::Timestamp => { @@ -290,7 +290,7 @@ impl<'de> serde::Deserialize<'de> for FullnodeInfo { } } Ok(FullnodeInfo { - chain_id: chain_id__, + chain_id: chain_id__.unwrap_or_default(), timestamp: timestamp__, known_latest_version: known_latest_version__, }) @@ -625,7 +625,7 @@ impl serde::Serialize for GrpcManagerInfo { { use serde::ser::SerializeStruct; let mut len = 0; - if self.chain_id.is_some() { + if self.chain_id != 0 { len += 1; } if self.timestamp.is_some() { @@ -638,8 +638,8 @@ impl serde::Serialize for GrpcManagerInfo { len += 1; } let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.GrpcManagerInfo", len)?; - if let Some(v) = self.chain_id.as_ref() { - struct_ser.serialize_field("chainId", ToString::to_string(&v).as_str())?; + if self.chain_id != 0 { + struct_ser.serialize_field("chainId", ToString::to_string(&self.chain_id).as_str())?; } if let Some(v) = self.timestamp.as_ref() { struct_ser.serialize_field("timestamp", v)?; @@ -730,7 +730,7 @@ impl<'de> serde::Deserialize<'de> for GrpcManagerInfo { return 
Err(serde::de::Error::duplicate_field("chainId")); } chain_id__ = - map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) ; } GeneratedField::Timestamp => { @@ -756,7 +756,7 @@ impl<'de> serde::Deserialize<'de> for GrpcManagerInfo { } } Ok(GrpcManagerInfo { - chain_id: chain_id__, + chain_id: chain_id__.unwrap_or_default(), timestamp: timestamp__, known_latest_version: known_latest_version__, master_address: master_address__, @@ -960,7 +960,7 @@ impl serde::Serialize for HistoricalDataServiceInfo { { use serde::ser::SerializeStruct; let mut len = 0; - if self.chain_id.is_some() { + if self.chain_id != 0 { len += 1; } if self.timestamp.is_some() { @@ -973,8 +973,8 @@ impl serde::Serialize for HistoricalDataServiceInfo { len += 1; } let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.HistoricalDataServiceInfo", len)?; - if let Some(v) = self.chain_id.as_ref() { - struct_ser.serialize_field("chainId", ToString::to_string(&v).as_str())?; + if self.chain_id != 0 { + struct_ser.serialize_field("chainId", ToString::to_string(&self.chain_id).as_str())?; } if let Some(v) = self.timestamp.as_ref() { struct_ser.serialize_field("timestamp", v)?; @@ -1065,7 +1065,7 @@ impl<'de> serde::Deserialize<'de> for HistoricalDataServiceInfo { return Err(serde::de::Error::duplicate_field("chainId")); } chain_id__ = - map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) ; } GeneratedField::Timestamp => { @@ -1091,7 +1091,7 @@ impl<'de> serde::Deserialize<'de> for HistoricalDataServiceInfo { } } Ok(HistoricalDataServiceInfo { - chain_id: chain_id__, + chain_id: chain_id__.unwrap_or_default(), timestamp: timestamp__, known_latest_version: known_latest_version__, stream_info: stream_info__, @@ -1109,7 +1109,7 @@ impl serde::Serialize for LiveDataServiceInfo { { use serde::ser::SerializeStruct; let mut len = 0; - if self.chain_id.is_some() { + if self.chain_id != 0 { len += 1; } if self.timestamp.is_some() { @@ -1125,8 +1125,8 @@ impl serde::Serialize for LiveDataServiceInfo { len += 1; } let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.LiveDataServiceInfo", len)?; - if let Some(v) = self.chain_id.as_ref() { - struct_ser.serialize_field("chainId", ToString::to_string(&v).as_str())?; + if self.chain_id != 0 { + struct_ser.serialize_field("chainId", ToString::to_string(&self.chain_id).as_str())?; } if let Some(v) = self.timestamp.as_ref() { struct_ser.serialize_field("timestamp", v)?; @@ -1225,7 +1225,7 @@ impl<'de> serde::Deserialize<'de> for LiveDataServiceInfo { return Err(serde::de::Error::duplicate_field("chainId")); } chain_id__ = - map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + Some(map.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) ; } GeneratedField::Timestamp => { @@ -1259,7 +1259,7 @@ impl<'de> serde::Deserialize<'de> for LiveDataServiceInfo { } } Ok(LiveDataServiceInfo { - chain_id: chain_id__, + chain_id: chain_id__.unwrap_or_default(), timestamp: timestamp__, known_latest_version: known_latest_version__, stream_info: stream_info__, diff --git a/protos/rust/src/pb/aptos.indexer.v1.tonic.rs b/protos/rust/src/pb/aptos.indexer.v1.tonic.rs index 7c1140ba61e57..4e0c6ea804053 100644 --- a/protos/rust/src/pb/aptos.indexer.v1.tonic.rs +++ b/protos/rust/src/pb/aptos.indexer.v1.tonic.rs @@ -4,7 
+4,13 @@ // @generated /// Generated client implementations. pub mod raw_data_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// @@ -27,8 +33,8 @@ pub mod raw_data_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -53,7 +59,7 @@ pub mod raw_data_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { RawDataClient::new(InterceptedService::new(inner, interceptor)) } @@ -101,8 +107,7 @@ pub mod raw_data_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -119,16 +124,22 @@ pub mod raw_data_client { } /// Generated server implementations. pub mod raw_data_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with RawDataServer. #[async_trait] - pub trait RawData: Send + Sync + 'static { + pub trait RawData: std::marker::Send + std::marker::Sync + 'static { /// Server streaming response type for the GetTransactions method. type GetTransactionsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, > - + Send + + std::marker::Send + 'static; /** Get transactions batch without any filtering from starting version and end if transaction count is present. 
*/ @@ -142,14 +153,14 @@ pub mod raw_data_server { } /// #[derive(Debug)] - pub struct RawDataServer { + pub struct RawDataServer { inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - impl RawDataServer { + impl RawDataServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } @@ -203,8 +214,8 @@ pub mod raw_data_server { impl tonic::codegen::Service> for RawDataServer where T: RawData, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -266,23 +277,25 @@ pub mod raw_data_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", tonic::Code::Unimplemented as i32) - .header( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ) - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for RawDataServer { + impl Clone for RawDataServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -294,13 +307,21 @@ pub mod raw_data_server { } } } - impl tonic::server::NamedService for RawDataServer { - const NAME: &'static str = "aptos.indexer.v1.RawData"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.indexer.v1.RawData"; + impl tonic::server::NamedService for RawDataServer { + const NAME: &'static str = SERVICE_NAME; } } /// Generated client implementations. pub mod grpc_manager_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// @@ -323,8 +344,8 @@ pub mod grpc_manager_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -349,7 +370,7 @@ pub mod grpc_manager_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { GrpcManagerClient::new(InterceptedService::new(inner, interceptor)) } @@ -396,8 +417,7 @@ pub mod grpc_manager_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -422,8 +442,7 @@ pub mod grpc_manager_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -450,8 +469,7 @@ pub mod grpc_manager_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -473,11 +491,17 @@ pub mod grpc_manager_client { } /// Generated server implementations. 
pub mod grpc_manager_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with GrpcManagerServer. #[async_trait] - pub trait GrpcManager: Send + Sync + 'static { + pub trait GrpcManager: std::marker::Send + std::marker::Sync + 'static { /// async fn heartbeat( &self, @@ -505,14 +529,14 @@ pub mod grpc_manager_server { } /// #[derive(Debug)] - pub struct GrpcManagerServer { + pub struct GrpcManagerServer { inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - impl GrpcManagerServer { + impl GrpcManagerServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } @@ -566,8 +590,8 @@ pub mod grpc_manager_server { impl tonic::codegen::Service> for GrpcManagerServer where T: GrpcManager, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -723,23 +747,25 @@ pub mod grpc_manager_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", tonic::Code::Unimplemented as i32) - .header( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ) - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for GrpcManagerServer { + impl Clone for GrpcManagerServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -751,13 +777,21 @@ pub mod grpc_manager_server { } } } - impl tonic::server::NamedService for GrpcManagerServer { - const NAME: &'static str = "aptos.indexer.v1.GrpcManager"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.indexer.v1.GrpcManager"; + impl tonic::server::NamedService for GrpcManagerServer { + const NAME: &'static str = SERVICE_NAME; } } /// Generated client implementations. 
pub mod data_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// @@ -780,8 +814,8 @@ pub mod data_service_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -806,7 +840,7 @@ pub mod data_service_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { DataServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -853,8 +887,7 @@ pub mod data_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -879,8 +912,7 @@ pub mod data_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -899,11 +931,17 @@ pub mod data_service_client { } /// Generated server implementations. pub mod data_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with DataServiceServer. #[async_trait] - pub trait DataService: Send + Sync + 'static { + pub trait DataService: std::marker::Send + std::marker::Sync + 'static { /// async fn ping( &self, @@ -916,7 +954,7 @@ pub mod data_service_server { type GetTransactionsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, > - + Send + + std::marker::Send + 'static; /// async fn get_transactions( @@ -929,14 +967,14 @@ pub mod data_service_server { } /// #[derive(Debug)] - pub struct DataServiceServer { + pub struct DataServiceServer { inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - impl DataServiceServer { + impl DataServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } @@ -990,8 +1028,8 @@ pub mod data_service_server { impl tonic::codegen::Service> for DataServiceServer where T: DataService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -1098,23 +1136,25 @@ pub mod data_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", tonic::Code::Unimplemented as i32) - .header( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ) - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for 
DataServiceServer { + impl Clone for DataServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -1126,7 +1166,9 @@ pub mod data_service_server { } } } - impl tonic::server::NamedService for DataServiceServer { - const NAME: &'static str = "aptos.indexer.v1.DataService"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.indexer.v1.DataService"; + impl tonic::server::NamedService for DataServiceServer { + const NAME: &'static str = SERVICE_NAME; } } diff --git a/protos/rust/src/pb/aptos.internal.fullnode.v1.rs b/protos/rust/src/pb/aptos.internal.fullnode.v1.rs index 90c0d9208ae5f..f5547af9719d0 100644 --- a/protos/rust/src/pb/aptos.internal.fullnode.v1.rs +++ b/protos/rust/src/pb/aptos.internal.fullnode.v1.rs @@ -10,13 +10,11 @@ // TransactionOutput data(size n) // StreamStatus: BATCH_END with version x + (k + 1) * n - 1 -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionsOutput { #[prost(message, repeated, tag="1")] pub transactions: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct StreamStatus { #[prost(enumeration="stream_status::StatusType", tag="1")] @@ -46,9 +44,9 @@ pub mod stream_status { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - StatusType::Unspecified => "STATUS_TYPE_UNSPECIFIED", - StatusType::Init => "STATUS_TYPE_INIT", - StatusType::BatchEnd => "STATUS_TYPE_BATCH_END", + Self::Unspecified => "STATUS_TYPE_UNSPECIFIED", + Self::Init => "STATUS_TYPE_INIT", + Self::BatchEnd => "STATUS_TYPE_BATCH_END", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -62,7 +60,6 @@ pub mod stream_status { } } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GetTransactionsFromNodeRequest { /// Required; start version of current stream. @@ -74,7 +71,6 @@ pub struct GetTransactionsFromNodeRequest { #[prost(uint64, optional, tag="2")] pub transactions_count: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionsFromNodeResponse { /// Making sure that all the responses include a chain id @@ -85,8 +81,7 @@ pub struct TransactionsFromNodeResponse { } /// Nested message and enum types in `TransactionsFromNodeResponse`. pub mod transactions_from_node_response { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Response { #[prost(message, tag="1")] Status(super::StreamStatus), @@ -94,11 +89,9 @@ pub mod transactions_from_node_response { Data(super::TransactionsOutput), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PingFullnodeRequest { } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PingFullnodeResponse { #[prost(message, optional, tag="1")] diff --git a/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs b/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs index 3879fd94dcefd..f8cf73cd62ea8 100644 --- a/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs +++ b/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs @@ -4,7 +4,13 @@ // @generated /// Generated client implementations. 
pub mod fullnode_data_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// @@ -27,8 +33,8 @@ pub mod fullnode_data_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -53,7 +59,7 @@ pub mod fullnode_data_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { FullnodeDataClient::new(InterceptedService::new(inner, interceptor)) } @@ -100,8 +106,7 @@ pub mod fullnode_data_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -130,8 +135,7 @@ pub mod fullnode_data_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -153,11 +157,17 @@ pub mod fullnode_data_client { } /// Generated server implementations. pub mod fullnode_data_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with FullnodeDataServer. #[async_trait] - pub trait FullnodeData: Send + Sync + 'static { + pub trait FullnodeData: std::marker::Send + std::marker::Sync + 'static { /// async fn ping( &self, @@ -173,7 +183,7 @@ pub mod fullnode_data_server { tonic::Status, >, > - + Send + + std::marker::Send + 'static; /// async fn get_transactions_from_node( @@ -186,14 +196,14 @@ pub mod fullnode_data_server { } /// #[derive(Debug)] - pub struct FullnodeDataServer { + pub struct FullnodeDataServer { inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - impl FullnodeDataServer { + impl FullnodeDataServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } @@ -247,8 +257,8 @@ pub mod fullnode_data_server { impl tonic::codegen::Service> for FullnodeDataServer where T: FullnodeData, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -361,23 +371,25 @@ pub mod fullnode_data_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", tonic::Code::Unimplemented as i32) - .header( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ) - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for FullnodeDataServer { + impl Clone for 
FullnodeDataServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -389,7 +401,9 @@ pub mod fullnode_data_server { } } } - impl tonic::server::NamedService for FullnodeDataServer { - const NAME: &'static str = "aptos.internal.fullnode.v1.FullnodeData"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.internal.fullnode.v1.FullnodeData"; + impl tonic::server::NamedService for FullnodeDataServer { + const NAME: &'static str = SERVICE_NAME; } } diff --git a/protos/rust/src/pb/aptos.remote_executor.v1.rs b/protos/rust/src/pb/aptos.remote_executor.v1.rs index b84e6e1e68f50..29daad3efd968 100644 --- a/protos/rust/src/pb/aptos.remote_executor.v1.rs +++ b/protos/rust/src/pb/aptos.remote_executor.v1.rs @@ -3,7 +3,6 @@ // @generated // This file is @generated by prost-build. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct NetworkMessage { #[prost(bytes="vec", tag="1")] @@ -11,7 +10,6 @@ pub struct NetworkMessage { #[prost(string, tag="2")] pub message_type: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Empty { } diff --git a/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs b/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs index bab5b94b15a56..85f08bf9e8caa 100644 --- a/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs +++ b/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs @@ -4,7 +4,13 @@ // @generated /// Generated client implementations. pub mod network_message_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// @@ -27,8 +33,8 @@ pub mod network_message_service_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -53,7 +59,7 @@ pub mod network_message_service_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { NetworkMessageServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -97,8 +103,7 @@ pub mod network_message_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -120,11 +125,17 @@ pub mod network_message_service_client { } /// Generated server implementations. pub mod network_message_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with NetworkMessageServiceServer. 
#[async_trait] - pub trait NetworkMessageService: Send + Sync + 'static { + pub trait NetworkMessageService: std::marker::Send + std::marker::Sync + 'static { /// async fn simple_msg_exchange( &self, @@ -133,14 +144,14 @@ pub mod network_message_service_server { } /// #[derive(Debug)] - pub struct NetworkMessageServiceServer { + pub struct NetworkMessageServiceServer { inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - impl NetworkMessageServiceServer { + impl NetworkMessageServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } @@ -195,8 +206,8 @@ pub mod network_message_service_server { for NetworkMessageServiceServer where T: NetworkMessageService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -260,23 +271,25 @@ pub mod network_message_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", tonic::Code::Unimplemented as i32) - .header( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ) - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for NetworkMessageServiceServer { + impl Clone for NetworkMessageServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -288,8 +301,9 @@ pub mod network_message_service_server { } } } - impl tonic::server::NamedService - for NetworkMessageServiceServer { - const NAME: &'static str = "aptos.remote_executor.v1.NetworkMessageService"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.remote_executor.v1.NetworkMessageService"; + impl tonic::server::NamedService for NetworkMessageServiceServer { + const NAME: &'static str = SERVICE_NAME; } } diff --git a/protos/rust/src/pb/aptos.transaction.v1.rs b/protos/rust/src/pb/aptos.transaction.v1.rs index 3f029a2fe9981..967dcbdbb8c47 100644 --- a/protos/rust/src/pb/aptos.transaction.v1.rs +++ b/protos/rust/src/pb/aptos.transaction.v1.rs @@ -12,7 +12,6 @@ /// the same `height`. 
/// /// The Genesis Transaction (version 0) is contained within the first block, which has a height of `0` -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Block { /// Timestamp represents the timestamp of the `BlockMetadataTransaction` (or `GenesisTransaction` for the genesis block) @@ -35,7 +34,6 @@ pub struct Block { /// - Block Metadata Transaction: transactions generated by the chain to group together transactions forming a "block" /// - Block Epilogue / State Checkpoint Transaction: transactions generated by the chain to end the group transactions forming a bloc /// - Genesis Transaction: the first transaction of the chain, with all core contract and validator information baked in -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Transaction { #[prost(message, optional, tag="1")] @@ -76,13 +74,13 @@ pub mod transaction { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - TransactionType::Unspecified => "TRANSACTION_TYPE_UNSPECIFIED", - TransactionType::Genesis => "TRANSACTION_TYPE_GENESIS", - TransactionType::BlockMetadata => "TRANSACTION_TYPE_BLOCK_METADATA", - TransactionType::StateCheckpoint => "TRANSACTION_TYPE_STATE_CHECKPOINT", - TransactionType::User => "TRANSACTION_TYPE_USER", - TransactionType::Validator => "TRANSACTION_TYPE_VALIDATOR", - TransactionType::BlockEpilogue => "TRANSACTION_TYPE_BLOCK_EPILOGUE", + Self::Unspecified => "TRANSACTION_TYPE_UNSPECIFIED", + Self::Genesis => "TRANSACTION_TYPE_GENESIS", + Self::BlockMetadata => "TRANSACTION_TYPE_BLOCK_METADATA", + Self::StateCheckpoint => "TRANSACTION_TYPE_STATE_CHECKPOINT", + Self::User => "TRANSACTION_TYPE_USER", + Self::Validator => "TRANSACTION_TYPE_VALIDATOR", + Self::BlockEpilogue => "TRANSACTION_TYPE_BLOCK_EPILOGUE", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -99,8 +97,7 @@ pub mod transaction { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum TxnData { #[prost(message, tag="7")] BlockMetadata(super::BlockMetadataTransaction), @@ -119,7 +116,6 @@ pub mod transaction { } } /// Transaction types. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockMetadataTransaction { #[prost(string, tag="1")] @@ -135,7 +131,6 @@ pub struct BlockMetadataTransaction { #[prost(uint32, repeated, tag="6")] pub failed_proposer_indices: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GenesisTransaction { #[prost(message, optional, tag="1")] @@ -143,11 +138,9 @@ pub struct GenesisTransaction { #[prost(message, repeated, tag="2")] pub events: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct StateCheckpointTransaction { } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ValidatorTransaction { #[prost(message, repeated, tag="3")] @@ -157,16 +150,14 @@ pub struct ValidatorTransaction { } /// Nested message and enum types in `ValidatorTransaction`. 
pub mod validator_transaction { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct ObservedJwkUpdate { #[prost(message, optional, tag="1")] pub quorum_certified_update: ::core::option::Option, } /// Nested message and enum types in `ObservedJwkUpdate`. pub mod observed_jwk_update { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportedProviderJwKs { #[prost(string, tag="1")] pub issuer: ::prost::alloc::string::String, @@ -177,16 +168,14 @@ pub mod validator_transaction { } /// Nested message and enum types in `ExportedProviderJWKs`. pub mod exported_provider_jw_ks { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct Jwk { #[prost(oneof="jwk::JwkType", tags="1, 2")] pub jwk_type: ::core::option::Option, } /// Nested message and enum types in `JWK`. pub mod jwk { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct Rsa { #[prost(string, tag="1")] pub kid: ::prost::alloc::string::String, @@ -199,16 +188,14 @@ pub mod validator_transaction { #[prost(string, tag="5")] pub n: ::prost::alloc::string::String, } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct UnsupportedJwk { #[prost(bytes="vec", tag="1")] pub id: ::prost::alloc::vec::Vec, #[prost(bytes="vec", tag="2")] pub payload: ::prost::alloc::vec::Vec, } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum JwkType { #[prost(message, tag="1")] UnsupportedJwk(UnsupportedJwk), @@ -217,8 +204,7 @@ pub mod validator_transaction { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportedAggregateSignature { #[prost(uint64, repeated, tag="1")] pub signer_indices: ::prost::alloc::vec::Vec, @@ -226,8 +212,7 @@ pub mod validator_transaction { #[prost(bytes="vec", tag="2")] pub sig: ::prost::alloc::vec::Vec, } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct QuorumCertifiedUpdate { #[prost(message, optional, tag="1")] pub update: ::core::option::Option, @@ -235,16 +220,14 @@ pub mod validator_transaction { pub multi_sig: ::core::option::Option, } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct DkgUpdate { #[prost(message, optional, tag="1")] pub dkg_transcript: ::core::option::Option, } /// Nested message and enum types in `DkgUpdate`. 
pub mod dkg_update { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct DkgTranscript { #[prost(uint64, tag="1")] pub epoch: u64, @@ -254,8 +237,7 @@ pub mod validator_transaction { pub payload: ::prost::alloc::vec::Vec, } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum ValidatorTransactionType { #[prost(message, tag="1")] ObservedJwkUpdate(ObservedJwkUpdate), @@ -263,13 +245,11 @@ pub mod validator_transaction { DkgUpdate(DkgUpdate), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct BlockEpilogueTransaction { #[prost(message, optional, tag="1")] pub block_end_info: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct BlockEndInfo { #[prost(bool, tag="1")] @@ -281,7 +261,6 @@ pub struct BlockEndInfo { #[prost(uint64, tag="4")] pub block_approx_output_size: u64, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UserTransaction { #[prost(message, optional, tag="1")] @@ -289,7 +268,6 @@ pub struct UserTransaction { #[prost(message, repeated, tag="2")] pub events: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Event { #[prost(message, optional, tag="1")] @@ -303,7 +281,6 @@ pub struct Event { #[prost(string, tag="4")] pub data: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionInfo { #[prost(bytes="vec", tag="1")] @@ -325,7 +302,6 @@ pub struct TransactionInfo { #[prost(message, repeated, tag="9")] pub changes: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EventKey { #[prost(uint64, tag="1")] @@ -333,7 +309,6 @@ pub struct EventKey { #[prost(string, tag="2")] pub account_address: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UserTransactionRequest { #[prost(string, tag="1")] @@ -351,7 +326,6 @@ pub struct UserTransactionRequest { #[prost(message, optional, tag="7")] pub signature: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteSet { #[prost(enumeration="write_set::WriteSetType", tag="1")] @@ -375,9 +349,9 @@ pub mod write_set { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - WriteSetType::Unspecified => "WRITE_SET_TYPE_UNSPECIFIED", - WriteSetType::ScriptWriteSet => "WRITE_SET_TYPE_SCRIPT_WRITE_SET", - WriteSetType::DirectWriteSet => "WRITE_SET_TYPE_DIRECT_WRITE_SET", + Self::Unspecified => "WRITE_SET_TYPE_UNSPECIFIED", + Self::ScriptWriteSet => "WRITE_SET_TYPE_SCRIPT_WRITE_SET", + Self::DirectWriteSet => "WRITE_SET_TYPE_DIRECT_WRITE_SET", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -390,8 +364,7 @@ pub mod write_set { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum WriteSet { #[prost(message, tag="2")] ScriptWriteSet(super::ScriptWriteSet), @@ -399,7 +372,6 @@ pub mod write_set { DirectWriteSet(super::DirectWriteSet), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ScriptWriteSet { #[prost(string, tag="1")] @@ -407,7 +379,6 @@ pub struct ScriptWriteSet { #[prost(message, optional, tag="2")] pub script: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DirectWriteSet { #[prost(message, repeated, tag="1")] @@ -415,7 +386,6 @@ pub struct DirectWriteSet { #[prost(message, repeated, tag="2")] pub events: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteSetChange { #[prost(enumeration="write_set_change::Type", tag="1")] @@ -443,13 +413,13 @@ pub mod write_set_change { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::DeleteModule => "TYPE_DELETE_MODULE", - Type::DeleteResource => "TYPE_DELETE_RESOURCE", - Type::DeleteTableItem => "TYPE_DELETE_TABLE_ITEM", - Type::WriteModule => "TYPE_WRITE_MODULE", - Type::WriteResource => "TYPE_WRITE_RESOURCE", - Type::WriteTableItem => "TYPE_WRITE_TABLE_ITEM", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::DeleteModule => "TYPE_DELETE_MODULE", + Self::DeleteResource => "TYPE_DELETE_RESOURCE", + Self::DeleteTableItem => "TYPE_DELETE_TABLE_ITEM", + Self::WriteModule => "TYPE_WRITE_MODULE", + Self::WriteResource => "TYPE_WRITE_RESOURCE", + Self::WriteTableItem => "TYPE_WRITE_TABLE_ITEM", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -466,8 +436,7 @@ pub mod write_set_change { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Change { #[prost(message, tag="2")] DeleteModule(super::DeleteModule), @@ -483,7 +452,6 @@ pub mod write_set_change { WriteTableItem(super::WriteTableItem), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteModule { #[prost(string, tag="1")] @@ -493,7 +461,6 @@ pub struct DeleteModule { #[prost(message, optional, tag="3")] pub module: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteResource { #[prost(string, tag="1")] @@ -505,7 +472,6 @@ pub struct DeleteResource { #[prost(string, tag="4")] pub type_str: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteTableItem { #[prost(bytes="vec", tag="1")] @@ -517,7 +483,6 @@ pub struct DeleteTableItem { #[prost(message, optional, tag="4")] pub data: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteTableData { #[prost(string, tag="1")] @@ -525,7 +490,6 @@ pub struct DeleteTableData { #[prost(string, tag="2")] pub key_type: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteModule { #[prost(string, tag="1")] @@ -535,7 +499,6 @@ pub struct WriteModule { #[prost(message, optional, tag="3")] pub data: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteResource { #[prost(string, tag="1")] @@ -549,7 +512,6 @@ pub struct WriteResource { #[prost(string, tag="5")] pub data: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteTableData { #[prost(string, tag="1")] @@ -561,7 +523,6 @@ pub struct WriteTableData { #[prost(string, tag="4")] pub value_type: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteTableItem { #[prost(bytes="vec", tag="1")] @@ -573,7 +534,6 @@ pub struct WriteTableItem { #[prost(message, optional, tag="4")] pub data: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionPayload { #[prost(enumeration="transaction_payload::Type", tag="1")] @@ -599,11 +559,11 @@ pub mod transaction_payload { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", - Type::ScriptPayload => "TYPE_SCRIPT_PAYLOAD", - Type::WriteSetPayload => "TYPE_WRITE_SET_PAYLOAD", - Type::MultisigPayload => "TYPE_MULTISIG_PAYLOAD", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", + Self::ScriptPayload => "TYPE_SCRIPT_PAYLOAD", + Self::WriteSetPayload => "TYPE_WRITE_SET_PAYLOAD", + Self::MultisigPayload => "TYPE_MULTISIG_PAYLOAD", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -618,8 +578,7 @@ pub mod transaction_payload { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Payload { #[prost(message, tag="2")] EntryFunctionPayload(super::EntryFunctionPayload), @@ -631,7 +590,6 @@ pub mod transaction_payload { MultisigPayload(super::MultisigPayload), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EntryFunctionPayload { #[prost(message, optional, tag="1")] @@ -643,7 +601,6 @@ pub struct EntryFunctionPayload { #[prost(string, tag="4")] pub entry_function_id_str: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveScriptBytecode { #[prost(bytes="vec", tag="1")] @@ -651,7 +608,6 @@ pub struct MoveScriptBytecode { #[prost(message, optional, tag="2")] pub abi: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ScriptPayload { #[prost(message, optional, tag="1")] @@ -661,7 +617,6 @@ pub struct ScriptPayload { #[prost(string, repeated, tag="3")] pub arguments: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MultisigPayload { #[prost(string, tag="1")] @@ -669,7 +624,6 @@ pub struct MultisigPayload { #[prost(message, optional, tag="2")] pub transaction_payload: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MultisigTransactionPayload { #[prost(enumeration="multisig_transaction_payload::Type", tag="1")] @@ -692,8 +646,8 @@ pub mod multisig_transaction_payload { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -705,14 +659,12 @@ pub mod multisig_transaction_payload { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Payload { #[prost(message, tag="2")] EntryFunctionPayload(super::EntryFunctionPayload), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveModuleBytecode { #[prost(bytes="vec", tag="1")] @@ -720,7 +672,6 @@ pub struct MoveModuleBytecode { #[prost(message, optional, tag="2")] pub abi: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveModule { #[prost(string, tag="1")] @@ -734,7 +685,6 @@ pub struct MoveModule { #[prost(message, repeated, tag="5")] pub structs: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveFunction { #[prost(string, tag="1")] @@ -767,10 +717,10 @@ pub mod move_function { /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
pub fn as_str_name(&self) -> &'static str { match self { - Visibility::Unspecified => "VISIBILITY_UNSPECIFIED", - Visibility::Private => "VISIBILITY_PRIVATE", - Visibility::Public => "VISIBILITY_PUBLIC", - Visibility::Friend => "VISIBILITY_FRIEND", + Self::Unspecified => "VISIBILITY_UNSPECIFIED", + Self::Private => "VISIBILITY_PRIVATE", + Self::Public => "VISIBILITY_PUBLIC", + Self::Friend => "VISIBILITY_FRIEND", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -785,7 +735,6 @@ pub mod move_function { } } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveStruct { #[prost(string, tag="1")] @@ -801,7 +750,6 @@ pub struct MoveStruct { #[prost(message, repeated, tag="5")] pub fields: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveStructGenericTypeParam { #[prost(enumeration="MoveAbility", repeated, tag="1")] @@ -809,7 +757,6 @@ pub struct MoveStructGenericTypeParam { #[prost(bool, tag="2")] pub is_phantom: bool, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveStructField { #[prost(string, tag="1")] @@ -817,13 +764,11 @@ pub struct MoveStructField { #[prost(message, optional, tag="2")] pub r#type: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveFunctionGenericTypeParam { #[prost(enumeration="MoveAbility", repeated, tag="1")] pub constraints: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveType { #[prost(enumeration="MoveTypes", tag="1")] @@ -833,16 +778,14 @@ pub struct MoveType { } /// Nested message and enum types in `MoveType`. 
pub mod move_type { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReferenceType { #[prost(bool, tag="1")] pub mutable: bool, #[prost(message, optional, boxed, tag="2")] pub to: ::core::option::Option<::prost::alloc::boxed::Box>, } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Content { #[prost(message, tag="3")] Vector(::prost::alloc::boxed::Box), @@ -856,13 +799,11 @@ pub mod move_type { Unparsable(::prost::alloc::string::String), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteSetPayload { #[prost(message, optional, tag="1")] pub write_set: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EntryFunctionId { #[prost(message, optional, tag="1")] @@ -870,7 +811,6 @@ pub struct EntryFunctionId { #[prost(string, tag="2")] pub name: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveModuleId { #[prost(string, tag="1")] @@ -878,7 +818,6 @@ pub struct MoveModuleId { #[prost(string, tag="2")] pub name: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveStructTag { #[prost(string, tag="1")] @@ -890,7 +829,6 @@ pub struct MoveStructTag { #[prost(message, repeated, tag="4")] pub generic_type_params: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Signature { #[prost(enumeration="signature::Type", tag="1")] @@ -917,12 +855,12 @@ pub mod signature { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::Ed25519 => "TYPE_ED25519", - Type::MultiEd25519 => "TYPE_MULTI_ED25519", - Type::MultiAgent => "TYPE_MULTI_AGENT", - Type::FeePayer => "TYPE_FEE_PAYER", - Type::SingleSender => "TYPE_SINGLE_SENDER", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::Ed25519 => "TYPE_ED25519", + Self::MultiEd25519 => "TYPE_MULTI_ED25519", + Self::MultiAgent => "TYPE_MULTI_AGENT", + Self::FeePayer => "TYPE_FEE_PAYER", + Self::SingleSender => "TYPE_SINGLE_SENDER", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -938,8 +876,7 @@ pub mod signature { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Signature { #[prost(message, tag="2")] Ed25519(super::Ed25519Signature), @@ -954,7 +891,6 @@ pub mod signature { SingleSender(super::SingleSender), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Ed25519Signature { #[prost(bytes="vec", tag="1")] @@ -962,7 +898,6 @@ pub struct Ed25519Signature { #[prost(bytes="vec", tag="2")] pub signature: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MultiEd25519Signature { #[prost(bytes="vec", repeated, tag="1")] @@ -974,7 +909,6 @@ pub struct MultiEd25519Signature { #[prost(uint32, repeated, tag="4")] pub public_key_indices: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MultiAgentSignature { #[prost(message, optional, tag="1")] @@ -984,7 +918,6 @@ pub struct MultiAgentSignature { #[prost(message, repeated, tag="3")] pub secondary_signers: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FeePayerSignature { #[prost(message, optional, tag="1")] @@ -998,7 +931,6 @@ pub struct FeePayerSignature { #[prost(message, optional, tag="5")] pub fee_payer_signer: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AnyPublicKey { #[prost(enumeration="any_public_key::Type", tag="1")] @@ -1025,12 +957,12 @@ pub mod any_public_key { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::Ed25519 => "TYPE_ED25519", - Type::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA", - Type::Secp256r1Ecdsa => "TYPE_SECP256R1_ECDSA", - Type::Keyless => "TYPE_KEYLESS", - Type::FederatedKeyless => "TYPE_FEDERATED_KEYLESS", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::Ed25519 => "TYPE_ED25519", + Self::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA", + Self::Secp256r1Ecdsa => "TYPE_SECP256R1_ECDSA", + Self::Keyless => "TYPE_KEYLESS", + Self::FederatedKeyless => "TYPE_FEDERATED_KEYLESS", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -1047,7 +979,6 @@ pub mod any_public_key { } } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AnySignature { #[prost(enumeration="any_signature::Type", tag="1")] @@ -1079,11 +1010,11 @@ pub mod any_signature { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::Ed25519 => "TYPE_ED25519", - Type::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA", - Type::Webauthn => "TYPE_WEBAUTHN", - Type::Keyless => "TYPE_KEYLESS", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::Ed25519 => "TYPE_ED25519", + Self::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA", + Self::Webauthn => "TYPE_WEBAUTHN", + Self::Keyless => "TYPE_KEYLESS", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -1099,8 +1030,7 @@ pub mod any_signature { } } /// Support: >= 1.10. 
- #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum SignatureVariant { #[prost(message, tag="3")] Ed25519(super::Ed25519), @@ -1112,31 +1042,26 @@ pub mod any_signature { Keyless(super::Keyless), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Ed25519 { #[prost(bytes="vec", tag="1")] pub signature: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Secp256k1Ecdsa { #[prost(bytes="vec", tag="1")] pub signature: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WebAuthn { #[prost(bytes="vec", tag="1")] pub signature: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Keyless { #[prost(bytes="vec", tag="1")] pub signature: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SingleKeySignature { #[prost(message, optional, tag="1")] @@ -1144,7 +1069,6 @@ pub struct SingleKeySignature { #[prost(message, optional, tag="2")] pub signature: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct IndexedSignature { #[prost(uint32, tag="1")] @@ -1152,7 +1076,6 @@ pub struct IndexedSignature { #[prost(message, optional, tag="2")] pub signature: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MultiKeySignature { #[prost(message, repeated, tag="1")] @@ -1162,7 +1085,6 @@ pub struct MultiKeySignature { #[prost(uint32, tag="3")] pub signatures_required: u32, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AbstractionSignature { #[prost(string, tag="1")] @@ -1170,13 +1092,11 @@ pub struct AbstractionSignature { #[prost(bytes="vec", tag="2")] pub signature: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SingleSender { #[prost(message, optional, tag="1")] pub sender: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountSignature { #[prost(enumeration="account_signature::Type", tag="1")] @@ -1203,12 +1123,12 @@ pub mod account_signature { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::Ed25519 => "TYPE_ED25519", - Type::MultiEd25519 => "TYPE_MULTI_ED25519", - Type::SingleKey => "TYPE_SINGLE_KEY", - Type::MultiKey => "TYPE_MULTI_KEY", - Type::Abstraction => "TYPE_ABSTRACTION", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::Ed25519 => "TYPE_ED25519", + Self::MultiEd25519 => "TYPE_MULTI_ED25519", + Self::SingleKey => "TYPE_SINGLE_KEY", + Self::MultiKey => "TYPE_MULTI_KEY", + Self::Abstraction => "TYPE_ABSTRACTION", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -1224,8 +1144,7 @@ pub mod account_signature { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Signature { #[prost(message, tag="2")] Ed25519(super::Ed25519Signature), @@ -1240,7 +1159,6 @@ pub mod account_signature { Abstraction(super::AbstractionSignature), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionSizeInfo { #[prost(uint32, tag="1")] @@ -1250,7 +1168,6 @@ pub struct TransactionSizeInfo { #[prost(message, repeated, tag="3")] pub write_op_size_info: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct EventSizeInfo { #[prost(uint32, tag="1")] @@ -1258,7 +1175,6 @@ pub struct EventSizeInfo { #[prost(uint32, tag="2")] pub total_bytes: u32, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct WriteOpSizeInfo { #[prost(uint32, tag="1")] @@ -1297,21 +1213,21 @@ impl MoveTypes { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - MoveTypes::Unspecified => "MOVE_TYPES_UNSPECIFIED", - MoveTypes::Bool => "MOVE_TYPES_BOOL", - MoveTypes::U8 => "MOVE_TYPES_U8", - MoveTypes::U16 => "MOVE_TYPES_U16", - MoveTypes::U32 => "MOVE_TYPES_U32", - MoveTypes::U64 => "MOVE_TYPES_U64", - MoveTypes::U128 => "MOVE_TYPES_U128", - MoveTypes::U256 => "MOVE_TYPES_U256", - MoveTypes::Address => "MOVE_TYPES_ADDRESS", - MoveTypes::Signer => "MOVE_TYPES_SIGNER", - MoveTypes::Vector => "MOVE_TYPES_VECTOR", - MoveTypes::Struct => "MOVE_TYPES_STRUCT", - MoveTypes::GenericTypeParam => "MOVE_TYPES_GENERIC_TYPE_PARAM", - MoveTypes::Reference => "MOVE_TYPES_REFERENCE", - MoveTypes::Unparsable => "MOVE_TYPES_UNPARSABLE", + Self::Unspecified => "MOVE_TYPES_UNSPECIFIED", + Self::Bool => "MOVE_TYPES_BOOL", + Self::U8 => "MOVE_TYPES_U8", + Self::U16 => "MOVE_TYPES_U16", + Self::U32 => "MOVE_TYPES_U32", + Self::U64 => "MOVE_TYPES_U64", + Self::U128 => "MOVE_TYPES_U128", + Self::U256 => "MOVE_TYPES_U256", + Self::Address => "MOVE_TYPES_ADDRESS", + Self::Signer => "MOVE_TYPES_SIGNER", + Self::Vector => "MOVE_TYPES_VECTOR", + Self::Struct => "MOVE_TYPES_STRUCT", + Self::GenericTypeParam => "MOVE_TYPES_GENERIC_TYPE_PARAM", + Self::Reference => "MOVE_TYPES_REFERENCE", + Self::Unparsable => "MOVE_TYPES_UNPARSABLE", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -1352,11 +1268,11 @@ impl MoveAbility { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - MoveAbility::Unspecified => "MOVE_ABILITY_UNSPECIFIED", - MoveAbility::Copy => "MOVE_ABILITY_COPY", - MoveAbility::Drop => "MOVE_ABILITY_DROP", - MoveAbility::Store => "MOVE_ABILITY_STORE", - MoveAbility::Key => "MOVE_ABILITY_KEY", + Self::Unspecified => "MOVE_ABILITY_UNSPECIFIED", + Self::Copy => "MOVE_ABILITY_COPY", + Self::Drop => "MOVE_ABILITY_DROP", + Self::Store => "MOVE_ABILITY_STORE", + Self::Key => "MOVE_ABILITY_KEY", } } /// Creates an enum from field names used in the ProtoBuf definition. 
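Most of the Rust hunks above are mechanical churn from regenerating with newer prost/tonic code generators: the per-message `#[allow(clippy::derive_partial_eq_without_eq)]` attributes are dropped, `as_str_name()` match arms use `Self::`, trait bounds are spelled out as `std::marker::Send`/`std::marker::Sync`, client stubs call `tonic::Status::unknown(..)` instead of `Status::new(Code::Unknown, ..)`, the unimplemented-method fallback builds its response through `headers_mut()`, and every generated server module now exports a public `SERVICE_NAME` constant that backs its `NamedService` impl. (The behavioral `chainId` serialization change is covered in the note after the TypeScript hunks below.) A minimal sketch of the `SERVICE_NAME` pattern, assuming only a `tonic` dependency; `ExampleServer` and the `main` usage are illustrative and not part of the generated code:

```rust
// Standalone sketch of the pattern the regenerated tonic modules now emit:
// a public SERVICE_NAME constant reused by the NamedService impl, so callers
// can reference the fully qualified service name without hard-coding it.
use tonic::server::NamedService;

pub const SERVICE_NAME: &str = "aptos.indexer.v1.RawData";

#[derive(Default)]
pub struct ExampleServer;

impl NamedService for ExampleServer {
    const NAME: &'static str = SERVICE_NAME;
}

fn main() {
    // Handy for logs, metrics labels, or building gRPC request paths.
    println!("/{}/GetTransactions", <ExampleServer as NamedService>::NAME);
}
```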
diff --git a/protos/rust/src/pb/aptos.util.timestamp.rs b/protos/rust/src/pb/aptos.util.timestamp.rs
index df8a9f30f8a73..f746dba3fd7b1 100644
--- a/protos/rust/src/pb/aptos.util.timestamp.rs
+++ b/protos/rust/src/pb/aptos.util.timestamp.rs
@@ -3,7 +3,6 @@
 
 // @generated
 // This file is @generated by prost-build.
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct Timestamp {
     /// Represents seconds of UTC time since Unix epoch
diff --git a/protos/typescript/src/aptos/indexer/v1/grpc.ts b/protos/typescript/src/aptos/indexer/v1/grpc.ts
index 20ecfd84421e7..a3205dd03192d 100644
--- a/protos/typescript/src/aptos/indexer/v1/grpc.ts
+++ b/protos/typescript/src/aptos/indexer/v1/grpc.ts
@@ -594,7 +594,7 @@ export const StreamInfo = {
 
 function createBaseLiveDataServiceInfo(): LiveDataServiceInfo {
   return {
-    chainId: undefined,
+    chainId: BigInt("0"),
     timestamp: undefined,
     knownLatestVersion: undefined,
     streamInfo: undefined,
@@ -604,7 +604,7 @@ function createBaseLiveDataServiceInfo(): LiveDataServiceInfo {
 
 export const LiveDataServiceInfo = {
   encode(message: LiveDataServiceInfo, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
-    if (message.chainId !== undefined) {
+    if (message.chainId !== undefined && message.chainId !== BigInt("0")) {
       if (BigInt.asUintN(64, message.chainId) !== message.chainId) {
         throw new globalThis.Error("value provided for field message.chainId of type uint64 too large");
       }
@@ -718,7 +718,7 @@ export const LiveDataServiceInfo = {
 
   fromJSON(object: any): LiveDataServiceInfo {
     return {
-      chainId: isSet(object.chainId) ? BigInt(object.chainId) : undefined,
+      chainId: isSet(object.chainId) ? BigInt(object.chainId) : BigInt("0"),
       timestamp: isSet(object.timestamp) ? Timestamp.fromJSON(object.timestamp) : undefined,
       knownLatestVersion: isSet(object.knownLatestVersion) ? BigInt(object.knownLatestVersion) : undefined,
       streamInfo: isSet(object.streamInfo) ? StreamInfo.fromJSON(object.streamInfo) : undefined,
@@ -728,7 +728,7 @@ export const LiveDataServiceInfo = {
 
   toJSON(message: LiveDataServiceInfo): unknown {
     const obj: any = {};
-    if (message.chainId !== undefined) {
+    if (message.chainId !== undefined && message.chainId !== BigInt("0")) {
       obj.chainId = message.chainId.toString();
     }
     if (message.timestamp !== undefined) {
@@ -751,7 +751,7 @@ export const LiveDataServiceInfo = {
   },
   fromPartial(object: DeepPartial<LiveDataServiceInfo>): LiveDataServiceInfo {
     const message = createBaseLiveDataServiceInfo();
-    message.chainId = object.chainId ?? undefined;
+    message.chainId = object.chainId ?? BigInt("0");
     message.timestamp = (object.timestamp !== undefined && object.timestamp !== null)
       ? Timestamp.fromPartial(object.timestamp)
       : undefined;
@@ -765,12 +765,12 @@ export const LiveDataServiceInfo = {
 };
 
 function createBaseHistoricalDataServiceInfo(): HistoricalDataServiceInfo {
-  return { chainId: undefined, timestamp: undefined, knownLatestVersion: undefined, streamInfo: undefined };
+  return { chainId: BigInt("0"), timestamp: undefined, knownLatestVersion: undefined, streamInfo: undefined };
 }
 
 export const HistoricalDataServiceInfo = {
   encode(message: HistoricalDataServiceInfo, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
-    if (message.chainId !== undefined) {
+    if (message.chainId !== undefined && message.chainId !== BigInt("0")) {
       if (BigInt.asUintN(64, message.chainId) !== message.chainId) {
         throw new globalThis.Error("value provided for field message.chainId of type uint64 too large");
       }
@@ -871,7 +871,7 @@ export const HistoricalDataServiceInfo = {
 
   fromJSON(object: any): HistoricalDataServiceInfo {
     return {
-      chainId: isSet(object.chainId) ? BigInt(object.chainId) : undefined,
+      chainId: isSet(object.chainId) ? BigInt(object.chainId) : BigInt("0"),
       timestamp: isSet(object.timestamp) ? Timestamp.fromJSON(object.timestamp) : undefined,
       knownLatestVersion: isSet(object.knownLatestVersion) ? BigInt(object.knownLatestVersion) : undefined,
       streamInfo: isSet(object.streamInfo) ? StreamInfo.fromJSON(object.streamInfo) : undefined,
@@ -880,7 +880,7 @@ export const HistoricalDataServiceInfo = {
 
   toJSON(message: HistoricalDataServiceInfo): unknown {
     const obj: any = {};
-    if (message.chainId !== undefined) {
+    if (message.chainId !== undefined && message.chainId !== BigInt("0")) {
       obj.chainId = message.chainId.toString();
     }
     if (message.timestamp !== undefined) {
@@ -900,7 +900,7 @@ export const HistoricalDataServiceInfo = {
   },
   fromPartial(object: DeepPartial<HistoricalDataServiceInfo>): HistoricalDataServiceInfo {
     const message = createBaseHistoricalDataServiceInfo();
-    message.chainId = object.chainId ?? undefined;
+    message.chainId = object.chainId ?? BigInt("0");
     message.timestamp = (object.timestamp !== undefined && object.timestamp !== null)
       ? Timestamp.fromPartial(object.timestamp)
       : undefined;
@@ -913,12 +913,12 @@ export const HistoricalDataServiceInfo = {
 };
 
 function createBaseFullnodeInfo(): FullnodeInfo {
-  return { chainId: undefined, timestamp: undefined, knownLatestVersion: undefined };
+  return { chainId: BigInt("0"), timestamp: undefined, knownLatestVersion: undefined };
 }
 
 export const FullnodeInfo = {
   encode(message: FullnodeInfo, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
-    if (message.chainId !== undefined) {
+    if (message.chainId !== undefined && message.chainId !== BigInt("0")) {
       if (BigInt.asUintN(64, message.chainId) !== message.chainId) {
         throw new globalThis.Error("value provided for field message.chainId of type uint64 too large");
       }
@@ -1007,7 +1007,7 @@ export const FullnodeInfo = {
 
   fromJSON(object: any): FullnodeInfo {
     return {
-      chainId: isSet(object.chainId) ? BigInt(object.chainId) : undefined,
+      chainId: isSet(object.chainId) ? BigInt(object.chainId) : BigInt("0"),
       timestamp: isSet(object.timestamp) ? Timestamp.fromJSON(object.timestamp) : undefined,
       knownLatestVersion: isSet(object.knownLatestVersion) ? BigInt(object.knownLatestVersion) : undefined,
     };
@@ -1015,7 +1015,7 @@ export const FullnodeInfo = {
 
   toJSON(message: FullnodeInfo): unknown {
     const obj: any = {};
-    if (message.chainId !== undefined) {
+    if (message.chainId !== undefined && message.chainId !== BigInt("0")) {
       obj.chainId = message.chainId.toString();
     }
     if (message.timestamp !== undefined) {
@@ -1032,7 +1032,7 @@ export const FullnodeInfo = {
   },
   fromPartial(object: DeepPartial<FullnodeInfo>): FullnodeInfo {
     const message = createBaseFullnodeInfo();
-    message.chainId = object.chainId ?? undefined;
+    message.chainId = object.chainId ?? BigInt("0");
     message.timestamp = (object.timestamp !== undefined && object.timestamp !== null)
       ? Timestamp.fromPartial(object.timestamp)
       : undefined;
@@ -1042,12 +1042,12 @@ export const FullnodeInfo = {
 };
 
 function createBaseGrpcManagerInfo(): GrpcManagerInfo {
-  return { chainId: undefined, timestamp: undefined, knownLatestVersion: undefined, masterAddress: undefined };
+  return { chainId: BigInt("0"), timestamp: undefined, knownLatestVersion: undefined, masterAddress: undefined };
 }
 
 export const GrpcManagerInfo = {
   encode(message: GrpcManagerInfo, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
-    if (message.chainId !== undefined) {
+    if (message.chainId !== undefined && message.chainId !== BigInt("0")) {
       if (BigInt.asUintN(64, message.chainId) !== message.chainId) {
         throw new globalThis.Error("value provided for field message.chainId of type uint64 too large");
       }
@@ -1146,7 +1146,7 @@ export const GrpcManagerInfo = {
 
   fromJSON(object: any): GrpcManagerInfo {
     return {
-      chainId: isSet(object.chainId) ? BigInt(object.chainId) : undefined,
+      chainId: isSet(object.chainId) ? BigInt(object.chainId) : BigInt("0"),
       timestamp: isSet(object.timestamp) ? Timestamp.fromJSON(object.timestamp) : undefined,
       knownLatestVersion: isSet(object.knownLatestVersion) ? BigInt(object.knownLatestVersion) : undefined,
       masterAddress: isSet(object.masterAddress) ? globalThis.String(object.masterAddress) : undefined,
@@ -1155,7 +1155,7 @@ export const GrpcManagerInfo = {
 
   toJSON(message: GrpcManagerInfo): unknown {
     const obj: any = {};
-    if (message.chainId !== undefined) {
+    if (message.chainId !== undefined && message.chainId !== BigInt("0")) {
       obj.chainId = message.chainId.toString();
     }
     if (message.timestamp !== undefined) {
@@ -1175,7 +1175,7 @@ export const GrpcManagerInfo = {
   },
   fromPartial(object: DeepPartial<GrpcManagerInfo>): GrpcManagerInfo {
     const message = createBaseGrpcManagerInfo();
-    message.chainId = object.chainId ?? undefined;
+    message.chainId = object.chainId ?? BigInt("0");
     message.timestamp = (object.timestamp !== undefined && object.timestamp !== null)
       ? Timestamp.fromPartial(object.timestamp)
       : undefined;