diff --git a/.gitignore b/.gitignore
index fd552c93..b988c1dd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@
 /registry-cli
 /db
 test-ledger/
+.vscode
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..54640152
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,21 @@
+FROM alpine:3.14
+
+RUN apk update
+RUN apk upgrade
+RUN apk add --no-cache bash sqlite rust cargo openssl-dev eudev-dev linux-headers
+
+WORKDIR /usr/local/
+
+ADD program program/
+ADD cli cli/
+ADD bot bot/
+ADD sql sql/
+ADD Cargo.lock .
+ADD Cargo.toml .
+ADD clean-score-all-mainnet.bash .
+ADD score-all-mainnet.sh .
+ADD import-into-sqlite.sh .
+
+RUN cargo build
+
+CMD ./clean-score-all-mainnet.bash
diff --git a/RELEASE.md b/RELEASE.md
index 6dbfb6bf..596c7a85 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,11 +1,14 @@
+# stake-o-matic
 
-The release of the binaries is fully automated. Do not create a Github release
-manually.
+## Build with cargo and run
+```bash
+cargo build
+wget "https://github.com/marinade-finance/staking-status/raw/main/scores.sqlite3" -O "db/score-sqlite3.db"
+./clean-score-all-mainnet.bash
+```
 
-#### Release Process
-1. Create a new tag for the next available release number, see
-   https://github.com/solana-labs/stake-o-matic/tags, and push it to the repo:
-   eg, `git tag v42 && git push origin v42`
-2. The GitHub workflow automatically triggers a new build, creates a release
-   with the name of the tag, and uploads the release artifacts. You can monitor
-   the release process at https://github.com/solana-labs/stake-o-matic/actions
+## Build with docker and run
+```bash
+./docker-build.bash
+./docker-run.bash
+```
diff --git a/bot/src/data_center_info.rs b/bot/src/data_center_info.rs
index fa236292..49061d97 100644
--- a/bot/src/data_center_info.rs
+++ b/bot/src/data_center_info.rs
@@ -1,5 +1,5 @@
 use {
-    crate::validators_app,
+    crate::{validators_app, ByIdentityInfo},
     log::*,
     serde::{Deserialize, Serialize},
     solana_sdk::pubkey::Pubkey,
@@ -75,7 +75,7 @@ impl std::fmt::Display for DataCenterInfo {
 #[derive(Debug, Default)]
 pub struct DataCenters {
     pub info: Vec<DataCenterInfo>,
-    pub by_identity: HashMap<Pubkey, DataCenterId>,
+    pub by_identity: HashMap<Pubkey, ByIdentityInfo>,
 }
 
 pub fn get(cluster: &str) -> Result<DataCenters, Box<dyn std::error::Error>> {
@@ -123,7 +123,15 @@
         })
         .unwrap_or_default();
 
-        by_identity.insert(identity, data_center_id.clone());
+        by_identity.insert(
+            identity,
+            ByIdentityInfo {
+                data_center_id: data_center_id.clone(),
+                keybase_id: String::from(v.keybase_id.as_deref().unwrap_or("")),
+                name: String::from(v.name.as_deref().unwrap_or("")),
+                www_url: String::from(v.www_url.as_deref().unwrap_or("")),
+            },
+        );
 
         let mut data_center_info = data_center_map
             .entry(data_center_id.clone())
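`by_identity` now maps a validator identity to a full `ByIdentityInfo` record rather than only its `DataCenterId`. A runnable sketch of the new shape and the `unwrap_or_default()` fallback used later in `classify()` — with simplified stand-in types and `String` keys instead of `Pubkey`, so it compiles standalone (the field layout of `DataCenterId` here is assumed for illustration):

```rust
// Sketch only: stand-ins for the real DataCenterId / ByIdentityInfo types.
use std::collections::HashMap;

#[derive(Debug, Default, Clone)]
struct DataCenterId {
    asn: u64,         // assumed fields; the real type lives in data_center_info.rs
    location: String,
}

#[derive(Debug, Default, Clone)]
struct ByIdentityInfo {
    data_center_id: DataCenterId,
    keybase_id: String,
    name: String,
    www_url: String,
}

fn main() {
    let mut by_identity: HashMap<String, ByIdentityInfo> = HashMap::new();
    by_identity.insert(
        "validator-identity".into(),
        ByIdentityInfo {
            data_center_id: DataCenterId { asn: 12345, location: "FR".into() },
            keybase_id: "example_kb".into(),
            name: "Example Validator".into(),
            www_url: "https://example.org".into(),
        },
    );
    // A validator missing from validators.app falls back to Default —
    // empty strings and a default data center id — exactly like
    // `.cloned().unwrap_or_default()` in classify().
    let unknown = by_identity.get("unknown").cloned().unwrap_or_default();
    println!("{:?}", unknown);
}
```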
diff --git a/bot/src/db.rs b/bot/src/db.rs
index b96d6558..0584c683 100644
--- a/bot/src/db.rs
+++ b/bot/src/db.rs
@@ -2,6 +2,7 @@ use {
     crate::{
         data_center_info::{DataCenterId, DataCenterInfo},
         generic_stake_pool::ValidatorStakeState,
+        Config,
     },
     log::*,
     serde::{Deserialize, Serialize},
@@ -14,6 +15,33 @@ use {
     },
 };
 
+#[derive(Default, Clone, Deserialize, Serialize)]
+pub struct ScoreDiscounts {
+    pub can_halt_the_network_group: bool,
+}
+
+#[derive(Debug, Default, Clone, Deserialize, Serialize)]
+pub struct ByIdentityInfo {
+    pub data_center_id: DataCenterId,
+    pub keybase_id: String,
+    pub name: String,
+    pub www_url: String,
+}
+
+#[derive(Default, Clone, Deserialize, Serialize)]
+/// computed score (more granular than ValidatorStakeState)
+pub struct ScoreData {
+    /// epoch_credits is the base score
+    pub epoch_credits: u64,
+    /// 50 => average, 0 => worst, 100 => twice the average
+    pub average_position: f64,
+    pub score_discounts: ScoreDiscounts,
+    pub commission: u8,
+    pub active_stake: u64,
+    pub data_center_concentration: f64,
+    pub validators_app_info: ByIdentityInfo,
+}
+
 #[derive(Default, Clone, Deserialize, Serialize)]
 pub struct ValidatorClassification {
     pub identity: Pubkey, // Validator identity
@@ -22,6 +50,9 @@ pub struct ValidatorClassification {
     pub stake_state: ValidatorStakeState,
     pub stake_state_reason: String,
 
+    // added optional validator scoring data
+    pub score_data: Option<ScoreData>,
+
     // Summary of the action was taken this epoch to advance the validator's stake
     pub stake_action: Option<String>,
 
@@ -45,6 +76,50 @@ pub struct ValidatorClassification {
     pub prioritize_funding_in_next_epoch: Option<bool>,
 }
 
+impl ScoreData {
+    pub fn score(&self, config: &Config) -> u64 {
+        if self.score_discounts.can_halt_the_network_group
+            || self.active_stake < config.score_min_stake
+            || self.average_position < config.min_avg_position
+            // if config.min_avg_position = 100 => everybody passes
+            // if config.min_avg_position = 50 => only validators above avg pass
+            || self.commission > config.score_max_commission
+        {
+            0
+        } else {
+            // if data_center_concentration = 25%, lose all score;
+            // if data_center_concentration = 10%, lose 40% (rounded)
+            let discount_because_data_center_concentration = (self.data_center_concentration
+                * config.score_concentration_point_discount as f64)
+                as u64;
+
+            // score discounts according to commission:
+            // apply commission % as a discount to credits_observed.
+            // The rationale is:
+            // if you're the top-performing validator and get 300K credits, but you have 50% commission,
+            // from our user's point of view it's the same as a 150K-credits validator with 0% commission;
+            // both represent the same APY for the user.
+            // So to treat both the same we apply commission to self.epoch_credits
+            let discount_because_commission = self.commission as u64 * self.epoch_credits / 100;
+
+            // give extra score to above-average validators in order to increase APY for our users
+            let points_added_above_average: u64 = if self.average_position > 50.0 {
+                let above = self.average_position - 50.0;
+                let multiplier = if above * above > 25.0 { 25.0 } else { above * above };
+                (multiplier * self.epoch_credits as f64) as u64
+            } else {
+                0
+            };
+
+            //result
+            self.epoch_credits
+                .saturating_sub(discount_because_commission)
+                .saturating_sub(discount_because_data_center_concentration)
+                .saturating_add(points_added_above_average)
+        }
+    }
+}
+
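The discount-and-bonus arithmetic above is easier to see with concrete numbers. A self-contained sketch of `ScoreData::score()` with the relevant `Config` fields inlined as this patch's score-all defaults (all sample values invented):

```rust
// Worked example of ScoreData::score(); assumed thresholds: min stake 100 SOL,
// min_avg_position 50.0, max commission 10, concentration discount 1_500.
fn score(
    epoch_credits: u64,
    average_position: f64, // 50.0 = cluster average
    commission: u8,
    active_stake_lamports: u64,
    data_center_concentration: f64, // % of stake in the validator's data center
    can_halt_the_network_group: bool,
) -> u64 {
    const LAMPORTS_PER_SOL: u64 = 1_000_000_000;
    if can_halt_the_network_group
        || active_stake_lamports < 100 * LAMPORTS_PER_SOL
        || average_position < 50.0
        || commission > 10
    {
        return 0;
    }
    let concentration_discount = (data_center_concentration * 1_500f64) as u64;
    // commission taken as a straight percentage off the credits
    let commission_discount = commission as u64 * epoch_credits / 100;
    // above-average bonus: (position - 50)^2 capped at 25, times the credits
    let above = average_position - 50.0;
    let multiplier = (above * above).min(25.0);
    let bonus = (multiplier * epoch_credits as f64) as u64;
    epoch_credits
        .saturating_sub(commission_discount)
        .saturating_sub(concentration_discount)
        .saturating_add(bonus)
}

fn main() {
    // 300k credits, position 54.0, 5% commission, 3.2% concentration, 5000 SOL:
    let s = score(300_000, 54.0, 5, 5_000_000_000_000, 3.2, false);
    // 300_000 - 15_000 - 4_800 + 16 * 300_000 = 5_080_200
    println!("score = {}", s);
}
```

Note how the `(position - 50)²` bonus dominates the base credits once a validator is even a few points above average: at position 54 it adds 16× the epoch credits.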
 impl ValidatorClassification {
     pub fn stake_state_streak(&self) -> usize {
         let mut streak = 1;
diff --git a/bot/src/main.rs b/bot/src/main.rs
index b94e8652..1ed2e119 100644
--- a/bot/src/main.rs
+++ b/bot/src/main.rs
@@ -187,7 +187,7 @@ impl std::fmt::Display for Cluster {
 }
 
 #[derive(Debug)]
-struct Config {
+pub struct Config {
     json_rpc_url: String,
     cluster: Cluster,
     db_path: PathBuf,
@@ -196,6 +196,17 @@ struct Config {
     dry_run: bool,
 
+    /// compute score for all validators in the cluster
+    score_all: bool,
+    /// max commission accepted to score (0-100)
+    score_max_commission: u8,
+    /// score min stake required
+    score_min_stake: u64,
+    /// score discount per concentration percentage point
+    score_concentration_point_discount: u32,
+    /// min average position considering credits_observed, 50.0 = average
+    min_avg_position: f64,
+
     /// Quality validators produce within this percentage of the cluster average skip rate over
     /// the previous epoch
     quality_block_producer_percentage: usize,
@@ -226,7 +237,7 @@ struct Config {
     /// None: skip infrastructure concentration check
     max_infrastructure_concentration: Option<f64>,
 
-    /// How validators with infrastruction concentration above `max_infrastructure_concentration`
+    /// How validators with infrastructure concentration above `max_infrastructure_concentration`
     /// will be affected. Accepted values are:
     /// 1) "warn" - Stake unaffected. A warning message is notified
     /// 2) "destake" - Removes all validator stake
@@ -273,6 +284,11 @@ impl Config {
             require_classification: false,
             markdown_path: None,
             dry_run: true,
+            score_all: false,
+            score_max_commission: 8,
+            score_min_stake: sol_to_lamports(100.0),
+            score_concentration_point_discount: 1_500,
+            min_avg_position: 40.0,
             quality_block_producer_percentage: 15,
             max_poor_block_producer_percentage: 20,
             max_commission: 100,
@@ -293,7 +309,9 @@ impl Config {
     }
 
     fn cluster_db_path_for(&self, cluster: Cluster) -> PathBuf {
-        self.db_path.join(format!("data-{}", cluster))
+        // store the db in a different dir for score-all so it does not interfere with the SPL-stake-pool distribution data
+        let dir = if self.score_all { "score-all" } else { "data" };
+        self.db_path.join(format!("{}-{}", dir, cluster))
     }
 
     fn cluster_db_path(&self) -> PathBuf {
@@ -317,7 +335,7 @@ fn app_version() -> String {
     })
 }
 
-fn get_config() -> BoxResult<(Config, RpcClient, Box<dyn GenericStakePool>)> {
+fn get_config() -> BoxResult<(Config, RpcClient, Option<Box<dyn GenericStakePool>>)> {
     let default_confirmed_block_cache_path = default_confirmed_block_cache_path()
         .to_str()
         .unwrap()
@@ -602,6 +620,44 @@ fn get_config() -> BoxResult<(Config, RpcClient, Box<dyn GenericStakePool>)> {
                 .validator(is_amount)
             )
         )
+        .subcommand(
+            SubCommand::with_name("score-all").about("Score all validators in the cluster")
+                .arg(
+                    Arg::with_name("score_max_commission")
+                        .long("score-max-commission")
+                        .takes_value(true)
+                        .required(false)
+                        .help("scoring max accepted commission")
+                )
+                .arg(
+                    Arg::with_name("score_min_stake")
+                        .long("score-min-stake")
+                        .takes_value(true)
+                        .required(false)
+                        .help("scoring min stake required")
+                )
+                .arg(
+                    Arg::with_name("commission_point_discount")
+                        .long("commission-point-discount")
+                        .takes_value(true)
+                        .required(false)
+                        .help("score to discount for each commission point")
+                )
+                .arg(
+                    Arg::with_name("concentration_point_discount")
+                        .long("concentration-point-discount")
+                        .takes_value(true)
+                        .required(false)
+                        .help("score to discount for each concentration percentage point")
+                )
+                .arg(
+                    Arg::with_name("min_avg_position")
+                        .long("min-avg-position")
+                        .takes_value(true)
+                        .required(false)
+                        .help("min avg position required considering epoch_credits")
+                )
+        )
         .get_matches();
 
     let dry_run = !matches.is_present("confirm");
@@ -665,6 +721,24 @@ fn get_config() -> BoxResult<(Config, RpcClient, Box<dyn GenericStakePool>)> {
     )
     .unwrap();
 
+    // score-all command and arguments
+    let (
+        score_all,
+        score_max_commission,
+        score_min_stake,
+        score_concentration_point_discount,
+        min_avg_position,
+    ) = match matches.subcommand() {
+        ("score-all", Some(matches)) => (
+            true,
+            value_t!(matches, "score_max_commission", u8).unwrap_or(10),
+            value_t!(matches, "score_min_stake", u64).unwrap_or(sol_to_lamports(100.0)),
+            value_t!(matches, "concentration_point_discount", u32).unwrap_or(2000),
+            value_t!(matches, "min_avg_position", f64).unwrap_or(50.0),
+        ),
+        _ => (false, 0, 0, 0, 0.0),
+    };
+
     let config = Config {
         json_rpc_url,
         cluster,
@@ -672,6 +746,11 @@ fn get_config() -> BoxResult<(Config, RpcClient, Box<dyn GenericStakePool>)> {
         require_classification,
         markdown_path,
         dry_run,
+        score_all,
+        score_max_commission,
+        score_min_stake,
+        score_concentration_point_discount,
+        min_avg_position,
         quality_block_producer_percentage,
         max_poor_block_producer_percentage,
         max_commission,
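A minimal sketch of the `value_t!`/`unwrap_or` pattern used above, with a hypothetical `demo` binary (clap 2.x style, as the bot uses): an omitted or unparsable flag silently falls back to the default, which is why plain `score-all` runs with a max commission of 10 even though the `Config` default constructor above says 8.

```rust
// Hypothetical demo binary; assumes clap 2.x in Cargo.toml.
#[macro_use]
extern crate clap;
use clap::{App, Arg, SubCommand};

fn main() {
    let matches = App::new("demo")
        .subcommand(
            SubCommand::with_name("score-all").arg(
                Arg::with_name("score_max_commission")
                    .long("score-max-commission")
                    .takes_value(true),
            ),
        )
        .get_matches();

    if let ("score-all", Some(m)) = matches.subcommand() {
        // `demo score-all` -> 10; `demo score-all --score-max-commission 8` -> 8
        let max_commission = value_t!(m, "score_max_commission", u8).unwrap_or(10);
        println!("score_max_commission = {}", max_commission);
    }
}
```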
@@ -721,7 +800,7 @@ fn get_config() -> BoxResult<(Config, RpcClient, Box<dyn GenericStakePool>)> {
         }
     }
 
-    let stake_pool: Box<dyn GenericStakePool> = match matches.subcommand() {
+    let stake_pool: Option<Box<dyn GenericStakePool>> = match matches.subcommand() {
         ("stake-pool-v0", Some(matches)) => {
             let authorized_staker = keypair_of(matches, "authorized_staker").unwrap();
             let reserve_stake_address = pubkey_of(matches, "reserve_stake_address").unwrap();
@@ -729,29 +808,35 @@ fn get_config() -> BoxResult<(Config, RpcClient, Box<dyn GenericStakePool>)> {
                 sol_to_lamports(value_t_or_exit!(matches, "min_reserve_stake_balance", f64));
             let baseline_stake_amount =
                 sol_to_lamports(value_t_or_exit!(matches, "baseline_stake_amount", f64));
-            Box::new(stake_pool_v0::new(
+            Some(Box::new(stake_pool_v0::new(
                 &rpc_client,
                 authorized_staker,
                 baseline_stake_amount,
                 reserve_stake_address,
                 min_reserve_stake_balance,
-            )?)
+            )?))
         }
         ("stake-pool", Some(matches)) => {
             let authorized_staker = keypair_of(matches, "authorized_staker").unwrap();
             let pool_address = pubkey_of(matches, "pool_address").unwrap();
             let baseline_stake_amount =
                 sol_to_lamports(value_t_or_exit!(matches, "baseline_stake_amount", f64));
-            Box::new(stake_pool::new(
+            Some(Box::new(stake_pool::new(
                 &rpc_client,
                 authorized_staker,
                 pool_address,
                 baseline_stake_amount,
-            )?)
+            )?))
         }
-        _ => unreachable!(),
+        _ => None,
     };
 
+    // guard - make sure score-all cannot be combined with stake distribution
+    if score_all && (stake_pool.is_some() || !dry_run) {
+        error!("DO NOT combine score-all with `--confirm` or `stake-pool`");
+        process::exit(1);
+    }
+
     Ok((config, rpc_client, stake_pool))
 }
 
@@ -968,6 +1053,7 @@ fn get_self_stake_by_vote_account(
     info!("Fetching stake accounts...");
     let all_stake_accounts = rpc_client.get_program_accounts(&stake::program::id())?;
+    info!("{} stake accounts", all_stake_accounts.len());
 
     let stake_history_account = rpc_client
         .get_account_with_commitment(&sysvar::stake_history::id(), CommitmentConfig::finalized())?
@@ -1100,10 +1186,40 @@ fn classify(
         .flat_map(|(v, sp)| v.into_iter().map(move |v| (v, sp)))
         .collect::<HashMap<_, _>>();
 
-    let (vote_account_info, total_active_stake) = get_vote_account_info(rpc_client, last_epoch)?;
+    let (mut vote_account_info, total_active_stake) =
+        get_vote_account_info(rpc_client, last_epoch)?;
+
+    // compute cumulative_stake_limit => active_stake of the last validator inside the can-halt-the-network group;
+    // we later set score=0 for all validators whose stake >= concentrated_validators_stake_limit
+    // sort by active_stake
+    vote_account_info.sort_by(|a, b| b.active_stake.cmp(&a.active_stake));
+    let mut accumulated: u64 = 0;
+    let mut count_halt_group: u32 = 0;
+    let limit: u64 = total_active_stake / 100 * 33;
+    let mut last_under_nakamoto_active_stake = limit;
+    for info in &vote_account_info {
+        last_under_nakamoto_active_stake = info.active_stake;
+        accumulated += info.active_stake;
+        count_halt_group += 1;
+        if accumulated > limit {
+            break;
+        }
+    }
+    info!(
+        "validators:{} total_active_stake:{}, can_halt_the_network:top {}, last under-nakamoto-coefficient active-stake: {}",
+        &vote_account_info.len(),
+        total_active_stake,
+        count_halt_group,
+        lamports_to_sol(last_under_nakamoto_active_stake),
+    );
 
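A standalone replay of the halt-group walk above with toy stakes, to make the boundary semantics concrete: the loop stops once the running total of the largest stakes exceeds 33% of total stake, and `last_under_nakamoto_active_stake` ends up as the stake of the validator that crossed the line (all numbers invented):

```rust
fn main() {
    // toy active stakes (arbitrary units)
    let mut stakes: Vec<u64> = vec![150, 400, 100, 300, 250, 80, 60, 40];
    stakes.sort_by(|a, b| b.cmp(a)); // descending, as in classify()
    let total: u64 = stakes.iter().sum(); // 1380
    let limit = total / 100 * 33; // 429 with integer math, as in the bot
    let mut accumulated = 0u64;
    let mut count_halt_group = 0u32;
    let mut last_under_nakamoto_active_stake = limit;
    for stake in &stakes {
        last_under_nakamoto_active_stake = *stake;
        accumulated += stake;
        count_halt_group += 1;
        if accumulated > limit {
            break;
        }
    }
    // 400 + 300 = 700 > 429, so the halt group is the top 2 validators and the
    // boundary stake is 300; anyone with stake >= 300 is later scored 0 via
    // score_discounts.can_halt_the_network_group.
    println!(
        "halt group: top {}, boundary stake {}",
        count_halt_group, last_under_nakamoto_active_stake
    );
}
```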
-    let self_stake_by_vote_account =
-        get_self_stake_by_vote_account(rpc_client, epoch, &vote_account_info)?;
+    // Note: get_self_stake_by_vote_account is expensive because it does an RPC call for each validator;
+    // we skip this data gathering if config.min_self_stake_lamports == 0
+    let self_stake_by_vote_account = if config.min_self_stake_lamports > 0 {
+        get_self_stake_by_vote_account(rpc_client, epoch, &vote_account_info)?
+    } else {
+        HashMap::new()
+    };
 
     let (cluster_nodes_with_old_version, min_release_version): (HashMap<String, _>, _) =
         match config.min_release_version {
@@ -1113,7 +1229,7 @@ fn classify(
             .into_iter()
             .filter_map(|rpc_contact_info| {
                 if let Ok(identity) = Pubkey::from_str(&rpc_contact_info.pubkey) {
-                    if validator_list.contains(&identity) {
+                    if config.score_all || validator_list.contains(&identity) {
                         if let Some(ref version) = rpc_contact_info.version {
                             if let Ok(semver) = semver::Version::parse(version) {
                                 if semver < *min_release_version {
@@ -1238,6 +1354,7 @@ fn classify(
         None
     } else {
         let mut validator_classifications = HashMap::new();
+        let mut total_skipped: u32 = 0;
 
         for VoteAccountInfo {
             identity,
@@ -1247,18 +1364,30 @@ fn classify(
             epoch_credits,
         } in vote_account_info
         {
-            if !validator_list.contains(&identity) {
+            if !config.score_all && !validator_list.contains(&identity) {
+                total_skipped += 1;
                 continue;
             }
 
+            let mut score_discounts = db::ScoreDiscounts::default();
+
             let participant = identity_to_participant.get(&identity).cloned();
 
-            let current_data_center = data_centers
+            let validators_app_info = data_centers
                 .by_identity
                 .get(&identity)
                 .cloned()
                 .unwrap_or_default();
 
+            let current_data_center = validators_app_info.data_center_id.clone();
+
+            // score: check data center concentration
+            let data_center_info = data_centers
+                .info
+                .iter()
+                .find(|x| x.id == current_data_center)
+                .unwrap();
+
             let previous_classification = previous_epoch_validator_classifications
                 .map(|p| p.get(&identity))
                 .flatten();
@@ -1303,13 +1432,16 @@ fn classify(
                 }
             });
 
-            let insufficent_self_stake_msg =
-                format!("Insufficient self stake: {}", Sol(self_stake));
-            if !config.enforce_min_self_stake && self_stake < config.min_self_stake_lamports {
-                validator_notes.push(insufficent_self_stake_msg.clone());
+            let insufficient_self_stake_msg =
+                format!("insufficient self stake: {}", Sol(self_stake));
+            if config.min_self_stake_lamports > 0
+                && !config.enforce_min_self_stake
+                && self_stake < config.min_self_stake_lamports
+            {
+                validator_notes.push(insufficient_self_stake_msg.clone());
             }
 
-            let insufficent_testnet_participation = testnet_participation
+            let insufficient_testnet_participation = testnet_participation
                 .as_ref()
                 .map(|testnet_participation| {
                     if let Some(participant) = participant {
@@ -1326,12 +1458,16 @@ fn classify(
                 })
                 .flatten();
 
+            // no score if in the can-halt-the-network group
+            score_discounts.can_halt_the_network_group =
+                active_stake >= last_under_nakamoto_active_stake;
+
             let (stake_state, reason) = if let Some(reason) =
                 infrastructure_concentration_destake_reason
             {
                 (ValidatorStakeState::None, reason)
             } else if config.enforce_min_self_stake && self_stake < config.min_self_stake_lamports
             {
-                (ValidatorStakeState::None, insufficent_self_stake_msg)
+                (ValidatorStakeState::None, insufficient_self_stake_msg)
             } else if active_stake > config.max_active_stake_lamports {
                 (
                     ValidatorStakeState::None,
@@ -1342,10 +1478,13 @@ fn classify(
                     ValidatorStakeState::None,
                     format!("Commission is too high: {}% commission", commission),
                 )
-            } else if let Some(insufficent_testnet_participation) =
-                insufficent_testnet_participation
+            } else if let Some(insufficient_testnet_participation) =
+                insufficient_testnet_participation
             {
-                (ValidatorStakeState::None, insufficent_testnet_participation)
+                (
+                    ValidatorStakeState::None,
+                    insufficient_testnet_participation,
+                )
             } else if poor_voters.contains(&identity) {
                 (
                     ValidatorStakeState::None,
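`average_position`, set in the `ScoreData` constructed a few hunks below, normalizes a validator's epoch credits so that the cluster average sits at 50.0. A toy rendering of that normalization (cluster average invented):

```rust
fn main() {
    let avg_epoch_credits = 280_000f64; // cluster average, invented
    for credits in [140_000u64, 280_000, 336_000] {
        // same expression as the ScoreData construction below
        let average_position = credits as f64 / avg_epoch_credits * 50.0;
        println!("{} credits -> position {:.1}", credits, average_position);
    }
    // 140_000 -> 25.0 (half the average), 280_000 -> 50.0 (average),
    // 336_000 -> 60.0 (20% above). With min_avg_position = 50.0, only
    // validators at or above the cluster average keep a non-zero score.
}
```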
@@ -1379,7 +1518,7 @@ fn classify(
                 )
             } else {
                 assert!(!poor_voters.contains(&identity));
-                assert!(not_in_leader_schedule.contains(&identity));
+                assert!(config.score_all || not_in_leader_schedule.contains(&identity));
                 (
                     // If the validator is not in the leader schedule but was Bonus previously,
                     // maintain Bonus.
@@ -1447,6 +1586,15 @@ fn classify(
                 identity,
                 vote_address,
                 stake_state,
+                score_data: Some(ScoreData {
+                    epoch_credits,
+                    average_position: epoch_credits as f64 / avg_epoch_credits as f64 * 50.0,
+                    score_discounts,
+                    commission,
+                    active_stake,
+                    data_center_concentration: data_center_info.stake_percent,
+                    validators_app_info,
+                }),
                 stake_states: Some(stake_states),
                 stake_action: None,
                 stake_state_reason: reason,
@@ -1462,6 +1610,11 @@ fn classify(
             "{} validators processed",
             validator_classifications.len()
         ));
+        info!(
+            "{} validators, {} skipped",
+            &validator_classifications.len(),
+            total_skipped
+        );
 
         Some(validator_classifications)
     };
@@ -1477,7 +1630,7 @@
 fn main() -> BoxResult<()> {
     solana_logger::setup_with_default("solana=info");
 
-    let (config, rpc_client, mut stake_pool) = get_config()?;
+    let (config, rpc_client, optional_stake_pool) = get_config()?;
 
     info!("Loading participants...");
     let participants = get_participants_with_state(
@@ -1616,21 +1769,22 @@ fn main() -> BoxResult<()> {
         })
         .collect();
 
-    let (stake_pool_notes, validator_stake_actions, unfunded_validators) =
-        stake_pool.apply(&rpc_client, config.dry_run, &desired_validator_stake)?;
-    notifications.extend(stake_pool_notes.clone());
-    epoch_classification.notes.extend(stake_pool_notes);
-
-    for identity in unfunded_validators {
-        validator_classifications
-            .entry(identity)
-            .and_modify(|e| e.prioritize_funding_in_next_epoch = Some(true));
-    }
+    if let Some(mut stake_pool) = optional_stake_pool {
+        let (stake_pool_notes, validator_stake_actions, unfunded_validators) =
+            stake_pool.apply(&rpc_client, config.dry_run, &desired_validator_stake)?;
+        notifications.extend(stake_pool_notes.clone());
+        epoch_classification.notes.extend(stake_pool_notes);
+        for identity in unfunded_validators {
+            validator_classifications
+                .entry(identity)
+                .and_modify(|e| e.prioritize_funding_in_next_epoch = Some(true));
+        }
 
-    for (identity, stake_action) in validator_stake_actions {
-        validator_classifications
-            .entry(identity)
-            .and_modify(|e| e.stake_action = Some(stake_action));
+        for (identity, stake_action) in validator_stake_actions {
+            validator_classifications
+                .entry(identity)
+                .and_modify(|e| e.stake_action = Some(stake_action));
+        }
     }
 
     validator_notes.sort();
@@ -1642,7 +1796,6 @@ fn main() -> BoxResult<()> {
 
     if first_time {
         EpochClassification::new(epoch_classification).save(epoch, &config.cluster_db_path())?;
-        generate_markdown(epoch, &config)?;
     }
 
     if post_notifications {
@@ -1652,13 +1805,16 @@ fn main() -> BoxResult<()> {
         }
     }
 
+    // only generates output when --markdown is present (markdown_path is Some)
+    generate_markdown(epoch, &config)?;
+
     Ok(())
 }
 
 fn generate_markdown(epoch: Epoch, config: &Config) -> BoxResult<()> {
     let markdown_path = match config.markdown_path.as_ref() {
         Some(d) => d,
-        None => return Ok(()),
+        None => return Ok(()), // exit if --markdown was not provided
     };
 
     fs::create_dir_all(&markdown_path)?;
@@ -1731,9 +1887,18 @@ fn generate_markdown(epoch: Epoch, config: &Config) -> BoxResult<()> {
     if let Some(ref validator_classifications) =
         epoch_classification.validator_classifications
     {
+        let mut validator_detail_csv = vec![];
+
+        validator_detail_csv.push("epoch,keybase_id,name,identity,vote_address,score,average_position,commission,active_stake,epoch_credits,data_center_concentration,can_halt_the_network_group,stake_state,stake_state_reason,www_url".into());
         let mut validator_classifications =
             validator_classifications.iter().collect::<Vec<_>>();
-        validator_classifications.sort_by(|a, b| a.0.cmp(b.0));
+        // sort by credits, desc
+        validator_classifications.sort_by(|a, b| {
+            b.1.score_data
+                .as_ref()
+                .unwrap()
+                .epoch_credits
+                .cmp(&a.1.score_data.as_ref().unwrap().epoch_credits)
+        });
         for (identity, classification) in validator_classifications {
             let validator_markdown = validators_markdown.entry(identity).or_default();
@@ -1755,6 +1920,32 @@ fn generate_markdown(epoch: Epoch, config: &Config) -> BoxResult<()> {
                 "* Stake reason: {}",
                 classification.stake_state_reason
             ));
+
+            //epoch,keybase_id,name,identity,vote_address,score,average_position,commission,active_stake,epoch_credits,data_center_concentration,can_halt_the_network_group,stake_state,stake_state_reason,www_url
+            if let Some(score_data) = &classification.score_data {
+                let score = score_data.score(config);
+
+                let csv_line = format!(
+                    r#"{},"{}","{}","{}","{}",{},{},{},{},{},{:.4},{},"{:?}","{}","{}""#,
+                    epoch,
+                    escape_quotes(&score_data.validators_app_info.keybase_id),
+                    escape_quotes(&score_data.validators_app_info.name),
+                    identity.to_string(),
+                    classification.vote_address,
+                    score,
+                    score_data.average_position,
+                    score_data.commission,
+                    lamports_to_sol(score_data.active_stake),
+                    score_data.epoch_credits,
+                    score_data.data_center_concentration,
+                    score_data.score_discounts.can_halt_the_network_group,
+                    classification.stake_state,
+                    escape_quotes(&classification.stake_state_reason),
+                    escape_quotes(&score_data.validators_app_info.www_url),
+                );
+                validator_detail_csv.push(csv_line);
+            }
+
             if let Some(ref stake_action) = classification.stake_action {
                 validator_markdown.push(format!("* Staking activity: {}", stake_action));
             }
@@ -1788,13 +1979,22 @@ fn generate_markdown(epoch: Epoch, config: &Config) -> BoxResult<()> {
                 validator_markdown.push(format!("* {}", note));
             }
         }
+        // save {cluster}-validator-detail.csv (repeating the cluster in the name is intentional)
+        let filename = config
+            .cluster_db_path()
+            .join(format!("{}-validator-detail.csv", config.cluster));
+        info!("Writing {}", filename.display());
+        let mut file = File::create(filename)?;
+        file.write_all(&validator_detail_csv.join("\n").into_bytes())?;
     }
 }
 
 for (identity, validator_markdown) in validators_markdown {
     let markdown = validator_markdown.join("\n");
     let filename = markdown_path.join(format!("Validator-{}.md", identity));
-    info!("Writing {}", filename.display());
+    if !config.score_all {
+        info!("Writing {}", filename.display())
+    }
     let mut file = File::create(filename)?;
     file.write_all(&markdown.into_bytes())?;
 }
@@ -1808,6 +2008,10 @@ fn generate_markdown(epoch: Epoch, config: &Config) -> BoxResult<()> {
     Ok(())
 }
 
+fn escape_quotes(original: &String) -> String {
+    original.replace("\"", "\"\"")
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
diff --git a/clean-score-all-mainnet.bash b/clean-score-all-mainnet.bash
new file mode 100755
index 00000000..0ede3c04
--- /dev/null
+++ b/clean-score-all-mainnet.bash
@@ -0,0 +1,5 @@
+#!/bin/bash
+set -ex
+rm -rf db/score-all-mainnet-beta
+. ./score-all-mainnet.sh
+. ./import-into-sqlite.sh
diff --git a/docker-build.bash b/docker-build.bash
new file mode 100755
index 00000000..bdc5c834
--- /dev/null
+++ b/docker-build.bash
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+docker build -t stake-o-matic .
diff --git a/docker-run.bash b/docker-run.bash
new file mode 100755
index 00000000..e69b9bb6
--- /dev/null
+++ b/docker-run.bash
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+set -x
+
+SCRIPT_DIR="$( cd "$(dirname "$0")" ; pwd -P )"
+DB_PATH="$SCRIPT_DIR/db"
+SQLITE_SCORES_PATH="$DB_PATH/score-sqlite3.db"
+HISTORIC_DATA="https://github.com/marinade-finance/staking-status/raw/main/scores.sqlite3"
+
+mkdir -p "$DB_PATH"
+wget "$HISTORIC_DATA" -O "$SQLITE_SCORES_PATH"
+
+docker run \
+    --name stake-o-matic \
+    --user "$UID" \
+    --rm \
+    --volume "$DB_PATH:/usr/local/db" \
+    --env "VALIDATORS_APP_TOKEN=$VALIDATORS_APP_TOKEN" \
+    stake-o-matic ./clean-score-all-mainnet.bash
diff --git a/import-into-sqlite.sh b/import-into-sqlite.sh
new file mode 100644
index 00000000..3a5834a1
--- /dev/null
+++ b/import-into-sqlite.sh
@@ -0,0 +1,2 @@
+sqlite3 <./sql/import.sql
+date
diff --git a/score-all-mainnet.sh b/score-all-mainnet.sh
new file mode 100755
index 00000000..ec7881c5
--- /dev/null
+++ b/score-all-mainnet.sh
@@ -0,0 +1,7 @@
+date
+./target/debug/solana-stake-o-matic --cluster mainnet-beta --markdown $* \
+    score-all \
+    --score-max-commission 10 \
+    --score-min-stake 100 \
+    --concentration-point-discount 1500 \
+    --min-avg-position 50
\ No newline at end of file
diff --git a/score-all-testnet.sh b/score-all-testnet.sh
new file mode 100755
index 00000000..ae34fb8e
--- /dev/null
+++ b/score-all-testnet.sh
@@ -0,0 +1,9 @@
+./target/debug/solana-stake-o-matic --markdown $* \
+    --min-epoch-credit-percentage-of-average 20 \
+    score-all \
+    --score-max-commission 10 \
+    --score-min-stake 100 \
+    --commission-point-discount 5000 \
+    --concentration-point-discount 1000
+
+# --cluster mainnet-beta
\ No newline at end of file
diff --git a/sql/average-epochs.sql b/sql/average-epochs.sql
new file mode 100644
index 00000000..da55c6c1
--- /dev/null
+++ b/sql/average-epochs.sql
@@ -0,0 +1,23 @@
+
+select
+261 as epoch ,
+max(keybase_id) ,
+max(name),
+max(identity) ,
+vote_address,
+avg(score),
+avg(avg_position),
+avg(commission),
+avg(active_stake),
+CAST(avg(epoch_credits) as INTEGER),
+avg(data_center_concentration),
+min(can_halt_the_network_group),
+min(stake_state),
+min(stake_state_reason),
+max(www_url),
+avg(pct),
+avg(stake_conc),
+CAST(avg(adj_credits) as INTEGER)
+from scores
+where epoch BETWEEN 260 and 262
+group by vote_address
diff --git a/sql/average.sql b/sql/average.sql
new file mode 100644
index 00000000..c9efc1fc
--- /dev/null
+++ b/sql/average.sql
@@ -0,0 +1,27 @@
+.open ./db/score-sqlite3.db
+
+drop table if exists avg;
+
+create table AVG as
+select epoch,keybase_id,name,score, case when score=0 then 0 else b_score end as b_score, b_score-score as delta_score, avg_position, ap, commission, c2, epoch_credits, ec2, ec2-epoch_credits as delta_credits, 0.0 as pct, vote_address from scores A
+left outer JOIN (select round( avg(epoch_credits) * (100-avg(commission))/100 * (100-avg(data_center_concentration)*4)/100 * (avg(avg_position)-49) * (avg(avg_position)-49) ) as B_score, avg(avg_position) as ap, avg(commission) as c2, avg(epoch_credits) as ec2, vote_address as va2 from scores B
+where B.epoch between (select distinct epoch from imported)-2 and (select distinct epoch from imported)
+group by vote_address)
+on va2 = a.vote_address
+where A.epoch = (select distinct epoch from imported)
+--and score=0 and b_score>0
+--and score>0 WE MUST INCLUDE ALL RECORDS - so update-scores checks all validators' health
+order by b_score desc
+;
+
+-- compute PCT (informative)
+update avg as U
+set pct = B_score / (select sum(A.b_score) from avg A where A.epoch = U.epoch) * 100
+;
+
+-- show validators with pct assigned (informative)
+select * from AVG
+where pct>0
+order by pct desc;
+
+.exit
diff --git a/sql/check-commision-changes.sql b/sql/check-commision-changes.sql
new file mode 100644
index 00000000..0adfe5c9
--- /dev/null
+++ b/sql/check-commision-changes.sql
@@ -0,0 +1,11 @@
+--
+-- CHECK FOR commission changes
+--
+drop table if exists t1;
+create table t1 as
+select vote_address,commission from scores where epoch=243
+EXCEPT select vote_address,commission from scores where epoch=242
+;
+select * from scores where vote_address in ( select vote_address from t1 )
+order by vote_address,epoch
+;
diff --git a/sql/control.sql b/sql/control.sql
new file mode 100644
index 00000000..073d8acb
--- /dev/null
+++ b/sql/control.sql
@@ -0,0 +1,13 @@
+.open db/score-sqlite3.db
+.mode column
+.headers ON
+select epoch,rank,keybase_id,name, round(pct,4) as pct, avg_score, ROUND(mult,4) as mult,
+    round(avg_pos,4) as avg_pos,
+    epoch_credits,avg_ec,delta_credits,
+    avg_commiss,round(dcc2,5) as dcc2 from AVG
+where pct>0
+order by rank
+LIMIT 20
+;
+select count(*) as validators_with_pct from avg where pct<>0;
+.exit
diff --git a/sql/copy-from-scores2.sql b/sql/copy-from-scores2.sql
new file mode 100644
index 00000000..665830a4
--- /dev/null
+++ b/sql/copy-from-scores2.sql
@@ -0,0 +1,21 @@
+insert into scores
+select
+epoch ,
+null as keybase_id ,
+name ,
+null as identity ,
+vote_address ,
+score ,
+average_position as avg_position ,
+commission ,
+avg_active_stake as active_stake ,
+this_epoch_credits epoch_credits ,
+data_center_concentration ,
+null as can_halt_the_network_group ,
+null as stake_state ,
+null as stake_state_reason ,
+null as www_url ,
+pct ,
+null as stake_conc ,
+null as adj_credits
+from scores2
\ No newline at end of file
diff --git a/sql/fields-list.sql b/sql/fields-list.sql
new file mode 100644
index 00000000..24a6d906
--- /dev/null
+++ b/sql/fields-list.sql
@@ -0,0 +1 @@
+select name,"," from pragma_table_info("scores")
\ No newline at end of file
diff --git a/sql/fixes.sql b/sql/fixes.sql
new file mode 100644
index 00000000..301a7342
--- /dev/null
+++ b/sql/fixes.sql
@@ -0,0 +1,4 @@
+-- SQLite
+--drop table mainnet;
+--delete FROM mainnet where identity='identity';
+--select sum(active_stake)/1e9 from mainnet where active_stake is not null;
diff --git a/sql/import.sql b/sql/import.sql
new file mode 100644
index 00000000..c19e3dac
--- /dev/null
+++ b/sql/import.sql
@@ -0,0 +1,129 @@
+.open ./db/score-sqlite3.db
+
+-- create table to receive stake-o-matic data
+DROP TABLE IF EXISTS imported;
+CREATE TABLE imported(
+    epoch INT,
+    keybase_id TEXT,
+    name TEXT,
+    identity TEXT,
+    vote_address TEXT,
+    score INTEGER,
+    avg_position REAL,
+    commission SHORT,
+    active_stake INTEGER,
+    epoch_credits INTEGER,
+    data_center_concentration DOUBLE,
+    can_halt_the_network_group BOOL,
+    stake_state TEXT,
+    stake_state_reason TEXT,
+    www_url TEXT
+);
+
+-- import stake-o-matic data
+.mode csv
+.import ./db/score-all-mainnet-beta/mainnet-beta-validator-detail.csv imported
+-- remove the header row
+delete FROM imported where identity='identity';
+
+-- add pct and stake-concentration columns
+ALTER table imported add pct FLOAT;
+ALTER table imported add stake_conc FLOAT;
+ALTER table
imported add adj_credits INTEGER; +UPDATE imported set + pct = round(score * 100.0 / (select sum(score) from imported),4), + stake_conc = round(active_stake * 100.0 / (select sum(active_stake) from imported),4), + adj_credits = CAST((epoch_credits * (100-commission-3*data_center_concentration)/100) as INTEGER) + ; + +--recompute avg_position based on adj_credits +update imported +set avg_position = adj_credits * 50 / (select avg(adj_credits) from scores B where adj_credits>30000); + + +--control, show total staked +select DISTINCT epoch from imported; +select 'validators',count(*),'total staked',sum(active_stake) from imported; +select 'validators with 0 score count:',count(*), + "sum stake",sum(active_stake) + from imported + where pct=0 + ; +select 'validators with non-zero score count:',count(*), + "sum stake",sum(active_stake) + from imported + where pct>0 + ; +select 'avg epoch_credits',avg(epoch_credits), + 'max epoch credits',max(epoch_credits), + 'min epoch credits',min(epoch_credits), min(epoch_credits)/avg(epoch_credits)*100, "% of avg", + char(10) || 'max score',max(score), + 'min score',min(score), + char(10) || 'max pct',max(pct), + 'min pct',min(pct) + from imported + where pct>0; + +-- add imported epoch to table scores +create TABLE if not EXISTS scores as select * from imported; +DELETE FROM scores where epoch = (select DISTINCT epoch from imported); +INSERT INTO scores select * from imported; + + +-- recompute avg table with last 5 epochs +-- if score=0 from imported => below nakamoto coefficient, or commission 100% or less than 100 SOL staked +-- also we set score=0 if below 50% avg or less than 5 epochs on record +-- create pct column and set to zero, will update after when selecting top 250 +DROP TABLE IF EXISTS avg; +create table AVG as +select 0 as rank, epoch,keybase_id, vote_address,name, + case when score=0 or mult<=0 or score_records<5 or COALESCE(avg_active_stake,0)<100 then 0 else ROUND(base_score*mult) end as avg_score, + base_score, ap-49 mult, ap as avg_pos, commission, round(c2,2) as avg_commiss, dcc2, + epoch_credits, cast(ec2 as integer) as avg_ec, epoch_credits-ec2 as delta_credits, + 0.0 as pct, score_records, avg_active_stake, + can_halt_the_network_group, identity, stake_conc +from imported A +left outer JOIN ( + select count(*) as score_records, + round( avg(b.adj_credits) ) as base_score, + avg(b.avg_position) as ap, avg(b.avg_position)-49 as mult, avg(b.commission) as c2, ROUND(avg(b.epoch_credits)) as ec2, + avg(b.data_center_concentration) as dcc2, b.vote_address as va2, avg(b.active_stake) as avg_active_stake + from scores B + where B.epoch between (select distinct epoch from imported)-4 and (select distinct epoch from imported) + group by vote_address + ) + on va2 = a.vote_address +where A.epoch = (select distinct epoch from imported) +--and score>0 NOTE: WE MUST INCLUDE ALL RECORDS - so update-scores checks all validators' health +order by base_score desc +; + +-- compute rank +drop table if exists temp; +create table temp as select vote_address, RANK() over (order by avg_score DESC) as rank from avg; +-- set rank in avg table +update avg +set rank = (select rank from temp where temp.vote_address=avg.vote_address); + +-- SELECT TOP 250 +drop table if exists temp; +create table temp as select * from avg order by avg_score desc LIMIT 250; +-- set pct ONLY ON selected TOP validators +update avg as U +set pct = avg_score / (select sum(A.avg_score) from temp A where A.epoch = U.epoch) * 100 +where exists (select 1 from temp A where A.vote_address = 
U.vote_address)
+;
+
+-- show top validators with pct assigned (informative)
+.mode column
+.headers ON
+select epoch,rank,keybase_id,name, round(pct,4) as pct, avg_score, ROUND(mult,4) as mult,
+    round(avg_pos,4) as avg_pos,
+    epoch_credits,avg_ec,delta_credits,
+    avg_commiss,round(dcc2,5) as dcc2 from AVG
+where pct>0
+order by rank
+LIMIT 15
+;
+select count(*) as validators_with_pct from avg where pct<>0;
+.exit
diff --git a/sql/queries.sql b/sql/queries.sql
new file mode 100644
index 00000000..c47067e1
--- /dev/null
+++ b/sql/queries.sql
@@ -0,0 +1,15 @@
+-- * --
+select pct, A.*
+from mainnet as A
+where score>0
+order by pct desc
+-- * --
+-- compare epochs
+select epoch,keybase_id,name,score,b_score, avg_position, ap, commission, c2, epoch_credits, ec2, can_halt_the_network_group as h1, h2 from scores A
+JOIN (select score as B_score, avg_position as ap, commission as c2, epoch_credits as ec2, can_halt_the_network_group as h2, vote_address as va2 from scores B
+where B.epoch = 214)
+on va2 = a.vote_address
+where A.epoch = 213
+--and score=0 and b_score>0
+order by score desc
+-- * --
\ No newline at end of file
diff --git a/sql/test-queries.sql b/sql/test-queries.sql
new file mode 100644
index 00000000..c4a15d7d
--- /dev/null
+++ b/sql/test-queries.sql
@@ -0,0 +1,51 @@
+-- SQLite3
+/*SELECT identity, ` stake_state`, ` score`, ` commission`, ` active_stake`, ` epoch_credits`
+, ` stake_state_reason`
+FROM mainnet
+order by ` epoch_credits` desc;
+*/
+--.schema data
+
+/*CREATE TABLE mainnet(
+    identity TEXT,
+    score INTEGER,
+    commission SHORT,
+    active_stake INTEGER,
+    epoch_credits INTEGER,
+    stake_state TEXT,
+    stake_state_reason TEXT
+)
+*/
+--insert into data
+--select * from mainnet
+/*select identity,
+    score,
+    commission,
+    active_stake/1e9,
+    epoch_credits
+    --max(epoch_credits),
+    --avg(epoch_credits)
+    from mainnet
+order by active_stake desc;
+*/
+
+--select sum(active_stake) from mainnet where active_stake is not null;
+/*
+select 'below half avg epoch_credits',count(*),
+    "stake",sum(active_stake)/1e9
+    from mainnet
+    where epoch_credits < (select avg(epoch_credits)*0.50 from mainnet)
+*/
+
+-- if epoch_credits < 60% of max epoch_credits, discard
+/*
+select max(epoch_credits), min(epoch_credits)
+    from mainnet
+    where epoch_credits > (select max(epoch_credits)*0.50 from mainnet)
+    order by epoch_credits desc
+*/
+
+select (score * 100 / (select sum(score) from mainnet)) pct, A.*
+from mainnet A
+
+--select sum(score) from mainnet
\ No newline at end of file
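The tail of `sql/import.sql` ranks validators by `avg_score`, keeps the top 250, and expresses each kept score as a percentage of the kept group's total. A toy recomputation of that arithmetic in Rust (N = 3 instead of 250, invented scores):

```rust
fn main() {
    let mut avg_scores: Vec<(&str, u64)> = vec![
        ("v1", 5_000_000),
        ("v2", 4_000_000),
        ("v3", 3_000_000),
        ("v4", 2_000_000),
        ("v5", 0),
    ];
    // rank by avg_score, descending (the RANK() OVER ... step)
    avg_scores.sort_by(|a, b| b.1.cmp(&a.1));
    let top_n = 3; // import.sql uses LIMIT 250
    let total: u64 = avg_scores.iter().take(top_n).map(|(_, s)| s).sum();
    for (i, (name, score)) in avg_scores.iter().enumerate() {
        // pct is only assigned inside the top group; everyone else stays 0
        let pct = if i < top_n {
            *score as f64 / total as f64 * 100.0
        } else {
            0.0
        };
        println!("rank {:>2} {} pct {:.4}", i + 1, name, pct);
    }
    // v1 41.6667, v2 33.3333, v3 25.0000, v4/v5 0.0000
}
```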