Merge branch 'master' into shred-index-next

cpubot authored Jan 7, 2025
2 parents bc9da48 + e48e123 commit 00f2a09
Showing 20 changed files with 193 additions and 143 deletions.
99 changes: 55 additions & 44 deletions Cargo.lock

Large diffs are not rendered by default.

3 changes: 2 additions & 1 deletion accounts-db/src/accounts_cache.rs
@@ -2,6 +2,7 @@ use {
crate::{accounts_db::AccountsDb, accounts_hash::AccountHash},
dashmap::DashMap,
seqlock::SeqLock,
solana_nohash_hasher::BuildNoHashHasher,
solana_sdk::{
account::{AccountSharedData, ReadableAccount},
clock::Slot,
@@ -152,7 +153,7 @@ impl CachedAccountInner {

#[derive(Debug, Default)]
pub struct AccountsCache {
cache: DashMap<Slot, SlotCache>,
cache: DashMap<Slot, SlotCache, BuildNoHashHasher<Slot>>,
// Queue of potentially unflushed roots. Random eviction + cache too large
// could have triggered a flush of this slot already
maybe_unflushed_roots: RwLock<BTreeSet<Slot>>,
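
The hunk above swaps DashMap's default SipHash hasher for a no-op integer hasher: `Slot` is a plain `u64`, so the key can serve as its own hash. A minimal sketch of the pattern, using the upstream `nohash-hasher` crate in place of the `solana_nohash_hasher` fork the diff imports:

```rust
use {dashmap::DashMap, nohash_hasher::BuildNoHashHasher};

type Slot = u64;

fn main() {
    // `DashMap::new()` exists only for the default hasher; with a custom
    // hasher the map is built via `default()` or `with_hasher()`.
    let cache: DashMap<Slot, &'static str, BuildNoHashHasher<Slot>> = DashMap::default();
    cache.insert(42, "slot cache entry");
    // Lookups use the u64 key as its own hash instead of running SipHash.
    assert_eq!(cache.get(&42).map(|e| *e.value()), Some("slot cache entry"));
}
```

The trade-off is losing SipHash's HashDoS resistance, presumably acceptable here because slot numbers are not attacker-chosen keys.
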
12 changes: 5 additions & 7 deletions accounts-db/src/accounts_db.rs
@@ -84,7 +84,7 @@ use {
smallvec::SmallVec,
solana_lattice_hash::lt_hash::LtHash,
solana_measure::{meas_dur, measure::Measure, measure_us},
solana_nohash_hasher::{IntMap, IntSet},
solana_nohash_hasher::{BuildNoHashHasher, IntMap, IntSet},
solana_rayon_threadlimit::get_thread_count,
solana_sdk::{
account::{Account, AccountSharedData, ReadableAccount},
@@ -783,7 +783,8 @@ struct StorageSizeAndCount {
/// number of accounts in the storage including both alive and dead accounts
pub count: usize,
}
type StorageSizeAndCountMap = DashMap<AccountsFileId, StorageSizeAndCount>;
type StorageSizeAndCountMap =
DashMap<AccountsFileId, StorageSizeAndCount, BuildNoHashHasher<AccountsFileId>>;

impl GenerateIndexTimings {
pub fn report(&self, startup_stats: &StartupStats) {
@@ -1552,7 +1553,7 @@ pub struct AccountsDb {
/// Set of unique keys per slot which is used
/// to drive clean_accounts
/// Generated by calculate_accounts_delta_hash
uncleaned_pubkeys: DashMap<Slot, Vec<Pubkey>>,
uncleaned_pubkeys: DashMap<Slot, Vec<Pubkey>, BuildNoHashHasher<Slot>>,

#[cfg(test)]
load_delay: u64,
@@ -2058,7 +2059,7 @@ impl AccountsDb {
storage: AccountStorage::default(),
accounts_cache: AccountsCache::default(),
sender_bg_hasher: None,
uncleaned_pubkeys: DashMap::new(),
uncleaned_pubkeys: DashMap::default(),
next_id: AtomicAccountsFileId::new(0),
shrink_candidate_slots: Mutex::new(ShrinkCandidates::default()),
write_version: AtomicU64::new(0),
@@ -6318,9 +6319,6 @@ impl AccountsDb {
// `max_flush_root` in the accounts cache.
self.accounts_cache.set_max_flush_root(root);
}

// Only add to the uncleaned roots set *after* we've flushed the previous roots,
// so that clean will actually be able to clean the slots.
let num_new_roots = cached_roots.len();
(num_new_roots, num_roots_flushed, flush_stats)
}
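
Note the matching constructor change above, from `DashMap::new()` to `DashMap::default()`: `new()` is only defined for maps using the std `RandomState` hasher, so once `uncleaned_pubkeys` gains a `BuildNoHashHasher<Slot>` parameter it has to be built differently. A sketch under simplified types (`Pubkey` stands in as a bare 32-byte array):

```rust
use {dashmap::DashMap, nohash_hasher::BuildNoHashHasher};

type Slot = u64;
type Pubkey = [u8; 32]; // stand-in for solana_sdk::pubkey::Pubkey

fn new_uncleaned_pubkeys() -> DashMap<Slot, Vec<Pubkey>, BuildNoHashHasher<Slot>> {
    // `DashMap::new()` would not compile here: it returns
    // `DashMap<K, V, RandomState>`. `default()` works for any hasher
    // type that implements `Default`.
    DashMap::default()
}

fn main() {
    let uncleaned = new_uncleaned_pubkeys();
    uncleaned.entry(7).or_default().push([0u8; 32]);
    assert_eq!(uncleaned.get(&7).map(|v| v.len()), Some(1));
}
```
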
14 changes: 9 additions & 5 deletions accounts-db/src/read_only_accounts_cache.rs
@@ -3,6 +3,7 @@
#[cfg(feature = "dev-context-only-utils")]
use qualifier_attr::qualifiers;
use {
ahash::random_state::RandomState as AHashRandomState,
dashmap::{mapref::entry::Entry, DashMap},
index_list::{Index, IndexList},
log::*,
@@ -71,7 +72,7 @@ struct AtomicReadOnlyCacheStats {
#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
#[derive(Debug)]
pub(crate) struct ReadOnlyAccountsCache {
cache: Arc<DashMap<ReadOnlyCacheKey, ReadOnlyAccountCacheEntry>>,
cache: Arc<DashMap<ReadOnlyCacheKey, ReadOnlyAccountCacheEntry, AHashRandomState>>,
/// When an item is first entered into the cache, it is added to the end of
/// the queue. Also each time an entry is looked up from the cache it is
/// moved to the end of the queue. As a result, items in the queue are
@@ -104,7 +105,7 @@ impl ReadOnlyAccountsCache {
ms_to_skip_lru_update: u32,
) -> Self {
assert!(max_data_size_lo <= max_data_size_hi);
let cache = Arc::new(DashMap::default());
let cache = Arc::new(DashMap::with_hasher(AHashRandomState::default()));
let queue = Arc::new(Mutex::<IndexList<ReadOnlyCacheKey>>::default());
let data_size = Arc::new(AtomicUsize::default());
let stats = Arc::new(AtomicReadOnlyCacheStats::default());
@@ -202,6 +203,9 @@ impl ReadOnlyAccountsCache {
self.data_size.fetch_sub(account_size, Ordering::Relaxed);
entry.account = account;
entry.slot = slot;
entry
.last_update_time
.store(ReadOnlyAccountCacheEntry::timestamp(), Ordering::Release);
// Move the entry to the end of the queue.
let mut queue = self.queue.lock().unwrap();
queue.remove(entry.index());
@@ -233,7 +237,7 @@ impl ReadOnlyAccountsCache {
/// Removes `key` from the cache, if present, and returns the removed account
fn do_remove(
key: &ReadOnlyCacheKey,
cache: &DashMap<ReadOnlyCacheKey, ReadOnlyAccountCacheEntry>,
cache: &DashMap<ReadOnlyCacheKey, ReadOnlyAccountCacheEntry, AHashRandomState>,
queue: &Mutex<IndexList<ReadOnlyCacheKey>>,
data_size: &AtomicUsize,
) -> Option<AccountSharedData> {
@@ -289,7 +293,7 @@ impl ReadOnlyAccountsCache {
max_data_size_lo: usize,
max_data_size_hi: usize,
data_size: Arc<AtomicUsize>,
cache: Arc<DashMap<ReadOnlyCacheKey, ReadOnlyAccountCacheEntry>>,
cache: Arc<DashMap<ReadOnlyCacheKey, ReadOnlyAccountCacheEntry, AHashRandomState>>,
queue: Arc<Mutex<IndexList<ReadOnlyCacheKey>>>,
stats: Arc<AtomicReadOnlyCacheStats>,
) -> thread::JoinHandle<()> {
@@ -333,7 +337,7 @@ impl ReadOnlyAccountsCache {
fn evict(
target_data_size: usize,
data_size: &AtomicUsize,
cache: &DashMap<ReadOnlyCacheKey, ReadOnlyAccountCacheEntry>,
cache: &DashMap<ReadOnlyCacheKey, ReadOnlyAccountCacheEntry, AHashRandomState>,
queue: &Mutex<IndexList<ReadOnlyCacheKey>>,
) -> u64 {
let mut num_evicts = 0;
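
Two things change in this file: the cache's hasher becomes ahash's `RandomState` (a faster drop-in for SipHash over 32-byte pubkey keys), and overwriting an existing entry now refreshes its `last_update_time`, the stamp used to decide whether an access is recent enough to skip the locked LRU-queue update. A hedged sketch of that throttle with invented names (`Entry`, `should_update_lru`); the real cache drives this off its `ms_to_skip_lru_update` parameter:

```rust
use std::{
    sync::atomic::{AtomicU32, Ordering},
    time::Instant,
};

struct Entry {
    /// Milliseconds since a process-local epoch, stored atomically so
    /// readers can consult it without taking the queue lock.
    last_update_time: AtomicU32,
}

impl Entry {
    fn timestamp(epoch: Instant) -> u32 {
        epoch.elapsed().as_millis() as u32
    }

    /// True if enough time has passed that the LRU queue should be updated.
    fn should_update_lru(&self, epoch: Instant, ms_to_skip: u32) -> bool {
        let now = Self::timestamp(epoch);
        let last = self.last_update_time.load(Ordering::Acquire);
        now.saturating_sub(last) >= ms_to_skip
    }

    /// On overwrite (the diff above), the stamp is refreshed with `Release`
    /// ordering so the store is visible to `Acquire` readers.
    fn touch(&self, epoch: Instant) {
        self.last_update_time
            .store(Self::timestamp(epoch), Ordering::Release);
    }
}

fn main() {
    let epoch = Instant::now();
    let entry = Entry { last_update_time: AtomicU32::new(0) };
    entry.touch(epoch);
    // Right after a touch, a 100 ms skip window suppresses LRU updates.
    assert!(!entry.should_update_lru(epoch, 100));
}
```
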
2 changes: 1 addition & 1 deletion docs/src/consensus/fork-generation.md
@@ -110,7 +110,7 @@ so a validator observing `E3` and `E3'` can slash L3 and safely choose `x` for
that slot. Once a validator commits to a fork, other forks can be discarded
below that tick count. For any slot, validators need only consider a single "has
entries" chain or a "ticks only" chain to be proposed by a leader. But multiple
virtual entries may overlap as they link back to the a previous slot.
virtual entries may overlap as they link back to the previous slot.

#### Time Division

@@ -5,7 +5,7 @@ title: instruction introspection
## Problem

Some smart contract programs may want to verify that another Instruction is present in a
given Message since that Instruction could be be performing a verification of certain data,
given Message since that Instruction could be performing a verification of certain data,
in a precompiled function. (See secp256k1_instruction for an example).

## Solution
2 changes: 1 addition & 1 deletion docs/src/implemented-proposals/repair-service.md
@@ -28,7 +28,7 @@ Each validator advertises separately on gossip the various parts of an

- The `stash`: An epoch-long compressed set of all completed slots.
- The `cache`: The Run-length Encoding (RLE) of the latest `N` completed
slots starting from some some slot `M`, where `N` is the number of slots
slots starting from some slot `M`, where `N` is the number of slots
that will fit in an MTU-sized packet.

`Epoch Slots` in gossip are updated every time a validator receives a
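
The `cache` described in this hunk is a run-length encoding over slot completeness. A toy illustration of RLE over a completed-slot bitmap (not the validator's actual wire format):

```rust
// Encode a run of equal bits as (bit, run_length) pairs.
fn rle_encode(completed: &[bool]) -> Vec<(bool, u32)> {
    let mut runs: Vec<(bool, u32)> = Vec::new();
    for &bit in completed {
        match runs.last_mut() {
            Some((value, len)) if *value == bit => *len += 1,
            _ => runs.push((bit, 1)),
        }
    }
    runs
}

fn main() {
    // Slots M..M+8, true = completed.
    let completed = [true, true, true, false, false, true, true, true];
    // Three runs: 3 completed, 2 missing, 3 completed.
    assert_eq!(rle_encode(&completed), vec![(true, 3), (false, 2), (true, 3)]);
}
```
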
6 changes: 3 additions & 3 deletions docs/src/proposals/accounts-db-replication.md
@@ -64,7 +64,7 @@ The `ReplicaSlotConfirmationServer`: this service is responsible for serving the
`ReplicaSlotConfirmationRequest` and sends the `ReplicaSlotConfirmationResponse` back to the requestor.
The response consists of a vector of new slots the validator knows of which is later than the
specified last_replicated_slot. This service also runs in the main validator. This service
gets the slots for replication from the BankForks, BlockCommitmentCache and OptimiscallyConfirmBank.
gets the slots for replication from the BankForks, BlockCommitmentCache and OptimisticallyConfirmedBank.

The `ReplicaAccountsRequestor`: this service is responsible for sending the request
`ReplicaAccountsRequest` to its peer validator or replica for the `ReplicaAccountInfo` for a
@@ -131,7 +131,7 @@ Following are the client RPC APIs supported by the replica node in JsonRpcAccoun
- getMultipleAccounts
- getProgramAccounts
- getMinimumBalanceForRentExemption
- getInflationGovenor
- getInflationGovernor
- getInflationRate
- getEpochSchedule
- getRecentBlockhash
@@ -153,7 +153,7 @@ Following APIs are not included:
- getClusterNodes
- getRecentPerformanceSamples
- getGenesisHash
- getSignatueStatuses
- getSignatureStatuses
- getMaxRetransmitSlot
- getMaxShredInsertSlot
- sendTransaction
2 changes: 1 addition & 1 deletion docs/src/proposals/ledger-replication-to-implement.md
@@ -144,7 +144,7 @@ We have the following constraints:

in CBC mode with `NUM_CHACHA_ROUNDS` of encryption.

6. The archiver initializes a chacha rng with the a signed recent PoH value as
6. The archiver initializes a chacha rng with the signed recent PoH value as

the seed.

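
Step 6 above seeds a ChaCha RNG from a signed recent PoH value, making the archiver's sampling deterministic for verifiers yet unpredictable in advance. A hedged sketch of the seeding pattern only, with `rand_chacha` and `sha2` as stand-ins for whatever the proposal's implementation would use:

```rust
use {
    rand_chacha::{
        rand_core::{RngCore, SeedableRng},
        ChaCha20Rng,
    },
    sha2::{Digest, Sha256},
};

fn rng_from_signed_poh(signed_poh_value: &[u8]) -> ChaCha20Rng {
    // Hash the signature down to the fixed 32-byte seed ChaCha expects.
    let seed: [u8; 32] = Sha256::digest(signed_poh_value).into();
    ChaCha20Rng::from_seed(seed)
}

fn main() {
    let mut rng = rng_from_signed_poh(b"example signature bytes");
    // Deterministic for a given seed, so any party holding the signed PoH
    // value can reproduce the same sampling sequence.
    let _sample_offset = rng.next_u64();
}
```
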
@@ -51,7 +51,7 @@ Let `remaining_set` be all other nodes with contact info not contained in
`epoch_set`.

If `epoch_set.len < 2*FANOUT` then we may randomly select up to
`2*FANOUT - epoch_set.len` nodes to to retransmit to from `remaining_set`.
`2*FANOUT - epoch_set.len` nodes to retransmit to from `remaining_set`.

## Receiving retransmitted shred

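
A sketch of the padding rule the fix above refers to: when the epoch-staked set falls short of `2*FANOUT`, it is topped up with randomly chosen nodes from `remaining_set`. Function names and the `FANOUT` value are illustrative, not the actual turbine code:

```rust
use rand::seq::SliceRandom;

const FANOUT: usize = 200; // assumed value for illustration

fn retransmit_set<T: Clone>(epoch_set: &[T], remaining_set: &[T]) -> Vec<T> {
    let mut out = epoch_set.to_vec();
    // Top up to 2*FANOUT nodes; `choose_multiple` samples without
    // replacement and caps at remaining_set.len().
    let shortfall = (2 * FANOUT).saturating_sub(epoch_set.len());
    let mut rng = rand::thread_rng();
    out.extend(remaining_set.choose_multiple(&mut rng, shortfall).cloned());
    out
}

fn main() {
    let epoch_set: Vec<u64> = (0..150).collect();
    let remaining_set: Vec<u64> = (1_000..2_000).collect();
    // 150 staked nodes + 250 randomly selected others = 2*FANOUT.
    assert_eq!(retransmit_set(&epoch_set, &remaining_set).len(), 2 * FANOUT);
}
```
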
43 changes: 24 additions & 19 deletions gossip/src/contact_info.rs
@@ -23,6 +23,8 @@ use {

pub const SOCKET_ADDR_UNSPECIFIED: SocketAddr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), /*port:*/ 0u16);
const EMPTY_SOCKET_ADDR_CACHE: [SocketAddr; SOCKET_CACHE_SIZE] =
[SOCKET_ADDR_UNSPECIFIED; SOCKET_CACHE_SIZE];

const SOCKET_TAG_GOSSIP: u8 = 0;
const SOCKET_TAG_RPC: u8 = 2;
@@ -86,7 +88,7 @@ pub struct ContactInfo {
extensions: Vec<Extension>,
// Only sanitized socket-addrs can be cached!
#[serde(skip_serializing)]
cache: [Result<SocketAddr, Error>; SOCKET_CACHE_SIZE],
cache: [SocketAddr; SOCKET_CACHE_SIZE],
}

#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
@@ -125,7 +127,10 @@ macro_rules! get_socket {
($name:ident, $key:ident) => {
#[inline]
pub fn $name(&self) -> Option<SocketAddr> {
self.cache[usize::from($key)].ok()
let socket = &self.cache[usize::from($key)];
(socket != &SOCKET_ADDR_UNSPECIFIED)
.then_some(socket)
.copied()
}
};
($name:ident, $udp:ident, $quic:ident) => {
@@ -135,7 +140,7 @@
Protocol::QUIC => $quic,
Protocol::UDP => $udp,
};
self.cache[usize::from(key)].ok()
let socket = &self.cache[usize::from(key)];
(socket != &SOCKET_ADDR_UNSPECIFIED)
.then_some(socket)
.copied()
}
};
}
@@ -187,7 +195,7 @@ impl ContactInfo {
addrs: Vec::<IpAddr>::default(),
sockets: Vec::<SocketEntry>::default(),
extensions: Vec::<Extension>::default(),
cache: new_empty_cache(),
cache: EMPTY_SOCKET_ADDR_CACHE,
}
}

@@ -334,7 +342,7 @@ impl ContactInfo {
}
}
if let Some(entry) = self.cache.get_mut(usize::from(key)) {
*entry = Ok(socket); // socket is already sanitized above.
*entry = socket; // socket is already sanitized above.
}
debug_assert_matches!(sanitize_entries(&self.addrs, &self.sockets), Ok(()));
Ok(())
@@ -349,7 +357,7 @@ impl ContactInfo {
}
self.maybe_remove_addr(entry.index);
if let Some(entry) = self.cache.get_mut(usize::from(key)) {
*entry = Err(Error::SocketNotFound(key));
*entry = SOCKET_ADDR_UNSPECIFIED;
}
}
}
@@ -463,11 +471,6 @@ impl ContactInfo {
}
}

fn new_empty_cache() -> [Result<SocketAddr, Error>; SOCKET_CACHE_SIZE] {
debug_assert!(SOCKET_CACHE_SIZE < usize::from(u8::MAX));
std::array::from_fn(|key| Err(Error::SocketNotFound(key as u8)))
}

fn get_node_outset() -> u64 {
let now = SystemTime::now();
let elapsed = now.duration_since(UNIX_EPOCH).unwrap();
@@ -518,7 +521,7 @@ impl TryFrom<ContactInfoLite> for ContactInfo {
addrs,
sockets,
extensions,
cache: new_empty_cache(),
cache: EMPTY_SOCKET_ADDR_CACHE,
};
// Populate node.cache.
// Only sanitized socket-addrs can be cached!
@@ -532,7 +535,9 @@ impl TryFrom<ContactInfoLite> for ContactInfo {
continue;
};
let socket = SocketAddr::new(addr, port);
*entry = sanitize_socket(&socket).map(|()| socket);
if sanitize_socket(&socket).is_ok() {
*entry = socket;
}
}
Ok(node)
}
@@ -642,7 +647,7 @@ impl solana_frozen_abi::abi_example::AbiExample for ContactInfo {
addrs: Vec::<IpAddr>::example(),
sockets: Vec::<SocketEntry>::example(),
extensions: vec![],
cache: new_empty_cache(),
cache: EMPTY_SOCKET_ADDR_CACHE,
}
}
}
@@ -807,7 +812,7 @@ mod tests {
addrs: Vec::default(),
sockets: Vec::default(),
extensions: Vec::default(),
cache: new_empty_cache(),
cache: EMPTY_SOCKET_ADDR_CACHE,
};
let mut sockets = HashMap::<u8, SocketAddr>::new();
for _ in 0..1 << 14 {
@@ -827,10 +832,10 @@
if usize::from(key) < SOCKET_CACHE_SIZE {
assert_eq!(
node.cache[usize::from(key)],
match socket {
None => Err(Error::SocketNotFound(key)),
Some(&socket) => sanitize_socket(&socket).map(|()| socket),
}
socket
.filter(|socket| sanitize_socket(socket).is_ok())
.copied()
.unwrap_or(SOCKET_ADDR_UNSPECIFIED),
);
}
}
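
The refactor in this file replaces the per-socket cache of `Result<SocketAddr, Error>` with plain `SocketAddr`s, using the unspecified address `0.0.0.0:0` as the "absent" sentinel. That is unambiguous only because, per the struct comment, nothing unsanitized (hence nothing unspecified) is ever cached; it also lets the empty cache be a `const` array instead of a runtime `std::array::from_fn`. A condensed sketch of the pattern:

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

const SOCKET_CACHE_SIZE: usize = 4; // illustrative size, not the real one

pub const SOCKET_ADDR_UNSPECIFIED: SocketAddr =
    SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);

// With a sentinel instead of Result, the empty cache is a const array.
const EMPTY_SOCKET_ADDR_CACHE: [SocketAddr; SOCKET_CACHE_SIZE] =
    [SOCKET_ADDR_UNSPECIFIED; SOCKET_CACHE_SIZE];

struct Cache([SocketAddr; SOCKET_CACHE_SIZE]);

impl Cache {
    // Mirrors the getter in the diff: the sentinel reads back as None.
    fn get(&self, key: usize) -> Option<SocketAddr> {
        let socket = &self.0[key];
        (socket != &SOCKET_ADDR_UNSPECIFIED)
            .then_some(socket)
            .copied()
    }
}

fn main() {
    let mut cache = Cache(EMPTY_SOCKET_ADDR_CACHE);
    assert_eq!(cache.get(0), None); // unset entries read as absent
    cache.0[1] = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8001);
    assert!(cache.get(1).is_some());
}
```
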
8 changes: 4 additions & 4 deletions ledger/src/blockstore.rs
@@ -941,7 +941,7 @@ impl Blockstore {
fn try_shred_recovery(
&self,
erasure_metas: &BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
index_working_set: &HashMap<u64, IndexMetaWorkingSetEntry>,
prev_inserted_shreds: &HashMap<ShredId, Shred>,
leader_schedule_cache: &LeaderScheduleCache,
reed_solomon_cache: &ReedSolomonCache,
@@ -956,8 +956,8 @@
.filter_map(|(erasure_set, working_erasure_meta)| {
let erasure_meta = working_erasure_meta.as_ref();
let slot = erasure_set.slot();
let index_meta_entry = index_working_set.get_mut(&slot).expect("Index");
let index = &mut index_meta_entry.index;
let index_meta_entry = index_working_set.get(&slot).expect("Index");
let index = &index_meta_entry.index;
match erasure_meta.status(index) {
ErasureMetaStatus::CanRecover => self
.recover_shreds(
@@ -994,7 +994,7 @@ impl Blockstore {
let recovered_shreds: Vec<_> = self
.try_shred_recovery(
&shred_insertion_tracker.erasure_metas,
&mut shred_insertion_tracker.index_working_set,
&shred_insertion_tracker.index_working_set,
&shred_insertion_tracker.just_inserted_shreds,
leader_schedule_cache,
reed_solomon_cache,
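
The only change here loosens `try_shred_recovery` from `&mut HashMap` to `&HashMap`: recovery only reads the index, so a shared borrow suffices and the caller no longer needs exclusive access to the insertion tracker. A stand-in sketch of the relaxed signature:

```rust
use std::collections::HashMap;

// Simplified stand-in for the ledger's IndexMetaWorkingSetEntry.
struct IndexMetaWorkingSetEntry {
    index: Vec<u64>,
}

fn try_recover(index_working_set: &HashMap<u64, IndexMetaWorkingSetEntry>, slot: u64) -> usize {
    // `get` instead of `get_mut`: read-only access is all recovery needs.
    let entry = index_working_set.get(&slot).expect("Index");
    entry.index.len()
}

fn main() {
    let mut ws = HashMap::new();
    ws.insert(5u64, IndexMetaWorkingSetEntry { index: vec![1, 2, 3] });
    // A shared reference is enough; other readers could borrow concurrently.
    assert_eq!(try_recover(&ws, 5), 3);
}
```
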
8 changes: 7 additions & 1 deletion programs/address-lookup-table/Cargo.toml
@@ -20,9 +20,15 @@ solana-program = { workspace = true }
thiserror = { workspace = true }

[target.'cfg(not(target_os = "solana"))'.dependencies]
solana-bincode = { workspace = true }
solana-clock = { workspace = true }
solana-instruction = { workspace = true }
solana-log-collector = { workspace = true }
solana-packet = { workspace = true }
solana-program-runtime = { workspace = true }
solana-sdk = { workspace = true }
solana-pubkey = { workspace = true }
solana-system-interface = { workspace = true }
solana-transaction-context = { workspace = true, features = ["bincode"] }

[lib]
crate-type = ["lib"]