From a2953d65f67e57d1de79151cb52d328a436f4d7c Mon Sep 17 00:00:00 2001 From: Adam Reichold Date: Mon, 11 Dec 2023 11:32:39 +0100 Subject: [PATCH] Opportunistically seed forked block caches from current one. --- src/core/searcher.rs | 5 +++-- src/store/reader.rs | 15 +++++++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/core/searcher.rs b/src/core/searcher.rs index 4b758040a5..f4bc516a91 100644 --- a/src/core/searcher.rs +++ b/src/core/searcher.rs @@ -157,10 +157,11 @@ impl Searcher { let futures = groups .into_iter() - .map(|((segment_ord, _cache_key), doc_ids)| { + .map(|((segment_ord, cache_key), doc_ids)| { // Each group fetches documents from exactly one block and // therefore gets an independent block cache of size one. - let store_reader = self.inner.store_readers[segment_ord as usize].fork_cache(1); + let store_reader = + self.inner.store_readers[segment_ord as usize].fork_cache(1, &[cache_key]); async move { let mut docs = Vec::new(); diff --git a/src/store/reader.rs b/src/store/reader.rs index 0c0de9c128..cf3e136637 100644 --- a/src/store/reader.rs +++ b/src/store/reader.rs @@ -148,15 +148,26 @@ impl StoreReader { } /// Clones the given store reader with an independent block cache of the given size. + /// + /// `cache_keys` is used to seed the forked cache from the current cache + /// if some blocks are already available. 
#[cfg(feature = "quickwit")] - pub(crate) fn fork_cache(&self, cache_num_blocks: usize) -> Self { - Self { + pub(crate) fn fork_cache(&self, cache_num_blocks: usize, cache_keys: &[CacheKey]) -> Self { + let forked = Self { decompressor: self.decompressor, data: self.data.clone(), cache: BlockCache::new(cache_num_blocks), skip_index: Arc::clone(&self.skip_index), space_usage: self.space_usage.clone(), + }; + + for &CacheKey(pos) in cache_keys { + if let Some(block) = self.cache.get_from_cache(pos) { + forked.cache.put_into_cache(pos, block); + } } + + forked } pub(crate) fn block_checkpoints(&self) -> impl Iterator<Item = Checkpoint> + '_ {