diff --git a/api/src/config.rs b/api/src/config.rs index e606770ddc9..49b6a098d27 100644 --- a/api/src/config.rs +++ b/api/src/config.rs @@ -339,6 +339,7 @@ impl BackendConfigV2 { } None => return false, }, + "noop" => return true, _ => return false, } @@ -755,6 +756,9 @@ pub struct FileCacheConfig { /// Key for data encryption, a heximal representation of [u8; 32]. #[serde(default)] pub encryption_key: String, + /// Disable the chunk map; it is assumed that all data is ready. + #[serde(default)] + pub disable_chunk_map: bool, } impl FileCacheConfig { @@ -842,6 +846,9 @@ pub struct RafsConfigV2 { /// Filesystem prefetching configuration. #[serde(default)] pub prefetch: PrefetchConfigV2, + // Only use cache, don't access the backend + #[serde(default)] + pub use_cache_only: bool, } impl RafsConfigV2 { @@ -1260,6 +1267,9 @@ impl TryFrom<&BackendConfig> for BackendConfigV2 { "registry" => { config.registry = Some(serde_json::from_value(value.backend_config.clone())?); } + "noop" => { + // do nothing, the noop backend has no config + } v => { return Err(Error::new( ErrorKind::InvalidInput, @@ -1366,6 +1376,9 @@ struct RafsConfig { // ZERO value means, amplifying user io is not enabled. 
#[serde(default = "default_batch_size")] pub amplify_io: usize, + // Only use cache, don't access the backend + #[serde(default)] + pub use_cache_only: bool, } impl TryFrom for ConfigV2 { @@ -1383,6 +1396,7 @@ impl TryFrom for ConfigV2 { access_pattern: v.access_pattern, latest_read_files: v.latest_read_files, prefetch: v.fs_prefetch.into(), + use_cache_only: v.use_cache_only, }; if !cache.prefetch.enable && rafs.prefetch.enable { cache.prefetch = rafs.prefetch.clone(); diff --git a/smoke/tests/blobcache_test.go b/smoke/tests/blobcache_test.go index 36ba897c540..1152a2dbff6 100644 --- a/smoke/tests/blobcache_test.go +++ b/smoke/tests/blobcache_test.go @@ -163,6 +163,57 @@ func (a *BlobCacheTestSuite) TestCommandFlags(t *testing.T) { } } +func (a *BlobCacheTestSuite) TestUseBlobCacheToRun(t *testing.T) { + ctx, _, ociBlobDigest := a.prepareTestEnv(t) + defer ctx.Destroy(t) + + // Generate blobcache + tool.Run(t, fmt.Sprintf("%s create -t targz-ref --bootstrap %s --blob-cache-dir %s %s", + ctx.Binary.Builder, ctx.Env.BootstrapPath, ctx.Env.CacheDir, + filepath.Join(ctx.Env.BlobDir, ociBlobDigest.Hex()))) + + // Remove the origin blob + os.Remove(filepath.Join(ctx.Env.BlobDir, ociBlobDigest.Hex())) + + nydusd, err := tool.NewNydusd(tool.NydusdConfig{ + NydusdPath: ctx.Binary.Nydusd, + BootstrapPath: ctx.Env.BootstrapPath, + ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"), + MountPath: ctx.Env.MountDir, + APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"), + BackendType: "localfs", + BackendConfig: fmt.Sprintf(`{"dir": "%s"}`, ctx.Env.BlobDir), + EnablePrefetch: ctx.Runtime.EnablePrefetch, + BlobCacheDir: ctx.Env.CacheDir, + CacheType: ctx.Runtime.CacheType, + CacheCompressed: ctx.Runtime.CacheCompressed, + RafsMode: ctx.Runtime.RafsMode, + DigestValidate: false, + UseCacheOnly: true, + DisableChunkMap: true, + }) + require.NoError(t, err) + + err = nydusd.Mount() + require.NoError(t, err) + defer func() { + if err := 
nydusd.Umount(); err != nil { + log.L.WithError(err).Errorf("umount") + } + }() + + // make sure blobcache ready + err = filepath.WalkDir(ctx.Env.MountDir, func(path string, entry fs.DirEntry, err error) error { + require.Nil(t, err) + if entry.Type().IsRegular() { + _, err = os.ReadFile(path) + require.NoError(t, err) + } + return nil + }) + require.NoError(t, err) +} + func (a *BlobCacheTestSuite) TestGenerateBlobcache(t *testing.T) { ctx, blobcacheDir, ociBlobDigest := a.prepareTestEnv(t) defer ctx.Destroy(t) @@ -200,9 +251,7 @@ func (a *BlobCacheTestSuite) TestGenerateBlobcache(t *testing.T) { err = filepath.WalkDir(ctx.Env.MountDir, func(path string, entry fs.DirEntry, err error) error { require.Nil(t, err) if entry.Type().IsRegular() { - targetPath, err := filepath.Rel(ctx.Env.MountDir, path) - require.NoError(t, err) - _, _ = os.ReadFile(targetPath) + _, _ = os.ReadFile(path) } return nil }) diff --git a/smoke/tests/tool/nydusd.go b/smoke/tests/tool/nydusd.go index adfae105743..63633120fc2 100644 --- a/smoke/tests/tool/nydusd.go +++ b/smoke/tests/tool/nydusd.go @@ -70,6 +70,8 @@ type NydusdConfig struct { AccessPattern bool PrefetchFiles []string AmplifyIO uint64 + DisableChunkMap bool + UseCacheOnly bool } type Nydusd struct { @@ -91,10 +93,12 @@ var configTpl = ` "type": "{{.CacheType}}", "config": { "compressed": {{.CacheCompressed}}, - "work_dir": "{{.BlobCacheDir}}" + "work_dir": "{{.BlobCacheDir}}", + "disable_chunk_map": {{.DisableChunkMap}} } } }, + "use_cache_only": {{.UseCacheOnly}}, "mode": "{{.RafsMode}}", "iostats_files": {{.IOStatsFiles}}, "fs_prefetch": { diff --git a/storage/src/backend/mod.rs b/storage/src/backend/mod.rs index aec8db9de06..5e17072bc60 100644 --- a/storage/src/backend/mod.rs +++ b/storage/src/backend/mod.rs @@ -40,6 +40,7 @@ pub mod http_proxy; pub mod localdisk; #[cfg(feature = "backend-localfs")] pub mod localfs; +pub mod noop; #[cfg(any(feature = "backend-oss", feature = "backend-s3"))] pub mod object_storage; 
#[cfg(feature = "backend-oss")] diff --git a/storage/src/backend/noop.rs b/storage/src/backend/noop.rs new file mode 100644 index 00000000000..1d387eeb8bb --- /dev/null +++ b/storage/src/backend/noop.rs @@ -0,0 +1,91 @@ +// Copyright (C) 2021-2023 Alibaba Cloud. All rights reserved. +// +// SPDX-License-Identifier: Apache-2.0 + +//! Storage backend driver to reject all blob operations. + +use std::io::Result; +use std::sync::Arc; + +use fuse_backend_rs::file_buf::FileVolatileSlice; +use nydus_utils::metrics::BackendMetrics; + +use crate::backend::{BackendError, BackendResult, BlobBackend, BlobReader}; + +#[derive(Debug)] +pub enum NoopError { + Noop, +} + +/// a Noop backend, do nothing +#[derive(Default)] +pub struct Noop { + metrics: Arc, +} + +impl Noop { + pub fn new(id: Option<&str>) -> Result { + let id = id.ok_or_else(|| einval!("noop requires blob_id"))?; + Ok(Noop { + metrics: BackendMetrics::new(id, "noop"), + }) + } +} + +struct NoopEntry { + blob_id: String, + metrics: Arc, +} + +impl BlobReader for NoopEntry { + fn blob_size(&self) -> BackendResult { + Err(BackendError::Unsupported(format!( + "unsupport blob_size operation for {}", + self.blob_id, + ))) + } + + fn try_read(&self, _buf: &mut [u8], _offset: u64) -> BackendResult { + Err(BackendError::Unsupported(format!( + "unsupport try_read operation for {}", + self.blob_id, + ))) + } + + fn readv( + &self, + _bufs: &[FileVolatileSlice], + _offset: u64, + _max_size: usize, + ) -> BackendResult { + Err(BackendError::Unsupported(format!( + "unsupport readv operation for {}", + self.blob_id, + ))) + } + + fn metrics(&self) -> &BackendMetrics { + &self.metrics + } +} + +impl BlobBackend for Noop { + fn shutdown(&self) {} + + fn metrics(&self) -> &BackendMetrics { + &self.metrics + } + + fn get_reader(&self, blob_id: &str) -> BackendResult> { + Ok(Arc::new(NoopEntry { + blob_id: blob_id.to_owned(), + metrics: self.metrics.clone(), + })) + } +} + +impl Drop for Noop { + fn drop(&mut self) { + 
self.metrics.release().unwrap_or_else(|e| error!("{:?}", e)); + } +} diff --git a/storage/src/cache/filecache/mod.rs b/storage/src/cache/filecache/mod.rs index 2b158ca09b1..297d672ef2c 100644 --- a/storage/src/cache/filecache/mod.rs +++ b/storage/src/cache/filecache/mod.rs @@ -41,6 +41,7 @@ pub struct FileCacheMgr { work_dir: String, validate: bool, disable_indexed_map: bool, + disable_chunk_map: bool, cache_raw_data: bool, cache_encrypted: bool, cache_convergent_encryption: bool, @@ -71,6 +72,7 @@ impl FileCacheMgr { worker_mgr: Arc::new(worker_mgr), work_dir: work_dir.to_owned(), disable_indexed_map: blob_cfg.disable_indexed_map, + disable_chunk_map: blob_cfg.disable_chunk_map, validate: config.cache_validate, cache_raw_data: config.cache_compressed, cache_encrypted: blob_cfg.enable_encryption, @@ -230,8 +232,13 @@ impl FileCacheEntry { (file, None, chunk_map, true, true, false) } else { let blob_file_path = format!("{}/{}", mgr.work_dir, blob_id); - let (chunk_map, is_direct_chunkmap) = - Self::create_chunk_map(mgr, &blob_info, &blob_file_path)?; + let (chunk_map, is_direct_chunkmap) = if mgr.disable_chunk_map { + let chunk_map = + Arc::new(BlobStateMap::from(NoopChunkMap::new(true))) as Arc; + (chunk_map, true) + } else { + Self::create_chunk_map(mgr, &blob_info, &blob_file_path)? + }; // Validation is supported by RAFS v5 (which has no meta_ci) or v6 with chunk digest array. let validation_supported = !blob_info.meta_ci_is_valid() || blob_info.has_feature(BlobFeatures::INLINED_CHUNK_DIGEST); diff --git a/storage/src/cache/state/noop_chunk_map.rs b/storage/src/cache/state/noop_chunk_map.rs index 7e4d29bc415..641c7ccba5d 100644 --- a/storage/src/cache/state/noop_chunk_map.rs +++ b/storage/src/cache/state/noop_chunk_map.rs @@ -7,24 +7,61 @@ use std::io::Result; use crate::cache::state::{ChunkIndexGetter, ChunkMap}; use crate::device::BlobChunkInfo; +use super::RangeMap; + /// A dummy implementation of the [ChunkMap] trait. 
/// /// The `NoopChunkMap` is an dummy implementation of [ChunkMap], which just reports every chunk as /// always ready to use or not. It may be used to support disk based backend storage. pub struct NoopChunkMap { - cached: bool, + all_chunk_ready: bool, } impl NoopChunkMap { /// Create a new instance of `NoopChunkMap`. - pub fn new(cached: bool) -> Self { - Self { cached } + pub fn new(all_chunk_ready: bool) -> Self { + Self { all_chunk_ready } } } impl ChunkMap for NoopChunkMap { fn is_ready(&self, _chunk: &dyn BlobChunkInfo) -> Result { - Ok(self.cached) + Ok(self.all_chunk_ready) + } + + fn set_ready_and_clear_pending(&self, _chunk: &dyn BlobChunkInfo) -> Result<()> { + Ok(()) + } + + fn check_ready_and_mark_pending( + &self, + _chunk: &dyn BlobChunkInfo, + ) -> crate::StorageResult { + Ok(true) + } + + fn is_persist(&self) -> bool { + true + } + + fn as_range_map(&self) -> Option<&dyn RangeMap> { + Some(self) + } + + fn is_pending(&self, _chunk: &dyn BlobChunkInfo) -> Result { + Ok(false) + } + + fn is_ready_or_pending(&self, chunk: &dyn BlobChunkInfo) -> Result { + if matches!(self.is_pending(chunk), Ok(true)) { + Ok(true) + } else { + self.is_ready(chunk) + } + } + + fn clear_pending(&self, _chunk: &dyn BlobChunkInfo) { + panic!("no support of clear_pending()"); } } @@ -35,3 +72,28 @@ impl ChunkIndexGetter for NoopChunkMap { chunk.id() } } + +impl RangeMap for NoopChunkMap { + type I = u32; + + #[inline] + fn is_range_all_ready(&self) -> bool { + self.all_chunk_ready + } + + fn is_range_ready(&self, _start_index: u32, _count: u32) -> Result { + Ok(self.all_chunk_ready) + } + + fn check_range_ready_and_mark_pending( + &self, + _start_index: u32, + _count: u32, + ) -> Result>> { + Ok(None) + } + + fn set_range_ready_and_clear_pending(&self, _start_index: u32, _count: u32) -> Result<()> { + Ok(()) + } +} diff --git a/storage/src/factory.rs b/storage/src/factory.rs index cc37a4e913c..106d37867f6 100644 --- a/storage/src/factory.rs +++ b/storage/src/factory.rs 
@@ -27,6 +27,7 @@ use crate::backend::http_proxy; use crate::backend::localdisk; #[cfg(feature = "backend-localfs")] use crate::backend::localfs; +use crate::backend::noop::Noop; #[cfg(feature = "backend-oss")] use crate::backend::oss; #[cfg(feature = "backend-registry")] @@ -117,6 +118,11 @@ impl BlobFactory { ) -> IOResult> { let backend_cfg = config.get_backend_config()?; let cache_cfg = config.get_cache_config()?; + let use_cache_only = if let Ok(config) = config.get_rafs_config() { + config.use_cache_only + } else { + false + }; let key = BlobCacheMgrKey { config: config.clone(), }; @@ -125,7 +131,12 @@ impl BlobFactory { if let Some(mgr) = guard.get(&key) { return mgr.get_blob_cache(blob_info); } - let backend = Self::new_backend(backend_cfg, &blob_info.blob_id())?; + + let backend = if use_cache_only { + Self::new_noop_backend(&blob_info.blob_id())? + } else { + Self::new_backend(backend_cfg, &blob_info.blob_id())? + }; let mgr = match cache_cfg.cache_type.as_str() { "blobcache" | "filecache" => { let mgr = FileCacheMgr::new(cache_cfg, backend, ASYNC_RUNTIME.clone(), &config.id)?; @@ -190,6 +201,10 @@ impl BlobFactory { } } + pub fn new_noop_backend(blob_id: &str) -> IOResult> { + Ok(Arc::new(Noop::new(Some(blob_id))?)) + } + /// Create a storage backend for the blob with id `blob_id`. #[allow(unused_variables)] pub fn new_backend( @@ -227,6 +242,7 @@ impl BlobFactory { config.get_http_proxy_config()?, Some(blob_id), )?)), + "noop" => Ok(Arc::new(Noop::new(Some(blob_id))?)), _ => Err(einval!(format!( "unsupported backend type '{}'", config.backend_type