Support direct use of blob cache to run image
In localfs mode, we can generate the blob cache directly through --blob-cache-dir.
In that case Nydusd can start from the blob cache alone, but by default it tracks
chunk state through a ChunkMap in which every chunk starts out not ready and is
downloaded from the backend on access. This commit introduces the disable_chunk_map
config option to turn the ChunkMap off and treat all chunks as ready. Even so,
Nydusd still visits the backend to verify the validity of the blob when initializing
the backend, which requires the original nydus blob to exist, so the use_cache_only
config option is introduced to keep Nydusd from accessing the backend at all.
Both options default to false; a short sketch follows the summary below.

1. Add disable_chunk_map to disable the ChunkMap logic
2. Add use_cache_only to keep nydusd from accessing the backend
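
The two new options are plain serde fields with defaults, so configs written
before this change keep parsing. A minimal sketch (standalone structs that only
mirror the new fields; not code from this commit):

    use serde::Deserialize;

    // Stand-ins for the real FileCacheConfig and RafsConfigV2; only the
    // two fields introduced by this commit are mirrored here.
    #[derive(Deserialize, Default)]
    struct FileCacheConfigSketch {
        #[serde(default)]
        disable_chunk_map: bool,
    }

    #[derive(Deserialize, Default)]
    struct RafsConfigSketch {
        #[serde(default)]
        use_cache_only: bool,
    }

    fn main() {
        // An old config that omits the field still parses; serde's
        // `default` attribute falls back to `false`.
        let old: FileCacheConfigSketch = serde_json::from_str("{}").unwrap();
        assert!(!old.disable_chunk_map);

        // Opting in to cache-only startup.
        let new: RafsConfigSketch =
            serde_json::from_str(r#"{"use_cache_only": true}"#).unwrap();
        assert!(new.use_cache_only);
    }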

Signed-off-by: zyfjeff <[email protected]>
zyfjeff committed Oct 10, 2023
1 parent 9ab1ec1 commit 18ba431
Showing 8 changed files with 249 additions and 7 deletions.
14 changes: 14 additions & 0 deletions api/src/config.rs
@@ -339,6 +339,7 @@ impl BackendConfigV2 {
}
None => return false,
},
"noop" => return true,
_ => return false,
}

@@ -755,6 +756,9 @@ pub struct FileCacheConfig {
/// Key for data encryption, a hexadecimal representation of [u8; 32].
#[serde(default)]
pub encryption_key: String,
/// Disable the chunk map; it is assumed that all data is ready.
#[serde(default)]
pub disable_chunk_map: bool,
}

impl FileCacheConfig {
@@ -842,6 +846,9 @@ pub struct RafsConfigV2 {
/// Filesystem prefetching configuration.
#[serde(default)]
pub prefetch: PrefetchConfigV2,
/// Only use the cache; do not access the backend.
#[serde(default)]
pub use_cache_only: bool,
}

impl RafsConfigV2 {
@@ -1260,6 +1267,9 @@ impl TryFrom<&BackendConfig> for BackendConfigV2 {
"registry" => {
config.registry = Some(serde_json::from_value(value.backend_config.clone())?);
}
"noop" => {
// Do nothing; the noop backend has no config.
}
v => {
return Err(Error::new(
ErrorKind::InvalidInput,
@@ -1366,6 +1376,9 @@ struct RafsConfig {
// A value of ZERO means amplifying user I/O is not enabled.
#[serde(default = "default_batch_size")]
pub amplify_io: usize,
// Only use the cache; do not access the backend.
#[serde(default)]
pub use_cache_only: bool,
}

impl TryFrom<RafsConfig> for ConfigV2 {
@@ -1383,6 +1396,7 @@ impl TryFrom<RafsConfig> for ConfigV2 {
access_pattern: v.access_pattern,
latest_read_files: v.latest_read_files,
prefetch: v.fs_prefetch.into(),
use_cache_only: v.use_cache_only,
};
if !cache.prefetch.enable && rafs.prefetch.enable {
cache.prefetch = rafs.prefetch.clone();
54 changes: 51 additions & 3 deletions smoke/tests/blobcache_test.go
@@ -162,6 +162,56 @@ func (a *BlobCacheTestSuite) TestCommandFlags(t *testing.T) {
require.Contains(t, output, tc.expectedOutput)
}
}
func (a *BlobCacheTestSuite) TestUseBlobCacheToRun(t *testing.T) {
ctx, _, ociBlobDigest := a.prepareTestEnv(t)
defer ctx.Destroy(t)

// Generate blobcache
tool.Run(t, fmt.Sprintf("%s create -t targz-ref --bootstrap %s --blob-cache-dir %s %s",
ctx.Binary.Builder, ctx.Env.BootstrapPath, ctx.Env.CacheDir,
filepath.Join(ctx.Env.BlobDir, ociBlobDigest.Hex())))

// Remove the original blob
os.Remove(filepath.Join(ctx.Env.BlobDir, ociBlobDigest.Hex()))

nydusd, err := tool.NewNydusd(tool.NydusdConfig{
NydusdPath: ctx.Binary.Nydusd,
BootstrapPath: ctx.Env.BootstrapPath,
ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"),
MountPath: ctx.Env.MountDir,
APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"),
BackendType: "localfs",
BackendConfig: fmt.Sprintf(`{"dir": "%s"}`, ctx.Env.BlobDir),
EnablePrefetch: ctx.Runtime.EnablePrefetch,
BlobCacheDir: ctx.Env.CacheDir,
CacheType: ctx.Runtime.CacheType,
CacheCompressed: ctx.Runtime.CacheCompressed,
RafsMode: ctx.Runtime.RafsMode,
DigestValidate: false,
UseCacheOnly: true,
DisableChunkMap: true,
})
require.NoError(t, err)

err = nydusd.Mount()
require.NoError(t, err)
defer func() {
if err := nydusd.Umount(); err != nil {
log.L.WithError(err).Errorf("umount")
}
}()

// Make sure the blobcache is ready
err = filepath.WalkDir(ctx.Env.MountDir, func(path string, entry fs.DirEntry, err error) error {
require.Nil(t, err)
if entry.Type().IsRegular() {
_, err = os.ReadFile(path)
require.NoError(t, err)
}
return nil
})
require.NoError(t, err)
}

func (a *BlobCacheTestSuite) TestGenerateBlobcache(t *testing.T) {
ctx, blobcacheDir, ociBlobDigest := a.prepareTestEnv(t)
@@ -200,9 +250,7 @@ func (a *BlobCacheTestSuite) TestGenerateBlobcache(t *testing.T) {
err = filepath.WalkDir(ctx.Env.MountDir, func(path string, entry fs.DirEntry, err error) error {
require.Nil(t, err)
if entry.Type().IsRegular() {
targetPath, err := filepath.Rel(ctx.Env.MountDir, path)
require.NoError(t, err)
_, _ = os.ReadFile(targetPath)
_, _ = os.ReadFile(path)
}
return nil
})
6 changes: 5 additions & 1 deletion smoke/tests/tool/nydusd.go
@@ -70,6 +70,8 @@ type NydusdConfig struct {
AccessPattern bool
PrefetchFiles []string
AmplifyIO uint64
DisableChunkMap bool
UseCacheOnly bool
}

type Nydusd struct {
@@ -91,10 +93,12 @@ var configTpl = `
"type": "{{.CacheType}}",
"config": {
"compressed": {{.CacheCompressed}},
"work_dir": "{{.BlobCacheDir}}"
"work_dir": "{{.BlobCacheDir}}",
"disable_chunk_map": {{.DisableChunkMap}}
}
}
},
"use_cache_only": {{.UseCacheOnly}},
"mode": "{{.RafsMode}}",
"iostats_files": {{.IOStatsFiles}},
"fs_prefetch": {
2 changes: 2 additions & 0 deletions storage/src/backend/mod.rs
@@ -49,6 +49,8 @@ pub mod registry;
#[cfg(feature = "backend-s3")]
pub mod s3;

pub mod noop;

/// Error codes related to storage backend operations.
#[derive(Debug)]
pub enum BackendError {
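
Only seven of the eight changed files are rendered above; the remaining one
presumably wires the new "noop" backend type into the backend factory. A
hypothetical sketch of that dispatch — the function name and error handling
here are assumptions, not code from this commit:

    use std::io::{Error, ErrorKind, Result};
    use std::sync::Arc;

    use nydus_storage::backend::noop::Noop;
    use nydus_storage::backend::BlobBackend;

    // Hypothetical factory arm: map the "noop" backend type from the
    // config onto the new Noop backend.
    fn new_backend(kind: &str, id: Option<&str>) -> Result<Arc<dyn BlobBackend>> {
        match kind {
            "noop" => Ok(Arc::new(Noop::new(id)?)),
            other => Err(Error::new(
                ErrorKind::InvalidInput,
                format!("unsupported backend type '{}'", other),
            )),
        }
    }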
91 changes: 91 additions & 0 deletions storage/src/backend/noop.rs
@@ -0,0 +1,91 @@
// Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0

//! A no-op storage backend driver that rejects all blob access requests.
use std::io::Result;
use std::sync::Arc;

use fuse_backend_rs::file_buf::FileVolatileSlice;
use nydus_utils::metrics::BackendMetrics;

use crate::backend::{BackendError, BackendResult, BlobBackend, BlobReader};

#[derive(Debug)]
pub enum NoopError {
Noop,
}

/// A no-op storage backend that never serves any data.
#[derive(Default)]
pub struct Noop {
metrics: Arc<BackendMetrics>,
}

impl Noop {
pub fn new(id: Option<&str>) -> Result<Self> {
let id = id.ok_or_else(|| einval!("noop requires blob_id"))?;
Ok(Noop {
metrics: BackendMetrics::new(id, "noop"),
})
}
}

struct NoopEntry {
blob_id: String,
metrics: Arc<BackendMetrics>,
}

impl BlobReader for NoopEntry {
fn blob_size(&self) -> BackendResult<u64> {
Err(BackendError::Unsupported(format!(
"unsupport blob_size operation for {}",
self.blob_id,
)))
}

fn try_read(&self, _buf: &mut [u8], _offset: u64) -> BackendResult<usize> {
Err(BackendError::Unsupported(format!(
"unsupport try_read operation for {}",
self.blob_id,
)))
}

fn readv(
&self,
_bufs: &[FileVolatileSlice],
_offset: u64,
_max_size: usize,
) -> BackendResult<usize> {
Err(BackendError::Unsupported(format!(
"unsupport readv operation for {}",
self.blob_id,
)))
}

fn metrics(&self) -> &BackendMetrics {
&self.metrics
}
}

impl BlobBackend for Noop {
fn shutdown(&self) {}

fn metrics(&self) -> &BackendMetrics {
&self.metrics
}

fn get_reader(&self, blob_id: &str) -> BackendResult<Arc<dyn BlobReader>> {
Ok(Arc::new(NoopEntry {
blob_id: blob_id.to_owned(),
metrics: self.metrics.clone(),
}))
}
}

impl Drop for Noop {
fn drop(&mut self) {
self.metrics.release().unwrap_or_else(|e| error!("{:?}", e));
}
}
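
A usage sketch for the new backend (not part of this commit; it assumes the
storage crate is imported as nydus_storage and that BackendResult is public):
every read path returns BackendError::Unsupported, so only data already present
in the local blob cache can ever be served.

    use nydus_storage::backend::noop::Noop;
    use nydus_storage::backend::{BackendResult, BlobBackend, BlobReader};

    fn demo() -> BackendResult<()> {
        // The noop backend still needs a blob id for its metrics.
        let backend = Noop::new(Some("demo-blob")).expect("blob_id required");
        let reader = backend.get_reader("demo-blob")?;
        let mut buf = [0u8; 16];
        // Both calls report Unsupported: the backend never serves data.
        assert!(reader.blob_size().is_err());
        assert!(reader.try_read(&mut buf, 0).is_err());
        Ok(())
    }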
11 changes: 9 additions & 2 deletions storage/src/cache/filecache/mod.rs
@@ -41,6 +41,7 @@ pub struct FileCacheMgr {
work_dir: String,
validate: bool,
disable_indexed_map: bool,
disable_chunk_map: bool,
cache_raw_data: bool,
cache_encrypted: bool,
cache_convergent_encryption: bool,
@@ -71,6 +72,7 @@ impl FileCacheMgr {
worker_mgr: Arc::new(worker_mgr),
work_dir: work_dir.to_owned(),
disable_indexed_map: blob_cfg.disable_indexed_map,
disable_chunk_map: blob_cfg.disable_chunk_map,
validate: config.cache_validate,
cache_raw_data: config.cache_compressed,
cache_encrypted: blob_cfg.enable_encryption,
@@ -230,8 +232,13 @@ impl FileCacheEntry {
(file, None, chunk_map, true, true, false)
} else {
let blob_file_path = format!("{}/{}", mgr.work_dir, blob_id);
let (chunk_map, is_direct_chunkmap) =
Self::create_chunk_map(mgr, &blob_info, &blob_file_path)?;
let (chunk_map, is_direct_chunkmap) = if mgr.disable_chunk_map {
let chunk_map =
Arc::new(BlobStateMap::from(NoopChunkMap::new(true))) as Arc<dyn ChunkMap>;
(chunk_map, true)
} else {
Self::create_chunk_map(mgr, &blob_info, &blob_file_path)?
};
// Validation is supported by RAFS v5 (which has no meta_ci) or v6 with chunk digest array.
let validation_supported = !blob_info.meta_ci_is_valid()
|| blob_info.has_feature(BlobFeatures::INLINED_CHUNK_DIGEST);
61 changes: 61 additions & 0 deletions storage/src/cache/state/noop_chunk_map.rs
@@ -7,6 +7,8 @@ use std::io::Result;
use crate::cache::state::{ChunkIndexGetter, ChunkMap};
use crate::device::BlobChunkInfo;

use super::RangeMap;

/// A dummy implementation of the [ChunkMap] trait.
///
/// The `NoopChunkMap` is a dummy implementation of [ChunkMap], which just reports every chunk as
@@ -26,6 +28,41 @@ impl ChunkMap for NoopChunkMap {
fn is_ready(&self, _chunk: &dyn BlobChunkInfo) -> Result<bool> {
Ok(self.cached)
}

fn set_ready_and_clear_pending(&self, _chunk: &dyn BlobChunkInfo) -> Result<()> {
Ok(())
}

fn check_ready_and_mark_pending(
&self,
_chunk: &dyn BlobChunkInfo,
) -> crate::StorageResult<bool> {
Ok(true)
}

fn is_persist(&self) -> bool {
true
}

fn as_range_map(&self) -> Option<&dyn RangeMap<I = u32>> {
Some(self)
}

fn is_pending(&self, _chunk: &dyn BlobChunkInfo) -> Result<bool> {
Ok(false)
}

fn is_ready_or_pending(&self, chunk: &dyn BlobChunkInfo) -> Result<bool> {
if matches!(self.is_pending(chunk), Ok(true)) {
Ok(true)
} else {
self.is_ready(chunk)
}
}

fn clear_pending(&self, _chunk: &dyn BlobChunkInfo) {
panic!("clear_pending() is not supported");
}
}

impl ChunkIndexGetter for NoopChunkMap {
@@ -35,3 +72,27 @@
chunk.id()
}
}

impl RangeMap for NoopChunkMap {
type I = u32;
#[inline]
fn is_range_all_ready(&self) -> bool {
true
}

fn is_range_ready(&self, _start_index: u32, _count: u32) -> Result<bool> {
Ok(true)
}

fn check_range_ready_and_mark_pending(
&self,
_start_index: u32,
_count: u32,
) -> Result<Option<Vec<u32>>> {
Ok(None)
}

fn set_range_ready_and_clear_pending(&self, _start_index: u32, _count: u32) -> Result<()> {
Ok(())
}
}
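
To see what disable_chunk_map buys, a small sketch of the NoopChunkMap behavior
(assuming these types are reachable under nydus_storage::cache::state; not code
from this commit):

    use nydus_storage::cache::state::{NoopChunkMap, RangeMap};

    fn demo() {
        // Constructed with cached=true, as the file cache manager does
        // when disable_chunk_map is set.
        let map = NoopChunkMap::new(true);
        // Every chunk range is reported ready, so the cache layer never
        // schedules a backend fetch...
        assert!(map.is_range_all_ready());
        assert!(map.is_range_ready(0, 1024).unwrap());
        // ...and nothing is ever marked pending.
        assert_eq!(map.check_range_ready_and_mark_pending(0, 1024).unwrap(), None);
    }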