Support running an image directly from the blob cache #1436

Status: Closed · wants to merge 2 commits
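In short, this PR lets nydusd serve an image purely from a pre-populated blob cache, with no backend access. The flow exercised by the new smoke test is roughly the following (the nydus-image binary name for the builder is an assumption; the subcommand and flags are taken verbatim from the test):

# 1. Build a nydus bootstrap and populate a local blob cache in one step.
nydus-image create -t targz-ref --bootstrap ./bootstrap \
    --blob-cache-dir /var/lib/nydus/cache ./blobs/<oci-blob-digest>

# 2. Delete the original blob, then start nydusd with the new
#    disable_chunk_map / use_cache_only options so every read is
#    served from the cache (config sketches follow the diffs below).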
14 changes: 14 additions & 0 deletions api/src/config.rs
@@ -339,6 +339,7 @@ impl BackendConfigV2 {
}
None => return false,
},
"noop" => return true,
_ => return false,
}

@@ -755,6 +756,9 @@ pub struct FileCacheConfig {
/// Key for data encryption, a hexadecimal representation of [u8; 32].
#[serde(default)]
pub encryption_key: String,
/// Disable the chunk map; assume all cached data is ready.
#[serde(default)]
pub disable_chunk_map: bool,
Review comment (Collaborator): Is this option similar to disable_indexed_map? And should we export the option to the user?

}

impl FileCacheConfig {
@@ -842,6 +846,9 @@ pub struct RafsConfigV2 {
/// Filesystem prefetching configuration.
#[serde(default)]
pub prefetch: PrefetchConfigV2,
/// Only use the cache; do not access the backend.
#[serde(default)]
pub use_cache_only: bool,
Review comment (Collaborator): Should this option be put under device.cache?

}

impl RafsConfigV2 {
@@ -1260,6 +1267,9 @@ impl TryFrom<&BackendConfig> for BackendConfigV2 {
"registry" => {
config.registry = Some(serde_json::from_value(value.backend_config.clone())?);
}
"noop" => {
Review comment (Member): Why do you need noop instead of using the localfs backend? You can amend localfs to work w/o the chunkmap. There is no need to introduce a new backend type for this IMO.

// Do nothing: the noop backend has no configuration.
}
v => {
return Err(Error::new(
ErrorKind::InvalidInput,
@@ -1366,6 +1376,9 @@ struct RafsConfig {
// A value of zero means amplifying user I/O is not enabled.
#[serde(default = "default_batch_size")]
pub amplify_io: usize,
// Only use the cache; do not access the backend.
#[serde(default)]
pub use_cache_only: bool,
}

impl TryFrom<RafsConfig> for ConfigV2 {
@@ -1383,6 +1396,7 @@ impl TryFrom<RafsConfig> for ConfigV2 {
access_pattern: v.access_pattern,
latest_read_files: v.latest_read_files,
prefetch: v.fs_prefetch.into(),
use_cache_only: v.use_cache_only,
};
if !cache.prefetch.enable && rafs.prefetch.enable {
cache.prefetch = rafs.prefetch.clone();
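Put together, the new v2 options make a cache-only nydusd configuration possible. A minimal sketch in the v2 TOML format (the section layout and any field not touched by this diff are assumptions read off the surrounding structs):

version = 2

[backend]
# New backend type from this PR: it rejects every read, so all data
# must already sit in the local blob cache.
type = "noop"

[cache]
type = "filecache"

[cache.filecache]
work_dir = "/var/lib/nydus/cache"
# New: skip chunk-map bookkeeping and treat every chunk as ready.
disable_chunk_map = true

[rafs]
mode = "direct"
# New: serve all I/O from the cache, never from the backend.
use_cache_only = true

Note that disable_chunk_map only makes sense when the cache has been fully populated up front, e.g. by nydus-image create --blob-cache-dir as in the smoke test below.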
55 changes: 52 additions & 3 deletions smoke/tests/blobcache_test.go
@@ -162,6 +162,57 @@ func (a *BlobCacheTestSuite) TestCommandFlags(t *testing.T) {
}
}

func (a *BlobCacheTestSuite) TestUseBlobCacheToRun(t *testing.T) {
ctx, _, ociBlobDigest := a.prepareTestEnv(t)
defer ctx.Destroy(t)

// Generate blobcache
tool.Run(t, fmt.Sprintf("%s create -t targz-ref --bootstrap %s --blob-cache-dir %s %s",
ctx.Binary.Builder, ctx.Env.BootstrapPath, ctx.Env.CacheDir,
filepath.Join(ctx.Env.BlobDir, ociBlobDigest.Hex())))

// Remove the original blob so only the cache can serve reads
os.Remove(filepath.Join(ctx.Env.BlobDir, ociBlobDigest.Hex()))

nydusd, err := tool.NewNydusd(tool.NydusdConfig{
NydusdPath: ctx.Binary.Nydusd,
BootstrapPath: ctx.Env.BootstrapPath,
ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"),
MountPath: ctx.Env.MountDir,
APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"),
BackendType: "localfs",
BackendConfig: fmt.Sprintf(`{"dir": "%s"}`, ctx.Env.BlobDir),
EnablePrefetch: ctx.Runtime.EnablePrefetch,
BlobCacheDir: ctx.Env.CacheDir,
CacheType: ctx.Runtime.CacheType,
CacheCompressed: ctx.Runtime.CacheCompressed,
RafsMode: ctx.Runtime.RafsMode,
DigestValidate: false,
UseCacheOnly: true,
DisableChunkMap: true,
})
require.NoError(t, err)

err = nydusd.Mount()
require.NoError(t, err)
defer func() {
if err := nydusd.Umount(); err != nil {
log.L.WithError(err).Errorf("umount")
}
}()

// Make sure the blobcache alone can serve every regular file
err = filepath.WalkDir(ctx.Env.MountDir, func(path string, entry fs.DirEntry, err error) error {
require.Nil(t, err)
if entry.Type().IsRegular() {
_, err = os.ReadFile(path)
require.NoError(t, err)
}
return nil
})
require.NoError(t, err)
}

func (a *BlobCacheTestSuite) TestGenerateBlobcache(t *testing.T) {
ctx, blobcacheDir, ociBlobDigest := a.prepareTestEnv(t)
defer ctx.Destroy(t)
@@ -199,9 +250,7 @@ func (a *BlobCacheTestSuite) TestGenerateBlobcache(t *testing.T) {
err = filepath.WalkDir(ctx.Env.MountDir, func(path string, entry fs.DirEntry, err error) error {
require.Nil(t, err)
if entry.Type().IsRegular() {
targetPath, err := filepath.Rel(ctx.Env.MountDir, path)
require.NoError(t, err)
_, _ = os.ReadFile(targetPath)
_, _ = os.ReadFile(path)
}
return nil
})
6 changes: 5 additions & 1 deletion smoke/tests/tool/nydusd.go
@@ -69,6 +69,8 @@ type NydusdConfig struct {
AccessPattern bool
PrefetchFiles []string
AmplifyIO uint64
DisableChunkMap bool
UseCacheOnly bool
}

type Nydusd struct {
@@ -90,10 +92,12 @@ var configTpl = `
"type": "{{.CacheType}}",
"config": {
"compressed": {{.CacheCompressed}},
"work_dir": "{{.BlobCacheDir}}"
"work_dir": "{{.BlobCacheDir}}",
"disable_chunk_map": {{.DisableChunkMap}}
}
}
},
"use_cache_only": {{.UseCacheOnly}},
"mode": "{{.RafsMode}}",
"iostats_files": {{.IOStatsFiles}},
"fs_prefetch": {
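For reference, with DisableChunkMap and UseCacheOnly set to true, the template renders roughly the following legacy JSON (the device/backend wrapper and the concrete values are illustrative; only the two new keys come from this diff):

{
  "device": {
    "backend": {
      "type": "localfs",
      "config": { "dir": "/path/to/blobs" }
    },
    "cache": {
      "type": "blobcache",
      "config": {
        "compressed": false,
        "work_dir": "/var/lib/nydus/cache",
        "disable_chunk_map": true
      }
    }
  },
  "use_cache_only": true,
  "mode": "direct",
  "iostats_files": false
}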
1 change: 1 addition & 0 deletions storage/src/backend/mod.rs
@@ -40,6 +40,7 @@ pub mod http_proxy;
pub mod localdisk;
#[cfg(feature = "backend-localfs")]
pub mod localfs;
pub mod noop;
#[cfg(any(feature = "backend-oss", feature = "backend-s3"))]
pub mod object_storage;
#[cfg(feature = "backend-oss")]
91 changes: 91 additions & 0 deletions storage/src/backend/noop.rs
@@ -0,0 +1,91 @@
// Copyright (C) 2021-2023 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0

//! Storage backend driver to reject all blob operations.

use std::io::Result;
use std::sync::Arc;

use fuse_backend_rs::file_buf::FileVolatileSlice;
use nydus_utils::metrics::BackendMetrics;

use crate::backend::{BackendError, BackendResult, BlobBackend, BlobReader};

#[derive(Debug)]
pub enum NoopError {
Noop,
}

/// A no-op backend that rejects all blob operations.
#[derive(Default)]
pub struct Noop {
metrics: Arc<BackendMetrics>,
}

impl Noop {
pub fn new(id: Option<&str>) -> Result<Self> {
let id = id.ok_or_else(|| einval!("noop requires blob_id"))?;
Ok(Noop {
metrics: BackendMetrics::new(id, "noop"),
})
}
}

struct NoopEntry {
blob_id: String,
metrics: Arc<BackendMetrics>,
}

impl BlobReader for NoopEntry {
fn blob_size(&self) -> BackendResult<u64> {
Err(BackendError::Unsupported(format!(
"unsupport blob_size operation for {}",
self.blob_id,
)))
}

fn try_read(&self, _buf: &mut [u8], _offset: u64) -> BackendResult<usize> {
Err(BackendError::Unsupported(format!(
"unsupport try_read operation for {}",
self.blob_id,
)))
}

fn readv(
&self,
_bufs: &[FileVolatileSlice],
_offset: u64,
_max_size: usize,
) -> BackendResult<usize> {
Err(BackendError::Unsupported(format!(
"unsupport readv operation for {}",
self.blob_id,
)))
}

fn metrics(&self) -> &BackendMetrics {
&self.metrics
}
}

impl BlobBackend for Noop {
fn shutdown(&self) {}

fn metrics(&self) -> &BackendMetrics {
&self.metrics
}

fn get_reader(&self, blob_id: &str) -> BackendResult<Arc<dyn BlobReader>> {
Ok(Arc::new(NoopEntry {
blob_id: blob_id.to_owned(),
metrics: self.metrics.clone(),
}))
}
}

impl Drop for Noop {
fn drop(&mut self) {
self.metrics.release().unwrap_or_else(|e| error!("{:?}", e));
}
}
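A quick illustration of the contract this backend enforces (a sketch, not code from the PR):

// Every read through the noop backend fails with BackendError::Unsupported,
// so the cache layer above it is the only possible source of data.
let backend = Noop::new(Some("my-blob"))?;
let reader = backend.get_reader("my-blob")?;
assert!(reader.blob_size().is_err());
assert!(reader.try_read(&mut [0u8; 16], 0).is_err());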
11 changes: 9 additions & 2 deletions storage/src/cache/filecache/mod.rs
@@ -41,6 +41,7 @@ pub struct FileCacheMgr {
work_dir: String,
validate: bool,
disable_indexed_map: bool,
disable_chunk_map: bool,
cache_raw_data: bool,
cache_encrypted: bool,
cache_convergent_encryption: bool,
@@ -71,6 +72,7 @@ impl FileCacheMgr {
worker_mgr: Arc::new(worker_mgr),
work_dir: work_dir.to_owned(),
disable_indexed_map: blob_cfg.disable_indexed_map,
disable_chunk_map: blob_cfg.disable_chunk_map,
validate: config.cache_validate,
cache_raw_data: config.cache_compressed,
cache_encrypted: blob_cfg.enable_encryption,
@@ -230,8 +232,13 @@ impl FileCacheEntry {
(file, None, chunk_map, true, true, false)
} else {
let blob_file_path = format!("{}/{}", mgr.work_dir, blob_id);
let (chunk_map, is_direct_chunkmap) =
Self::create_chunk_map(mgr, &blob_info, &blob_file_path)?;
let (chunk_map, is_direct_chunkmap) = if mgr.disable_chunk_map {
let chunk_map =
Arc::new(BlobStateMap::from(NoopChunkMap::new(true))) as Arc<dyn ChunkMap>;
(chunk_map, true)
} else {
Self::create_chunk_map(mgr, &blob_info, &blob_file_path)?
};
// Validation is supported by RAFS v5 (which has no meta_ci) or v6 with chunk digest array.
let validation_supported = !blob_info.meta_ci_is_valid()
|| blob_info.has_feature(BlobFeatures::INLINED_CHUNK_DIGEST);
70 changes: 66 additions & 4 deletions storage/src/cache/state/noop_chunk_map.rs
@@ -7,24 +7,61 @@ use std::io::Result;
use crate::cache::state::{ChunkIndexGetter, ChunkMap};
use crate::device::BlobChunkInfo;

use super::RangeMap;

/// A dummy implementation of the [ChunkMap] trait.
///
/// The `NoopChunkMap` is a dummy implementation of [ChunkMap] which reports every chunk as
/// either always ready or never ready, depending on how it was constructed. It may be used
/// to support disk based backend storage.
pub struct NoopChunkMap {
cached: bool,
all_chunk_ready: bool,
}

impl NoopChunkMap {
/// Create a new instance of `NoopChunkMap`.
pub fn new(cached: bool) -> Self {
Self { cached }
pub fn new(all_chunk_ready: bool) -> Self {
Self { all_chunk_ready }
}
}

impl ChunkMap for NoopChunkMap {
fn is_ready(&self, _chunk: &dyn BlobChunkInfo) -> Result<bool> {
Ok(self.cached)
Ok(self.all_chunk_ready)
}

fn set_ready_and_clear_pending(&self, _chunk: &dyn BlobChunkInfo) -> Result<()> {
Ok(())
}

fn check_ready_and_mark_pending(
&self,
_chunk: &dyn BlobChunkInfo,
) -> crate::StorageResult<bool> {
Ok(true)
}

fn is_persist(&self) -> bool {
true
}

fn as_range_map(&self) -> Option<&dyn RangeMap<I = u32>> {
Some(self)
}

fn is_pending(&self, _chunk: &dyn BlobChunkInfo) -> Result<bool> {
Ok(false)
}

fn is_ready_or_pending(&self, chunk: &dyn BlobChunkInfo) -> Result<bool> {
if matches!(self.is_pending(chunk), Ok(true)) {
Ok(true)
} else {
self.is_ready(chunk)
}
}

fn clear_pending(&self, _chunk: &dyn BlobChunkInfo) {
panic!("no support of clear_pending()");
}
}

@@ -35,3 +72,28 @@ impl ChunkIndexGetter for NoopChunkMap {
chunk.id()
}
}

impl RangeMap for NoopChunkMap {
type I = u32;

#[inline]
fn is_range_all_ready(&self) -> bool {
self.all_chunk_ready
}

fn is_range_ready(&self, _start_index: u32, _count: u32) -> Result<bool> {
Ok(self.all_chunk_ready)
}

fn check_range_ready_and_mark_pending(
&self,
_start_index: u32,
_count: u32,
) -> Result<Option<Vec<u32>>> {
Ok(None)
}

fn set_range_ready_and_clear_pending(&self, _start_index: u32, _count: u32) -> Result<()> {
Ok(())
}
}
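A sketch of what the new RangeMap implementation means in practice (the import path is an assumption based on this file's location in the crate):

use nydus_storage::cache::state::{NoopChunkMap, RangeMap};

// With all_chunk_ready = true, every chunk and every range is reported as
// cached, so the filecache never marks chunks pending and never schedules
// a backend fetch.
let map = NoopChunkMap::new(true);
assert!(map.is_range_all_ready());
assert!(map.is_range_ready(0, 1024).unwrap());
assert!(map.check_range_ready_and_mark_pending(0, 1024).unwrap().is_none());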