Skip to content

Commit

Permalink
nydus-image: refactor unpack/compact cli interface
Browse files Browse the repository at this point in the history
Since the unpack and compact subcommands do not need the entire nydusd
configuration file, refactor their cli interface so that they directly
take a backend configuration file instead.

Specifically, we introduce the `--backend-type`, `--backend-config` and
`--backend-config-file` options to specify the backend type and its
configuration, and remove the `--config` option.

Signed-off-by: Yifan Zhao <zhaoyifan@sjtu.edu.cn>

Fixes: #1602
  • Loading branch information
SToPire committed Sep 9, 2024
1 parent 67107ea commit 33df145
Show file tree
Hide file tree
Showing 3 changed files with 211 additions and 62 deletions.
23 changes: 23 additions & 0 deletions docs/nydus-image.md
Original file line number Diff line number Diff line change
Expand Up @@ -182,6 +182,29 @@ data blobs: ["9e50ae5ac02b2ef6ffb86075720e49d95d8240eed4717dd8ac9c68cadba00762"]
-rw-r--r-- 1 root root 20480 3月 29 17:02 df01f389850b79cd5a6ca6db98495bb457aa0821b0558351c55537551322fb96
```

## Unpack Nydus Image
The `nydus-image` tool supports unpacking a Nydus image into a tar file.
```shell
# use --blob to specify RAFS data blob
nydus-image unpack --blob image/blob1 image/bootstrap --output tmp.tar

# use --blob-dir to specify the directory containing RAFS data blobs
nydus-image unpack --blob-dir=image/ image/bootstrap --output tmp.tar

# example-oss.config
{
"endpoint": "region.aliyuncs.com",
"scheme": "https",
"access_key_id": "",
"access_key_secret": "",
"bucket_name": "",
"object_prefix": "image/"
}

# use backend config file to specify remote storage for RAFS data blobs
nydus-image unpack --backend-type oss --backend-config-file example-oss.config image/bootstrap --output tmp.tar
```

## Compact Nydus Image
The `nydus-image` tool supports compacting a Nydus image in order to
1. reduce number of blobs
Expand Down
184 changes: 123 additions & 61 deletions src/bin/nydus-image/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -671,17 +671,48 @@ fn prepare_cmd_args(bti_string: &'static str) -> App {
.required(true),
)
.arg(
Arg::new("config")
.long("config")
.short('C')
.help("config to compactor")
.required(true),
Arg::new("backend-type")
.long("backend-type")
.help(format!(
"Type of backend [possible values: {}]",
BlobFactory::supported_backends()
.into_iter()
.filter(|x| x != "localfs")
.collect::<Vec<_>>()
.join(", ")
))
.required(false)
.group("backend"),
)
.arg(
Arg::new("backend-config")
.long("backend-config")
.help("config file of backend")
.required(true),
.help("Config string of backend")
.required(false),
)
.arg(
Arg::new("backend-config-file")
.long("backend-config-file")
.help("Config file of backend")
.conflicts_with("backend-config")
.required(false),
)
.arg(
Arg::new("blob")
.long("blob")
.short('b')
.help("Path to RAFS data blob file")
.required(false)
.group("backend"),
)
.arg(
Arg::new("blob-dir")
.long("blob-dir")
.short('D')
.help(
"Directory for localfs storage backend, hosting data blobs and cache files",
)
.group("backend"),
)
.arg( arg_chunk_dict )
.arg(
Expand All @@ -693,6 +724,11 @@ fn prepare_cmd_args(bti_string: &'static str) -> App {
.arg(
arg_output_json,
)
.group(
clap::ArgGroup::new("backend")
.args(&["backend-type", "blob", "blob-dir"])
.required(false),
),
);

app.subcommand(
Expand All @@ -703,10 +739,31 @@ fn prepare_cmd_args(bti_string: &'static str) -> App {
.help("File path of RAFS metadata")
.required_unless_present("bootstrap"),
)
.arg(
Arg::new("backend-type")
.long("backend-type")
.help(format!(
"Type of backend [possible values: {}]",
BlobFactory::supported_backends()
.into_iter()
.filter(|x| x != "localfs")
.collect::<Vec<_>>()
.join(", ")
))
.required(false)
.group("backend"),
)
.arg(
Arg::new("backend-config")
.long("backend-config")
.help("config file of backend")
.help("Config string of backend")
.required(false),
)
.arg(
Arg::new("backend-config-file")
.long("backend-config-file")
.help("Config file of backend")
.conflicts_with("backend-config")
.required(false),
)
.arg(
Expand All @@ -721,24 +778,29 @@ fn prepare_cmd_args(bti_string: &'static str) -> App {
Arg::new("blob")
.long("blob")
.short('b')
.help("path to RAFS data blob file")
.required(false),
.help("Path to RAFS data blob file")
.required(false)
.group("backend"),
)
.arg(
Arg::new("blob-dir")
.long("blob-dir")
.short('D')
.conflicts_with("config")
.help(
"Directory for localfs storage backend, hosting data blobs and cache files",
),
)
.group("backend"),
)
.arg(arg_config)
.arg(
Arg::new("output")
.long("output")
.help("path for output tar file")
.help("Path for output tar file")
.required(true),
)
.group(
clap::ArgGroup::new("backend")
.args(&["backend-type", "blob", "blob-dir"])
.required(false),
),
)
}
Expand Down Expand Up @@ -1436,17 +1498,14 @@ impl Command {
}

fn compact(matches: &ArgMatches, build_info: &BuildTimeInfo) -> Result<()> {
let config =
Self::get_configuration(matches).context("failed to get configuration information")?;
config
.internal
.set_blob_accessible(matches.get_one::<String>("config").is_some());
let bootstrap_path = PathBuf::from(Self::get_bootstrap(matches)?);
let dst_bootstrap = match matches.get_one::<String>("output-bootstrap") {
None => bootstrap_path.with_extension("bootstrap.compact"),
Some(s) => PathBuf::from(s),
};

let (config, backend) = Self::get_backend(matches, "compactor")?;

let (rs, _) = RafsSuper::load_from_file(&bootstrap_path, config.clone(), false)?;
info!("load bootstrap {:?} successfully", bootstrap_path);
let chunk_dict = match matches.get_one::<String>("chunk-dict") {
Expand All @@ -1458,8 +1517,6 @@ impl Command {
)?),
};

let backend = Self::get_backend(matches, "compactor")?;

let config_file_path = matches.get_one::<String>("config").unwrap();
let file = File::open(config_file_path)
.with_context(|| format!("failed to open config file {}", config_file_path))?;
Expand All @@ -1478,42 +1535,13 @@ impl Command {

fn unpack(matches: &ArgMatches) -> Result<()> {
let bootstrap = Self::get_bootstrap(matches)?;
let config = Self::get_configuration(matches)?;
config
.internal
.set_blob_accessible(matches.get_one::<String>("config").is_some());
let output = matches.get_one::<String>("output").expect("pass in output");
if output.is_empty() {
return Err(anyhow!("invalid empty --output option"));
}
let (config, backend) = Self::get_backend(matches, "unpacker")?;

let blob = matches.get_one::<String>("blob").map(|s| s.as_str());
let backend: Option<Arc<dyn BlobBackend + Send + Sync>> = match blob {
Some(blob_path) => {
let blob_path = PathBuf::from(blob_path);
let local_fs_conf = LocalFsConfig {
blob_file: blob_path.to_str().unwrap().to_owned(),
dir: Default::default(),
alt_dirs: Default::default(),
};
let local_fs = LocalFs::new(&local_fs_conf, Some("unpacker"))
.with_context(|| format!("fail to create local backend for {:?}", blob_path))?;

Some(Arc::new(local_fs))
}
None => {
if let Some(backend) = &config.backend {
Some(BlobFactory::new_backend(&backend, "unpacker")?)
} else {
match Self::get_backend(matches, "unpacker") {
Ok(backend) => Some(backend),
Err(_) => bail!("one of `--blob`, `--blob-dir` and `--backend-config` must be specified"),
}
}
}
};

OCIUnpacker::new(bootstrap, backend, output)
OCIUnpacker::new(bootstrap, Some(backend), output)
.with_context(|| "fail to create unpacker")?
.unpack(config)
.with_context(|| "fail to unpack")
Expand Down Expand Up @@ -1778,15 +1806,49 @@ impl Command {
fn get_backend(
matches: &ArgMatches,
blob_id: &str,
) -> Result<Arc<dyn BlobBackend + Send + Sync>> {
let cfg_file = matches
.get_one::<String>("backend-config")
.context("missing backend-config argument")?;
let cfg = ConfigV2::from_file(cfg_file)?;
let backend_cfg = cfg.get_backend_config()?;
let backend = BlobFactory::new_backend(backend_cfg, blob_id)?;

Ok(backend)
) -> Result<(Arc<ConfigV2>, Arc<dyn BlobBackend + Send + Sync>)> {
let config: Arc<ConfigV2>;
let backend: Arc<dyn BlobBackend + Send + Sync>;
if let Some(p) = matches.get_one::<String>("blob") {
config = Arc::new(ConfigV2::default());
backend = {
let blob_path = PathBuf::from(p);
let local_fs_conf = LocalFsConfig {
blob_file: blob_path.to_str().unwrap().to_owned(),
dir: Default::default(),
alt_dirs: Default::default(),
};
let local_fs = LocalFs::new(&local_fs_conf, Some(blob_id))
.with_context(|| format!("fail to create local backend for {:?}", blob_path))?;

Arc::new(local_fs)
};
} else if let Some(dir) = matches.get_one::<String>("blob-dir") {
config = Arc::new(ConfigV2::new_localfs("", dir)?);
backend = BlobFactory::new_backend(&config.backend.as_ref().unwrap(), blob_id)?;
} else if let Some(backend_type) = matches.get_one::<String>("backend-type") {
let content =
if let Some(backend_file) = matches.get_one::<String>("backend-config-file") {
fs::read_to_string(backend_file).with_context(|| {
format!("fail to read backend config file {:?}", backend_file)
})?
} else if let Some(backend_config) = matches.get_one::<String>("backend-config") {
backend_config.clone()
} else {
bail!("--backend-config or --backend-config-file must be specified");
};

if backend_type == "localfs" {
bail!("Use --blob-dir or --blob to specify localfs backend");
} else {
backend = BlobFactory::new_backend_from_json(backend_type, &content, blob_id)?;
config = Arc::new(ConfigV2::default());
}
} else {
bail!("--blob, --blob-dir or --backend-type must be specified");
}

Ok((config, backend))
}

fn get_blob_id(matches: &ArgMatches) -> Result<String> {
Expand Down
66 changes: 65 additions & 1 deletion storage/src/factory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,10 @@ use std::sync::{Arc, Mutex};
use std::time::Duration;

use lazy_static::lazy_static;
use nydus_api::{default_user_io_batch_size, BackendConfigV2, ConfigV2};
use nydus_api::{
default_user_io_batch_size, BackendConfigV2, ConfigV2, HttpProxyConfig, LocalDiskConfig,
LocalFsConfig, OssConfig, RegistryConfig, S3Config,
};
use tokio::runtime::{Builder, Runtime};
use tokio::time;

Expand Down Expand Up @@ -201,6 +204,24 @@ impl BlobFactory {
}
}

/// Return the names of the storage backend types enabled at compile time.
///
/// Each entry is gated by its corresponding `backend-*` cargo feature, so
/// the resulting list depends on how the crate was built. The names match
/// the `backend_type` strings accepted by [`Self::new_backend_from_json`].
pub fn supported_backends() -> Vec<String> {
    // Build and return the vector directly instead of binding it to a
    // temporary (clippy: `let_and_return`).
    vec![
        #[cfg(feature = "backend-oss")]
        "oss".to_string(),
        #[cfg(feature = "backend-s3")]
        "s3".to_string(),
        #[cfg(feature = "backend-registry")]
        "registry".to_string(),
        #[cfg(feature = "backend-localfs")]
        "localfs".to_string(),
        #[cfg(feature = "backend-localdisk")]
        "localdisk".to_string(),
        #[cfg(feature = "backend-http-proxy")]
        "http-proxy".to_string(),
    ]
}

/// Create a storage backend for the blob with id `blob_id`.
#[allow(unused_variables)]
pub fn new_backend(
Expand Down Expand Up @@ -245,6 +266,49 @@ impl BlobFactory {
}
}

/// Create a storage backend for the blob `blob_id` from a JSON config string.
///
/// `backend_type` selects the backend implementation (one of the names
/// returned by [`Self::supported_backends`]); `content` is the JSON
/// serialization of that backend's configuration struct.
///
/// # Errors
///
/// Returns an error when `backend_type` is unknown or not compiled in,
/// when `content` fails to deserialize into the backend's config struct,
/// or when the backend itself fails to initialize.
pub fn new_backend_from_json(
    backend_type: &str,
    content: &str,
    blob_id: &str,
) -> IOResult<Arc<dyn BlobBackend + Send + Sync>> {
    match backend_type {
        #[cfg(feature = "backend-oss")]
        "oss" => {
            // `content` is already a `&str`; pass it directly instead of
            // `&content` (clippy: `needless_borrow`). Same in the arms below.
            let cfg = serde_json::from_str::<OssConfig>(content)?;
            Ok(Arc::new(oss::Oss::new(&cfg, Some(blob_id))?))
        }
        #[cfg(feature = "backend-s3")]
        "s3" => {
            let cfg = serde_json::from_str::<S3Config>(content)?;
            Ok(Arc::new(s3::S3::new(&cfg, Some(blob_id))?))
        }
        #[cfg(feature = "backend-registry")]
        "registry" => {
            let cfg = serde_json::from_str::<RegistryConfig>(content)?;
            Ok(Arc::new(registry::Registry::new(&cfg, Some(blob_id))?))
        }
        #[cfg(feature = "backend-localfs")]
        "localfs" => {
            let cfg = serde_json::from_str::<LocalFsConfig>(content)?;
            Ok(Arc::new(localfs::LocalFs::new(&cfg, Some(blob_id))?))
        }
        #[cfg(feature = "backend-localdisk")]
        "localdisk" => {
            let cfg = serde_json::from_str::<LocalDiskConfig>(content)?;
            Ok(Arc::new(localdisk::LocalDisk::new(&cfg, Some(blob_id))?))
        }
        #[cfg(feature = "backend-http-proxy")]
        "http-proxy" => {
            let cfg = serde_json::from_str::<HttpProxyConfig>(content)?;
            Ok(Arc::new(http_proxy::HttpProxy::new(&cfg, Some(blob_id))?))
        }
        _ => Err(einval!(format!(
            "unsupported backend type '{}'",
            backend_type
        ))),
    }
}

fn check_cache_stat(&self) {
let mgrs = self.mgrs.lock().unwrap();
for (_key, mgr) in mgrs.iter() {
Expand Down

0 comments on commit 33df145

Please sign in to comment.