diff --git a/docs/nydus-image.md b/docs/nydus-image.md index 7e7974eeed6..4af677eb433 100644 --- a/docs/nydus-image.md +++ b/docs/nydus-image.md @@ -182,6 +182,29 @@ data blobs: ["9e50ae5ac02b2ef6ffb86075720e49d95d8240eed4717dd8ac9c68cadba00762"] -rw-r--r-- 1 root root 20480 3月 29 17:02 df01f389850b79cd5a6ca6db98495bb457aa0821b0558351c55537551322fb96 ``` +## Unpack Nydus Image +`nydus-image` tool supports to unpack Nydus image to a tar file. +```shell +# use --blob to specify RAFS data blob +nydus-image unpack --blob image/blob1 image/bootstrap --output tmp.tar + +# use --blob-dir to specify the directory containing RAFS data blobs +nydus-image unpack --blob-dir=image/ image/bootstrap --output tmp.tar + +# example-oss.config +{ + "endpoint": "region.aliyuncs.com", + "scheme": "https", + "access_key_id": "", + "access_key_secret": "", + "bucket_name": "", + "object_prefix": "image/" +} + +# use backend config file to specify remote storage for RAFS data blobs +nydus-image unpack --backend-type oss --backend-config-file example-oss.config image/bootstrap --output tmp.tar +``` + ## Compact Nydus Image `nydus-image` tool supports to compact Nydus image for 1. 
reduce number of blobs diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs index 5fa3a3a3c10..6be10283519 100644 --- a/src/bin/nydus-image/main.rs +++ b/src/bin/nydus-image/main.rs @@ -671,17 +671,48 @@ fn prepare_cmd_args(bti_string: &'static str) -> App { .required(true), ) .arg( - Arg::new("config") - .long("config") - .short('C') - .help("config to compactor") - .required(true), + Arg::new("backend-type") + .long("backend-type") + .help(format!( + "Type of backend [possible values: {}]", + BlobFactory::supported_backends() + .into_iter() + .filter(|x| x != "localfs") + .collect::<Vec<String>>() + .join(", ") + )) + .required(false) + .group("backend"), ) .arg( Arg::new("backend-config") .long("backend-config") - .help("config file of backend") - .required(true), + .help("Config string of backend") + .required(false), + ) + .arg( + Arg::new("backend-config-file") + .long("backend-config-file") + .help("Config file of backend") + .conflicts_with("backend-config") + .required(false), + ) + .arg( + Arg::new("blob") + .long("blob") + .short('b') + .help("Path to RAFS data blob file") + .required(false) + .group("backend"), + ) + .arg( + Arg::new("blob-dir") + .long("blob-dir") + .short('D') + .help( + "Directory for localfs storage backend, hosting data blobs and cache files", + ) + .group("backend"), ) .arg( arg_chunk_dict ) .arg( @@ -693,6 +724,11 @@ fn prepare_cmd_args(bti_string: &'static str) -> App { .arg( arg_output_json, ) + .group( + clap::ArgGroup::new("backend") + .args(&["backend-type", "blob", "blob-dir"]) + .required(false), + ), ); app.subcommand( @@ -703,10 +739,31 @@ fn prepare_cmd_args(bti_string: &'static str) -> App { .help("File path of RAFS metadata") .required_unless_present("bootstrap"), ) + .arg( + Arg::new("backend-type") + .long("backend-type") + .help(format!( + "Type of backend [possible values: {}]", + BlobFactory::supported_backends() + .into_iter() + .filter(|x| x != "localfs") + .collect::<Vec<String>>() + .join(", ") + )) + 
.required(false) + .group("backend"), + ) .arg( Arg::new("backend-config") .long("backend-config") - .help("config file of backend") + .help("Config string of backend") + .required(false), + ) + .arg( + Arg::new("backend-config-file") + .long("backend-config-file") + .help("Config file of backend") + .conflicts_with("backend-config") .required(false), ) .arg( @@ -721,24 +778,29 @@ fn prepare_cmd_args(bti_string: &'static str) -> App { Arg::new("blob") .long("blob") .short('b') - .help("path to RAFS data blob file") - .required(false), + .help("Path to RAFS data blob file") + .required(false) + .group("backend"), ) .arg( Arg::new("blob-dir") .long("blob-dir") .short('D') - .conflicts_with("config") .help( "Directory for localfs storage backend, hosting data blobs and cache files", - ), + ) + .group("backend"), ) - .arg(arg_config) .arg( Arg::new("output") .long("output") - .help("path for output tar file") + .help("Path for output tar file") .required(true), + ) + .group( + clap::ArgGroup::new("backend") + .args(&["backend-type", "blob", "blob-dir"]) + .required(false), ), ) } @@ -1436,17 +1498,14 @@ impl Command { } fn compact(matches: &ArgMatches, build_info: &BuildTimeInfo) -> Result<()> { - let config = - Self::get_configuration(matches).context("failed to get configuration information")?; - config - .internal - .set_blob_accessible(matches.get_one::<String>("config").is_some()); let bootstrap_path = PathBuf::from(Self::get_bootstrap(matches)?); let dst_bootstrap = match matches.get_one::<String>("output-bootstrap") { None => bootstrap_path.with_extension("bootstrap.compact"), Some(s) => PathBuf::from(s), }; + let (config, backend) = Self::get_backend(matches, "compactor")?; + let (rs, _) = RafsSuper::load_from_file(&bootstrap_path, config.clone(), false)?; info!("load bootstrap {:?} successfully", bootstrap_path); let chunk_dict = match matches.get_one::<String>("chunk-dict") { @@ -1458,8 +1517,6 @@ impl Command { )?), }; - let backend = Self::get_backend(matches, "compactor")?; - let 
config_file_path = matches.get_one::<String>("config").unwrap(); let file = File::open(config_file_path) .with_context(|| format!("failed to open config file {}", config_file_path))?; @@ -1478,42 +1535,13 @@ impl Command { fn unpack(matches: &ArgMatches) -> Result<()> { let bootstrap = Self::get_bootstrap(matches)?; - let config = Self::get_configuration(matches)?; - config - .internal - .set_blob_accessible(matches.get_one::<String>("config").is_some()); let output = matches.get_one::<String>("output").expect("pass in output"); if output.is_empty() { return Err(anyhow!("invalid empty --output option")); } + let (config, backend) = Self::get_backend(matches, "unpacker")?; - let blob = matches.get_one::<String>("blob").map(|s| s.as_str()); - let backend: Option<Arc<dyn BlobBackend>> = match blob { - Some(blob_path) => { - let blob_path = PathBuf::from(blob_path); - let local_fs_conf = LocalFsConfig { - blob_file: blob_path.to_str().unwrap().to_owned(), - dir: Default::default(), - alt_dirs: Default::default(), - }; - let local_fs = LocalFs::new(&local_fs_conf, Some("unpacker")) - .with_context(|| format!("fail to create local backend for {:?}", blob_path))?; - - Some(Arc::new(local_fs)) - } - None => { - if let Some(backend) = &config.backend { - Some(BlobFactory::new_backend(&backend, "unpacker")?) - } else { - match Self::get_backend(matches, "unpacker") { - Ok(backend) => Some(backend), - Err(_) => bail!("one of `--blob`, `--blob-dir` and `--backend-config` must be specified"), - } - } - } - }; - - OCIUnpacker::new(bootstrap, backend, output) + OCIUnpacker::new(bootstrap, Some(backend), output) .with_context(|| "fail to create unpacker")? 
.unpack(config) .with_context(|| "fail to unpack") @@ -1778,15 +1806,49 @@ impl Command { fn get_backend( matches: &ArgMatches, blob_id: &str, - ) -> Result<Arc<dyn BlobBackend>> { - let cfg_file = matches - .get_one::<String>("backend-config") - .context("missing backend-config argument")?; - let cfg = ConfigV2::from_file(cfg_file)?; - let backend_cfg = cfg.get_backend_config()?; - let backend = BlobFactory::new_backend(backend_cfg, blob_id)?; - - Ok(backend) + ) -> Result<(Arc<ConfigV2>, Arc<dyn BlobBackend>)> { + let config: Arc<ConfigV2>; + let backend: Arc<dyn BlobBackend>; + if let Some(p) = matches.get_one::<String>("blob") { + config = Arc::new(ConfigV2::default()); + backend = { + let blob_path = PathBuf::from(p); + let local_fs_conf = LocalFsConfig { + blob_file: blob_path.to_str().unwrap().to_owned(), + dir: Default::default(), + alt_dirs: Default::default(), + }; + let local_fs = LocalFs::new(&local_fs_conf, Some(blob_id)) + .with_context(|| format!("fail to create local backend for {:?}", blob_path))?; + + Arc::new(local_fs) + }; + } else if let Some(dir) = matches.get_one::<String>("blob-dir") { + config = Arc::new(ConfigV2::new_localfs("", dir)?); + backend = BlobFactory::new_backend(&config.backend.as_ref().unwrap(), blob_id)?; + } else if let Some(backend_type) = matches.get_one::<String>("backend-type") { + let content = + if let Some(backend_file) = matches.get_one::<String>("backend-config-file") { + fs::read_to_string(backend_file).with_context(|| { + format!("fail to read backend config file {:?}", backend_file) + })? 
+ } else if let Some(backend_config) = matches.get_one::<String>("backend-config") { + backend_config.clone() + } else { + bail!("--backend-config or --backend-config-file must be specified"); + }; + + if backend_type == "localfs" { + bail!("Use --blob-dir or --blob to specify localfs backend"); + } else { + backend = BlobFactory::new_backend_from_json(backend_type, &content, blob_id)?; + config = Arc::new(ConfigV2::default()); + } + } else { + bail!("--blob, --blob-dir or --backend-type must be specified"); + } + + Ok((config, backend)) } fn get_blob_id(matches: &ArgMatches) -> Result<String> { diff --git a/storage/src/factory.rs b/storage/src/factory.rs index ef74a129b8d..e441e26209c 100644 --- a/storage/src/factory.rs +++ b/storage/src/factory.rs @@ -17,7 +17,10 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; use lazy_static::lazy_static; -use nydus_api::{default_user_io_batch_size, BackendConfigV2, ConfigV2}; +use nydus_api::{ + default_user_io_batch_size, BackendConfigV2, ConfigV2, HttpProxyConfig, LocalDiskConfig, + LocalFsConfig, OssConfig, RegistryConfig, S3Config, +}; use tokio::runtime::{Builder, Runtime}; use tokio::time; @@ -201,6 +204,24 @@ impl BlobFactory { } } + pub fn supported_backends() -> Vec<String> { + let backends = vec![ + #[cfg(feature = "backend-oss")] + "oss".to_string(), + #[cfg(feature = "backend-s3")] + "s3".to_string(), + #[cfg(feature = "backend-registry")] + "registry".to_string(), + #[cfg(feature = "backend-localfs")] + "localfs".to_string(), + #[cfg(feature = "backend-localdisk")] + "localdisk".to_string(), + #[cfg(feature = "backend-http-proxy")] + "http-proxy".to_string(), + ]; + backends + } + /// Create a storage backend for the blob with id `blob_id`. 
#[allow(unused_variables)] pub fn new_backend( @@ -245,6 +266,49 @@ impl BlobFactory { } } + pub fn new_backend_from_json( + backend_type: &str, + content: &str, + blob_id: &str, + ) -> IOResult<Arc<dyn BlobBackend>> { + match backend_type { + #[cfg(feature = "backend-oss")] + "oss" => { + let cfg = serde_json::from_str::<OssConfig>(&content)?; + Ok(Arc::new(oss::Oss::new(&cfg, Some(blob_id))?)) + } + #[cfg(feature = "backend-s3")] + "s3" => { + let cfg = serde_json::from_str::<S3Config>(&content)?; + Ok(Arc::new(s3::S3::new(&cfg, Some(blob_id))?)) + } + #[cfg(feature = "backend-registry")] + "registry" => { + let cfg = serde_json::from_str::<RegistryConfig>(&content)?; + Ok(Arc::new(registry::Registry::new(&cfg, Some(blob_id))?)) + } + #[cfg(feature = "backend-localfs")] + "localfs" => { + let cfg = serde_json::from_str::<LocalFsConfig>(&content)?; + Ok(Arc::new(localfs::LocalFs::new(&cfg, Some(blob_id))?)) + } + #[cfg(feature = "backend-localdisk")] + "localdisk" => { + let cfg = serde_json::from_str::<LocalDiskConfig>(&content)?; + Ok(Arc::new(localdisk::LocalDisk::new(&cfg, Some(blob_id))?)) + } + #[cfg(feature = "backend-http-proxy")] + "http-proxy" => { + let cfg = serde_json::from_str::<HttpProxyConfig>(&content)?; + Ok(Arc::new(http_proxy::HttpProxy::new(&cfg, Some(blob_id))?)) + } + _ => Err(einval!(format!( + "unsupported backend type '{}'", + backend_type + ))), + } + } + fn check_cache_stat(&self) { let mgrs = self.mgrs.lock().unwrap(); for (_key, mgr) in mgrs.iter() {