12 changes: 10 additions & 2 deletions rafs/src/metadata/direct_v6.rs
@@ -791,7 +791,11 @@ impl RafsInode for OndiskInodeWrapper {
                 OsString::from(name),
             )?) as Arc<dyn RafsInode>)
         } else {
-            Err(enoent!())
+            Err(enoent!(format!(
+                "invalid child name {:?}, parent {:?}",
+                OsString::from(name),
+                self.name(),
+            )))
         }
     }

@@ -841,7 +845,11 @@ impl RafsInode for OndiskInodeWrapper {
             }
         }
 
-        Err(enoent!("invalid child index"))
+        Err(enoent!(format!(
+            "invalid child index {}, parent {:?}",
+            idx,
+            self.name(),
+        )))
     }
 
     #[inline]
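Note on the two hunks above: a bare `enoent!()` loses the lookup that failed, so the patch threads the child and parent names into the message. A minimal stand-in using only `std` (the `enoent!` macro is nydus-internal; `lookup_failed` here is a hypothetical helper that merely mirrors the shape of the new messages):

    use std::ffi::OsStr;
    use std::io::{Error, ErrorKind};

    // Build a NotFound error that names both sides of the failed lookup,
    // so a log line points at the exact location in the inode tree.
    fn lookup_failed(child: &OsStr, parent: &OsStr) -> Error {
        Error::new(
            ErrorKind::NotFound,
            format!("invalid child name {:?}, parent {:?}", child, parent),
        )
    }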
12 changes: 2 additions & 10 deletions rafs/src/metadata/layout/v6.rs
@@ -17,7 +17,6 @@ use lazy_static::lazy_static;
 use nydus_utils::{compress, digest, round_up, ByteSize};
 use storage::device::{BlobFeatures, BlobInfo};
 use storage::meta::{BlobMetaHeaderOndisk, BLOB_FEATURE_4K_ALIGNED};
-use storage::RAFS_MAX_CHUNK_SIZE;
 
 use crate::metadata::{layout::RafsXAttrs, RafsStore, RafsSuperFlags};
 use crate::{impl_bootstrap_converter, impl_pub_getter_setter, RafsIoReader, RafsIoWrite};
@@ -353,10 +352,7 @@ impl RafsV6SuperBlockExt {
         }
 
         let chunk_size = u32::from_le(self.s_chunk_size) as u64;
-        if !chunk_size.is_power_of_two()
-            || chunk_size < EROFS_BLOCK_SIZE
-            || chunk_size > RAFS_MAX_CHUNK_SIZE
-        {
+        if !chunk_size.is_power_of_two() || chunk_size < EROFS_BLOCK_SIZE {
             return Err(einval!("invalid chunk size in Rafs v6 extended superblock"));
         }

@@ -1296,11 +1292,7 @@ impl RafsV6Blob {
         }
 
         let c_size = u32::from_le(self.chunk_size) as u64;
-        if c_size.count_ones() != 1
-            || c_size < EROFS_BLOCK_SIZE
-            || c_size > RAFS_MAX_CHUNK_SIZE
-            || c_size != chunk_size as u64
-        {
+        if c_size.count_ones() != 1 || c_size < EROFS_BLOCK_SIZE || c_size != chunk_size as u64 {
             error!(
                 "RafsV6Blob: idx {} invalid c_size {}, count_ones() {}",
                 blob_index,
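Both hunks above relax the same invariant: the `RAFS_MAX_CHUNK_SIZE` upper bound disappears, leaving only "power of two" and "at least one EROFS block (4KB)". A standalone sketch of the check that remains, with a simplified error type (note that `c_size.count_ones() != 1` in the second hunk is just another spelling of the power-of-two test):

    const EROFS_BLOCK_SIZE: u64 = 4096;

    fn validate_chunk_size(chunk_size: u64) -> Result<(), String> {
        // A power of two has exactly one set bit, so `is_power_of_two()`
        // and `count_ones() == 1` are interchangeable here; both reject 0.
        if !chunk_size.is_power_of_two() || chunk_size < EROFS_BLOCK_SIZE {
            return Err(format!("invalid chunk size {:#x}", chunk_size));
        }
        Ok(())
    }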
7 changes: 4 additions & 3 deletions src/bin/nydus-image/builder/diff.rs
@@ -511,6 +511,7 @@ impl DiffBuilder {
 
     fn prepare_parent_chunk_map(
         &mut self,
+        ctx: &BuildContext,
         bootstrap_mgr: &mut BootstrapManager,
     ) -> Result<BlobManager> {
         let mut blob_mgr = BlobManager::new();
@@ -523,7 +524,7 @@ impl DiffBuilder {
             rs.load(r)
                 .context("failed to load superblock from bootstrap")?;
             // Load blobs from the blob table of parent bootstrap.
-            blob_mgr.from_blob_table(rs.superblock.get_blob_infos());
+            blob_mgr.from_blob_table(ctx, rs.superblock.get_blob_infos());
             rs.walk_dir(RAFS_ROOT_INODE, None, &mut |inode: &dyn RafsInode,
                                                      path: &Path|
              -> Result<()> {
@@ -817,7 +818,7 @@ impl Builder for DiffBuilder {
     ) -> Result<BuildOutput> {
         // Add blobs in parent bootstrap to blob map.
         let mut parent_blob_mgr = self
-            .prepare_parent_chunk_map(bootstrap_mgr)
+            .prepare_parent_chunk_map(ctx, bootstrap_mgr)
             .context("failed to load chunks from bootstrap")?;
         let len = parent_blob_mgr.len();
         if len > 0 {
@@ -829,7 +830,7 @@ impl Builder for DiffBuilder {
         }
 
         // Add blobs in chunk dict bootstrap to blob map.
-        dict_blob_mgr.extend_blob_table_from_chunk_dict()?;
+        dict_blob_mgr.extend_blob_table_from_chunk_dict(ctx)?;
         let len = dict_blob_mgr.len();
         if len > 0 {
             for blob_idx in (0..len).rev() {
2 changes: 1 addition & 1 deletion src/bin/nydus-image/builder/directory.rs
@@ -151,7 +151,7 @@ impl Builder for DirectoryBuilder {
         blob_ctx.set_chunk_dict(blob_mgr.get_chunk_dict());
         blob_ctx.set_chunk_size(ctx.chunk_size);
         blob_ctx.set_meta_info_enabled(ctx.fs_version == RafsVersion::V6);
-        blob_mgr.extend_blob_table_from_chunk_dict()?;
+        blob_mgr.extend_blob_table_from_chunk_dict(ctx)?;
 
         let blob_index = blob_mgr.alloc_index()?;
         let mut blob = Blob::new();
118 changes: 68 additions & 50 deletions src/bin/nydus-image/builder/stargz.rs
@@ -18,11 +18,12 @@ use nix::sys::stat::makedev;
 use serde::{Deserialize, Serialize};
 
 use nydus_utils::digest::{self, Algorithm, DigestHasher, RafsDigest};
-use nydus_utils::ByteSize;
+use nydus_utils::{try_round_up_4k, ByteSize};
 use rafs::metadata::layout::v5::{RafsV5ChunkInfo, RafsV5Inode, RafsV5InodeFlags};
-use rafs::metadata::layout::{RafsBlobTable, RafsXAttrs};
+use rafs::metadata::layout::RafsXAttrs;
 use rafs::metadata::Inode;
 use storage::device::BlobChunkFlags;
+use storage::meta::BlobMetaHeaderOndisk;
 
 use crate::builder::Builder;
 use crate::core::bootstrap::Bootstrap;
@@ -351,6 +352,7 @@ impl StargzIndexTreeBuilder {
         let mut tree: Option<Tree> = None;
         let mut last_reg_entry: Option<&TocEntry> = None;
 
+        let mut uncompress_offset = 0;
         for entry in toc_index.entries.iter() {
             if !entry.is_supported() {
                 continue;
@@ -369,28 +371,35 @@ impl StargzIndexTreeBuilder {
             }
 
             if (entry.is_reg() || entry.is_chunk()) && decompress_size != 0 {
+                let aligned_chunk_size = if ctx.aligned_chunk {
+                    // Safe to unwrap because `chunk_size` is much less than u32::MAX.
+                    try_round_up_4k(decompress_size).unwrap()
+                } else {
+                    decompress_size
+                };
+                let pre_uncompress_offset = uncompress_offset;
+                uncompress_offset += aligned_chunk_size;
+
                 let block_id = entry.block_id(&ctx.blob_id)?;
+                let v5_chunk_info = ChunkWrapper::V5(RafsV5ChunkInfo {
+                    block_id,
+                    // Will be set later
+                    blob_index: 0,
+                    flags: BlobChunkFlags::COMPRESSED,
+                    // No available data on entry
+                    compress_size: 0,
+                    uncompress_size: decompress_size as u32,
+                    compress_offset: entry.offset as u64,
+                    uncompress_offset: pre_uncompress_offset,
+                    file_offset: entry.chunk_offset as u64,
+                    index: 0,
+                    reserved: 0,
+                });
                 let chunk = NodeChunk {
                     source: ChunkSource::Build,
                     inner: match ctx.fs_version {
-                        RafsVersion::V5 => {
-                            ChunkWrapper::V5(RafsV5ChunkInfo {
-                                block_id,
-                                // Will be set later
-                                blob_index: 0,
-                                flags: BlobChunkFlags::COMPRESSED,
-                                // No available data on entry
-                                compress_size: 0,
-                                uncompress_size: decompress_size as u32,
-                                compress_offset: entry.offset as u64,
-                                // No available data on entry
-                                uncompress_offset: 0,
-                                file_offset: entry.chunk_offset as u64,
-                                index: 0,
-                                reserved: 0,
-                            })
-                        }
-                        RafsVersion::V6 => todo!(),
+                        RafsVersion::V5 => v5_chunk_info,
+                        RafsVersion::V6 => v5_chunk_info,
                     },
                 };

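The hunk above assigns each stargz chunk a synthetic `uncompress_offset` by accumulating (optionally 4K-aligned) uncompressed sizes, since the stargz TOC carries no such offsets. A self-contained sketch of that layout arithmetic; `round_up_4k` is a simplified stand-in for nydus-utils' `try_round_up_4k`, which additionally guards against overflow:

    // Round up to the next multiple of 4096; 0xfff = 4095.
    fn round_up_4k(v: u64) -> u64 {
        (v + 0xfff) & !0xfff
    }

    fn main() {
        let mut uncompress_offset = 0u64;
        // Three chunks of 5000, 4096 and 100 bytes land at offsets
        // 0, 8192 and 12288 when aligned (vs. 0, 5000 and 9096 packed).
        for decompress_size in [5000u64, 4096, 100] {
            let aligned = round_up_4k(decompress_size);
            println!("chunk at {}, advancing by {}", uncompress_offset, aligned);
            uncompress_offset += aligned;
        }
    }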
@@ -504,32 +513,31 @@ impl StargzIndexTreeBuilder {
         let gid = if explicit_uidgid { entry.gid } else { 0 };
 
         // Parse inode info
+        let v5_inode = RafsV5Inode {
+            i_digest: RafsDigest::default(),
+            i_parent: 0,
+            i_ino: ino,
+            i_projid: 0,
+            i_uid: uid,
+            i_gid: gid,
+            i_mode: entry.mode(),
+            i_size: file_size,
+            i_nlink: entry.num_link,
+            i_blocks: 0,
+            i_flags: flags,
+            i_child_index: 0,
+            i_child_count: 0,
+            i_name_size: name_size,
+            i_symlink_size: symlink_size,
+            i_rdev: entry.rdev(),
+            // TODO: add mtime from entry.ModTime()
+            i_mtime: 0,
+            i_mtime_nsec: 0,
+            i_reserved: [0; 8],
+        };
         let inode = match version {
-            RafsVersion::V5 => {
-                InodeWrapper::V5(RafsV5Inode {
-                    i_digest: RafsDigest::default(),
-                    i_parent: 0,
-                    i_ino: ino,
-                    i_projid: 0,
-                    i_uid: uid,
-                    i_gid: gid,
-                    i_mode: entry.mode(),
-                    i_size: file_size,
-                    i_nlink: entry.num_link,
-                    i_blocks: 0,
-                    i_flags: flags,
-                    i_child_index: 0,
-                    i_child_count: 0,
-                    i_name_size: name_size,
-                    i_symlink_size: symlink_size,
-                    i_rdev: entry.rdev(),
-                    // TODO: add mtime from entry.ModTime()
-                    i_mtime: 0,
-                    i_mtime_nsec: 0,
-                    i_reserved: [0; 8],
-                })
-            }
-            RafsVersion::V6 => todo!(),
+            RafsVersion::V5 => InodeWrapper::V5(v5_inode),
+            RafsVersion::V6 => InodeWrapper::V6(v5_inode),
         };
 
         let path = entry.path()?;
@@ -580,6 +588,10 @@ impl StargzBuilder {
         let mut decompressed_blob_size = 0u64;
         let mut compressed_blob_size = 0u64;
         let blob_index = blob_mgr.alloc_index()?;
+
+        let mut header = BlobMetaHeaderOndisk::default();
+        header.set_4k_aligned(true);
+
         let mut blob_ctx = BlobContext::new(
             ctx.blob_id.clone(),
             ctx.blob_storage.clone(),
@@ -588,6 +600,10 @@ impl StargzBuilder {
         )?;
         blob_ctx.set_chunk_dict(blob_mgr.get_chunk_dict());
         blob_ctx.set_chunk_size(ctx.chunk_size);
+        blob_ctx.set_meta_info_enabled(ctx.fs_version == RafsVersion::V6);
+        blob_ctx.blob_meta_header = header;
+
+        let mut chunk_map = HashMap::new();
 
         // Set blob index and inode digest for upper nodes
         for node in &mut bootstrap_ctx.nodes {
@@ -598,8 +614,13 @@ impl StargzBuilder {
             let mut inode_hasher = RafsDigest::hasher(digest::Algorithm::Sha256);
 
             for chunk in node.chunks.iter_mut() {
-                let chunk_index = blob_ctx.alloc_index()?;
-                chunk.inner.set_index(chunk_index);
+                if let Some(chunk_index) = chunk_map.get(chunk.inner.id()) {
+                    chunk.inner.set_index(*chunk_index);
+                } else {
+                    let chunk_index = blob_ctx.alloc_index()?;
+                    chunk.inner.set_index(chunk_index);
+                    chunk_map.insert(*chunk.inner.id(), chunk.inner.index());
+                }
                 chunk.inner.set_blob_index(blob_index);
                 decompressed_blob_size += chunk.inner.uncompressed_size() as u64;
                 compressed_blob_size += chunk.inner.compressed_size() as u64;
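The hunk above deduplicates chunk indices: chunks that share a digest (stargz hardlinks or repeated content) reuse the first allocated index instead of consuming a new one. A reduced sketch of the scheme, with a plain string digest standing in for `RafsDigest` and a counter for `alloc_index`:

    use std::collections::HashMap;

    fn main() {
        let digests = ["aa", "bb", "aa", "cc", "bb"];
        let mut chunk_map: HashMap<&str, u32> = HashMap::new();
        let mut next_index = 0u32;

        for digest in digests {
            // First sighting allocates an index; later sightings reuse it.
            let index = *chunk_map.entry(digest).or_insert_with(|| {
                let idx = next_index;
                next_index += 1;
                idx
            });
            println!("digest {} -> chunk index {}", digest, index);
        }
        // Prints indices 0, 1, 0, 2, 1: five chunks, three unique payloads.
    }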
@@ -661,9 +682,6 @@ impl Builder for StargzBuilder {
 
         // Dump bootstrap file
         let blob_table = blob_mgr.to_blob_table(ctx)?;
-        if let RafsBlobTable::V6(_) = blob_table {
-            todo!();
-        }
         bootstrap.dump(ctx, &mut bootstrap_ctx, &blob_table)?;
 
         bootstrap_mgr.add(bootstrap_ctx);
4 changes: 2 additions & 2 deletions src/bin/nydus-image/core/blob_compact.rs
@@ -581,10 +581,10 @@ impl BlobCompactor {
             BootstrapManager::new(Some(ArtifactStorage::SingleFile(d_bootstrap)), None);
         let mut bootstrap_ctx = bootstrap_mgr.create_ctx(false)?;
         let mut ori_blob_mgr = BlobManager::new();
-        ori_blob_mgr.from_blob_table(rs.superblock.get_blob_infos());
+        ori_blob_mgr.from_blob_table(&build_ctx, rs.superblock.get_blob_infos());
         if let Some(dict) = chunk_dict {
             ori_blob_mgr.set_chunk_dict(dict);
-            ori_blob_mgr.extend_blob_table_from_chunk_dict()?;
+            ori_blob_mgr.extend_blob_table_from_chunk_dict(&build_ctx)?;
         }
         if ori_blob_mgr.len() < cfg.layers_to_compact {
             return Ok(None);
2 changes: 1 addition & 1 deletion src/bin/nydus-image/core/bootstrap.rs
@@ -326,7 +326,7 @@ impl Bootstrap {
 
         // Reuse lower layer blob table,
         // we need to append the blob entry of upper layer to the table
-        blob_mgr.from_blob_table(rs.superblock.get_blob_infos());
+        blob_mgr.from_blob_table(ctx, rs.superblock.get_blob_infos());
 
         // Build node tree of lower layer from a bootstrap file, drop by to add
         // chunks of lower node to chunk_cache for chunk deduplication on next.
44 changes: 25 additions & 19 deletions src/bin/nydus-image/core/context.rs
@@ -372,30 +372,36 @@ impl BlobContext {
         Ok(Self::new_with_writer(blob_id, writer, blob_offset))
     }
 
-    pub fn from(blob: &BlobInfo, chunk_source: ChunkSource) -> Self {
-        let mut ctx = Self::new_with_writer(blob.blob_id().to_owned(), None, 0);
+    pub fn from(ctx: &BuildContext, blob: &BlobInfo, chunk_source: ChunkSource) -> Self {
+        let mut blob_ctx = Self::new_with_writer(blob.blob_id().to_owned(), None, 0);
 
-        ctx.blob_readahead_size = blob.readahead_size();
-        ctx.chunk_count = blob.chunk_count();
-        ctx.decompressed_blob_size = blob.uncompressed_size();
-        ctx.compressed_blob_size = blob.compressed_size();
-        ctx.chunk_source = chunk_source;
+        blob_ctx.blob_readahead_size = blob.readahead_size();
+        blob_ctx.chunk_count = blob.chunk_count();
+        blob_ctx.decompressed_blob_size = blob.uncompressed_size();
+        blob_ctx.compressed_blob_size = blob.compressed_size();
+        blob_ctx.chunk_size = blob.chunk_size();
+        blob_ctx.chunk_source = chunk_source;
+        blob_ctx.blob_meta_header.set_4k_aligned(ctx.aligned_chunk);
 
         if blob.meta_ci_is_valid() {
-            ctx.blob_meta_header
+            blob_ctx
+                .blob_meta_header
                 .set_ci_compressor(blob.meta_ci_compressor());
-            ctx.blob_meta_header.set_ci_entries(blob.chunk_count());
-            ctx.blob_meta_header
+            blob_ctx.blob_meta_header.set_ci_entries(blob.chunk_count());
+            blob_ctx
+                .blob_meta_header
                 .set_ci_compressed_offset(blob.meta_ci_offset());
-            ctx.blob_meta_header
+            blob_ctx
+                .blob_meta_header
                 .set_ci_compressed_size(blob.meta_ci_compressed_size());
-            ctx.blob_meta_header
+            blob_ctx
+                .blob_meta_header
                 .set_ci_uncompressed_size(blob.meta_ci_uncompressed_size());
-            ctx.blob_meta_header.set_4k_aligned(true);
-            ctx.blob_meta_info_enabled = true;
+            blob_ctx.blob_meta_header.set_4k_aligned(true);
+            blob_ctx.blob_meta_info_enabled = true;
         }
 
-        ctx
+        blob_ctx
     }
 
     pub fn new_with_writer(
@@ -570,10 +576,10 @@ impl BlobManager {
     }
 
     #[allow(clippy::wrong_self_convention)]
-    pub fn from_blob_table(&mut self, blob_table: Vec<Arc<BlobInfo>>) {
+    pub fn from_blob_table(&mut self, ctx: &BuildContext, blob_table: Vec<Arc<BlobInfo>>) {
         self.blobs = blob_table
             .iter()
-            .map(|entry| BlobContext::from(entry.as_ref(), ChunkSource::Parent))
+            .map(|entry| BlobContext::from(ctx, entry.as_ref(), ChunkSource::Parent))
            .collect();
     }

@@ -593,7 +599,7 @@ impl BlobManager {
     /// Extend blobs which belong to ChunkDict and setup real_blob_idx map
     /// should call this function after import parent bootstrap
     /// otherwise will break blobs order
-    pub fn extend_blob_table_from_chunk_dict(&mut self) -> Result<()> {
+    pub fn extend_blob_table_from_chunk_dict(&mut self, ctx: &BuildContext) -> Result<()> {
         let blobs = self.chunk_dict_ref.get_blobs();
 
         for blob in blobs.iter() {
@@ -602,7 +608,7 @@ impl BlobManager {
                     .set_real_blob_idx(blob.blob_index(), real_idx);
             } else {
                 let idx = self.alloc_index()?;
-                self.add(BlobContext::from(blob.as_ref(), ChunkSource::Dict));
+                self.add(BlobContext::from(ctx, blob.as_ref(), ChunkSource::Dict));
                 self.chunk_dict_ref
                     .set_real_blob_idx(blob.blob_index(), idx);
             }
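The thread running through every call-site change above: `BlobContext::from` now takes the `BuildContext`, so a blob rebuilt from a parent bootstrap or a chunk dict inherits the build-wide `aligned_chunk` setting (and the blob's chunk size) instead of silently defaulting. A stripped-down sketch of that dependency, with hypothetical minimal types standing in for the real structs in context.rs:

    // Hypothetical, pared-down shapes; the real types carry many more fields.
    struct BuildContext { aligned_chunk: bool }
    struct BlobMetaHeader { aligned_4k: bool }
    struct BlobContext { chunk_size: u32, blob_meta_header: BlobMetaHeader }

    impl BlobContext {
        // Mirrors the patched `BlobContext::from`: the build-wide alignment
        // choice flows into each per-blob metadata header.
        fn from(ctx: &BuildContext, chunk_size: u32) -> Self {
            BlobContext {
                chunk_size,
                blob_meta_header: BlobMetaHeader { aligned_4k: ctx.aligned_chunk },
            }
        }
    }

    fn main() {
        let build = BuildContext { aligned_chunk: true };
        let blob = BlobContext::from(&build, 0x100000);
        assert!(blob.blob_meta_header.aligned_4k);
        println!("chunk_size {:#x}", blob.chunk_size);
    }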