From b8c4bac0b832f4ac0d54f9b361d166066fd97349 Mon Sep 17 00:00:00 2001 From: likewu Date: Sat, 5 Jul 2025 18:15:44 +0800 Subject: [PATCH 01/26] fix --- .../ecstore/src/bucket/lifecycle/lifecycle.rs | 26 +++++++++---------- rustfs/src/storage/ecfs.rs | 4 +-- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs index a0ac8c1b6..2431d6ba8 100644 --- a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs +++ b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs @@ -1,4 +1,3 @@ -#![allow(unused_imports)] // Copyright 2024 RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,6 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +#![allow(unused_imports)] #![allow(unused_variables)] #![allow(unused_mut)] #![allow(unused_assignments)] @@ -41,7 +41,7 @@ const ERR_LIFECYCLE_DUPLICATE_ID: &str = "Rule ID must be unique. 
Found same ID const _ERR_XML_NOT_WELL_FORMED: &str = "The XML you provided was not well-formed or did not validate against our published schema"; const ERR_LIFECYCLE_BUCKET_LOCKED: &str = - "ExpiredObjectAllVersions element and DelMarkerExpiration action cannot be used on an object locked bucket"; + "ExpiredObjectAllVersions element and DelMarkerExpiration action cannot be used on an retention bucket"; #[derive(Debug, Clone, PartialEq, Eq)] pub enum IlmAction { @@ -102,30 +102,30 @@ impl RuleValidate for LifecycleRule { } fn validate_status(&self) -> Result<()> { - if self.Status.len() == 0 { - return errEmptyRuleStatus; + if self.status.len() == 0 { + return ErrEmptyRuleStatus; } - if self.Status != Enabled && self.Status != Disabled { - return errInvalidRuleStatus; + if self.status != Enabled && self.status != Disabled { + return ErrInvalidRuleStatus; } Ok(()) } fn validate_expiration(&self) -> Result<()> { - self.Expiration.Validate(); + self.expiration.validate(); } fn validate_noncurrent_expiration(&self) -> Result<()> { - self.NoncurrentVersionExpiration.Validate() + self.noncurrent_version_expiration.validate() } fn validate_prefix_and_filter(&self) -> Result<()> { - if !self.Prefix.set && self.Filter.IsEmpty() || self.Prefix.set && !self.Filter.IsEmpty() { - return errXMLNotWellFormed; + if !self.prefix.set && self.Filter.isempty() || self.prefix.set && !self.filter.isempty() { + return ErrXMLNotWellFormed; } - if !self.Prefix.set { - return self.Filter.Validate(); + if !self.prefix.set { + return self.filter.validate(); } Ok(()) } @@ -267,7 +267,7 @@ impl Lifecycle for BucketLifecycleConfiguration { r.validate()?; if let Some(expiration) = r.expiration.as_ref() { if let Some(expired_object_delete_marker) = expiration.expired_object_delete_marker { - if lr_retention && (!expired_object_delete_marker) { + if lr_retention && (expired_object_delete_marker) { return Err(std::io::Error::other(ERR_LIFECYCLE_BUCKET_LOCKED)); } } diff --git 
a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index f33dd4264..257d3821e 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -1982,9 +1982,9 @@ impl S3 for FS { if let Some(rule) = rcfg.0.rule { if let Some(retention) = rule.default_retention { if let Some(mode) = retention.mode { - if mode == ObjectLockRetentionMode::from_static(ObjectLockRetentionMode::GOVERNANCE) { + //if mode == ObjectLockRetentionMode::from_static(ObjectLockRetentionMode::GOVERNANCE) { lr_retention = true; - } + //} } } } From f080c98d0e946a192d3fcde52060df35cba78750 Mon Sep 17 00:00:00 2001 From: likewu Date: Sat, 5 Jul 2025 19:40:41 +0800 Subject: [PATCH 02/26] fix --- rustfs/src/storage/ecfs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 257d3821e..9b1b9d03a 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -1978,7 +1978,7 @@ impl S3 for FS { let mut lr_retention = false; let rcfg = metadata_sys::get_object_lock_config(&bucket).await; - if let Ok(rcfg) = rcfg { + /*if let Ok(rcfg) = rcfg { if let Some(rule) = rcfg.0.rule { if let Some(retention) = rule.default_retention { if let Some(mode) = retention.mode { @@ -1988,7 +1988,7 @@ impl S3 for FS { } } } - } + }*/ //info!("lifecycle_configuration: {:?}", &lifecycle_configuration); From 82dca58cdda3ee1ca5257eeedfeade06170b3861 Mon Sep 17 00:00:00 2001 From: likewu Date: Sat, 5 Jul 2025 19:46:38 +0800 Subject: [PATCH 03/26] fix --- rustfs/src/storage/ecfs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 9b1b9d03a..605612640 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -1977,8 +1977,8 @@ impl S3 for FS { } = req.input; let mut lr_retention = false; - let rcfg = metadata_sys::get_object_lock_config(&bucket).await; - /*if let Ok(rcfg) = rcfg { + /*let rcfg = 
metadata_sys::get_object_lock_config(&bucket).await; + if let Ok(rcfg) = rcfg { if let Some(rule) = rcfg.0.rule { if let Some(retention) = rule.default_retention { if let Some(mode) = retention.mode { From c322a2ae503d14b1834f1c4edf543ba53c2fdc11 Mon Sep 17 00:00:00 2001 From: likewu Date: Sun, 6 Jul 2025 11:40:20 +0800 Subject: [PATCH 04/26] fix delete-marker expiration. add api_restore. --- .../ecstore/src/client/api_bucket_policy.rs | 1 - .../ecstore/src/client/api_error_response.rs | 1 - .../ecstore/src/client/api_get_object_acl.rs | 180 ++++++++++++++ .../src/client/api_get_object_attributes.rs | 229 ++++++++++++++++++ .../ecstore/src/client/api_get_object_file.rs | 141 +++++++++++ crates/ecstore/src/client/api_get_options.rs | 6 +- crates/ecstore/src/client/api_list.rs | 1 - crates/ecstore/src/client/api_put_object.rs | 1 - .../src/client/api_put_object_multipart.rs | 1 - .../src/client/api_put_object_streaming.rs | 1 - crates/ecstore/src/client/api_remove.rs | 1 - crates/ecstore/src/client/api_restore.rs | 163 +++++++++++++ crates/ecstore/src/client/api_s3_datatypes.rs | 1 - crates/ecstore/src/client/api_stat.rs | 153 ++++++++++++ crates/ecstore/src/client/mod.rs | 5 + crates/ecstore/src/tier/tier.rs | 2 +- rustfs/src/admin/handlers/tier.rs | 181 +++++++++++++- 17 files changed, 1055 insertions(+), 13 deletions(-) create mode 100644 crates/ecstore/src/client/api_get_object_acl.rs create mode 100644 crates/ecstore/src/client/api_get_object_attributes.rs create mode 100644 crates/ecstore/src/client/api_get_object_file.rs create mode 100644 crates/ecstore/src/client/api_restore.rs create mode 100644 crates/ecstore/src/client/api_stat.rs diff --git a/crates/ecstore/src/client/api_bucket_policy.rs b/crates/ecstore/src/client/api_bucket_policy.rs index d9adbfe0f..8ed9c6065 100644 --- a/crates/ecstore/src/client/api_bucket_policy.rs +++ b/crates/ecstore/src/client/api_bucket_policy.rs @@ -1,4 +1,3 @@ -#![allow(clippy::map_entry)] // Copyright 2024 RustFS Team // // 
Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/crates/ecstore/src/client/api_error_response.rs b/crates/ecstore/src/client/api_error_response.rs index 6dbcbfb04..31ca06456 100644 --- a/crates/ecstore/src/client/api_error_response.rs +++ b/crates/ecstore/src/client/api_error_response.rs @@ -1,4 +1,3 @@ -#![allow(clippy::map_entry)] // Copyright 2024 RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/crates/ecstore/src/client/api_get_object_acl.rs b/crates/ecstore/src/client/api_get_object_acl.rs new file mode 100644 index 000000000..1a9167a38 --- /dev/null +++ b/crates/ecstore/src/client/api_get_object_acl.rs @@ -0,0 +1,180 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#![allow(unused_imports)] +#![allow(unused_variables)] +#![allow(unused_mut)] +#![allow(unused_assignments)] +#![allow(unused_must_use)] +#![allow(clippy::all)] + +use bytes::Bytes; +use http::{HeaderMap, HeaderValue}; +use s3s::dto::Owner; +use std::io::Cursor; +use std::collections::HashMap; +use tokio::io::BufReader; + +use rustfs_utils::EMPTY_STRING_SHA256_HASH; +use crate::client::{ + api_error_response::{err_invalid_argument, http_resp_to_error_response}, + api_get_options::GetObjectOptions, + transition_api::{to_object_info, ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient}, +}; + +#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)] +struct Grantee { + id: String, + display_name: String, + uri: String, +} + +#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)] +struct Grant { + grantee: Grantee, + permission: String, +} + +#[derive(Debug, Default, serde::Serialize, serde::Deserialize)] +pub struct AccessControlList { + pub grant: Vec, + pub permission: String, +} + +#[derive(Debug, Default, serde::Deserialize)] +pub struct AccessControlPolicy { + #[serde(skip)] + owner: Owner, + pub access_control_list: AccessControlList, +} + +impl TransitionClient { + pub async fn get_object_acl(&self, bucket_name: &str, object_name: &str) -> Result { + let mut url_values = HashMap::new(); + url_values.insert("acl".to_string(), "".to_string()); + let mut resp = self + .execute_method( + http::Method::GET, + &mut RequestMetadata { + bucket_name: bucket_name.to_string(), + object_name: object_name.to_string(), + query_values: url_values, + custom_header: HeaderMap::new(), + content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(), + content_body: ReaderImpl::Body(Bytes::new()), + content_length: 0, + content_md5_base64: "".to_string(), + stream_sha256: false, + trailer: HeaderMap::new(), + pre_sign_url: Default::default(), + add_crc: Default::default(), + extra_pre_sign_header: Default::default(), + bucket_location: 
Default::default(), + expires: Default::default(), + }, + ) + .await?; + + if resp.status() != http::StatusCode::OK { + let b = resp.body().bytes().expect("err").to_vec(); + return Err(std::io::Error::other(http_resp_to_error_response(resp, b, bucket_name, object_name))); + } + + let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec(); + let mut res = match serde_xml_rs::from_str::(&String::from_utf8(b).unwrap()) { + Ok(result) => result, + Err(err) => { + return Err(std::io::Error::other(err.to_string())); + } + }; + + let mut obj_info = self.stat_object(bucket_name, object_name, &GetObjectOptions::default()).await?; + + obj_info.owner.display_name = res.owner.display_name.clone(); + obj_info.owner.id = res.owner.id.clone(); + + //obj_info.grant.extend(res.access_control_list.grant); + + let canned_acl = get_canned_acl(&res); + if canned_acl != "" { + obj_info.metadata.insert("X-Amz-Acl", HeaderValue::from_str(&canned_acl).unwrap()); + return Ok(obj_info); + } + + let grant_acl = get_amz_grant_acl(&res); + /*for (k, v) in grant_acl { + obj_info.metadata.insert(HeaderName::from_bytes(k.as_bytes()).unwrap(), HeaderValue::from_str(&v.to_string()).unwrap()); + }*/ + + Ok(obj_info) + } +} + +fn get_canned_acl(ac_policy: &AccessControlPolicy) -> String { + let grants = ac_policy.access_control_list.grant.clone(); + + if grants.len() == 1 { + if grants[0].grantee.uri == "" && grants[0].permission == "FULL_CONTROL" { + return "private".to_string(); + } + } else if grants.len() == 2 { + for g in grants { + if g.grantee.uri == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && &g.permission == "READ" { + return "authenticated-read".to_string(); + } + if g.grantee.uri == "http://acs.amazonaws.com/groups/global/AllUsers" && &g.permission == "READ" { + return "public-read".to_string(); + } + if g.permission == "READ" && g.grantee.id == ac_policy.owner.id.clone().unwrap() { + return "bucket-owner-read".to_string(); + } + } + } else if grants.len() == 3 
{ + for g in grants { + if g.grantee.uri == "http://acs.amazonaws.com/groups/global/AllUsers" && g.permission == "WRITE" { + return "public-read-write".to_string(); + } + } + } + "".to_string() +} + +pub fn get_amz_grant_acl(ac_policy: &AccessControlPolicy) -> HashMap> { + let grants = ac_policy.access_control_list.grant.clone(); + let mut res = HashMap::>::new(); + + for g in grants { + let mut id = "id=".to_string(); + id.push_str(&g.grantee.id); + let permission: &str = &g.permission; + match permission { + "READ" => { + res.entry("X-Amz-Grant-Read".to_string()).or_insert(vec![]).push(id); + } + "WRITE" => { + res.entry("X-Amz-Grant-Write".to_string()).or_insert(vec![]).push(id); + } + "READ_ACP" => { + res.entry("X-Amz-Grant-Read-Acp".to_string()).or_insert(vec![]).push(id); + } + "WRITE_ACP" => { + res.entry("X-Amz-Grant-Write-Acp".to_string()).or_insert(vec![]).push(id); + } + "FULL_CONTROL" => { + res.entry("X-Amz-Grant-Full-Control".to_string()).or_insert(vec![]).push(id); + } + _ => (), + } + } + res +} diff --git a/crates/ecstore/src/client/api_get_object_attributes.rs b/crates/ecstore/src/client/api_get_object_attributes.rs new file mode 100644 index 000000000..9204e7dd5 --- /dev/null +++ b/crates/ecstore/src/client/api_get_object_attributes.rs @@ -0,0 +1,229 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#![allow(unused_imports)] +#![allow(unused_variables)] +#![allow(unused_mut)] +#![allow(unused_assignments)] +#![allow(unused_must_use)] +#![allow(clippy::all)] + +use bytes::Bytes; +use http::{HeaderMap, HeaderValue}; +use time::OffsetDateTime; +use std::io::Cursor; +use std::collections::HashMap; +use tokio::io::BufReader; + +use s3s::{Body, dto::Owner}; +use s3s::header::{X_AMZ_OBJECT_ATTRIBUTES, X_AMZ_DELETE_MARKER, X_AMZ_METADATA_DIRECTIVE, X_AMZ_VERSION_ID, + X_AMZ_REQUEST_CHARGED, X_AMZ_RESTORE, X_AMZ_PART_NUMBER_MARKER, X_AMZ_MAX_PARTS,}; +use rustfs_utils::EMPTY_STRING_SHA256_HASH; +use crate::client::constants::{GET_OBJECT_ATTRIBUTES_MAX_PARTS, GET_OBJECT_ATTRIBUTES_TAGS, ISO8601_DATEFORMAT}; + +use crate::client::{ + api_error_response::err_invalid_argument, + api_get_options::GetObjectOptions, + transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info}, + api_get_object_acl::AccessControlPolicy, +}; + +struct ObjectAttributesOptions { + max_parts: i64, + version_id: String, + part_number_marker: i64, + //server_side_encryption: encrypt::ServerSide, +} + +struct ObjectAttributes { + version_id: String, + last_modified: OffsetDateTime, + object_attributes_response: ObjectAttributesResponse, +} + +impl ObjectAttributes { + fn new() -> Self { + Self { + version_id: "".to_string(), + last_modified: OffsetDateTime::now_utc(), + object_attributes_response: ObjectAttributesResponse::new(), + } + } +} + +#[derive(Debug, Default, serde::Deserialize)] +pub struct Checksum { + checksum_crc32: String, + checksum_crc32c: String, + checksum_sha1: String, + checksum_sha256: String, +} + +impl Checksum { + fn new() -> Self { + Self { + checksum_crc32: "".to_string(), + checksum_crc32c: "".to_string(), + checksum_sha1: "".to_string(), + checksum_sha256: "".to_string(), + } + } +} + +#[derive(Debug, Default, serde::Deserialize)] +struct ObjectParts { + parts_count: i64, + part_number_marker: i64, + next_part_number_marker: 
i64, + max_parts: i64, + is_truncated: bool, + parts: Vec, +} + +impl ObjectParts { + fn new() -> Self { + Self { + parts_count: 0, + part_number_marker: 0, + next_part_number_marker: 0, + max_parts: 0, + is_truncated: false, + parts: Vec::new(), + } + } +} + +#[derive(Debug, Default, serde::Deserialize)] +struct ObjectAttributesResponse { + etag: String, + storage_class: String, + object_size: i64, + checksum: Checksum, + object_parts: ObjectParts, +} + +impl ObjectAttributesResponse { + fn new() -> Self { + Self { + etag: "".to_string(), + storage_class: "".to_string(), + object_size: 0, + checksum: Checksum::new(), + object_parts: ObjectParts::new(), + } + } +} + +#[derive(Debug, Default, serde::Deserialize)] +struct ObjectAttributePart { + checksum_crc32: String, + checksum_crc32c: String, + checksum_sha1: String, + checksum_sha256: String, + part_number: i64, + size: i64, +} + +impl ObjectAttributes { + pub async fn parse_response(&mut self, resp: &mut http::Response) -> Result<(), std::io::Error> { + let h = resp.headers(); + let mod_time = OffsetDateTime::parse(h.get("Last-Modified").unwrap().to_str().unwrap(), ISO8601_DATEFORMAT).unwrap(); //RFC7231Time + self.last_modified = mod_time; + self.version_id = h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap().to_string(); + + let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec(); + let mut response = match serde_xml_rs::from_str::(&String::from_utf8(b).unwrap()) { + Ok(result) => result, + Err(err) => { + return Err(std::io::Error::other(err.to_string())); + } + }; + self.object_attributes_response = response; + + Ok(()) + } +} + +impl TransitionClient { + pub async fn get_object_attributes(&self, bucket_name: &str, object_name: &str, opts: ObjectAttributesOptions) -> Result { + let mut url_values = HashMap::new(); + url_values.insert("attributes".to_string(), "".to_string()); + if opts.version_id != "" { + url_values.insert("versionId".to_string(), opts.version_id); + } + + let mut headers = 
HeaderMap::new(); + headers.insert(X_AMZ_OBJECT_ATTRIBUTES, HeaderValue::from_str(GET_OBJECT_ATTRIBUTES_TAGS).unwrap()); + + if opts.part_number_marker > 0 { + headers.insert(X_AMZ_PART_NUMBER_MARKER, HeaderValue::from_str(&opts.part_number_marker.to_string()).unwrap()); + } + + if opts.max_parts > 0 { + headers.insert(X_AMZ_MAX_PARTS, HeaderValue::from_str(&opts.max_parts.to_string()).unwrap()); + } else { + headers.insert(X_AMZ_MAX_PARTS, HeaderValue::from_str(&GET_OBJECT_ATTRIBUTES_MAX_PARTS.to_string()).unwrap()); + } + + /*if opts.server_side_encryption.is_some() { + opts.server_side_encryption.Marshal(headers); + }*/ + + let mut resp = self + .execute_method( + http::Method::HEAD, + &mut RequestMetadata { + bucket_name: bucket_name.to_string(), + object_name: object_name.to_string(), + query_values: url_values, + custom_header: headers, + content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(), + content_md5_base64: "".to_string(), + content_body: ReaderImpl::Body(Bytes::new()), + content_length: 0, + stream_sha256: false, + trailer: HeaderMap::new(), + pre_sign_url: Default::default(), + add_crc: Default::default(), + extra_pre_sign_header: Default::default(), + bucket_location: Default::default(), + expires: Default::default(), + }, + ) + .await?; + + let h = resp.headers(); + let has_etag = h.get("ETag").unwrap().to_str().unwrap(); + if !has_etag.is_empty() { + return Err(std::io::Error::other("get_object_attributes is not supported by the current endpoint version")); + } + + if resp.status() != http::StatusCode::OK { + let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec(); + let err_body = String::from_utf8(b).unwrap(); + let mut er = match serde_xml_rs::from_str::(&err_body) { + Ok(result) => result, + Err(err) => { + return Err(std::io::Error::other(err.to_string())); + } + }; + + return Err(std::io::Error::other(er.access_control_list.permission)); + } + + let mut oa = ObjectAttributes::new(); + oa.parse_response(&mut resp).await?; + + 
Ok(oa) + } +} \ No newline at end of file diff --git a/crates/ecstore/src/client/api_get_object_file.rs b/crates/ecstore/src/client/api_get_object_file.rs new file mode 100644 index 000000000..6bda20699 --- /dev/null +++ b/crates/ecstore/src/client/api_get_object_file.rs @@ -0,0 +1,141 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#![allow(unused_imports)] +#![allow(unused_variables)] +#![allow(unused_mut)] +#![allow(unused_assignments)] +#![allow(unused_must_use)] +#![allow(clippy::all)] + +use bytes::Bytes; +use http::HeaderMap; +use std::io::Cursor; +use tokio::io::BufReader; +#[cfg(not(windows))] +use std::os::unix::fs::PermissionsExt; +#[cfg(not(windows))] +use std::os::unix::fs::OpenOptionsExt; +#[cfg(not(windows))] +use std::os::unix::fs::MetadataExt; +#[cfg(windows)] +use std::os::windows::fs::MetadataExt; + +use crate::client::{ + api_error_response::err_invalid_argument, + api_get_options::GetObjectOptions, + transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info}, +}; + +impl TransitionClient { + pub async fn fget_object(&self, bucket_name: &str, object_name: &str, file_path: &str, opts: GetObjectOptions) -> Result<(), std::io::Error> { + match std::fs::metadata(file_path) { + Ok(file_path_stat) => { + let ft = file_path_stat.file_type(); + if ft.is_dir() { + return Err(std::io::Error::other(err_invalid_argument("filename is a directory."))); + } + }, + Err(err) 
=> { + return Err(std::io::Error::other(err)); + } + } + + let path = std::path::Path::new(file_path); + if let Some(parent) = path.parent() { + if let Some(object_dir) = parent.file_name() { + match std::fs::create_dir_all(object_dir) { + Ok(_) => { + let dir = std::path::Path::new(object_dir); + if let Ok(dir_stat) = dir.metadata() { + #[cfg(not(windows))] + dir_stat.permissions().set_mode(0o700); + } + } + Err(err) => { + return Err(std::io::Error::other(err)); + } + } + } + } + + let object_stat = match self.stat_object(bucket_name, object_name, &opts).await { + Ok(object_stat) => object_stat, + Err(err) => { + return Err(std::io::Error::other(err)); + } + }; + + let mut file_part_path = file_path.to_string(); + file_part_path.push_str(""/*sum_sha256_hex(object_stat.etag.as_bytes())*/); + file_part_path.push_str(".part.rustfs"); + + #[cfg(not(windows))] + let file_part = match std::fs::OpenOptions::new().mode(0o600).open(file_part_path.clone()) { + Ok(file_part) => file_part, + Err(err) => { + return Err(std::io::Error::other(err)); + } + }; + #[cfg(windows)] + let file_part = match std::fs::OpenOptions::new().open(file_part_path.clone()) { + Ok(file_part) => file_part, + Err(err) => { + return Err(std::io::Error::other(err)); + } + }; + + let mut close_and_remove = true; + /*defer(|| { + if close_and_remove { + _ = file_part.close(); + let _ = std::fs::remove(file_part_path); + } + });*/ + + let st = match file_part.metadata() { + Ok(st) => st, + Err(err) => { + return Err(std::io::Error::other(err)); + } + }; + + let mut opts = opts; + #[cfg(windows)] + if st.file_size() > 0 { + opts.set_range(st.file_size() as i64, 0); + } + + let object_reader = match self.get_object(bucket_name, object_name, &opts) { + Ok(object_reader) => object_reader, + Err(err) => { + return Err(std::io::Error::other(err)); + } + }; + + /*if let Err(err) = std::fs::copy(file_part, object_reader) { + return Err(std::io::Error::other(err)); + }*/ + + close_and_remove = false; + /*if let 
Err(err) = file_part.close() { + return Err(std::io::Error::other(err)); + }*/ + + if let Err(err) = std::fs::rename(file_part_path, file_path) { + return Err(std::io::Error::other(err)); + } + + Ok(()) + } +} \ No newline at end of file diff --git a/crates/ecstore/src/client/api_get_options.rs b/crates/ecstore/src/client/api_get_options.rs index 324d3006f..3692b29b4 100644 --- a/crates/ecstore/src/client/api_get_options.rs +++ b/crates/ecstore/src/client/api_get_options.rs @@ -29,9 +29,9 @@ use crate::client::api_error_response::err_invalid_argument; #[derive(Default)] #[allow(dead_code)] pub struct AdvancedGetOptions { - replication_deletemarker: bool, - is_replication_ready_for_deletemarker: bool, - replication_proxy_request: String, + pub replication_delete_marker: bool, + pub is_replication_ready_for_delete_marker: bool, + pub replication_proxy_request: String, } pub struct GetObjectOptions { diff --git a/crates/ecstore/src/client/api_list.rs b/crates/ecstore/src/client/api_list.rs index bd5259e99..f978f0634 100644 --- a/crates/ecstore/src/client/api_list.rs +++ b/crates/ecstore/src/client/api_list.rs @@ -1,4 +1,3 @@ -#![allow(clippy::map_entry)] // Copyright 2024 RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/crates/ecstore/src/client/api_put_object.rs b/crates/ecstore/src/client/api_put_object.rs index 0cfa03789..b0d81c6e4 100644 --- a/crates/ecstore/src/client/api_put_object.rs +++ b/crates/ecstore/src/client/api_put_object.rs @@ -1,4 +1,3 @@ -#![allow(clippy::map_entry)] // Copyright 2024 RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/crates/ecstore/src/client/api_put_object_multipart.rs b/crates/ecstore/src/client/api_put_object_multipart.rs index 4bc685297..67854c72a 100644 --- a/crates/ecstore/src/client/api_put_object_multipart.rs +++ b/crates/ecstore/src/client/api_put_object_multipart.rs @@ -1,4 +1,3 @@ -#![allow(clippy::map_entry)] // Copyright 2024 
RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/crates/ecstore/src/client/api_put_object_streaming.rs b/crates/ecstore/src/client/api_put_object_streaming.rs index fa6d1c897..2e56dcac3 100644 --- a/crates/ecstore/src/client/api_put_object_streaming.rs +++ b/crates/ecstore/src/client/api_put_object_streaming.rs @@ -1,4 +1,3 @@ -#![allow(clippy::map_entry)] // Copyright 2024 RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/crates/ecstore/src/client/api_remove.rs b/crates/ecstore/src/client/api_remove.rs index 853ee6413..a6845229a 100644 --- a/crates/ecstore/src/client/api_remove.rs +++ b/crates/ecstore/src/client/api_remove.rs @@ -1,4 +1,3 @@ -#![allow(clippy::map_entry)] // Copyright 2024 RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/crates/ecstore/src/client/api_restore.rs b/crates/ecstore/src/client/api_restore.rs new file mode 100644 index 000000000..ad11fcf39 --- /dev/null +++ b/crates/ecstore/src/client/api_restore.rs @@ -0,0 +1,163 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#![allow(unused_imports)] +#![allow(unused_variables)] +#![allow(unused_mut)] +#![allow(unused_assignments)] +#![allow(unused_must_use)] +#![allow(clippy::all)] + +use bytes::Bytes; +use http::HeaderMap; +use std::io::Cursor; +use tokio::io::BufReader; +use std::collections::HashMap; + +use crate::client::{ + api_error_response::{err_invalid_argument, http_resp_to_error_response}, api_get_object_acl::AccessControlList, api_get_options::GetObjectOptions, transition_api::{to_object_info, ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient} +}; + +const TIER_STANDARD: &str = "Standard"; +const TIER_BULK: &str = "Bulk"; +const TIER_EXPEDITED: &str = "Expedited"; + +#[derive(Debug, Default, serde::Serialize)] +struct GlacierJobParameters { + tier: String, +} + +#[derive(Debug, Default, serde::Serialize, serde::Deserialize)] +struct Encryption { + encryption_type: String, + kms_context: String, + kms_key_id: String, +} + +#[derive(Debug, Default, serde::Serialize, serde::Deserialize)] +struct MetadataEntry { + name: String, + value: String, +} + +#[derive(Debug, Default, serde::Serialize)] +struct S3 { + access_control_list: AccessControlList, + bucket_name: String, + prefix: String, + canned_acl: String, + encryption: Encryption, + storage_class: String, + //tagging: Tags, + user_metadata: MetadataEntry, +} + +#[derive(Debug, Default, serde::Serialize)] +struct SelectParameters { + expression_type: String, + expression: String, + //input_serialization: SelectObjectInputSerialization, + //output_serialization: SelectObjectOutputSerialization, +} + +#[derive(Debug, Default, serde::Serialize)] +struct OutputLocation(S3); + +#[derive(Debug, Default, serde::Serialize)] +struct RestoreRequest { + restore_type: String, + tier: String, + days: i64, + glacier_job_parameters: GlacierJobParameters, + description: String, + select_parameters: SelectParameters, + output_location: OutputLocation, +} + +impl RestoreRequest { + fn set_days(&mut self, v: i64) { + 
self.days = v; + } + + fn set_glacier_job_parameters(&mut self, v: GlacierJobParameters) { + self.glacier_job_parameters = v; + } + + fn set_type(&mut self, v: &str) { + self.restore_type = v.to_string(); + } + + fn set_tier(&mut self, v: &str) { + self.tier = v.to_string(); + } + + fn set_description(&mut self, v: &str) { + self.description = v.to_string(); + } + + fn set_select_parameters(&mut self, v: SelectParameters) { + self.select_parameters = v; + } + + fn set_output_location(&mut self, v: OutputLocation) { + self.output_location = v; + } +} + +impl TransitionClient { + pub async fn restore_object(&self, bucket_name: &str, object_name: &str, version_id: &str, restore_req: &RestoreRequest) -> Result<(), std::io::Error> { + let restore_request = match serde_xml_rs::to_string(restore_req) { + Ok(buf) => buf, + Err(e) => { + return Err(std::io::Error::other(e)); + } + }; + let restore_request_bytes = restore_request.as_bytes().to_vec(); + + let mut url_values = HashMap::new(); + url_values.insert("restore".to_string(), "".to_string()); + if version_id != "" { + url_values.insert("versionId".to_string(), version_id.to_string()); + } + + let restore_request_buffer = Bytes::from(restore_request_bytes.clone()); + let resp = self + .execute_method( + http::Method::HEAD, + &mut RequestMetadata { + bucket_name: bucket_name.to_string(), + object_name: object_name.to_string(), + query_values: url_values, + custom_header: HeaderMap::new(), + content_sha256_hex: "".to_string(), //sum_sha256_hex(&restore_request_bytes), + content_md5_base64: "".to_string(), //sum_md5_base64(&restore_request_bytes), + content_body: ReaderImpl::Body(restore_request_buffer), + content_length: restore_request_bytes.len() as i64, + stream_sha256: false, + trailer: HeaderMap::new(), + pre_sign_url: Default::default(), + add_crc: Default::default(), + extra_pre_sign_header: Default::default(), + bucket_location: Default::default(), + expires: Default::default(), + }, + ) + .await?; + + let b = 
resp.body().bytes().expect("err").to_vec(); + if resp.status() != http::StatusCode::ACCEPTED && resp.status() != http::StatusCode::OK { + return Err(std::io::Error::other(http_resp_to_error_response(resp, b, bucket_name, ""))); + } + Ok(()) + } +} \ No newline at end of file diff --git a/crates/ecstore/src/client/api_s3_datatypes.rs b/crates/ecstore/src/client/api_s3_datatypes.rs index a026c7312..ba26325e2 100644 --- a/crates/ecstore/src/client/api_s3_datatypes.rs +++ b/crates/ecstore/src/client/api_s3_datatypes.rs @@ -1,4 +1,3 @@ -#![allow(clippy::map_entry)] // Copyright 2024 RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/crates/ecstore/src/client/api_stat.rs b/crates/ecstore/src/client/api_stat.rs new file mode 100644 index 000000000..b9f209f31 --- /dev/null +++ b/crates/ecstore/src/client/api_stat.rs @@ -0,0 +1,153 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#![allow(unused_imports)] +#![allow(unused_variables)] +#![allow(unused_mut)] +#![allow(unused_assignments)] +#![allow(unused_must_use)] +#![allow(clippy::all)] + +use bytes::Bytes; +use http::{HeaderMap, HeaderValue}; +use rustfs_utils::EMPTY_STRING_SHA256_HASH; +use uuid::Uuid; +use std::{collections::HashMap, str::FromStr}; +use tokio::io::BufReader; + +use s3s::header::{X_AMZ_DELETE_MARKER, X_AMZ_VERSION_ID}; +use crate::client::{ + api_error_response::{err_invalid_argument, http_resp_to_error_response, ErrorResponse}, + api_get_options::GetObjectOptions, + transition_api::{to_object_info, ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient}, +}; + +impl TransitionClient { + pub async fn bucket_exists(&self, bucket_name: &str) -> Result { + let resp = self + .execute_method( + http::Method::HEAD, + &mut RequestMetadata { + bucket_name: bucket_name.to_string(), + object_name: "".to_string(), + query_values: HashMap::new(), + custom_header: HeaderMap::new(), + content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(), + content_md5_base64: "".to_string(), + content_body: ReaderImpl::Body(Bytes::new()), + content_length: 0, + stream_sha256: false, + trailer: HeaderMap::new(), + pre_sign_url: Default::default(), + add_crc: Default::default(), + extra_pre_sign_header: Default::default(), + bucket_location: Default::default(), + expires: Default::default(), + }, + ) + .await; + + if let Ok(resp) = resp { + let b = resp.body().bytes().expect("err").to_vec(); + let resperr = http_resp_to_error_response(resp, b, bucket_name, ""); + /*if to_error_response(resperr).code == "NoSuchBucket" { + return Ok(false); + } + if resp.status_code() != http::StatusCode::OK { + return Ok(false); + }*/ + } + Ok(true) + } + + pub async fn stat_object(&self, bucket_name: &str, object_name: &str, opts: &GetObjectOptions) -> Result { + let mut headers = opts.header(); + if opts.internal.replication_delete_marker { + headers.insert("X-Source-DeleteMarker", 
HeaderValue::from_str("true").unwrap()); + } + if opts.internal.is_replication_ready_for_delete_marker { + headers.insert("X-Check-Replication-Ready", HeaderValue::from_str("true").unwrap()); + } + + let resp = self + .execute_method( + http::Method::HEAD, + &mut RequestMetadata { + bucket_name: bucket_name.to_string(), + object_name: object_name.to_string(), + query_values: opts.to_query_values(), + custom_header: headers, + content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(), + content_md5_base64: "".to_string(), + content_body: ReaderImpl::Body(Bytes::new()), + content_length: 0, + stream_sha256: false, + trailer: HeaderMap::new(), + pre_sign_url: Default::default(), + add_crc: Default::default(), + extra_pre_sign_header: Default::default(), + bucket_location: Default::default(), + expires: Default::default(), + }, + ) + .await; + + match resp { + Ok(resp) => { + let h = resp.headers(); + let delete_marker = if let Some(x_amz_delete_marker) = h.get(X_AMZ_DELETE_MARKER.as_str()) { + x_amz_delete_marker.to_str().unwrap() == "true" + } else { false }; + let replication_ready = if let Some(x_amz_delete_marker) = h.get("X-Replication-Ready") { + x_amz_delete_marker.to_str().unwrap() == "true" + } else { false }; + if resp.status() != http::StatusCode::OK && resp.status() != http::StatusCode::PARTIAL_CONTENT { + if resp.status() == http::StatusCode::METHOD_NOT_ALLOWED && opts.version_id != "" && delete_marker { + let err_resp = ErrorResponse { + status_code: resp.status(), + code: s3s::S3ErrorCode::MethodNotAllowed, + message: "the specified method is not allowed against this resource.".to_string(), + bucket_name: bucket_name.to_string(), + key: object_name.to_string(), + ..Default::default() + }; + return Ok(ObjectInfo { + version_id: match Uuid::from_str(h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap()) { + Ok(v) => v, + Err(e) => { return Err(std::io::Error::other(e)); } + }, + is_delete_marker: delete_marker, + ..Default::default() + }); + //err_resp + } + 
return Ok(ObjectInfo { + version_id: match Uuid::from_str(h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap()) { + Ok(v) => v, + Err(e) => { return Err(std::io::Error::other(e)); } + }, + is_delete_marker: delete_marker, + replication_ready: replication_ready, + ..Default::default() + }); + //http_resp_to_error_response(resp, bucket_name, object_name) + } + + Ok(to_object_info(bucket_name, object_name, h).unwrap()) + } + Err(err) => { + return Err(std::io::Error::other(err)); + } + } + } +} diff --git a/crates/ecstore/src/client/mod.rs b/crates/ecstore/src/client/mod.rs index 25892d2fd..7cc781a69 100644 --- a/crates/ecstore/src/client/mod.rs +++ b/crates/ecstore/src/client/mod.rs @@ -23,6 +23,11 @@ pub mod api_put_object_common; pub mod api_put_object_multipart; pub mod api_put_object_streaming; pub mod api_remove; +pub mod api_restore; +pub mod api_stat; +pub mod api_get_object_acl; +pub mod api_get_object_attributes; +pub mod api_get_object_file; pub mod api_s3_datatypes; pub mod bucket_cache; pub mod constants; diff --git a/crates/ecstore/src/tier/tier.rs b/crates/ecstore/src/tier/tier.rs index 3d034f699..fdfed4983 100644 --- a/crates/ecstore/src/tier/tier.rs +++ b/crates/ecstore/src/tier/tier.rs @@ -1,4 +1,3 @@ -#![allow(unused_imports)] // Copyright 2024 RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,6 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+#![allow(unused_imports)] #![allow(unused_variables)] #![allow(unused_mut)] #![allow(unused_assignments)] diff --git a/rustfs/src/admin/handlers/tier.rs b/rustfs/src/admin/handlers/tier.rs index ff1efa10b..35c8b4c16 100644 --- a/rustfs/src/admin/handlers/tier.rs +++ b/rustfs/src/admin/handlers/tier.rs @@ -1,4 +1,3 @@ -#![allow(unused_variables, unused_mut, unused_must_use)] // Copyright 2024 RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,6 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +#![allow(unused_variables, unused_mut, unused_must_use)] use http::{HeaderMap, StatusCode}; //use iam::get_global_action_cred; @@ -461,3 +461,182 @@ impl Operation for ClearTier { Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)) } } + +/*pub struct PostRestoreObject {} +#[async_trait::async_trait] +impl Operation for PostRestoreObject { + async fn call(&self, req: S3Request, params: Params<'_, '_>) -> S3Result> { + let query = { + if let Some(query) = req.uri.query() { + let input: PostRestoreObject = + from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?; + input + } else { + PostRestoreObject::default() + } + }; + + let bucket = params.bucket; + if let Err(e) = un_escape_path(params.object) { + warn!("post restore object failed, e: {:?}", e); + return Err(S3Error::with_message(S3ErrorCode::Custom("PostRestoreObjectFailed".into()), "post restore object failed")); + } + + let Some(store) = new_object_layer_fn() else { + return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); + }; + + let get_object_info = store.get_object_info(); + + if Err(err) = check_request_auth_type(req, policy::RestoreObjectAction, bucket, object) { + return Err(S3Error::with_message(S3ErrorCode::Custom("PostRestoreObjectFailed".into()), 
"post restore object failed")); + } + + if req.content_length <= 0 { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + } + let Some(opts) = post_restore_opts(req, bucket, object) else { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + }; + + let Some(obj_info) = getObjectInfo(ctx, bucket, object, opts) else { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + }; + + if obj_info.transitioned_object.status != lifecycle::TRANSITION_COMPLETE { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + } + + let mut api_err; + let Some(rreq) = parsere_store_request(req.body(), req.content_length) else { + let api_err = errorCodes.ToAPIErr(ErrMalformedXML); + api_err.description = err.Error() + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + }; + let mut status_code = http::StatusCode::OK; + let mut already_restored = false; + if Err(err) = rreq.validate(store) { + api_err = errorCodes.ToAPIErr(ErrMalformedXML) + api_err.description = err.Error() + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + } else { + if obj_info.restore_ongoing && rreq.Type != "SELECT" { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrObjectRestoreAlreadyInProgress".into()), "post restore object failed")); + } + if !obj_info.restore_ongoing && !obj_info.restore_expires.unix_timestamp() == 0 { + status_code = http::StatusCode::Accepted; + already_restored = true; + } + } + let restore_expiry = lifecycle::expected_expiry_time(OffsetDateTime::now_utc(), rreq.days); + let mut metadata = clone_mss(obj_info.user_defined); + + if rreq.type != "SELECT" { + obj_info.metadataOnly = true; + 
metadata[xhttp.AmzRestoreExpiryDays] = rreq.days; + metadata[xhttp.AmzRestoreRequestDate] = OffsetDateTime::now_utc().format(http::TimeFormat); + if already_restored { + metadata[xhttp.AmzRestore] = completedRestoreObj(restore_expiry).String() + } else { + metadata[xhttp.AmzRestore] = ongoingRestoreObj().String() + } + obj_info.user_defined = metadata; + if let Err(err) = store.copy_object(bucket, object, bucket, object, obj_info, ObjectOptions { + version_id: obj_info.version_id, + }, ObjectOptions { + version_id: obj_info.version_id, + m_time: obj_info.mod_time, + }) { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrInvalidObjectState".into()), "post restore object failed")); + } + if already_restored { + return Ok(()); + } + } + + let restore_object = must_get_uuid(); + if rreq.output_location.s3.bucket_name != "" { + w.Header()[xhttp.AmzRestoreOutputPath] = []string{pathJoin(rreq.OutputLocation.S3.BucketName, rreq.OutputLocation.S3.Prefix, restoreObject)} + } + w.WriteHeader(status_code) + send_event(EventArgs { + event_name: event::ObjectRestorePost, + bucket_name: bucket, + object: obj_info, + req_params: extract_req_params(r), + user_agent: req.user_agent(), + host: handlers::get_source_ip(r), + }); + tokio::spawn(async move { + if !rreq.SelectParameters.IsEmpty() { + let actual_size = obj_info.get_actual_size(); + if actual_size.is_err() { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrInvalidObjectState".into()), "post restore object failed")); + } + + let object_rsc = s3select.NewObjectReadSeekCloser( + |offset int64| -> (io.ReadCloser, error) { + rs := &HTTPRangeSpec{ + IsSuffixLength: false, + Start: offset, + End: -1, + } + return getTransitionedObjectReader(bucket, object, rs, r.Header, + obj_info, ObjectOptions {version_id: obj_info.version_id}); + }, + actual_size.unwrap(), + ); + if err = rreq.SelectParameters.Open(objectRSC); err != nil { + if serr, ok := err.(s3select.SelectError); ok { + let encoded_error_response = 
encodeResponse(APIErrorResponse { + code: serr.ErrorCode(), + message: serr.ErrorMessage(), + bucket_name: bucket, + key: object, + resource: r.URL.Path, + request_id: w.Header().Get(xhttp.AmzRequestID), + host_id: globalDeploymentID(), + }); + //writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML) + Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)); + } else { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrInvalidObjectState".into()), "post restore object failed")); + } + return Ok(()); + } + let nr = httptest.NewRecorder(); + let rw = xhttp.NewResponseRecorder(nr); + rw.log_err_body = true; + rw.log_all_body = true; + rreq.select_parameters.evaluate(rw); + rreq.select_parameters.Close(); + return Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)); + } + let opts = ObjectOptions { + transition: TransitionOptions { + restore_request: rreq, + restore_expiry: restore_expiry, + }, + version_id: objInfo.version_id, + } + if Err(err) = store.restore_transitioned_object(bucket, object, opts) { + format!(format!("unable to restore transitioned bucket/object {}/{}: {}", bucket, object, err.to_string())); + return Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)); + } + + send_event(EventArgs { + EventName: event.ObjectRestoreCompleted, + BucketName: bucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: handlers.GetSourceIP(r), + }); + }); + + let mut header = HeaderMap::new(); + header.insert(CONTENT_TYPE, "application/json".parse().unwrap()); + + Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)) + } +}*/ From 60ef1b77b5f1b2e00520d64df54ed20c44329cad Mon Sep 17 00:00:00 2001 From: likewu Date: Sun, 6 Jul 2025 13:51:25 +0800 Subject: [PATCH 05/26] fix --- crates/ecstore/src/bucket/lifecycle/lifecycle.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs 
b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs index 2431d6ba8..8eff441d2 100644 --- a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs +++ b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs @@ -642,7 +642,7 @@ pub fn expected_expiry_time(mod_time: OffsetDateTime, days: i32) -> OffsetDateTi } let t = mod_time .to_offset(offset!(-0:00:00)) - .saturating_add(Duration::days(0 /*days as i64*/)); //debug + .saturating_add(Duration::days(days as i64)); //debug let mut hour = 3600; if let Ok(env_ilm_hour) = env::var("_RUSTFS_ILM_HOUR") { if let Ok(num_hour) = env_ilm_hour.parse::() { From 29287d837ddc63605f28aceb9ed4f69afce8e842 Mon Sep 17 00:00:00 2001 From: likewu Date: Mon, 7 Jul 2025 21:15:35 +0800 Subject: [PATCH 06/26] time retry object upload --- .../ecstore/src/client/api_error_response.rs | 2 +- crates/ecstore/src/client/api_list.rs | 2 +- .../src/client/api_put_object_multipart.rs | 9 +- .../src/client/api_put_object_streaming.rs | 7 +- crates/ecstore/src/client/api_remove.rs | 7 +- crates/ecstore/src/client/api_restore.rs | 57 ++++---- crates/ecstore/src/client/api_stat.rs | 47 ++++--- crates/ecstore/src/client/bucket_cache.rs | 5 +- crates/ecstore/src/client/transition_api.rs | 55 ++++---- crates/utils/src/retry.rs | 122 ++++++++++++++---- 10 files changed, 212 insertions(+), 101 deletions(-) diff --git a/crates/ecstore/src/client/api_error_response.rs b/crates/ecstore/src/client/api_error_response.rs index 31ca06456..7b070a980 100644 --- a/crates/ecstore/src/client/api_error_response.rs +++ b/crates/ecstore/src/client/api_error_response.rs @@ -95,7 +95,7 @@ pub fn to_error_response(err: &std::io::Error) -> ErrorResponse { } pub fn http_resp_to_error_response( - resp: http::Response, + resp: &http::Response, b: Vec, bucket_name: &str, object_name: &str, diff --git a/crates/ecstore/src/client/api_list.rs b/crates/ecstore/src/client/api_list.rs index f978f0634..955122b9e 100644 --- a/crates/ecstore/src/client/api_list.rs +++ 
b/crates/ecstore/src/client/api_list.rs @@ -98,7 +98,7 @@ impl TransitionClient { ) .await?; if resp.status() != StatusCode::OK { - return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, ""))); + return Err(std::io::Error::other(http_resp_to_error_response(&resp, vec![], bucket_name, ""))); } //let mut list_bucket_result = ListBucketV2Result::default(); diff --git a/crates/ecstore/src/client/api_put_object_multipart.rs b/crates/ecstore/src/client/api_put_object_multipart.rs index 67854c72a..b02979a4e 100644 --- a/crates/ecstore/src/client/api_put_object_multipart.rs +++ b/crates/ecstore/src/client/api_put_object_multipart.rs @@ -236,7 +236,12 @@ impl TransitionClient { let resp = self.execute_method(http::Method::POST, &mut req_metadata).await?; //if resp.is_none() { if resp.status() != StatusCode::OK { - return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, object_name))); + return Err(std::io::Error::other(http_resp_to_error_response( + &resp, + vec![], + bucket_name, + object_name, + ))); } //} let initiate_multipart_upload_result = InitiateMultipartUploadResult::default(); @@ -293,7 +298,7 @@ impl TransitionClient { let resp = self.execute_method(http::Method::PUT, &mut req_metadata).await?; if resp.status() != StatusCode::OK { return Err(std::io::Error::other(http_resp_to_error_response( - resp, + &resp, vec![], &p.bucket_name.clone(), &p.object_name, diff --git a/crates/ecstore/src/client/api_put_object_streaming.rs b/crates/ecstore/src/client/api_put_object_streaming.rs index 2e56dcac3..e745693e1 100644 --- a/crates/ecstore/src/client/api_put_object_streaming.rs +++ b/crates/ecstore/src/client/api_put_object_streaming.rs @@ -477,7 +477,12 @@ impl TransitionClient { let resp = self.execute_method(http::Method::PUT, &mut req_metadata).await?; if resp.status() != StatusCode::OK { - return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, object_name))); + return 
Err(std::io::Error::other(http_resp_to_error_response( + &resp, + vec![], + bucket_name, + object_name, + ))); } let (exp_time, rule_id) = if let Some(h_x_amz_expiration) = resp.headers().get(X_AMZ_EXPIRATION) { diff --git a/crates/ecstore/src/client/api_remove.rs b/crates/ecstore/src/client/api_remove.rs index a6845229a..9cb67d86b 100644 --- a/crates/ecstore/src/client/api_remove.rs +++ b/crates/ecstore/src/client/api_remove.rs @@ -425,7 +425,12 @@ impl TransitionClient { }; } _ => { - return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, object_name))); + return Err(std::io::Error::other(http_resp_to_error_response( + &resp, + vec![], + bucket_name, + object_name, + ))); } } return Err(std::io::Error::other(error_response)); diff --git a/crates/ecstore/src/client/api_restore.rs b/crates/ecstore/src/client/api_restore.rs index ad11fcf39..8af4bacd4 100644 --- a/crates/ecstore/src/client/api_restore.rs +++ b/crates/ecstore/src/client/api_restore.rs @@ -20,12 +20,15 @@ use bytes::Bytes; use http::HeaderMap; +use std::collections::HashMap; use std::io::Cursor; use tokio::io::BufReader; -use std::collections::HashMap; use crate::client::{ - api_error_response::{err_invalid_argument, http_resp_to_error_response}, api_get_object_acl::AccessControlList, api_get_options::GetObjectOptions, transition_api::{to_object_info, ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient} + api_error_response::{err_invalid_argument, http_resp_to_error_response}, + api_get_object_acl::AccessControlList, + api_get_options::GetObjectOptions, + transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info}, }; const TIER_STANDARD: &str = "Standard"; @@ -40,32 +43,32 @@ struct GlacierJobParameters { #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] struct Encryption { encryption_type: String, - kms_context: String, - kms_key_id: String, + kms_context: String, + kms_key_id: String, } 
#[derive(Debug, Default, serde::Serialize, serde::Deserialize)] struct MetadataEntry { - name: String, + name: String, value: String, } #[derive(Debug, Default, serde::Serialize)] struct S3 { access_control_list: AccessControlList, - bucket_name: String, - prefix: String, - canned_acl: String, - encryption: Encryption, - storage_class: String, + bucket_name: String, + prefix: String, + canned_acl: String, + encryption: Encryption, + storage_class: String, //tagging: Tags, - user_metadata: MetadataEntry, + user_metadata: MetadataEntry, } #[derive(Debug, Default, serde::Serialize)] struct SelectParameters { - expression_type: String, - expression: String, + expression_type: String, + expression: String, //input_serialization: SelectObjectInputSerialization, //output_serialization: SelectObjectOutputSerialization, } @@ -75,13 +78,13 @@ struct OutputLocation(S3); #[derive(Debug, Default, serde::Serialize)] struct RestoreRequest { - restore_type: String, - tier: String, - days: i64, + restore_type: String, + tier: String, + days: i64, glacier_job_parameters: GlacierJobParameters, - description: String, - select_parameters: SelectParameters, - output_location: OutputLocation, + description: String, + select_parameters: SelectParameters, + output_location: OutputLocation, } impl RestoreRequest { @@ -115,7 +118,13 @@ impl RestoreRequest { } impl TransitionClient { - pub async fn restore_object(&self, bucket_name: &str, object_name: &str, version_id: &str, restore_req: &RestoreRequest) -> Result<(), std::io::Error> { + pub async fn restore_object( + &self, + bucket_name: &str, + object_name: &str, + version_id: &str, + restore_req: &RestoreRequest, + ) -> Result<(), std::io::Error> { let restore_request = match serde_xml_rs::to_string(restore_req) { Ok(buf) => buf, Err(e) => { @@ -139,8 +148,8 @@ impl TransitionClient { object_name: object_name.to_string(), query_values: url_values, custom_header: HeaderMap::new(), - content_sha256_hex: "".to_string(), 
//sum_sha256_hex(&restore_request_bytes), - content_md5_base64: "".to_string(), //sum_md5_base64(&restore_request_bytes), + content_sha256_hex: "".to_string(), //sum_sha256_hex(&restore_request_bytes), + content_md5_base64: "".to_string(), //sum_md5_base64(&restore_request_bytes), content_body: ReaderImpl::Body(restore_request_buffer), content_length: restore_request_bytes.len() as i64, stream_sha256: false, @@ -156,8 +165,8 @@ impl TransitionClient { let b = resp.body().bytes().expect("err").to_vec(); if resp.status() != http::StatusCode::ACCEPTED && resp.status() != http::StatusCode::OK { - return Err(std::io::Error::other(http_resp_to_error_response(resp, b, bucket_name, ""))); + return Err(std::io::Error::other(http_resp_to_error_response(&resp, b, bucket_name, ""))); } Ok(()) } -} \ No newline at end of file +} diff --git a/crates/ecstore/src/client/api_stat.rs b/crates/ecstore/src/client/api_stat.rs index b9f209f31..8a0645584 100644 --- a/crates/ecstore/src/client/api_stat.rs +++ b/crates/ecstore/src/client/api_stat.rs @@ -21,16 +21,16 @@ use bytes::Bytes; use http::{HeaderMap, HeaderValue}; use rustfs_utils::EMPTY_STRING_SHA256_HASH; -use uuid::Uuid; use std::{collections::HashMap, str::FromStr}; use tokio::io::BufReader; +use uuid::Uuid; -use s3s::header::{X_AMZ_DELETE_MARKER, X_AMZ_VERSION_ID}; use crate::client::{ - api_error_response::{err_invalid_argument, http_resp_to_error_response, ErrorResponse}, + api_error_response::{ErrorResponse, err_invalid_argument, http_resp_to_error_response}, api_get_options::GetObjectOptions, - transition_api::{to_object_info, ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient}, + transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info}, }; +use s3s::header::{X_AMZ_DELETE_MARKER, X_AMZ_VERSION_ID}; impl TransitionClient { pub async fn bucket_exists(&self, bucket_name: &str) -> Result { @@ -59,7 +59,7 @@ impl TransitionClient { if let Ok(resp) = resp { let b 
= resp.body().bytes().expect("err").to_vec(); - let resperr = http_resp_to_error_response(resp, b, bucket_name, ""); + let resperr = http_resp_to_error_response(&resp, b, bucket_name, ""); /*if to_error_response(resperr).code == "NoSuchBucket" { return Ok(false); } @@ -70,7 +70,12 @@ impl TransitionClient { Ok(true) } - pub async fn stat_object(&self, bucket_name: &str, object_name: &str, opts: &GetObjectOptions) -> Result { + pub async fn stat_object( + &self, + bucket_name: &str, + object_name: &str, + opts: &GetObjectOptions, + ) -> Result { let mut headers = opts.header(); if opts.internal.replication_delete_marker { headers.insert("X-Source-DeleteMarker", HeaderValue::from_str("true").unwrap()); @@ -107,24 +112,30 @@ impl TransitionClient { let h = resp.headers(); let delete_marker = if let Some(x_amz_delete_marker) = h.get(X_AMZ_DELETE_MARKER.as_str()) { x_amz_delete_marker.to_str().unwrap() == "true" - } else { false }; + } else { + false + }; let replication_ready = if let Some(x_amz_delete_marker) = h.get("X-Replication-Ready") { x_amz_delete_marker.to_str().unwrap() == "true" - } else { false }; + } else { + false + }; if resp.status() != http::StatusCode::OK && resp.status() != http::StatusCode::PARTIAL_CONTENT { if resp.status() == http::StatusCode::METHOD_NOT_ALLOWED && opts.version_id != "" && delete_marker { let err_resp = ErrorResponse { status_code: resp.status(), - code: s3s::S3ErrorCode::MethodNotAllowed, - message: "the specified method is not allowed against this resource.".to_string(), + code: s3s::S3ErrorCode::MethodNotAllowed, + message: "the specified method is not allowed against this resource.".to_string(), bucket_name: bucket_name.to_string(), - key: object_name.to_string(), + key: object_name.to_string(), ..Default::default() }; return Ok(ObjectInfo { - version_id: match Uuid::from_str(h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap()) { + version_id: match Uuid::from_str(h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap()) { Ok(v) => 
v, - Err(e) => { return Err(std::io::Error::other(e)); } + Err(e) => { + return Err(std::io::Error::other(e)); + } }, is_delete_marker: delete_marker, ..Default::default() @@ -132,12 +143,14 @@ impl TransitionClient { //err_resp } return Ok(ObjectInfo { - version_id: match Uuid::from_str(h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap()) { + version_id: match Uuid::from_str(h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap()) { Ok(v) => v, - Err(e) => { return Err(std::io::Error::other(e)); } + Err(e) => { + return Err(std::io::Error::other(e)); + } }, - is_delete_marker: delete_marker, - replication_ready: replication_ready, + is_delete_marker: delete_marker, + replication_ready: replication_ready, ..Default::default() }); //http_resp_to_error_response(resp, bucket_name, object_name) diff --git a/crates/ecstore/src/client/bucket_cache.rs b/crates/ecstore/src/client/bucket_cache.rs index 4e6f50746..2df73f698 100644 --- a/crates/ecstore/src/client/bucket_cache.rs +++ b/crates/ecstore/src/client/bucket_cache.rs @@ -167,8 +167,7 @@ impl TransitionClient { content_sha256 = UNSIGNED_PAYLOAD.to_string(); } - req - .headers_mut() + req.headers_mut() .insert("X-Amz-Content-Sha256", content_sha256.parse().unwrap()); let req = rustfs_signer::sign_v4(req, 0, &access_key_id, &secret_access_key, &session_token, "us-east-1"); Ok(req) @@ -178,7 +177,7 @@ impl TransitionClient { async fn process_bucket_location_response(mut resp: http::Response, bucket_name: &str) -> Result { //if resp != nil { if resp.status() != StatusCode::OK { - let err_resp = http_resp_to_error_response(resp, vec![], bucket_name, ""); + let err_resp = http_resp_to_error_response(&resp, vec![], bucket_name, ""); match err_resp.code { S3ErrorCode::NotImplemented => { match err_resp.server.as_str() { diff --git a/crates/ecstore/src/client/transition_api.rs b/crates/ecstore/src/client/transition_api.rs index 512ec9954..fba6eda2f 100644 --- a/crates/ecstore/src/client/transition_api.rs +++ 
b/crates/ecstore/src/client/transition_api.rs @@ -19,7 +19,7 @@ #![allow(clippy::all)] use bytes::Bytes; -use futures::Future; +use futures::{Future, StreamExt}; use http::{HeaderMap, HeaderName}; use http::{ HeaderValue, Response, StatusCode, @@ -65,7 +65,9 @@ use crate::{checksum::ChecksumMode, store_api::GetObjectReader}; use rustfs_rio::HashReader; use rustfs_utils::{ net::get_endpoint_url, - retry::{MAX_RETRY, new_retry_timer}, + retry::{ + DEFAULT_RETRY_CAP, DEFAULT_RETRY_UNIT, MAX_JITTER, MAX_RETRY, RetryTimer, is_http_status_retryable, is_s3code_retryable, + }, }; use s3s::S3ErrorCode; use s3s::dto::ReplicationStatus; @@ -186,6 +188,7 @@ impl TransitionClient { clnt.trailing_header_support = opts.trailing_headers && clnt.override_signer_type == SignatureType::SignatureV4; + clnt.max_retries = MAX_RETRY; if opts.max_retries > 0 { clnt.max_retries = opts.max_retries; } @@ -313,12 +316,9 @@ impl TransitionClient { } //} - //let mut retry_timer = RetryTimer::new(); - //while let Some(v) = retry_timer.next().await { - for _ in [1; 1] - /*new_retry_timer(req_retry, default_retry_unit, default_retry_cap, max_jitter)*/ - { - let req = self.new_request(method, metadata).await?; + let mut retry_timer = RetryTimer::new(req_retry, DEFAULT_RETRY_UNIT, DEFAULT_RETRY_CAP, MAX_JITTER, self.random); + while let Some(v) = retry_timer.next().await { + let req = self.new_request(&method, metadata).await?; resp = self.doit(req).await?; @@ -329,7 +329,7 @@ impl TransitionClient { } let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec(); - let err_response = http_resp_to_error_response(resp, b.clone(), &metadata.bucket_name, &metadata.object_name); + let err_response = http_resp_to_error_response(&resp, b.clone(), &metadata.bucket_name, &metadata.object_name); if self.region == "" { match err_response.code { @@ -360,6 +360,14 @@ impl TransitionClient { } } + if is_s3code_retryable(err_response.code.as_str()) { + continue; + } + + if 
is_http_status_retryable(&resp.status()) { + continue; + } + break; } @@ -368,7 +376,7 @@ impl TransitionClient { async fn new_request( &self, - method: http::Method, + method: &http::Method, metadata: &mut RequestMetadata, ) -> Result, std::io::Error> { let location = metadata.bucket_location.clone(); @@ -387,7 +395,11 @@ impl TransitionClient { &metadata.query_values, )?; - let Ok(mut req) = Request::builder().method(method).uri(target_url.to_string()).body(Body::empty()) else { + let Ok(mut req) = Request::builder() + .method(method) + .uri(target_url.to_string()) + .body(Body::empty()) + else { return Err(std::io::Error::other("create request error")); }; @@ -428,13 +440,7 @@ impl TransitionClient { } } if signer_type == SignatureType::SignatureV2 { - req = rustfs_signer::pre_sign_v2( - req, - &access_key_id, - &secret_access_key, - metadata.expires, - is_virtual_host, - ); + req = rustfs_signer::pre_sign_v2(req, &access_key_id, &secret_access_key, metadata.expires, is_virtual_host); } else if signer_type == SignatureType::SignatureV4 { req = rustfs_signer::pre_sign_v4( req, @@ -458,9 +464,7 @@ impl TransitionClient { //req.content_length = metadata.content_length; if metadata.content_length <= -1 { let chunked_value = HeaderValue::from_str(&vec!["chunked"].join(",")).expect("err"); - req - .headers_mut() - .insert(http::header::TRANSFER_ENCODING, chunked_value); + req.headers_mut().insert(http::header::TRANSFER_ENCODING, chunked_value); } if metadata.content_md5_base64.len() > 0 { @@ -473,8 +477,7 @@ impl TransitionClient { } if signer_type == SignatureType::SignatureV2 { - req = - rustfs_signer::sign_v2(req, metadata.content_length, &access_key_id, &secret_access_key, is_virtual_host); + req = rustfs_signer::sign_v2(req, metadata.content_length, &access_key_id, &secret_access_key, is_virtual_host); } else if metadata.stream_sha256 && !self.secure { if metadata.trailer.len() > 0 { for (_, v) in &metadata.trailer { @@ -491,8 +494,8 @@ impl TransitionClient { } 
else if metadata.trailer.len() > 0 { sha_header = UNSIGNED_PAYLOAD_TRAILER.to_string(); } - req - .headers_mut().insert("X-Amz-Content-Sha256".parse::().unwrap(), sha_header.parse().expect("err")); + req.headers_mut() + .insert("X-Amz-Content-Sha256".parse::().unwrap(), sha_header.parse().expect("err")); req = rustfs_signer::sign_v4_trailer( req, @@ -516,7 +519,7 @@ impl TransitionClient { } Ok(req) - } + } pub fn set_user_agent(&self, req: &mut Request) { let headers = req.headers_mut(); diff --git a/crates/utils/src/retry.rs b/crates/utils/src/retry.rs index ce07253af..2ba5e6ab9 100644 --- a/crates/utils/src/retry.rs +++ b/crates/utils/src/retry.rs @@ -12,13 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::time::Duration; +use futures::Stream; +use hyper::http; +use lazy_static::lazy_static; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::{Duration, Instant}, +}; +use tokio::time::interval; pub const MAX_RETRY: i64 = 10; pub const MAX_JITTER: f64 = 1.0; pub const NO_JITTER: f64 = 0.0; -/* +pub const DEFAULT_RETRY_UNIT: Duration = Duration::from_millis(200); +pub const DEFAULT_RETRY_CAP: Duration = Duration::from_secs(1); + struct Delay { when: Instant, } @@ -26,30 +36,35 @@ struct Delay { impl Future for Delay { type Output = &'static str; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) - -> Poll<&'static str> - { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<&'static str> { if Instant::now() >= self.when { println!("Hello world"); Poll::Ready("done") } else { - // Ignore this line for now. 
cx.waker().wake_by_ref(); Poll::Pending } } } -struct RetryTimer { - rem: usize, - delay: Delay, +pub struct RetryTimer { + base_sleep: Duration, + max_sleep: Duration, + jitter: f64, + random: u64, + rem: i64, + delay: Duration, } impl RetryTimer { - fn new() -> Self { + pub fn new(max_retry: i64, base_sleep: Duration, max_sleep: Duration, jitter: f64, random: u64) -> Self { Self { - rem: 3, - delay: Delay { when: Instant::now() } + base_sleep, + max_sleep, + jitter, + random, + rem: max_retry, + delay: Duration::from_millis(0), } } } @@ -57,26 +72,83 @@ impl RetryTimer { impl Stream for RetryTimer { type Item = (); - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) - -> Poll> - { + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let jitter = self.jitter.clamp(NO_JITTER, MAX_JITTER); + + let attempt = MAX_RETRY - self.rem; + let mut sleep = self.base_sleep * (1 << attempt); + if sleep > self.max_sleep { + sleep = self.max_sleep; + } + if (jitter - NO_JITTER).abs() > 1e-9 { + sleep -= sleep * self.random as u32 * jitter as u32; + } + if self.rem == 0 { - // No more delays return Poll::Ready(None); } - match Pin::new(&mut self.delay).poll(cx) { - Poll::Ready(_) => { - let when = self.delay.when + Duration::from_millis(10); - self.delay = Delay { when }; - self.rem -= 1; - Poll::Ready(Some(())) - } + let when = self.delay + sleep; + self.delay = when; + self.rem -= 1; + let mut t = interval(when); + match t.poll_tick(cx) { + Poll::Ready(_) => Poll::Ready(Some(())), Poll::Pending => Poll::Pending, } } -}*/ +} + +lazy_static! 
{ + static ref RETRYABLE_S3CODES: Vec = vec![ + "RequestError".to_string(), + "RequestTimeout".to_string(), + "Throttling".to_string(), + "ThrottlingException".to_string(), + "RequestLimitExceeded".to_string(), + "RequestThrottled".to_string(), + "InternalError".to_string(), + "ExpiredToken".to_string(), + "ExpiredTokenException".to_string(), + "SlowDown".to_string(), + ]; + + static ref RETRYABLE_HTTP_STATUSCODES: Vec = vec![ + http::StatusCode::REQUEST_TIMEOUT, + http::StatusCode::TOO_MANY_REQUESTS, + //499, + http::StatusCode::INTERNAL_SERVER_ERROR, + http::StatusCode::BAD_GATEWAY, + http::StatusCode::SERVICE_UNAVAILABLE, + http::StatusCode::GATEWAY_TIMEOUT, + //520, + ]; +} -pub fn new_retry_timer(_max_retry: i32, _base_sleep: Duration, _max_sleep: Duration, _jitter: f64) -> Vec { +pub fn is_s3code_retryable(s3code: &str) -> bool { + RETRYABLE_S3CODES.contains(&s3code.to_string()) +} + +pub fn is_http_status_retryable(http_statuscode: &http::StatusCode) -> bool { + RETRYABLE_HTTP_STATUSCODES.contains(http_statuscode) +} + +pub fn is_request_error_retryable(err: std::io::Error) -> bool { + /*if err == Err::Canceled) || err == Err::DeadlineExceeded) { + return ctx.Err() == nil; + } + let ue = err.(*url.Error); + if ue.is_ok() { + let e = ue.Unwrap(); + switch e.(type) { + case x509.UnknownAuthorityError: + return false; + } + switch e.Error() { + case "http: server gave HTTP response to HTTPS client": + return false; + } + } + true*/ todo!(); } From 96a33156854fb518432cd03566fea648d7cbb72a Mon Sep 17 00:00:00 2001 From: likewu Date: Mon, 7 Jul 2025 21:24:31 +0800 Subject: [PATCH 07/26] lock file --- Cargo.lock | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 1f02a8e7e..6f59b89f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8312,8 +8312,12 @@ dependencies = [ "http 1.3.1", "hyper 1.6.0", "lazy_static", + "rand 0.9.1", "rustfs-utils", + "s3s", + "serde", "serde_urlencoded", + "tempfile", "time", "tracing", ] From 
986ecd5e220c39da3fe4df7bb468e4c3bc2b39dc Mon Sep 17 00:00:00 2001 From: likewu Date: Tue, 8 Jul 2025 14:53:43 +0800 Subject: [PATCH 08/26] make fmt --- .../ecstore/src/client/api_get_object_acl.rs | 24 ++-- .../src/client/api_get_object_attributes.rs | 103 ++++++++++-------- .../ecstore/src/client/api_get_object_file.rs | 20 ++-- crates/ecstore/src/client/api_put_object.rs | 2 +- crates/ecstore/src/client/mod.rs | 8 +- crates/ecstore/src/set_disk.rs | 7 -- .../signer/src/request_signature_streaming.rs | 2 +- crates/signer/src/request_signature_v2.rs | 3 +- crates/signer/src/request_signature_v4.rs | 12 +- crates/utils/src/retry.rs | 24 +--- 10 files changed, 102 insertions(+), 103 deletions(-) diff --git a/crates/ecstore/src/client/api_get_object_acl.rs b/crates/ecstore/src/client/api_get_object_acl.rs index 1a9167a38..1eb769692 100644 --- a/crates/ecstore/src/client/api_get_object_acl.rs +++ b/crates/ecstore/src/client/api_get_object_acl.rs @@ -21,40 +21,40 @@ use bytes::Bytes; use http::{HeaderMap, HeaderValue}; use s3s::dto::Owner; -use std::io::Cursor; use std::collections::HashMap; +use std::io::Cursor; use tokio::io::BufReader; -use rustfs_utils::EMPTY_STRING_SHA256_HASH; use crate::client::{ api_error_response::{err_invalid_argument, http_resp_to_error_response}, api_get_options::GetObjectOptions, - transition_api::{to_object_info, ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient}, + transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info}, }; +use rustfs_utils::EMPTY_STRING_SHA256_HASH; #[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)] struct Grantee { - id: String, + id: String, display_name: String, - uri: String, + uri: String, } #[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)] struct Grant { - grantee: Grantee, + grantee: Grantee, permission: String, } #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] pub struct AccessControlList 
{ - pub grant: Vec, + pub grant: Vec, pub permission: String, } #[derive(Debug, Default, serde::Deserialize)] pub struct AccessControlPolicy { #[serde(skip)] - owner: Owner, + owner: Owner, pub access_control_list: AccessControlList, } @@ -98,7 +98,9 @@ impl TransitionClient { } }; - let mut obj_info = self.stat_object(bucket_name, object_name, &GetObjectOptions::default()).await?; + let mut obj_info = self + .stat_object(bucket_name, object_name, &GetObjectOptions::default()) + .await?; obj_info.owner.display_name = res.owner.display_name.clone(); obj_info.owner.id = res.owner.id.clone(); @@ -107,7 +109,9 @@ impl TransitionClient { let canned_acl = get_canned_acl(&res); if canned_acl != "" { - obj_info.metadata.insert("X-Amz-Acl", HeaderValue::from_str(&canned_acl).unwrap()); + obj_info + .metadata + .insert("X-Amz-Acl", HeaderValue::from_str(&canned_acl).unwrap()); return Ok(obj_info); } diff --git a/crates/ecstore/src/client/api_get_object_attributes.rs b/crates/ecstore/src/client/api_get_object_attributes.rs index 9204e7dd5..705d5f628 100644 --- a/crates/ecstore/src/client/api_get_object_attributes.rs +++ b/crates/ecstore/src/client/api_get_object_attributes.rs @@ -20,33 +20,35 @@ use bytes::Bytes; use http::{HeaderMap, HeaderValue}; -use time::OffsetDateTime; -use std::io::Cursor; use std::collections::HashMap; +use std::io::Cursor; +use time::OffsetDateTime; use tokio::io::BufReader; -use s3s::{Body, dto::Owner}; -use s3s::header::{X_AMZ_OBJECT_ATTRIBUTES, X_AMZ_DELETE_MARKER, X_AMZ_METADATA_DIRECTIVE, X_AMZ_VERSION_ID, - X_AMZ_REQUEST_CHARGED, X_AMZ_RESTORE, X_AMZ_PART_NUMBER_MARKER, X_AMZ_MAX_PARTS,}; -use rustfs_utils::EMPTY_STRING_SHA256_HASH; use crate::client::constants::{GET_OBJECT_ATTRIBUTES_MAX_PARTS, GET_OBJECT_ATTRIBUTES_TAGS, ISO8601_DATEFORMAT}; +use rustfs_utils::EMPTY_STRING_SHA256_HASH; +use s3s::header::{ + X_AMZ_DELETE_MARKER, X_AMZ_MAX_PARTS, X_AMZ_METADATA_DIRECTIVE, X_AMZ_OBJECT_ATTRIBUTES, X_AMZ_PART_NUMBER_MARKER, + 
X_AMZ_REQUEST_CHARGED, X_AMZ_RESTORE, X_AMZ_VERSION_ID, +}; +use s3s::{Body, dto::Owner}; use crate::client::{ api_error_response::err_invalid_argument, + api_get_object_acl::AccessControlPolicy, api_get_options::GetObjectOptions, transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info}, - api_get_object_acl::AccessControlPolicy, }; struct ObjectAttributesOptions { - max_parts: i64, - version_id: String, - part_number_marker: i64, + max_parts: i64, + version_id: String, + part_number_marker: i64, //server_side_encryption: encrypt::ServerSide, } struct ObjectAttributes { - version_id: String, + version_id: String, last_modified: OffsetDateTime, object_attributes_response: ObjectAttributesResponse, } @@ -54,7 +56,7 @@ struct ObjectAttributes { impl ObjectAttributes { fn new() -> Self { Self { - version_id: "".to_string(), + version_id: "".to_string(), last_modified: OffsetDateTime::now_utc(), object_attributes_response: ObjectAttributesResponse::new(), } @@ -63,18 +65,18 @@ impl ObjectAttributes { #[derive(Debug, Default, serde::Deserialize)] pub struct Checksum { - checksum_crc32: String, + checksum_crc32: String, checksum_crc32c: String, - checksum_sha1: String, + checksum_sha1: String, checksum_sha256: String, } impl Checksum { fn new() -> Self { Self { - checksum_crc32: "".to_string(), + checksum_crc32: "".to_string(), checksum_crc32c: "".to_string(), - checksum_sha1: "".to_string(), + checksum_sha1: "".to_string(), checksum_sha256: "".to_string(), } } @@ -82,62 +84,62 @@ impl Checksum { #[derive(Debug, Default, serde::Deserialize)] struct ObjectParts { - parts_count: i64, - part_number_marker: i64, + parts_count: i64, + part_number_marker: i64, next_part_number_marker: i64, - max_parts: i64, - is_truncated: bool, - parts: Vec, + max_parts: i64, + is_truncated: bool, + parts: Vec, } impl ObjectParts { fn new() -> Self { Self { - parts_count: 0, - part_number_marker: 0, + parts_count: 0, + part_number_marker: 0, 
next_part_number_marker: 0, - max_parts: 0, - is_truncated: false, - parts: Vec::new(), + max_parts: 0, + is_truncated: false, + parts: Vec::new(), } } } #[derive(Debug, Default, serde::Deserialize)] struct ObjectAttributesResponse { - etag: String, + etag: String, storage_class: String, - object_size: i64, - checksum: Checksum, - object_parts: ObjectParts, + object_size: i64, + checksum: Checksum, + object_parts: ObjectParts, } impl ObjectAttributesResponse { fn new() -> Self { Self { - etag: "".to_string(), + etag: "".to_string(), storage_class: "".to_string(), - object_size: 0, - checksum: Checksum::new(), - object_parts: ObjectParts::new(), + object_size: 0, + checksum: Checksum::new(), + object_parts: ObjectParts::new(), } } } #[derive(Debug, Default, serde::Deserialize)] struct ObjectAttributePart { - checksum_crc32: String, + checksum_crc32: String, checksum_crc32c: String, - checksum_sha1: String, + checksum_sha1: String, checksum_sha256: String, - part_number: i64, - size: i64, + part_number: i64, + size: i64, } impl ObjectAttributes { pub async fn parse_response(&mut self, resp: &mut http::Response) -> Result<(), std::io::Error> { let h = resp.headers(); - let mod_time = OffsetDateTime::parse(h.get("Last-Modified").unwrap().to_str().unwrap(), ISO8601_DATEFORMAT).unwrap(); //RFC7231Time + let mod_time = OffsetDateTime::parse(h.get("Last-Modified").unwrap().to_str().unwrap(), ISO8601_DATEFORMAT).unwrap(); //RFC7231Time self.last_modified = mod_time; self.version_id = h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap().to_string(); @@ -155,7 +157,12 @@ impl ObjectAttributes { } impl TransitionClient { - pub async fn get_object_attributes(&self, bucket_name: &str, object_name: &str, opts: ObjectAttributesOptions) -> Result { + pub async fn get_object_attributes( + &self, + bucket_name: &str, + object_name: &str, + opts: ObjectAttributesOptions, + ) -> Result { let mut url_values = HashMap::new(); url_values.insert("attributes".to_string(), "".to_string()); if 
opts.version_id != "" { @@ -166,13 +173,19 @@ impl TransitionClient { headers.insert(X_AMZ_OBJECT_ATTRIBUTES, HeaderValue::from_str(GET_OBJECT_ATTRIBUTES_TAGS).unwrap()); if opts.part_number_marker > 0 { - headers.insert(X_AMZ_PART_NUMBER_MARKER, HeaderValue::from_str(&opts.part_number_marker.to_string()).unwrap()); + headers.insert( + X_AMZ_PART_NUMBER_MARKER, + HeaderValue::from_str(&opts.part_number_marker.to_string()).unwrap(), + ); } if opts.max_parts > 0 { headers.insert(X_AMZ_MAX_PARTS, HeaderValue::from_str(&opts.max_parts.to_string()).unwrap()); } else { - headers.insert(X_AMZ_MAX_PARTS, HeaderValue::from_str(&GET_OBJECT_ATTRIBUTES_MAX_PARTS.to_string()).unwrap()); + headers.insert( + X_AMZ_MAX_PARTS, + HeaderValue::from_str(&GET_OBJECT_ATTRIBUTES_MAX_PARTS.to_string()).unwrap(), + ); } /*if opts.server_side_encryption.is_some() { @@ -205,7 +218,9 @@ impl TransitionClient { let h = resp.headers(); let has_etag = h.get("ETag").unwrap().to_str().unwrap(); if !has_etag.is_empty() { - return Err(std::io::Error::other("get_object_attributes is not supported by the current endpoint version")); + return Err(std::io::Error::other( + "get_object_attributes is not supported by the current endpoint version", + )); } if resp.status() != http::StatusCode::OK { @@ -226,4 +241,4 @@ impl TransitionClient { Ok(oa) } -} \ No newline at end of file +} diff --git a/crates/ecstore/src/client/api_get_object_file.rs b/crates/ecstore/src/client/api_get_object_file.rs index 6bda20699..4c268497a 100644 --- a/crates/ecstore/src/client/api_get_object_file.rs +++ b/crates/ecstore/src/client/api_get_object_file.rs @@ -21,15 +21,15 @@ use bytes::Bytes; use http::HeaderMap; use std::io::Cursor; -use tokio::io::BufReader; #[cfg(not(windows))] -use std::os::unix::fs::PermissionsExt; +use std::os::unix::fs::MetadataExt; #[cfg(not(windows))] use std::os::unix::fs::OpenOptionsExt; #[cfg(not(windows))] -use std::os::unix::fs::MetadataExt; +use std::os::unix::fs::PermissionsExt; #[cfg(windows)] 
use std::os::windows::fs::MetadataExt; +use tokio::io::BufReader; use crate::client::{ api_error_response::err_invalid_argument, @@ -38,14 +38,20 @@ use crate::client::{ }; impl TransitionClient { - pub async fn fget_object(&self, bucket_name: &str, object_name: &str, file_path: &str, opts: GetObjectOptions) -> Result<(), std::io::Error> { + pub async fn fget_object( + &self, + bucket_name: &str, + object_name: &str, + file_path: &str, + opts: GetObjectOptions, + ) -> Result<(), std::io::Error> { match std::fs::metadata(file_path) { Ok(file_path_stat) => { let ft = file_path_stat.file_type(); if ft.is_dir() { return Err(std::io::Error::other(err_invalid_argument("filename is a directory."))); } - }, + } Err(err) => { return Err(std::io::Error::other(err)); } @@ -77,7 +83,7 @@ impl TransitionClient { }; let mut file_part_path = file_path.to_string(); - file_part_path.push_str(""/*sum_sha256_hex(object_stat.etag.as_bytes())*/); + file_part_path.push_str("" /*sum_sha256_hex(object_stat.etag.as_bytes())*/); file_part_path.push_str(".part.rustfs"); #[cfg(not(windows))] @@ -138,4 +144,4 @@ impl TransitionClient { Ok(()) } -} \ No newline at end of file +} diff --git a/crates/ecstore/src/client/api_put_object.rs b/crates/ecstore/src/client/api_put_object.rs index b0d81c6e4..544f16dc9 100644 --- a/crates/ecstore/src/client/api_put_object.rs +++ b/crates/ecstore/src/client/api_put_object.rs @@ -85,7 +85,7 @@ pub struct PutObjectOptions { pub expires: OffsetDateTime, pub mode: ObjectLockRetentionMode, pub retain_until_date: OffsetDateTime, - //pub server_side_encryption: encrypt.ServerSide, + //pub server_side_encryption: encrypt::ServerSide, pub num_threads: u64, pub storage_class: String, pub website_redirect_location: String, diff --git a/crates/ecstore/src/client/mod.rs b/crates/ecstore/src/client/mod.rs index 7cc781a69..9a87b475e 100644 --- a/crates/ecstore/src/client/mod.rs +++ b/crates/ecstore/src/client/mod.rs @@ -16,6 +16,9 @@ pub mod admin_handler_utils; pub mod 
api_bucket_policy; pub mod api_error_response; pub mod api_get_object; +pub mod api_get_object_acl; +pub mod api_get_object_attributes; +pub mod api_get_object_file; pub mod api_get_options; pub mod api_list; pub mod api_put_object; @@ -24,11 +27,8 @@ pub mod api_put_object_multipart; pub mod api_put_object_streaming; pub mod api_remove; pub mod api_restore; -pub mod api_stat; -pub mod api_get_object_acl; -pub mod api_get_object_attributes; -pub mod api_get_object_file; pub mod api_s3_datatypes; +pub mod api_stat; pub mod bucket_cache; pub mod constants; pub mod credentials; diff --git a/crates/ecstore/src/set_disk.rs b/crates/ecstore/src/set_disk.rs index a0b619074..6b0c19039 100644 --- a/crates/ecstore/src/set_disk.rs +++ b/crates/ecstore/src/set_disk.rs @@ -4667,11 +4667,9 @@ impl StorageAPI for SetDisks { return to_object_err(err, vec![bucket, object]); } }*/ - //let traceFn = GLOBAL_LifecycleSys.trace(fi.to_object_info(bucket, object, opts.Versioned || opts.VersionSuspended)); let dest_obj = gen_transition_objname(bucket); if let Err(err) = dest_obj { - //traceFn(ILMTransition, nil, err) return Err(to_object_err(err, vec![])); } let dest_obj = dest_obj.unwrap(); @@ -4679,8 +4677,6 @@ impl StorageAPI for SetDisks { let oi = ObjectInfo::from_file_info(&fi, bucket, object, opts.versioned || opts.version_suspended); let (pr, mut pw) = tokio::io::duplex(fi.erasure.block_size); - //let h = HeaderMap::new(); - //let reader = ReaderImpl::ObjectBody(GetObjectReader {stream: StreamingBlob::wrap(tokio_util::io::ReaderStream::new(pr)), object_info: oi}); let reader = ReaderImpl::ObjectBody(GetObjectReader { stream: Box::new(pr), object_info: oi, @@ -4717,9 +4713,7 @@ impl StorageAPI for SetDisks { m }) .await; - //pr.CloseWithError(err); if let Err(err) = rv { - //traceFn(ILMTransition, nil, err) return Err(StorageError::Io(err)); } let rv = rv.unwrap(); @@ -4783,7 +4777,6 @@ impl StorageAPI for SetDisks { //if err != nil { // return set_restore_header_fn(&mut oi, 
Some(toObjectErr(err, bucket, object))); //} - //defer gr.Close() let hash_reader = HashReader::new(gr, gr.obj_info.size, "", "", gr.obj_info.size); let p_reader = PutObjReader::new(StreamingBlob::from(Box::pin(hash_reader)), hash_reader.size()); if let Err(err) = self.put_object(bucket, object, &mut p_reader, &ropts).await { diff --git a/crates/signer/src/request_signature_streaming.rs b/crates/signer/src/request_signature_streaming.rs index 7f6c2ff0d..f4c36b17d 100644 --- a/crates/signer/src/request_signature_streaming.rs +++ b/crates/signer/src/request_signature_streaming.rs @@ -17,9 +17,9 @@ use lazy_static::lazy_static; use std::collections::HashMap; use time::{OffsetDateTime, macros::format_description}; -use s3s::Body; use super::request_signature_v4::{SERVICE_TYPE_S3, get_scope, get_signature, get_signing_key}; use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH; +use s3s::Body; const STREAMING_SIGN_ALGORITHM: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"; const STREAMING_SIGN_TRAILER_ALGORITHM: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"; diff --git a/crates/signer/src/request_signature_v2.rs b/crates/signer/src/request_signature_v2.rs index ded0c92c6..176667052 100644 --- a/crates/signer/src/request_signature_v2.rs +++ b/crates/signer/src/request_signature_v2.rs @@ -19,15 +19,14 @@ use std::collections::HashMap; use std::fmt::Write; use time::{OffsetDateTime, format_description}; -use s3s::Body; use super::utils::get_host_addr; use rustfs_utils::crypto::{base64_encode, hex, hmac_sha1}; +use s3s::Body; const _SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256"; const SIGN_V2_ALGORITHM: &str = "AWS"; fn encode_url2path(req: &request::Request, _virtual_host: bool) -> String { - req.uri().path().to_string() } diff --git a/crates/signer/src/request_signature_v4.rs b/crates/signer/src/request_signature_v4.rs index 95f14a294..5cc78204a 100644 --- a/crates/signer/src/request_signature_v4.rs +++ b/crates/signer/src/request_signature_v4.rs @@ -22,11 +22,11 @@ use 
std::fmt::Write; use time::{OffsetDateTime, macros::format_description}; use tracing::debug; -use rustfs_utils::crypto::{hex, hex_sha256, hmac_sha256}; -use s3s::Body; use super::constants::UNSIGNED_PAYLOAD; use super::request_signature_streaming_unsigned_trailer::streaming_unsigned_v4; use super::utils::{get_host_addr, sign_v4_trim_all}; +use rustfs_utils::crypto::{hex, hex_sha256, hmac_sha256}; +use s3s::Body; pub const SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256"; pub const SERVICE_TYPE_S3: &str = "s3"; @@ -270,7 +270,6 @@ pub fn pre_sign_v4( .unwrap(), ); - *req.uri_mut() = Uri::from_parts(parts).unwrap(); req @@ -282,7 +281,12 @@ fn _post_pre_sign_signature_v4(policy_base64: &str, t: OffsetDateTime, secret_ac get_signature(signing_key, policy_base64) } -fn _sign_v4_sts(req: request::Request, access_key_id: &str, secret_access_key: &str, location: &str) -> request::Request { +fn _sign_v4_sts( + req: request::Request, + access_key_id: &str, + secret_access_key: &str, + location: &str, +) -> request::Request { sign_v4_inner(req, 0, access_key_id, secret_access_key, "", location, SERVICE_TYPE_STS, HeaderMap::new()) } diff --git a/crates/utils/src/retry.rs b/crates/utils/src/retry.rs index 2ba5e6ab9..4e9a06425 100644 --- a/crates/utils/src/retry.rs +++ b/crates/utils/src/retry.rs @@ -29,31 +29,12 @@ pub const NO_JITTER: f64 = 0.0; pub const DEFAULT_RETRY_UNIT: Duration = Duration::from_millis(200); pub const DEFAULT_RETRY_CAP: Duration = Duration::from_secs(1); -struct Delay { - when: Instant, -} - -impl Future for Delay { - type Output = &'static str; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<&'static str> { - if Instant::now() >= self.when { - println!("Hello world"); - Poll::Ready("done") - } else { - cx.waker().wake_by_ref(); - Poll::Pending - } - } -} - pub struct RetryTimer { base_sleep: Duration, max_sleep: Duration, jitter: f64, random: u64, rem: i64, - delay: Duration, } impl RetryTimer { @@ -64,7 +45,6 @@ impl RetryTimer { jitter, 
random, rem: max_retry, - delay: Duration::from_millis(0), } } } @@ -88,10 +68,8 @@ impl Stream for RetryTimer { return Poll::Ready(None); } - let when = self.delay + sleep; - self.delay = when; self.rem -= 1; - let mut t = interval(when); + let mut t = interval(sleep); match t.poll_tick(cx) { Poll::Ready(_) => Poll::Ready(Some(())), Poll::Pending => Poll::Pending, From 2fa631f568cb3f7163e9f0f778626c5909d5293b Mon Sep 17 00:00:00 2001 From: likewu Date: Tue, 8 Jul 2025 16:26:04 +0800 Subject: [PATCH 09/26] fix --- crates/ecstore/src/client/api_bucket_policy.rs | 4 ++-- crates/ecstore/src/client/api_get_object_acl.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/ecstore/src/client/api_bucket_policy.rs b/crates/ecstore/src/client/api_bucket_policy.rs index 8ed9c6065..9512e911b 100644 --- a/crates/ecstore/src/client/api_bucket_policy.rs +++ b/crates/ecstore/src/client/api_bucket_policy.rs @@ -63,7 +63,7 @@ impl TransitionClient { //defer closeResponse(resp) //if resp != nil { if resp.status() != StatusCode::NO_CONTENT && resp.status() != StatusCode::OK { - return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, ""))); + return Err(std::io::Error::other(http_resp_to_error_response(&resp, vec![], bucket_name, ""))); } //} Ok(()) @@ -98,7 +98,7 @@ impl TransitionClient { //defer closeResponse(resp) if resp.status() != StatusCode::NO_CONTENT { - return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, ""))); + return Err(std::io::Error::other(http_resp_to_error_response(&resp, vec![], bucket_name, ""))); } Ok(()) diff --git a/crates/ecstore/src/client/api_get_object_acl.rs b/crates/ecstore/src/client/api_get_object_acl.rs index 0ee14a426..06cacfb55 100644 --- a/crates/ecstore/src/client/api_get_object_acl.rs +++ b/crates/ecstore/src/client/api_get_object_acl.rs @@ -87,7 +87,7 @@ impl TransitionClient { if resp.status() != http::StatusCode::OK { let b = 
resp.body().bytes().expect("err").to_vec(); - return Err(std::io::Error::other(http_resp_to_error_response(resp, b, bucket_name, object_name))); + return Err(std::io::Error::other(http_resp_to_error_response(&resp, b, bucket_name, object_name))); } let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec(); From 59f69680e6e30ac15bd6005fdea495bd77ddc4f4 Mon Sep 17 00:00:00 2001 From: likewu Date: Wed, 9 Jul 2025 17:43:24 +0800 Subject: [PATCH 10/26] restore object --- .../bucket/lifecycle/bucket_lifecycle_ops.rs | 4 +- .../ecstore/src/bucket/lifecycle/lifecycle.rs | 20 +- .../src/client/api_put_object_common.rs | 1 - .../src/client/api_put_object_multipart.rs | 1 - crates/ecstore/src/cmd/bucket_replication.rs | 2 + crates/ecstore/src/store_api.rs | 4 + rustfs/src/admin/handlers/tier.rs | 179 ----------------- rustfs/src/storage/ecfs.rs | 188 ++++++++++++++++-- 8 files changed, 188 insertions(+), 211 deletions(-) diff --git a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs index 14cfe9985..eaf64b935 100644 --- a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs +++ b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs @@ -810,8 +810,8 @@ impl LifecycleOps for ObjectInfo { num_versions: self.num_versions, delete_marker: self.delete_marker, successor_mod_time: self.successor_mod_time, - //restore_ongoing: self.restore_ongoing, - //restore_expires: self.restore_expires, + restore_ongoing: self.restore_ongoing, + restore_expires: self.restore_expires, transition_status: self.transitioned_object.status.clone(), ..Default::default() } diff --git a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs index 8eff441d2..53f5f64ba 100644 --- a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs +++ b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs @@ -174,7 +174,7 @@ pub trait Lifecycle { async fn has_transition(&self) -> bool; fn 
has_expiry(&self) -> bool; async fn has_active_rules(&self, prefix: &str) -> bool; - async fn validate(&self, lr_retention: bool) -> Result<(), std::io::Error>; + async fn validate(&self, lr: &ObjectLockConfiguration) -> Result<(), std::io::Error>; async fn filter_rules(&self, obj: &ObjectOpts) -> Option>; async fn eval(&self, obj: &ObjectOpts) -> Event; async fn eval_inner(&self, obj: &ObjectOpts, now: OffsetDateTime) -> Event; @@ -255,7 +255,7 @@ impl Lifecycle for BucketLifecycleConfiguration { false } - async fn validate(&self, lr_retention: bool) -> Result<(), std::io::Error> { + async fn validate(&self, lr: &ObjectLockConfiguration) -> Result<(), std::io::Error> { if self.rules.len() > 1000 { return Err(std::io::Error::other(ERR_LIFECYCLE_TOO_MANY_RULES)); } @@ -265,13 +265,15 @@ impl Lifecycle for BucketLifecycleConfiguration { for r in &self.rules { r.validate()?; - if let Some(expiration) = r.expiration.as_ref() { - if let Some(expired_object_delete_marker) = expiration.expired_object_delete_marker { - if lr_retention && (expired_object_delete_marker) { - return Err(std::io::Error::other(ERR_LIFECYCLE_BUCKET_LOCKED)); + /*if let Some(object_lock_enabled) = lr.object_lock_enabled.as_ref() { + if let Some(expiration) = r.expiration.as_ref() { + if let Some(expired_object_delete_marker) = expiration.expired_object_delete_marker { + if object_lock_enabled.as_str() == ObjectLockEnabled::ENABLED && (expired_object_delete_marker) { + return Err(std::io::Error::other(ERR_LIFECYCLE_BUCKET_LOCKED)); + } } - } - } + } + }*/ } for (i, _) in self.rules.iter().enumerate() { if i == self.rules.len() - 1 { @@ -642,7 +644,7 @@ pub fn expected_expiry_time(mod_time: OffsetDateTime, days: i32) -> OffsetDateTi } let t = mod_time .to_offset(offset!(-0:00:00)) - .saturating_add(Duration::days(days as i64)); //debug + .saturating_add(Duration::days(days as i64)); let mut hour = 3600; if let Ok(env_ilm_hour) = env::var("_RUSTFS_ILM_HOUR") { if let Ok(num_hour) = 
env_ilm_hour.parse::() { diff --git a/crates/ecstore/src/client/api_put_object_common.rs b/crates/ecstore/src/client/api_put_object_common.rs index 0fbec8f45..6652d223f 100644 --- a/crates/ecstore/src/client/api_put_object_common.rs +++ b/crates/ecstore/src/client/api_put_object_common.rs @@ -12,7 +12,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#![allow(unused_imports)] #![allow(unused_variables)] #![allow(unused_mut)] #![allow(unused_assignments)] diff --git a/crates/ecstore/src/client/api_put_object_multipart.rs b/crates/ecstore/src/client/api_put_object_multipart.rs index b02979a4e..a315c0907 100644 --- a/crates/ecstore/src/client/api_put_object_multipart.rs +++ b/crates/ecstore/src/client/api_put_object_multipart.rs @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-#![allow(unused_imports)] #![allow(unused_variables)] #![allow(unused_mut)] #![allow(unused_assignments)] diff --git a/crates/ecstore/src/cmd/bucket_replication.rs b/crates/ecstore/src/cmd/bucket_replication.rs index e30a56db2..10c1d6cbf 100644 --- a/crates/ecstore/src/cmd/bucket_replication.rs +++ b/crates/ecstore/src/cmd/bucket_replication.rs @@ -2014,6 +2014,8 @@ impl ReplicateObjectInfo { version_id: Uuid::try_parse(&self.version_id).ok(), delete_marker: self.delete_marker, transitioned_object: TransitionedObject::default(), + restore_ongoing: false, + restore_expires: Some(OffsetDateTime::now_utc()), user_tags: self.user_tags.clone(), parts: Vec::new(), is_latest: true, diff --git a/crates/ecstore/src/store_api.rs b/crates/ecstore/src/store_api.rs index a5f2add95..b1ff7b269 100644 --- a/crates/ecstore/src/store_api.rs +++ b/crates/ecstore/src/store_api.rs @@ -384,6 +384,8 @@ pub struct ObjectInfo { pub version_id: Option, pub delete_marker: bool, pub transitioned_object: TransitionedObject, + pub restore_ongoing: bool, + pub restore_expires: Option, pub user_tags: String, pub parts: Vec, pub is_latest: bool, @@ -418,6 +420,8 @@ impl Clone for ObjectInfo { version_id: self.version_id, delete_marker: self.delete_marker, transitioned_object: self.transitioned_object.clone(), + restore_ongoing: self.restore_ongoing, + restore_expires: self.restore_expires, user_tags: self.user_tags.clone(), parts: self.parts.clone(), is_latest: self.is_latest, diff --git a/rustfs/src/admin/handlers/tier.rs b/rustfs/src/admin/handlers/tier.rs index 35c8b4c16..a793a6337 100644 --- a/rustfs/src/admin/handlers/tier.rs +++ b/rustfs/src/admin/handlers/tier.rs @@ -461,182 +461,3 @@ impl Operation for ClearTier { Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)) } } - -/*pub struct PostRestoreObject {} -#[async_trait::async_trait] -impl Operation for PostRestoreObject { - async fn call(&self, req: S3Request, params: Params<'_, '_>) -> S3Result> { - let query = { - if 
let Some(query) = req.uri.query() { - let input: PostRestoreObject = - from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed"))?; - input - } else { - PostRestoreObject::default() - } - }; - - let bucket = params.bucket; - if let Err(e) = un_escape_path(params.object) { - warn!("post restore object failed, e: {:?}", e); - return Err(S3Error::with_message(S3ErrorCode::Custom("PostRestoreObjectFailed".into()), "post restore object failed")); - } - - let Some(store) = new_object_layer_fn() else { - return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); - }; - - let get_object_info = store.get_object_info(); - - if Err(err) = check_request_auth_type(req, policy::RestoreObjectAction, bucket, object) { - return Err(S3Error::with_message(S3ErrorCode::Custom("PostRestoreObjectFailed".into()), "post restore object failed")); - } - - if req.content_length <= 0 { - return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); - } - let Some(opts) = post_restore_opts(req, bucket, object) else { - return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); - }; - - let Some(obj_info) = getObjectInfo(ctx, bucket, object, opts) else { - return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); - }; - - if obj_info.transitioned_object.status != lifecycle::TRANSITION_COMPLETE { - return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); - } - - let mut api_err; - let Some(rreq) = parsere_store_request(req.body(), req.content_length) else { - let api_err = errorCodes.ToAPIErr(ErrMalformedXML); - api_err.description = err.Error() - return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); - }; - let mut status_code = http::StatusCode::OK; - 
let mut already_restored = false; - if Err(err) = rreq.validate(store) { - api_err = errorCodes.ToAPIErr(ErrMalformedXML) - api_err.description = err.Error() - return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); - } else { - if obj_info.restore_ongoing && rreq.Type != "SELECT" { - return Err(S3Error::with_message(S3ErrorCode::Custom("ErrObjectRestoreAlreadyInProgress".into()), "post restore object failed")); - } - if !obj_info.restore_ongoing && !obj_info.restore_expires.unix_timestamp() == 0 { - status_code = http::StatusCode::Accepted; - already_restored = true; - } - } - let restore_expiry = lifecycle::expected_expiry_time(OffsetDateTime::now_utc(), rreq.days); - let mut metadata = clone_mss(obj_info.user_defined); - - if rreq.type != "SELECT" { - obj_info.metadataOnly = true; - metadata[xhttp.AmzRestoreExpiryDays] = rreq.days; - metadata[xhttp.AmzRestoreRequestDate] = OffsetDateTime::now_utc().format(http::TimeFormat); - if already_restored { - metadata[xhttp.AmzRestore] = completedRestoreObj(restore_expiry).String() - } else { - metadata[xhttp.AmzRestore] = ongoingRestoreObj().String() - } - obj_info.user_defined = metadata; - if let Err(err) = store.copy_object(bucket, object, bucket, object, obj_info, ObjectOptions { - version_id: obj_info.version_id, - }, ObjectOptions { - version_id: obj_info.version_id, - m_time: obj_info.mod_time, - }) { - return Err(S3Error::with_message(S3ErrorCode::Custom("ErrInvalidObjectState".into()), "post restore object failed")); - } - if already_restored { - return Ok(()); - } - } - - let restore_object = must_get_uuid(); - if rreq.output_location.s3.bucket_name != "" { - w.Header()[xhttp.AmzRestoreOutputPath] = []string{pathJoin(rreq.OutputLocation.S3.BucketName, rreq.OutputLocation.S3.Prefix, restoreObject)} - } - w.WriteHeader(status_code) - send_event(EventArgs { - event_name: event::ObjectRestorePost, - bucket_name: bucket, - object: obj_info, - req_params: 
extract_req_params(r), - user_agent: req.user_agent(), - host: handlers::get_source_ip(r), - }); - tokio::spawn(async move { - if !rreq.SelectParameters.IsEmpty() { - let actual_size = obj_info.get_actual_size(); - if actual_size.is_err() { - return Err(S3Error::with_message(S3ErrorCode::Custom("ErrInvalidObjectState".into()), "post restore object failed")); - } - - let object_rsc = s3select.NewObjectReadSeekCloser( - |offset int64| -> (io.ReadCloser, error) { - rs := &HTTPRangeSpec{ - IsSuffixLength: false, - Start: offset, - End: -1, - } - return getTransitionedObjectReader(bucket, object, rs, r.Header, - obj_info, ObjectOptions {version_id: obj_info.version_id}); - }, - actual_size.unwrap(), - ); - if err = rreq.SelectParameters.Open(objectRSC); err != nil { - if serr, ok := err.(s3select.SelectError); ok { - let encoded_error_response = encodeResponse(APIErrorResponse { - code: serr.ErrorCode(), - message: serr.ErrorMessage(), - bucket_name: bucket, - key: object, - resource: r.URL.Path, - request_id: w.Header().Get(xhttp.AmzRequestID), - host_id: globalDeploymentID(), - }); - //writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML) - Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)); - } else { - return Err(S3Error::with_message(S3ErrorCode::Custom("ErrInvalidObjectState".into()), "post restore object failed")); - } - return Ok(()); - } - let nr = httptest.NewRecorder(); - let rw = xhttp.NewResponseRecorder(nr); - rw.log_err_body = true; - rw.log_all_body = true; - rreq.select_parameters.evaluate(rw); - rreq.select_parameters.Close(); - return Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)); - } - let opts = ObjectOptions { - transition: TransitionOptions { - restore_request: rreq, - restore_expiry: restore_expiry, - }, - version_id: objInfo.version_id, - } - if Err(err) = store.restore_transitioned_object(bucket, object, opts) { - format!(format!("unable to restore transitioned bucket/object {}/{}: 
{}", bucket, object, err.to_string())); - return Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)); - } - - send_event(EventArgs { - EventName: event.ObjectRestoreCompleted, - BucketName: bucket, - Object: objInfo, - ReqParams: extractReqParams(r), - UserAgent: r.UserAgent(), - Host: handlers.GetSourceIP(r), - }); - }); - - let mut header = HeaderMap::new(); - header.insert(CONTENT_TYPE, "application/json".parse().unwrap()); - - Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)) - } -}*/ diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 605612640..3123b1760 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -488,6 +488,169 @@ impl S3 for FS { Ok(S3Response::new(output)) } + async fn restore_object(&self, _req: S3Request) -> S3Result> { + Err(s3_error!(NotImplemented, "RestoreObject is not implemented yet")) + /* + let bucket = params.bucket; + if let Err(e) = un_escape_path(params.object) { + warn!("post restore object failed, e: {:?}", e); + return Err(S3Error::with_message(S3ErrorCode::Custom("PostRestoreObjectFailed".into()), "post restore object failed")); + } + + let Some(store) = new_object_layer_fn() else { + return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); + }; + + let get_object_info = store.get_object_info(); + + if Err(err) = check_request_auth_type(req, policy::RestoreObjectAction, bucket, object) { + return Err(S3Error::with_message(S3ErrorCode::Custom("PostRestoreObjectFailed".into()), "post restore object failed")); + } + + if req.content_length <= 0 { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + } + let Some(opts) = post_restore_opts(req, bucket, object) else { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + }; + + let Some(obj_info) = get_object_info(bucket, object, opts) else { 
+ return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + }; + + if obj_info.transitioned_object.status != lifecycle::TRANSITION_COMPLETE { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + } + + let mut api_err; + let Some(rreq) = parse_restore_request(req.body(), req.content_length) else { + let api_err = errorCodes.ToAPIErr(ErrMalformedXML); + api_err.description = err.Error() + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + }; + let mut status_code = http::StatusCode::OK; + let mut already_restored = false; + if Err(err) = rreq.validate(store) { + api_err = errorCodes.ToAPIErr(ErrMalformedXML) + api_err.description = err.Error() + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); + } else { + if obj_info.restore_ongoing && rreq.Type != "SELECT" { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrObjectRestoreAlreadyInProgress".into()), "post restore object failed")); + } + if !obj_info.restore_ongoing && !obj_info.restore_expires.unix_timestamp() == 0 { + status_code = http::StatusCode::Accepted; + already_restored = true; + } + } + let restore_expiry = lifecycle::expected_expiry_time(OffsetDateTime::now_utc(), rreq.days); + let mut metadata = clone_mss(obj_info.user_defined); + + if rreq.type != "SELECT" { + obj_info.metadataOnly = true; + metadata[xhttp.AmzRestoreExpiryDays] = rreq.days; + metadata[xhttp.AmzRestoreRequestDate] = OffsetDateTime::now_utc().format(http::TimeFormat); + if already_restored { + metadata[AmzRestore] = completed_restore_obj(restore_expiry).String() + } else { + metadata[AmzRestore] = ongoing_restore_obj().to_string() + } + obj_info.user_defined = metadata; + if let Err(err) = store.copy_object(bucket, object, bucket, object, obj_info, ObjectOptions { + 
version_id: obj_info.version_id, + }, ObjectOptions { + version_id: obj_info.version_id, + m_time: obj_info.mod_time, + }) { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrInvalidObjectState".into()), "post restore object failed")); + } + if already_restored { + return Ok(()); + } + } + + let restore_object = must_get_uuid(); + if rreq.output_location.s3.bucket_name != "" { + w.Header()[AmzRestoreOutputPath] = []string{pathJoin(rreq.OutputLocation.S3.BucketName, rreq.OutputLocation.S3.Prefix, restoreObject)} + } + w.WriteHeader(status_code) + send_event(EventArgs { + event_name: event::ObjectRestorePost, + bucket_name: bucket, + object: obj_info, + req_params: extract_req_params(r), + user_agent: req.user_agent(), + host: handlers::get_source_ip(r), + }); + tokio::spawn(async move { + if !rreq.SelectParameters.IsEmpty() { + let actual_size = obj_info.get_actual_size(); + if actual_size.is_err() { + return Err(S3Error::with_message(S3ErrorCode::Custom("ErrInvalidObjectState".into()), "post restore object failed")); + } + + let object_rsc = s3select.NewObjectReadSeekCloser( + |offset int64| -> (io.ReadCloser, error) { + rs := &HTTPRangeSpec{ + IsSuffixLength: false, + Start: offset, + End: -1, + } + return getTransitionedObjectReader(bucket, object, rs, r.Header, + obj_info, ObjectOptions {version_id: obj_info.version_id}); + }, + actual_size.unwrap(), + ); + if err = rreq.SelectParameters.Open(objectRSC); err != nil { + if serr, ok := err.(s3select.SelectError); ok { + let encoded_error_response = encodeResponse(APIErrorResponse { + code: serr.ErrorCode(), + message: serr.ErrorMessage(), + bucket_name: bucket, + key: object, + resource: r.URL.Path, + request_id: w.Header().Get(xhttp.AmzRequestID), + host_id: globalDeploymentID(), + }); + //writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML) + Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)); + } else { + return 
Err(S3Error::with_message(S3ErrorCode::Custom("ErrInvalidObjectState".into()), "post restore object failed")); + } + return Ok(()); + } + let nr = httptest.NewRecorder(); + let rw = xhttp.NewResponseRecorder(nr); + rw.log_err_body = true; + rw.log_all_body = true; + rreq.select_parameters.evaluate(rw); + rreq.select_parameters.Close(); + return Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)); + } + let opts = ObjectOptions { + transition: TransitionOptions { + restore_request: rreq, + restore_expiry: restore_expiry, + }, + version_id: objInfo.version_id, + } + if Err(err) = store.restore_transitioned_object(bucket, object, opts) { + format!(format!("unable to restore transitioned bucket/object {}/{}: {}", bucket, object, err.to_string())); + return Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)); + } + + send_event(EventArgs { + EventName: event.ObjectRestoreCompleted, + BucketName: bucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: handlers.GetSourceIP(r), + }); + }); + */ + } + /// Delete a bucket #[tracing::instrument(level = "debug", skip(self, req))] async fn delete_bucket(&self, req: S3Request) -> S3Result> { @@ -1976,27 +2139,14 @@ impl S3 for FS { .. 
} = req.input; - let mut lr_retention = false; - /*let rcfg = metadata_sys::get_object_lock_config(&bucket).await; - if let Ok(rcfg) = rcfg { - if let Some(rule) = rcfg.0.rule { - if let Some(retention) = rule.default_retention { - if let Some(mode) = retention.mode { - //if mode == ObjectLockRetentionMode::from_static(ObjectLockRetentionMode::GOVERNANCE) { - lr_retention = true; - //} - } - } - } - }*/ - - //info!("lifecycle_configuration: {:?}", &lifecycle_configuration); - let Some(input_cfg) = lifecycle_configuration else { return Err(s3_error!(InvalidArgument)) }; - if let Err(err) = input_cfg.validate(lr_retention).await { - //return Err(S3Error::with_message(S3ErrorCode::Custom("BucketLockValidateFailed".into()), "bucket lock validate failed.")); - return Err(S3Error::with_message(S3ErrorCode::Custom("ValidateFailed".into()), err.to_string())); + let rcfg = metadata_sys::get_object_lock_config(&bucket).await; + if let Ok(rcfg) = rcfg { + if let Err(err) = input_cfg.validate(&rcfg.0).await { + //return Err(S3Error::with_message(S3ErrorCode::Custom("BucketLockValidateFailed".into()), err.to_string())); + return Err(S3Error::with_message(S3ErrorCode::Custom("ValidateFailed".into()), err.to_string())); + } } if let Err(err) = validate_transition_tier(&input_cfg).await { From 312258431ba07edfc553d27d986b82cc23fd1e44 Mon Sep 17 00:00:00 2001 From: likewu Date: Thu, 10 Jul 2025 15:09:50 +0800 Subject: [PATCH 11/26] fix --- crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs | 4 ++-- crates/ecstore/src/bucket/lifecycle/lifecycle.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs index eaf64b935..4089c2190 100644 --- a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs +++ b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs @@ -810,8 +810,8 @@ impl LifecycleOps for ObjectInfo { num_versions: 
self.num_versions, delete_marker: self.delete_marker, successor_mod_time: self.successor_mod_time, - restore_ongoing: self.restore_ongoing, - restore_expires: self.restore_expires, + restore_ongoing: self.restore_ongoing, + restore_expires: self.restore_expires, transition_status: self.transitioned_object.status.clone(), ..Default::default() } diff --git a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs index 53f5f64ba..df3a23e70 100644 --- a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs +++ b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs @@ -272,7 +272,7 @@ impl Lifecycle for BucketLifecycleConfiguration { return Err(std::io::Error::other(ERR_LIFECYCLE_BUCKET_LOCKED)); } } - } + } }*/ } for (i, _) in self.rules.iter().enumerate() { From fc8c3b7ddca87ec5679828503d4998722df077ed Mon Sep 17 00:00:00 2001 From: likewu Date: Fri, 11 Jul 2025 08:55:54 +0800 Subject: [PATCH 12/26] fix --- crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs | 4 ++++ rustfs/src/storage/ecfs.rs | 6 ++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs index 4089c2190..7c46c65b4 100644 --- a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs +++ b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs @@ -345,8 +345,12 @@ impl ExpiryState { } pub async fn worker(rx: &mut Receiver>, api: Arc) { + //let cancel_token = GLOBAL_SCANNER_CANCEL_TOKEN + // .get() + // .ok_or_else(|| Error::other("Scanner not initialized"))?; loop { select! 
{ + //_ = cancel_token.cancelled() => { _ = tokio::signal::ctrl_c() => { info!("got ctrl+c, exits"); break; diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index e63ca371d..254fcee58 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -501,8 +501,6 @@ impl S3 for FS { return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); }; - let get_object_info = store.get_object_info(); - if Err(err) = check_request_auth_type(req, policy::RestoreObjectAction, bucket, object) { return Err(S3Error::with_message(S3ErrorCode::Custom("PostRestoreObjectFailed".into()), "post restore object failed")); } @@ -514,7 +512,7 @@ impl S3 for FS { return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); }; - let Some(obj_info) = get_object_info(bucket, object, opts) else { + let Some(obj_info) = store.get_object_info(bucket, object, opts) else { return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed")); }; @@ -571,7 +569,7 @@ impl S3 for FS { let restore_object = must_get_uuid(); if rreq.output_location.s3.bucket_name != "" { - w.Header()[AmzRestoreOutputPath] = []string{pathJoin(rreq.OutputLocation.S3.BucketName, rreq.OutputLocation.S3.Prefix, restoreObject)} + w.Header()[AmzRestoreOutputPath] = []string{pathJoin(rreq.OutputLocation.S3.BucketName, rreq.OutputLocation.S3.Prefix, restore_object)} } w.WriteHeader(status_code) send_event(EventArgs { From 4c8db6077037ca620bdb876db7302c7878bd5f5a Mon Sep 17 00:00:00 2001 From: likewu Date: Fri, 11 Jul 2025 15:24:10 +0800 Subject: [PATCH 13/26] serde-rs-xml -> quick-xml --- Cargo.toml | 1 - crates/ecstore/Cargo.toml | 2 +- .../bucket/lifecycle/bucket_lifecycle_ops.rs | 6 +++--- .../ecstore/src/client/api_error_response.rs | 2 +- .../ecstore/src/client/api_get_object_acl.rs | 2 +- .../src/client/api_get_object_attributes.rs | 4 ++-- 
crates/ecstore/src/client/api_list.rs | 2 +- crates/ecstore/src/client/api_restore.rs | 2 +- crates/ecstore/src/client/api_s3_datatypes.rs | 4 ++-- crates/ecstore/src/client/bucket_cache.rs | 2 +- crates/utils/src/retry.rs | 18 ++++++++++-------- 11 files changed, 23 insertions(+), 22 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b0b33a742..ce7555138 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -214,7 +214,6 @@ s3s = { version = "0.12.0-minio-preview.1" } shadow-rs = { version = "1.2.0", default-features = false } serde = { version = "1.0.219", features = ["derive"] } serde_json = { version = "1.0.140", features = ["raw_value"] } -serde-xml-rs = "0.8.1" serde_urlencoded = "0.7.1" sha1 = "0.10.6" sha2 = "0.10.9" diff --git a/crates/ecstore/Cargo.toml b/crates/ecstore/Cargo.toml index ec9e498ce..1cc884ec8 100644 --- a/crates/ecstore/Cargo.toml +++ b/crates/ecstore/Cargo.toml @@ -50,7 +50,7 @@ serde.workspace = true time.workspace = true bytesize.workspace = true serde_json.workspace = true -serde-xml-rs.workspace = true +quick-xml.workspace = true s3s.workspace = true http.workspace = true url.workspace = true diff --git a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs index 7c46c65b4..b90b269e4 100644 --- a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs +++ b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs @@ -345,9 +345,9 @@ impl ExpiryState { } pub async fn worker(rx: &mut Receiver>, api: Arc) { - //let cancel_token = GLOBAL_SCANNER_CANCEL_TOKEN - // .get() - // .ok_or_else(|| Error::other("Scanner not initialized"))?; + //let cancel_token = + // get_background_services_cancel_token().ok_or_else(|| Error::other("Background services not initialized"))?; + loop { select! 
{ //_ = cancel_token.cancelled() => { diff --git a/crates/ecstore/src/client/api_error_response.rs b/crates/ecstore/src/client/api_error_response.rs index 7b070a980..402c34df8 100644 --- a/crates/ecstore/src/client/api_error_response.rs +++ b/crates/ecstore/src/client/api_error_response.rs @@ -101,7 +101,7 @@ pub fn http_resp_to_error_response( object_name: &str, ) -> ErrorResponse { let err_body = String::from_utf8(b).unwrap(); - let err_resp_ = serde_xml_rs::from_str::(&err_body); + let err_resp_ = quick_xml::de::from_str::(&err_body); let mut err_resp = ErrorResponse::default(); if err_resp_.is_err() { match resp.status() { diff --git a/crates/ecstore/src/client/api_get_object_acl.rs b/crates/ecstore/src/client/api_get_object_acl.rs index 06cacfb55..1e811512e 100644 --- a/crates/ecstore/src/client/api_get_object_acl.rs +++ b/crates/ecstore/src/client/api_get_object_acl.rs @@ -91,7 +91,7 @@ impl TransitionClient { } let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec(); - let mut res = match serde_xml_rs::from_str::(&String::from_utf8(b).unwrap()) { + let mut res = match quick_xml::de::from_str::(&String::from_utf8(b).unwrap()) { Ok(result) => result, Err(err) => { return Err(std::io::Error::other(err.to_string())); diff --git a/crates/ecstore/src/client/api_get_object_attributes.rs b/crates/ecstore/src/client/api_get_object_attributes.rs index f236118d6..fd8015ad9 100644 --- a/crates/ecstore/src/client/api_get_object_attributes.rs +++ b/crates/ecstore/src/client/api_get_object_attributes.rs @@ -144,7 +144,7 @@ impl ObjectAttributes { self.version_id = h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap().to_string(); let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec(); - let mut response = match serde_xml_rs::from_str::(&String::from_utf8(b).unwrap()) { + let mut response = match quick_xml::de::from_str::(&String::from_utf8(b).unwrap()) { Ok(result) => result, Err(err) => { return Err(std::io::Error::other(err.to_string())); @@ 
-226,7 +226,7 @@ impl TransitionClient { if resp.status() != http::StatusCode::OK { let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec(); let err_body = String::from_utf8(b).unwrap(); - let mut er = match serde_xml_rs::from_str::(&err_body) { + let mut er = match quick_xml::de::from_str::(&err_body) { Ok(result) => result, Err(err) => { return Err(std::io::Error::other(err.to_string())); diff --git a/crates/ecstore/src/client/api_list.rs b/crates/ecstore/src/client/api_list.rs index 955122b9e..fdbffc680 100644 --- a/crates/ecstore/src/client/api_list.rs +++ b/crates/ecstore/src/client/api_list.rs @@ -103,7 +103,7 @@ impl TransitionClient { //let mut list_bucket_result = ListBucketV2Result::default(); let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec(); - let mut list_bucket_result = match serde_xml_rs::from_str::(&String::from_utf8(b).unwrap()) { + let mut list_bucket_result = match quick_xml::de::from_str::(&String::from_utf8(b).unwrap()) { Ok(result) => result, Err(err) => { return Err(std::io::Error::other(err.to_string())); diff --git a/crates/ecstore/src/client/api_restore.rs b/crates/ecstore/src/client/api_restore.rs index 356a81446..84b1ccb01 100644 --- a/crates/ecstore/src/client/api_restore.rs +++ b/crates/ecstore/src/client/api_restore.rs @@ -125,7 +125,7 @@ impl TransitionClient { version_id: &str, restore_req: &RestoreRequest, ) -> Result<(), std::io::Error> { - let restore_request = match serde_xml_rs::to_string(restore_req) { + let restore_request = match quick_xml::se::to_string(restore_req) { Ok(buf) => buf, Err(e) => { return Err(std::io::Error::other(e)); diff --git a/crates/ecstore/src/client/api_s3_datatypes.rs b/crates/ecstore/src/client/api_s3_datatypes.rs index ba26325e2..cd92b3ec6 100644 --- a/crates/ecstore/src/client/api_s3_datatypes.rs +++ b/crates/ecstore/src/client/api_s3_datatypes.rs @@ -279,7 +279,7 @@ pub struct CompleteMultipartUpload { impl CompleteMultipartUpload { pub fn marshal_msg(&self) -> 
Result { //let buf = serde_json::to_string(self)?; - let buf = match serde_xml_rs::to_string(self) { + let buf = match quick_xml::se::to_string(self) { Ok(buf) => buf, Err(e) => { return Err(std::io::Error::other(e)); @@ -329,7 +329,7 @@ pub struct DeleteMultiObjects { impl DeleteMultiObjects { pub fn marshal_msg(&self) -> Result { //let buf = serde_json::to_string(self)?; - let buf = match serde_xml_rs::to_string(self) { + let buf = match quick_xml::se::to_string(self) { Ok(buf) => buf, Err(e) => { return Err(std::io::Error::other(e)); diff --git a/crates/ecstore/src/client/bucket_cache.rs b/crates/ecstore/src/client/bucket_cache.rs index 2df73f698..2ddf860e3 100644 --- a/crates/ecstore/src/client/bucket_cache.rs +++ b/crates/ecstore/src/client/bucket_cache.rs @@ -208,7 +208,7 @@ async fn process_bucket_location_response(mut resp: http::Response, bucket //} let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec(); - let Document(location_constraint) = serde_xml_rs::from_str::(&String::from_utf8(b).unwrap()).unwrap(); + let Document(location_constraint) = quick_xml::de::from_str::(&String::from_utf8(b).unwrap()).unwrap(); let mut location = location_constraint; if location == "" { diff --git a/crates/utils/src/retry.rs b/crates/utils/src/retry.rs index 4e9a06425..cd8ba6aa3 100644 --- a/crates/utils/src/retry.rs +++ b/crates/utils/src/retry.rs @@ -14,11 +14,11 @@ use futures::Stream; use hyper::http; -use lazy_static::lazy_static; use std::{ pin::Pin, task::{Context, Poll}, time::{Duration, Instant}, + sync::LazyLock, }; use tokio::time::interval; @@ -77,8 +77,8 @@ impl Stream for RetryTimer { } } -lazy_static! { - static ref RETRYABLE_S3CODES: Vec = vec![ +static RETRYABLE_S3CODES: LazyLock> = LazyLock::new(|| { + vec![ "RequestError".to_string(), "RequestTimeout".to_string(), "Throttling".to_string(), @@ -89,9 +89,11 @@ lazy_static! 
{ "ExpiredToken".to_string(), "ExpiredTokenException".to_string(), "SlowDown".to_string(), - ]; + ] +}); - static ref RETRYABLE_HTTP_STATUSCODES: Vec = vec![ +static RETRYABLE_HTTP_STATUSCODES: LazyLock> = LazyLock::new(|| { + vec![ http::StatusCode::REQUEST_TIMEOUT, http::StatusCode::TOO_MANY_REQUESTS, //499, @@ -100,8 +102,8 @@ lazy_static! { http::StatusCode::SERVICE_UNAVAILABLE, http::StatusCode::GATEWAY_TIMEOUT, //520, - ]; -} + ] +}); pub fn is_s3code_retryable(s3code: &str) -> bool { RETRYABLE_S3CODES.contains(&s3code.to_string()) @@ -111,7 +113,7 @@ pub fn is_http_status_retryable(http_statuscode: &http::StatusCode) -> bool { RETRYABLE_HTTP_STATUSCODES.contains(http_statuscode) } -pub fn is_request_error_retryable(err: std::io::Error) -> bool { +pub fn is_request_error_retryable(_err: std::io::Error) -> bool { /*if err == Err::Canceled) || err == Err::DeadlineExceeded) { return ctx.Err() == nil; } From e128a46a1c683c5501749a08e231406d701280ba Mon Sep 17 00:00:00 2001 From: likewu Date: Mon, 14 Jul 2025 14:50:56 +0800 Subject: [PATCH 14/26] fix --- Cargo.lock | 16 ++-------------- crates/utils/src/retry.rs | 2 +- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 41ae7c6ec..3e84204f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8002,6 +8002,7 @@ dependencies = [ "path-absolutize", "path-clean", "pin-project-lite", + "quick-xml", "rand 0.9.1", "reed-solomon-simd", "regex", @@ -8023,7 +8024,6 @@ dependencies = [ "rustls 0.23.28", "s3s", "serde", - "serde-xml-rs 0.8.1", "serde_json", "sha2 0.10.9", "shadow-rs", @@ -8272,7 +8272,7 @@ dependencies = [ "regex", "reqwest", "serde", - "serde-xml-rs 0.6.0", + "serde-xml-rs", "sha2 0.10.9", "urlencoding", ] @@ -8806,18 +8806,6 @@ dependencies = [ "xml-rs", ] -[[package]] -name = "serde-xml-rs" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53630160a98edebde0123eb4dfd0fce6adff091b2305db3154a9e920206eb510" -dependencies = [ - 
"log", - "serde", - "thiserror 1.0.69", - "xml-rs", -] - [[package]] name = "serde_derive" version = "1.0.219" diff --git a/crates/utils/src/retry.rs b/crates/utils/src/retry.rs index cd8ba6aa3..13d548d9a 100644 --- a/crates/utils/src/retry.rs +++ b/crates/utils/src/retry.rs @@ -16,9 +16,9 @@ use futures::Stream; use hyper::http; use std::{ pin::Pin, + sync::LazyLock, task::{Context, Poll}, time::{Duration, Instant}, - sync::LazyLock, }; use tokio::time::interval; From 83743dd5da3ca6991bc6a48a538522e71067c74e Mon Sep 17 00:00:00 2001 From: likewu Date: Tue, 22 Jul 2025 14:03:39 +0800 Subject: [PATCH 15/26] checksum --- Cargo.lock | 68 ++- Cargo.toml | 2 + crates/checksums/Cargo.toml | 48 ++ crates/checksums/README.md | 3 + crates/checksums/src/base64.rs | 29 ++ crates/checksums/src/error.rs | 31 ++ crates/checksums/src/http.rs | 221 +++++++++ crates/checksums/src/lib.rs | 442 ++++++++++++++++++ crates/ecstore/Cargo.toml | 2 + crates/ecstore/src/checksum.rs | 40 +- crates/ecstore/src/client/api_put_object.rs | 7 +- .../src/client/api_put_object_multipart.rs | 14 +- .../src/client/api_put_object_streaming.rs | 6 +- 13 files changed, 880 insertions(+), 33 deletions(-) create mode 100644 crates/checksums/Cargo.toml create mode 100644 crates/checksums/README.md create mode 100644 crates/checksums/src/base64.rs create mode 100644 crates/checksums/src/error.rs create mode 100644 crates/checksums/src/http.rs create mode 100644 crates/checksums/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 157b904e3..fd4f2a2e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2886,6 +2886,12 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.10.7" @@ -7024,6 +7030,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" +[[package]] +name = "pretty_assertions" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" +dependencies = [ + "diff", + "yansi", +] + [[package]] name = "prettyplease" version = "0.2.35" @@ -7954,6 +7970,27 @@ dependencies = [ "serde_json", ] +[[package]] +name = "rustfs-checksums" +version = "0.0.5" +dependencies = [ + "base64-simd", + "bytes", + "bytes-utils", + "crc-fast", + "hex", + "http 1.3.1", + "http-body 1.0.1", + "md-5", + "pin-project-lite", + "pretty_assertions", + "sha1 0.10.6", + "sha2 0.10.9", + "tokio", + "tracing", + "tracing-test", +] + [[package]] name = "rustfs-common" version = "0.0.5" @@ -8021,13 +8058,14 @@ dependencies = [ "path-absolutize", "path-clean", "pin-project-lite", - "quick-xml", + "quick-xml 0.38.0", "rand 0.9.1", "reed-solomon-simd", "regex", "reqwest", "rmp", "rmp-serde", + "rustfs-checksums", "rustfs-common", "rustfs-config", "rustfs-filemeta", @@ -8044,6 +8082,7 @@ dependencies = [ "s3s", "serde", "serde_json", + "sha1 0.10.6", "sha2 0.10.9", "shadow-rs", "smallvec", @@ -10284,6 +10323,27 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracing-test" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "557b891436fe0d5e0e363427fc7f217abf9ccd510d5136549847bdcbcd011d68" +dependencies = [ + "tracing-core", + "tracing-subscriber", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" +dependencies = [ + "quote", + "syn 2.0.104", +] + [[package]] name = "tracing-wasm" version = "0.2.1" @@ -11623,6 +11683,12 @@ dependencies = [ "lzma-sys", ] +[[package]] +name = "yansi" +version = "1.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + [[package]] name = "yoke" version = "0.8.0" diff --git a/Cargo.toml b/Cargo.toml index 4739fb8ea..c60377597 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,6 +33,7 @@ members = [ "crates/s3select-api", # S3 Select API interface "crates/s3select-query", # S3 Select query engine "crates/signer", # client signer + "crates/checksums", # client checksums "crates/utils", # Utility functions and helpers "crates/workers", # Worker thread pools and task scheduling "crates/zip", # ZIP file handling and compression @@ -84,6 +85,7 @@ rustfs-utils = { path = "crates/utils", version = "0.0.5" } rustfs-rio = { path = "crates/rio", version = "0.0.5" } rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" } rustfs-signer = { path = "crates/signer", version = "0.0.5" } +rustfs-checksums = { path = "crates/checksums", version = "0.0.5" } rustfs-workers = { path = "crates/workers", version = "0.0.5" } aes-gcm = { version = "0.10.3", features = ["std"] } arc-swap = "1.7.1" diff --git a/crates/checksums/Cargo.toml b/crates/checksums/Cargo.toml new file mode 100644 index 000000000..71a6e547c --- /dev/null +++ b/crates/checksums/Cargo.toml @@ -0,0 +1,48 @@ +# Copyright 2024 RustFS Team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+[package]
+name = "rustfs-checksums"
+edition.workspace = true
+license.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+version.workspace = true
+homepage.workspace = true
+description = "Checksum calculation and verification callbacks for HTTP request and response bodies sent by service clients generated by RustFS, ensuring data integrity and authenticity."
+keywords = ["checksum-calculation", "verification", "integrity", "authenticity", "rustfs"]
+categories = ["web-programming", "development-tools", "checksum"]
+documentation = "https://docs.rs/rustfs-checksums/latest/rustfs_checksums/"
+
+[dependencies]
+bytes = { workspace = true }
+crc-fast = "1.3.0"
+hex = { workspace = true }
+http = { workspace = true }
+http-body = { workspace = true }
+base64-simd = { workspace = true }
+md-5 = { workspace = true }
+pin-project-lite = { workspace = true }
+sha1 = { workspace = true }
+sha2 = { workspace = true }
+tracing = { workspace = true }
+
+[dev-dependencies]
+bytes-utils = "0.1.2"
+pretty_assertions = "1.3"
+tracing-test = "0.2.1"
+
+[dev-dependencies.tokio]
+version = "1.23.1"
+features = ["macros", "rt"]
diff --git a/crates/checksums/README.md b/crates/checksums/README.md
new file mode 100644
index 000000000..bc74d8540
--- /dev/null
+++ b/crates/checksums/README.md
@@ -0,0 +1,3 @@
+# rustfs-checksums
+
+Checksum calculation and verification callbacks for HTTP request and response bodies sent by service clients generated by RustFS object storage.
diff --git a/crates/checksums/src/base64.rs b/crates/checksums/src/base64.rs new file mode 100644 index 000000000..f2e1f298c --- /dev/null +++ b/crates/checksums/src/base64.rs @@ -0,0 +1,29 @@ +use base64_simd::STANDARD; +use std::error::Error; + +#[derive(Debug)] +pub struct DecodeError(base64_simd::Error); + +impl Error for DecodeError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + Some(&self.0) + } +} + +impl std::fmt::Display for DecodeError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "failed to decode base64") + } +} + +pub fn decode(input: impl AsRef) -> Result, DecodeError> { + STANDARD.decode_to_vec(input.as_ref()).map_err(DecodeError) +} + +pub fn encode(input: impl AsRef<[u8]>) -> String { + STANDARD.encode_to_string(input.as_ref()) +} + +pub fn encoded_length(length: usize) -> usize { + STANDARD.encoded_length(length) +} diff --git a/crates/checksums/src/error.rs b/crates/checksums/src/error.rs new file mode 100644 index 000000000..47fec5971 --- /dev/null +++ b/crates/checksums/src/error.rs @@ -0,0 +1,31 @@ +use std::error::Error; +use std::fmt; + +#[derive(Debug)] +pub struct UnknownChecksumAlgorithmError { + checksum_algorithm: String, +} + +impl UnknownChecksumAlgorithmError { + pub(crate) fn new(checksum_algorithm: impl Into) -> Self { + Self { + checksum_algorithm: checksum_algorithm.into(), + } + } + + pub fn checksum_algorithm(&self) -> &str { + &self.checksum_algorithm + } +} + +impl fmt::Display for UnknownChecksumAlgorithmError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + r#"unknown checksum algorithm "{}", please pass a known algorithm name ("crc32", "crc32c", "sha1", "sha256", "md5")"#, + self.checksum_algorithm + ) + } +} + +impl Error for UnknownChecksumAlgorithmError {} diff --git a/crates/checksums/src/http.rs b/crates/checksums/src/http.rs new file mode 100644 index 000000000..1ba569e1d --- /dev/null +++ b/crates/checksums/src/http.rs @@ -0,0 +1,221 
@@ +use crate::base64; +use http::header::{HeaderMap, HeaderValue}; + +use crate::Crc64Nvme; +use crate::{ + Checksum, Crc32, Crc32c, Md5, Sha1, Sha256, CRC_32_C_NAME, CRC_32_NAME, CRC_64_NVME_NAME, + SHA_1_NAME, SHA_256_NAME, +}; + +pub const CRC_32_HEADER_NAME: &str = "x-amz-checksum-crc32"; +pub const CRC_32_C_HEADER_NAME: &str = "x-amz-checksum-crc32c"; +pub const SHA_1_HEADER_NAME: &str = "x-amz-checksum-sha1"; +pub const SHA_256_HEADER_NAME: &str = "x-amz-checksum-sha256"; +pub const CRC_64_NVME_HEADER_NAME: &str = "x-amz-checksum-crc64nvme"; + +pub(crate) static MD5_HEADER_NAME: &str = "content-md5"; + +pub const CHECKSUM_ALGORITHMS_IN_PRIORITY_ORDER: [&str; 5] = [ + CRC_64_NVME_NAME, + CRC_32_C_NAME, + CRC_32_NAME, + SHA_1_NAME, + SHA_256_NAME, +]; + +pub trait HttpChecksum: Checksum + Send + Sync { + fn headers(self: Box) -> HeaderMap { + let mut header_map = HeaderMap::new(); + header_map.insert(self.header_name(), self.header_value()); + + header_map + } + + fn header_name(&self) -> &'static str; + + fn header_value(self: Box) -> HeaderValue { + let hash = self.finalize(); + HeaderValue::from_str(&base64::encode(&hash[..])) + .expect("base64 encoded bytes are always valid header values") + } + + fn size(&self) -> u64 { + let trailer_name_size_in_bytes = self.header_name().len(); + let base64_encoded_checksum_size_in_bytes = + base64::encoded_length(Checksum::size(self) as usize); + + let size = trailer_name_size_in_bytes + + ":".len() + + base64_encoded_checksum_size_in_bytes; + + size as u64 + } +} + +impl HttpChecksum for Crc32 { + fn header_name(&self) -> &'static str { + CRC_32_HEADER_NAME + } +} + +impl HttpChecksum for Crc32c { + fn header_name(&self) -> &'static str { + CRC_32_C_HEADER_NAME + } +} + +impl HttpChecksum for Crc64Nvme { + fn header_name(&self) -> &'static str { + CRC_64_NVME_HEADER_NAME + } +} + +impl HttpChecksum for Sha1 { + fn header_name(&self) -> &'static str { + SHA_1_HEADER_NAME + } +} + +impl HttpChecksum for Sha256 { + fn 
header_name(&self) -> &'static str { + SHA_256_HEADER_NAME + } +} + +impl HttpChecksum for Md5 { + fn header_name(&self) -> &'static str { + MD5_HEADER_NAME + } +} + +#[cfg(test)] +mod tests { + use crate::base64; + use bytes::Bytes; + + use crate::{ + ChecksumAlgorithm, CRC_32_C_NAME, CRC_32_NAME, CRC_64_NVME_NAME, SHA_1_NAME, SHA_256_NAME, + }; + + use super::HttpChecksum; + + #[test] + fn test_trailer_length_of_crc32_checksum_body() { + let checksum = CRC_32_NAME + .parse::() + .unwrap() + .into_impl(); + let expected_size = 29; + let actual_size = HttpChecksum::size(&*checksum); + assert_eq!(expected_size, actual_size) + } + + #[test] + fn test_trailer_value_of_crc32_checksum_body() { + let checksum = CRC_32_NAME + .parse::() + .unwrap() + .into_impl(); + // The CRC32 of an empty string is all zeroes + let expected_value = Bytes::from_static(b"\0\0\0\0"); + let expected_value = base64::encode(&expected_value); + let actual_value = checksum.header_value(); + assert_eq!(expected_value, actual_value) + } + + #[test] + fn test_trailer_length_of_crc32c_checksum_body() { + let checksum = CRC_32_C_NAME + .parse::() + .unwrap() + .into_impl(); + let expected_size = 30; + let actual_size = HttpChecksum::size(&*checksum); + assert_eq!(expected_size, actual_size) + } + + #[test] + fn test_trailer_value_of_crc32c_checksum_body() { + let checksum = CRC_32_C_NAME + .parse::() + .unwrap() + .into_impl(); + // The CRC32C of an empty string is all zeroes + let expected_value = Bytes::from_static(b"\0\0\0\0"); + let expected_value = base64::encode(&expected_value); + let actual_value = checksum.header_value(); + assert_eq!(expected_value, actual_value) + } + + #[test] + fn test_trailer_length_of_crc64nvme_checksum_body() { + let checksum = CRC_64_NVME_NAME + .parse::() + .unwrap() + .into_impl(); + let expected_size = 37; + let actual_size = HttpChecksum::size(&*checksum); + assert_eq!(expected_size, actual_size) + } + + #[test] + fn 
test_trailer_value_of_crc64nvme_checksum_body() { + let checksum = CRC_64_NVME_NAME + .parse::() + .unwrap() + .into_impl(); + // The CRC64NVME of an empty string is all zeroes + let expected_value = Bytes::from_static(b"\0\0\0\0\0\0\0\0"); + let expected_value = base64::encode(&expected_value); + let actual_value = checksum.header_value(); + assert_eq!(expected_value, actual_value) + } + + #[test] + fn test_trailer_length_of_sha1_checksum_body() { + let checksum = SHA_1_NAME.parse::().unwrap().into_impl(); + let expected_size = 48; + let actual_size = HttpChecksum::size(&*checksum); + assert_eq!(expected_size, actual_size) + } + + #[test] + fn test_trailer_value_of_sha1_checksum_body() { + let checksum = SHA_1_NAME.parse::().unwrap().into_impl(); + // The SHA1 of an empty string is da39a3ee5e6b4b0d3255bfef95601890afd80709 + let expected_value = Bytes::from_static(&[ + 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, + 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09, + ]); + let expected_value = base64::encode(&expected_value); + let actual_value = checksum.header_value(); + assert_eq!(expected_value, actual_value) + } + + #[test] + fn test_trailer_length_of_sha256_checksum_body() { + let checksum = SHA_256_NAME + .parse::() + .unwrap() + .into_impl(); + let expected_size = 66; + let actual_size = HttpChecksum::size(&*checksum); + assert_eq!(expected_size, actual_size) + } + + #[test] + fn test_trailer_value_of_sha256_checksum_body() { + let checksum = SHA_256_NAME + .parse::() + .unwrap() + .into_impl(); + let expected_value = Bytes::from_static(&[ + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, + 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, + 0x78, 0x52, 0xb8, 0x55, + ]); + let expected_value = base64::encode(&expected_value); + let actual_value = checksum.header_value(); + assert_eq!(expected_value, actual_value) + } +} diff --git a/crates/checksums/src/lib.rs 
b/crates/checksums/src/lib.rs new file mode 100644 index 000000000..4d4875dae --- /dev/null +++ b/crates/checksums/src/lib.rs @@ -0,0 +1,442 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] + +#![allow(clippy::derive_partial_eq_without_eq)] +#![warn( + // missing_docs, + rustdoc::missing_crate_level_docs, + unreachable_pub, + rust_2018_idioms +)] + +use crate::error::UnknownChecksumAlgorithmError; + +use bytes::Bytes; +use std::{fmt::Debug, str::FromStr}; + +mod base64; +pub mod error; +pub mod http; + +pub const CRC_32_NAME: &str = "crc32"; +pub const CRC_32_C_NAME: &str = "crc32c"; +pub const CRC_64_NVME_NAME: &str = "crc64nvme"; +pub const SHA_1_NAME: &str = "sha1"; +pub const SHA_256_NAME: &str = "sha256"; +pub const MD5_NAME: &str = "md5"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +#[non_exhaustive] +pub enum ChecksumAlgorithm { + #[default] + Crc32, + Crc32c, + #[deprecated] + Md5, + Sha1, + Sha256, + Crc64Nvme, +} + +impl FromStr for ChecksumAlgorithm { + type Err = UnknownChecksumAlgorithmError; + + fn from_str(checksum_algorithm: &str) -> Result { + if checksum_algorithm.eq_ignore_ascii_case(CRC_32_NAME) { + Ok(Self::Crc32) + } else if checksum_algorithm.eq_ignore_ascii_case(CRC_32_C_NAME) { + Ok(Self::Crc32c) + } else if checksum_algorithm.eq_ignore_ascii_case(SHA_1_NAME) { + Ok(Self::Sha1) + } else if checksum_algorithm.eq_ignore_ascii_case(SHA_256_NAME) { + Ok(Self::Sha256) + } else if checksum_algorithm.eq_ignore_ascii_case(MD5_NAME) { + // MD5 is now an alias for the default Crc32 since it is deprecated + Ok(Self::Crc32) + } else if checksum_algorithm.eq_ignore_ascii_case(CRC_64_NVME_NAME) { + Ok(Self::Crc64Nvme) + } else { + Err(UnknownChecksumAlgorithmError::new(checksum_algorithm)) + } + } +} + +impl ChecksumAlgorithm { + pub fn into_impl(self) -> Box { + match self { + Self::Crc32 => Box::::default(), + Self::Crc32c => Box::::default(), + Self::Crc64Nvme => Box::::default(), + #[allow(deprecated)] + Self::Md5 => Box::::default(), + 
Self::Sha1 => Box::::default(), + Self::Sha256 => Box::::default(), + } + } + + pub fn as_str(&self) -> &'static str { + match self { + Self::Crc32 => CRC_32_NAME, + Self::Crc32c => CRC_32_C_NAME, + Self::Crc64Nvme => CRC_64_NVME_NAME, + #[allow(deprecated)] + Self::Md5 => MD5_NAME, + Self::Sha1 => SHA_1_NAME, + Self::Sha256 => SHA_256_NAME, + } + } +} + +pub trait Checksum: Send + Sync { + fn update(&mut self, bytes: &[u8]); + fn finalize(self: Box) -> Bytes; + fn size(&self) -> u64; +} + +#[derive(Debug)] +struct Crc32 { + hasher: crc_fast::Digest, +} + +impl Default for Crc32 { + fn default() -> Self { + Self { + hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc), + } + } +} + +impl Crc32 { + fn update(&mut self, bytes: &[u8]) { + self.hasher.update(bytes); + } + + fn finalize(self) -> Bytes { + let checksum = self.hasher.finalize() as u32; + + Bytes::copy_from_slice(checksum.to_be_bytes().as_slice()) + } + + fn size() -> u64 { + 4 + } +} + +impl Checksum for Crc32 { + fn update(&mut self, bytes: &[u8]) { + Self::update(self, bytes) + } + fn finalize(self: Box) -> Bytes { + Self::finalize(*self) + } + fn size(&self) -> u64 { + Self::size() + } +} + +#[derive(Debug)] +struct Crc32c { + hasher: crc_fast::Digest, +} + +impl Default for Crc32c { + fn default() -> Self { + Self { + hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32Iscsi), + } + } +} + +impl Crc32c { + fn update(&mut self, bytes: &[u8]) { + self.hasher.update(bytes); + } + + fn finalize(self) -> Bytes { + let checksum = self.hasher.finalize() as u32; + + Bytes::copy_from_slice(checksum.to_be_bytes().as_slice()) + } + + fn size() -> u64 { + 4 + } +} + +impl Checksum for Crc32c { + fn update(&mut self, bytes: &[u8]) { + Self::update(self, bytes) + } + fn finalize(self: Box) -> Bytes { + Self::finalize(*self) + } + fn size(&self) -> u64 { + Self::size() + } +} + +#[derive(Debug)] +struct Crc64Nvme { + hasher: crc_fast::Digest, +} + +impl Default for Crc64Nvme { + fn default() -> 
Self { + Self { + hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc64Nvme), + } + } +} + +impl Crc64Nvme { + fn update(&mut self, bytes: &[u8]) { + self.hasher.update(bytes); + } + + fn finalize(self) -> Bytes { + Bytes::copy_from_slice(self.hasher.finalize().to_be_bytes().as_slice()) + } + + fn size() -> u64 { + 8 + } +} + +impl Checksum for Crc64Nvme { + fn update(&mut self, bytes: &[u8]) { + Self::update(self, bytes) + } + fn finalize(self: Box) -> Bytes { + Self::finalize(*self) + } + fn size(&self) -> u64 { + Self::size() + } +} + +#[derive(Debug, Default)] +struct Sha1 { + hasher: sha1::Sha1, +} + +impl Sha1 { + fn update(&mut self, bytes: &[u8]) { + use sha1::Digest; + self.hasher.update(bytes); + } + + fn finalize(self) -> Bytes { + use sha1::Digest; + Bytes::copy_from_slice(self.hasher.finalize().as_slice()) + } + + fn size() -> u64 { + use sha1::Digest; + sha1::Sha1::output_size() as u64 + } +} + +impl Checksum for Sha1 { + fn update(&mut self, bytes: &[u8]) { + Self::update(self, bytes) + } + + fn finalize(self: Box) -> Bytes { + Self::finalize(*self) + } + fn size(&self) -> u64 { + Self::size() + } +} + +#[derive(Debug, Default)] +struct Sha256 { + hasher: sha2::Sha256, +} + +impl Sha256 { + fn update(&mut self, bytes: &[u8]) { + use sha2::Digest; + self.hasher.update(bytes); + } + + fn finalize(self) -> Bytes { + use sha2::Digest; + Bytes::copy_from_slice(self.hasher.finalize().as_slice()) + } + + fn size() -> u64 { + use sha2::Digest; + sha2::Sha256::output_size() as u64 + } +} + +impl Checksum for Sha256 { + fn update(&mut self, bytes: &[u8]) { + Self::update(self, bytes); + } + fn finalize(self: Box) -> Bytes { + Self::finalize(*self) + } + fn size(&self) -> u64 { + Self::size() + } +} + +#[derive(Debug, Default)] +struct Md5 { + hasher: md5::Md5, +} + +impl Md5 { + fn update(&mut self, bytes: &[u8]) { + use md5::Digest; + self.hasher.update(bytes); + } + + fn finalize(self) -> Bytes { + use md5::Digest; + 
Bytes::copy_from_slice(self.hasher.finalize().as_slice()) + } + + fn size() -> u64 { + use md5::Digest; + md5::Md5::output_size() as u64 + } +} + +impl Checksum for Md5 { + fn update(&mut self, bytes: &[u8]) { + Self::update(self, bytes) + } + fn finalize(self: Box) -> Bytes { + Self::finalize(*self) + } + fn size(&self) -> u64 { + Self::size() + } +} + +#[cfg(test)] +mod tests { + use super::{ + http::{ + CRC_32_C_HEADER_NAME, CRC_32_HEADER_NAME, MD5_HEADER_NAME, SHA_1_HEADER_NAME, + SHA_256_HEADER_NAME, + }, + Crc32, Crc32c, Md5, Sha1, Sha256, + }; + + use crate::http::HttpChecksum; + use crate::ChecksumAlgorithm; + + use crate::base64; + use http::HeaderValue; + use pretty_assertions::assert_eq; + use std::fmt::Write; + + const TEST_DATA: &str = r#"test data"#; + + fn base64_encoded_checksum_to_hex_string(header_value: &HeaderValue) -> String { + let decoded_checksum = base64::decode(header_value.to_str().unwrap()).unwrap(); + let decoded_checksum = decoded_checksum + .into_iter() + .fold(String::new(), |mut acc, byte| { + write!(acc, "{byte:02X?}").expect("string will always be writeable"); + acc + }); + + format!("0x{}", decoded_checksum) + } + + #[test] + fn test_crc32_checksum() { + let mut checksum = Crc32::default(); + checksum.update(TEST_DATA.as_bytes()); + let checksum_result = Box::new(checksum).headers(); + let encoded_checksum = checksum_result.get(CRC_32_HEADER_NAME).unwrap(); + let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum); + + let expected_checksum = "0xD308AEB2"; + + assert_eq!(decoded_checksum, expected_checksum); + } + + #[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))] + #[test] + fn test_crc32c_checksum() { + let mut checksum = Crc32c::default(); + checksum.update(TEST_DATA.as_bytes()); + let checksum_result = Box::new(checksum).headers(); + let encoded_checksum = checksum_result.get(CRC_32_C_HEADER_NAME).unwrap(); + let decoded_checksum = 
base64_encoded_checksum_to_hex_string(encoded_checksum); + + let expected_checksum = "0x3379B4CA"; + + assert_eq!(decoded_checksum, expected_checksum); + } + + #[test] + fn test_crc64nvme_checksum() { + use crate::{http::CRC_64_NVME_HEADER_NAME, Crc64Nvme}; + let mut checksum = Crc64Nvme::default(); + checksum.update(TEST_DATA.as_bytes()); + let checksum_result = Box::new(checksum).headers(); + let encoded_checksum = checksum_result.get(CRC_64_NVME_HEADER_NAME).unwrap(); + let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum); + + let expected_checksum = "0xAECAF3AF9C98A855"; + + assert_eq!(decoded_checksum, expected_checksum); + } + + #[test] + fn test_sha1_checksum() { + let mut checksum = Sha1::default(); + checksum.update(TEST_DATA.as_bytes()); + let checksum_result = Box::new(checksum).headers(); + let encoded_checksum = checksum_result.get(SHA_1_HEADER_NAME).unwrap(); + let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum); + + let expected_checksum = "0xF48DD853820860816C75D54D0F584DC863327A7C"; + + assert_eq!(decoded_checksum, expected_checksum); + } + + #[test] + fn test_sha256_checksum() { + let mut checksum = Sha256::default(); + checksum.update(TEST_DATA.as_bytes()); + let checksum_result = Box::new(checksum).headers(); + let encoded_checksum = checksum_result.get(SHA_256_HEADER_NAME).unwrap(); + let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum); + + let expected_checksum = + "0x916F0027A575074CE72A331777C3478D6513F786A591BD892DA1A577BF2335F9"; + + assert_eq!(decoded_checksum, expected_checksum); + } + + #[test] + fn test_md5_checksum() { + let mut checksum = Md5::default(); + checksum.update(TEST_DATA.as_bytes()); + let checksum_result = Box::new(checksum).headers(); + let encoded_checksum = checksum_result.get(MD5_HEADER_NAME).unwrap(); + let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum); + + let expected_checksum = 
"0xEB733A00C0C9D336E65691A37AB54293"; + + assert_eq!(decoded_checksum, expected_checksum); + } + + #[test] + fn test_checksum_algorithm_returns_error_for_unknown() { + let error = "some invalid checksum algorithm" + .parse::() + .expect_err("it should error"); + assert_eq!( + "some invalid checksum algorithm", + error.checksum_algorithm() + ); + } +} diff --git a/crates/ecstore/Cargo.toml b/crates/ecstore/Cargo.toml index 238368f6b..0e12a8467 100644 --- a/crates/ecstore/Cargo.toml +++ b/crates/ecstore/Cargo.toml @@ -66,6 +66,7 @@ rmp-serde.workspace = true tokio-util = { workspace = true, features = ["io", "compat"] } base64 = { workspace = true } hmac = { workspace = true } +sha1 = { workspace = true } sha2 = { workspace = true } hex-simd = { workspace = true } path-clean = { workspace = true } @@ -98,6 +99,7 @@ rustfs-filemeta.workspace = true rustfs-utils = { workspace = true, features = ["full"] } rustfs-rio.workspace = true rustfs-signer.workspace = true +rustfs-checksums.workspace = true futures-util.workspace = true [target.'cfg(not(windows))'.dependencies] diff --git a/crates/ecstore/src/checksum.rs b/crates/ecstore/src/checksum.rs index e338b6b1e..a156a6817 100644 --- a/crates/ecstore/src/checksum.rs +++ b/crates/ecstore/src/checksum.rs @@ -20,7 +20,7 @@ #![allow(clippy::all)] use lazy_static::lazy_static; -use rustfs_utils::HashAlgorithm; +use rustfs_checksums::ChecksumAlgorithm; use std::collections::HashMap; use crate::client::{api_put_object::PutObjectOptions, api_s3_datatypes::ObjectPart}; @@ -123,33 +123,35 @@ impl ChecksumMode { if u == ChecksumMode::ChecksumCRC32 as u8 || u == ChecksumMode::ChecksumCRC32C as u8 { 4 } else if u == ChecksumMode::ChecksumSHA1 as u8 { - 4 //sha1.size + use sha1::Digest; + sha1::Sha1::output_size() as usize } else if u == ChecksumMode::ChecksumSHA256 as u8 { - 4 //sha256.size + use sha2::Digest; + sha2::Sha256::output_size() as usize } else if u == ChecksumMode::ChecksumCRC64NVME as u8 { - 4 //crc64.size + 8 } else { 0 
} } - pub fn hasher(&self) -> Result { + pub fn hasher(&self) -> Result, std::io::Error> { match /*C_ChecksumMask & **/self { - /*ChecksumMode::ChecksumCRC32 => { - return Ok(Box::new(crc32fast::Hasher::new())); - }*/ - /*ChecksumMode::ChecksumCRC32C => { - return Ok(Box::new(crc32::new(crc32.MakeTable(crc32.Castagnoli)))); + ChecksumMode::ChecksumCRC32 => { + return Ok(ChecksumAlgorithm::Crc32.into_impl()); + } + ChecksumMode::ChecksumCRC32C => { + return Ok(ChecksumAlgorithm::Crc32c.into_impl()); } ChecksumMode::ChecksumSHA1 => { - return Ok(Box::new(sha1::new())); - }*/ + return Ok(ChecksumAlgorithm::Sha1.into_impl()); + } ChecksumMode::ChecksumSHA256 => { - return Ok(HashAlgorithm::SHA256); + return Ok(ChecksumAlgorithm::Sha256.into_impl()); + } + ChecksumMode::ChecksumCRC64NVME => { + return Ok(ChecksumAlgorithm::Crc64Nvme.into_impl()); } - /*ChecksumMode::ChecksumCRC64NVME => { - return Ok(Box::new(crc64nvme.New()); - }*/ _ => return Err(std::io::Error::other("unsupported checksum type")), } } @@ -170,7 +172,8 @@ impl ChecksumMode { return Ok("".to_string()); } let mut h = self.hasher()?; - let hash = h.hash_encode(b); + h.update(b); + let hash = h.finalize(); Ok(base64_encode(hash.as_ref())) } @@ -227,7 +230,8 @@ impl ChecksumMode { let c = self.base(); let crc_bytes = Vec::::with_capacity(p.len() * self.raw_byte_len() as usize); let mut h = self.hasher()?; - let hash = h.hash_encode(crc_bytes.as_ref()); + h.update(crc_bytes.as_ref()); + let hash = h.finalize(); Ok(Checksum { checksum_type: self.clone(), r: hash.as_ref().to_vec(), diff --git a/crates/ecstore/src/client/api_put_object.rs b/crates/ecstore/src/client/api_put_object.rs index a105db72d..19f38895d 100644 --- a/crates/ecstore/src/client/api_put_object.rs +++ b/crates/ecstore/src/client/api_put_object.rs @@ -135,7 +135,7 @@ impl Default for PutObjectOptions { #[allow(dead_code)] impl PutObjectOptions { - fn set_match_tag(&mut self, etag: &str) { + fn set_match_etag(&mut self, etag: &str) { if etag 
== "*" { self.custom_header .insert("If-Match", HeaderValue::from_str("*").expect("err")); @@ -145,7 +145,7 @@ impl PutObjectOptions { } } - fn set_match_tag_except(&mut self, etag: &str) { + fn set_match_etag_except(&mut self, etag: &str) { if etag == "*" { self.custom_header .insert("If-None-Match", HeaderValue::from_str("*").expect("err")); @@ -366,7 +366,8 @@ impl TransitionClient { md5_base64 = base64_encode(hash.as_ref()); } else { let mut crc = opts.auto_checksum.hasher()?; - let csum = crc.hash_encode(&buf[..length]); + crc.update(&buf[..length]); + let csum = crc.finalize(); if let Ok(header_name) = HeaderName::from_bytes(opts.auto_checksum.key().as_bytes()) { custom_header.insert(header_name, base64_encode(csum.as_ref()).parse().expect("err")); diff --git a/crates/ecstore/src/client/api_put_object_multipart.rs b/crates/ecstore/src/client/api_put_object_multipart.rs index a315c0907..272556b18 100644 --- a/crates/ecstore/src/client/api_put_object_multipart.rs +++ b/crates/ecstore/src/client/api_put_object_multipart.rs @@ -18,20 +18,15 @@ #![allow(clippy::all)] use bytes::Bytes; -use http::{HeaderMap, HeaderName, HeaderValue, StatusCode}; +use http::{HeaderMap, HeaderName, StatusCode}; use s3s::S3ErrorCode; -use std::io::Read; -use std::{collections::HashMap, sync::Arc}; -use time::{OffsetDateTime, format_description}; -use tokio_util::sync::CancellationToken; +use std::collections::HashMap; +use time::OffsetDateTime; use tracing::warn; -use tracing::{error, info}; -use url::form_urlencoded::Serializer; use uuid::Uuid; use s3s::header::{X_AMZ_EXPIRATION, X_AMZ_VERSION_ID}; use s3s::{Body, dto::StreamingBlob}; -//use crate::disk::{Reader, BufferReader}; use crate::client::{ api_error_response::{ err_entity_too_large, err_entity_too_small, err_invalid_argument, http_resp_to_error_response, to_error_response, @@ -132,7 +127,8 @@ impl TransitionClient { //} if hash_sums.len() == 0 { let mut crc = opts.auto_checksum.hasher()?; - let csum = 
crc.hash_encode(&buf[..length]); + crc.update(&buf[..length]); + let csum = crc.finalize(); if let Ok(header_name) = HeaderName::from_bytes(opts.auto_checksum.key().as_bytes()) { custom_header.insert(header_name, base64_encode(csum.as_ref()).parse().expect("err")); diff --git a/crates/ecstore/src/client/api_put_object_streaming.rs b/crates/ecstore/src/client/api_put_object_streaming.rs index e745693e1..ca985ecae 100644 --- a/crates/ecstore/src/client/api_put_object_streaming.rs +++ b/crates/ecstore/src/client/api_put_object_streaming.rs @@ -156,7 +156,8 @@ impl TransitionClient { md5_base64 = base64_encode(hash.as_ref()); } else { let mut crc = opts.auto_checksum.hasher()?; - let csum = crc.hash_encode(&buf[..length]); + crc.update(&buf[..length]); + let csum = crc.finalize(); if let Ok(header_name) = HeaderName::from_bytes(opts.auto_checksum.key().as_bytes()) { custom_header.insert(header_name, base64_encode(csum.as_ref()).parse().expect("err")); @@ -303,7 +304,8 @@ impl TransitionClient { let mut custom_header = HeaderMap::new(); if !opts.send_content_md5 { let mut crc = opts.auto_checksum.hasher()?; - let csum = crc.hash_encode(&buf[..length]); + crc.update(&buf[..length]); + let csum = crc.finalize(); if let Ok(header_name) = HeaderName::from_bytes(opts.auto_checksum.key().as_bytes()) { custom_header.insert(header_name, base64_encode(csum.as_ref()).parse().expect("err")); From 61ce97a63d36b89dae56e4ca4886b8c28023da05 Mon Sep 17 00:00:00 2001 From: likewu Date: Fri, 25 Jul 2025 11:18:58 +0800 Subject: [PATCH 16/26] fix --- crates/checksums/src/base64.rs | 14 +++ crates/checksums/src/error.rs | 14 +++ crates/checksums/src/http.rs | 91 +++++++------------ crates/checksums/src/lib.rs | 42 +++++---- crates/ecstore/src/checksum.rs | 21 ++++- .../src/client/api_put_object_multipart.rs | 4 +- 6 files changed, 104 insertions(+), 82 deletions(-) diff --git a/crates/checksums/src/base64.rs b/crates/checksums/src/base64.rs index f2e1f298c..89069e9af 100644 --- 
a/crates/checksums/src/base64.rs +++ b/crates/checksums/src/base64.rs @@ -1,3 +1,17 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use base64_simd::STANDARD; use std::error::Error; diff --git a/crates/checksums/src/error.rs b/crates/checksums/src/error.rs index 47fec5971..fcdb6d8d6 100644 --- a/crates/checksums/src/error.rs +++ b/crates/checksums/src/error.rs @@ -1,3 +1,17 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use std::error::Error; use std::fmt; diff --git a/crates/checksums/src/http.rs b/crates/checksums/src/http.rs index 1ba569e1d..915dd976c 100644 --- a/crates/checksums/src/http.rs +++ b/crates/checksums/src/http.rs @@ -1,11 +1,22 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use crate::base64; use http::header::{HeaderMap, HeaderValue}; use crate::Crc64Nvme; -use crate::{ - Checksum, Crc32, Crc32c, Md5, Sha1, Sha256, CRC_32_C_NAME, CRC_32_NAME, CRC_64_NVME_NAME, - SHA_1_NAME, SHA_256_NAME, -}; +use crate::{CRC_32_C_NAME, CRC_32_NAME, CRC_64_NVME_NAME, Checksum, Crc32, Crc32c, Md5, SHA_1_NAME, SHA_256_NAME, Sha1, Sha256}; pub const CRC_32_HEADER_NAME: &str = "x-amz-checksum-crc32"; pub const CRC_32_C_HEADER_NAME: &str = "x-amz-checksum-crc32c"; @@ -15,13 +26,8 @@ pub const CRC_64_NVME_HEADER_NAME: &str = "x-amz-checksum-crc64nvme"; pub(crate) static MD5_HEADER_NAME: &str = "content-md5"; -pub const CHECKSUM_ALGORITHMS_IN_PRIORITY_ORDER: [&str; 5] = [ - CRC_64_NVME_NAME, - CRC_32_C_NAME, - CRC_32_NAME, - SHA_1_NAME, - SHA_256_NAME, -]; +pub const CHECKSUM_ALGORITHMS_IN_PRIORITY_ORDER: [&str; 5] = + [CRC_64_NVME_NAME, CRC_32_C_NAME, CRC_32_NAME, SHA_1_NAME, SHA_256_NAME]; pub trait HttpChecksum: Checksum + Send + Sync { fn headers(self: Box) -> HeaderMap { @@ -35,18 +41,14 @@ pub trait HttpChecksum: Checksum + Send + Sync { fn header_value(self: Box) -> HeaderValue { let hash = self.finalize(); - HeaderValue::from_str(&base64::encode(&hash[..])) - .expect("base64 encoded bytes are always valid header values") + HeaderValue::from_str(&base64::encode(&hash[..])).expect("base64 encoded bytes are always valid header values") } fn size(&self) -> u64 { let trailer_name_size_in_bytes = self.header_name().len(); - let base64_encoded_checksum_size_in_bytes = - base64::encoded_length(Checksum::size(self) as usize); + let 
base64_encoded_checksum_size_in_bytes = base64::encoded_length(Checksum::size(self) as usize); - let size = trailer_name_size_in_bytes - + ":".len() - + base64_encoded_checksum_size_in_bytes; + let size = trailer_name_size_in_bytes + ":".len() + base64_encoded_checksum_size_in_bytes; size as u64 } @@ -93,18 +95,13 @@ mod tests { use crate::base64; use bytes::Bytes; - use crate::{ - ChecksumAlgorithm, CRC_32_C_NAME, CRC_32_NAME, CRC_64_NVME_NAME, SHA_1_NAME, SHA_256_NAME, - }; + use crate::{CRC_32_C_NAME, CRC_32_NAME, CRC_64_NVME_NAME, ChecksumAlgorithm, SHA_1_NAME, SHA_256_NAME}; use super::HttpChecksum; #[test] fn test_trailer_length_of_crc32_checksum_body() { - let checksum = CRC_32_NAME - .parse::() - .unwrap() - .into_impl(); + let checksum = CRC_32_NAME.parse::().unwrap().into_impl(); let expected_size = 29; let actual_size = HttpChecksum::size(&*checksum); assert_eq!(expected_size, actual_size) @@ -112,10 +109,7 @@ mod tests { #[test] fn test_trailer_value_of_crc32_checksum_body() { - let checksum = CRC_32_NAME - .parse::() - .unwrap() - .into_impl(); + let checksum = CRC_32_NAME.parse::().unwrap().into_impl(); // The CRC32 of an empty string is all zeroes let expected_value = Bytes::from_static(b"\0\0\0\0"); let expected_value = base64::encode(&expected_value); @@ -125,10 +119,7 @@ mod tests { #[test] fn test_trailer_length_of_crc32c_checksum_body() { - let checksum = CRC_32_C_NAME - .parse::() - .unwrap() - .into_impl(); + let checksum = CRC_32_C_NAME.parse::().unwrap().into_impl(); let expected_size = 30; let actual_size = HttpChecksum::size(&*checksum); assert_eq!(expected_size, actual_size) @@ -136,10 +127,7 @@ mod tests { #[test] fn test_trailer_value_of_crc32c_checksum_body() { - let checksum = CRC_32_C_NAME - .parse::() - .unwrap() - .into_impl(); + let checksum = CRC_32_C_NAME.parse::().unwrap().into_impl(); // The CRC32C of an empty string is all zeroes let expected_value = Bytes::from_static(b"\0\0\0\0"); let expected_value = 
base64::encode(&expected_value); @@ -149,10 +137,7 @@ mod tests { #[test] fn test_trailer_length_of_crc64nvme_checksum_body() { - let checksum = CRC_64_NVME_NAME - .parse::() - .unwrap() - .into_impl(); + let checksum = CRC_64_NVME_NAME.parse::().unwrap().into_impl(); let expected_size = 37; let actual_size = HttpChecksum::size(&*checksum); assert_eq!(expected_size, actual_size) @@ -160,10 +145,7 @@ mod tests { #[test] fn test_trailer_value_of_crc64nvme_checksum_body() { - let checksum = CRC_64_NVME_NAME - .parse::() - .unwrap() - .into_impl(); + let checksum = CRC_64_NVME_NAME.parse::().unwrap().into_impl(); // The CRC64NVME of an empty string is all zeroes let expected_value = Bytes::from_static(b"\0\0\0\0\0\0\0\0"); let expected_value = base64::encode(&expected_value); @@ -184,8 +166,8 @@ mod tests { let checksum = SHA_1_NAME.parse::().unwrap().into_impl(); // The SHA1 of an empty string is da39a3ee5e6b4b0d3255bfef95601890afd80709 let expected_value = Bytes::from_static(&[ - 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, - 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09, + 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, + 0x09, ]); let expected_value = base64::encode(&expected_value); let actual_value = checksum.header_value(); @@ -194,10 +176,7 @@ mod tests { #[test] fn test_trailer_length_of_sha256_checksum_body() { - let checksum = SHA_256_NAME - .parse::() - .unwrap() - .into_impl(); + let checksum = SHA_256_NAME.parse::().unwrap().into_impl(); let expected_size = 66; let actual_size = HttpChecksum::size(&*checksum); assert_eq!(expected_size, actual_size) @@ -205,14 +184,10 @@ mod tests { #[test] fn test_trailer_value_of_sha256_checksum_body() { - let checksum = SHA_256_NAME - .parse::() - .unwrap() - .into_impl(); + let checksum = SHA_256_NAME.parse::().unwrap().into_impl(); let expected_value = Bytes::from_static(&[ - 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 
0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, - 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, - 0x78, 0x52, 0xb8, 0x55, + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, + 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, ]); let expected_value = base64::encode(&expected_value); let actual_value = checksum.header_value(); diff --git a/crates/checksums/src/lib.rs b/crates/checksums/src/lib.rs index 4d4875dae..bb8805ba3 100644 --- a/crates/checksums/src/lib.rs +++ b/crates/checksums/src/lib.rs @@ -1,5 +1,18 @@ -#![cfg_attr(docsrs, feature(doc_auto_cfg))] +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#![cfg_attr(docsrs, feature(doc_auto_cfg))] #![allow(clippy::derive_partial_eq_without_eq)] #![warn( // missing_docs, @@ -319,15 +332,12 @@ impl Checksum for Md5 { #[cfg(test)] mod tests { use super::{ - http::{ - CRC_32_C_HEADER_NAME, CRC_32_HEADER_NAME, MD5_HEADER_NAME, SHA_1_HEADER_NAME, - SHA_256_HEADER_NAME, - }, Crc32, Crc32c, Md5, Sha1, Sha256, + http::{CRC_32_C_HEADER_NAME, CRC_32_HEADER_NAME, MD5_HEADER_NAME, SHA_1_HEADER_NAME, SHA_256_HEADER_NAME}, }; - use crate::http::HttpChecksum; use crate::ChecksumAlgorithm; + use crate::http::HttpChecksum; use crate::base64; use http::HeaderValue; @@ -338,12 +348,10 @@ mod tests { fn base64_encoded_checksum_to_hex_string(header_value: &HeaderValue) -> String { let decoded_checksum = base64::decode(header_value.to_str().unwrap()).unwrap(); - let decoded_checksum = decoded_checksum - .into_iter() - .fold(String::new(), |mut acc, byte| { - write!(acc, "{byte:02X?}").expect("string will always be writeable"); - acc - }); + let decoded_checksum = decoded_checksum.into_iter().fold(String::new(), |mut acc, byte| { + write!(acc, "{byte:02X?}").expect("string will always be writeable"); + acc + }); format!("0x{}", decoded_checksum) } @@ -377,7 +385,7 @@ mod tests { #[test] fn test_crc64nvme_checksum() { - use crate::{http::CRC_64_NVME_HEADER_NAME, Crc64Nvme}; + use crate::{Crc64Nvme, http::CRC_64_NVME_HEADER_NAME}; let mut checksum = Crc64Nvme::default(); checksum.update(TEST_DATA.as_bytes()); let checksum_result = Box::new(checksum).headers(); @@ -410,8 +418,7 @@ mod tests { let encoded_checksum = checksum_result.get(SHA_256_HEADER_NAME).unwrap(); let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum); - let expected_checksum = - "0x916F0027A575074CE72A331777C3478D6513F786A591BD892DA1A577BF2335F9"; + let expected_checksum = "0x916F0027A575074CE72A331777C3478D6513F786A591BD892DA1A577BF2335F9"; assert_eq!(decoded_checksum, expected_checksum); } @@ -434,9 +441,6 @@ mod tests { let error = "some invalid 
checksum algorithm" .parse::() .expect_err("it should error"); - assert_eq!( - "some invalid checksum algorithm", - error.checksum_algorithm() - ); + assert_eq!("some invalid checksum algorithm", error.checksum_algorithm()); } } diff --git a/crates/ecstore/src/checksum.rs b/crates/ecstore/src/checksum.rs index a156a6817..72c150fb8 100644 --- a/crates/ecstore/src/checksum.rs +++ b/crates/ecstore/src/checksum.rs @@ -103,15 +103,30 @@ impl ChecksumMode { } pub fn can_composite(&self) -> bool { - todo!(); + let s = EnumSet::from(*self).intersection(*C_ChecksumMask); + match s.as_u8() { + 2_u8 => true, + 4_u8 => true, + 8_u8 => true, + 16_u8 => true, + _ => false, + } } pub fn can_merge_crc(&self) -> bool { - todo!(); + /*switch c & checksumMask { + case ChecksumCRC32, ChecksumCRC32C, ChecksumCRC64NVME: + return true + } + return false*/ } pub fn full_object_requested(&self) -> bool { - todo!(); + /*switch self & (ChecksumFullObject | checksumMask) { + case ChecksumFullObjectCRC32C, ChecksumFullObjectCRC32, ChecksumCRC64NVME: + return true + } + return false*/ } pub fn key_capitalized(&self) -> String { diff --git a/crates/ecstore/src/client/api_put_object_multipart.rs b/crates/ecstore/src/client/api_put_object_multipart.rs index 272556b18..ec7557081 100644 --- a/crates/ecstore/src/client/api_put_object_multipart.rs +++ b/crates/ecstore/src/client/api_put_object_multipart.rs @@ -25,8 +25,6 @@ use time::OffsetDateTime; use tracing::warn; use uuid::Uuid; -use s3s::header::{X_AMZ_EXPIRATION, X_AMZ_VERSION_ID}; -use s3s::{Body, dto::StreamingBlob}; use crate::client::{ api_error_response::{ err_entity_too_large, err_entity_too_small, err_invalid_argument, http_resp_to_error_response, to_error_response, @@ -45,6 +43,8 @@ use crate::{ store_api::{GetObjectReader, StorageAPI}, }; use rustfs_utils::{crypto::base64_encode, path::trim_etag}; +use s3s::header::{X_AMZ_EXPIRATION, X_AMZ_VERSION_ID}; +use s3s::{Body, dto::StreamingBlob}; impl TransitionClient { pub async fn 
put_object_multipart( From 86e82fff9d7eb496e4fa273fccf204443742b906 Mon Sep 17 00:00:00 2001 From: likewu Date: Mon, 28 Jul 2025 11:36:54 +0800 Subject: [PATCH 17/26] fix --- Cargo.lock | 94 ++++++++++++++++++++++++++---------------------------- 1 file changed, 45 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9c7d5b0dd..3f82752ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -504,9 +504,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1237c0ae75a0f3765f58910ff9cdd0a12eeb39ab2f4c7de23262f337f0aacbb3" +checksum = "19634d6336019ef220f09fd31168ce5c184b295cbf80345437cc36094ef223ca" dependencies = [ "async-lock", "cfg-if", @@ -517,8 +517,7 @@ dependencies = [ "polling", "rustix 1.0.8", "slab", - "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -534,9 +533,9 @@ dependencies = [ [[package]] name = "async-process" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde3f4e40e6021d7acffc90095cbd6dc54cb593903d1de5832f435eb274b85dc" +checksum = "65daa13722ad51e6ab1a1b9c01299142bc75135b337923cfa10e79bbbd669f00" dependencies = [ "async-channel", "async-io", @@ -548,7 +547,6 @@ dependencies = [ "event-listener", "futures-lite", "rustix 1.0.8", - "tracing", ] [[package]] @@ -564,9 +562,9 @@ dependencies = [ [[package]] name = "async-signal" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7605a4e50d4b06df3898d5a70bf5fde51ed9059b0434b73105193bc27acce0d" +checksum = "f567af260ef69e1d52c2b560ce0ea230763e6fbb9214a85d768760a920e3e3c1" dependencies = [ "async-io", "async-lock", @@ -577,7 +575,7 @@ dependencies = [ "rustix 1.0.8", "signal-hook-registry", "slab", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -718,9 +716,9 @@ dependencies = [ [[package]] name = 
"aws-lc-rs" -version = "1.13.2" +version = "1.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08b5d4e069cbc868041a64bd68dc8cb39a0d79585cd6c5a24caa8c2d622121be" +checksum = "5c953fe1ba023e6b7730c0d4b031d06f267f23a46167dcbd40316644b10a17ba" dependencies = [ "aws-lc-sys", "zeroize", @@ -766,9 +764,9 @@ dependencies = [ [[package]] name = "aws-sdk-s3" -version = "1.99.0" +version = "1.100.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2d64d68c93000d5792b2a25fbeaafb90985fa80a1c8adfe93f24fb271296f5f" +checksum = "8c5eafbdcd898114b839ba68ac628e31c4cfc3e11dfca38dc1b2de2f35bb6270" dependencies = [ "aws-credential-types", "aws-runtime", @@ -800,9 +798,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.77.0" +version = "1.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18f2f37fea82468fe3f5a059542c05392ef680c4f7f00e0db02df8b6e5c7d0c6" +checksum = "dbd7bc4bd34303733bded362c4c997a39130eac4310257c79aae8484b1c4b724" dependencies = [ "aws-credential-types", "aws-runtime", @@ -822,9 +820,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.78.0" +version = "1.79.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecb4f6eada20e0193450cd48b12ed05e1e66baac86f39160191651b932f2b7d9" +checksum = "77358d25f781bb106c1a69531231d4fd12c6be904edb0c47198c604df5a2dbca" dependencies = [ "aws-credential-types", "aws-runtime", @@ -844,9 +842,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.79.0" +version = "1.80.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317377afba3498fca4948c5d32b399ef9a5ad35561a1e8a6f2ac7273dabf802d" +checksum = "06e3ed2a9b828ae7763ddaed41d51724d2661a50c45f845b08967e52f4939cfc" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1015,9 +1013,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.8.4" +version = "1.8.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3aaec682eb189e43c8a19c3dab2fe54590ad5f2cc2d26ab27608a20f2acf81c" +checksum = "660f70d9d8af6876b4c9aa8dcb0dbaf0f89b04ee9a4455bea1b4ba03b15f26f6" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -1039,9 +1037,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.8.3" +version = "1.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852b9226cb60b78ce9369022c0df678af1cac231c882d5da97a0c4e03be6e67" +checksum = "937a49ecf061895fca4a6dd8e864208ed9be7546c0527d04bc07d502ec5fba1c" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -1586,9 +1584,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.29" +version = "1.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c1599538de2394445747c8cf7935946e3cc27e9625f889d979bfb2aaf569362" +checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" dependencies = [ "jobserver", "libc", @@ -5073,9 +5071,9 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "io-uring" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" dependencies = [ "bitflags 2.9.1", "cfg-if", @@ -5477,13 +5475,13 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libredox" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1580801010e535496706ba011c15f8532df6b42297d2e471fec38ceadd8c0638" +checksum = "4488594b9328dee448adb906d8b126d9b7deb7cf5c22161ee591610bb1be83c0" dependencies = [ "bitflags 2.9.1", "libc", - "redox_syscall 
0.5.13", + "redox_syscall 0.5.16", ] [[package]] @@ -6692,7 +6690,7 @@ checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.13", + "redox_syscall 0.5.16", "smallvec", "windows-targets 0.52.6", ] @@ -7071,17 +7069,16 @@ dependencies = [ [[package]] name = "polling" -version = "3.8.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b53a684391ad002dd6a596ceb6c74fd004fdce75f4be2e3f615068abbea5fd50" +checksum = "8ee9b2fa7a4517d2c91ff5bc6c297a427a96749d15f98fcdbb22c05571a4d4b7" dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi", "pin-project-lite", "rustix 1.0.8", - "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -7161,9 +7158,9 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.35" +version = "0.2.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061c1221631e079b26479d25bbf2275bfe5917ae8419cd7e34f13bfc2aa7539a" +checksum = "ff24dfcda44452b9816fff4cd4227e1bb73ff5a2f1bc1105aa92fb8565ce44d2" dependencies = [ "proc-macro2", "syn 2.0.104", @@ -7609,9 +7606,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.13" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6" +checksum = "7251471db004e509f4e75a62cca9435365b5ec7bcdff530d612ac7c87c44a792" dependencies = [ "bitflags 2.9.1", ] @@ -9625,14 +9622,13 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77a8c5abcaf0f9ce05d62342b7d298c346515365c36b673df4ebe3ced01fde8" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "rustversion", "syn 2.0.104", ] @@ -10112,9 +10108,9 @@ checksum = 
"1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.46.1" +version = "1.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cc3a2344dafbe23a245241fe8b09735b521110d30fcefbbd5feb1797ca35d17" +checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35" dependencies = [ "backtrace", "bytes", @@ -10125,10 +10121,10 @@ dependencies = [ "pin-project-lite", "signal-hook-registry", "slab", - "socket2 0.5.10", + "socket2 0.6.0", "tokio-macros", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -11134,9 +11130,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" dependencies = [ "rustls-pki-types", ] From 969bdc2efc89ab54df6de901b73b2b66a71341b6 Mon Sep 17 00:00:00 2001 From: likewu Date: Mon, 28 Jul 2025 20:38:39 +0800 Subject: [PATCH 18/26] fix --- cli/rustfs-gui/src/utils/helper.rs | 2 +- crates/checksums/src/base64.rs | 8 ++--- crates/checksums/src/lib.rs | 2 +- .../src/client/api_put_object_multipart.rs | 9 ++---- crates/utils/src/retry.rs | 32 +++++++++++-------- crates/utils/src/sys/user_agent.rs | 2 +- 6 files changed, 27 insertions(+), 28 deletions(-) diff --git a/cli/rustfs-gui/src/utils/helper.rs b/cli/rustfs-gui/src/utils/helper.rs index d13261c68..ec63f9312 100644 --- a/cli/rustfs-gui/src/utils/helper.rs +++ b/cli/rustfs-gui/src/utils/helper.rs @@ -371,7 +371,7 @@ impl ServiceManager { StdCommand::new("taskkill") .arg("/F") .arg("/PID") - .arg(&service_pid.to_string()) + .arg(service_pid.to_string()) .output()?; } diff --git a/crates/checksums/src/base64.rs b/crates/checksums/src/base64.rs index 89069e9af..d70389e4b 100644 --- a/crates/checksums/src/base64.rs +++ 
b/crates/checksums/src/base64.rs @@ -16,7 +16,7 @@ use base64_simd::STANDARD; use std::error::Error; #[derive(Debug)] -pub struct DecodeError(base64_simd::Error); +pub(crate) struct DecodeError(base64_simd::Error); impl Error for DecodeError { fn source(&self) -> Option<&(dyn Error + 'static)> { @@ -30,14 +30,14 @@ impl std::fmt::Display for DecodeError { } } -pub fn decode(input: impl AsRef) -> Result, DecodeError> { +pub(crate) fn decode(input: impl AsRef) -> Result, DecodeError> { STANDARD.decode_to_vec(input.as_ref()).map_err(DecodeError) } -pub fn encode(input: impl AsRef<[u8]>) -> String { +pub(crate) fn encode(input: impl AsRef<[u8]>) -> String { STANDARD.encode_to_string(input.as_ref()) } -pub fn encoded_length(length: usize) -> usize { +pub(crate) fn encoded_length(length: usize) -> usize { STANDARD.encoded_length(length) } diff --git a/crates/checksums/src/lib.rs b/crates/checksums/src/lib.rs index bb8805ba3..4501725ad 100644 --- a/crates/checksums/src/lib.rs +++ b/crates/checksums/src/lib.rs @@ -353,7 +353,7 @@ mod tests { acc }); - format!("0x{}", decoded_checksum) + format!("0x{decoded_checksum}") } #[test] diff --git a/crates/ecstore/src/client/api_put_object_multipart.rs b/crates/ecstore/src/client/api_put_object_multipart.rs index ec7557081..61902c7fa 100644 --- a/crates/ecstore/src/client/api_put_object_multipart.rs +++ b/crates/ecstore/src/client/api_put_object_multipart.rs @@ -34,17 +34,12 @@ use crate::client::{ api_s3_datatypes::{ CompleteMultipartUpload, CompleteMultipartUploadResult, CompletePart, InitiateMultipartUploadResult, ObjectPart, }, - constants::{ABS_MIN_PART_SIZE, ISO8601_DATEFORMAT, MAX_PART_SIZE, MAX_SINGLE_PUT_OBJECT_SIZE}, + constants::{ISO8601_DATEFORMAT, MAX_PART_SIZE, MAX_SINGLE_PUT_OBJECT_SIZE}, transition_api::{ReaderImpl, RequestMetadata, TransitionClient, UploadInfo}, }; -use crate::{ - checksum::ChecksumMode, - disk::DiskAPI, - store_api::{GetObjectReader, StorageAPI}, -}; +use crate::{checksum::ChecksumMode, 
disk::DiskAPI, store_api::StorageAPI}; use rustfs_utils::{crypto::base64_encode, path::trim_etag}; use s3s::header::{X_AMZ_EXPIRATION, X_AMZ_VERSION_ID}; -use s3s::{Body, dto::StreamingBlob}; impl TransitionClient { pub async fn put_object_multipart( diff --git a/crates/utils/src/retry.rs b/crates/utils/src/retry.rs index 13d548d9a..d8d69e03c 100644 --- a/crates/utils/src/retry.rs +++ b/crates/utils/src/retry.rs @@ -18,7 +18,7 @@ use std::{ pin::Pin, sync::LazyLock, task::{Context, Poll}, - time::{Duration, Instant}, + time::Duration, }; use tokio::time::interval; @@ -114,20 +114,24 @@ pub fn is_http_status_retryable(http_statuscode: &http::StatusCode) -> bool { } pub fn is_request_error_retryable(_err: std::io::Error) -> bool { - /*if err == Err::Canceled) || err == Err::DeadlineExceeded) { - return ctx.Err() == nil; + /*if err == Err::Canceled || err == Err::DeadlineExceeded { + return err() == nil; } - let ue = err.(*url.Error); - if ue.is_ok() { - let e = ue.Unwrap(); - switch e.(type) { - case x509.UnknownAuthorityError: - return false; - } - switch e.Error() { - case "http: server gave HTTP response to HTTPS client": - return false; - } + let uerr = err.(*url.Error); + if uerr.is_ok() { + let e = uerr.unwrap(); + return match e.type { + x509.UnknownAuthorityError => { + false + } + _ => true, + }; + return match e.error() { + "http: server gave HTTP response to HTTPS client" => { + false + } + _ => rue, + }; } true*/ todo!(); diff --git a/crates/utils/src/sys/user_agent.rs b/crates/utils/src/sys/user_agent.rs index e50b6f65e..fb549ad04 100644 --- a/crates/utils/src/sys/user_agent.rs +++ b/crates/utils/src/sys/user_agent.rs @@ -88,7 +88,7 @@ impl UserAgent { Some(version) => version, None => "Windows NT Unknown".to_string(), }; - format!("Windows NT {}", version) + format!("Windows NT {version}") } #[cfg(not(windows))] From c88a008f69d40fe44303a1441b8c32e5e5dcea18 Mon Sep 17 00:00:00 2001 From: likewu Date: Mon, 28 Jul 2025 21:16:45 +0800 Subject: [PATCH 
19/26] fix --- crates/checksums/src/base64.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/checksums/src/base64.rs b/crates/checksums/src/base64.rs index d70389e4b..a7cc148ce 100644 --- a/crates/checksums/src/base64.rs +++ b/crates/checksums/src/base64.rs @@ -11,6 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +#![allow(unused_must_use)] use base64_simd::STANDARD; use std::error::Error; From 72d0927a69733a618c0d3b7508a067362b3df0fa Mon Sep 17 00:00:00 2001 From: likewu Date: Tue, 29 Jul 2025 08:17:08 +0800 Subject: [PATCH 20/26] fix --- crates/checksums/src/base64.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/checksums/src/base64.rs b/crates/checksums/src/base64.rs index a7cc148ce..a05ae00f7 100644 --- a/crates/checksums/src/base64.rs +++ b/crates/checksums/src/base64.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. #![allow(unused_must_use)] +#![allow(clippy::all)] use base64_simd::STANDARD; use std::error::Error; From c36a0ae5b8037505c0b73b461bb83126c446c2e5 Mon Sep 17 00:00:00 2001 From: likewu Date: Tue, 29 Jul 2025 08:56:55 +0800 Subject: [PATCH 21/26] fix --- crates/checksums/src/base64.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/checksums/src/base64.rs b/crates/checksums/src/base64.rs index a05ae00f7..d434a4f0b 100644 --- a/crates/checksums/src/base64.rs +++ b/crates/checksums/src/base64.rs @@ -11,8 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-#![allow(unused_must_use)] -#![allow(clippy::all)] +#![allow(dead_code)] use base64_simd::STANDARD; use std::error::Error; From 22dc2676f66072ef117bc4a8f0047bd546e93d6e Mon Sep 17 00:00:00 2001 From: likewu Date: Tue, 29 Jul 2025 09:57:26 +0800 Subject: [PATCH 22/26] fix --- crates/ecstore/src/client/api_put_object_multipart.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/ecstore/src/client/api_put_object_multipart.rs b/crates/ecstore/src/client/api_put_object_multipart.rs index 61902c7fa..84ba5810f 100644 --- a/crates/ecstore/src/client/api_put_object_multipart.rs +++ b/crates/ecstore/src/client/api_put_object_multipart.rs @@ -25,6 +25,7 @@ use time::OffsetDateTime; use tracing::warn; use uuid::Uuid; +use crate::checksum::ChecksumMode; use crate::client::{ api_error_response::{ err_entity_too_large, err_entity_too_small, err_invalid_argument, http_resp_to_error_response, to_error_response, @@ -37,7 +38,6 @@ use crate::client::{ constants::{ISO8601_DATEFORMAT, MAX_PART_SIZE, MAX_SINGLE_PUT_OBJECT_SIZE}, transition_api::{ReaderImpl, RequestMetadata, TransitionClient, UploadInfo}, }; -use crate::{checksum::ChecksumMode, disk::DiskAPI, store_api::StorageAPI}; use rustfs_utils::{crypto::base64_encode, path::trim_etag}; use s3s::header::{X_AMZ_EXPIRATION, X_AMZ_VERSION_ID}; From 6964363e0aab4aad1c58ab75a077a08ec7777ce7 Mon Sep 17 00:00:00 2001 From: likewu Date: Wed, 6 Aug 2025 09:37:19 +0800 Subject: [PATCH 23/26] ilm env --- crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs | 2 +- crates/ecstore/src/bucket/lifecycle/lifecycle.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs index e28488215..e73798455 100644 --- a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs +++ b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs @@ -587,7 +587,7 @@ impl TransitionState { pub 
async fn init_background_expiry(api: Arc) { let mut workers = num_cpus::get() / 2; //globalILMConfig.getExpirationWorkers() - if let Ok(env_expiration_workers) = env::var("_RUSTFS_EXPIRATION_WORKERS") { + if let Ok(env_expiration_workers) = env::var("_RUSTFS_ILM_EXPIRATION_WORKERS") { if let Ok(num_expirations) = env_expiration_workers.parse::() { workers = num_expirations; } diff --git a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs index 22caf860e..f592a3fd4 100644 --- a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs +++ b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs @@ -604,7 +604,7 @@ pub fn expected_expiry_time(mod_time: OffsetDateTime, days: i32) -> OffsetDateTi .to_offset(offset!(-0:00:00)) .saturating_add(Duration::days(days as i64)); let mut hour = 3600; - if let Ok(env_ilm_hour) = env::var("_RUSTFS_ILM_HOUR") { + if let Ok(env_ilm_hour) = env::var("_RUSTFS_ILM_PROCESS_TIME") { if let Ok(num_hour) = env_ilm_hour.parse::() { hour = num_hour; } From de1da2e870fe72b3062d13aee22a13e5daf27f1e Mon Sep 17 00:00:00 2001 From: likewu Date: Sun, 17 Aug 2025 08:57:50 +0800 Subject: [PATCH 24/26] scanner_item prefix object_name --- crates/ahm/src/scanner/data_scanner.rs | 92 ++++++++++++++++++++++++-- 1 file changed, 87 insertions(+), 5 deletions(-) diff --git a/crates/ahm/src/scanner/data_scanner.rs b/crates/ahm/src/scanner/data_scanner.rs index 3aa4b9dfb..98b587483 100644 --- a/crates/ahm/src/scanner/data_scanner.rs +++ b/crates/ahm/src/scanner/data_scanner.rs @@ -37,10 +37,12 @@ use crate::{ get_ahm_services_cancel_token, }; +use rustfs_utils::path::path_to_bucket_object_with_base_path; use rustfs_common::data_usage::DataUsageInfo; use rustfs_common::metrics::{Metric, Metrics, globalMetrics}; +use rustfs_common::data_usage::SizeSummary; use rustfs_ecstore::cmd::bucket_targets::VersioningConfig; - +use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys; use rustfs_ecstore::disk::RUSTFS_META_BUCKET; 
/// Custom scan mode enum for AHM scanner @@ -1263,11 +1265,91 @@ impl Scanner { } else { // Apply lifecycle actions if let Some(lifecycle_config) = &lifecycle_config { - let mut scanner_item = - ScannerItem::new(bucket.to_string(), Some(lifecycle_config.clone()), versioning_config.clone()); - if let Err(e) = scanner_item.apply_actions(&entry.name, entry.clone()).await { - error!("Failed to apply lifecycle actions for {}/{}: {}", bucket, entry.name, e); + let vcfg = BucketVersioningSys::get(bucket).await.ok(); + + let mut scanner_item = ScannerItem { + //path: Path::new(&self.root).join(&ent_name).to_string_lossy().to_string(), + bucket.to_string(), + prefix: Path::new(&prefix) + .parent() + .unwrap_or(Path::new("")) + .to_string_lossy() + .to_string(), + object_name: entry.name + .file_name() + .map(|name| name.to_string_lossy().into_owned()) + .unwrap_or_default(), + lifecycle: Some(lifecycle_config.clone()), + versioning_config.clone(), + }; + //ScannerItem::new(bucket.to_string(), Some(lifecycle_config.clone()), versioning_config.clone()); + let fivs = match entry.clone().file_info_versions(&scanner_item.bucket) { + Ok(fivs) => fivs, + Err(err) => { + stop_fn(); + return Err(Error::other("skip this file").into()); + } + }; + let mut size_s = SizeSummary::default(); + let obj_infos = match scanner_item.apply_versions_actions(&fivs.versions).await { + Ok(obj_infos) => obj_infos, + Err(err) => { + stop_fn(); + return Err(Error::other("skip this file").into()); + } + }; + + let versioned = if let Some(vcfg) = vcfg.as_ref() { + vcfg.versioned(scanner_item.object_path().to_str().unwrap_or_default()) + } else { + false + }; + + let mut obj_deleted = false; + for info in obj_infos.iter() { + let done = ScannerMetrics::time(ScannerMetric::ApplyVersion); + let sz: i64; + (obj_deleted, sz) = scanner_item.apply_actions(info, &mut size_s).await; + + /*if obj_deleted { + break; + }*/ + + let actual_sz = match info.get_actual_size() { + Ok(size) => size, + Err(_) => continue, 
+ }; + + if info.delete_marker { + size_s.delete_markers += 1; + } + + if info.version_id.is_some() && sz == actual_sz { + size_s.versions += 1; + } + + size_s.total_size += sz as usize; + + if info.delete_marker { + continue; + } } + + for free_version in fivs.free_versions.iter() { + let _obj_info = rustfs_ecstore::store_api::ObjectInfo::from_file_info( + free_version, + &item.bucket, + &item.object_path().to_string_lossy(), + versioned, + ); + let done = ScannerMetrics::time(ScannerMetric::TierObjSweep); + done(); + } + + // todo: global trace + /*if obj_deleted { + return Err(Error::other(ERR_IGNORE_FILE_CONTRIB).into()); + }*/ } // Store object metadata for later analysis From e529566c4a28eb79e1e1d769fb3814de41e015b2 Mon Sep 17 00:00:00 2001 From: likewu Date: Mon, 18 Aug 2025 17:01:56 +0800 Subject: [PATCH 25/26] oi --- crates/ahm/src/scanner/data_scanner.rs | 16 +- crates/ahm/src/scanner/lifecycle.rs | 197 +++++++++++++++++++++---- 2 files changed, 173 insertions(+), 40 deletions(-) diff --git a/crates/ahm/src/scanner/data_scanner.rs b/crates/ahm/src/scanner/data_scanner.rs index 98b587483..02d65fc50 100644 --- a/crates/ahm/src/scanner/data_scanner.rs +++ b/crates/ahm/src/scanner/data_scanner.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::path::{Path, PathBuf}; use std::{ collections::HashMap, sync::Arc, @@ -43,6 +44,7 @@ use rustfs_common::metrics::{Metric, Metrics, globalMetrics}; use rustfs_common::data_usage::SizeSummary; use rustfs_ecstore::cmd::bucket_targets::VersioningConfig; use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys; +use rustfs_ecstore::bucket::versioning::VersioningApi; use rustfs_ecstore::disk::RUSTFS_META_BUCKET; /// Custom scan mode enum for AHM scanner @@ -1269,18 +1271,17 @@ impl Scanner { let mut scanner_item = ScannerItem { //path: Path::new(&self.root).join(&ent_name).to_string_lossy().to_string(), - bucket.to_string(), + bucket: bucket.to_string(), prefix: Path::new(&prefix) .parent() .unwrap_or(Path::new("")) .to_string_lossy() .to_string(), - object_name: entry.name - .file_name() + object_name: entry.file_name() .map(|name| name.to_string_lossy().into_owned()) .unwrap_or_default(), lifecycle: Some(lifecycle_config.clone()), - versioning_config.clone(), + versioning: versioning_config.clone(), }; //ScannerItem::new(bucket.to_string(), Some(lifecycle_config.clone()), versioning_config.clone()); let fivs = match entry.clone().file_info_versions(&scanner_item.bucket) { @@ -1307,7 +1308,6 @@ impl Scanner { let mut obj_deleted = false; for info in obj_infos.iter() { - let done = ScannerMetrics::time(ScannerMetric::ApplyVersion); let sz: i64; (obj_deleted, sz) = scanner_item.apply_actions(info, &mut size_s).await; @@ -1338,12 +1338,10 @@ impl Scanner { for free_version in fivs.free_versions.iter() { let _obj_info = rustfs_ecstore::store_api::ObjectInfo::from_file_info( free_version, - &item.bucket, - &item.object_path().to_string_lossy(), + &scanner_item.bucket, + &scanner_item.object_path().to_string_lossy(), versioned, ); - let done = ScannerMetrics::time(ScannerMetric::TierObjSweep); - done(); } // todo: global trace diff --git a/crates/ahm/src/scanner/lifecycle.rs b/crates/ahm/src/scanner/lifecycle.rs index 5d33399d6..b9bd31135 100644 --- 
a/crates/ahm/src/scanner/lifecycle.rs +++ b/crates/ahm/src/scanner/lifecycle.rs @@ -13,66 +13,197 @@ // limitations under the License. use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::path::{Path, PathBuf}; +use time::OffsetDateTime; +use rustfs_utils::path::path_join; use rustfs_common::metrics::IlmAction; -use rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc; -use rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::{apply_lifecycle_action, eval_action_from_lifecycle}; +use rustfs_common::data_usage::SizeSummary; +use rustfs_ecstore::bucket::lifecycle::{lifecycle, + lifecycle::Lifecycle, + bucket_lifecycle_audit::LcEventSrc, + bucket_lifecycle_ops::{ + apply_lifecycle_action, eval_action_from_lifecycle, + GLOBAL_ExpiryState, GLOBAL_TransitionState, + } +}; use rustfs_ecstore::bucket::metadata_sys::get_object_lock_config; use rustfs_ecstore::cmd::bucket_targets::VersioningConfig; -use rustfs_ecstore::store_api::ObjectInfo; +use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys; +use rustfs_ecstore::bucket::versioning::VersioningApi; +use rustfs_ecstore::bucket::object_lock::objectlock_sys::{BucketObjectLockSys, enforce_retention_for_deletion}; +use rustfs_ecstore::store_api::{ObjectInfo, ObjectToDelete}; use rustfs_filemeta::FileMetaVersion; use rustfs_filemeta::metacache::MetaCacheEntry; +use rustfs_filemeta::FileInfo; use s3s::dto::BucketLifecycleConfiguration as LifecycleConfig; use tracing::info; +use crate::error::{Error, Result}; + +static SCANNER_EXCESS_OBJECT_VERSIONS: AtomicU64 = AtomicU64::new(100); +static SCANNER_EXCESS_OBJECT_VERSIONS_TOTAL_SIZE: AtomicU64 = AtomicU64::new(1024 * 1024 * 1024 * 1024); // 1 TB #[derive(Clone)] pub struct ScannerItem { - bucket: String, - lifecycle: Option>, - versioning: Option>, + pub bucket: String, + pub prefix: String, + pub object_name: String, + pub lifecycle: Option>, + pub versioning: Option>, } impl ScannerItem { pub fn new(bucket: String, 
lifecycle: Option>, versioning: Option>) -> Self { Self { bucket, + prefix: "".to_string(), + object_name: "".to_string(), lifecycle, versioning, } } - pub async fn apply_actions(&mut self, object: &str, mut meta: MetaCacheEntry) -> anyhow::Result<()> { - info!("apply_actions called for object: {}", object); - if self.lifecycle.is_none() { - info!("No lifecycle config for object: {}", object); - return Ok(()); + pub async fn apply_versions_actions(&self, fivs: &[FileInfo]) -> Result> { + let obj_infos = self.apply_newer_noncurrent_version_limit(fivs).await?; + if obj_infos.len() >= SCANNER_EXCESS_OBJECT_VERSIONS.load(Ordering::SeqCst) as usize { + // todo } - info!("Lifecycle config exists for object: {}", object); - let file_meta = match meta.xl_meta() { - Ok(meta) => meta, - Err(e) => { - tracing::error!("Failed to get xl_meta for {}: {}", object, e); - return Ok(()); - } + let mut cumulative_size = 0; + for obj_info in obj_infos.iter() { + cumulative_size += obj_info.size; + } + + if cumulative_size >= SCANNER_EXCESS_OBJECT_VERSIONS_TOTAL_SIZE.load(Ordering::SeqCst) as i64 { + //todo + } + + Ok(obj_infos) + } + + pub async fn apply_newer_noncurrent_version_limit(&self, fivs: &[FileInfo]) -> Result> { + let lock_enabled = if let Some(rcfg) = BucketObjectLockSys::get(&self.bucket).await { + rcfg.mode.is_some() + } else { + false }; + let _vcfg = BucketVersioningSys::get(&self.bucket).await?; - let latest_version = file_meta.versions.first().cloned().unwrap_or_default(); - let file_meta_version = FileMetaVersion::try_from(latest_version.meta.as_slice()).unwrap_or_default(); - - let obj_info = ObjectInfo { - bucket: self.bucket.clone(), - name: object.to_string(), - version_id: latest_version.header.version_id, - mod_time: latest_version.header.mod_time, - size: file_meta_version.object.as_ref().map_or(0, |o| o.size), - user_defined: serde_json::from_slice(file_meta.data.as_slice()).unwrap_or_default(), - ..Default::default() + let versioned = match 
BucketVersioningSys::get(&self.bucket).await { + Ok(vcfg) => vcfg.versioned(self.object_path().to_str().unwrap_or_default()), + Err(_) => false, }; + let mut object_infos = Vec::with_capacity(fivs.len()); + + if self.lifecycle.is_none() { + for info in fivs.iter() { + object_infos.push(ObjectInfo::from_file_info( + info, + &self.bucket, + &self.object_path().to_string_lossy(), + versioned, + )); + } + return Ok(object_infos); + } - self.apply_lifecycle(&obj_info).await; + let event = self + .lifecycle + .as_ref() + .expect("lifecycle err.").clone() + .noncurrent_versions_expiration_limit(&lifecycle::ObjectOpts { + name: self.object_path().to_string_lossy().to_string(), + ..Default::default() + }) + .await; + let lim = event.newer_noncurrent_versions; + if lim == 0 || fivs.len() <= lim + 1 { + for fi in fivs.iter() { + object_infos.push(ObjectInfo::from_file_info( + fi, + &self.bucket, + &self.object_path().to_string_lossy(), + versioned, + )); + } + return Ok(object_infos); + } + + let overflow_versions = &fivs[lim + 1..]; + for fi in fivs[..lim + 1].iter() { + object_infos.push(ObjectInfo::from_file_info( + fi, + &self.bucket, + &self.object_path().to_string_lossy(), + versioned, + )); + } - Ok(()) + let mut to_del = Vec::::with_capacity(overflow_versions.len()); + for fi in overflow_versions.iter() { + let obj = ObjectInfo::from_file_info(fi, &self.bucket, &self.object_path().to_string_lossy(), versioned); + if lock_enabled && enforce_retention_for_deletion(&obj) { + //if enforce_retention_for_deletion(&obj) { + /*if self.debug { + if obj.version_id.is_some() { + info!("lifecycle: {} v({}) is locked, not deleting\n", obj.name, obj.version_id.expect("err")); + } else { + info!("lifecycle: {} is locked, not deleting\n", obj.name); + } + }*/ + object_infos.push(obj); + continue; + } + + if OffsetDateTime::now_utc().unix_timestamp() + < lifecycle::expected_expiry_time(obj.successor_mod_time.expect("err"), event.noncurrent_days as i32) + .unix_timestamp() + { + 
object_infos.push(obj); + continue; + } + + to_del.push(ObjectToDelete { + object_name: obj.name, + version_id: obj.version_id, + }); + } + + if !to_del.is_empty() { + let mut expiry_state = GLOBAL_ExpiryState.write().await; + expiry_state.enqueue_by_newer_noncurrent(&self.bucket, to_del, event).await; + } + + Ok(object_infos) + } + + pub async fn apply_actions(&mut self, oi: &ObjectInfo, _size_s: &mut SizeSummary) -> (bool, i64) { + let (action, _size) = self.apply_lifecycle(oi).await; + + info!( + "apply_actions {} {} {:?} {:?}", + oi.bucket.clone(), + oi.name.clone(), + oi.version_id.clone(), + oi.user_defined.clone() + ); + + // Create a mutable clone if you need to modify fields + /*let mut oi = oi.clone(); + oi.replication_status = ReplicationStatusType::from( + oi.user_defined + .get("x-amz-bucket-replication-status") + .unwrap_or(&"PENDING".to_string()), + ); + info!("apply status is: {:?}", oi.replication_status); + self.heal_replication(&oi, _size_s).await;*/ + + if action.delete_all() { + return (true, 0); + } + + (false, oi.size) } async fn apply_lifecycle(&mut self, oi: &ObjectInfo) -> (IlmAction, i64) { @@ -122,4 +253,8 @@ impl ScannerItem { apply_lifecycle_action(&lc_evt, &LcEventSrc::Scanner, oi).await; (lc_evt.action, new_size) } + + pub fn object_path(&self) -> PathBuf { + path_join(&[PathBuf::from(self.prefix.clone()), PathBuf::from(self.object_name.clone())]) + } } From aefb7f11ca1320b76d98131041d5a559e0db88e2 Mon Sep 17 00:00:00 2001 From: likewu Date: Mon, 18 Aug 2025 22:02:04 +0800 Subject: [PATCH 26/26] fix --- Cargo.lock | 1 + crates/ahm/Cargo.toml | 1 + crates/ahm/src/scanner/data_scanner.rs | 16 ++++++---- crates/ahm/src/scanner/lifecycle.rs | 29 +++++++++---------- .../bucket/lifecycle/bucket_lifecycle_ops.rs | 2 +- .../ecstore/src/bucket/lifecycle/lifecycle.rs | 5 ++-- 6 files changed, 30 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ded20e538..b69b82461 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8123,6 
+8123,7 @@ dependencies = [ "serial_test", "tempfile", "thiserror 2.0.12", + "time", "tokio", "tokio-util", "tracing", diff --git a/crates/ahm/Cargo.toml b/crates/ahm/Cargo.toml index d94f32aa1..0e7be349a 100644 --- a/crates/ahm/Cargo.toml +++ b/crates/ahm/Cargo.toml @@ -22,6 +22,7 @@ tokio = { workspace = true, features = ["full"] } tokio-util = { workspace = true } tracing = { workspace = true } serde = { workspace = true, features = ["derive"] } +time.workspace = true serde_json = { workspace = true } thiserror = { workspace = true } uuid = { workspace = true, features = ["v4", "serde"] } diff --git a/crates/ahm/src/scanner/data_scanner.rs b/crates/ahm/src/scanner/data_scanner.rs index 02d65fc50..e7dde0a04 100644 --- a/crates/ahm/src/scanner/data_scanner.rs +++ b/crates/ahm/src/scanner/data_scanner.rs @@ -38,14 +38,14 @@ use crate::{ get_ahm_services_cancel_token, }; -use rustfs_utils::path::path_to_bucket_object_with_base_path; use rustfs_common::data_usage::DataUsageInfo; -use rustfs_common::metrics::{Metric, Metrics, globalMetrics}; use rustfs_common::data_usage::SizeSummary; -use rustfs_ecstore::cmd::bucket_targets::VersioningConfig; -use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys; +use rustfs_common::metrics::{Metric, Metrics, globalMetrics}; use rustfs_ecstore::bucket::versioning::VersioningApi; +use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys; +use rustfs_ecstore::cmd::bucket_targets::VersioningConfig; use rustfs_ecstore::disk::RUSTFS_META_BUCKET; +use rustfs_utils::path::path_to_bucket_object_with_base_path; /// Custom scan mode enum for AHM scanner #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] @@ -1267,6 +1267,9 @@ impl Scanner { } else { // Apply lifecycle actions if let Some(lifecycle_config) = &lifecycle_config { + let sub_path = entry.path(); + let ent_name = Path::new(&folder.name).join(&sub_path); + let vcfg = BucketVersioningSys::get(bucket).await.ok(); let mut scanner_item = ScannerItem { @@ -1277,13 
+1280,14 @@ impl Scanner { .unwrap_or(Path::new("")) .to_string_lossy() .to_string(), - object_name: entry.file_name() + object_name: ent_name + .file_name() .map(|name| name.to_string_lossy().into_owned()) .unwrap_or_default(), lifecycle: Some(lifecycle_config.clone()), versioning: versioning_config.clone(), }; - //ScannerItem::new(bucket.to_string(), Some(lifecycle_config.clone()), versioning_config.clone()); + //ScannerItem::new(bucket.to_string(), Some(lifecycle_config.clone()), versioning_config.clone()); let fivs = match entry.clone().file_info_versions(&scanner_item.bucket) { Ok(fivs) => fivs, Err(err) => { diff --git a/crates/ahm/src/scanner/lifecycle.rs b/crates/ahm/src/scanner/lifecycle.rs index b9bd31135..bf3fbfd88 100644 --- a/crates/ahm/src/scanner/lifecycle.rs +++ b/crates/ahm/src/scanner/lifecycle.rs @@ -12,34 +12,32 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::path::{Path, PathBuf}; use std::sync::Arc; use std::sync::atomic::{AtomicU64, Ordering}; -use std::path::{Path, PathBuf}; use time::OffsetDateTime; -use rustfs_utils::path::path_join; -use rustfs_common::metrics::IlmAction; +use crate::error::{Error, Result}; use rustfs_common::data_usage::SizeSummary; -use rustfs_ecstore::bucket::lifecycle::{lifecycle, - lifecycle::Lifecycle, +use rustfs_common::metrics::IlmAction; +use rustfs_ecstore::bucket::lifecycle::{ bucket_lifecycle_audit::LcEventSrc, - bucket_lifecycle_ops::{ - apply_lifecycle_action, eval_action_from_lifecycle, - GLOBAL_ExpiryState, GLOBAL_TransitionState, - } + bucket_lifecycle_ops::{GLOBAL_ExpiryState, GLOBAL_TransitionState, apply_lifecycle_action, eval_action_from_lifecycle}, + lifecycle, + lifecycle::Lifecycle, }; use rustfs_ecstore::bucket::metadata_sys::get_object_lock_config; -use rustfs_ecstore::cmd::bucket_targets::VersioningConfig; -use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys; -use rustfs_ecstore::bucket::versioning::VersioningApi; 
use rustfs_ecstore::bucket::object_lock::objectlock_sys::{BucketObjectLockSys, enforce_retention_for_deletion}; +use rustfs_ecstore::bucket::versioning::VersioningApi; +use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys; +use rustfs_ecstore::cmd::bucket_targets::VersioningConfig; use rustfs_ecstore::store_api::{ObjectInfo, ObjectToDelete}; +use rustfs_filemeta::FileInfo; use rustfs_filemeta::FileMetaVersion; use rustfs_filemeta::metacache::MetaCacheEntry; -use rustfs_filemeta::FileInfo; +use rustfs_utils::path::path_join; use s3s::dto::BucketLifecycleConfiguration as LifecycleConfig; use tracing::info; -use crate::error::{Error, Result}; static SCANNER_EXCESS_OBJECT_VERSIONS: AtomicU64 = AtomicU64::new(100); static SCANNER_EXCESS_OBJECT_VERSIONS_TOTAL_SIZE: AtomicU64 = AtomicU64::new(1024 * 1024 * 1024 * 1024); // 1 TB @@ -111,7 +109,8 @@ impl ScannerItem { let event = self .lifecycle .as_ref() - .expect("lifecycle err.").clone() + .expect("lifecycle err.") + .clone() .noncurrent_versions_expiration_limit(&lifecycle::ObjectOpts { name: self.object_path().to_string_lossy().to_string(), ..Default::default() diff --git a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs index b960b217f..50e87a124 100644 --- a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs +++ b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_ops.rs @@ -1,4 +1,3 @@ -#![allow(unused_imports)] // Copyright 2024 RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,6 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+#![allow(unused_imports)] #![allow(unused_variables)] #![allow(unused_mut)] #![allow(unused_assignments)] diff --git a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs index 8fb6c88d8..82eb48719 100644 --- a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs +++ b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs @@ -25,6 +25,7 @@ use s3s::dto::{ use std::cmp::Ordering; use std::env; use std::fmt::Display; +use std::sync::Arc; use time::macros::{datetime, offset}; use time::{self, Duration, OffsetDateTime}; use tracing::info; @@ -138,7 +139,7 @@ pub trait Lifecycle { async fn eval(&self, obj: &ObjectOpts) -> Event; async fn eval_inner(&self, obj: &ObjectOpts, now: OffsetDateTime) -> Event; //fn set_prediction_headers(&self, w: http.ResponseWriter, obj: ObjectOpts); - async fn noncurrent_versions_expiration_limit(&self, obj: &ObjectOpts) -> Event; + async fn noncurrent_versions_expiration_limit(self: Arc, obj: &ObjectOpts) -> Event; } #[async_trait::async_trait] @@ -538,7 +539,7 @@ impl Lifecycle for BucketLifecycleConfiguration { Event::default() } - async fn noncurrent_versions_expiration_limit(&self, obj: &ObjectOpts) -> Event { + async fn noncurrent_versions_expiration_limit(self: Arc, obj: &ObjectOpts) -> Event { if let Some(filter_rules) = self.filter_rules(obj).await { for rule in filter_rules.iter() { if let Some(ref noncurrent_version_expiration) = rule.noncurrent_version_expiration {