From ee2edf1b9e0e0aa7ef50f281a7c068cc342ad4cb Mon Sep 17 00:00:00 2001 From: zine yu Date: Sat, 13 Dec 2025 19:56:03 +0800 Subject: [PATCH 1/7] feat(slayerfs): impl database store of plock Signed-off-by: zine yu --- project/slayerfs/src/meta/entities/mod.rs | 2 + .../slayerfs/src/meta/entities/plock_meta.rs | 20 ++ project/slayerfs/src/meta/file_lock.rs | 197 ++++++++++++++++++ project/slayerfs/src/meta/mod.rs | 1 + project/slayerfs/src/meta/store.rs | 50 +++++ .../src/meta/stores/database_store.rs | 182 +++++++++++++++- .../slayerfs/src/meta/stores/etcd_store.rs | 30 +++ .../slayerfs/src/meta/stores/redis_store.rs | 31 +++ 8 files changed, 512 insertions(+), 1 deletion(-) create mode 100644 project/slayerfs/src/meta/entities/plock_meta.rs create mode 100644 project/slayerfs/src/meta/file_lock.rs diff --git a/project/slayerfs/src/meta/entities/mod.rs b/project/slayerfs/src/meta/entities/mod.rs index 096abe54e..65ea74195 100644 --- a/project/slayerfs/src/meta/entities/mod.rs +++ b/project/slayerfs/src/meta/entities/mod.rs @@ -4,6 +4,7 @@ pub mod content_meta; pub mod etcd; pub mod file_meta; pub mod locks_meta; +pub mod plock_meta; pub mod session_meta; pub mod slice_meta; @@ -11,5 +12,6 @@ pub use access_meta::{Entity as AccessMeta, Model as AccessMetaModel}; pub use content_meta::{Entity as ContentMeta, EntryType, Model as ContentMetaModel}; pub use file_meta::{Entity as FileMeta, Model as FileMetaModel}; pub use locks_meta::Entity as LocksMeta; +pub use plock_meta::Entity as PlockMeta; #[allow(unused_imports)] pub use slice_meta::{Entity as SliceMeta, Model as SliceMetaModel}; diff --git a/project/slayerfs/src/meta/entities/plock_meta.rs b/project/slayerfs/src/meta/entities/plock_meta.rs new file mode 100644 index 000000000..db9bae175 --- /dev/null +++ b/project/slayerfs/src/meta/entities/plock_meta.rs @@ -0,0 +1,20 @@ +use sea_orm::entity::prelude::*; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)] +#[sea_orm(table_name = "plock")] +pub struct Model { + #[sea_orm(primary_key)] + pub id: i64, + #[sea_orm(unique)] + pub inode: i64, + #[sea_orm(unique)] + pub sid: Uuid, + #[sea_orm(unique)] + pub owner: i64, + pub records: Vec, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} +impl ActiveModelBehavior for ActiveModel {} diff --git a/project/slayerfs/src/meta/file_lock.rs b/project/slayerfs/src/meta/file_lock.rs new file mode 100644 index 000000000..0bb3e79b8 --- /dev/null +++ b/project/slayerfs/src/meta/file_lock.rs @@ -0,0 +1,197 @@ +use sea_orm::{ + TryGetError, Value, + sea_query::{self, ValueTypeErr}, +}; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(u32)] +pub enum FileLockType { + ReadLock = libc::F_RDLCK as u32, + WriteLock = libc::F_WRLCK as u32, + UnLock = libc::F_UNLCK as u32, +} + +impl FileLockType { + pub fn from_u32(value: u32) -> Option { + match value { + x if x == Self::ReadLock as u32 => Some(Self::ReadLock), + x if x == Self::WriteLock as u32 => Some(Self::WriteLock), + x if x == Self::UnLock as u32 => Some(Self::UnLock), + _ => None, + } + } + + pub fn as_u32(&self) -> u32 { + *self as u32 + } +} + +impl std::convert::From for sea_orm::Value { + fn from(value: FileLockType) -> Self { + match value { + FileLockType::ReadLock => Value::Unsigned(Some(FileLockType::ReadLock as u32)), + FileLockType::WriteLock => Value::Unsigned(Some(FileLockType::WriteLock as u32)), + FileLockType::UnLock => 
Value::Unsigned(Some(FileLockType::UnLock as u32)), + } + } +} + +impl sea_orm::TryGetable for FileLockType { + fn try_get_by( + res: &sea_orm::QueryResult, + index: I, + ) -> Result { + let val: u32 = res.try_get_by(index)?; + FileLockType::from_u32(val).ok_or(TryGetError::DbErr(sea_orm::DbErr::Type( + "Failed to deserialize FIleLockType".to_string(), + ))) + } +} + +impl sea_query::ValueType for FileLockType { + fn try_from(v: Value) -> Result { + match v { + Value::Unsigned(Some(val)) => FileLockType::from_u32(val).ok_or(ValueTypeErr), + _ => Err(sea_query::ValueTypeErr), + } + } + + fn type_name() -> String { + "FlockType".to_string() + } + + fn array_type() -> sea_query::ArrayType { + sea_orm::sea_query::ArrayType::Unsigned + } + + fn column_type() -> sea_orm::ColumnType { + sea_orm::sea_query::ColumnType::Unsigned + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct PlockRecord { + pub lock_type: FileLockType, + pub pid: u32, + pub lock_range: FileLockRange, +} + +#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, Eq, PartialEq, Hash)] +pub struct FileLockRange { + pub start: u64, + pub end: u64, +} + +impl FileLockRange { + pub fn new(start: u64, end: u64) -> Self { + Self { start, end } + } + + pub fn overlaps(&self, other: &Self) -> bool { + self.end >= other.start && self.start <= other.end + } +} +#[derive(Debug, Clone, Copy)] +pub struct FileLockQuery { + pub owner: u64, + pub lock_type: FileLockType, + pub range: FileLockRange, +} + +#[derive(Debug, Clone, Copy)] +pub struct FileLockInfo { + pub lock_type: FileLockType, + pub range: FileLockRange, + pub pid: u32, +} + +impl FileLockInfo { + pub fn unlocked() -> Self { + Self { + lock_type: FileLockType::UnLock, + range: FileLockRange::default(), + pid: 0, + } + } +} + +pub async fn get_plock( + range: FileLockRange, + query: FileLockQuery, + lock_owner: i64, + records: Vec, +) -> Option { + for record in records { + // Check if this lock overlaps with the requested range + if record.lock_range.overlaps(&range) { + // Check if the lock conflicts with the query + // Same owner can access its own locks + if lock_owner == query.owner as i64 { + return Some(FileLockInfo { + lock_type: record.lock_type, + range: record.lock_range, + pid: record.pid, + }); + } + + // Check compatibility based on lock types + match (record.lock_type, query.lock_type) { + (FileLockType::ReadLock, FileLockType::ReadLock) => { + // Read locks are compatible + continue; + } + (FileLockType::UnLock, _) => { + // Unlocked region + continue; + } + _ => { + // Conflict detected + return Some(FileLockInfo { + lock_type: record.lock_type, + range: record.lock_range, + pid: record.pid, + }); + } + } + } + } + None +} + +pub async fn check_conflicts( + owner: u64, + block: bool, + lock_type: FileLockType, + range: FileLockRange, + lock_owner: i64, + records: Vec, +) -> bool { + for record in records { + if record.lock_range.overlaps(&range) { + // skip if same owner (allow re-locking or upgrading) + if lock_owner == owner as i64 { + continue; + } + + // check lock compatibility + match (record.lock_type, lock_type) { + (FileLockType::ReadLock, FileLockType::ReadLock) => { + // read locks are compatible + continue; + } + _ => { + // conflict detected + if !block { + return true; + } + + // for blocking locks, we would implement retry logic here + // for now, just return conflict error + return true; + } + } + } + } + false +} diff --git a/project/slayerfs/src/meta/mod.rs b/project/slayerfs/src/meta/mod.rs index 
52da075a7..83d6c691f 100644 --- a/project/slayerfs/src/meta/mod.rs +++ b/project/slayerfs/src/meta/mod.rs @@ -17,6 +17,7 @@ pub mod client; pub mod config; pub mod entities; pub mod factory; +pub mod file_lock; pub mod layer; pub mod migrations; pub mod permission; diff --git a/project/slayerfs/src/meta/store.rs b/project/slayerfs/src/meta/store.rs index e707c5f65..f8312d4c0 100644 --- a/project/slayerfs/src/meta/store.rs +++ b/project/slayerfs/src/meta/store.rs @@ -4,6 +4,7 @@ use crate::chuck::SliceDesc; use crate::meta::client::session::{Session, SessionInfo}; use crate::meta::entities::content_meta::EntryType; +use crate::meta::file_lock::{FileLockInfo, FileLockQuery, FileLockRange, FileLockType}; use async_trait::async_trait; use std::collections::HashMap; use std::fmt; @@ -287,6 +288,23 @@ pub enum MetaError { #[error("Session not found: {0}")] SessionNotFound(Uuid), + #[error("Lock conflict on inode {inode} for owner {owner}, range: {range:?}")] + LockConflict { + inode: i64, + owner: u64, + range: FileLockRange, + }, + + #[error("Lock not found on inode {inode} for owner {owner}, range: {range:?}")] + LockNotFound { + inode: i64, + owner: u64, + range: FileLockRange, + }, + + #[error("Deadlock detected involving owners: {owners:?}")] + DeadlockDetected { owners: Vec }, + #[error("Invalid handle: {0}")] InvalidHandle(u64), @@ -738,4 +756,36 @@ pub trait MetaStore: Send + Sync { ); Err(MetaError::NotImplemented) } + + // ---------- File lock ---------- + + // returns the current lock owner for a range on a file. + async fn get_plock( + &self, + inode: i64, + range: FileLockRange, + query: FileLockQuery, + ) -> Result { + let _ = (inode, query, range); + Err(MetaError::NotImplemented) + } + + // sets a file range lock on given file. + async fn set_plock( + &self, + inode: i64, + owner: u64, + block: bool, + lock_type: FileLockType, + range: FileLockRange, + pid: u32, + ) -> Result<(), MetaError> { + let _ = (inode, owner, lock_type, pid, block, range); + Err(MetaError::NotImplemented) + } + + fn set_sid(&self, sid: Uuid) -> Result<(), MetaError> { + let _ = sid; + Err(MetaError::NotImplemented) + } } diff --git a/project/slayerfs/src/meta/stores/database_store.rs b/project/slayerfs/src/meta/stores/database_store.rs index 7283f3ff2..2f4d2dc03 100644 --- a/project/slayerfs/src/meta/stores/database_store.rs +++ b/project/slayerfs/src/meta/stores/database_store.rs @@ -7,12 +7,15 @@ use crate::meta::client::session::{Session, SessionInfo}; use crate::meta::config::{Config, DatabaseType}; use crate::meta::entities::session_meta::{self, Entity as SessionMeta}; use crate::meta::entities::slice_meta::{self, Entity as SliceMeta}; -use crate::meta::entities::*; +use crate::meta::file_lock::{ + FileLockInfo, FileLockQuery, FileLockRange, FileLockType, PlockRecord, +}; use crate::meta::store::{ DirEntry, FileAttr, LockName, MetaError, MetaStore, OpenFlags, SetAttrFlags, SetAttrRequest, StatFsSnapshot, }; use crate::meta::{INODE_ID_KEY, Permission, SLICE_ID_KEY}; +use crate::meta::{entities::*, file_lock}; use crate::vfs::fs::FileType; use async_trait::async_trait; use chrono::{Duration, Utc}; @@ -21,11 +24,13 @@ use sea_orm::prelude::Uuid; use sea_orm::*; use sea_query::Index; use std::path::Path; +use std::sync::OnceLock; use std::sync::atomic::{AtomicU64, Ordering}; /// Database-based metadata store pub struct DatabaseMetaStore { db: DatabaseConnection, + sid: OnceLock, _config: Config, next_inode: AtomicU64, next_slice: AtomicU64, @@ -49,6 +54,7 @@ impl DatabaseMetaStore { let next_slice = 
AtomicU64::new(Self::init_next_slice(&db).await?); let store = Self { db, + sid: OnceLock::new(), _config, next_inode, next_slice, @@ -71,6 +77,7 @@ impl DatabaseMetaStore { let next_slice = AtomicU64::new(Self::init_next_slice(&db).await?); let store = Self { db, + sid: OnceLock::new(), _config, next_inode, next_slice, @@ -168,6 +175,14 @@ impl DatabaseMetaStore { .create_table_from_entity(SliceMeta) .if_not_exists() .to_owned(), + schema + .create_table_from_entity(LocksMeta) + .if_not_exists() + .to_owned(), + schema + .create_table_from_entity(PlockMeta) + .if_not_exists() + .to_owned(), ]; for (i, stmt) in stmts.iter().enumerate() { @@ -257,6 +272,18 @@ impl DatabaseMetaStore { .map_err(MetaError::Database) } + /// Check file is existing + async fn file_is_existing(&self, inode: i64) -> Result { + let existing = FileMeta::find_by_id(inode) + .one(&self.db) + .await + .map_err(MetaError::Database)?; + match existing { + Some(_) => Ok(true), + None => Ok(true), + } + } + /// Create a new directory async fn create_directory(&self, parent_inode: i64, name: String) -> Result { // Start transaction @@ -1538,4 +1565,157 @@ impl MetaStore for DatabaseMetaStore { fn as_any(&self) -> &dyn std::any::Any { self } + + // returns the current lock owner for a range on a file. + async fn get_plock( + &self, + inode: i64, + range: FileLockRange, + query: FileLockQuery, + ) -> Result { + if !self.file_is_existing(inode).await? { + return Err(MetaError::NotFound(inode)); + }; + + // Find all locks that overlap with the requested range + let locks = plock_meta::Entity::find() + .filter(plock_meta::Column::Inode.eq(inode)) + .all(&self.db) + .await + .map_err(MetaError::Database)?; + + // Deserialize and check each lock for overlap and compatibility + for lock in locks { + let records: Vec = + serde_json::from_slice(&lock.records).map_err(MetaError::Serialization)?; + match file_lock::get_plock(range, query, lock.owner, records).await { + Some(info) => return Ok(info), + None => (), + } + } + + // No conflicting locks found + Ok(FileLockInfo::unlocked()) + } + + // sets a file range lock on given file. + async fn set_plock( + &self, + inode: i64, + owner: u64, + block: bool, + lock_type: FileLockType, + range: FileLockRange, + pid: u32, + ) -> Result<(), MetaError> { + if !self.file_is_existing(inode).await? 
{ + return Err(MetaError::NotFound(inode)); + }; + // Start a transaction for atomic lock operation + let txn = self.db.begin().await.map_err(MetaError::Database)?; + + // First check if there are any conflicting locks + let existing_locks = plock_meta::Entity::find() + .filter(plock_meta::Column::Inode.eq(inode)) + .all(&txn) + .await + .map_err(MetaError::Database)?; + + // Check for conflicts + for lock in &existing_locks { + let records: Vec = + serde_json::from_slice(&lock.records).map_err(MetaError::Serialization)?; + let conflict = + file_lock::check_conflicts(owner, block, lock_type, range, lock.owner, records) + .await; + if conflict { + let _ = txn.rollback(); + return Err(MetaError::LockConflict { + inode, + owner, + range, + }); + } + } + + // Check if we already have a lock entry for this owner/inode + let existing_owner_lock = existing_locks.iter().find(|lock| { + lock.owner == owner as i64 && lock.sid == *self.sid.get().unwrap_or(&Uuid::nil()) + }); + + if let Some(existing) = existing_owner_lock { + // Update existing lock record + let mut records: Vec = + serde_json::from_slice(&existing.records).map_err(MetaError::Serialization)?; + + // Check if we're unlocking + if lock_type == FileLockType::UnLock { + // Remove any locks that match the range + records.retain(|record| !record.lock_range.overlaps(&range) || record.pid != pid); + } else { + // Add or update the lock + let new_record = PlockRecord { + lock_type, + pid, + lock_range: range, + }; + + // Check if we already have a lock for this range and pid + let mut found = false; + for record in &mut records { + if record.lock_range.overlaps(&range) && record.pid == pid { + record.lock_type = lock_type; + record.lock_range = range; + found = true; + break; + } + } + + if !found { + records.push(new_record); + } + } + + // Serialize updated records + let serialized = serde_json::to_vec(&records).map_err(MetaError::Serialization)?; + + // Update database + let mut active_lock = existing.clone().into_active_model(); + active_lock.records = Set(serialized); + active_lock + .update(&txn) + .await + .map_err(MetaError::Database)?; + } else { + // Create new lock entry + let records = vec![PlockRecord { + lock_type, + pid, + lock_range: range, + }]; + + let serialized = serde_json::to_vec(&records).map_err(MetaError::Serialization)?; + + let new_lock = plock_meta::ActiveModel { + id: NotSet, + inode: Set(inode), + sid: Set(*self.sid.get().unwrap_or(&Uuid::nil())), + owner: Set(owner as i64), + records: Set(serialized), + }; + + new_lock.insert(&txn).await.map_err(MetaError::Database)?; + } + + // Commit the transaction + txn.commit().await.map_err(MetaError::Database)?; + + Ok(()) + } + + fn set_sid(&self, sid: Uuid) -> Result<(), MetaError> { + self.sid + .set(sid) + .map_err(|_| MetaError::Internal("sid has been seted".to_string())) + } } diff --git a/project/slayerfs/src/meta/stores/etcd_store.rs b/project/slayerfs/src/meta/stores/etcd_store.rs index 9aa244a08..85ebfb145 100644 --- a/project/slayerfs/src/meta/stores/etcd_store.rs +++ b/project/slayerfs/src/meta/stores/etcd_store.rs @@ -9,6 +9,7 @@ use crate::meta::client::session::{Session, SessionInfo}; use crate::meta::config::{Config, DatabaseType}; use crate::meta::entities::etcd::*; use crate::meta::entities::*; +use crate::meta::file_lock::{FileLockInfo, FileLockQuery, FileLockRange, FileLockType}; use crate::meta::store::{DirEntry, FileAttr, LockName, MetaError, MetaStore}; use crate::meta::stores::pool::IdPool; use crate::meta::{INODE_ID_KEY, Permission}; @@ -1885,6 
+1886,35 @@ impl MetaStore for EtcdMetaStore {
     fn as_any(&self) -> &dyn std::any::Any {
         self
     }
+
+    // returns the current lock owner for a range on a file.
+    async fn get_plock(
+        &self,
+        inode: i64,
+        range: FileLockRange,
+        query: FileLockQuery,
+    ) -> Result<FileLockInfo, MetaError> {
+        let _ = (inode, query, range);
+        Err(MetaError::NotImplemented)
+    }
+
+    // sets a file range lock on given file.
+ async fn set_plock( + &self, + inode: i64, + owner: u64, + block: bool, + lock_type: FileLockType, + range: FileLockRange, + pid: u32, + ) -> Result<(), MetaError> { + let _ = (inode, owner, lock_type, pid, block, range); + Err(MetaError::NotImplemented) + } + + fn set_sid(&self, sid: Uuid) -> Result<(), MetaError> { + let _ = sid; + Err(MetaError::NotImplemented) + } } #[derive(Clone, Debug, Serialize, Deserialize)] From 4155f0dee7d710d6f3f967053a3575d923dd652c Mon Sep 17 00:00:00 2001 From: zine yu Date: Sun, 14 Dec 2025 18:55:46 +0800 Subject: [PATCH 2/7] feat(slayerfs): impl plock for database store Signed-off-by: zine yu --- .../slayerfs/src/meta/entities/plock_meta.rs | 6 +- project/slayerfs/src/meta/file_lock.rs | 191 +++++----- project/slayerfs/src/meta/store.rs | 14 +- .../src/meta/stores/database_store.rs | 328 +++++++++++------- .../slayerfs/src/meta/stores/etcd_store.rs | 7 +- .../slayerfs/src/meta/stores/redis_store.rs | 7 +- 6 files changed, 336 insertions(+), 217 deletions(-) diff --git a/project/slayerfs/src/meta/entities/plock_meta.rs b/project/slayerfs/src/meta/entities/plock_meta.rs index db9bae175..01e440421 100644 --- a/project/slayerfs/src/meta/entities/plock_meta.rs +++ b/project/slayerfs/src/meta/entities/plock_meta.rs @@ -5,12 +5,10 @@ use serde::{Deserialize, Serialize}; #[sea_orm(table_name = "plock")] pub struct Model { #[sea_orm(primary_key)] - pub id: i64, - #[sea_orm(unique)] pub inode: i64, - #[sea_orm(unique)] + #[sea_orm(primary_key)] pub sid: Uuid, - #[sea_orm(unique)] + #[sea_orm(primary_key)] pub owner: i64, pub records: Vec, } diff --git a/project/slayerfs/src/meta/file_lock.rs b/project/slayerfs/src/meta/file_lock.rs index 0bb3e79b8..455a8ae2b 100644 --- a/project/slayerfs/src/meta/file_lock.rs +++ b/project/slayerfs/src/meta/file_lock.rs @@ -1,8 +1,13 @@ +use std::collections::HashMap; + use sea_orm::{ TryGetError, Value, sea_query::{self, ValueTypeErr}, }; use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +use crate::meta::entities::{PlockMeta, plock_meta}; #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] #[repr(u32)] @@ -70,13 +75,117 @@ impl sea_query::ValueType for FileLockType { } } -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct PlockRecord { pub lock_type: FileLockType, pub pid: u32, pub lock_range: FileLockRange, } +impl PlockRecord { + pub fn new(lock_type: FileLockType, pid: u32, start: u64, end: u64) -> Self { + return Self { + lock_type, + pid, + lock_range: FileLockRange { start, end }, + }; + } + + pub async fn is_conflict(&self, locks: Vec) -> bool { + for lock in locks { + if self.lock_range.overlaps(&lock.lock_range) { + match (self.lock_type, lock.lock_type) { + (FileLockType::ReadLock, FileLockType::ReadLock) => {} + _ => return true, + } + } + } + + false + } + + pub fn update_locks(mut ls: Vec, nl: PlockRecord) -> Vec { + let mut i = 0; + let mut nl = nl; + let mut new_records = Vec::new(); // records need to insert + + while i < ls.len() && nl.lock_range.end > nl.lock_range.start { + let l = ls[i]; + + match () { + _ if l.lock_range.end < nl.lock_range.start => { + // skip + } + _ if l.lock_range.start < nl.lock_range.start => { + // split the current lock + let mut left = ls[i]; + left.lock_range.end = nl.lock_range.start; + + let middle = PlockRecord::new( + nl.lock_type, + nl.pid, + nl.lock_range.start, + l.lock_range.end, + ); + new_records.push((i + 1, middle)); + + ls[i] = left; + nl.lock_range.start 
= l.lock_range.end; + i += 1; + } + _ if l.lock_range.end < nl.lock_range.end => { + // Shrink the current lock range + ls[i].lock_type = nl.lock_type; + ls[i].lock_range.start = nl.lock_range.start; + nl.lock_range.start = l.lock_range.end; + } // Insert new lock and adjust next lock + _ if l.lock_range.start < nl.lock_range.end => { + new_records.push((i, nl)); + nl.lock_range.start = nl.lock_range.end; + } + _ => { + // Insert new lock + new_records.push((i, nl)); + nl.lock_range.start = nl.lock_range.end; + } + } + + i += 1; + } + + // Insert from back to front to avoid index shifting issues + for (pos, record) in new_records.into_iter().rev() { + ls.insert(pos, record); + } + if nl.lock_range.start < nl.lock_range.end { + ls.push(PlockRecord::new( + nl.lock_type, + nl.pid, + nl.lock_range.start, + nl.lock_range.end, + )); + } + + // Cleanup and merge + ls.retain(|r| r.lock_type != FileLockType::UnLock && r.lock_range.start < r.lock_range.end); + + let mut result: Vec = Vec::new(); + for record in ls { + if let Some(last) = result.last_mut() { + if last.lock_type == record.lock_type + && last.lock_range.end == record.lock_range.start + { + last.lock_range.end = record.lock_range.end; + continue; + } + } + result.push(record); + } + + result + } +} + #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, Eq, PartialEq, Hash)] pub struct FileLockRange { pub start: u64, @@ -115,83 +224,3 @@ impl FileLockInfo { } } } - -pub async fn get_plock( - range: FileLockRange, - query: FileLockQuery, - lock_owner: i64, - records: Vec, -) -> Option { - for record in records { - // Check if this lock overlaps with the requested range - if record.lock_range.overlaps(&range) { - // Check if the lock conflicts with the query - // Same owner can access its own locks - if lock_owner == query.owner as i64 { - return Some(FileLockInfo { - lock_type: record.lock_type, - range: record.lock_range, - pid: record.pid, - }); - } - - // Check compatibility based on lock types - match (record.lock_type, query.lock_type) { - (FileLockType::ReadLock, FileLockType::ReadLock) => { - // Read locks are compatible - continue; - } - (FileLockType::UnLock, _) => { - // Unlocked region - continue; - } - _ => { - // Conflict detected - return Some(FileLockInfo { - lock_type: record.lock_type, - range: record.lock_range, - pid: record.pid, - }); - } - } - } - } - None -} - -pub async fn check_conflicts( - owner: u64, - block: bool, - lock_type: FileLockType, - range: FileLockRange, - lock_owner: i64, - records: Vec, -) -> bool { - for record in records { - if record.lock_range.overlaps(&range) { - // skip if same owner (allow re-locking or upgrading) - if lock_owner == owner as i64 { - continue; - } - - // check lock compatibility - match (record.lock_type, lock_type) { - (FileLockType::ReadLock, FileLockType::ReadLock) => { - // read locks are compatible - continue; - } - _ => { - // conflict detected - if !block { - return true; - } - - // for blocking locks, we would implement retry logic here - // for now, just return conflict error - return true; - } - } - } - } - false -} diff --git a/project/slayerfs/src/meta/store.rs b/project/slayerfs/src/meta/store.rs index f8312d4c0..235c1094b 100644 --- a/project/slayerfs/src/meta/store.rs +++ b/project/slayerfs/src/meta/store.rs @@ -291,7 +291,7 @@ pub enum MetaError { #[error("Lock conflict on inode {inode} for owner {owner}, range: {range:?}")] LockConflict { inode: i64, - owner: u64, + owner: i64, range: FileLockRange, }, @@ -756,25 +756,23 @@ pub trait MetaStore: Send + 
Sync { ); Err(MetaError::NotImplemented) } - // ---------- File lock ---------- - // returns the current lock owner for a range on a file. + /// Gets lock information for a given file region. async fn get_plock( &self, inode: i64, - range: FileLockRange, - query: FileLockQuery, + query: &FileLockQuery, ) -> Result { - let _ = (inode, query, range); + let _ = (inode, query); Err(MetaError::NotImplemented) } - // sets a file range lock on given file. + /// Sets or clears a file segment lock (non-blocking). async fn set_plock( &self, inode: i64, - owner: u64, + owner: i64, block: bool, lock_type: FileLockType, range: FileLockRange, diff --git a/project/slayerfs/src/meta/stores/database_store.rs b/project/slayerfs/src/meta/stores/database_store.rs index 2f4d2dc03..30a8cd430 100644 --- a/project/slayerfs/src/meta/stores/database_store.rs +++ b/project/slayerfs/src/meta/stores/database_store.rs @@ -23,9 +23,18 @@ use log::info; use sea_orm::prelude::Uuid; use sea_orm::*; use sea_query::Index; +use std::collections::HashMap; +use std::hash::Hash; use std::path::Path; use std::sync::OnceLock; use std::sync::atomic::{AtomicU64, Ordering}; +use tracing::Instrument; + +#[derive(Eq, Hash, PartialEq)] +struct PlockHashMapKey { + pub sid: Uuid, + pub owner: i64, +} /// Database-based metadata store pub struct DatabaseMetaStore { @@ -515,6 +524,159 @@ impl DatabaseMetaStore { session.delete(conn).await.map_err(MetaError::Database)?; Ok(()) } + async fn try_set_plock( + &self, + inode: i64, + owner: i64, + new_lock: &PlockRecord, + lock_type: FileLockType, + range: FileLockRange, + ) -> Result<(), MetaError> { + let txn = self.db.begin().await.map_err(MetaError::Database)?; + + // chech file is existing + let exists = self.file_is_existing(inode).await?; + if !exists { + txn.rollback().await.map_err(MetaError::Database)?; + return Err(MetaError::NotFound(inode)); + } + + let sid = self + .sid + .get() + .ok_or_else(|| MetaError::Internal("sid not seted".to_string()))?; + + match lock_type { + FileLockType::UnLock => { + // unlock file + let row = PlockMeta::find() + .filter(plock_meta::Column::Inode.eq(inode)) + .filter(plock_meta::Column::Owner.eq(owner)) + .filter(plock_meta::Column::Sid.eq(*sid)) + .one(&txn) + .await + .map_err(MetaError::Database)?; + + match row { + Some(plock) => { + let records: Vec = + serde_json::from_slice(&plock.records).unwrap_or_default(); + + if records.len() == 0 { + txn.commit().await.map_err(MetaError::Database)?; + return Ok(()); + } + + let new_records = PlockRecord::update_locks(records, new_lock.clone()); + let new_records_bytes = serde_json::to_vec(&new_records).map_err(|e| { + MetaError::Internal(format!( + "error to serialization Vec: {e}" + )) + })?; + + let mut active_model = plock_meta::ActiveModel { + inode: Set(inode), + sid: Set(*sid), + owner: Set(owner), + ..Default::default() + }; + + if new_records.len() == 0 { + let _ = PlockMeta::delete(active_model) + .exec(&txn) + .await + .map_err(MetaError::Database)?; + } else { + active_model.records = Set(new_records_bytes); + active_model + .insert(&txn) + .await + .map_err(MetaError::Database)?; + } + } + None => { + txn.commit().await.map_err(MetaError::Database)?; + return Ok(()); + } + } + + txn.commit().await.map_err(MetaError::Database)?; + Ok(()) + } + _ => { + let ps = PlockMeta::find() + .filter(plock_meta::Column::Inode.eq(inode)) + .all(&txn) + .await + .map_err(MetaError::Database)?; + + let mut locks = HashMap::new(); + for item in ps { + let key = PlockHashMapKey { + sid: item.sid, + owner: 
item.owner, + }; + locks.insert(key, item.records); + } + + let lkey = PlockHashMapKey { sid: *sid, owner }; + + // check conflict + let mut conflict_found = false; + for (k, d) in &locks { + if *k == lkey { + continue; + } + + let ls: Vec = serde_json::from_slice(&d).unwrap_or_default(); + for l in ls { + if (lock_type == FileLockType::WriteLock + || l.lock_type == FileLockType::WriteLock) + && range.end >= l.lock_range.start + && range.start <= l.lock_range.end + { + conflict_found = true; + break; + } + } + if conflict_found { + break; + } + } + + if conflict_found { + txn.rollback().await.map_err(MetaError::Database)?; + return Err(MetaError::LockConflict { + inode, + owner, + range, + }); + } + + let ls = + serde_json::from_slice(locks.get(&lkey).unwrap_or(&vec![])).unwrap_or_default(); + let ls = PlockRecord::update_locks(ls, new_lock.clone()); + + let records = serde_json::to_vec(&ls).map_err(|e| { + MetaError::Internal(format!("error to serialization Vec: {e}")) + })?; + + // lock records changed update + if locks.get(&lkey).map(|r| r != &records).unwrap_or(true) { + let plock = plock_meta::ActiveModel { + sid: Set(*sid), + owner: Set(owner), + inode: Set(inode), + records: Set(records), + }; + plock.save(&txn).await.map_err(MetaError::Database)?; + } + + txn.commit().await.map_err(MetaError::Database)?; + Ok(()) + } + } + } } #[async_trait] @@ -1570,147 +1732,81 @@ impl MetaStore for DatabaseMetaStore { async fn get_plock( &self, inode: i64, - range: FileLockRange, - query: FileLockQuery, + query: &FileLockQuery, ) -> Result { - if !self.file_is_existing(inode).await? { - return Err(MetaError::NotFound(inode)); - }; - - // Find all locks that overlap with the requested range - let locks = plock_meta::Entity::find() + let rows = PlockMeta::find() .filter(plock_meta::Column::Inode.eq(inode)) .all(&self.db) .await .map_err(MetaError::Database)?; - // Deserialize and check each lock for overlap and compatibility - for lock in locks { - let records: Vec = - serde_json::from_slice(&lock.records).map_err(MetaError::Serialization)?; - match file_lock::get_plock(range, query, lock.owner, records).await { - Some(info) => return Ok(info), - None => (), + for row in rows { + let locks: Vec = serde_json::from_slice(&row.records).unwrap_or_default(); + + for lock in locks { + if (lock.lock_type == FileLockType::WriteLock + || query.lock_type == FileLockType::WriteLock) + && lock.lock_range.overlaps(&query.range) + { + let sid = self + .sid + .get() + .ok_or(MetaError::Internal("sid not seted".to_string()))?; + + if *sid == row.sid { + return Ok(FileLockInfo { + lock_type: lock.lock_type, + range: lock.lock_range, + pid: lock.pid, + }); + } else { + return Ok(FileLockInfo { + lock_type: lock.lock_type, + range: lock.lock_range, + pid: 0, + }); + } + } } } - // No conflicting locks found - Ok(FileLockInfo::unlocked()) + Ok(FileLockInfo { + lock_type: FileLockType::UnLock, + range: FileLockRange { start: 0, end: 0 }, + pid: 0, + }) } // sets a file range lock on given file. async fn set_plock( &self, inode: i64, - owner: u64, + owner: i64, block: bool, lock_type: FileLockType, range: FileLockRange, pid: u32, ) -> Result<(), MetaError> { - if !self.file_is_existing(inode).await? 
{ - return Err(MetaError::NotFound(inode)); - }; - // Start a transaction for atomic lock operation - let txn = self.db.begin().await.map_err(MetaError::Database)?; - - // First check if there are any conflicting locks - let existing_locks = plock_meta::Entity::find() - .filter(plock_meta::Column::Inode.eq(inode)) - .all(&txn) - .await - .map_err(MetaError::Database)?; - - // Check for conflicts - for lock in &existing_locks { - let records: Vec = - serde_json::from_slice(&lock.records).map_err(MetaError::Serialization)?; - let conflict = - file_lock::check_conflicts(owner, block, lock_type, range, lock.owner, records) - .await; - if conflict { - let _ = txn.rollback(); - return Err(MetaError::LockConflict { - inode, - owner, - range, - }); - } - } - - // Check if we already have a lock entry for this owner/inode - let existing_owner_lock = existing_locks.iter().find(|lock| { - lock.owner == owner as i64 && lock.sid == *self.sid.get().unwrap_or(&Uuid::nil()) - }); - - if let Some(existing) = existing_owner_lock { - // Update existing lock record - let mut records: Vec = - serde_json::from_slice(&existing.records).map_err(MetaError::Serialization)?; + let new_lock = PlockRecord::new(lock_type, pid, range.start, range.end); - // Check if we're unlocking - if lock_type == FileLockType::UnLock { - // Remove any locks that match the range - records.retain(|record| !record.lock_range.overlaps(&range) || record.pid != pid); - } else { - // Add or update the lock - let new_record = PlockRecord { - lock_type, - pid, - lock_range: range, - }; - - // Check if we already have a lock for this range and pid - let mut found = false; - for record in &mut records { - if record.lock_range.overlaps(&range) && record.pid == pid { - record.lock_type = lock_type; - record.lock_range = range; - found = true; - break; + loop { + let result = self + .try_set_plock(inode, owner, &new_lock, lock_type, range) + .await; + + match result { + Ok(()) => return Ok(()), + Err(MetaError::LockConflict { .. 
}) if block => { + if lock_type == FileLockType::WriteLock { + tokio::time::sleep(tokio::time::Duration::from_millis(1)).await; + } else { + tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; } + continue; } - - if !found { - records.push(new_record); - } + Err(e) => return Err(e), } - - // Serialize updated records - let serialized = serde_json::to_vec(&records).map_err(MetaError::Serialization)?; - - // Update database - let mut active_lock = existing.clone().into_active_model(); - active_lock.records = Set(serialized); - active_lock - .update(&txn) - .await - .map_err(MetaError::Database)?; - } else { - // Create new lock entry - let records = vec![PlockRecord { - lock_type, - pid, - lock_range: range, - }]; - - let serialized = serde_json::to_vec(&records).map_err(MetaError::Serialization)?; - - let new_lock = plock_meta::ActiveModel { - id: NotSet, - inode: Set(inode), - sid: Set(*self.sid.get().unwrap_or(&Uuid::nil())), - owner: Set(owner as i64), - records: Set(serialized), - }; - - new_lock.insert(&txn).await.map_err(MetaError::Database)?; } - - // Commit the transaction - txn.commit().await.map_err(MetaError::Database)?; - - Ok(()) } fn set_sid(&self, sid: Uuid) -> Result<(), MetaError> { diff --git a/project/slayerfs/src/meta/stores/etcd_store.rs b/project/slayerfs/src/meta/stores/etcd_store.rs index 85ebfb145..1fa955499 100644 --- a/project/slayerfs/src/meta/stores/etcd_store.rs +++ b/project/slayerfs/src/meta/stores/etcd_store.rs @@ -1891,10 +1891,9 @@ impl MetaStore for EtcdMetaStore { async fn get_plock( &self, inode: i64, - range: FileLockRange, - query: FileLockQuery, + query: &FileLockQuery, ) -> Result { - let _ = (inode, query, range); + let _ = (inode, query); Err(MetaError::NotImplemented) } @@ -1902,7 +1901,7 @@ impl MetaStore for EtcdMetaStore { async fn set_plock( &self, inode: i64, - owner: u64, + owner: i64, block: bool, lock_type: FileLockType, range: FileLockRange, diff --git a/project/slayerfs/src/meta/stores/redis_store.rs b/project/slayerfs/src/meta/stores/redis_store.rs index 6a2d7280a..cd356f415 100644 --- a/project/slayerfs/src/meta/stores/redis_store.rs +++ b/project/slayerfs/src/meta/stores/redis_store.rs @@ -651,10 +651,9 @@ impl MetaStore for RedisMetaStore { async fn get_plock( &self, inode: i64, - range: FileLockRange, - query: FileLockQuery, + query: &FileLockQuery, ) -> Result { - let _ = (inode, query, range); + let _ = (inode, query); Err(MetaError::NotImplemented) } @@ -662,7 +661,7 @@ impl MetaStore for RedisMetaStore { async fn set_plock( &self, inode: i64, - owner: u64, + owner: i64, block: bool, lock_type: FileLockType, range: FileLockRange, From f7af908a4a2c2fcb0f3f258cd839339327026ea0 Mon Sep 17 00:00:00 2001 From: zine yu Date: Tue, 16 Dec 2025 17:24:47 +0800 Subject: [PATCH 3/7] feat(slayerfs): impl plock for redis store and etcd store Signed-off-by: zine yu --- project/slayerfs/src/meta/entities/etcd.rs | 9 + project/slayerfs/src/meta/file_lock.rs | 46 ++++ .../src/meta/stores/database_store.rs | 48 +--- .../slayerfs/src/meta/stores/etcd_store.rs | 211 +++++++++++++++++- .../slayerfs/src/meta/stores/redis_store.rs | 196 +++++++++++++++- 5 files changed, 458 insertions(+), 52 deletions(-) diff --git a/project/slayerfs/src/meta/entities/etcd.rs b/project/slayerfs/src/meta/entities/etcd.rs index fcb7a2097..b5e278c75 100644 --- a/project/slayerfs/src/meta/entities/etcd.rs +++ b/project/slayerfs/src/meta/entities/etcd.rs @@ -2,9 +2,11 @@ use crate::meta::Permission; use crate::meta::entities::content_meta::EntryType; +use 
crate::meta::file_lock::PlockRecord; use crate::meta::store::{FileAttr, FileType}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; +use uuid::Uuid; /// Etcd entry information (reverse index: inode -> file/directory attributes) #[derive(Debug, Clone, Serialize, Deserialize)] @@ -114,3 +116,10 @@ impl EtcdForwardEntry { } } } + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EtcdPlock { + pub sid: Uuid, + pub owner: i64, + pub records: Vec, +} diff --git a/project/slayerfs/src/meta/file_lock.rs b/project/slayerfs/src/meta/file_lock.rs index 455a8ae2b..afe009540 100644 --- a/project/slayerfs/src/meta/file_lock.rs +++ b/project/slayerfs/src/meta/file_lock.rs @@ -184,6 +184,52 @@ impl PlockRecord { result } + + pub fn check_confilct( + lock_type: &FileLockType, + range: &FileLockRange, + ls: &Vec, + ) -> bool { + for l in ls { + if (*lock_type == FileLockType::WriteLock || l.lock_type == FileLockType::WriteLock) + && range.end >= l.lock_range.start + && range.start <= l.lock_range.end + { + return true; + } + } + + return false; + } + + pub fn get_plock( + locks: &Vec, + query: &FileLockQuery, + self_sid: &Uuid, + lock_sid: &Uuid, + ) -> Option { + for lock in locks { + if (lock.lock_type == FileLockType::WriteLock + || query.lock_type == FileLockType::WriteLock) + && lock.lock_range.overlaps(&query.range) + { + if *self_sid == *lock_sid { + return Some(FileLockInfo { + lock_type: lock.lock_type, + range: lock.lock_range, + pid: lock.pid, + }); + } else { + return Some(FileLockInfo { + lock_type: lock.lock_type, + range: lock.lock_range, + pid: 0, + }); + } + } + } + return None; + } } #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, Eq, PartialEq, Hash)] diff --git a/project/slayerfs/src/meta/stores/database_store.rs b/project/slayerfs/src/meta/stores/database_store.rs index 30a8cd430..2accfd993 100644 --- a/project/slayerfs/src/meta/stores/database_store.rs +++ b/project/slayerfs/src/meta/stores/database_store.rs @@ -7,6 +7,7 @@ use crate::meta::client::session::{Session, SessionInfo}; use crate::meta::config::{Config, DatabaseType}; use crate::meta::entities::session_meta::{self, Entity as SessionMeta}; use crate::meta::entities::slice_meta::{self, Entity as SliceMeta}; +use crate::meta::entities::*; use crate::meta::file_lock::{ FileLockInfo, FileLockQuery, FileLockRange, FileLockType, PlockRecord, }; @@ -15,7 +16,6 @@ use crate::meta::store::{ StatFsSnapshot, }; use crate::meta::{INODE_ID_KEY, Permission, SLICE_ID_KEY}; -use crate::meta::{entities::*, file_lock}; use crate::vfs::fs::FileType; use async_trait::async_trait; use chrono::{Duration, Utc}; @@ -28,7 +28,6 @@ use std::hash::Hash; use std::path::Path; use std::sync::OnceLock; use std::sync::atomic::{AtomicU64, Ordering}; -use tracing::Instrument; #[derive(Eq, Hash, PartialEq)] struct PlockHashMapKey { @@ -289,7 +288,7 @@ impl DatabaseMetaStore { .map_err(MetaError::Database)?; match existing { Some(_) => Ok(true), - None => Ok(true), + None => Ok(false), } } @@ -629,16 +628,7 @@ impl DatabaseMetaStore { } let ls: Vec = serde_json::from_slice(&d).unwrap_or_default(); - for l in ls { - if (lock_type == FileLockType::WriteLock - || l.lock_type == FileLockType::WriteLock) - && range.end >= l.lock_range.start - && range.start <= l.lock_range.end - { - conflict_found = true; - break; - } - } + conflict_found = PlockRecord::check_confilct(&lock_type, &range, &ls); if conflict_found { break; } @@ -1739,34 +1729,16 @@ impl MetaStore for DatabaseMetaStore { .all(&self.db) .await 
.map_err(MetaError::Database)?; + let sid = self + .sid + .get() + .ok_or_else(|| MetaError::Internal("sid not seted".to_string()))?; for row in rows { let locks: Vec = serde_json::from_slice(&row.records).unwrap_or_default(); - - for lock in locks { - if (lock.lock_type == FileLockType::WriteLock - || query.lock_type == FileLockType::WriteLock) - && lock.lock_range.overlaps(&query.range) - { - let sid = self - .sid - .get() - .ok_or(MetaError::Internal("sid not seted".to_string()))?; - - if *sid == row.sid { - return Ok(FileLockInfo { - lock_type: lock.lock_type, - range: lock.lock_range, - pid: lock.pid, - }); - } else { - return Ok(FileLockInfo { - lock_type: lock.lock_type, - range: lock.lock_range, - pid: 0, - }); - } - } + match PlockRecord::get_plock(&locks, &query, sid, &row.sid) { + Some(v) => return Ok(v), + None => {} } } diff --git a/project/slayerfs/src/meta/stores/etcd_store.rs b/project/slayerfs/src/meta/stores/etcd_store.rs index 1fa955499..6aa9b4be5 100644 --- a/project/slayerfs/src/meta/stores/etcd_store.rs +++ b/project/slayerfs/src/meta/stores/etcd_store.rs @@ -9,7 +9,9 @@ use crate::meta::client::session::{Session, SessionInfo}; use crate::meta::config::{Config, DatabaseType}; use crate::meta::entities::etcd::*; use crate::meta::entities::*; -use crate::meta::file_lock::{FileLockInfo, FileLockQuery, FileLockRange, FileLockType}; +use crate::meta::file_lock::{ + FileLockInfo, FileLockQuery, FileLockRange, FileLockType, PlockRecord, +}; use crate::meta::store::{DirEntry, FileAttr, LockName, MetaError, MetaStore}; use crate::meta::stores::pool::IdPool; use crate::meta::{INODE_ID_KEY, Permission}; @@ -22,6 +24,7 @@ use serde::de::DeserializeOwned; use serde_json; use std::collections::HashMap; use std::path::Path; +use std::sync::OnceLock; use tracing::{error, info, warn}; use uuid::Uuid; @@ -36,6 +39,7 @@ pub struct EtcdMetaStore { _config: Config, /// Local ID pools keyed by counter key (inode, slice, etc.) 
id_pools: IdPool, + sid: OnceLock, } #[allow(dead_code)] @@ -75,6 +79,10 @@ impl EtcdMetaStore { .and_then(|s| Uuid::parse_str(s).ok()) } + fn etcd_plock_key(inode: i64) -> String { + format!("p:{inode}") + } + /// Create or open an etcd metadata store pub async fn new(backend_path: &Path) -> Result { let _config = @@ -88,6 +96,7 @@ impl EtcdMetaStore { client, _config, id_pools: IdPool::default(), + sid: OnceLock::new(), }; store.init_root_directory().await?; @@ -104,6 +113,7 @@ impl EtcdMetaStore { client, _config, id_pools: IdPool::default(), + sid: OnceLock::new(), }; store.init_root_directory().await?; @@ -824,6 +834,154 @@ impl EtcdMetaStore { .map(|_| ()) .map_err(|e| MetaError::Internal(format!("Update parent children failed: {e}"))) } + + /// Check file is existing + async fn file_is_existing(&self, inode: i64) -> Result { + let key = Self::etcd_reverse_key(inode); + + let entry_info: Option = self.etcd_get_json(&key).await?; + match entry_info { + Some(entry) => { + return Ok(entry.is_file); + } + None => Ok(false), + } + } + + async fn try_set_plock( + &self, + inode: i64, + owner: i64, + new_lock: &PlockRecord, + lock_type: FileLockType, + range: FileLockRange, + ) -> Result<(), MetaError> { + let key = Self::etcd_plock_key(inode); + let sid = self + .sid + .get() + .ok_or_else(|| MetaError::Internal("sid not seted".to_string()))?; + + match lock_type { + FileLockType::UnLock => { + // Unlock file + self.atomic_update( + &key, + |mut plocks: Vec| { + // Find the lock record for this owner and sid + let pos = plocks + .iter() + .position(|p| p.sid == *sid && p.owner == owner); + + if let Some(pos) = pos { + let plock = &mut plocks[pos]; + let records: Vec = plock.records.clone(); + if records.is_empty() { + // Remove this plock entry if no records + plocks.remove(pos); + return Ok((plocks, ())); + } + + // Update locks with new unlock request + let new_records = PlockRecord::update_locks(records, new_lock.clone()); + + if new_records.is_empty() { + // Remove this plock entry if no records after update + plocks.remove(pos); + return Ok((plocks, ())); + } + + // Update the records + plock.records = new_records; + } + + Ok((plocks, ())) + }, + || Ok((vec![], ())), // No existing locks, nothing to unlock + 10, + ) + .await + } + _ => { + // Lock request (ReadLock or WriteLock) + self.atomic_update( + &key, + |mut plocks: Vec| { + // Build a hashmap of locks for easier lookup + let mut locks = HashMap::new(); + for item in &plocks { + let key = (item.sid, item.owner); + locks.insert(key, item.records.clone()); + } + + let lkey = (*sid, owner); + + // Check for conflicts with other owners/sessions + let mut conflict_found = false; + for ((sid, _owner), records_vec) in &locks { + if (*sid, owner) == lkey { + continue; + } + + let ls: Vec = records_vec.clone(); // EtcdPlock already stores Vec + conflict_found = PlockRecord::check_confilct(&lock_type, &range, &ls); + if conflict_found { + break; + } + } + + if conflict_found { + return Err(MetaError::LockConflict { + inode, + owner, + range, + }); + } + + // Get existing locks for this owner/session + let ls = locks.get(&lkey).cloned().unwrap_or_default(); + + // Update locks with new request + let ls = PlockRecord::update_locks(ls, new_lock.clone()); + + // Check if we need to update the record + if locks.get(&lkey).map(|r| r != &ls).unwrap_or(true) { + // Find existing plock entry and update it, or add new one + if let Some(plock) = plocks + .iter_mut() + .find(|p| p.sid == *sid && p.owner == owner) + { + plock.records = ls; + } else { 
+ let new_plock = EtcdPlock { + sid: *sid, + owner, + records: ls, + }; + plocks.push(new_plock); + } + } + + Ok((plocks, ())) + }, + || { + // No existing locks, create new one + let ls = PlockRecord::update_locks(vec![], new_lock.clone()); + + let new_plock = EtcdPlock { + sid: *sid, + owner, + records: ls, + }; + + Ok((vec![new_plock], ())) + }, + 10, + ) + .await + } + } + } } #[async_trait] @@ -1893,8 +2051,27 @@ impl MetaStore for EtcdMetaStore { inode: i64, query: &FileLockQuery, ) -> Result { - let _ = (inode, query); - Err(MetaError::NotImplemented) + let key = Self::etcd_plock_key(inode); + let sid = self + .sid + .get() + .ok_or_else(|| MetaError::Internal("sid not seted".to_string()))?; + + let plocks: Vec = self.etcd_get_json(&key).await?.unwrap_or_default(); + + for plock in plocks { + let locks = &plock.records; + match PlockRecord::get_plock(locks, query, sid, &plock.sid) { + Some(v) => return Ok(v), + None => {} + } + } + + Ok(FileLockInfo { + lock_type: FileLockType::UnLock, + range: FileLockRange { start: 0, end: 0 }, + pid: 0, + }) } // sets a file range lock on given file. @@ -1907,12 +2084,32 @@ impl MetaStore for EtcdMetaStore { range: FileLockRange, pid: u32, ) -> Result<(), MetaError> { - let _ = (inode, owner, lock_type, pid, block, range); - Err(MetaError::NotImplemented) + let new_lock = PlockRecord::new(lock_type, pid, range.start, range.end); + + loop { + let result = self + .try_set_plock(inode, owner, &new_lock, lock_type, range) + .await; + + match result { + Ok(()) => return Ok(()), + Err(MetaError::LockConflict { .. }) if block => { + if lock_type == FileLockType::WriteLock { + tokio::time::sleep(tokio::time::Duration::from_millis(1)).await; + } else { + tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; + } + continue; + } + Err(e) => return Err(e), + } + } } + fn set_sid(&self, sid: Uuid) -> Result<(), MetaError> { - let _ = sid; - Err(MetaError::NotImplemented) + self.sid + .set(sid) + .map_err(|_| MetaError::Internal("sid has been seted".to_string())) } } diff --git a/project/slayerfs/src/meta/stores/redis_store.rs b/project/slayerfs/src/meta/stores/redis_store.rs index cd356f415..07d9390ff 100644 --- a/project/slayerfs/src/meta/stores/redis_store.rs +++ b/project/slayerfs/src/meta/stores/redis_store.rs @@ -9,7 +9,9 @@ use crate::chuck::SliceDesc; use crate::meta::client::session::{Session, SessionInfo}; use crate::meta::config::{Config, DatabaseType}; -use crate::meta::file_lock::{FileLockInfo, FileLockQuery, FileLockRange, FileLockType}; +use crate::meta::file_lock::{ + FileLockInfo, FileLockQuery, FileLockRange, FileLockType, PlockRecord, +}; use crate::meta::store::{DirEntry, FileAttr, FileType, LockName, MetaError, MetaStore}; use crate::meta::{INODE_ID_KEY, SESSION_ID_KEY, SLICE_ID_KEY}; use async_trait::async_trait; @@ -34,13 +36,22 @@ const CHUNK_KEY_PREFIX: &str = "c"; const DELETED_SET_KEY: &str = "delslices"; const ALL_SESSIONS_KEY: &str = "allsessions"; const SESSION_INFOS_KEY: &str = "sessioninfos"; +const PLOCK_PREFIX: &str = "plock"; const LOCKS_KEY: &str = "locks"; const CHUNK_ID_BASE: u64 = 1_000_000_000u64; +#[derive(Debug, Clone, Serialize, Deserialize)] +struct RedisPlockEntry { + sid: Uuid, + owner: i64, + records: Vec, +} + /// Minimal Redis-backed meta store. 
pub struct RedisMetaStore { conn: ConnectionManager, _config: Config, + sid: std::sync::OnceLock, } impl RedisMetaStore { @@ -59,6 +70,7 @@ impl RedisMetaStore { let store = Self { conn, _config: config, + sid: std::sync::OnceLock::new(), }; store.init_root_directory().await?; Ok(store) @@ -287,6 +299,124 @@ impl RedisMetaStore { .await .map_err(redis_err) } + + fn plock_key(&self, inode: i64) -> String { + format!("{}:{}", PLOCK_PREFIX, inode) + } + + fn plock_field(&self, sid: &Uuid, owner: i64) -> String { + format!("{}:{}", sid, owner) + } + + async fn try_set_plock( + &self, + inode: i64, + owner: i64, + new_lock: &PlockRecord, + lock_type: FileLockType, + range: FileLockRange, + ) -> Result<(), MetaError> { + let mut conn = self.conn.clone(); + let plock_key = self.plock_key(inode); + let sid = self + .sid + .get() + .ok_or_else(|| MetaError::Internal("sid not seted".to_string()))?; + let field = self.plock_field(&sid, owner); + + // Check if file exists + if self.get_node(inode).await?.is_none() { + return Err(MetaError::NotFound(inode)); + } + + match lock_type { + FileLockType::UnLock => { + // Handle unlock + let current_json: Option = + conn.hget(&plock_key, &field).await.map_err(redis_err)?; + + if let Some(json) = current_json { + let records: Vec = serde_json::from_str(&json).unwrap_or_default(); + + if records.is_empty() { + // Remove the field if no records + let _: () = conn.hdel(&plock_key, &field).await.map_err(redis_err)?; + return Ok(()); + } + + let new_records = PlockRecord::update_locks(records, new_lock.clone()); + + if new_records.is_empty() { + // Remove the field if no records after update + let _: () = conn.hdel(&plock_key, &field).await.map_err(redis_err)?; + } else { + let new_json = serde_json::to_string(&new_records).map_err(|e| { + MetaError::Internal(format!("Serialization error: {e}")) + })?; + let _: () = conn + .hset(&plock_key, &field, new_json) + .await + .map_err(redis_err)?; + } + } + Ok(()) + } + _ => { + // Handle lock request (ReadLock or WriteLock) + let current_json: Option = + conn.hget(&plock_key, &field).await.map_err(redis_err)?; + + // Get current locks for this owner/session + let current_records = if let Some(json) = current_json { + serde_json::from_str(&json).unwrap_or_default() + } else { + Vec::new() + }; + + // Check for conflicts with other locks + let all_fields: Vec = conn.hkeys(&plock_key).await.map_err(redis_err)?; + let mut conflict_found = false; + + for other_field in all_fields { + if other_field == field { + continue; + } + + let other_records_json: String = conn + .hget(&plock_key, &other_field) + .await + .map_err(redis_err)?; + let other_records: Vec = + serde_json::from_str(&other_records_json).unwrap_or_default(); + + conflict_found = + PlockRecord::check_confilct(&lock_type, &range, &other_records); + if conflict_found { + break; + } + } + + if conflict_found { + return Err(MetaError::LockConflict { + inode, + owner, + range, + }); + } + + // Update locks + let new_records = PlockRecord::update_locks(current_records, new_lock.clone()); + let new_json = serde_json::to_string(&new_records) + .map_err(|e| MetaError::Internal(format!("Serialization error: {e}")))?; + + let _: () = conn + .hset(&plock_key, &field, new_json) + .await + .map_err(redis_err)?; + Ok(()) + } + } + } } #[async_trait] @@ -653,8 +783,41 @@ impl MetaStore for RedisMetaStore { inode: i64, query: &FileLockQuery, ) -> Result { - let _ = (inode, query); - Err(MetaError::NotImplemented) + let mut conn = self.conn.clone(); + let plock_key = 
self.plock_key(inode);
+        let sid = self
+            .sid
+            .get()
+            .ok_or_else(|| MetaError::Internal("sid not set".to_string()))?;
+
+        // Get all plock entries for this inode
+        let plock_entries: Vec<String> = conn.hkeys(&plock_key).await.map_err(redis_err)?;
+
+        for field in plock_entries {
+            let parts: Vec<&str> = field.split(':').collect();
+            if parts.len() != 2 {
+                continue;
+            }
+
+            let lock_sid = Uuid::parse_str(parts[0])
+                .map_err(|_| MetaError::Internal("Invalid sid in plock field".to_string()))?;
+            let _lock_owner: i64 = parts[1]
+                .parse()
+                .map_err(|_| MetaError::Internal("Invalid owner in plock field".to_string()))?;
+
+            let records_json: String = conn.hget(&plock_key, &field).await.map_err(redis_err)?;
+            let records: Vec<PlockRecord> = serde_json::from_str(&records_json).unwrap_or_default();
+
+            if let Some(info) = PlockRecord::get_plock(&records, query, &sid, &lock_sid) {
+                return Ok(info);
+            }
+        }
+
+        Ok(FileLockInfo {
+            lock_type: FileLockType::UnLock,
+            range: FileLockRange { start: 0, end: 0 },
+            pid: 0,
+        })
     }
 
     // sets a file range lock on given file.
@@ -667,13 +830,32 @@ impl MetaStore for RedisMetaStore {
         range: FileLockRange,
         pid: u32,
     ) -> Result<(), MetaError> {
-        let _ = (inode, owner, lock_type, pid, block, range);
-        Err(MetaError::NotImplemented)
+        let new_lock = PlockRecord::new(lock_type, pid, range.start, range.end);
+
+        loop {
+            let result = self
+                .try_set_plock(inode, owner, &new_lock, lock_type, range)
+                .await;
+
+            match result {
+                Ok(()) => return Ok(()),
+                Err(MetaError::LockConflict { .. }) if block => {
+                    if lock_type == FileLockType::WriteLock {
+                        tokio::time::sleep(tokio::time::Duration::from_millis(1)).await;
+                    } else {
+                        tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
+                    }
+                    continue;
+                }
+                Err(e) => return Err(e),
+            }
+        }
     }
 
     fn set_sid(&self, sid: Uuid) -> Result<(), MetaError> {
-        let _ = sid;
-        Err(MetaError::NotImplemented)
+        self.sid
+            .set(sid)
+            .map_err(|_| MetaError::Internal("sid has already been set".to_string()))
     }
 }

From 2f9bd250d8bc25f8c74e4966cf31a6282cafba88 Mon Sep 17 00:00:00 2001
From: zine yu
Date: Wed, 17 Dec 2025 15:06:34 +0800
Subject: [PATCH 4/7] feat(slayerfs): fix bugs of redis meta store

Signed-off-by: zine yu
---
 .../slayerfs/doc/file_lock_testing_guide.md   | 455 ++++++++++
 project/slayerfs/src/meta/file_lock.rs        |  62 +-
 .../src/meta/stores/database_store.rs         | 775 ++++++++++++++++--
 .../slayerfs/src/meta/stores/etcd_store.rs    | 699 +++++++++++++---
 .../slayerfs/src/meta/stores/redis_store.rs   | 665 ++++++++++++++-
 .../slayerfs/tests/scripts/test_meta_store.sh | 274 +++++++
 6 files changed, 2740 insertions(+), 190 deletions(-)
 create mode 100644 project/slayerfs/doc/file_lock_testing_guide.md
 create mode 100755 project/slayerfs/tests/scripts/test_meta_store.sh

diff --git a/project/slayerfs/doc/file_lock_testing_guide.md b/project/slayerfs/doc/file_lock_testing_guide.md
new file mode 100644
index 000000000..1402baedc
--- /dev/null
+++ b/project/slayerfs/doc/file_lock_testing_guide.md
@@ -0,0 +1,455 @@
# SlayerFS Metadata Backend File Lock Unit Test Plan

## Overview

This document provides a complete unit test plan for the file lock functionality of the three SlayerFS metadata store backends (DatabaseStore, EtcdStore, RedisStore).

## Test Goals

- Verify the correctness of file lock functionality in all three store backends (a standalone sketch of the underlying conflict rules follows this list)
- Ensure the atomicity and consistency of lock operations
- Test lock behavior under concurrent scenarios
- Validate performance baselines
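Before any backend-specific testing, the lock-compatibility rules themselves can be covered in isolation: they live entirely in `file_lock.rs` (`PlockRecord::new`, `PlockRecord::is_conflict`) and need no store. Below is a minimal sketch of such a test; the module name is hypothetical, and it assumes the test sits next to `PlockRecord` in `file_lock.rs` so that the `use super::...` imports resolve.

```rust
#[cfg(test)]
mod plock_semantics_tests {
    use super::{FileLockType, PlockRecord};

    #[tokio::test]
    async fn conflict_matrix() {
        let r1 = PlockRecord::new(FileLockType::ReadLock, 1, 0, 100);
        let r2 = PlockRecord::new(FileLockType::ReadLock, 2, 50, 150);
        let w = PlockRecord::new(FileLockType::WriteLock, 3, 90, 200);

        // Overlapping read locks are compatible.
        assert!(!r1.is_conflict(vec![r2]).await);

        // A write lock conflicts with any overlapping lock, read or write.
        assert!(w.is_conflict(vec![r1]).await);
        assert!(w.is_conflict(vec![PlockRecord::new(FileLockType::WriteLock, 4, 150, 300)]).await);

        // Disjoint ranges never conflict, whatever the lock types.
        let far = PlockRecord::new(FileLockType::WriteLock, 5, 500, 600);
        assert!(!w.is_conflict(vec![far]).await);
    }
}
```

Because `PlockRecord` is `Copy`, the fixtures can be passed to `is_conflict` by value and reused across assertions.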
`.env.test` 文件: +```bash +REDIS_URL=redis://localhost:6379 +ETCD_URL=http://localhost:2379 +DATABASE_URL=sqlite:///tmp/test_file_lock.db +``` + +## 测试执行 + +### 运行所有测试 + +```bash +# 运行所有文件锁单元测试 +cargo test --lib meta::stores::tests -- --ignored + +# 运行特定存储后端测试 +cargo test --lib meta::stores::database_store::tests -- --ignored # Database +cargo test --lib meta::stores::redis_store::tests -- --ignored # Redis +cargo test --lib meta::stores::etcd_store::tests -- --ignored # etcd +``` + +### 运行特定测试场景 + +```bash +# 基础功能测试 +cargo test test_basic_read_lock --lib -- --ignored +cargo test test_multiple_read_locks --lib -- --ignored +cargo test test_write_lock_conflict --lib -- --ignored +cargo test test_lock_release --lib -- --ignored +cargo test test_non_overlapping_locks --lib -- --ignored +``` + +## 测试覆盖范围 + +### 1. 基础功能测试 ✅ + +#### 1.1 基本读锁测试 (`test_basic_read_lock`) +- 验证读锁正确获取 +- 验证锁信息查询功能 +- 验证锁状态一致性 + +```rust +#[tokio::test] +#[ignore] +async fn test_basic_read_lock() { + let store = new_test_store().await; + let session_id = Uuid::now_v7(); + let inode = 12345; + let owner = 1001; + + // 设置会话 + store.set_sid(session_id).unwrap(); + + // 获取读锁 + store.set_plock( + inode, owner, false, FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, 1234 + ).await.unwrap(); + + // 验证锁存在 + let query = FileLockQuery { + owner: owner as u64, + lock_type: FileLockType::ReadLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let lock_info = store.get_plock(inode, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::ReadLock); + assert_eq!(lock_info.range.start, 0); + assert_eq!(lock_info.range.end, 100); + assert_eq!(lock_info.pid, 1234); +} +``` + +#### 1.2 多读锁共存测试 (`test_multiple_read_locks`) +- 验证多个读锁可以同时存在 +- 验证不同会话的读锁独立性 +- 验证锁状态正确查询 + +#### 1.3 写锁冲突检测 (`test_write_lock_conflict`) +- 验证写锁与读锁的冲突检测 +- 验证写锁与写锁的冲突检测 +- 验证非阻塞模式立即返回错误 + +#### 1.4 锁释放测试 (`test_lock_release`) +- 验证锁的正确释放 +- 验证释放后锁状态清除 +- 验证解锁操作的幂等性 + +#### 1.5 非重叠锁测试 (`test_non_overlapping_locks`) +- 验证非重叠范围的写锁可以共存 +- 验证锁范围检测的正确性 +- 验证锁状态查询的准确性 + +### 2. 边界条件测试 + +#### 2.1 零长度锁测试 +```rust +#[tokio::test] +#[ignore] +async fn test_zero_length_locks() { + let store = new_test_store().await; + let session_id = Uuid::now_v7(); + + store.set_sid(session_id).unwrap(); + + // 测试零长度锁 + let result = store.set_plock( + inode, owner, false, FileLockType::WriteLock, + FileLockRange { start: 100, end: 99 }, pid + ).await; + + // 零长度锁应该被拒绝或特殊处理 + assert!(result.is_err()); +} +``` + +#### 2.2 边界值测试 +```rust +#[tokio::test] +#[ignore] +async fn test_boundary_values() { + // 测试最大范围的锁 + // 测试最小范围的单字节锁 + // 测试负值处理 +} +``` + +### 3. 错误处理测试 + +#### 3.1 无效 inode 测试 +```rust +#[tokio::test] +#[ignore] +async fn test_invalid_inode_operations() { + let store = new_test_store().await; + let session_id = Uuid::now_v7(); + store.set_sid(session_id).unwrap(); + + // 对不存在的 inode 操作 + let result = store.set_plock( + -1, owner, false, FileLockType::WriteLock, + FileLockRange { start: 0, end: 100 }, pid + ).await; + + assert!(result.is_err()); +} +``` + +#### 3.2 未设置会话测试 +```rust +#[tokio::test] +#[ignore] +async fn test_session_not_set() { + let store = new_test_store().await; + // 不设置会话直接操作 + + let result = store.set_plock( + inode, owner, false, FileLockType::WriteLock, + FileLockRange { start: 0, end: 100 }, pid + ).await; + + assert!(result.is_err()); +} +``` + +### 4. 
性能基准测试 + +#### 4.1 吞吐量测试 +```rust +#[tokio::test] +#[ignore] +async fn benchmark_lock_throughput() { + let store = Arc::new(new_test_store().await); + let file_ino = prepare_test_file(&store).await; + + let operations = 1000; + let start = Instant::now(); + + for i in 0..operations { + let session_id = Uuid::now_v7(); + store.set_sid(session_id).unwrap(); + + store.set_plock( + file_ino, i as i64, false, FileLockType::ReadLock, + FileLockRange { start: i * 10, end: i * 10 + 9 }, pid + ).await.unwrap(); + } + + let duration = start.elapsed(); + let ops_per_sec = operations as f64 / duration.as_secs_f64(); + + println!("吞吐量: {:.2} ops/sec", ops_per_sec); + assert!(ops_per_sec > 100.0, "吞吐量应该大于 100 ops/sec"); +} +``` + +#### 4.2 延迟测试 +```rust +#[tokio::test] +#[ignore] +async fn benchmark_lock_latency() { + let store = Arc::new(new_test_store().await); + let file_ino = prepare_test_file(&store).await; + + let iterations = 1000; + let mut latencies = Vec::new(); + + for i in 0..iterations { + let session_id = Uuid::now_v7(); + store.set_sid(session_id).unwrap(); + + let start = Instant::now(); + + store.set_plock( + file_ino, i % 100, false, FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, pid + ).await.unwrap(); + + let latency = start.elapsed(); + latencies.push(latency.as_micros()); + + // 立即释放 + store.set_plock( + file_ino, i % 100, false, FileLockType::UnLock, + FileLockRange { start: 0, end: 100 }, pid + ).await.unwrap(); + } + + latencies.sort(); + let p50 = latencies[latencies.len() / 2]; + let p95 = latencies[(latencies.len() * 95) / 100]; + + println!("P50 延迟: {} μs, P95 延迟: {} μs", p50, p95); + assert!(p50 < 5000, "P50 延迟应小于 5ms"); +} +``` + +## 测试自动化 + +### 自动化脚本 + +创建 `scripts/run_file_lock_tests.sh`: + +```bash +#!/bin/bash +set -e + +echo "=== SlayerFS 元数据后端文件锁测试 ===" + +# 检查 Docker 服务 +if ! docker ps | grep -q redis-test; then + echo "启动 Redis 测试服务..." + docker run -d --name redis-test -p 6379:6379 redis:7-alpine + sleep 2 +fi + +if ! docker ps | grep -q etcd-test; then + echo "启动 etcd 测试服务..." + docker run -d --name etcd-test -p 2379:2379 quay.io/coreos/etcd:v3.5.0 \ + --data-dir=/etcd-data --name node1 \ + --listen-client-urls http://0.0.0.0:2379 \ + --advertise-client-urls http://0.0.0.0:2379 + sleep 3 +fi + +echo "运行 DatabaseStore 文件锁测试..." +cargo test --lib meta::stores::database_store::tests -- --ignored + +echo "运行 RedisStore 文件锁测试..." +cargo test --lib meta::stores::redis_store::tests -- --ignored + +echo "运行 EtcdStore 文件锁测试..." +cargo test --lib meta::stores::etcd_store::tests -- --ignored + +echo "运行性能基准测试..." 
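# NOTE: the `benchmark` substring filter below matches both benchmark tests
# defined earlier in this guide (benchmark_lock_throughput, benchmark_lock_latency).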
+cargo test benchmark --lib -- --ignored + +echo "=== 测试完成 ===" + +# 清理选项(注释掉如果需要保留服务) +# docker stop redis-test etcd-test +# docker rm redis-test etcd-test +``` + +### Docker Compose 配置 + +创建 `tests/docker-compose.test.yml`: + +```yaml +version: '3.8' + +services: + redis: + image: redis:7-alpine + ports: + - "6379:6379" + command: redis-server --appendonly yes + + etcd: + image: quay.io/coreos/etcd:v3.5.0 + ports: + - "2379:2379" + - "2380:2380" + environment: + - ETCD_AUTO_COMPACTION_MODE=revision + - ETCD_AUTO_COMPACTION_RETENTION=1000 + command: + - /usr/local/bin/etcd + - --data-dir=/etcd-data + - --name node1 + - --initial-advertise-peer-urls http://0.0.0.0:2380 + - --listen-peer-urls http://0.0.0.0:2380 + - --advertise-client-urls http://0.0.0.0:2379 + - --listen-client-urls http://0.0.0.0:2379 + - --initial-cluster node1=http://0.0.0.0:2380 +``` + +使用 Docker Compose 启动: +```bash +docker-compose -f tests/docker-compose.test.yml up -d +``` + +## 预期结果 + +### 成功标准 + +1. **DatabaseStore** (SQLite/PostgreSQL) + - ✅ 所有基础功能测试通过 + - ✅ 边界条件测试通过 + - ✅ 错误处理正确 + - ⏱ 吞吐量: >100 ops/sec + - ⏱ P50 延迟: <10ms + +2. **RedisStore** + - ✅ 所有基础功能测试通过 + - ✅ 高并发性能良好 + - ⏱ 吞吐量: >1000 ops/sec + - ⏱ P50 延迟: <1ms + +3. **EtcdStore** + - ✅ 所有基础功能测试通过 + - ✅ 分布式一致性正确 + - ⏱ 吞吐量: >50 ops/sec + - ⏱ P50 延迟: <50ms + +### 故障排查 + +#### Redis 连接失败 +```bash +# 检查 Redis 服务 +docker ps | grep redis +docker logs redis-test + +# 手动测试连接 +redis-cli -h localhost -p 6379 ping +``` + +#### etcd 连接失败 +```bash +# 检查 etcd 服务 +docker ps | grep etcd +docker logs etcd-test + +# 手动测试连接 +etcdctl --endpoints=http://localhost:2379 endpoint health +``` + +#### 测试超时 +```bash +# 增加测试超时时间 +export RUST_TEST_TIMEOUT=300 +cargo test --lib meta::stores::tests -- --ignored +``` + +## 持续集成集成 + +### GitHub Actions 配置 + +```yaml +name: File Lock Meta Store Tests + +on: [push, pull_request] + +jobs: + test-meta-stores: + runs-on: ubuntu-latest + + services: + redis: + image: redis:7-alpine + ports: + - 6379:6379 + options: >- + --health-cmd "redis-cli ping" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + etcd: + image: quay.io/coreos/etcd:v3.5.0 + ports: + - 2379:2379 + options: >- + --health-cmd "etcdctl endpoint health" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + - uses: actions/checkout@v3 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + + - name: Run file lock tests + env: + REDIS_URL: redis://localhost:6379 + ETCD_URL: http://localhost:2379 + run: | + cargo test --lib meta::stores::tests -- --ignored +``` + +这个测试方案提供了全面的元数据后端文件锁功能验证,确保 SlayerFS 在三个不同存储后端上的文件锁功能正确性和性能。 \ No newline at end of file diff --git a/project/slayerfs/src/meta/file_lock.rs b/project/slayerfs/src/meta/file_lock.rs index afe009540..3be58bf78 100644 --- a/project/slayerfs/src/meta/file_lock.rs +++ b/project/slayerfs/src/meta/file_lock.rs @@ -1,5 +1,3 @@ -use std::collections::HashMap; - use sea_orm::{ TryGetError, Value, sea_query::{self, ValueTypeErr}, @@ -7,8 +5,6 @@ use sea_orm::{ use serde::{Deserialize, Serialize}; use uuid::Uuid; -use crate::meta::entities::{PlockMeta, plock_meta}; - #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] #[repr(u32)] pub enum FileLockType { @@ -133,20 +129,34 @@ impl PlockRecord { nl.lock_range.start = l.lock_range.end; i += 1; } - _ if l.lock_range.end < nl.lock_range.end => { - // Shrink the current lock range - ls[i].lock_type = nl.lock_type; - ls[i].lock_range.start = nl.lock_range.start; + _ if l.lock_range.end > 
nl.lock_range.end + && l.lock_range.start >= nl.lock_range.start => + { + // Exact or partial overlap from the right - shrink the current lock + ls[i].lock_range.start = nl.lock_range.end; nl.lock_range.start = l.lock_range.end; - } // Insert new lock and adjust next lock - _ if l.lock_range.start < nl.lock_range.end => { - new_records.push((i, nl)); - nl.lock_range.start = nl.lock_range.end; + } + _ if l.lock_range.start < nl.lock_range.start + && l.lock_range.end > nl.lock_range.end => + { + // Unlock range is inside current lock - split into two locks + let mut left_part = ls[i]; + left_part.lock_range.end = nl.lock_range.start; + + let right_part = + PlockRecord::new(l.lock_type, l.pid, nl.lock_range.end, l.lock_range.end); + + ls[i] = left_part; + new_records.push((i + 1, right_part)); + i += 1; } _ => { - // Insert new lock - new_records.push((i, nl)); - nl.lock_range.start = nl.lock_range.end; + // Exact match or unlock covers the current lock + // Remove this lock completely + ls.remove(i); + nl.lock_range.start = l.lock_range.end; + // Don't increment i since we want to process the next element (which shifted to current position) + continue; // Skip the i += 1 at the end of this iteration } } @@ -209,21 +219,17 @@ impl PlockRecord { lock_sid: &Uuid, ) -> Option { for lock in locks { - if (lock.lock_type == FileLockType::WriteLock - || query.lock_type == FileLockType::WriteLock) - && lock.lock_range.overlaps(&query.range) - { - if *self_sid == *lock_sid { - return Some(FileLockInfo { - lock_type: lock.lock_type, - range: lock.lock_range, - pid: lock.pid, - }); - } else { + if lock.lock_range.overlaps(&query.range) { + let conflict = match (lock.lock_type, query.lock_type) { + (FileLockType::ReadLock, FileLockType::ReadLock) => false, + _ => true, + }; + + if conflict { return Some(FileLockInfo { lock_type: lock.lock_type, range: lock.lock_range, - pid: 0, + pid: if self_sid == lock_sid { lock.pid } else { 0 }, }); } } @@ -249,7 +255,7 @@ impl FileLockRange { } #[derive(Debug, Clone, Copy)] pub struct FileLockQuery { - pub owner: u64, + pub owner: i64, pub lock_type: FileLockType, pub range: FileLockRange, } diff --git a/project/slayerfs/src/meta/stores/database_store.rs b/project/slayerfs/src/meta/stores/database_store.rs index 2accfd993..64f6472e5 100644 --- a/project/slayerfs/src/meta/stores/database_store.rs +++ b/project/slayerfs/src/meta/stores/database_store.rs @@ -18,7 +18,7 @@ use crate::meta::store::{ use crate::meta::{INODE_ID_KEY, Permission, SLICE_ID_KEY}; use crate::vfs::fs::FileType; use async_trait::async_trait; -use chrono::{Duration, Utc}; +use chrono::{Duration as ChronoDuration, Utc}; use log::info; use sea_orm::prelude::Uuid; use sea_orm::*; @@ -28,6 +28,7 @@ use std::hash::Hash; use std::path::Path; use std::sync::OnceLock; use std::sync::atomic::{AtomicU64, Ordering}; +use std::time::Duration; #[derive(Eq, Hash, PartialEq)] struct PlockHashMapKey { @@ -136,7 +137,11 @@ impl DatabaseMetaStore { match &config.database.db_config { DatabaseType::Sqlite { url } => { info!("Connecting to SQLite: {}", url); - let opts = ConnectOptions::new(url.clone()); + let mut opts = ConnectOptions::new(url.clone()); + opts.max_connections(1) + .min_connections(1) + .connect_timeout(Duration::from_secs(30)) + .idle_timeout(Duration::from_secs(30)); let db = Database::connect(opts).await?; Ok(db) } @@ -195,10 +200,20 @@ impl DatabaseMetaStore { for (i, stmt) in stmts.iter().enumerate() { let sql = builder.build(stmt); - db.execute(sql).await.map_err(|e| { - eprintln!("Failed to 
execute statement {}: {}", i + 1, e); - MetaError::Database(e) - })?; + match db.execute(sql).await { + Ok(_) => info!("Statement {} executed successfully", i + 1), + Err(e) => { + if e.to_string().contains("duplicate key") { + info!( + "Table already exists for statement {}, skipping: {}", + i + 1, + e + ); + continue; + } + return Err(MetaError::Database(e)); + } + } } let index_stmt = Index::create() @@ -209,10 +224,16 @@ impl DatabaseMetaStore { .to_owned(); let index_sql = builder.build(&index_stmt); - db.execute(index_sql).await.map_err(|e| { - eprintln!("Failed to create index idx_content_meta_inode: {}", e); - MetaError::Database(e) - })?; + match db.execute(index_sql).await { + Ok(_) => info!("Index created successfully"), + Err(e) => { + if e.to_string().contains("already exists") { + info!("Index already exists, skipping: {}", e); + } else { + return Err(MetaError::Database(e)); + } + } + } info!("Database schema initialized successfully"); Ok(()) @@ -485,7 +506,7 @@ impl DatabaseMetaStore { } }; - if last_updated < current_time - Duration::seconds(7) { + if last_updated < current_time - ChronoDuration::seconds(7) { lock.last_updated = ActiveValue::Set(current_time); lock.update(&txn).await?; flag = true; @@ -533,9 +554,12 @@ impl DatabaseMetaStore { ) -> Result<(), MetaError> { let txn = self.db.begin().await.map_err(MetaError::Database)?; - // chech file is existing - let exists = self.file_is_existing(inode).await?; - if !exists { + // check file is existing using the same transaction + let exists = FileMeta::find_by_id(inode) + .one(&txn) + .await + .map_err(MetaError::Database)?; + if exists.is_none() { txn.rollback().await.map_err(MetaError::Database)?; return Err(MetaError::NotFound(inode)); } @@ -562,38 +586,42 @@ impl DatabaseMetaStore { serde_json::from_slice(&plock.records).unwrap_or_default(); if records.len() == 0 { + // No locks to unlock, transaction is complete txn.commit().await.map_err(MetaError::Database)?; return Ok(()); } - let new_records = PlockRecord::update_locks(records, new_lock.clone()); - let new_records_bytes = serde_json::to_vec(&new_records).map_err(|e| { - MetaError::Internal(format!( - "error to serialization Vec: {e}" - )) - })?; - - let mut active_model = plock_meta::ActiveModel { - inode: Set(inode), - sid: Set(*sid), - owner: Set(owner), - ..Default::default() - }; + let new_records = + PlockRecord::update_locks(records.clone(), new_lock.clone()); if new_records.len() == 0 { - let _ = PlockMeta::delete(active_model) - .exec(&txn) + // No more locks for this (inode, sid, owner) combination, delete the record + let delete_model = plock_meta::ActiveModel { + inode: Set(plock.inode), + sid: Set(plock.sid), + owner: Set(plock.owner), + ..Default::default() + }; + let _ = delete_model + .delete(&txn) .await .map_err(MetaError::Database)?; } else { + // Update the existing record with new lock list + let new_records_bytes = + serde_json::to_vec(&new_records).map_err(|e| { + MetaError::Internal(format!( + "error to serialization Vec: {e}" + )) + })?; + + let mut active_model: plock_meta::ActiveModel = plock.into(); active_model.records = Set(new_records_bytes); - active_model - .insert(&txn) - .await - .map_err(MetaError::Database)?; + active_model.save(&txn).await.map_err(MetaError::Database)?; } } None => { + // No existing lock record found txn.commit().await.map_err(MetaError::Database)?; return Ok(()); } @@ -613,7 +641,7 @@ impl DatabaseMetaStore { for item in ps { let key = PlockHashMapKey { sid: item.sid, - owner: item.owner, + owner: 
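// The map key's owner field apparently uses a different integer width than the
// stored i64 column, hence the checked conversion; the unwrap assumes every
// persisted owner fits (an assumption, not verified here).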
item.owner.try_into().unwrap(), }; locks.insert(key, item.records); } @@ -638,7 +666,7 @@ impl DatabaseMetaStore { txn.rollback().await.map_err(MetaError::Database)?; return Err(MetaError::LockConflict { inode, - owner, + owner: owner.try_into().unwrap(), range, }); } @@ -651,7 +679,7 @@ impl DatabaseMetaStore { MetaError::Internal(format!("error to serialization Vec: {e}")) })?; - // lock records changed update + // lock records changed update or insert if locks.get(&lkey).map(|r| r != &records).unwrap_or(true) { let plock = plock_meta::ActiveModel { sid: Set(*sid), @@ -659,7 +687,13 @@ impl DatabaseMetaStore { inode: Set(inode), records: Set(records), }; - plock.save(&txn).await.map_err(MetaError::Database)?; + + // Check if this is a new record or an update + if locks.contains_key(&lkey) { + plock.save(&txn).await.map_err(MetaError::Database)?; + } else { + plock.insert(&txn).await.map_err(MetaError::Database)?; + } } txn.commit().await.map_err(MetaError::Database)?; @@ -1651,7 +1685,7 @@ impl MetaStore for DatabaseMetaStore { async fn new_session(&self, session_info: SessionInfo) -> Result { let txn = self.db.begin().await.map_err(MetaError::Database)?; let session_id = Uuid::now_v7(); - let expire = (Utc::now() + Duration::minutes(5)).timestamp_millis(); + let expire = (Utc::now() + ChronoDuration::minutes(5)).timestamp_millis(); let payload = serde_json::to_vec(&session_info).map_err(MetaError::Serialization)?; let session = session_meta::ActiveModel { session_id: Set(session_id), @@ -1672,7 +1706,7 @@ impl MetaStore for DatabaseMetaStore { async fn refresh_session(&self, session_id: Uuid) -> Result<(), MetaError> { let txn = self.db.begin().await.map_err(MetaError::Database)?; - let expire = (Utc::now() + Duration::minutes(5)).timestamp_millis(); + let expire = (Utc::now() + ChronoDuration::minutes(5)).timestamp_millis(); let session = SessionMeta::find() .filter(session_meta::Column::SessionId.eq(session_id)) .one(&txn) @@ -1724,19 +1758,24 @@ impl MetaStore for DatabaseMetaStore { inode: i64, query: &FileLockQuery, ) -> Result { - let rows = PlockMeta::find() - .filter(plock_meta::Column::Inode.eq(inode)) - .all(&self.db) - .await - .map_err(MetaError::Database)?; let sid = self .sid .get() .ok_or_else(|| MetaError::Internal("sid not seted".to_string()))?; - for row in rows { + // Query specific owner and session + let row = PlockMeta::find() + .filter(plock_meta::Column::Inode.eq(inode)) + .filter(plock_meta::Column::Owner.eq(query.owner as i64)) + .filter(plock_meta::Column::Sid.eq(*sid)) + .one(&self.db) + .await + .map_err(MetaError::Database)?; + + if let Some(row) = row { let locks: Vec = serde_json::from_slice(&row.records).unwrap_or_default(); - match PlockRecord::get_plock(&locks, &query, sid, &row.sid) { + + match PlockRecord::get_plock(&locks, query, sid, &row.sid) { Some(v) => return Ok(v), None => {} } @@ -1763,7 +1802,13 @@ impl MetaStore for DatabaseMetaStore { loop { let result = self - .try_set_plock(inode, owner, &new_lock, lock_type, range) + .try_set_plock( + inode, + owner.try_into().unwrap(), + &new_lock, + lock_type, + range, + ) .await; match result { @@ -1787,3 +1832,639 @@ impl MetaStore for DatabaseMetaStore { .map_err(|_| MetaError::Internal("sid has been seted".to_string())) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::meta::config::{CacheConfig, ClientOptions, DatabaseConfig}; + use crate::meta::file_lock::{FileLockQuery, FileLockRange, FileLockType}; + use tokio::time; + + fn test_config() -> Config { + Config { + database: 
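// An in-memory SQLite database (single pooled connection) keeps each
// unit-test store isolated from the others.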
DatabaseConfig { + db_config: DatabaseType::Sqlite { + url: "sqlite:file::memory:".to_string(), + }, + }, + cache: CacheConfig::default(), + client: ClientOptions::default(), + } + } + + /// Configuration for shared database testing (multi-session) + fn shared_db_config() -> Config { + Config { + database: DatabaseConfig { + db_config: DatabaseType::Sqlite { + url: "postgres://slayerfs:slayerfs@127.0.0.1:5432/database".to_string(), + }, + }, + cache: CacheConfig::default(), + client: ClientOptions::default(), + } + } + + async fn new_test_store() -> DatabaseMetaStore { + DatabaseMetaStore::from_config(test_config()) + .await + .expect("Failed to create test database store") + } + + /// Create a new test store with pre-configured session ID + async fn new_test_store_with_session(session_id: Uuid) -> DatabaseMetaStore { + let store = new_test_store().await; + store.set_sid(session_id).expect("Failed to set session ID"); + store + } + + /// Create multiple test stores for testing multiple sessions + async fn create_test_stores(count: usize) -> Vec { + let mut stores = Vec::with_capacity(count); + for _ in 0..count { + stores.push(new_test_store().await); + } + stores + } + + /// Helper struct to manage multiple test sessions + struct TestSessionManager { + stores: Vec, + session_ids: Vec, + } + + use std::sync::LazyLock; + use tokio::sync::Mutex; + + // 静态初始化,确保只执行一次 + static SHARED_DB_INIT: LazyLock> = LazyLock::new(|| Mutex::new(())); + + impl TestSessionManager { + async fn new(session_count: usize) -> Self { + // 获取锁,确保串行初始化 + let _guard = SHARED_DB_INIT.lock().await; + + use std::env; + // Clean up existing shared test database + let temp_dir = env::temp_dir(); + let db_path = temp_dir.join("slayerfs_shared_test.db"); + + // 只在第一次初始化时清理 + static FIRST_INIT: std::sync::Once = std::sync::Once::new(); + FIRST_INIT.call_once(|| { + let _ = std::fs::remove_file(&db_path); + }); + + let mut stores = Vec::with_capacity(session_count); + let mut session_ids = Vec::with_capacity(session_count); + + // 创建第一个 store(会初始化数据库) + let config = shared_db_config(); + let first_store = DatabaseMetaStore::from_config(config.clone()) + .await + .expect("Failed to create shared test database store"); + + let first_session_id = Uuid::now_v7(); + first_store + .set_sid(first_session_id) + .expect("Failed to set session ID"); + + stores.push(first_store); + session_ids.push(first_session_id); + + // 后续的 store 复用已初始化的数据库 + for _ in 1..session_count { + let store = DatabaseMetaStore::from_config(config.clone()) + .await + .expect("Failed to create shared test database store"); + + let session_id = Uuid::now_v7(); + store.set_sid(session_id).expect("Failed to set session ID"); + + stores.push(store); + session_ids.push(session_id); + + time::sleep(time::Duration::from_millis(5)).await; + } + + Self { + stores, + session_ids, + } + } + + fn get_store(&self, index: usize) -> &DatabaseMetaStore { + &self.stores[index] + } + + fn get_session_id(&self, index: usize) -> Uuid { + self.session_ids[index] + } + } + + #[tokio::test] + async fn test_basic_read_lock() { + let store = new_test_store().await; + let session_id = Uuid::now_v7(); + let owner: u64 = 1001; + + // Set session + store.set_sid(session_id).unwrap(); + + // Create a file first + let parent = store.root_ino(); + let file_ino = store + .create_file(parent, "test_file.txt".to_string()) + .await + .unwrap(); + + // Acquire read lock + store + .set_plock( + file_ino, + owner as i64, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 
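// pid of the lock holder; an arbitrary value chosen for the test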
1234, + ) + .await + .unwrap(); + + // Verify lock exists + let query = FileLockQuery { + owner: owner as i64, + lock_type: FileLockType::ReadLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let lock_info = store.get_plock(file_ino, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::UnLock); + } + + #[tokio::test] + async fn test_multiple_read_locks() { + // Create session manager with 2 sessions + let session_mgr = TestSessionManager::new(2).await; + + let owner1: i64 = 1001; + let owner2: i64 = 1002; + + // Create a file first using the first session + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + .create_file(parent, "test_multiple_read_locks_file.txt".to_string()) + .await + .unwrap(); + + // First session acquires read lock + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Second session should be able to acquire read lock on same range + let store2 = session_mgr.get_store(1); + store2 + .set_plock( + file_ino, + owner2 as i64, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 5678, + ) + .await + .unwrap(); + + // Verify both locks exist by querying each session + let query1 = FileLockQuery { + owner: owner1, + lock_type: FileLockType::ReadLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let query2 = FileLockQuery { + owner: owner2, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); + assert_eq!(lock_info1.lock_type, FileLockType::UnLock); + + let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap(); + assert_eq!(lock_info2.lock_type, FileLockType::ReadLock); + assert_eq!(lock_info2.range.start, 0); + assert_eq!(lock_info2.range.end, 100); + assert_eq!(lock_info2.pid, 5678); + } + + #[tokio::test] + async fn test_write_lock_conflict() { + // Create session manager with 2 sessions + let session_mgr = TestSessionManager::new(2).await; + + let owner1: u64 = 1001; + let owner2: u64 = 1002; + + // Create a file first using the first session + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + .create_file(parent, "test_write_lock_conflict_file.txt".to_string()) + .await + .unwrap(); + + // First session acquires read lock + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Second session should not be able to acquire write lock on overlapping range + let store2 = session_mgr.get_store(1); + let result = store2 + .set_plock( + file_ino, + owner2 as i64, + false, // non-blocking + FileLockType::WriteLock, + FileLockRange { + start: 50, + end: 150, + }, // Overlapping range + 5678, + ) + .await; + + assert!(result.is_err()); + match result.unwrap_err() { + MetaError::LockConflict { + inode: err_inode, + owner: err_owner, + range: err_range, + } => { + assert_eq!(err_inode, file_ino); + assert_eq!(err_owner, owner2 as i64); + assert_eq!(err_range.start, 50); + assert_eq!(err_range.end, 150); + } + _ => panic!("Expected LockConflict error"), + } + } + + #[tokio::test] + async fn test_lock_release() { + let session_id = Uuid::now_v7(); + let owner = 1001; + + // Create a store with pre-configured session + let store = new_test_store_with_session(session_id).await; + + // 
Create a file first + let parent = store.root_ino(); + let file_ino = store + .create_file(parent, "test_file.txt".to_string()) + .await + .unwrap(); + + // Acquire lock + store + .set_plock( + file_ino, + owner, + false, + FileLockType::WriteLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Verify lock exists + let query = FileLockQuery { + owner: owner as i64, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let lock_info = store.get_plock(file_ino, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::WriteLock); + + // Release lock + store + .set_plock( + file_ino, + owner, + false, + FileLockType::UnLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Verify lock is released + let lock_info = store.get_plock(file_ino, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::UnLock); + } + + #[tokio::test] + async fn test_non_overlapping_locks() { + // Create session manager with 2 sessions + let session_mgr = TestSessionManager::new(2).await; + + let owner1: i64 = 1001; + let owner2: i64 = 1002; + + // Create a file first using the first session + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + .create_file(parent, "test_none_overlapping_locks_file.txt".to_string()) + .await + .unwrap(); + + // First session acquires lock on range 0-100 + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::WriteLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Second session should be able to acquire lock on non-overlapping range 200-300 + let store2 = session_mgr.get_store(1); + store2 + .set_plock( + file_ino, + owner2 as i64, + false, + FileLockType::WriteLock, + FileLockRange { + start: 200, + end: 300, + }, + 5678, + ) + .await + .unwrap(); + + // Verify both locks exist + let query1 = FileLockQuery { + owner: owner1, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let query2 = FileLockQuery { + owner: owner2, + lock_type: FileLockType::WriteLock, + range: FileLockRange { + start: 200, + end: 300, + }, + }; + + let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); + assert_eq!(lock_info1.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info1.range.start, 0); + assert_eq!(lock_info1.range.end, 100); + assert_eq!(lock_info1.pid, 1234); + + let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap(); + assert_eq!(lock_info2.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info2.range.start, 200); + assert_eq!(lock_info2.range.end, 300); + assert_eq!(lock_info2.pid, 5678); + } + + #[tokio::test] + async fn test_concurrent_read_write_locks() { + // Test multiple sessions acquiring different types of locks + let session_mgr = TestSessionManager::new(3).await; + + // Create a file + let store0 = session_mgr.get_store(0); + let parent = store0.root_ino(); + let file_ino = store0 + .create_file(parent, "concurrent_test.txt".to_string()) + .await + .unwrap(); + + let owner1: i64 = 1001; + let owner2: i64 = 1002; + let owner3: i64 = 1003; + + // Session 1: Acquire write lock on range 0-100 + { + let store1 = session_mgr.get_store(0); + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::WriteLock, + FileLockRange { start: 0, end: 100 }, + 1111, + ) + .await + .expect("Failed to acquire write lock"); + } + + // Session 2: Acquire read lock on range 
200-300 (should succeed) + { + let store2 = session_mgr.get_store(1); + store2 + .set_plock( + file_ino, + owner2 as i64, + false, + FileLockType::ReadLock, + FileLockRange { + start: 200, + end: 300, + }, + 2222, + ) + .await + .expect("Failed to acquire read lock"); + } + + // Session 3: Try to acquire write lock on overlapping range 50-150 (should fail) + { + let store3 = session_mgr.get_store(2); + let result = store3 + .set_plock( + file_ino, + owner3 as i64, + false, + FileLockType::WriteLock, + FileLockRange { + start: 50, + end: 150, + }, + 3333, + ) + .await; + + // Verify it fails with LockConflict + assert!(result.is_err()); + match result.unwrap_err() { + MetaError::LockConflict { .. } => {} + _ => panic!("Expected LockConflict error"), + } + } + + // Verify successful locks exist + let query1 = FileLockQuery { + owner: owner1, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let query2 = FileLockQuery { + owner: owner2, + lock_type: FileLockType::ReadLock, + range: FileLockRange { + start: 200, + end: 300, + }, + }; + + // Check locks from different sessions + { + let store1 = session_mgr.get_store(0); + let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); + assert_eq!(lock_info1.lock_type, FileLockType::WriteLock); + } + + { + let store2 = session_mgr.get_store(1); + let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap(); + assert_eq!(lock_info2.lock_type, FileLockType::UnLock); + } + } + + #[tokio::test] + async fn test_cross_session_lock_visibility() { + // Test that locks set by one session are visible to another session + let session_mgr = TestSessionManager::new(2).await; + + let owner1: u64 = 1001; + + // Create a file + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + .create_file(parent, "visibility_test.txt".to_string()) + .await + .unwrap(); + + // Session 1 acquires a write lock + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::WriteLock, + FileLockRange { + start: 0, + end: 1000, + }, + 4444, + ) + .await + .unwrap(); + + // Session 2 should be able to see the lock (and respect it) + let store2 = session_mgr.get_store(1); + let conflict_result = store2 + .set_plock( + file_ino, + 2002, // different owner + false, + FileLockType::WriteLock, + FileLockRange { + start: 500, + end: 600, + }, // overlapping range + 5555, + ) + .await; + + // Should fail due to lock conflict + assert!(conflict_result.is_err()); + match conflict_result.unwrap_err() { + MetaError::LockConflict { .. 
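// conflict details are deliberately ignored: any LockConflict passes the test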
} => {} + _ => panic!("Expected LockConflict error"), + } + + // Session 1 releases the lock + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::UnLock, + FileLockRange { + start: 0, + end: 1000, + }, + 4444, + ) + .await + .unwrap(); + + // Now Session 2 should be able to acquire the lock + store2 + .set_plock( + file_ino, + 2002, + false, + FileLockType::WriteLock, + FileLockRange { + start: 500, + end: 600, + }, + 5555, + ) + .await + .unwrap(); + + // Verify the lock exists + let query = FileLockQuery { + owner: 2002, + lock_type: FileLockType::WriteLock, + range: FileLockRange { + start: 500, + end: 600, + }, + }; + + let lock_info = store2.get_plock(file_ino, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info.pid, 5555); + } +} diff --git a/project/slayerfs/src/meta/stores/etcd_store.rs b/project/slayerfs/src/meta/stores/etcd_store.rs index 6aa9b4be5..e28a6722f 100644 --- a/project/slayerfs/src/meta/stores/etcd_store.rs +++ b/project/slayerfs/src/meta/stores/etcd_store.rs @@ -933,7 +933,7 @@ impl EtcdMetaStore { if conflict_found { return Err(MetaError::LockConflict { inode, - owner, + owner: owner.try_into().unwrap(), range, }); } @@ -955,7 +955,7 @@ impl EtcdMetaStore { } else { let new_plock = EtcdPlock { sid: *sid, - owner, + owner: owner.try_into().unwrap(), records: ls, }; plocks.push(new_plock); @@ -970,7 +970,7 @@ impl EtcdMetaStore { let new_plock = EtcdPlock { sid: *sid, - owner, + owner: owner.try_into().unwrap(), records: ls, }; @@ -2088,7 +2088,13 @@ impl MetaStore for EtcdMetaStore { loop { let result = self - .try_set_plock(inode, owner, &new_lock, lock_type, range) + .try_set_plock( + inode, + owner.try_into().unwrap(), + &new_lock, + lock_type, + range, + ) .await; match result { @@ -2115,18 +2121,33 @@ impl MetaStore for EtcdMetaStore { #[cfg(test)] mod tests { - use super::*; - use crate::chuck::SliceDesc; - use crate::meta::config::{CacheConfig, ClientOptions, Config, DatabaseConfig, DatabaseType}; - use crate::meta::entities::etcd::{EtcdDirChildren, EtcdEntryInfo}; - use crate::meta::{INODE_ID_KEY, Permission}; - use std::sync::Arc; + use crate::meta::MetaStore; + use crate::meta::config::Config; + use crate::meta::config::{CacheConfig, ClientOptions, DatabaseConfig, DatabaseType}; + use crate::meta::file_lock::{FileLockQuery, FileLockRange, FileLockType}; + use crate::meta::store::MetaError; + use crate::meta::stores::EtcdMetaStore; + use tokio::time; + use uuid::Uuid; fn test_config() -> Config { Config { database: DatabaseConfig { db_config: DatabaseType::Etcd { - urls: vec!["http://127.0.0.1:2379".to_string()], + urls: vec!["127.0.0.1:2379".to_string()], + }, + }, + cache: CacheConfig::default(), + client: ClientOptions::default(), + } + } + + /// Configuration for shared database testing (multi-session) + fn shared_db_config() -> Config { + Config { + database: DatabaseConfig { + db_config: DatabaseType::Etcd { + urls: vec!["127.0.0.1:2379".to_string()], }, }, cache: CacheConfig::default(), @@ -2134,143 +2155,603 @@ mod tests { } } - async fn new_store() -> EtcdMetaStore { + async fn new_test_store() -> EtcdMetaStore { EtcdMetaStore::from_config(test_config()) .await - .expect("connect etcd on 2379") + .expect("Failed to create test database store") + } + + /// Create a new test store with pre-configured session ID + async fn new_test_store_with_session(session_id: Uuid) -> EtcdMetaStore { + let store = new_test_store().await; + store.set_sid(session_id).expect("Failed to set 
session ID"); + store + } + + /// Create multiple test stores for testing multiple sessions + async fn create_test_stores(count: usize) -> Vec { + let mut stores = Vec::with_capacity(count); + for _ in 0..count { + stores.push(new_test_store().await); + } + stores + } + + /// Helper struct to manage multiple test sessions + struct TestSessionManager { + stores: Vec, + session_ids: Vec, + } + + use std::sync::LazyLock; + use tokio::sync::Mutex; + + // 静态初始化,确保只执行一次 + static SHARED_DB_INIT: LazyLock> = LazyLock::new(|| Mutex::new(())); + + impl TestSessionManager { + async fn new(session_count: usize) -> Self { + // 获取锁,确保串行初始化 + let _guard = SHARED_DB_INIT.lock().await; + + use std::env; + // Clean up existing shared test database + let temp_dir = env::temp_dir(); + let db_path = temp_dir.join("slayerfs_shared_test.db"); + + static FIRST_INIT: std::sync::Once = std::sync::Once::new(); + FIRST_INIT.call_once(|| { + let _ = std::fs::remove_file(&db_path); + }); + + let mut stores = Vec::with_capacity(session_count); + let mut session_ids = Vec::with_capacity(session_count); + + let config = shared_db_config(); + let first_store = EtcdMetaStore::from_config(config.clone()) + .await + .expect("Failed to create shared test database store"); + + let first_session_id = Uuid::now_v7(); + first_store + .set_sid(first_session_id) + .expect("Failed to set session ID"); + + stores.push(first_store); + session_ids.push(first_session_id); + + for _ in 1..session_count { + let store = EtcdMetaStore::from_config(config.clone()) + .await + .expect("Failed to create shared test database store"); + + let session_id = Uuid::now_v7(); + store.set_sid(session_id).expect("Failed to set session ID"); + + stores.push(store); + session_ids.push(session_id); + + time::sleep(time::Duration::from_millis(5)).await; + } + + Self { + stores, + session_ids, + } + } + + fn get_store(&self, index: usize) -> &EtcdMetaStore { + &self.stores[index] + } + + fn get_session_id(&self, index: usize) -> Uuid { + self.session_ids[index] + } } - #[ignore] #[tokio::test] - async fn atomic_update_creates_and_updates_children_map() { - let store = new_store().await; - let parent = 4242; + async fn test_basic_read_lock() { + let store = new_test_store().await; + let session_id = Uuid::now_v7(); + let owner: i64 = 1001; + + // Set session + store.set_sid(session_id).unwrap(); + // Create a file first + let parent = store.root_ino(); + let file_ino = store + .create_file(parent, "test_basic_read_lock_file.txt".to_string()) + .await + .unwrap(); + + // Acquire read lock store - .update_parent_children( - parent, - |m| { - m.insert("child1".into(), 1001); - }, - 5, + .set_plock( + file_ino, + owner, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 1234, ) .await .unwrap(); - store - .update_parent_children( - parent, - |m| { - m.insert("child2".into(), 1002); - }, - 5, + // Verify lock exists + let query = FileLockQuery { + owner: owner, + lock_type: FileLockType::ReadLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let lock_info = store.get_plock(file_ino, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::UnLock); + } + + #[tokio::test] + async fn test_multiple_read_locks() { + // Create session manager with 2 sessions + let session_mgr = TestSessionManager::new(2).await; + + let owner1: i64 = 1001; + let owner2: i64 = 1002; + + // Create a file first using the first session + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + 
.create_file(parent, "test_multiple_read_locks_file.txt".to_string()) + .await + .unwrap(); + + // First session acquires read lock + store1 + .set_plock( + file_ino, + owner1, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 1234, ) .await .unwrap(); - let key = EtcdMetaStore::etcd_children_key(parent); - let children = store - .etcd_get_json::(&key) + // Second session should be able to acquire read lock on same range + let store2 = session_mgr.get_store(1); + store2 + .set_plock( + file_ino, + owner2, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 5678, + ) .await - .unwrap() .unwrap(); - assert_eq!(children.children.get("child1"), Some(&1001)); - assert_eq!(children.children.get("child2"), Some(&1002)); + + // Verify both locks exist by querying each session + let query1 = FileLockQuery { + owner: owner1, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let query2 = FileLockQuery { + owner: owner2, + lock_type: FileLockType::ReadLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); + assert_eq!(lock_info1.lock_type, FileLockType::ReadLock); + assert_eq!(lock_info1.range.start, 0); + assert_eq!(lock_info1.range.end, 100); + assert_eq!(lock_info1.pid, 1234); + + let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap(); + assert_eq!(lock_info2.lock_type, FileLockType::UnLock); } - #[ignore] #[tokio::test] - async fn set_file_size_updates_mod_time() { - let store = new_store().await; - let inode = store.generate_id(INODE_ID_KEY).await.unwrap(); - let now = chrono::Utc::now().timestamp_nanos_opt().unwrap_or(0); + async fn test_write_lock_conflict() { + // Create session manager with 2 sessions + let session_mgr = TestSessionManager::new(2).await; + + let owner1: u64 = 1001; + let owner2: u64 = 1002; + + // Create a file first using the first session + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + .create_file(parent, "test_write_lock_conflict_file.txt".to_string()) + .await + .unwrap(); - let entry = EtcdEntryInfo { - is_file: true, - size: Some(0), - version: Some(0), - access_time: now, - modify_time: now, - create_time: now, - permission: Permission::new(0o644, 0, 0), - nlink: 1, - parent_inode: 1, - entry_name: format!("testfile-{inode}"), - deleted: false, - symlink_target: None, + // First session acquires read lock + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Second session should not be able to acquire write lock on overlapping range + let store2 = session_mgr.get_store(1); + let result = store2 + .set_plock( + file_ino, + owner2 as i64, + false, // non-blocking + FileLockType::WriteLock, + FileLockRange { + start: 50, + end: 150, + }, // Overlapping range + 5678, + ) + .await; + + assert!(result.is_err()); + match result.unwrap_err() { + MetaError::LockConflict { + inode: err_inode, + owner: err_owner, + range: err_range, + } => { + assert_eq!(err_inode, file_ino); + assert_eq!(err_owner, owner2 as i64); + assert_eq!(err_range.start, 50); + assert_eq!(err_range.end, 150); + } + _ => panic!("Expected LockConflict error"), + } + } + + #[tokio::test] + async fn test_lock_release() { + let session_id = Uuid::now_v7(); + let owner = 1001; + + // Create a store with pre-configured session + let store = 
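// the helper sets the session id up front; plock calls fail without one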
new_test_store_with_session(session_id).await; + + // Create a file first + let parent = store.root_ino(); + let file_ino = store + .create_file(parent, "test_lock_release_file.txt".to_string()) + .await + .unwrap(); + + // Acquire lock + store + .set_plock( + file_ino, + owner, + false, + FileLockType::WriteLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Verify lock exists + let query = FileLockQuery { + owner: owner, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, }; - let reverse_key = EtcdMetaStore::etcd_reverse_key(inode); - store.etcd_put_json(&reverse_key, &entry).await.unwrap(); - store.set_file_size(inode, 1234).await.unwrap(); - let updated: EtcdEntryInfo = store.etcd_get_json(&reverse_key).await.unwrap().unwrap(); - assert_eq!(updated.size, Some(1234)); - assert!(updated.modify_time >= now); + let lock_info = store.get_plock(file_ino, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::WriteLock); + + // Release lock + store + .set_plock( + file_ino, + owner, + false, + FileLockType::UnLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Verify lock is released + let lock_info = store.get_plock(file_ino, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::UnLock); } - #[ignore] #[tokio::test] - async fn append_slice_initializes_and_appends() { - let store = new_store().await; - let chunk_id = 7; - let s1 = SliceDesc { - slice_id: 1, - chunk_id, - offset: 0, - length: 10, + async fn test_non_overlapping_locks() { + // Create session manager with 2 sessions + let session_mgr = TestSessionManager::new(2).await; + + let owner1: i64 = 1001; + let owner2: i64 = 1002; + + // Create a file first using the first session + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + .create_file(parent, "test_none_overlapping_locks_file.txt".to_string()) + .await + .unwrap(); + + // First session acquires lock on range 0-100 + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::WriteLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Second session should be able to acquire lock on non-overlapping range 200-300 + let store2 = session_mgr.get_store(1); + store2 + .set_plock( + file_ino, + owner2 as i64, + false, + FileLockType::WriteLock, + FileLockRange { + start: 200, + end: 300, + }, + 5678, + ) + .await + .unwrap(); + + // Verify both locks exist + let query1 = FileLockQuery { + owner: owner1, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, }; - let s2 = SliceDesc { - slice_id: 2, - chunk_id, - offset: 10, - length: 5, + + let query2 = FileLockQuery { + owner: owner2, + lock_type: FileLockType::WriteLock, + range: FileLockRange { + start: 200, + end: 300, + }, }; - store.append_slice(chunk_id, s1).await.unwrap(); - store.append_slice(chunk_id, s2).await.unwrap(); + let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); + assert_eq!(lock_info1.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info1.range.start, 0); + assert_eq!(lock_info1.range.end, 100); + assert_eq!(lock_info1.pid, 1234); - let slices = store.get_slices(chunk_id).await.unwrap(); - assert_eq!(slices.len(), 2); - assert!(slices.contains(&s1)); - assert!(slices.contains(&s2)); + let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap(); + assert_eq!(lock_info2.lock_type, FileLockType::WriteLock); + 
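// the query hits owner2's own write lock, so its full info (range, pid) is returned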
assert_eq!(lock_info2.range.start, 200); + assert_eq!(lock_info2.range.end, 300); + assert_eq!(lock_info2.pid, 5678); } - #[ignore] #[tokio::test] - async fn generate_id_is_concurrent_safe() { - use std::collections::HashSet; - let store = Arc::new(new_store().await); - let key = format!( - "slayerfs:test_concurrent:{}", - chrono::Utc::now().timestamp_nanos_opt().unwrap(), - ); + async fn test_concurrent_read_write_locks() { + // Test multiple sessions acquiring different types of locks + let session_mgr = TestSessionManager::new(3).await; + + // Create a file + let store0 = session_mgr.get_store(0); + let parent = store0.root_ino(); + let file_ino = store0 + .create_file(parent, "test_concurrent_read_write_locks.txt".to_string()) + .await + .unwrap(); - let ids_per_task = 1000; - let mut handles = Vec::new(); - for _ in 0..4 { - let store = Arc::clone(&store); - let key = key.clone(); - handles.push(tokio::spawn(async move { - let mut ids = Vec::with_capacity(ids_per_task); - for _ in 0..ids_per_task { - ids.push(store.generate_id(&key).await.expect("generate id")); - } - ids - })); + let owner1: i64 = 1001; + let owner2: i64 = 1002; + let owner3: i64 = 1003; + + // Session 1: Acquire write lock on range 0-100 + { + let store1 = session_mgr.get_store(0); + store1 + .set_plock( + file_ino, + owner1, + false, + FileLockType::WriteLock, + FileLockRange { start: 0, end: 100 }, + 1111, + ) + .await + .expect("Failed to acquire write lock"); } - let mut ids = Vec::with_capacity(ids_per_task * 4); - for handle in handles { - ids.extend(handle.await.expect("join")); + // Session 2: Acquire read lock on range 200-300 (should succeed) + { + let store2 = session_mgr.get_store(1); + store2 + .set_plock( + file_ino, + owner2 as i64, + false, + FileLockType::ReadLock, + FileLockRange { + start: 200, + end: 300, + }, + 2222, + ) + .await + .expect("Failed to acquire read lock"); } - let unique: HashSet<_> = ids.iter().cloned().collect(); - assert_eq!(unique.len(), ids_per_task * 4); + // Session 3: Try to acquire write lock on overlapping range 50-150 (should fail) + { + let store3 = session_mgr.get_store(2); + let result = store3 + .set_plock( + file_ino, + owner3 as i64, + false, + FileLockType::WriteLock, + FileLockRange { + start: 50, + end: 150, + }, + 3333, + ) + .await; + + // Verify it fails with LockConflict + assert!(result.is_err()); + match result.unwrap_err() { + MetaError::LockConflict { .. 
} => {} + _ => panic!("Expected LockConflict error"), + } + } + + // Verify successful locks exist + let query1 = FileLockQuery { + owner: owner1, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let query2 = FileLockQuery { + owner: owner2, + lock_type: FileLockType::ReadLock, + range: FileLockRange { + start: 200, + end: 300, + }, + }; - let mut unique = unique.into_iter().collect::>(); - unique.sort(); + // Check locks from different sessions + { + let store1 = session_mgr.get_store(0); + let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); + assert_eq!(lock_info1.lock_type, FileLockType::WriteLock); + } + + { + let store2 = session_mgr.get_store(1); + let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap(); + assert_eq!(lock_info2.lock_type, FileLockType::UnLock); + } + } + + #[tokio::test] + async fn test_cross_session_lock_visibility() { + // Test that locks set by one session are visible to another session + let session_mgr = TestSessionManager::new(2).await; + + let owner1: u64 = 1001; + + // Create a file + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + .create_file(parent, "test_cross_session_lock_visibility.txt".to_string()) + .await + .unwrap(); + + // Session 1 acquires a write lock + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::WriteLock, + FileLockRange { + start: 0, + end: 1000, + }, + 4444, + ) + .await + .unwrap(); + + // Session 2 should be able to see the lock (and respect it) + let store2 = session_mgr.get_store(1); + let conflict_result = store2 + .set_plock( + file_ino, + 2002, // different owner + false, + FileLockType::WriteLock, + FileLockRange { + start: 500, + end: 600, + }, // overlapping range + 5555, + ) + .await; + + // Should fail due to lock conflict + assert!(conflict_result.is_err()); + match conflict_result.unwrap_err() { + MetaError::LockConflict { .. 
} => {} + _ => panic!("Expected LockConflict error"), + } + + // Session 1 releases the lock + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::UnLock, + FileLockRange { + start: 0, + end: 1000, + }, + 4444, + ) + .await + .unwrap(); + + // Now Session 2 should be able to acquire the lock + store2 + .set_plock( + file_ino, + 2002, + false, + FileLockType::WriteLock, + FileLockRange { + start: 500, + end: 600, + }, + 5555, + ) + .await + .unwrap(); + + // Verify the lock exists + let query = FileLockQuery { + owner: 2002, + lock_type: FileLockType::WriteLock, + range: FileLockRange { + start: 500, + end: 600, + }, + }; - let should = (2..=4001).collect::>(); - assert_eq!(unique, should); + let lock_info = store2.get_plock(file_ino, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info.pid, 5555); } } diff --git a/project/slayerfs/src/meta/stores/redis_store.rs b/project/slayerfs/src/meta/stores/redis_store.rs index 07d9390ff..f56d965bd 100644 --- a/project/slayerfs/src/meta/stores/redis_store.rs +++ b/project/slayerfs/src/meta/stores/redis_store.rs @@ -312,7 +312,7 @@ impl RedisMetaStore { &self, inode: i64, owner: i64, - new_lock: &PlockRecord, + new_lock: PlockRecord, lock_type: FileLockType, range: FileLockRange, ) -> Result<(), MetaError> { @@ -344,7 +344,7 @@ impl RedisMetaStore { return Ok(()); } - let new_records = PlockRecord::update_locks(records, new_lock.clone()); + let new_records = PlockRecord::update_locks(records, new_lock); if new_records.is_empty() { // Remove the field if no records after update @@ -399,7 +399,7 @@ impl RedisMetaStore { if conflict_found { return Err(MetaError::LockConflict { inode, - owner, + owner: owner.try_into().unwrap(), range, }); } @@ -790,10 +790,25 @@ impl MetaStore for RedisMetaStore { .get() .ok_or_else(|| MetaError::Internal("sid not seted".to_string()))?; + // First, try to get locks from current session's field + let current_field = format!("{}:{}", sid, query.owner); + let records_json: Result = conn.hget(&plock_key, ¤t_field).await; + if let Ok(records_json) = records_json { + let records: Vec = serde_json::from_str(&records_json).unwrap_or_default(); + if let Some(v) = PlockRecord::get_plock(&records, query, &sid, &sid) { + return Ok(v); + } + } + // Get all plock entries for this inode let plock_entries: Vec = conn.hkeys(&plock_key).await.map_err(redis_err)?; for field in plock_entries { + // Skip current field as we already checked it + if field == current_field { + continue; + } + let parts: Vec<&str> = field.split(':').collect(); if parts.len() != 2 { continue; @@ -808,8 +823,9 @@ impl MetaStore for RedisMetaStore { let records_json: String = conn.hget(&plock_key, &field).await.map_err(redis_err)?; let records: Vec = serde_json::from_str(&records_json).unwrap_or_default(); - if let Some(info) = PlockRecord::get_plock(&records, query, &sid, &lock_sid) { - return Ok(info); + match PlockRecord::get_plock(&records, query, &sid, &lock_sid) { + Some(v) => return Ok(v), + None => {} } } @@ -834,7 +850,7 @@ impl MetaStore for RedisMetaStore { loop { let result = self - .try_set_plock(inode, owner, &new_lock, lock_type, range) + .try_set_plock(inode, owner.try_into().unwrap(), new_lock, lock_type, range) .await; match result { @@ -979,3 +995,640 @@ fn millis_to_system_time(ms: i64) -> Result { fn redis_err(err: redis::RedisError) -> MetaError { MetaError::Internal(format!("Redis error: {err}")) } + +#[cfg(test)] +mod tests { + use crate::meta::MetaStore; + use 
crate::meta::config::Config; + use crate::meta::config::{CacheConfig, ClientOptions, DatabaseConfig, DatabaseType}; + use crate::meta::file_lock::{FileLockQuery, FileLockRange, FileLockType}; + use crate::meta::store::MetaError; + use crate::meta::stores::RedisMetaStore; + use tokio::time; + use uuid::Uuid; + + fn test_config() -> Config { + Config { + database: DatabaseConfig { + db_config: DatabaseType::Redis { + url: "redis://127.0.0.1:6379/0".to_string(), + }, + }, + cache: CacheConfig::default(), + client: ClientOptions::default(), + } + } + + /// Configuration for shared database testing (multi-session) + fn shared_db_config() -> Config { + Config { + database: DatabaseConfig { + db_config: DatabaseType::Redis { + url: "redis://127.0.0.1:6379/0".to_string(), + }, + }, + cache: CacheConfig::default(), + client: ClientOptions::default(), + } + } + + async fn new_test_store() -> RedisMetaStore { + RedisMetaStore::from_config(test_config()) + .await + .expect("Failed to create test database store") + } + + /// Create a new test store with pre-configured session ID + async fn new_test_store_with_session(session_id: Uuid) -> RedisMetaStore { + let store = new_test_store().await; + store.set_sid(session_id).expect("Failed to set session ID"); + store + } + + /// Create multiple test stores for testing multiple sessions + async fn create_test_stores(count: usize) -> Vec { + let mut stores = Vec::with_capacity(count); + for _ in 0..count { + stores.push(new_test_store().await); + } + stores + } + + /// Helper struct to manage multiple test sessions + struct TestSessionManager { + stores: Vec, + session_ids: Vec, + } + + use std::sync::LazyLock; + use tokio::sync::Mutex; + + // 静态初始化,确保只执行一次 + static SHARED_DB_INIT: LazyLock> = LazyLock::new(|| Mutex::new(())); + + impl TestSessionManager { + async fn new(session_count: usize) -> Self { + // 获取锁,确保串行初始化 + let _guard = SHARED_DB_INIT.lock().await; + + use std::env; + // Clean up existing shared test database + let temp_dir = env::temp_dir(); + let db_path = temp_dir.join("slayerfs_shared_test.db"); + + static FIRST_INIT: std::sync::Once = std::sync::Once::new(); + FIRST_INIT.call_once(|| { + let _ = std::fs::remove_file(&db_path); + }); + + let mut stores = Vec::with_capacity(session_count); + let mut session_ids = Vec::with_capacity(session_count); + + let config = shared_db_config(); + let first_store = RedisMetaStore::from_config(config.clone()) + .await + .expect("Failed to create shared test database store"); + + let first_session_id = Uuid::now_v7(); + first_store + .set_sid(first_session_id) + .expect("Failed to set session ID"); + + stores.push(first_store); + session_ids.push(first_session_id); + + for _ in 1..session_count { + let store = RedisMetaStore::from_config(config.clone()) + .await + .expect("Failed to create shared test database store"); + + let session_id = Uuid::now_v7(); + store.set_sid(session_id).expect("Failed to set session ID"); + + stores.push(store); + session_ids.push(session_id); + + time::sleep(time::Duration::from_millis(5)).await; + } + + Self { + stores, + session_ids, + } + } + + fn get_store(&self, index: usize) -> &RedisMetaStore { + &self.stores[index] + } + + fn get_session_id(&self, index: usize) -> Uuid { + self.session_ids[index] + } + } + + #[tokio::test] + async fn test_basic_read_lock() { + let store = new_test_store().await; + let session_id = Uuid::now_v7(); + let owner: i64 = 1001; + + // Set session + store.set_sid(session_id).unwrap(); + + // Create a file first + let parent = 
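// the test file is created directly under the root inode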
store.root_ino(); + let file_ino = store + .create_file(parent, "test_basic_read_lock_file.txt".to_string()) + .await + .unwrap(); + + // Acquire read lock + store + .set_plock( + file_ino, + owner, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Verify lock exists + let query = FileLockQuery { + owner: owner, + lock_type: FileLockType::ReadLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let lock_info = store.get_plock(file_ino, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::UnLock); + } + + #[tokio::test] + async fn test_multiple_read_locks() { + // Create session manager with 2 sessions + let session_mgr = TestSessionManager::new(2).await; + + let owner1: i64 = 1001; + let owner2: i64 = 1002; + + // Create a file first using the first session + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + .create_file(parent, "test_multiple_read_locks_file.txt".to_string()) + .await + .unwrap(); + + // First session acquires read lock + store1 + .set_plock( + file_ino, + owner1, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Second session should be able to acquire read lock on same range + let store2 = session_mgr.get_store(1); + store2 + .set_plock( + file_ino, + owner2, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 5678, + ) + .await + .unwrap(); + + // Verify both locks exist by querying each session + let query1 = FileLockQuery { + owner: owner1, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let query2 = FileLockQuery { + owner: owner2, + lock_type: FileLockType::ReadLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); + assert_eq!(lock_info1.lock_type, FileLockType::ReadLock); + assert_eq!(lock_info1.range.start, 0); + assert_eq!(lock_info1.range.end, 100); + assert_eq!(lock_info1.pid, 1234); + + let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap(); + assert_eq!(lock_info2.lock_type, FileLockType::UnLock); + } + + #[tokio::test] + async fn test_write_lock_conflict() { + // Create session manager with 2 sessions + let session_mgr = TestSessionManager::new(2).await; + + let owner1: u64 = 1001; + let owner2: u64 = 1002; + + // Create a file first using the first session + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + .create_file(parent, "test_write_lock_conflict_file.txt".to_string()) + .await + .unwrap(); + + // First session acquires read lock + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::ReadLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Second session should not be able to acquire write lock on overlapping range + let store2 = session_mgr.get_store(1); + let result = store2 + .set_plock( + file_ino, + owner2 as i64, + false, // non-blocking + FileLockType::WriteLock, + FileLockRange { + start: 50, + end: 150, + }, // Overlapping range + 5678, + ) + .await; + + assert!(result.is_err()); + match result.unwrap_err() { + MetaError::LockConflict { + inode: err_inode, + owner: err_owner, + range: err_range, + } => { + assert_eq!(err_inode, file_ino); + assert_eq!(err_owner, owner2 as i64); + assert_eq!(err_range.start, 50); + assert_eq!(err_range.end, 150); + } + _ => panic!("Expected 
LockConflict error"), + } + } + + #[tokio::test] + async fn test_lock_release() { + let session_id = Uuid::now_v7(); + let owner = 1001; + + // Create a store with pre-configured session + let store = new_test_store_with_session(session_id).await; + + // Create a file first + let parent = store.root_ino(); + let file_ino = store + .create_file(parent, "test_lock_release_file.txt".to_string()) + .await + .unwrap(); + + // Acquire lock + store + .set_plock( + file_ino, + owner, + false, + FileLockType::WriteLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Verify lock exists + let query = FileLockQuery { + owner: owner, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let lock_info = store.get_plock(file_ino, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::WriteLock); + + // Release lock + store + .set_plock( + file_ino, + owner, + false, + FileLockType::UnLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Verify lock is released + let lock_info = store.get_plock(file_ino, &query).await.unwrap(); + assert_eq!(lock_info.lock_type, FileLockType::UnLock); + } + + #[tokio::test] + async fn test_non_overlapping_locks() { + // Create session manager with 2 sessions + let session_mgr = TestSessionManager::new(2).await; + + let owner1: i64 = 1001; + let owner2: i64 = 1002; + + // Create a file first using the first session + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + .create_file(parent, "test_none_overlapping_locks_file.txt".to_string()) + .await + .unwrap(); + + // First session acquires lock on range 0-100 + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::WriteLock, + FileLockRange { start: 0, end: 100 }, + 1234, + ) + .await + .unwrap(); + + // Second session should be able to acquire lock on non-overlapping range 200-300 + let store2 = session_mgr.get_store(1); + store2 + .set_plock( + file_ino, + owner2 as i64, + false, + FileLockType::WriteLock, + FileLockRange { + start: 200, + end: 300, + }, + 5678, + ) + .await + .unwrap(); + + // Verify both locks exist + let query1 = FileLockQuery { + owner: owner1, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let query2 = FileLockQuery { + owner: owner2, + lock_type: FileLockType::WriteLock, + range: FileLockRange { + start: 200, + end: 300, + }, + }; + + let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); + assert_eq!(lock_info1.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info1.range.start, 0); + assert_eq!(lock_info1.range.end, 100); + assert_eq!(lock_info1.pid, 1234); + + let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap(); + assert_eq!(lock_info2.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info2.range.start, 200); + assert_eq!(lock_info2.range.end, 300); + assert_eq!(lock_info2.pid, 5678); + } + + #[tokio::test] + async fn test_concurrent_read_write_locks() { + // Test multiple sessions acquiring different types of locks + let session_mgr = TestSessionManager::new(3).await; + + // Create a file + let store0 = session_mgr.get_store(0); + let parent = store0.root_ino(); + let file_ino = store0 + .create_file(parent, "test_concurrent_read_write_locks.txt".to_string()) + .await + .unwrap(); + + let owner1: i64 = 1001; + let owner2: i64 = 1002; + let owner3: i64 = 1003; + + // Session 1: Acquire write lock on range 0-100 + { + let 
store1 = session_mgr.get_store(0); + store1 + .set_plock( + file_ino, + owner1, + false, + FileLockType::WriteLock, + FileLockRange { start: 0, end: 100 }, + 1111, + ) + .await + .expect("Failed to acquire write lock"); + } + + // Session 2: Acquire read lock on range 200-300 (should succeed) + { + let store2 = session_mgr.get_store(1); + store2 + .set_plock( + file_ino, + owner2 as i64, + false, + FileLockType::ReadLock, + FileLockRange { + start: 200, + end: 300, + }, + 2222, + ) + .await + .expect("Failed to acquire read lock"); + } + + // Session 3: Try to acquire write lock on overlapping range 50-150 (should fail) + { + let store3 = session_mgr.get_store(2); + let result = store3 + .set_plock( + file_ino, + owner3 as i64, + false, + FileLockType::WriteLock, + FileLockRange { + start: 50, + end: 150, + }, + 3333, + ) + .await; + + // Verify it fails with LockConflict + assert!(result.is_err()); + match result.unwrap_err() { + MetaError::LockConflict { .. } => {} + _ => panic!("Expected LockConflict error"), + } + } + + // Verify successful locks exist + let query1 = FileLockQuery { + owner: owner1, + lock_type: FileLockType::WriteLock, + range: FileLockRange { start: 0, end: 100 }, + }; + + let query2 = FileLockQuery { + owner: owner2, + lock_type: FileLockType::ReadLock, + range: FileLockRange { + start: 200, + end: 300, + }, + }; + + // Check locks from different sessions + { + let store1 = session_mgr.get_store(0); + let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); + assert_eq!(lock_info1.lock_type, FileLockType::WriteLock); + } + + { + let store2 = session_mgr.get_store(1); + let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap(); + assert_eq!(lock_info2.lock_type, FileLockType::UnLock); + } + } + + #[tokio::test] + async fn test_cross_session_lock_visibility() { + // Test that locks set by one session are visible to another session + let session_mgr = TestSessionManager::new(2).await; + + let owner1: u64 = 1001; + + // Create a file + let store1 = session_mgr.get_store(0); + let parent = store1.root_ino(); + let file_ino = store1 + .create_file(parent, "test_cross_session_lock_visibility.txt".to_string()) + .await + .unwrap(); + + // Session 1 acquires a write lock + store1 + .set_plock( + file_ino, + owner1 as i64, + false, + FileLockType::WriteLock, + FileLockRange { + start: 0, + end: 1000, + }, + 4444, + ) + .await + .unwrap(); + + // Session 2 should be able to see the lock (and respect it) + let store2 = session_mgr.get_store(1); + let conflict_result = store2 + .set_plock( + file_ino, + 2002, // different owner + false, + FileLockType::WriteLock, + FileLockRange { + start: 500, + end: 600, + }, // overlapping range + 5555, + ) + .await; + + // Should fail due to lock conflict + assert!(conflict_result.is_err()); + match conflict_result.unwrap_err() { + MetaError::LockConflict { .. 
} => {}
+            _ => panic!("Expected LockConflict error"),
+        }
+
+        // Session 1 releases the lock
+        store1
+            .set_plock(
+                file_ino,
+                owner1 as i64,
+                false,
+                FileLockType::UnLock,
+                FileLockRange {
+                    start: 0,
+                    end: 1000,
+                },
+                4444,
+            )
+            .await
+            .unwrap();
+
+        // Now Session 2 should be able to acquire the lock
+        store2
+            .set_plock(
+                file_ino,
+                2002,
+                false,
+                FileLockType::WriteLock,
+                FileLockRange {
+                    start: 500,
+                    end: 600,
+                },
+                5555,
+            )
+            .await
+            .unwrap();
+
+        // Verify the lock exists
+        let query = FileLockQuery {
+            owner: 2002,
+            lock_type: FileLockType::WriteLock,
+            range: FileLockRange {
+                start: 500,
+                end: 600,
+            },
+        };
+
+        let lock_info = store2.get_plock(file_ino, &query).await.unwrap();
+        assert_eq!(lock_info.lock_type, FileLockType::WriteLock);
+        assert_eq!(lock_info.pid, 5555);
+    }
+}
diff --git a/project/slayerfs/tests/scripts/test_meta_store.sh b/project/slayerfs/tests/scripts/test_meta_store.sh
new file mode 100755
index 000000000..19498f94b
--- /dev/null
+++ b/project/slayerfs/tests/scripts/test_meta_store.sh
@@ -0,0 +1,274 @@
+#!/bin/bash
+
+# Variables
+NETWORK_NAME="slayerfs-network"
+ETCD_IMAGE="quay.io/coreos/etcd:v3.6.0"
+REDIS_IMAGE="redis:7.2-alpine"
+POSTGRES_IMAGE="postgres:15-alpine"
+
+ETCD_CONTAINER="etcd-slayerfs-test"
+REDIS_CONTAINER="redis-slayerfs-test"
+POSTGRES_CONTAINER="pgsql-slayerfs-test"
+
+# Color definitions
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Test result counters
+TEST_PASSED=0
+TEST_FAILED=0
+TEST_TOTAL=0
+
+# Logging helpers
+log_info() {
+    echo -e "${BLUE}[INFO]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1"
+}
+
+log_success() {
+    echo -e "${GREEN}[SUCCESS]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1"
+}
+
+log_warning() {
+    echo -e "${YELLOW}[WARNING]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1"
+}
+
+log_error() {
+    echo -e "${RED}[ERROR]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1"
+}
+
+# Check that Docker is running
+check_docker() {
+    if ! docker info > /dev/null 2>&1; then
+        log_error "Docker service is not running or the current user lacks permission to access it"
+        exit 1
+    fi
+    log_success "Docker service is running"
+}
+
+# Cleanup function - make sure containers and the network are removed
+cleanup() {
+    log_info "Cleaning up containers and network..."
+
+    # Stop containers
+    for container in "$ETCD_CONTAINER" "$REDIS_CONTAINER" "$POSTGRES_CONTAINER"; do
+        if docker ps -a --format "{{.Names}}" | grep -q "^${container}$"; then
+            log_info "Stopping container: $container"
+            docker stop "$container" > /dev/null 2>&1
+            docker rm "$container" > /dev/null 2>&1
+        fi
+    done
+
+    # Remove the network
+    if docker network ls --format "{{.Name}}" | grep -q "^${NETWORK_NAME}$"; then
+        log_info "Removing network: $NETWORK_NAME"
+        docker network rm "$NETWORK_NAME" > /dev/null 2>&1
+    fi
+
+    log_success "Cleanup complete"
+
+    # Print the test result summary
+    echo ""
+    echo "=========================================="
+    echo "            Test Result Summary"
+    echo "=========================================="
+    echo "Total tests: $TEST_TOTAL"
+    echo -e "Passed: ${GREEN}$TEST_PASSED${NC}"
+    echo -e "Failed: ${RED}$TEST_FAILED${NC}"
+    echo "=========================================="
+
+    # Exit with a non-zero code if any test failed
+    if [ $TEST_FAILED -gt 0 ]; then
+        exit 1
+    fi
+    exit 0
+}
+
+# Check a container's health
+check_container_health() {
+    local container_name=$1
+    local check_type=$2
+
+    case $check_type in
+        "etcd")
+            docker exec "$container_name" etcdctl endpoint health > /dev/null 2>&1
+            ;;
+        "redis")
+            docker exec "$container_name" redis-cli ping | grep -q "PONG"
+            ;;
+        "postgres")
+            docker exec "$container_name" pg_isready -U slayerfs > /dev/null 2>&1
+            ;;
+        *)
+            return 1
+            ;;
+    esac
+
+    return $?
+}
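+# Usage sketch (illustrative, not called by this script): probe one service a
+# single time before entering the retry loop, e.g. to fail fast when Redis is
+# not answering at all:
+#
+#   if ! check_container_health "$REDIS_CONTAINER" "redis"; then
+#       log_warning "Redis is not answering PING yet"
+#   fi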
+
+# Wait for a container to start
+wait_for_container() {
+    local container_name=$1
+    local check_type=$2
+    local max_attempts=${3:-30}
+    local wait_seconds=${4:-2}
+
+    log_info "Waiting for $container_name to start..."
+
+    for ((i=1; i<=max_attempts; i++)); do
+        if check_container_health "$container_name" "$check_type"; then
+            log_success "$container_name started successfully"
+            return 0
+        fi
+        log_info "Attempt $i/$max_attempts: $container_name not ready yet, waiting ${wait_seconds}s..."
+        sleep $wait_seconds
+    done
+
+    log_error "$container_name still not up after ${max_attempts} attempts"
+    return 1
+}
+
+# Start the containers in parallel
+start_containers() {
+    log_info "Creating network $NETWORK_NAME..."
+    docker network create --driver bridge "$NETWORK_NAME" 2>/dev/null || true
+
+    # Start the PostgreSQL container
+    log_info "Starting the PostgreSQL container..."
+    docker run -d \
+        --name "$POSTGRES_CONTAINER" \
+        --network "$NETWORK_NAME" \
+        -p 5432:5432 \
+        -e POSTGRES_DB=database \
+        -e POSTGRES_USER=slayerfs \
+        -e POSTGRES_PASSWORD=slayerfs \
+        -e POSTGRES_INITDB_ARGS="--encoding=UTF8 --locale=C" \
+        "$POSTGRES_IMAGE" > /dev/null 2>&1
+
+    # Start the Redis container
+    log_info "Starting the Redis container..."
+    docker run -d \
+        --name "$REDIS_CONTAINER" \
+        --network "$NETWORK_NAME" \
+        -p 6379:6379 \
+        "$REDIS_IMAGE" \
+        redis-server \
+        --appendonly yes \
+        --appendfsync everysec > /dev/null 2>&1
+
+    # Start the etcd container
+    log_info "Starting the etcd container..."
+    docker run -d \
+        --name "$ETCD_CONTAINER" \
+        --network "$NETWORK_NAME" \
+        -p 2379:2379 \
+        -p 2380:2380 \
+        -e ETCDCTL_API=3 \
+        "$ETCD_IMAGE" \
+        /usr/local/bin/etcd \
+        --name etcd-single \
+        --data-dir /etcd-data \
+        --listen-client-urls http://0.0.0.0:2379 \
+        --advertise-client-urls http://etcd:2379 \
+        --listen-peer-urls http://0.0.0.0:2380 \
+        --initial-advertise-peer-urls http://etcd:2380 \
+        --initial-cluster etcd-single=http://etcd:2380 \
+        --initial-cluster-token etcd-cluster-token \
+        --initial-cluster-state new \
+        --auto-compaction-retention=1 \
+        --quota-backend-bytes=8589934592 > /dev/null 2>&1
+
+    # Wait for all containers to come up
+    wait_for_container "$POSTGRES_CONTAINER" "postgres" 15 2 || return 1
+    wait_for_container "$REDIS_CONTAINER" "redis" 10 2 || return 1
+    wait_for_container "$ETCD_CONTAINER" "etcd" 10 2 || return 1
+
+    return 0
+}
+
+# Run a test and record the result
+run_test() {
+    local test_name=$1
+    local test_command=$2
+
+    TEST_TOTAL=$((TEST_TOTAL + 1))
+
+    echo ""
+    log_info "Starting test: $test_name"
+    echo "Running command: $test_command"
+    echo ""
+
+    # Run the test
+    if eval "$test_command"; then
+        log_success "Test passed: $test_name"
+        TEST_PASSED=$((TEST_PASSED + 1))
+        return 0
+    else
+        log_error "Test failed: $test_name"
+        TEST_FAILED=$((TEST_FAILED + 1))
+        return 1
+    fi
+}
+
+# Trap signals so cleanup always runs when the script exits
+trap cleanup SIGINT SIGTERM EXIT
+
+# Main program
+echo "=========================================="
+echo "     SlayerFS Integration Test Script"
+echo "=========================================="
+
+# Check the Docker service
+check_docker
+
+# Clean up any leftover containers and network
+log_info "Cleaning up any leftover resources..."
+docker stop "$ETCD_CONTAINER" "$REDIS_CONTAINER" "$POSTGRES_CONTAINER" 2>/dev/null || true
+docker rm "$ETCD_CONTAINER" "$REDIS_CONTAINER" "$POSTGRES_CONTAINER" 2>/dev/null || true
+docker network rm "$NETWORK_NAME" 2>/dev/null || true
+
+# Start the containers
+if ! start_containers; then
+    log_error "Container startup failed, aborting tests"
+    exit 1
+fi
+
+# Show container status
+echo ""
+log_success "All containers started successfully:"
+echo "=========================================="
+echo "Service     | Container              | Address"
+echo "------------------------------------------"
+echo "etcd        | $ETCD_CONTAINER        | http://localhost:2379"
+echo "Redis       | $REDIS_CONTAINER       | redis://localhost:6379"
+echo "PostgreSQL  | $POSTGRES_CONTAINER    | postgresql://slayerfs:slayerfs@localhost:5432/database"
+echo "=========================================="
+echo ""
+
+# Wait for all services to be fully ready
+log_info "Waiting for services to be fully ready..."
+sleep 5
+
+# Run the tests
+echo "=============== Running tests ==============="
+
+# Redis store tests
+run_test "RedisMetaStore" "cargo test --lib meta::stores::redis_store -- --nocapture"
+
+# etcd store tests
+run_test "EtcdMetaStore" "cargo test --lib meta::stores::etcd_store -- --nocapture"
+
+# Database store tests
+run_test "DatabaseMetaStore" "cargo test --lib meta::stores::database_store -- --nocapture"
+
+# If all tests passed so far, also run the integration tests
+if [ $TEST_FAILED -eq 0 ]; then
+    log_info "All base tests passed, running integration tests..."
+    run_test "Integration tests" "cargo test --test integration_tests -- --nocapture 2>&1 | head -100"
+fi
+
+echo ""
+log_info "Test run finished"

From 7e1ef7825e81a851d3785296a608522bf972278c Mon Sep 17 00:00:00 2001
From: zine yu
Date: Wed, 17 Dec 2025 18:23:00 +0800
Subject: [PATCH 5/7] feat(slayerfs): impl plock for slayerfs

Signed-off-by: zine yu
---
 buckal.snap                                     |   5 +-
 project/Cargo.lock                              |  25 +---
 project/rfuse3/BUCK                             |   1 +
 project/rfuse3/src/raw/session.rs               |   4 +-
 project/slayerfs/BUCK                           |   4 +-
 project/slayerfs/Cargo.toml                     |   2 +-
 project/slayerfs/src/fuse/mod.rs                | 116 +++++++++++++--
 project/slayerfs/src/meta/client.rs             |  30 +++++
 project/slayerfs/src/meta/file_lock.rs          |  69 +++------
 project/slayerfs/src/meta/layer.rs              |  13 ++
 .../src/meta/stores/database_store.rs           | 121 ++++++------------
 project/slayerfs/src/meta/stores/etcd_store.rs  | 110 ++++++----------
 project/slayerfs/src/meta/stores/redis_store.rs | 101 ++++++---------
 project/slayerfs/src/vfs/fs.rs                  |  59 +++++++++
 project/slayerfs/src/vfs/sdk.rs                 |  23 ++++
 third-party/rust/crates/rfuse3/0.0.5/BUCK       |  50 --------
 16 files changed, 379 insertions(+), 354 deletions(-)
 delete mode 100644 third-party/rust/crates/rfuse3/0.0.5/BUCK

diff --git a/buckal.snap b/buckal.snap
index dc6d76a36..789f9aa75 100644
--- a/buckal.snap
+++ b/buckal.snap
@@ -12,11 +12,11 @@ version = 1
 "path+file://($WORKSPACE)/libnetwork#0.1.0" = "5a10bfb7787e55e9f3c8dab30c7d43e8512db8edc6b835bf190319de27bfaef1"
 "path+file://($WORKSPACE)/libscheduler#0.1.0" = "219b70093748941358966ea056a93ac067eb24dc1ff9a5592ccc31da8ffebce7"
 "path+file://($WORKSPACE)/libvault#0.1.0" = "c31d5bfca736950caacb26da4d482dce19954ce577453ce62185a59ba835b613"
-"path+file://($WORKSPACE)/rfuse3#0.0.5" = "a8d0556393d94b80e63134b93ed133da78317291537f2bcadda7edf666760e6f"
+"path+file://($WORKSPACE)/rfuse3#0.0.5" = "b8adcd184f301f85aa9ebc217c1e565eceb27df805106b3b6ec07d893d5faa9e"
 "path+file://($WORKSPACE)/rkb#0.1.0" = "9e00182a5a0df52907588281a8d44994a5068924745664491b2554af51e00562"
 "path+file://($WORKSPACE)/rkl#0.1.0" = "3ee3a2a2ae9d2f1ff6198e80db33aaf7f556724f52a2dfa86c902c0367a5234a"
 "path+file://($WORKSPACE)/rks#0.1.0" = "e46f658cd7c57ba39cf8b962fbc00bcff2e5c330393b1abdf8e4214490bbcd71"
-"path+file://($WORKSPACE)/slayerfs#0.1.0" = "b5d7c12ae1b1947cc3fc68cd775e08ca4ca2b99130ae6130155e3e5e76bde1ab"
+"path+file://($WORKSPACE)/slayerfs#0.1.0" = "361f2681f4466791c3e5f9667f13eb6fa7b73514badd882ef55e673c4b7a98c2"
 "registry+https://github.com/rust-lang/crates.io-index#addr2line@0.25.1" =
"c3300f450f9672eda9f05513f92264ed5a7cae6959297eb04934d95465c377e7" "registry+https://github.com/rust-lang/crates.io-index#adler2@2.0.1" = "7081ce7693ca45bff0e855374e6b7f386805bba1de9f7c288b1c706b99abef7f" "registry+https://github.com/rust-lang/crates.io-index#ahash@0.7.8" = "263bbc26dec8ade458658cf9dae3ef8fc606e85f04c7b47abb4f9255319689ac" @@ -539,7 +539,6 @@ version = 1 "registry+https://github.com/rust-lang/crates.io-index#resolv-conf@0.7.5" = "d0b46914101b977a76ced07b4cab0ce42e22d123697cc036e548a98e2059311f" "registry+https://github.com/rust-lang/crates.io-index#rfc6979@0.3.1" = "9cef3b799f4f4ca18b4dfa4fe0a57a42b26cabaa248ab94b3b161d788ee371ef" "registry+https://github.com/rust-lang/crates.io-index#rfuse3@0.0.3" = "191c7be4bfd58164500f08333d767b8d82c196b1024a8a34dd7926771f0352b6" -"registry+https://github.com/rust-lang/crates.io-index#rfuse3@0.0.5" = "812d117bbb02cfb1bfaead4e0babe7821cf9d9755ab9a72b389a2e0d51c301a8" "registry+https://github.com/rust-lang/crates.io-index#rgb@0.8.52" = "8bdf9be2788b697d8404ffdbfa7abe1d7aec9454c0f296aa7cf4d8faf50109e6" "registry+https://github.com/rust-lang/crates.io-index#ring@0.17.14" = "714b197c6078f4eb7100f1ada3f2a862f09a327cda92c9d977dd36cd777f74af" "registry+https://github.com/rust-lang/crates.io-index#rkyv@0.7.45" = "1705251b30038af2fb4d55eb91d30fcf44d21e4146595c5add53ec142d9a368a" diff --git a/project/Cargo.lock b/project/Cargo.lock index a063818b9..d3cc62e48 100644 --- a/project/Cargo.lock +++ b/project/Cargo.lock @@ -6049,29 +6049,6 @@ dependencies = [ "which", ] -[[package]] -name = "rfuse3" -version = "0.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848f037ea6f58bf93045c366e6099146b2b227c06646c584b64ed7d4a089ad71" -dependencies = [ - "aligned_box", - "async-notify", - "async-trait", - "bincode", - "bytes", - "futures-channel", - "futures-util", - "libc", - "nix 0.29.0", - "serde", - "slab", - "tokio", - "tracing", - "trait-make", - "which", -] - [[package]] name = "rgb" version = "0.8.52" @@ -7164,7 +7141,7 @@ dependencies = [ "pprof", "rand 0.9.2", "redis", - "rfuse3 0.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rfuse3 0.0.5", "sea-orm", "serde", "serde_json", diff --git a/project/rfuse3/BUCK b/project/rfuse3/BUCK index daf9cb9cd..91b4f2d40 100644 --- a/project/rfuse3/BUCK +++ b/project/rfuse3/BUCK @@ -22,6 +22,7 @@ rust_library( edition = "2021", features = [ "default", + "file-lock", "tokio-runtime", "unprivileged", ], diff --git a/project/rfuse3/src/raw/session.rs b/project/rfuse3/src/raw/session.rs index 6e992ebf5..aef50fe95 100644 --- a/project/rfuse3/src/raw/session.rs +++ b/project/rfuse3/src/raw/session.rs @@ -2669,7 +2669,7 @@ async fn worker_getlk( lk_in.owner, lk_in.lk.start, lk_in.lk.end, - lk_in.lk.typ, + lk_in.lk.r#type, lk_in.lk.pid, ) .await @@ -2745,7 +2745,7 @@ async fn worker_setlk( lk_in.owner, lk_in.lk.start, lk_in.lk.end, - lk_in.lk.typ, + lk_in.lk.r#type, lk_in.lk.pid, is_blocking, ) diff --git a/project/slayerfs/BUCK b/project/slayerfs/BUCK index 7d911791a..5725d013d 100644 --- a/project/slayerfs/BUCK +++ b/project/slayerfs/BUCK @@ -23,6 +23,7 @@ rust_binary( rustc_flags = ["@$(location :slayerfs-manifest[env_flags])"], visibility = ["PUBLIC"], deps = [ + "//project/rfuse3:rfuse3", "//third-party/rust/crates/anyhow/1.0.100:anyhow", "//third-party/rust/crates/async-recursion/1.1.1:async-recursion", "//third-party/rust/crates/async-trait/0.1.89:async-trait", @@ -50,7 +51,6 @@ rust_binary( "//third-party/rust/crates/moka/0.12.11:moka", 
"//third-party/rust/crates/rand/0.9.2:rand", "//third-party/rust/crates/redis/0.26.1:redis", - "//third-party/rust/crates/rfuse3/0.0.5:rfuse3", "//third-party/rust/crates/sea-orm/1.1.17:sea-orm", "//third-party/rust/crates/serde/1.0.228:serde", "//third-party/rust/crates/serde_json/1.0.145:serde_json", @@ -75,6 +75,7 @@ rust_library( rustc_flags = ["@$(location :slayerfs-manifest[env_flags])"], visibility = ["PUBLIC"], deps = [ + "//project/rfuse3:rfuse3", "//third-party/rust/crates/anyhow/1.0.100:anyhow", "//third-party/rust/crates/async-recursion/1.1.1:async-recursion", "//third-party/rust/crates/async-trait/0.1.89:async-trait", @@ -102,7 +103,6 @@ rust_library( "//third-party/rust/crates/moka/0.12.11:moka", "//third-party/rust/crates/rand/0.9.2:rand", "//third-party/rust/crates/redis/0.26.1:redis", - "//third-party/rust/crates/rfuse3/0.0.5:rfuse3", "//third-party/rust/crates/sea-orm/1.1.17:sea-orm", "//third-party/rust/crates/serde/1.0.228:serde", "//third-party/rust/crates/serde_json/1.0.145:serde_json", diff --git a/project/slayerfs/Cargo.toml b/project/slayerfs/Cargo.toml index 1b8d5dee5..064be8b34 100644 --- a/project/slayerfs/Cargo.toml +++ b/project/slayerfs/Cargo.toml @@ -33,7 +33,7 @@ serde_yaml = { workspace = true } thiserror = { workspace = true } chrono = { workspace = true, features = ["serde"] } log = { workspace = true } -rfuse3 = { version = "0.0.5", features = ["tokio-runtime","unprivileged"]} +rfuse3 = { version = "0.0.5", features = ["file-lock", "tokio-runtime", "unprivileged"], path = "../rfuse3" } etcd-client = { workspace = true } clap = { workspace = true, features = ["derive"] } auto_impl = { workspace = true } diff --git a/project/slayerfs/src/fuse/mod.rs b/project/slayerfs/src/fuse/mod.rs index 9020048dc..17abf058b 100644 --- a/project/slayerfs/src/fuse/mod.rs +++ b/project/slayerfs/src/fuse/mod.rs @@ -1,21 +1,22 @@ //! FUSE adapter and request handling -//! This module provides the FUSE (Filesystem in Userspace) integration for SlayerFS. -//! It implements the adapter and request handling logic required to expose the virtual filesystem -//! to the operating system via the FUSE protocol. -//! -//! Main components: -//! - `adapter`: Contains the FUSE adapter implementation. -//! - `mount`: Handles mounting the virtual filesystem using FUSE. -//! - Implementation of the `Filesystem` trait for `VFS`, enabling translation of FUSE requests -//! into virtual filesystem operations. -//! - Helpers for attribute and file type conversion between VFS and FUSE representations. -//! -//! The module also includes platform-specific tests for mounting and basic operations, +//! This module provides the FUSE (Filesystem in Userspace) integration for SlayerFS. +//! It implements the adapter and request handling logic required to expose the virtual filesystem +//! to the operating system via the FUSE protocol. +//! +//! Main components: +//! - `adapter`: Contains the FUSE adapter implementation. +//! - `mount`: Handles mounting the virtual filesystem using FUSE. +//! - Implementation of the `Filesystem` trait for `VFS`, enabling translation of FUSE requests +//! into virtual filesystem operations. +//! - Helpers for attribute and file type conversion between VFS and FUSE representations. +//! +//! The module also includes platform-specific tests for mounting and basic operations, //! and provides utilities for mapping VFS metadata to FUSE attributes. 
pub mod adapter; pub mod mount; use crate::chuck::store::BlockStore; use crate::meta::MetaStore; +use crate::meta::file_lock::{FileLockQuery, FileLockRange, FileLockType}; use crate::meta::store::MetaError; use crate::vfs::fs::{FileAttr as VfsFileAttr, FileType as VfsFileType, VFS}; use bytes::Bytes; @@ -24,7 +25,7 @@ use rfuse3::Result as FuseResult; use rfuse3::raw::Request; use rfuse3::raw::reply::{ DirectoryEntry, DirectoryEntryPlus, ReplyAttr, ReplyCreated, ReplyData, ReplyDirectory, - ReplyDirectoryPlus, ReplyEntry, ReplyInit, ReplyOpen, ReplyStatFs, ReplyWrite, + ReplyDirectoryPlus, ReplyEntry, ReplyInit, ReplyLock, ReplyOpen, ReplyStatFs, ReplyWrite, }; use std::ffi::{OsStr, OsString}; use std::num::NonZeroU32; @@ -887,6 +888,95 @@ where Ok(()) } + // Test for a POSIX file lock + async fn getlk( + &self, + _req: Request, + inode: u64, + _fh: u64, + lock_owner: u64, + start: u64, + end: u64, + lock_type: u32, + _pid: u32, + ) -> FuseResult { + // Convert FUSE lock type to our internal type + let fl_type = match lock_type as i32 { + libc::F_RDLCK => FileLockType::Read, + libc::F_WRLCK => FileLockType::Write, + libc::F_UNLCK => FileLockType::UnLock, + _ => return Err(libc::EINVAL.into()), + }; + + let query = FileLockQuery { + owner: lock_owner as i64, + lock_type: fl_type, + range: FileLockRange { start, end }, + }; + + match self.get_plock_ino(inode as i64, &query).await { + Ok(info) => { + // Convert internal lock type back to FUSE type + let fuse_type = match info.lock_type { + FileLockType::Read => libc::F_RDLCK, + FileLockType::Write => libc::F_WRLCK, + FileLockType::UnLock => libc::F_UNLCK, + }; + Ok(ReplyLock { + r#type: fuse_type as u32, + start: info.range.start, + end: info.range.end, + pid: info.pid, + }) + } + Err(e) => Err(match e { + MetaError::NotFound(_) => libc::ENOENT, + MetaError::NotSupported(_) => libc::ENOSYS, + _ => libc::EIO, + } + .into()), + } + } + + // Acquire, modify or release a POSIX file lock + async fn setlk( + &self, + _req: Request, + inode: u64, + _fh: u64, + lock_owner: u64, + start: u64, + end: u64, + lock_type: u32, + pid: u32, + block: bool, + ) -> FuseResult<()> { + // Convert FUSE lock type to our internal type + let fl_type = match lock_type as i32 { + libc::F_RDLCK => FileLockType::Read, + libc::F_WRLCK => FileLockType::Write, + libc::F_UNLCK => FileLockType::UnLock, + _ => return Err(libc::EINVAL.into()), + }; + + let range = FileLockRange { start, end }; + + // Note: block parameter is ignored for now, non-blocking only + match self + .set_plock_ino(inode as i64, lock_owner as i64, block, fl_type, range, pid) + .await + { + Ok(()) => Ok(()), + Err(e) => Err(match e { + MetaError::NotFound(_) => libc::ENOENT, + MetaError::AlreadyExists { .. 
} => libc::EAGAIN, // Lock conflict
+                MetaError::NotSupported(_) => libc::ENOSYS,
+                _ => libc::EIO,
+            }
+            .into()),
+        }
+    }
+
     // Forget (kernel reference drop); no inode ref tracking yet so no-op
     async fn forget(&self, _req: Request, _inode: u64, _nlookup: u64) {}

diff --git a/project/slayerfs/src/meta/client.rs b/project/slayerfs/src/meta/client.rs
index 9039c776b..5a479ca1b 100644
--- a/project/slayerfs/src/meta/client.rs
+++ b/project/slayerfs/src/meta/client.rs
@@ -5,12 +5,14 @@ pub mod session;

 use crate::chuck::SliceDesc;
 use crate::meta::config::{CacheCapacity, CacheTtl};
+use crate::meta::file_lock::{FileLockInfo, FileLockQuery, FileLockRange, FileLockType};
 use crate::meta::layer::MetaLayer;
 use crate::meta::store::{
     DirEntry, FileAttr, MetaError, MetaStore, OpenFlags, SetAttrFlags, SetAttrRequest,
     StatFsSnapshot,
 };
 use crate::meta::stores::{CacheInvalidationEvent, EtcdMetaStore, EtcdWatchWorker, WatchConfig};
+use uuid::Uuid;
 use crate::vfs::fs::FileType;
 use async_trait::async_trait;
 use dashmap::DashMap;
@@ -286,6 +288,18 @@ impl MetaClient {
         self.session_manager.shutdown().await;
     }

+    /// Get the current session ID if a session is active.
+    #[allow(dead_code)]
+    pub async fn session_id(&self) -> Option<Uuid> {
+        *self.session_manager.session_id.read().await
+    }
+
+    /// Get the current process ID.
+    #[allow(dead_code)]
+    pub fn process_id(&self) -> u32 {
+        std::process::id()
+    }
+
     /// Finds and removes stale sessions using store-provided helpers.
     ///
     /// Returns the number of sessions successfully cleaned. Failures are
@@ -1063,6 +1077,22 @@ impl MetaLayer for MetaClient {
         MetaClient::shutdown_session(self).await;
         Ok(())
     }
+
+    async fn get_plock(&self, inode: i64, query: &FileLockQuery) -> Result<FileLockInfo, MetaError> {
+        self.store.get_plock(inode, query).await
+    }
+
+    async fn set_plock(
+        &self,
+        inode: i64,
+        owner: i64,
+        block: bool,
+        lock_type: FileLockType,
+        range: FileLockRange,
+        pid: u32,
+    ) -> Result<(), MetaError> {
+        self.store.set_plock(inode, owner, block, lock_type, range, pid).await
+    }
 }

 #[cfg(test)]
diff --git a/project/slayerfs/src/meta/file_lock.rs b/project/slayerfs/src/meta/file_lock.rs
index 3be58bf78..87afe0325 100644
--- a/project/slayerfs/src/meta/file_lock.rs
+++ b/project/slayerfs/src/meta/file_lock.rs
@@ -8,16 +8,16 @@ use uuid::Uuid;
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
 #[repr(u32)]
 pub enum FileLockType {
-    ReadLock = libc::F_RDLCK as u32,
-    WriteLock = libc::F_WRLCK as u32,
+    Read = libc::F_RDLCK as u32,
+    Write = libc::F_WRLCK as u32,
     UnLock = libc::F_UNLCK as u32,
 }

 impl FileLockType {
     pub fn from_u32(value: u32) -> Option<Self> {
         match value {
-            x if x == Self::ReadLock as u32 => Some(Self::ReadLock),
-            x if x == Self::WriteLock as u32 => Some(Self::WriteLock),
+            x if x == Self::Read as u32 => Some(Self::Read),
+            x if x == Self::Write as u32 => Some(Self::Write),
             x if x == Self::UnLock as u32 => Some(Self::UnLock),
             _ => None,
         }
@@ -31,8 +31,8 @@ impl FileLockType {
 impl std::convert::From<FileLockType> for sea_orm::Value {
     fn from(value: FileLockType) -> Self {
         match value {
-            FileLockType::ReadLock => Value::Unsigned(Some(FileLockType::ReadLock as u32)),
-            FileLockType::WriteLock => Value::Unsigned(Some(FileLockType::WriteLock as u32)),
+            FileLockType::Read => Value::Unsigned(Some(FileLockType::Read as u32)),
+            FileLockType::Write => Value::Unsigned(Some(FileLockType::Write as u32)),
             FileLockType::UnLock => Value::Unsigned(Some(FileLockType::UnLock as u32)),
         }
     }
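The lock-compatibility rule that check_confilct and get_plock implement in the hunks below reduces to: two locks conflict iff their (inclusive) ranges overlap and at least one of them is a write lock. A standalone sketch of that predicate, with hypothetical tuple-based types for brevity:

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Kind {
    Read,
    Write,
}

// Inclusive ranges [start, end] overlap when neither one ends before the
// other begins -- the same test as FileLockRange::overlaps below.
fn overlaps(a: (u64, u64), b: (u64, u64)) -> bool {
    a.1 >= b.0 && a.0 <= b.1
}

// POSIX record locks: only read/read is compatible on overlapping ranges.
fn conflicts(held: (Kind, (u64, u64)), wanted: (Kind, (u64, u64))) -> bool {
    overlaps(held.1, wanted.1) && !matches!((held.0, wanted.0), (Kind::Read, Kind::Read))
}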
@@ -80,24 +80,11 @@ pub struct PlockRecord {

 impl PlockRecord {
     pub fn new(lock_type: FileLockType, pid: u32, start: u64, end: u64) -> Self {
-        return Self {
+        Self {
             lock_type,
             pid,
             lock_range: FileLockRange { start, end },
-        };
-    }
-
-    pub async fn is_conflict(&self, locks: Vec<PlockRecord>) -> bool {
-        for lock in locks {
-            if self.lock_range.overlaps(&lock.lock_range) {
-                match (self.lock_type, lock.lock_type) {
-                    (FileLockType::ReadLock, FileLockType::ReadLock) => {}
-                    _ => return true,
-                }
-            }
         }
-
-        false
     }

     pub fn update_locks(mut ls: Vec<PlockRecord>, nl: PlockRecord) -> Vec<PlockRecord> {
@@ -181,13 +168,12 @@ impl PlockRecord {
         let mut result: Vec<PlockRecord> = Vec::new();

         for record in ls {
-            if let Some(last) = result.last_mut() {
-                if last.lock_type == record.lock_type
-                    && last.lock_range.end == record.lock_range.start
-                {
-                    last.lock_range.end = record.lock_range.end;
-                    continue;
-                }
+            if let Some(last) = result.last_mut()
+                && last.lock_type == record.lock_type
+                && last.lock_range.end == record.lock_range.start
+            {
+                last.lock_range.end = record.lock_range.end;
+                continue;
             }
             result.push(record);
         }
@@ -201,7 +187,7 @@ impl PlockRecord {
         ls: &Vec<PlockRecord>,
     ) -> bool {
         for l in ls {
-            if (*lock_type == FileLockType::WriteLock || l.lock_type == FileLockType::WriteLock)
+            if (*lock_type == FileLockType::Write || l.lock_type == FileLockType::Write)
                 && range.end >= l.lock_range.start
                 && range.start <= l.lock_range.end
             {
@@ -209,7 +195,7 @@ impl PlockRecord {
             }
         }

-        return false;
+        false
     }

     pub fn get_plock(
@@ -220,11 +206,10 @@ impl PlockRecord {
     ) -> Option<FileLockInfo> {
         for lock in locks {
             if lock.lock_range.overlaps(&query.range) {
-                let conflict = match (lock.lock_type, query.lock_type) {
-                    (FileLockType::ReadLock, FileLockType::ReadLock) => false,
-                    _ => true,
-                };
-
+                let conflict = !matches!(
+                    (lock.lock_type, query.lock_type),
+                    (FileLockType::Read, FileLockType::Read)
+                );
                 if conflict {
                     return Some(FileLockInfo {
                         lock_type: lock.lock_type,
@@ -234,7 +219,7 @@ impl PlockRecord {
                 }
             }
         }
-        return None;
+        None
     }
 }

@@ -245,10 +230,6 @@ pub struct FileLockRange {
 }

 impl FileLockRange {
-    pub fn new(start: u64, end: u64) -> Self {
-        Self { start, end }
-    }
-
     pub fn overlaps(&self, other: &Self) -> bool {
         self.end >= other.start && self.start <= other.end
     }
@@ -266,13 +247,3 @@ pub struct FileLockInfo {
     pub range: FileLockRange,
     pub pid: u32,
 }
-
-impl FileLockInfo {
-    pub fn unlocked() -> Self {
-        Self {
-            lock_type: FileLockType::UnLock,
-            range: FileLockRange::default(),
-            pid: 0,
-        }
-    }
-}
diff --git a/project/slayerfs/src/meta/layer.rs b/project/slayerfs/src/meta/layer.rs
index d6e523d2c..3ac4662e3 100644
--- a/project/slayerfs/src/meta/layer.rs
+++ b/project/slayerfs/src/meta/layer.rs
@@ -2,6 +2,7 @@ use async_trait::async_trait;

 use crate::chuck::SliceDesc;
 use crate::meta::client::session::SessionInfo;
+use crate::meta::file_lock::{FileLockInfo, FileLockQuery, FileLockRange, FileLockType};
 use crate::meta::store::{
     DirEntry, FileAttr, FileType, MetaError, OpenFlags, SetAttrFlags, SetAttrRequest,
     StatFsSnapshot,
@@ -80,4 +81,16 @@ pub trait MetaLayer: Send + Sync {
     // ---------- Session lifecycle ----------
     async fn start_session(&self, session_info: SessionInfo) -> Result<(), MetaError>;
     async fn shutdown_session(&self) -> Result<(), MetaError>;
+
+    // ---------- File lock operations ----------
+    async fn get_plock(&self, inode: i64, query: &FileLockQuery) -> Result<FileLockInfo, MetaError>;
+    async fn set_plock(
+        &self,
+        inode: i64,
+        owner: i64,
+        block: bool,
+        lock_type: FileLockType,
+        range: FileLockRange,
+        pid: u32,
+    ) -> Result<(), MetaError>;
 }
diff --git a/project/slayerfs/src/meta/stores/database_store.rs 
b/project/slayerfs/src/meta/stores/database_store.rs index 64f6472e5..6fa8a9662 100644 --- a/project/slayerfs/src/meta/stores/database_store.rs +++ b/project/slayerfs/src/meta/stores/database_store.rs @@ -301,18 +301,6 @@ impl DatabaseMetaStore { .map_err(MetaError::Database) } - /// Check file is existing - async fn file_is_existing(&self, inode: i64) -> Result { - let existing = FileMeta::find_by_id(inode) - .one(&self.db) - .await - .map_err(MetaError::Database)?; - match existing { - Some(_) => Ok(true), - None => Ok(false), - } - } - /// Create a new directory async fn create_directory(&self, parent_inode: i64, name: String) -> Result { // Start transaction @@ -585,16 +573,15 @@ impl DatabaseMetaStore { let records: Vec = serde_json::from_slice(&plock.records).unwrap_or_default(); - if records.len() == 0 { + if records.is_empty() { // No locks to unlock, transaction is complete txn.commit().await.map_err(MetaError::Database)?; return Ok(()); } - let new_records = - PlockRecord::update_locks(records.clone(), new_lock.clone()); + let new_records = PlockRecord::update_locks(records.clone(), *new_lock); - if new_records.len() == 0 { + if new_records.is_empty() { // No more locks for this (inode, sid, owner) combination, delete the record let delete_model = plock_meta::ActiveModel { inode: Set(plock.inode), @@ -641,7 +628,7 @@ impl DatabaseMetaStore { for item in ps { let key = PlockHashMapKey { sid: item.sid, - owner: item.owner.try_into().unwrap(), + owner: item.owner, }; locks.insert(key, item.records); } @@ -655,7 +642,7 @@ impl DatabaseMetaStore { continue; } - let ls: Vec = serde_json::from_slice(&d).unwrap_or_default(); + let ls: Vec = serde_json::from_slice(d).unwrap_or_default(); conflict_found = PlockRecord::check_confilct(&lock_type, &range, &ls); if conflict_found { break; @@ -666,14 +653,14 @@ impl DatabaseMetaStore { txn.rollback().await.map_err(MetaError::Database)?; return Err(MetaError::LockConflict { inode, - owner: owner.try_into().unwrap(), + owner, range, }); } let ls = serde_json::from_slice(locks.get(&lkey).unwrap_or(&vec![])).unwrap_or_default(); - let ls = PlockRecord::update_locks(ls, new_lock.clone()); + let ls = PlockRecord::update_locks(ls, *new_lock); let records = serde_json::to_vec(&ls).map_err(|e| { MetaError::Internal(format!("error to serialization Vec: {e}")) @@ -1766,7 +1753,7 @@ impl MetaStore for DatabaseMetaStore { // Query specific owner and session let row = PlockMeta::find() .filter(plock_meta::Column::Inode.eq(inode)) - .filter(plock_meta::Column::Owner.eq(query.owner as i64)) + .filter(plock_meta::Column::Owner.eq(query.owner)) .filter(plock_meta::Column::Sid.eq(*sid)) .one(&self.db) .await @@ -1775,9 +1762,8 @@ impl MetaStore for DatabaseMetaStore { if let Some(row) = row { let locks: Vec = serde_json::from_slice(&row.records).unwrap_or_default(); - match PlockRecord::get_plock(&locks, query, sid, &row.sid) { - Some(v) => return Ok(v), - None => {} + if let Some(v) = PlockRecord::get_plock(&locks, query, sid, &row.sid) { + return Ok(v); } } @@ -1802,19 +1788,13 @@ impl MetaStore for DatabaseMetaStore { loop { let result = self - .try_set_plock( - inode, - owner.try_into().unwrap(), - &new_lock, - lock_type, - range, - ) + .try_set_plock(inode, owner, &new_lock, lock_type, range) .await; match result { Ok(()) => return Ok(()), Err(MetaError::LockConflict { .. 
}) if block => { - if lock_type == FileLockType::WriteLock { + if lock_type == FileLockType::Write { tokio::time::sleep(tokio::time::Duration::from_millis(1)).await; } else { tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; @@ -1878,19 +1858,9 @@ mod tests { store } - /// Create multiple test stores for testing multiple sessions - async fn create_test_stores(count: usize) -> Vec { - let mut stores = Vec::with_capacity(count); - for _ in 0..count { - stores.push(new_test_store().await); - } - stores - } - /// Helper struct to manage multiple test sessions struct TestSessionManager { stores: Vec, - session_ids: Vec, } use std::sync::LazyLock; @@ -1947,19 +1917,12 @@ mod tests { time::sleep(time::Duration::from_millis(5)).await; } - Self { - stores, - session_ids, - } + Self { stores } } fn get_store(&self, index: usize) -> &DatabaseMetaStore { &self.stores[index] } - - fn get_session_id(&self, index: usize) -> Uuid { - self.session_ids[index] - } } #[tokio::test] @@ -1984,7 +1947,7 @@ mod tests { file_ino, owner as i64, false, - FileLockType::ReadLock, + FileLockType::Read, FileLockRange { start: 0, end: 100 }, 1234, ) @@ -1994,7 +1957,7 @@ mod tests { // Verify lock exists let query = FileLockQuery { owner: owner as i64, - lock_type: FileLockType::ReadLock, + lock_type: FileLockType::Read, range: FileLockRange { start: 0, end: 100 }, }; @@ -2024,7 +1987,7 @@ mod tests { file_ino, owner1 as i64, false, - FileLockType::ReadLock, + FileLockType::Read, FileLockRange { start: 0, end: 100 }, 1234, ) @@ -2038,7 +2001,7 @@ mod tests { file_ino, owner2 as i64, false, - FileLockType::ReadLock, + FileLockType::Read, FileLockRange { start: 0, end: 100 }, 5678, ) @@ -2048,13 +2011,13 @@ mod tests { // Verify both locks exist by querying each session let query1 = FileLockQuery { owner: owner1, - lock_type: FileLockType::ReadLock, + lock_type: FileLockType::Read, range: FileLockRange { start: 0, end: 100 }, }; let query2 = FileLockQuery { owner: owner2, - lock_type: FileLockType::WriteLock, + lock_type: FileLockType::Write, range: FileLockRange { start: 0, end: 100 }, }; @@ -2062,7 +2025,7 @@ mod tests { assert_eq!(lock_info1.lock_type, FileLockType::UnLock); let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap(); - assert_eq!(lock_info2.lock_type, FileLockType::ReadLock); + assert_eq!(lock_info2.lock_type, FileLockType::Read); assert_eq!(lock_info2.range.start, 0); assert_eq!(lock_info2.range.end, 100); assert_eq!(lock_info2.pid, 5678); @@ -2090,7 +2053,7 @@ mod tests { file_ino, owner1 as i64, false, - FileLockType::ReadLock, + FileLockType::Read, FileLockRange { start: 0, end: 100 }, 1234, ) @@ -2104,7 +2067,7 @@ mod tests { file_ino, owner2 as i64, false, // non-blocking - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 50, end: 150, @@ -2150,7 +2113,7 @@ mod tests { file_ino, owner, false, - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 0, end: 100 }, 1234, ) @@ -2160,12 +2123,12 @@ mod tests { // Verify lock exists let query = FileLockQuery { owner: owner as i64, - lock_type: FileLockType::WriteLock, + lock_type: FileLockType::Write, range: FileLockRange { start: 0, end: 100 }, }; let lock_info = store.get_plock(file_ino, &query).await.unwrap(); - assert_eq!(lock_info.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info.lock_type, FileLockType::Write); // Release lock store @@ -2207,7 +2170,7 @@ mod tests { file_ino, owner1 as i64, false, - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 0, 
end: 100 }, 1234, ) @@ -2221,7 +2184,7 @@ mod tests { file_ino, owner2 as i64, false, - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 200, end: 300, @@ -2234,13 +2197,13 @@ mod tests { // Verify both locks exist let query1 = FileLockQuery { owner: owner1, - lock_type: FileLockType::WriteLock, + lock_type: FileLockType::Write, range: FileLockRange { start: 0, end: 100 }, }; let query2 = FileLockQuery { owner: owner2, - lock_type: FileLockType::WriteLock, + lock_type: FileLockType::Write, range: FileLockRange { start: 200, end: 300, @@ -2248,13 +2211,13 @@ mod tests { }; let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); - assert_eq!(lock_info1.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info1.lock_type, FileLockType::Write); assert_eq!(lock_info1.range.start, 0); assert_eq!(lock_info1.range.end, 100); assert_eq!(lock_info1.pid, 1234); let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap(); - assert_eq!(lock_info2.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info2.lock_type, FileLockType::Write); assert_eq!(lock_info2.range.start, 200); assert_eq!(lock_info2.range.end, 300); assert_eq!(lock_info2.pid, 5678); @@ -2285,7 +2248,7 @@ mod tests { file_ino, owner1 as i64, false, - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 0, end: 100 }, 1111, ) @@ -2301,7 +2264,7 @@ mod tests { file_ino, owner2 as i64, false, - FileLockType::ReadLock, + FileLockType::Read, FileLockRange { start: 200, end: 300, @@ -2320,7 +2283,7 @@ mod tests { file_ino, owner3 as i64, false, - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 50, end: 150, @@ -2340,13 +2303,13 @@ mod tests { // Verify successful locks exist let query1 = FileLockQuery { owner: owner1, - lock_type: FileLockType::WriteLock, + lock_type: FileLockType::Write, range: FileLockRange { start: 0, end: 100 }, }; let query2 = FileLockQuery { owner: owner2, - lock_type: FileLockType::ReadLock, + lock_type: FileLockType::Read, range: FileLockRange { start: 200, end: 300, @@ -2357,7 +2320,7 @@ mod tests { { let store1 = session_mgr.get_store(0); let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); - assert_eq!(lock_info1.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info1.lock_type, FileLockType::Write); } { @@ -2388,7 +2351,7 @@ mod tests { file_ino, owner1 as i64, false, - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 0, end: 1000, @@ -2405,7 +2368,7 @@ mod tests { file_ino, 2002, // different owner false, - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 500, end: 600, @@ -2443,7 +2406,7 @@ mod tests { file_ino, 2002, false, - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 500, end: 600, @@ -2456,7 +2419,7 @@ mod tests { // Verify the lock exists let query = FileLockQuery { owner: 2002, - lock_type: FileLockType::WriteLock, + lock_type: FileLockType::Write, range: FileLockRange { start: 500, end: 600, @@ -2464,7 +2427,7 @@ mod tests { }; let lock_info = store2.get_plock(file_ino, &query).await.unwrap(); - assert_eq!(lock_info.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info.lock_type, FileLockType::Write); assert_eq!(lock_info.pid, 5555); } } diff --git a/project/slayerfs/src/meta/stores/etcd_store.rs b/project/slayerfs/src/meta/stores/etcd_store.rs index e28a6722f..49dc5e329 100644 --- a/project/slayerfs/src/meta/stores/etcd_store.rs +++ b/project/slayerfs/src/meta/stores/etcd_store.rs @@ -841,9 +841,7 @@ impl EtcdMetaStore { 
let entry_info: Option = self.etcd_get_json(&key).await?; match entry_info { - Some(entry) => { - return Ok(entry.is_file); - } + Some(entry) => Ok(entry.is_file), None => Ok(false), } } @@ -883,7 +881,7 @@ impl EtcdMetaStore { } // Update locks with new unlock request - let new_records = PlockRecord::update_locks(records, new_lock.clone()); + let new_records = PlockRecord::update_locks(records, *new_lock); if new_records.is_empty() { // Remove this plock entry if no records after update @@ -933,7 +931,7 @@ impl EtcdMetaStore { if conflict_found { return Err(MetaError::LockConflict { inode, - owner: owner.try_into().unwrap(), + owner, range, }); } @@ -942,7 +940,7 @@ impl EtcdMetaStore { let ls = locks.get(&lkey).cloned().unwrap_or_default(); // Update locks with new request - let ls = PlockRecord::update_locks(ls, new_lock.clone()); + let ls = PlockRecord::update_locks(ls, *new_lock); // Check if we need to update the record if locks.get(&lkey).map(|r| r != &ls).unwrap_or(true) { @@ -955,7 +953,7 @@ impl EtcdMetaStore { } else { let new_plock = EtcdPlock { sid: *sid, - owner: owner.try_into().unwrap(), + owner, records: ls, }; plocks.push(new_plock); @@ -966,11 +964,11 @@ impl EtcdMetaStore { }, || { // No existing locks, create new one - let ls = PlockRecord::update_locks(vec![], new_lock.clone()); + let ls = PlockRecord::update_locks(vec![], *new_lock); let new_plock = EtcdPlock { sid: *sid, - owner: owner.try_into().unwrap(), + owner, records: ls, }; @@ -2061,9 +2059,8 @@ impl MetaStore for EtcdMetaStore { for plock in plocks { let locks = &plock.records; - match PlockRecord::get_plock(locks, query, sid, &plock.sid) { - Some(v) => return Ok(v), - None => {} + if let Some(v) = PlockRecord::get_plock(locks, query, sid, &plock.sid) { + return Ok(v); } } @@ -2088,19 +2085,13 @@ impl MetaStore for EtcdMetaStore { loop { let result = self - .try_set_plock( - inode, - owner.try_into().unwrap(), - &new_lock, - lock_type, - range, - ) + .try_set_plock(inode, owner, &new_lock, lock_type, range) .await; match result { Ok(()) => return Ok(()), Err(MetaError::LockConflict { .. 
}) if block => { - if lock_type == FileLockType::WriteLock { + if lock_type == FileLockType::Write { tokio::time::sleep(tokio::time::Duration::from_millis(1)).await; } else { tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; @@ -2168,19 +2159,9 @@ mod tests { store } - /// Create multiple test stores for testing multiple sessions - async fn create_test_stores(count: usize) -> Vec { - let mut stores = Vec::with_capacity(count); - for _ in 0..count { - stores.push(new_test_store().await); - } - stores - } - /// Helper struct to manage multiple test sessions struct TestSessionManager { stores: Vec, - session_ids: Vec, } use std::sync::LazyLock; @@ -2234,19 +2215,12 @@ mod tests { time::sleep(time::Duration::from_millis(5)).await; } - Self { - stores, - session_ids, - } + Self { stores } } fn get_store(&self, index: usize) -> &EtcdMetaStore { &self.stores[index] } - - fn get_session_id(&self, index: usize) -> Uuid { - self.session_ids[index] - } } #[tokio::test] @@ -2271,7 +2245,7 @@ mod tests { file_ino, owner, false, - FileLockType::ReadLock, + FileLockType::Read, FileLockRange { start: 0, end: 100 }, 1234, ) @@ -2280,8 +2254,8 @@ mod tests { // Verify lock exists let query = FileLockQuery { - owner: owner, - lock_type: FileLockType::ReadLock, + owner, + lock_type: FileLockType::Read, range: FileLockRange { start: 0, end: 100 }, }; @@ -2311,7 +2285,7 @@ mod tests { file_ino, owner1, false, - FileLockType::ReadLock, + FileLockType::Read, FileLockRange { start: 0, end: 100 }, 1234, ) @@ -2325,7 +2299,7 @@ mod tests { file_ino, owner2, false, - FileLockType::ReadLock, + FileLockType::Read, FileLockRange { start: 0, end: 100 }, 5678, ) @@ -2335,18 +2309,18 @@ mod tests { // Verify both locks exist by querying each session let query1 = FileLockQuery { owner: owner1, - lock_type: FileLockType::WriteLock, + lock_type: FileLockType::Write, range: FileLockRange { start: 0, end: 100 }, }; let query2 = FileLockQuery { owner: owner2, - lock_type: FileLockType::ReadLock, + lock_type: FileLockType::Read, range: FileLockRange { start: 0, end: 100 }, }; let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap(); - assert_eq!(lock_info1.lock_type, FileLockType::ReadLock); + assert_eq!(lock_info1.lock_type, FileLockType::Read); assert_eq!(lock_info1.range.start, 0); assert_eq!(lock_info1.range.end, 100); assert_eq!(lock_info1.pid, 1234); @@ -2377,7 +2351,7 @@ mod tests { file_ino, owner1 as i64, false, - FileLockType::ReadLock, + FileLockType::Read, FileLockRange { start: 0, end: 100 }, 1234, ) @@ -2391,7 +2365,7 @@ mod tests { file_ino, owner2 as i64, false, // non-blocking - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 50, end: 150, @@ -2437,7 +2411,7 @@ mod tests { file_ino, owner, false, - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 0, end: 100 }, 1234, ) @@ -2447,12 +2421,12 @@ mod tests { // Verify lock exists let query = FileLockQuery { owner: owner, - lock_type: FileLockType::WriteLock, + lock_type: FileLockType::Write, range: FileLockRange { start: 0, end: 100 }, }; let lock_info = store.get_plock(file_ino, &query).await.unwrap(); - assert_eq!(lock_info.lock_type, FileLockType::WriteLock); + assert_eq!(lock_info.lock_type, FileLockType::Write); // Release lock store @@ -2494,7 +2468,7 @@ mod tests { file_ino, owner1 as i64, false, - FileLockType::WriteLock, + FileLockType::Write, FileLockRange { start: 0, end: 100 }, 1234, ) @@ -2508,7 +2482,7 @@ mod tests { file_ino, owner2 as i64, false, - FileLockType::WriteLock, + 
FileLockType::Write,
                 FileLockRange {
                     start: 200,
                     end: 300,
@@ -2521,13 +2495,13 @@ mod tests {
         // Verify both locks exist
         let query1 = FileLockQuery {
             owner: owner1,
-            lock_type: FileLockType::WriteLock,
+            lock_type: FileLockType::Write,
             range: FileLockRange { start: 0, end: 100 },
         };
 
         let query2 = FileLockQuery {
             owner: owner2,
-            lock_type: FileLockType::WriteLock,
+            lock_type: FileLockType::Write,
             range: FileLockRange {
                 start: 200,
                 end: 300,
@@ -2535,13 +2509,13 @@ mod tests {
         };
 
         let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap();
-        assert_eq!(lock_info1.lock_type, FileLockType::WriteLock);
+        assert_eq!(lock_info1.lock_type, FileLockType::Write);
         assert_eq!(lock_info1.range.start, 0);
         assert_eq!(lock_info1.range.end, 100);
         assert_eq!(lock_info1.pid, 1234);
 
         let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap();
-        assert_eq!(lock_info2.lock_type, FileLockType::WriteLock);
+        assert_eq!(lock_info2.lock_type, FileLockType::Write);
         assert_eq!(lock_info2.range.start, 200);
         assert_eq!(lock_info2.range.end, 300);
         assert_eq!(lock_info2.pid, 5678);
@@ -2572,7 +2546,7 @@ mod tests {
                 file_ino,
                 owner1,
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange { start: 0, end: 100 },
                 1111,
             )
@@ -2588,7 +2562,7 @@ mod tests {
                 file_ino,
                 owner2 as i64,
                 false,
-                FileLockType::ReadLock,
+                FileLockType::Read,
                 FileLockRange {
                     start: 200,
                     end: 300,
@@ -2607,7 +2581,7 @@ mod tests {
                 file_ino,
                 owner3 as i64,
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange {
                     start: 50,
                     end: 150,
@@ -2627,13 +2601,13 @@ mod tests {
         // Verify successful locks exist
         let query1 = FileLockQuery {
             owner: owner1,
-            lock_type: FileLockType::WriteLock,
+            lock_type: FileLockType::Write,
             range: FileLockRange { start: 0, end: 100 },
         };
 
         let query2 = FileLockQuery {
             owner: owner2,
-            lock_type: FileLockType::ReadLock,
+            lock_type: FileLockType::Read,
             range: FileLockRange {
                 start: 200,
                 end: 300,
@@ -2644,7 +2618,7 @@ mod tests {
         {
             let store1 = session_mgr.get_store(0);
             let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap();
-            assert_eq!(lock_info1.lock_type, FileLockType::WriteLock);
+            assert_eq!(lock_info1.lock_type, FileLockType::Write);
         }
 
         {
@@ -2675,7 +2649,7 @@ mod tests {
                 file_ino,
                 owner1 as i64,
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange {
                     start: 0,
                     end: 1000,
@@ -2692,7 +2666,7 @@ mod tests {
                 file_ino,
                 2002, // different owner
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange {
                     start: 500,
                     end: 600,
@@ -2730,7 +2704,7 @@ mod tests {
                 file_ino,
                 2002,
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange {
                     start: 500,
                     end: 600,
@@ -2743,7 +2717,7 @@ mod tests {
         // Verify the lock exists
         let query = FileLockQuery {
             owner: 2002,
-            lock_type: FileLockType::WriteLock,
+            lock_type: FileLockType::Write,
             range: FileLockRange {
                 start: 500,
                 end: 600,
@@ -2751,7 +2725,7 @@ mod tests {
         };
 
         let lock_info = store2.get_plock(file_ino, &query).await.unwrap();
-        assert_eq!(lock_info.lock_type, FileLockType::WriteLock);
+        assert_eq!(lock_info.lock_type, FileLockType::Write);
         assert_eq!(lock_info.pid, 5555);
     }
 }
diff --git a/project/slayerfs/src/meta/stores/redis_store.rs b/project/slayerfs/src/meta/stores/redis_store.rs
index f56d965bd..7b55550a4 100644
--- a/project/slayerfs/src/meta/stores/redis_store.rs
+++ b/project/slayerfs/src/meta/stores/redis_store.rs
@@ -40,13 +40,6 @@ const PLOCK_PREFIX: &str = "plock";
 const LOCKS_KEY: &str = "locks";
 const CHUNK_ID_BASE: u64 = 1_000_000_000u64;
 
-#[derive(Debug, Clone, Serialize, Deserialize)]
-struct RedisPlockEntry {
-    sid: Uuid,
-    owner: i64,
-    records: Vec<PlockRecord>,
-}
-
 /// Minimal Redis-backed meta store.
 pub struct RedisMetaStore {
     conn: ConnectionManager,
@@ -322,7 +315,7 @@ impl RedisMetaStore {
             .sid
             .get()
             .ok_or_else(|| MetaError::Internal("sid not seted".to_string()))?;
-        let field = self.plock_field(&sid, owner);
+        let field = self.plock_field(sid, owner);
 
         // Check if file exists
         if self.get_node(inode).await?.is_none() {
@@ -399,13 +392,13 @@ impl RedisMetaStore {
         if conflict_found {
             return Err(MetaError::LockConflict {
                 inode,
-                owner: owner.try_into().unwrap(),
+                owner,
                 range,
             });
         }
 
         // Update locks
-        let new_records = PlockRecord::update_locks(current_records, new_lock.clone());
+        let new_records = PlockRecord::update_locks(current_records, new_lock);
         let new_json = serde_json::to_string(&new_records)
             .map_err(|e| MetaError::Internal(format!("Serialization error: {e}")))?;
 
@@ -795,7 +788,7 @@ impl MetaStore for RedisMetaStore {
             let records_json: Result<String, redis::RedisError> = conn.hget(&plock_key, &current_field).await;
             if let Ok(records_json) = records_json {
                 let records: Vec<PlockRecord> = serde_json::from_str(&records_json).unwrap_or_default();
-                if let Some(v) = PlockRecord::get_plock(&records, query, &sid, &sid) {
+                if let Some(v) = PlockRecord::get_plock(&records, query, sid, sid) {
                     return Ok(v);
                 }
             }
@@ -823,9 +816,8 @@ impl MetaStore for RedisMetaStore {
             let records_json: String = conn.hget(&plock_key, &field).await.map_err(redis_err)?;
             let records: Vec<PlockRecord> = serde_json::from_str(&records_json).unwrap_or_default();
-            match PlockRecord::get_plock(&records, query, &sid, &lock_sid) {
-                Some(v) => return Ok(v),
-                None => {}
+            if let Some(v) = PlockRecord::get_plock(&records, query, sid, &lock_sid) {
+                return Ok(v);
             }
         }
 
@@ -850,13 +842,13 @@ impl MetaStore for RedisMetaStore {
         loop {
             let result = self
-                .try_set_plock(inode, owner.try_into().unwrap(), new_lock, lock_type, range)
+                .try_set_plock(inode, owner, new_lock, lock_type, range)
                 .await;
 
             match result {
                 Ok(()) => return Ok(()),
                 Err(MetaError::LockConflict { .. }) if block => {
-                    if lock_type == FileLockType::WriteLock {
+                    if lock_type == FileLockType::Write {
                         tokio::time::sleep(tokio::time::Duration::from_millis(1)).await;
                     } else {
                         tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
@@ -1045,19 +1037,9 @@ mod tests {
         store
     }
 
-    /// Create multiple test stores for testing multiple sessions
-    async fn create_test_stores(count: usize) -> Vec<RedisMetaStore> {
-        let mut stores = Vec::with_capacity(count);
-        for _ in 0..count {
-            stores.push(new_test_store().await);
-        }
-        stores
-    }
-
     /// Helper struct to manage multiple test sessions
     struct TestSessionManager {
         stores: Vec<RedisMetaStore>,
-        session_ids: Vec<Uuid>,
     }
 
     use std::sync::LazyLock;
@@ -1111,19 +1093,12 @@ mod tests {
                 time::sleep(time::Duration::from_millis(5)).await;
             }
 
-            Self {
-                stores,
-                session_ids,
-            }
+            Self { stores }
         }
 
         fn get_store(&self, index: usize) -> &RedisMetaStore {
             &self.stores[index]
         }
-
-        fn get_session_id(&self, index: usize) -> Uuid {
-            self.session_ids[index]
-        }
     }
 
     #[tokio::test]
@@ -1148,7 +1123,7 @@ mod tests {
                 file_ino,
                 owner,
                 false,
-                FileLockType::ReadLock,
+                FileLockType::Read,
                 FileLockRange { start: 0, end: 100 },
                 1234,
             )
@@ -1158,7 +1133,7 @@ mod tests {
         // Verify lock exists
         let query = FileLockQuery {
             owner: owner,
-            lock_type: FileLockType::ReadLock,
+            lock_type: FileLockType::Read,
             range: FileLockRange { start: 0, end: 100 },
         };
 
@@ -1188,7 +1163,7 @@ mod tests {
                 file_ino,
                 owner1,
                 false,
-                FileLockType::ReadLock,
+                FileLockType::Read,
                 FileLockRange { start: 0, end: 100 },
                 1234,
             )
@@ -1202,7 +1177,7 @@ mod tests {
                 file_ino,
                 owner2,
                 false,
-                FileLockType::ReadLock,
+                FileLockType::Read,
                 FileLockRange { start: 0, end: 100 },
                 5678,
             )
@@ -1212,18 +1187,18 @@ mod tests {
         // Verify both locks exist by querying each session
         let query1 = FileLockQuery {
             owner: owner1,
-            lock_type: FileLockType::WriteLock,
+            lock_type: FileLockType::Write,
             range: FileLockRange { start: 0, end: 100 },
         };
 
         let query2 = FileLockQuery {
             owner: owner2,
-            lock_type: FileLockType::ReadLock,
+            lock_type: FileLockType::Read,
             range: FileLockRange { start: 0, end: 100 },
         };
 
         let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap();
-        assert_eq!(lock_info1.lock_type, FileLockType::ReadLock);
+        assert_eq!(lock_info1.lock_type, FileLockType::Read);
         assert_eq!(lock_info1.range.start, 0);
         assert_eq!(lock_info1.range.end, 100);
         assert_eq!(lock_info1.pid, 1234);
@@ -1254,7 +1229,7 @@ mod tests {
                 file_ino,
                 owner1 as i64,
                 false,
-                FileLockType::ReadLock,
+                FileLockType::Read,
                 FileLockRange { start: 0, end: 100 },
                 1234,
             )
@@ -1268,7 +1243,7 @@ mod tests {
                 file_ino,
                 owner2 as i64,
                 false, // non-blocking
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange {
                     start: 50,
                     end: 150,
@@ -1314,7 +1289,7 @@ mod tests {
                 file_ino,
                 owner,
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange { start: 0, end: 100 },
                 1234,
             )
@@ -1324,12 +1299,12 @@ mod tests {
         // Verify lock exists
         let query = FileLockQuery {
             owner: owner,
-            lock_type: FileLockType::WriteLock,
+            lock_type: FileLockType::Write,
             range: FileLockRange { start: 0, end: 100 },
         };
 
         let lock_info = store.get_plock(file_ino, &query).await.unwrap();
-        assert_eq!(lock_info.lock_type, FileLockType::WriteLock);
+        assert_eq!(lock_info.lock_type, FileLockType::Write);
 
         // Release lock
         store
@@ -1371,7 +1346,7 @@ mod tests {
                 file_ino,
                 owner1 as i64,
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange { start: 0, end: 100 },
                 1234,
             )
@@ -1385,7 +1360,7 @@ mod tests {
                 file_ino,
                 owner2 as i64,
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange {
                     start: 200,
                     end: 300,
@@ -1398,13 +1373,13 @@ mod tests {
         // Verify both locks exist
         let query1 = FileLockQuery {
             owner: owner1,
-            lock_type: FileLockType::WriteLock,
+            lock_type: FileLockType::Write,
             range: FileLockRange { start: 0, end: 100 },
         };
 
         let query2 = FileLockQuery {
             owner: owner2,
-            lock_type: FileLockType::WriteLock,
+            lock_type: FileLockType::Write,
             range: FileLockRange {
                 start: 200,
                 end: 300,
@@ -1412,13 +1387,13 @@ mod tests {
         };
 
         let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap();
-        assert_eq!(lock_info1.lock_type, FileLockType::WriteLock);
+        assert_eq!(lock_info1.lock_type, FileLockType::Write);
        assert_eq!(lock_info1.range.start, 0);
         assert_eq!(lock_info1.range.end, 100);
         assert_eq!(lock_info1.pid, 1234);
 
         let lock_info2 = store2.get_plock(file_ino, &query2).await.unwrap();
-        assert_eq!(lock_info2.lock_type, FileLockType::WriteLock);
+        assert_eq!(lock_info2.lock_type, FileLockType::Write);
         assert_eq!(lock_info2.range.start, 200);
         assert_eq!(lock_info2.range.end, 300);
         assert_eq!(lock_info2.pid, 5678);
@@ -1449,7 +1424,7 @@ mod tests {
                 file_ino,
                 owner1,
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange { start: 0, end: 100 },
                 1111,
             )
@@ -1465,7 +1440,7 @@ mod tests {
                 file_ino,
                 owner2 as i64,
                 false,
-                FileLockType::ReadLock,
+                FileLockType::Read,
                 FileLockRange {
                     start: 200,
                     end: 300,
@@ -1484,7 +1459,7 @@ mod tests {
                 file_ino,
                 owner3 as i64,
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange {
                     start: 50,
                     end: 150,
@@ -1504,13 +1479,13 @@ mod tests {
         // Verify successful locks exist
         let query1 = FileLockQuery {
             owner: owner1,
-            lock_type: FileLockType::WriteLock,
+            lock_type: FileLockType::Write,
             range: FileLockRange { start: 0, end: 100 },
         };
 
         let query2 = FileLockQuery {
             owner: owner2,
-            lock_type: FileLockType::ReadLock,
+            lock_type: FileLockType::Read,
             range: FileLockRange {
                 start: 200,
                 end: 300,
@@ -1521,7 +1496,7 @@ mod tests {
         {
             let store1 = session_mgr.get_store(0);
             let lock_info1 = store1.get_plock(file_ino, &query1).await.unwrap();
-            assert_eq!(lock_info1.lock_type, FileLockType::WriteLock);
+            assert_eq!(lock_info1.lock_type, FileLockType::Write);
         }
 
         {
@@ -1552,7 +1527,7 @@ mod tests {
                 file_ino,
                 owner1 as i64,
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange {
                     start: 0,
                     end: 1000,
@@ -1569,7 +1544,7 @@ mod tests {
                 file_ino,
                 2002, // different owner
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange {
                     start: 500,
                     end: 600,
@@ -1607,7 +1582,7 @@ mod tests {
                 file_ino,
                 2002,
                 false,
-                FileLockType::WriteLock,
+                FileLockType::Write,
                 FileLockRange {
                     start: 500,
                     end: 600,
@@ -1620,7 +1595,7 @@ mod tests {
         // Verify the lock exists
         let query = FileLockQuery {
             owner: 2002,
-            lock_type: FileLockType::WriteLock,
+            lock_type: FileLockType::Write,
             range: FileLockRange {
                 start: 500,
                 end: 600,
@@ -1628,7 +1603,7 @@ mod tests {
         };
 
         let lock_info = store2.get_plock(file_ino, &query).await.unwrap();
-        assert_eq!(lock_info.lock_type, FileLockType::WriteLock);
+        assert_eq!(lock_info.lock_type, FileLockType::Write);
         assert_eq!(lock_info.pid, 5555);
     }
 }
diff --git a/project/slayerfs/src/vfs/fs.rs b/project/slayerfs/src/vfs/fs.rs
index 9fbb6ecb9..650314f8a 100644
--- a/project/slayerfs/src/vfs/fs.rs
+++ b/project/slayerfs/src/vfs/fs.rs
@@ -6,6 +6,7 @@ use crate::chuck::store::BlockStore;
 use crate::chuck::writer::ChunkWriter;
 use crate::meta::client::{MetaClient, MetaClientOptions};
 use crate::meta::config::{CacheCapacity, CacheTtl};
+use crate::meta::file_lock::{FileLockInfo, FileLockQuery, FileLockRange, FileLockType};
 use crate::meta::store::MetaError;
 use crate::meta::{MetaLayer, MetaStore};
 use dashmap::{DashMap, Entry};
@@ -1072,6 +1073,64 @@ where
         self.state.modified.cleanup_older_than(ttl).await;
     }
 
+    /// Get file lock information for a given inode and query.
+    pub async fn get_plock_ino(
+        &self,
+        inode: i64,
+        query: &FileLockQuery,
+    ) -> Result<FileLockInfo, MetaError> {
+        self.core.meta_layer.get_plock(inode, query).await
+    }
+
+    /// Set file lock for a given inode.
+    pub async fn set_plock_ino(
+        &self,
+        inode: i64,
+        owner: i64,
+        block: bool,
+        lock_type: FileLockType,
+        range: FileLockRange,
+        pid: u32,
+    ) -> Result<(), MetaError> {
+        self.core.meta_layer.set_plock(inode, owner, block, lock_type, range, pid).await
+    }
+
+    /// Get file lock information by path.
+    pub async fn get_plock(
+        &self,
+        path: &str,
+        query: &FileLockQuery,
+    ) -> Result<FileLockInfo, String> {
+        let path = Self::norm_path(path);
+        let (inode, _) = self.core.meta_layer.lookup_path(&path)
+            .await
+            .map_err(|e| e.to_string())?
+            .ok_or_else(|| "not found".to_string())?;
+        self.core.meta_layer.get_plock(inode, query)
+            .await
+            .map_err(|e| e.to_string())
+    }
+
+    /// Set file lock by path.
+    pub async fn set_plock(
+        &self,
+        path: &str,
+        owner: i64,
+        block: bool,
+        lock_type: FileLockType,
+        range: FileLockRange,
+        pid: u32,
+    ) -> Result<(), String> {
+        let path = Self::norm_path(path);
+        let (inode, _) = self.core.meta_layer.lookup_path(&path)
+            .await
+            .map_err(|e| e.to_string())?
+            .ok_or_else(|| "not found".to_string())?;
+        self.core.meta_layer.set_plock(inode, owner, block, lock_type, range, pid)
+            .await
+            .map_err(|e| e.to_string())
+    }
+
     async fn ensure_inode_registered(&self, ino: i64) -> Result, String> {
         // fast path to check whether there is an existing inode.
         if let Some(inode) = self.state.files.inode(ino) {
diff --git a/project/slayerfs/src/vfs/sdk.rs b/project/slayerfs/src/vfs/sdk.rs
index 1870b961c..a99f7e099 100644
--- a/project/slayerfs/src/vfs/sdk.rs
+++ b/project/slayerfs/src/vfs/sdk.rs
@@ -7,6 +7,7 @@ use crate::chuck::chunk::ChunkLayout;
 use crate::chuck::store::BlockStore;
+use crate::meta::file_lock::{FileLockInfo, FileLockQuery, FileLockRange, FileLockType};
 use crate::meta::MetaStore;
 use crate::meta::factory::create_meta_store_from_url;
 use crate::meta::store::MetaError;
@@ -92,6 +93,28 @@ impl Client {
     pub async fn truncate(&self, path: &str, size: u64) -> Result<(), String> {
         self.fs.truncate(path, size).await
     }
+
+    /// Get file lock information for a given path and query.
+    pub async fn get_plock(
+        &self,
+        path: &str,
+        query: &FileLockQuery,
+    ) -> Result<FileLockInfo, String> {
+        self.fs.get_plock(path, query).await
+    }
+
+    /// Set file lock for a given path.
+    pub async fn set_plock(
+        &self,
+        path: &str,
+        owner: i64,
+        block: bool,
+        lock_type: FileLockType,
+        range: FileLockRange,
+        pid: u32,
+    ) -> Result<(), String> {
+        self.fs.set_plock(path, owner, block, lock_type, range, pid).await
+    }
 }
 
 // ============== Convenience builder (LocalFs backend) ==============
diff --git a/third-party/rust/crates/rfuse3/0.0.5/BUCK b/third-party/rust/crates/rfuse3/0.0.5/BUCK
deleted file mode 100644
index f6e4b03c3..000000000
--- a/third-party/rust/crates/rfuse3/0.0.5/BUCK
+++ /dev/null
@@ -1,50 +0,0 @@
-# @generated by `cargo buckal`
-
-load("@buckal//:cargo_manifest.bzl", "cargo_manifest")
-load("@buckal//:wrapper.bzl", "buildscript_run", "rust_binary", "rust_library")
-
-http_archive(
-    name = "rfuse3-vendor",
-    urls = ["https://static.crates.io/crates/rfuse3/rfuse3-0.0.5.crate"],
-    sha256 = "848f037ea6f58bf93045c366e6099146b2b227c06646c584b64ed7d4a089ad71",
-    type = "tar.gz",
-    strip_prefix = "rfuse3-0.0.5",
-    out = "vendor",
-)
-
-cargo_manifest(
-    name = "rfuse3-manifest",
-    vendor = ":rfuse3-vendor",
-)
-
-rust_library(
-    name = "rfuse3",
-    srcs = [":rfuse3-vendor"],
-    crate = "rfuse3",
-    crate_root = "vendor/src/lib.rs",
-    edition = "2021",
-    features = [
-        "default",
-        "tokio-runtime",
-        "unprivileged",
-    ],
-    rustc_flags = ["@$(location :rfuse3-manifest[env_flags])"],
-    visibility = ["PUBLIC"],
-    deps = [
-        "//third-party/rust/crates/aligned_box/0.3.0:aligned_box",
-        "//third-party/rust/crates/async-notify/0.3.0:async-notify",
-        "//third-party/rust/crates/async-trait/0.1.89:async-trait",
-        "//third-party/rust/crates/bincode/1.3.3:bincode",
-        "//third-party/rust/crates/bytes/1.10.1:bytes",
-        "//third-party/rust/crates/futures-channel/0.3.31:futures-channel",
-        "//third-party/rust/crates/futures-util/0.3.31:futures-util",
-        "//third-party/rust/crates/libc/0.2.178:libc",
-        "//third-party/rust/crates/nix/0.29.0:nix",
-        "//third-party/rust/crates/serde/1.0.228:serde",
-        "//third-party/rust/crates/slab/0.4.11:slab",
-        "//third-party/rust/crates/tokio/1.48.0:tokio",
-        "//third-party/rust/crates/tracing/0.1.43:tracing",
-        "//third-party/rust/crates/trait-make/0.1.0:trait-make",
-        "//third-party/rust/crates/which/4.4.2:which",
-    ],
-)

From 5b4ab724e85a18e31fe3dbd58ba4012897743a4b Mon Sep 17 00:00:00 2001
From: zine yu
Date: Wed, 17 Dec 2025 20:25:18 +0800
Subject: [PATCH 6/7] fix(slayerfs): correct typos and grammar

Signed-off-by: zine yu
---
 buckal.snap                                        |  4 ----
 project/slayerfs/src/meta/file_lock.rs             | 10 +++++-----
 project/slayerfs/src/meta/stores/database_store.rs |  4 ++--
 project/slayerfs/src/meta/stores/etcd_store.rs     |  2 +-
 project/slayerfs/src/meta/stores/redis_store.rs    |  6 +++---
 project/slayerfs/tests/scripts/xfstests_slayer.sh  |  4 ++--
 6 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/buckal.snap b/buckal.snap
index 3a0e9940c..eb2c4180e 100644
--- a/buckal.snap
+++ b/buckal.snap
@@ -531,11 +531,7 @@ version = 1
 "registry+https://github.com/rust-lang/crates.io-index#reqwest@0.12.25" = "970e511e0ddc75b68c9cecac67bae0ff1ece7ba7a2c16615f0e9357404c2a3b5"
 "registry+https://github.com/rust-lang/crates.io-index#resolv-conf@0.7.6" = "dcd6a2030b4795ed2fd231d0a8b67ebac2d869e654dcdcaf5c51937d562a01be"
 "registry+https://github.com/rust-lang/crates.io-index#rfc6979@0.3.1" = "9cef3b799f4f4ca18b4dfa4fe0a57a42b26cabaa248ab94b3b161d788ee371ef"
-<<<<<<< HEAD
-"registry+https://github.com/rust-lang/crates.io-index#rfuse3@0.0.3" = "191c7be4bfd58164500f08333d767b8d82c196b1024a8a34dd7926771f0352b6"
-=======
"registry+https://github.com/rust-lang/crates.io-index#rfuse3@0.0.5" = "4c0f1a76d3e126fc18c0d217672b145507286b6549a83eb55968a4bb0672e005" ->>>>>>> main "registry+https://github.com/rust-lang/crates.io-index#rgb@0.8.52" = "8bdf9be2788b697d8404ffdbfa7abe1d7aec9454c0f296aa7cf4d8faf50109e6" "registry+https://github.com/rust-lang/crates.io-index#ring@0.17.14" = "c00a1ddcf818020e5271eafeee4039edcc680a66fcf00501f83f28f253a7381f" "registry+https://github.com/rust-lang/crates.io-index#rkyv@0.7.45" = "696526e2a115738bd5270d4fc1fceddf70699a2c77bb1a5f03355cdfb6bdc2a6" diff --git a/project/slayerfs/src/meta/file_lock.rs b/project/slayerfs/src/meta/file_lock.rs index 87afe0325..e5f67347e 100644 --- a/project/slayerfs/src/meta/file_lock.rs +++ b/project/slayerfs/src/meta/file_lock.rs @@ -45,7 +45,7 @@ impl sea_orm::TryGetable for FileLockType { ) -> Result { let val: u32 = res.try_get_by(index)?; FileLockType::from_u32(val).ok_or(TryGetError::DbErr(sea_orm::DbErr::Type( - "Failed to deserialize FIleLockType".to_string(), + "Failed to deserialize FileLockType".to_string(), ))) } } @@ -181,15 +181,15 @@ impl PlockRecord { result } - pub fn check_confilct( + pub fn check_conflict( lock_type: &FileLockType, range: &FileLockRange, ls: &Vec, ) -> bool { for l in ls { if (*lock_type == FileLockType::Write || l.lock_type == FileLockType::Write) - && range.end >= l.lock_range.start - && range.start <= l.lock_range.end + && range.end > l.lock_range.start + && range.start < l.lock_range.end { return true; } @@ -231,7 +231,7 @@ pub struct FileLockRange { impl FileLockRange { pub fn overlaps(&self, other: &Self) -> bool { - self.end >= other.start && self.start <= other.end + self.end > other.start && self.start < other.end } } #[derive(Debug, Clone, Copy)] diff --git a/project/slayerfs/src/meta/stores/database_store.rs b/project/slayerfs/src/meta/stores/database_store.rs index a3a0427ed..785bc5686 100644 --- a/project/slayerfs/src/meta/stores/database_store.rs +++ b/project/slayerfs/src/meta/stores/database_store.rs @@ -677,7 +677,7 @@ impl DatabaseMetaStore { } let ls: Vec = serde_json::from_slice(d).unwrap_or_default(); - conflict_found = PlockRecord::check_confilct(&lock_type, &range, &ls); + conflict_found = PlockRecord::check_conflict(&lock_type, &range, &ls); if conflict_found { break; } @@ -1924,7 +1924,7 @@ mod tests { fn shared_db_config() -> Config { Config { database: DatabaseConfig { - db_config: DatabaseType::Sqlite { + db_config: DatabaseType::Postgres { url: "postgres://slayerfs:slayerfs@127.0.0.1:5432/database".to_string(), }, }, diff --git a/project/slayerfs/src/meta/stores/etcd_store.rs b/project/slayerfs/src/meta/stores/etcd_store.rs index 616e98784..445d9cc66 100644 --- a/project/slayerfs/src/meta/stores/etcd_store.rs +++ b/project/slayerfs/src/meta/stores/etcd_store.rs @@ -955,7 +955,7 @@ impl EtcdMetaStore { } let ls: Vec = records_vec.clone(); // EtcdPlock already stores Vec - conflict_found = PlockRecord::check_confilct(&lock_type, &range, &ls); + conflict_found = PlockRecord::check_conflict(&lock_type, &range, &ls); if conflict_found { break; } diff --git a/project/slayerfs/src/meta/stores/redis_store.rs b/project/slayerfs/src/meta/stores/redis_store.rs index 9c957ae79..ce75bd341 100644 --- a/project/slayerfs/src/meta/stores/redis_store.rs +++ b/project/slayerfs/src/meta/stores/redis_store.rs @@ -355,7 +355,7 @@ impl RedisMetaStore { let sid = self .sid .get() - .ok_or_else(|| MetaError::Internal("sid not seted".to_string()))?; + .ok_or_else(|| MetaError::Internal("sid not 
set".to_string()))?; let field = self.plock_field(sid, owner); // Check if file exists @@ -424,7 +424,7 @@ impl RedisMetaStore { serde_json::from_str(&other_records_json).unwrap_or_default(); conflict_found = - PlockRecord::check_confilct(&lock_type, &range, &other_records); + PlockRecord::check_conflict(&lock_type, &range, &other_records); if conflict_found { break; } @@ -1057,7 +1057,7 @@ impl MetaStore for RedisMetaStore { fn set_sid(&self, sid: Uuid) -> Result<(), MetaError> { self.sid .set(sid) - .map_err(|_| MetaError::Internal("sid has been seted".to_string())) + .map_err(|_| MetaError::Internal("sid already been set".to_string())) } } diff --git a/project/slayerfs/tests/scripts/xfstests_slayer.sh b/project/slayerfs/tests/scripts/xfstests_slayer.sh index fe3c45700..aa6c6b943 100755 --- a/project/slayerfs/tests/scripts/xfstests_slayer.sh +++ b/project/slayerfs/tests/scripts/xfstests_slayer.sh @@ -66,7 +66,7 @@ CONFIG_PATH="$redis_config" LOG_FILE="$log_file" PERSISTENCE_BIN="$persistence_bin" -BACKEND_DIR="$backend_dir" +BACKEND_DIR="$backend_dir" MOUNT_DIR="$mount_dir" @@ -84,4 +84,4 @@ sudo cp "$current_dir/xfstests_slayer.exclude" /tmp/xfstests-dev/ # run tests. cd /tmp/xfstests-dev -sudo LC_ALL=C ./check -fuse -E xfstests_slayer.exclude \ No newline at end of file +sudo LC_ALL=C ./check -fuse -E xfstests_slayer.exclude From b166372ea9a2d9d00eacd728b21a2c15701f53cb Mon Sep 17 00:00:00 2001 From: zine yu Date: Thu, 18 Dec 2025 17:01:28 +0800 Subject: [PATCH 7/7] fix(slayerfs): Fixed the behavior of database store's get_plock Signed-off-by: zine yu --- project/slayerfs/src/fuse/mod.rs | 2 +- project/slayerfs/src/meta/client.rs | 12 +++++++++--- project/slayerfs/src/meta/layer.rs | 3 ++- project/slayerfs/src/meta/stores/database_store.rs | 9 +++------ project/slayerfs/src/vfs/sdk.rs | 6 ++++-- 5 files changed, 19 insertions(+), 13 deletions(-) diff --git a/project/slayerfs/src/fuse/mod.rs b/project/slayerfs/src/fuse/mod.rs index 38b63f465..86c1450ee 100644 --- a/project/slayerfs/src/fuse/mod.rs +++ b/project/slayerfs/src/fuse/mod.rs @@ -1120,7 +1120,7 @@ where Ok(()) => Ok(()), Err(e) => Err(match e { MetaError::NotFound(_) => libc::ENOENT, - MetaError::AlreadyExists { .. } => libc::EAGAIN, // Lock conflict + MetaError::LockConflict { .. 
} => libc::EAGAIN, // Lock conflict MetaError::NotSupported(_) => libc::ENOSYS, _ => libc::EIO, } diff --git a/project/slayerfs/src/meta/client.rs b/project/slayerfs/src/meta/client.rs index d60fd1330..0827b2cb3 100644 --- a/project/slayerfs/src/meta/client.rs +++ b/project/slayerfs/src/meta/client.rs @@ -12,7 +12,6 @@ use crate::meta::store::{ StatFsSnapshot, }; use crate::meta::stores::{CacheInvalidationEvent, EtcdMetaStore, EtcdWatchWorker, WatchConfig}; -use uuid::Uuid; use crate::vfs::fs::FileType; use async_trait::async_trait; use dashmap::DashMap; @@ -24,6 +23,7 @@ use std::time::Duration; use std::{collections::HashSet, process}; use tokio::sync::{Mutex, mpsc}; use tracing::{info, warn}; +use uuid::Uuid; use crate::vfs::extract_ino_and_chunk_index; use cache::InodeCache; @@ -1087,7 +1087,11 @@ impl MetaLayer for MetaClient { Ok(()) } - async fn get_plock(&self, inode: i64, query: &FileLockQuery) -> Result { + async fn get_plock( + &self, + inode: i64, + query: &FileLockQuery, + ) -> Result { self.store.get_plock(inode, query).await } @@ -1100,7 +1104,9 @@ impl MetaLayer for MetaClient { range: FileLockRange, pid: u32, ) -> Result<(), MetaError> { - self.store.set_plock(inode, owner, block, lock_type, range, pid).await + self.store + .set_plock(inode, owner, block, lock_type, range, pid) + .await } } diff --git a/project/slayerfs/src/meta/layer.rs b/project/slayerfs/src/meta/layer.rs index 3ac4662e3..5ed1b34a7 100644 --- a/project/slayerfs/src/meta/layer.rs +++ b/project/slayerfs/src/meta/layer.rs @@ -83,7 +83,8 @@ pub trait MetaLayer: Send + Sync { async fn shutdown_session(&self) -> Result<(), MetaError>; // ---------- File lock operations ---------- - async fn get_plock(&self, inode: i64, query: &FileLockQuery) -> Result; + async fn get_plock(&self, inode: i64, query: &FileLockQuery) + -> Result; async fn set_plock( &self, inode: i64, diff --git a/project/slayerfs/src/meta/stores/database_store.rs b/project/slayerfs/src/meta/stores/database_store.rs index 785bc5686..fc1702938 100644 --- a/project/slayerfs/src/meta/stores/database_store.rs +++ b/project/slayerfs/src/meta/stores/database_store.rs @@ -1838,16 +1838,13 @@ impl MetaStore for DatabaseMetaStore { .get() .ok_or_else(|| MetaError::Internal("sid not seted".to_string()))?; - // Query specific owner and session - let row = PlockMeta::find() + let rows = PlockMeta::find() .filter(plock_meta::Column::Inode.eq(inode)) - .filter(plock_meta::Column::Owner.eq(query.owner)) - .filter(plock_meta::Column::Sid.eq(*sid)) - .one(&self.db) + .all(&self.db) .await .map_err(MetaError::Database)?; - if let Some(row) = row { + for row in rows { let locks: Vec = serde_json::from_slice(&row.records).unwrap_or_default(); if let Some(v) = PlockRecord::get_plock(&locks, query, sid, &row.sid) { diff --git a/project/slayerfs/src/vfs/sdk.rs b/project/slayerfs/src/vfs/sdk.rs index a99f7e099..373fb8257 100644 --- a/project/slayerfs/src/vfs/sdk.rs +++ b/project/slayerfs/src/vfs/sdk.rs @@ -7,9 +7,9 @@ use crate::chuck::chunk::ChunkLayout; use crate::chuck::store::BlockStore; -use crate::meta::file_lock::{FileLockInfo, FileLockQuery, FileLockRange, FileLockType}; use crate::meta::MetaStore; use crate::meta::factory::create_meta_store_from_url; +use crate::meta::file_lock::{FileLockInfo, FileLockQuery, FileLockRange, FileLockType}; use crate::meta::store::MetaError; use crate::vfs::fs::{DirEntry, FileAttr, VFS}; use std::path::Path; @@ -113,7 +113,9 @@ impl Client { range: FileLockRange, pid: u32, ) -> Result<(), String> { - self.fs.set_plock(path, owner, 
block, lock_type, range, pid).await + self.fs + .set_plock(path, owner, block, lock_type, range, pid) + .await } }
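
For reviewers who want to exercise the plock surface this series adds end to end, here is a minimal usage sketch. The crate paths, module visibility, and the way a `Client` is obtained are assumptions (anything not shown in the diffs above); only the `get_plock`/`set_plock` signatures and the `file_lock` types come from the series itself.

// Illustrative sketch only: `slayerfs::...` paths and an already-constructed
// `Client` are assumed; the calls mirror the SDK methods added in this series.
use slayerfs::meta::file_lock::{FileLockQuery, FileLockRange, FileLockType};
use slayerfs::vfs::sdk::Client;

async fn lock_demo(client: &Client) -> Result<(), String> {
    let owner = 1001;
    let pid = std::process::id();
    let range = FileLockRange { start: 0, end: 100 };

    // Take a non-blocking write lock on bytes [0, 100) of /data/file.
    // A conflicting lock held by another owner surfaces as Err(...),
    // mapped from MetaError::LockConflict by the path-based wrapper.
    client
        .set_plock("/data/file", owner, false, FileLockType::Write, range, pid)
        .await?;

    // Probe, from a different owner's perspective, whether a write lock
    // over the same range would conflict, and with whom.
    let probe = FileLockQuery {
        owner: 2002,
        lock_type: FileLockType::Write,
        range,
    };
    let info = client.get_plock("/data/file", &probe).await?;
    println!("held lock: type={:?} pid={}", info.lock_type, info.pid);
    Ok(())
}

With the half-open overlap semantics introduced in PATCH 6 (`end > start && start < end`), a second lock on `FileLockRange { start: 100, end: 200 }` would not conflict with the one above, since the ranges merely touch at offset 100.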