diff --git a/.github/workflows/build_tests.yaml b/.github/workflows/build_tests.yaml
index 10fb9bf..b3c1016 100644
--- a/.github/workflows/build_tests.yaml
+++ b/.github/workflows/build_tests.yaml
@@ -19,5 +19,11 @@ jobs:
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
 
+      - name: Test default
+        run: cargo test
+
+      - name: Test Master-Node
+        run: cargo test --features master-node
+
       - name: Build default
         run: cargo build --all-features
\ No newline at end of file
diff --git a/Cargo.toml b/Cargo.toml
index 2d8ed6d..aae083b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -8,4 +8,5 @@ members = [
     "my-no-sql-sdk",
     "my-no-sql-core",
     "my-no-sql-tests",
+    "my-no-sql-server-core",
 ]
diff --git a/MY_NO_SQL_ENTITY_DESIGN_PATTERNS.md b/MY_NO_SQL_ENTITY_DESIGN_PATTERNS.md
new file mode 100644
index 0000000..ed0c221
--- /dev/null
+++ b/MY_NO_SQL_ENTITY_DESIGN_PATTERNS.md
@@ -0,0 +1,278 @@
+## MyNoSql Entity Design Patterns
+
+### Purpose
+- Provide consistent guidance for defining MyNoSql entities and enum-based models used by readers/writers.
+- Align with MyNoSql server concepts of `PartitionKey`, `RowKey`, `TimeStamp`, and optional `Expires`.
+- Serve as an instruction set for AI agents so generated code follows established patterns without guesswork.
+
+### Core principles
+- Entities always carry partition, row, and timestamp metadata; these identify rows per the server model.
+- Use `my-no-sql-macros` to autogenerate required fields and trait impls instead of hand-writing boilerplate.
+- Serde names `PartitionKey`, `RowKey`, `TimeStamp`, and `Expires` are reserved; avoid collisions or custom renames that reuse them.
+- Table rows are identified by `(PartitionKey, RowKey)`; partitions group related rows; `TimeStamp` tracks server-side update time.
+- Writers use HTTP CRUD; readers subscribe and keep local copies, so design entities to be compact and stable.
+
+### Table name validation (my-no-sql-server)
+- Length: 3–63 characters.
+- Allowed characters: lowercase Latin letters `a`-`z`, digits `0`-`9`, and dash `-`.
+- No consecutive dashes (e.g., `my--table-name` is invalid).
+- Do not start or end with a dash (e.g., `-my-table-name` and `my-table-name-` are invalid); prefer starting with a letter.
+- Keep names lowercase and stable; changing table names is a breaking change for both readers and writers.
+- Reference: [Table Name Validation](https://github.com/MyJetTools/my-no-sql-server/wiki/Table-Name-Validation).
+
+### Where entities live and how they flow
+- Entities are stored in `my-no-sql-server`.
+- Reader API: `MyNoSqlDataReader` subscribes to the table and keeps the latest snapshot locally in the application for fast reads.
+- Writer API: `MyNoSqlDataWriter` is used by writers to push inserts/updates/deletes to the server.
+- Design entities so they are stable over time; breaking schema changes can break both readers (subscriptions) and writers (HTTP).
+
+### Pattern 1: Table = Model
+- Use `#[my_no_sql_entity("table-name")]` on a struct that represents a single table’s rows.
+- The macro injects:
+  - `partition_key: String`, `row_key: String`, `time_stamp: Timestamp` with proper Serde casing.
+  - `MyNoSqlEntity` and `MyNoSqlEntitySerializer` impls (serialize/deserialize ready for readers/writers).
+- Optional `with_expires = true` adds `expires: Timestamp` with Serde name `Expires` for TTL semantics.
+- Derive serde and clone/debug as needed; apply `#[serde(rename_all = "...")]` for payload fields to keep consistent casing.
+- Example:
+  ```rust
+  #[my_no_sql_macros::my_no_sql_entity("bidask-snapshots")]
+  #[derive(Serialize, Deserialize, Debug, Clone)]
+  #[serde(rename_all = "PascalCase")]
+  pub struct BidAskSnapshot {
+      pub unix_timestamp_with_milis: u64,
+      pub bid: f64,
+      pub ask: f64,
+      pub base: String,
+      pub quote: String,
+  }
+  ```
+- Example with TTL:
+  ```rust
+  #[my_no_sql_macros::my_no_sql_entity("sessions", with_expires = true)]
+  #[derive(Serialize, Deserialize, Clone)]
+  pub struct SessionEntity {
+      pub user_id: String,
+      pub token: String,
+      pub device: String,
+      // `expires` is auto-added by the macro because with_expires = true
+  }
+  ```
+
+### Pattern 2: PartitionKey + RowKey = Model (enum)
+- Use when each `(PartitionKey, RowKey)` pair maps to a distinct model type.
+- Declare an enum with `#[enum_of_my_no_sql_entity(table_name:"...", generate_unwraps)]`.
+- For each variant model:
+  - Annotate with `#[enum_model(partition_key:"...", row_key:"...")]`.
+  - Derive serde and other traits required by your service.
+- The macros generate:
+  - Correct partition/row keys per variant.
+  - Serialization helpers plus `unwrap_caseX` accessors when `generate_unwraps` is set.
+- Example:
+  ```rust
+  #[enum_of_my_no_sql_entity(table_name:"Test", generate_unwraps)]
+  pub enum MyNoSqlEnumEntityTest {
+      Case1(Struct1),
+      Case2(Struct2),
+  }
+
+  #[enum_model(partition_key:"pk1", row_key:"rk1")]
+  #[derive(Serialize, Deserialize, Clone)]
+  pub struct Struct1 {
+      pub field1: String,
+      pub field2: i32,
+  }
+
+  #[enum_model(partition_key:"pk2", row_key:"rk2")]
+  #[derive(Serialize, Deserialize, Clone)]
+  pub struct Struct2 {
+      pub field3: String,
+      pub field4: i32,
+  }
+  ```
+- More elaborate enum example (mixed casing across variants):
+  ```rust
+  #[enum_of_my_no_sql_entity(table_name:"notifications", generate_unwraps)]
+  pub enum NotificationEntity {
+      #[enum_model(partition_key:"email", row_key:"welcome")]
+      EmailWelcome(EmailWelcomeModel),
+      #[enum_model(partition_key:"push", row_key:"security")]
+      PushSecurity(PushSecurityModel),
+  }
+
+  #[derive(Serialize, Deserialize, Clone)]
+  #[serde(rename_all = "PascalCase")]
+  pub struct EmailWelcomeModel {
+      pub subject: String,
+      pub body: String,
+  }
+
+  #[derive(Serialize, Deserialize, Clone)]
+  #[serde(rename_all = "camelCase")]
+  pub struct PushSecurityModel {
+      pub title: String,
+      pub message: String,
+      pub severity: String,
+  }
+  ```
+
+### Field and serde rules
+- Do not rename payload fields to reserved serde names (`PartitionKey`, `RowKey`, `TimeStamp`, `Expires`); the macro enforces uniqueness.
+- When adding an `expires` field manually, use type `Timestamp`; otherwise enable `with_expires`.
+- Keep `LAZY_DESERIALIZATION` as the default (`false`) unless the macro adds support for a specialized flow.
+- Prefer `#[serde(rename_all = "...")]` instead of renaming individual fields when possible.
+- Avoid floats for keys; keep keys ASCII-safe and stable (no trailing slashes or whitespace).
+
+### Testing guidance
+- Validate serialization/deserialization paths using the examples under `my-no-sql-tests/src/macros_tests`.
+- For enums, assert `unwrap_caseX` helpers and key selection per variant.
+- Add unit tests per model that:
+  - Serialize then deserialize and compare fields.
+  - For enums, confirm correct variant after round-trip and that unwrap helpers work.
+  - Validate `expires` presence when `with_expires = true`.
+
+### When to choose each pattern
+- Use **Table = Model** when all rows in a table share the same schema.
+- Use **PartitionKey + RowKey = Model (enum)** when a table hosts heterogeneous payloads selected by keys.
+
+### Key design guidance
+- PartitionKey:
+  - Group data that is frequently read together.
+  - Keep length modest; avoid unbounded cardinality when possible.
+- RowKey:
+  - Unique within a partition.
+  - Prefer stable identifiers (ids, symbols, timestamps encoded consistently).
+- TimeStamp:
+  - Auto-managed; used by the server for last-write tracking and sync ordering.
+- Expires (TTL):
+  - Use for session-like or cache-like data; values are UTC timestamps (serialized as RFC3339 strings).
+
+### Best practices: constant keys
+- When PartitionKey and/or RowKey are fixed for a table, expose them as `const` on the entity and initialize defaults with those constants. This keeps writers/readers aligned and avoids typos.
+- Example:
+  ```rust
+  use serde::*;
+
+  service_sdk::macros::use_my_no_sql_entity!();
+
+  #[my_no_sql_entity("atr-settings")]
+  #[derive(Serialize, Deserialize, Debug, Clone)]
+  pub struct AtrSettingsEntity {
+      pub percent: f64,
+      pub candles_count: i32,
+  }
+
+  impl AtrSettingsEntity {
+      pub const PARTITION_KEY: &'static str = "*";
+      pub const ROW_KEY: &'static str = "*";
+  }
+
+  impl Default for AtrSettingsEntity {
+      fn default() -> Self {
+          Self {
+              partition_key: Self::PARTITION_KEY.to_string(),
+              row_key: Self::ROW_KEY.to_string(),
+              time_stamp: Default::default(),
+              percent: 0.8,
+              candles_count: 365,
+          }
+      }
+  }
+  ```
+
+### Best practices: meaningful keys as helpers
+- When keys encode business meaning, provide helper functions to generate and to read them back, to avoid duplicating string literals across the codebase.
+- Example:
+  ```rust
+  use serde::*;
+  use trading_robot_common::CandleType;
+
+  service_sdk::macros::use_my_no_sql_entity!();
+
+  // PartitionKey: instrument_id
+  // RowKey: interval ("1m", "5m", "1h", "1d", "1M")
+  #[my_no_sql_entity("atr-values")]
+  #[derive(Serialize, Deserialize, Debug, Clone)]
+  pub struct AtrValueMyNoSqlEntity {
+      pub value: f64,
+  }
+
+  impl AtrValueMyNoSqlEntity {
+      pub fn generate_partition_key(instrument_id: &str) -> &str {
+          instrument_id
+      }
+
+      pub fn generate_row_key(candle_type: CandleType) -> &'static str {
+          candle_type.as_str()
+      }
+
+      pub fn get_instrument(&self) -> &str {
+          &self.partition_key
+      }
+
+      pub fn get_candle_type(&self) -> CandleType {
+          CandleType::from_str(&self.row_key)
+      }
+  }
+  ```
+
+### Macro options cheat sheet
+- `#[my_no_sql_entity("table", with_expires = true)]` → adds `expires`.
+- `#[enum_of_my_no_sql_entity(table_name:"table", generate_unwraps)]` → generates unwrap helpers per variant.
+- `#[enum_model(partition_key:"...", row_key:"...")]` → binds a variant model to fixed keys.
+- The macros auto-implement:
+  - `MyNoSqlEntity` with `TABLE_NAME`, `LAZY_DESERIALIZATION = false`, `get_partition_key`, `get_row_key`, `get_time_stamp`.
+  - `MyNoSqlEntitySerializer` with standard binary serialize/deserialize.
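+
+For reference, the generated entity impl is roughly equivalent to the following hand-written code. This is a simplified sketch based on the trait definitions in `my-no-sql-abstractions`; the actual macro output (including the `MyNoSqlEntitySerializer` impl, which is also generated) may differ in detail.
+```rust
+use my_no_sql_abstractions::{MyNoSqlEntity, Timestamp};
+
+// Fields the macro injects alongside your payload fields.
+pub struct MyEntity {
+    pub partition_key: String, // serialized as "PartitionKey"
+    pub row_key: String,       // serialized as "RowKey"
+    pub time_stamp: Timestamp, // serialized as "TimeStamp"
+    pub value: String,
+}
+
+impl MyNoSqlEntity for MyEntity {
+    const TABLE_NAME: &'static str = "table-name";
+    const LAZY_DESERIALIZATION: bool = false;
+
+    fn get_partition_key(&self) -> &str {
+        &self.partition_key
+    }
+
+    fn get_row_key(&self) -> &str {
+        &self.row_key
+    }
+
+    fn get_time_stamp(&self) -> Timestamp {
+        self.time_stamp
+    }
+}
+```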
+
+### Minimal working template (table = model)
+```rust
+use my_no_sql_macros::my_no_sql_entity;
+use serde::{Deserialize, Serialize};
+
+#[my_no_sql_entity("table-name")]
+#[derive(Serialize, Deserialize, Clone)]
+pub struct MyEntity {
+    pub value: String,
+    pub count: i32,
+}
+```
+
+### Minimal working template (enum model)
+```rust
+use my_no_sql_macros::{enum_model, enum_of_my_no_sql_entity};
+use serde::{Deserialize, Serialize};
+
+#[enum_of_my_no_sql_entity(table_name:"events", generate_unwraps)]
+pub enum EventEntity {
+    Create(CreateEvent),
+    Delete(DeleteEvent),
+}
+
+#[enum_model(partition_key:"pk-create", row_key:"rk-create")]
+#[derive(Serialize, Deserialize, Clone)]
+pub struct CreateEvent {
+    pub id: String,
+    pub payload: String,
+}
+
+#[enum_model(partition_key:"pk-delete", row_key:"rk-delete")]
+#[derive(Serialize, Deserialize, Clone)]
+pub struct DeleteEvent {
+    pub id: String,
+    pub reason: String,
+}
+```
+
+### AI generation checklist
+- Choose the pattern (single model vs enum).
+- Set `table_name` accurately and keep it stable.
+- Apply `#[serde(rename_all = "...")]` for payload fields.
+- Never introduce fields named `PartitionKey`, `RowKey`, `TimeStamp`, `Expires` manually unless following macro requirements.
+- For TTL needs, prefer `with_expires = true` over a custom `expires` unless special handling is required.
+- Derive `Serialize`, `Deserialize`, and `Clone`; add `Debug` when useful for logs/tests.
+- Provide unit tests that round-trip entities and assert variant unwraps.
+
+### Anti-patterns to avoid
+- Manually defining partition/row/timestamp fields when using the macros (causes duplication or serde conflicts).
+- Reusing the same `(partition_key, row_key)` for multiple variants in enum models.
+- Using dynamic keys in enum models (keys must be compile-time constants in attributes).
+- Introducing serde renames that clash with reserved casing tokens.
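+
+### Example: round-trip unit test
+A minimal sketch of the round-trip test recommended under Testing guidance, using `MyEntity` from the minimal template above. It assumes the macro-injected fields are publicly constructible, as in the constant-keys example, and that `MyNoSqlEntitySerializer` is in scope; adapt the assertions to your payload fields.
+```rust
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn my_entity_round_trips() {
+        let entity = MyEntity {
+            partition_key: "pk".to_string(),
+            row_key: "rk".to_string(),
+            time_stamp: Default::default(),
+            value: "hello".to_string(),
+            count: 42,
+        };
+
+        // Serialize with the macro-generated impl, then restore and compare fields.
+        let bytes = entity.serialize_entity();
+        let restored = MyEntity::deserialize_entity(bytes.as_slice()).unwrap();
+
+        assert_eq!(entity.partition_key, restored.partition_key);
+        assert_eq!(entity.row_key, restored.row_key);
+        assert_eq!(entity.value, restored.value);
+        assert_eq!(entity.count, restored.count);
+    }
+}
+```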
diff --git a/my-no-sql-abstractions/Cargo.toml b/my-no-sql-abstractions/Cargo.toml
index cdd11a0..c6319a2 100644
--- a/my-no-sql-abstractions/Cargo.toml
+++ b/my-no-sql-abstractions/Cargo.toml
@@ -1,8 +1,11 @@
 [package]
 name = "my-no-sql-abstractions"
-version = "0.3.0"
+version = "0.4.1"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
+rust-extensions = { tag = "0.1.5", git = "https://github.com/MyJetTools/rust-extensions.git" }
+serde = { version = "*", features = ["derive"] }
+serde_json = "*"
diff --git a/my-no-sql-abstractions/src/lib.rs b/my-no-sql-abstractions/src/lib.rs
index 72dce9b..2618c19 100644
--- a/my-no-sql-abstractions/src/lib.rs
+++ b/my-no-sql-abstractions/src/lib.rs
@@ -2,3 +2,5 @@ mod data_sync_period;
 mod my_no_sql_entity;
 pub use data_sync_period::DataSynchronizationPeriod;
 pub use my_no_sql_entity::*;
+mod timestamp_type;
+pub use timestamp_type::*;
diff --git a/my-no-sql-abstractions/src/my_no_sql_entity.rs b/my-no-sql-abstractions/src/my_no_sql_entity.rs
index ac4906c..98b886a 100644
--- a/my-no-sql-abstractions/src/my_no_sql_entity.rs
+++ b/my-no-sql-abstractions/src/my_no_sql_entity.rs
@@ -1,13 +1,16 @@
+use crate::Timestamp;
+
 pub trait MyNoSqlEntity {
     const TABLE_NAME: &'static str;
+    const LAZY_DESERIALIZATION: bool;
 
     fn get_partition_key(&self) -> &str;
     fn get_row_key(&self) -> &str;
-    fn get_time_stamp(&self) -> i64;
+    fn get_time_stamp(&self) -> Timestamp;
 }
 
-pub trait MyNoSqlEntitySerializer {
+pub trait MyNoSqlEntitySerializer: Sized {
     fn serialize_entity(&self) -> Vec<u8>;
-    fn deserialize_entity(src: &[u8]) -> Self;
+    fn deserialize_entity(src: &[u8]) -> Result<Self, String>;
 }
 
 pub trait GetMyNoSqlEntity {
diff --git a/my-no-sql-abstractions/src/timestamp_type.rs b/my-no-sql-abstractions/src/timestamp_type.rs
new file mode 100644
index 0000000..c0a5e56
--- /dev/null
+++ b/my-no-sql-abstractions/src/timestamp_type.rs
@@ -0,0 +1,191 @@
+use std::fmt::{Debug, Display};
+
+use rust_extensions::date_time::DateTimeAsMicroseconds;
+use serde::{Deserialize, Deserializer};
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Timestamp(i64);
+
+impl Timestamp {
+    pub fn to_date_time(&self) -> DateTimeAsMicroseconds {
+        DateTimeAsMicroseconds::new(self.0)
+    }
+
+    pub fn is_default(&self) -> bool {
+        self.0 == 0
+    }
+
+    pub fn to_i64(&self) -> i64 {
+        self.0
+    }
+}
+
+impl Into<Timestamp> for DateTimeAsMicroseconds {
+    fn into(self) -> Timestamp {
+        Timestamp(self.unix_microseconds)
+    }
+}
+
+impl Into<DateTimeAsMicroseconds> for Timestamp {
+    fn into(self) -> DateTimeAsMicroseconds {
+        DateTimeAsMicroseconds::new(self.0)
+    }
+}
+
+impl Display for Timestamp {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if self.0 == 0 {
+            return f.write_str("null");
+        }
+
+        let timestamp = self.to_date_time().to_rfc3339();
+        f.write_str(timestamp.as_str())
+    }
+}
+
+impl Debug for Timestamp {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if self.0 == 0 {
+            f.debug_tuple("Timestamp").field(&"null").finish()
+        } else {
+            let timestamp = self.to_date_time().to_rfc3339();
+            f.debug_tuple("Timestamp").field(&timestamp).finish()
+        }
+    }
+}
+
+impl serde::Serialize for Timestamp {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        if self.0 == 0 {
+            return serializer.serialize_none();
+        }
+
+        let rfc3339 = self.to_date_time().to_rfc3339();
+
+        // Trim a trailing "+00:00"-style offset so the value is written as a plain date-time
+        match rfc3339.find("+") {
+            Some(index) => serializer.serialize_str(&rfc3339[..index]),
+            None => serializer.serialize_str(&rfc3339),
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for Timestamp {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let s = String::deserialize(deserializer);
+
+        if s.is_err() {
+            return Ok(Timestamp(0));
+        }
+
+        let s = s.unwrap();
+
+        // Fall back to the default value instead of panicking when the string can not be parsed
+        let Some(datetime) = DateTimeAsMicroseconds::from_str(s.as_str()) else {
+            println!("Can not parse timestamp: {}", s);
+            return Ok(Timestamp(0));
+        };
+
+        Ok(Timestamp(datetime.unix_microseconds))
+    }
+}
+
+impl Default for Timestamp {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl Into<Timestamp> for i64 {
+    fn into(self) -> Timestamp {
+        Timestamp(self)
+    }
+}
+
+impl Into<Timestamp> for u64 {
+    fn into(self) -> Timestamp {
+        Timestamp(self as i64)
+    }
+}
+
+pub fn skip_timestamp_serializing(timestamp: &Timestamp) -> bool {
+    timestamp.is_default()
+}
+
+#[cfg(test)]
+mod test {
+    use rust_extensions::date_time::{DateTimeAsMicroseconds, DateTimeStruct};
+    use serde::{Deserialize, Serialize};
+
+    use super::Timestamp;
+
+    #[derive(Debug, Serialize, Deserialize)]
+    pub struct MyType {
+        pub my_field: i32,
+        #[serde(skip_serializing_if = "super::skip_timestamp_serializing")]
+        pub timestamp: Timestamp,
+    }
+
+    #[test]
+    fn test_serialization() {
+        use rust_extensions::date_time::DateTimeAsMicroseconds;
+
+        let my_type = MyType {
+            my_field: 15,
+            timestamp: DateTimeAsMicroseconds::from_str("2025-01-01T12:00:00.123456")
+                .unwrap()
+                .into(),
+        };
+
+        println!("{:?}", my_type);
+
+        let serialized = serde_json::to_string(&my_type).unwrap();
+
+        println!("Serialized: {}", serialized);
+
+        let result_type: MyType = serde_json::from_str(serialized.as_str()).unwrap();
+
+        assert_eq!(my_type.my_field, result_type.my_field);
+        assert_eq!(my_type.timestamp.0, result_type.timestamp.0);
+    }
+
+    #[test]
+    fn test_serialization_none() {
+        use rust_extensions::date_time::DateTimeAsMicroseconds;
+
+        let my_type = MyType {
+            my_field: 15,
+            timestamp: DateTimeAsMicroseconds::new(0).into(),
+        };
+
+        println!("{:?}", my_type);
+
+        let serialized = serde_json::to_string(&my_type).unwrap();
+
+        println!("Serialized: {}", serialized);
+
+        let result_type: MyType = serde_json::from_str(serialized.as_str()).unwrap();
+
+        assert_eq!(my_type.my_field, result_type.my_field);
+        assert_eq!(my_type.timestamp.0, result_type.timestamp.0);
+    }
+
+    #[test]
+    fn test_from_real_example() {
+        let time_stamp = DateTimeAsMicroseconds::from_str("2024-11-29T14:59:15.6145").unwrap();
+
+        let dt_struct: DateTimeStruct = time_stamp.into();
+
+        assert_eq!(dt_struct.year, 2024);
+        assert_eq!(dt_struct.month, 11);
+        assert_eq!(dt_struct.day, 29);
+
+        assert_eq!(dt_struct.time.hour, 14);
+        assert_eq!(dt_struct.time.min, 59);
+        assert_eq!(dt_struct.time.sec, 15);
+        assert_eq!(dt_struct.time.micros, 614500);
+    }
+}
diff --git a/my-no-sql-core/Cargo.toml b/my-no-sql-core/Cargo.toml
index d2c6e90..b2edce4 100644
--- a/my-no-sql-core/Cargo.toml
+++ b/my-no-sql-core/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "my-no-sql-core"
-version = "0.3.0"
+version = "0.4.1"
 edition = "2021"
 
 [features]
@@ -13,8 +13,8 @@ debug_db_row = []
 
 [dependencies]
 my-no-sql-abstractions = { path = "../my-no-sql-abstractions" }
-my-json = { tag = "0.2.2", git = "https://github.com/MyJetTools/my-json.git" }
-rust-extensions = { tag = "0.1.4", git = "https://github.com/MyJetTools/rust-extensions.git" }
+my-json = { tag = "0.3.2", git = "https://github.com/MyJetTools/my-json.git" }
+rust-extensions = { tag = "0.1.5", git = "https://github.com/MyJetTools/rust-extensions.git" }
 tokio = { version = "*",
features = ["full"] } serde_json = { version = "*" } serde = { version = "*", features = ["derive"] } diff --git a/my-no-sql-core/src/db/db_partition/db_partition.rs b/my-no-sql-core/src/db/db_partition/db_partition.rs index 98077f8..32952b8 100644 --- a/my-no-sql-core/src/db/db_partition/db_partition.rs +++ b/my-no-sql-core/src/db/db_partition/db_partition.rs @@ -1,4 +1,4 @@ -use my_json::json_writer::JsonObject; +use my_json::json_writer::JsonValueWriter; #[cfg(feature = "master-node")] use rust_extensions::date_time::AtomicDateTimeAsMicroseconds; @@ -132,11 +132,11 @@ impl DbPartition { result.get_result() } - pub fn get_all_rows<'s>(&'s self) -> std::slice::Iter> { + pub fn get_all_rows<'s>(&'s self) -> std::slice::Iter<'s, Arc> { self.rows.get_all() } - pub fn get_all_rows_cloned<'s>(&'s self) -> Vec> { + pub fn get_all_rows_cloned(&self) -> Vec> { self.rows.get_all().map(|itm| itm.clone()).collect() } @@ -200,18 +200,19 @@ impl DbPartition { } } -impl JsonObject for &'_ DbPartition { - fn write_into(&self, dest: &mut Vec) { +impl JsonValueWriter for &'_ DbPartition { + const IS_ARRAY: bool = false; + fn write(&self, dest: &mut String) { let mut first_element = true; - dest.push(b'['); + dest.push('['); for db_row in self.rows.get_all() { if first_element { first_element = false; } else { - dest.push(b','); + dest.push(','); } - db_row.as_ref().write_into(dest) + db_row.as_ref().write(dest) } - dest.push(b']'); + dest.push(']'); } } diff --git a/my-no-sql-core/src/db/db_partition/db_rows_container.rs b/my-no-sql-core/src/db/db_partition/db_rows_container.rs index 30673d4..09fdb81 100644 --- a/my-no-sql-core/src/db/db_partition/db_rows_container.rs +++ b/my-no-sql-core/src/db/db_partition/db_rows_container.rs @@ -69,13 +69,17 @@ impl DbRowsContainer { pub fn insert(&mut self, db_row: Arc) -> Option> { #[cfg(feature = "master-node")] - self.rows_with_expiration_index.add(&db_row); + let added = self.rows_with_expiration_index.add(&db_row); let (_, removed_db_row) = self.data.insert_or_replace(db_row); #[cfg(feature = "master-node")] - if let Some(removed_db_row) = &removed_db_row { - self.rows_with_expiration_index.remove(removed_db_row); + if let Some(added) = added { + if added { + if let Some(removed_db_row) = &removed_db_row { + self.rows_with_expiration_index.remove(removed_db_row); + } + } } removed_db_row @@ -100,7 +104,7 @@ impl DbRowsContainer { return self.data.contains(row_key); } - pub fn get_all<'s>(&'s self) -> std::slice::Iter> { + pub fn get_all<'s>(&'s self) -> std::slice::Iter<'s, Arc> { self.data.iter() } @@ -152,7 +156,7 @@ fn are_expires_the_same( #[cfg(feature = "master-node")] #[cfg(test)] -mod tests { +mod expiration_tests { use rust_extensions::date_time::DateTimeAsMicroseconds; @@ -165,34 +169,36 @@ mod tests { let test_json = r#"{ "PartitionKey": "test", "RowKey": "test", - "Expires": "2019-01-01T00:00:00", + "Expires": "2019-01-01T00:00:00" }"#; let time_stamp = JsonTimeStamp::now(); - let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &time_stamp).unwrap(); + let db_row = + DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &time_stamp).unwrap(); let mut db_rows = DbRowsContainer::new(); db_rows.insert(Arc::new(db_row)); - assert_eq!(1, db_rows.rows_with_expiration_index.len()) + db_rows.rows_with_expiration_index.assert_len(1); } #[test] fn test_that_index_does_not_appear_since_we_do_not_have_expiration() { let test_json = r#"{ "PartitionKey": "test", - "RowKey": "test", + "RowKey": "test" }"#; let time_stamp = 
JsonTimeStamp::now(); - let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &time_stamp).unwrap(); + let db_row = + DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &time_stamp).unwrap(); let mut db_rows = DbRowsContainer::new(); db_rows.insert(Arc::new(db_row)); - assert_eq!(0, db_rows.rows_with_expiration_index.len()) + db_rows.rows_with_expiration_index.assert_len(0); } #[test] @@ -203,7 +209,8 @@ mod tests { "Expires": "2019-01-01T00:00:00" }"#; let time_stamp = JsonTimeStamp::now(); - let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &time_stamp).unwrap(); + let db_row = + DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &time_stamp).unwrap(); let mut db_rows = DbRowsContainer::new(); @@ -211,7 +218,7 @@ mod tests { db_rows.remove("test"); - assert_eq!(0, db_rows.rows_with_expiration_index.len()) + db_rows.rows_with_expiration_index.assert_len(0); } #[test] @@ -223,13 +230,14 @@ mod tests { let time_stamp = JsonTimeStamp::now(); - let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &time_stamp).unwrap(); + let db_row = + DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &time_stamp).unwrap(); let mut db_rows = DbRowsContainer::new(); db_rows.insert(Arc::new(db_row)); - assert_eq!(0, db_rows.rows_with_expiration_index.len()); + db_rows.rows_with_expiration_index.assert_len(0); let new_expiration_time = DateTimeAsMicroseconds::new(2); @@ -241,7 +249,7 @@ mod tests { .rows_with_expiration_index .has_data_with_expiration_moment(DateTimeAsMicroseconds::new(2)) ); - assert_eq!(1, db_rows.rows_with_expiration_index.len()); + db_rows.rows_with_expiration_index.assert_len(1); } #[test] @@ -249,11 +257,12 @@ mod tests { let test_json = r#"{ "PartitionKey": "test", "RowKey": "test", - "Expires": "2019-01-01T00:00:00", + "Expires": "2019-01-01T00:00:00" }"#; let time_stamp = JsonTimeStamp::now(); - let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &time_stamp).unwrap(); + let db_row = + DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &time_stamp).unwrap(); let mut db_rows = DbRowsContainer::new(); @@ -267,7 +276,7 @@ mod tests { .rows_with_expiration_index .has_data_with_expiration_moment(current_expiration) ); - assert_eq!(1, db_rows.rows_with_expiration_index.len()); + db_rows.rows_with_expiration_index.assert_len(1); db_rows.update_expiration_time("test", Some(DateTimeAsMicroseconds::new(2))); @@ -277,7 +286,8 @@ mod tests { .rows_with_expiration_index .has_data_with_expiration_moment(DateTimeAsMicroseconds::new(2)) ); - assert_eq!(1, db_rows.rows_with_expiration_index.len()); + + db_rows.rows_with_expiration_index.assert_len(1); } #[test] @@ -287,11 +297,11 @@ mod tests { let test_json = r#"{ "PartitionKey": "test", "RowKey": "test", - "Expires": "2019-01-01T00:00:00", + "Expires": "2019-01-01T00:00:00" }"#; let now = JsonTimeStamp::now(); - let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &now).unwrap(); + let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &now).unwrap(); db_rows.insert(Arc::new(db_row)); @@ -316,12 +326,12 @@ mod tests { let test_json = r#"{ "PartitionKey": "test", "RowKey": "test", - "Expires": "2019-01-01T00:00:00", + "Expires": "2019-01-01T00:00:00" }"#; let now = JsonTimeStamp::now(); - let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &now).unwrap(); + let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &now).unwrap(); db_rows.insert(Arc::new(db_row)); @@ -342,11 +352,11 @@ mod 
tests { let test_json = r#"{ "PartitionKey": "test", "RowKey": "test", - "Expires": "2019-01-01T00:00:00", + "Expires": "2019-01-01T00:00:00" }"#; let now = JsonTimeStamp::now(); - let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &now).unwrap(); + let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &now).unwrap(); db_rows.insert(Arc::new(db_row)); @@ -367,12 +377,14 @@ mod tests { let json = r#"{ "PartitionKey": "test", - "RowKey": "test1", + "RowKey": "test1" }"#; - let db_row = - DbJsonEntity::parse_into_db_row(json.as_bytes(), &JsonTimeStamp::from_date_time(now)) - .unwrap(); + let db_row = DbJsonEntity::parse_into_db_row( + json.as_bytes().into(), + &JsonTimeStamp::from_date_time(now), + ) + .unwrap(); db_rows.insert(Arc::new(db_row)); @@ -382,11 +394,11 @@ mod tests { let raw_json = r#"{ "PartitionKey": "test", - "RowKey": "test2", + "RowKey": "test2" }"#; let db_row = DbJsonEntity::parse_into_db_row( - raw_json.as_bytes(), + raw_json.as_bytes().into(), &JsonTimeStamp::from_date_time(now), ) .unwrap(); @@ -399,11 +411,11 @@ mod tests { let json_db_row = r#"{ "PartitionKey": "test", - "RowKey": "test3", + "RowKey": "test3" }"#; let db_row = DbJsonEntity::parse_into_db_row( - json_db_row.as_bytes(), + json_db_row.as_bytes().into(), &JsonTimeStamp::from_date_time(now), ) .unwrap(); @@ -416,11 +428,11 @@ mod tests { let raw_json = r#"{ "PartitionKey": "test", - "RowKey": "test4", + "RowKey": "test4" }"#; let db_row = DbJsonEntity::parse_into_db_row( - raw_json.as_bytes(), + raw_json.as_bytes().into(), &JsonTimeStamp::from_date_time(now), ) .unwrap(); @@ -431,4 +443,48 @@ mod tests { assert_eq!("test1", db_rows_to_gc.get(0).unwrap().get_row_key()); } + + #[test] + fn check_we_update_row_with_the_same_expiration_date() { + let mut db_rows = DbRowsContainer::new(); + + let row = r#"{"Count":1,"PartitionKey":"in-progress-count1","RowKey":"my-id","Expires":"2025-03-12T10:55:46.0507979Z"}"#; + let now = JsonTimeStamp::now(); + let db_json_entity = DbJsonEntity::parse(row.as_bytes(), &now).unwrap(); + let db_row = Arc::new(db_json_entity.into_db_row().unwrap()); + db_rows.insert(db_row); + db_rows.rows_with_expiration_index.assert_len(1); + + let row = r#"{"Count":1,"PartitionKey":"in-progress-count1","RowKey":"my-id","Expires":"2025-03-12T10:55:46.0507979Z"}"#; + let now = JsonTimeStamp::now(); + let db_json_entity = DbJsonEntity::parse(row.as_bytes(), &now).unwrap(); + let db_row = Arc::new(db_json_entity.into_db_row().unwrap()); + db_rows.insert(db_row); + db_rows.rows_with_expiration_index.assert_len(1); + + db_rows.remove("my-id"); + } + + #[test] + fn check_we_update_same_row_with_new_expiration_date() { + let mut db_rows = DbRowsContainer::new(); + + let row = r#"{"Count":1,"PartitionKey":"in-progress-count1","RowKey":"my-id","Expires":"2025-03-12T10:55:48.0507979Z"}"#; + let now = JsonTimeStamp::now(); + let db_json_entity = DbJsonEntity::parse(row.as_bytes(), &now).unwrap(); + let db_row = Arc::new(db_json_entity.into_db_row().unwrap()); + db_rows.insert(db_row); + db_rows.rows_with_expiration_index.assert_len(1); + + let row = r#"{"Count":1,"PartitionKey":"in-progress-count1","RowKey":"my-id","Expires":"2025-03-12T10:55:50.0507979Z"}"#; + let now = JsonTimeStamp::now(); + let db_json_entity = DbJsonEntity::parse(row.as_bytes(), &now).unwrap(); + let db_row = Arc::new(db_json_entity.into_db_row().unwrap()); + db_rows.insert(db_row); + db_rows.rows_with_expiration_index.assert_len(1); + + db_rows.remove("my-id"); + + 
db_rows.rows_with_expiration_index.assert_len(0); + } } diff --git a/my-no-sql-core/src/db/db_row/db_row.rs b/my-no-sql-core/src/db/db_row/db_row.rs index 65a1862..d4572ee 100644 --- a/my-no-sql-core/src/db/db_row/db_row.rs +++ b/my-no-sql-core/src/db/db_row/db_row.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use my_json::json_writer::JsonObject; +use my_json::json_writer::JsonValueWriter; #[cfg(feature = "master-node")] use rust_extensions::date_time::AtomicDateTimeAsMicroseconds; #[cfg(feature = "master-node")] @@ -114,7 +114,7 @@ impl DbRow { } } #[cfg(feature = "master-node")] - pub fn write_json(&self, out: &mut Vec) { + pub fn write_json(&self, out: &mut String) { let expires_value = self.get_expires(); if expires_value.is_none() { @@ -122,23 +122,39 @@ impl DbRow { if let Some(before_separator) = find_json_separator_before(&self.raw, expires.key.start - 1) { - out.extend_from_slice(&self.raw[..before_separator]); - out.extend_from_slice(&self.raw[expires.value.end..]); + unsafe { + out.push_str(std::str::from_utf8_unchecked(&self.raw[..before_separator])); + out.push_str(std::str::from_utf8_unchecked( + &self.raw[expires.value.end..], + )); + } return; } if let Some(after_separator) = find_json_separator_after(&self.raw, expires.value.end) { - out.extend_from_slice(&self.raw[..expires.key.start]); - out.extend_from_slice(&self.raw[after_separator..]); + unsafe { + out.push_str(std::str::from_utf8_unchecked( + &self.raw[..expires.key.start], + )); + out.push_str(std::str::from_utf8_unchecked(&self.raw[after_separator..])); + } return; } - out.extend_from_slice(&self.raw[..expires.key.start]); - out.extend_from_slice(&self.raw[expires.value.end..]); + unsafe { + out.push_str(std::str::from_utf8_unchecked( + &self.raw[..expires.key.start], + )); + out.push_str(std::str::from_utf8_unchecked( + &self.raw[expires.value.end..], + )); + } } else { - out.extend_from_slice(&self.raw); + unsafe { + out.push_str(std::str::from_utf8_unchecked(&self.raw)); + } } return; @@ -146,28 +162,35 @@ impl DbRow { let expires_value = expires_value.unwrap(); - if let Some(expires) = &self.expires { - out.extend_from_slice(&self.raw[..expires.key.start]); - inject_expires(out, expires_value); - out.extend_from_slice(&self.raw[expires.value.end..]); - } else { - let end_of_json = crate::db_json_entity::get_the_end_of_the_json(&self.raw); - out.extend_from_slice(&self.raw[..end_of_json]); - out.push(b','); - inject_expires(out, expires_value); - out.extend_from_slice(&self.raw[end_of_json..]); + unsafe { + if let Some(expires) = &self.expires { + out.push_str(std::str::from_utf8_unchecked( + &self.raw[..expires.key.start], + )); + inject_expires(out, expires_value); + out.push_str(std::str::from_utf8_unchecked( + &self.raw[expires.value.end..], + )); + } else { + let end_of_json = crate::db_json_entity::get_the_end_of_the_json(&self.raw); + out.push_str(std::str::from_utf8_unchecked(&self.raw[..end_of_json])); + out.push(','); + inject_expires(out, expires_value); + out.push_str(std::str::from_utf8_unchecked(&self.raw[end_of_json..])); + } } } #[cfg(not(feature = "master-node"))] - pub fn write_json(&self, out: &mut Vec) { - out.extend_from_slice(&self.raw); + pub fn write_json(&self, out: &mut String) { + let str = unsafe { std::str::from_utf8_unchecked(&self.raw) }; + out.push_str(str); } pub fn to_vec(&self) -> Vec { - let mut result = Vec::new(); + let mut result = String::new(); self.write_json(&mut result); - result + result.into_bytes() } } @@ -198,12 +221,12 @@ impl RowKeyParameter for Arc { } #[cfg(feature 
= "master-node")] -fn inject_expires(out: &mut Vec, expires_value: DateTimeAsMicroseconds) { - out.push(b'"'); - out.extend_from_slice(crate::db_json_entity::consts::EXPIRES.as_bytes()); - out.extend_from_slice("\":\"".as_bytes()); - out.extend_from_slice(&expires_value.to_rfc3339().as_bytes()[..19]); - out.push(b'"'); +fn inject_expires(out: &mut String, expires_value: DateTimeAsMicroseconds) { + out.push('"'); + out.push_str(crate::db_json_entity::consts::EXPIRES); + out.push_str("\":\""); + out.push_str(&expires_value.to_rfc3339()[..19]); + out.push('"'); } #[cfg(feature = "master-node")] fn find_json_separator_before(src: &[u8], pos: usize) -> Option { @@ -246,8 +269,10 @@ fn find_json_separator_after(src: &[u8], pos: usize) -> Option { None } -impl JsonObject for &'_ DbRow { - fn write_into(&self, dest: &mut Vec) { +impl JsonValueWriter for &'_ DbRow { + const IS_ARRAY: bool = false; + + fn write(&self, dest: &mut String) { self.write_json(dest) } } diff --git a/my-no-sql-core/src/db/db_row/test_expires_update.rs b/my-no-sql-core/src/db/db_row/test_expires_update.rs index c446a49..9b8eb6b 100644 --- a/my-no-sql-core/src/db/db_row/test_expires_update.rs +++ b/my-no-sql-core/src/db/db_row/test_expires_update.rs @@ -10,28 +10,32 @@ mod test { let test_json = r#"{ "PartitionKey": "TestPk", "RowKey": "TestRk", - "Expires": "2019-01-01T00:00:00", + "Expires": "2019-01-01T00:00:00" }"#; let inject_time_stamp = JsonTimeStamp::now(); let db_row = - DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &inject_time_stamp).unwrap(); + DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &inject_time_stamp) + .unwrap(); let new_expires = DateTimeAsMicroseconds::from_str("2020-01-02T01:02:03").unwrap(); db_row.update_expires(new_expires.into()); - let mut result_json = Vec::new(); + let mut result_json = String::new(); db_row.write_json(&mut result_json); - let result_entity = DbJsonEntity::new(&result_json).unwrap(); + let result_entity = DbJsonEntity::new(result_json.as_bytes().into()).unwrap(); - assert_eq!(result_entity.get_partition_key(&result_json), "TestPk"); - assert_eq!(result_entity.get_row_key(&result_json), "TestRk"); + assert_eq!( + result_entity.get_partition_key(result_json.as_bytes()), + "TestPk" + ); + assert_eq!(result_entity.get_row_key(result_json.as_bytes()), "TestRk"); assert_eq!( - result_entity.get_expires(&result_json).unwrap(), + result_entity.get_expires(result_json.as_bytes()).unwrap(), &new_expires.to_rfc3339()[..19] ); } @@ -45,23 +49,27 @@ mod test { let inject_time_stamp = JsonTimeStamp::now(); let db_row = - DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &inject_time_stamp).unwrap(); + DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &inject_time_stamp) + .unwrap(); let new_expires = DateTimeAsMicroseconds::from_str("2020-01-02T01:02:03").unwrap(); db_row.update_expires(new_expires.into()); - let mut result_json = Vec::new(); + let mut result_json = String::new(); db_row.write_json(&mut result_json); - let result_entity = DbJsonEntity::new(&result_json).unwrap(); + let result_entity = DbJsonEntity::new(result_json.as_bytes().into()).unwrap(); - assert_eq!(result_entity.get_partition_key(&result_json), "Pk"); - assert_eq!(result_entity.get_row_key(&result_json), "Rk"); + assert_eq!( + result_entity.get_partition_key(result_json.as_bytes()), + "Pk" + ); + assert_eq!(result_entity.get_row_key(result_json.as_bytes()), "Rk"); assert_eq!( - result_entity.get_expires(&result_json).unwrap(), + 
result_entity.get_expires(result_json.as_bytes()).unwrap(), &new_expires.to_rfc3339()[..19] ); } @@ -73,22 +81,26 @@ mod test { let inject_time_stamp = JsonTimeStamp::now(); let db_row = - DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &inject_time_stamp).unwrap(); + DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &inject_time_stamp) + .unwrap(); db_row.update_expires(None); - let mut result_json = Vec::new(); + let mut result_json = String::new(); db_row.write_json(&mut result_json); - println!("Result: {}", std::str::from_utf8(&result_json).unwrap()); + println!("Result: {}", result_json); - let result_entity = DbJsonEntity::new(&result_json).unwrap(); + let result_entity = DbJsonEntity::new(result_json.as_bytes().into()).unwrap(); - assert_eq!(result_entity.get_partition_key(&result_json), "Pk"); - assert_eq!(result_entity.get_row_key(&result_json), "Rk"); + assert_eq!( + result_entity.get_partition_key(result_json.as_bytes()), + "Pk" + ); + assert_eq!(result_entity.get_row_key(result_json.as_bytes()), "Rk"); - assert!(result_entity.get_expires(&result_json).is_none()); + assert!(result_entity.get_expires(result_json.as_bytes()).is_none()); } #[test] @@ -97,22 +109,26 @@ mod test { let inject_time_stamp = JsonTimeStamp::now(); let db_row = - DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &inject_time_stamp).unwrap(); + DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &inject_time_stamp) + .unwrap(); db_row.update_expires(None); - let mut result_json = Vec::new(); + let mut result_json = String::new(); db_row.write_json(&mut result_json); - println!("Result: {}", std::str::from_utf8(&result_json).unwrap()); + println!("Result: {}", result_json); - let result_entity = DbJsonEntity::new(&result_json).unwrap(); + let result_entity = DbJsonEntity::new(result_json.as_bytes().into()).unwrap(); - assert_eq!(result_entity.get_partition_key(&result_json), "Pk"); - assert_eq!(result_entity.get_row_key(&result_json), "Rk"); + assert_eq!( + result_entity.get_partition_key(result_json.as_bytes()), + "Pk" + ); + assert_eq!(result_entity.get_row_key(result_json.as_bytes()), "Rk"); - assert!(result_entity.get_expires(&result_json).is_none()); + assert!(result_entity.get_expires(result_json.as_bytes()).is_none()); } #[test] @@ -121,22 +137,26 @@ mod test { let inject_time_stamp = JsonTimeStamp::now(); let db_row = - DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &inject_time_stamp).unwrap(); + DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &inject_time_stamp) + .unwrap(); db_row.update_expires(None); - let mut result_json = Vec::new(); + let mut result_json = String::new(); db_row.write_json(&mut result_json); - println!("Result: {}", std::str::from_utf8(&result_json).unwrap()); + println!("Result: {}", result_json); - let result_entity = DbJsonEntity::new(&result_json).unwrap(); + let result_entity = DbJsonEntity::new(result_json.as_bytes().into()).unwrap(); - assert_eq!(result_entity.get_partition_key(&result_json), "Pk"); - assert_eq!(result_entity.get_row_key(&result_json), "Rk"); + assert_eq!( + result_entity.get_partition_key(result_json.as_bytes()), + "Pk" + ); + assert_eq!(result_entity.get_row_key(result_json.as_bytes()), "Rk"); - assert!(result_entity.get_expires(&result_json).is_none()); + assert!(result_entity.get_expires(result_json.as_bytes()).is_none()); } #[test] @@ -148,25 +168,25 @@ mod test { let inject_time_stamp = JsonTimeStamp::now(); let db_row = - DbJsonEntity::parse_into_db_row(test_json.as_bytes(), 
&inject_time_stamp).unwrap(); + DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &inject_time_stamp) + .unwrap(); db_row.update_expires(None); - let mut result_json = Vec::new(); + let mut result_json = String::new(); db_row.write_json(&mut result_json); - println!( - "Result: {}. Len: {}", - std::str::from_utf8(&result_json).unwrap(), - result_json.len() - ); + println!("Result: {}. Len: {}", result_json, result_json.len()); - let result_entity = DbJsonEntity::new(&result_json).unwrap(); + let result_entity = DbJsonEntity::new(result_json.as_bytes().into()).unwrap(); - assert_eq!(result_entity.get_partition_key(&result_json), "Pk"); - assert_eq!(result_entity.get_row_key(&result_json), "Rk"); + assert_eq!( + result_entity.get_partition_key(result_json.as_bytes()), + "Pk" + ); + assert_eq!(result_entity.get_row_key(result_json.as_bytes()), "Rk"); - assert!(result_entity.get_expires(&result_json).is_none()); + assert!(result_entity.get_expires(result_json.as_bytes()).is_none()); } } diff --git a/my-no-sql-core/src/db/db_table/db_partitions_container.rs b/my-no-sql-core/src/db/db_table/db_partitions_container.rs index d093b59..ffc33c1 100644 --- a/my-no-sql-core/src/db/db_table/db_partitions_container.rs +++ b/my-no-sql-core/src/db/db_table/db_partitions_container.rs @@ -32,11 +32,11 @@ impl DbPartitionsContainer { self.partitions.len() } - pub fn get_partitions(&self) -> std::slice::Iter { + pub fn get_partitions<'s>(&'s self) -> std::slice::Iter<'s, DbPartition> { self.partitions.iter() } - pub fn get_partitions_mut(&mut self) -> std::slice::IterMut { + pub fn get_partitions_mut<'s>(&'s mut self) -> std::slice::IterMut<'s, DbPartition> { self.partitions.iter_mut() } #[cfg(feature = "master-node")] diff --git a/my-no-sql-core/src/db/db_table/db_table_attributes.rs b/my-no-sql-core/src/db/db_table/db_table_attributes.rs index d03993b..6e042a6 100644 --- a/my-no-sql-core/src/db/db_table/db_table_attributes.rs +++ b/my-no-sql-core/src/db/db_table/db_table_attributes.rs @@ -19,6 +19,12 @@ impl DbTableAttributes { } } +impl Default for DbTableAttributes { + fn default() -> Self { + Self::create_default() + } +} + impl DbTableAttributes { pub fn new( persist: bool, diff --git a/my-no-sql-core/src/db/db_table/db_table.rs b/my-no-sql-core/src/db/db_table/db_table_inner.rs similarity index 91% rename from my-no-sql-core/src/db/db_table/db_table.rs rename to my-no-sql-core/src/db/db_table/db_table_inner.rs index c695cc0..9af04c6 100644 --- a/my-no-sql-core/src/db/db_table/db_table.rs +++ b/my-no-sql-core/src/db/db_table/db_table_inner.rs @@ -1,17 +1,17 @@ use my_json::json_writer::JsonArrayWriter; #[cfg(feature = "master-node")] use rust_extensions::date_time::DateTimeAsMicroseconds; -use rust_extensions::sorted_vec::SortedVecWithStrKey; +use rust_extensions::sorted_vec::{EntityWithStrKey, SortedVecWithStrKey}; use std::sync::Arc; use crate::db::{DbPartition, DbRow, PartitionKey, PartitionKeyParameter, RowKeyParameter}; #[cfg(feature = "master-node")] use super::DbTableAttributes; -use super::{AllDbRowsIterator, AvgSize, ByRowKeyIterator, DbPartitionsContainer}; +use super::{AllDbRowsIterator, AvgSize, ByRowKeyIterator, DbPartitionsContainer, DbTableName}; -pub struct DbTable { - pub name: String, +pub struct DbTableInner { + pub name: DbTableName, pub partitions: DbPartitionsContainer, pub avg_size: AvgSize, #[cfg(feature = "master-node")] @@ -20,16 +20,26 @@ pub struct DbTable { pub attributes: DbTableAttributes, } -impl DbTable { +impl EntityWithStrKey for DbTableInner { + fn 
get_key(&self) -> &str { + self.name.as_str() + } +} + +impl DbTableInner { #[cfg(not(feature = "master-node"))] - pub fn new(name: String) -> Self { + pub fn new(name: DbTableName) -> Self { Self { - name: name.into(), + name, partitions: DbPartitionsContainer::new(), avg_size: AvgSize::new(), } } + pub fn restore_partition(&mut self, db_partition: DbPartition) { + self.partitions.insert(db_partition); + } + pub fn get_partitions_amount(&self) -> usize { self.partitions.len() } @@ -61,7 +71,7 @@ impl DbTable { for db_partition in self.partitions.get_partitions() { for db_row in db_partition.get_all_rows() { - json_array_writer.write(db_row.as_ref()) + json_array_writer = json_array_writer.write(db_row.as_ref()); } } @@ -90,7 +100,7 @@ impl DbTable { if let Some(db_partition) = self.partitions.get(partition_key) { for db_row in db_partition.get_all_rows() { - json_array_writer.write(db_row.as_ref()) + json_array_writer = json_array_writer.write(db_row.as_ref()); } } @@ -107,25 +117,25 @@ impl DbTable { self.partitions.get(partition_key) } #[inline] - pub fn get_partitions(&self) -> std::slice::Iter { + pub fn get_partitions<'s>(&'s self) -> std::slice::Iter<'s, DbPartition> { self.partitions.get_partitions() } } /// Insert Operations -impl DbTable { +impl DbTableInner { #[inline] pub fn insert_or_replace_row( &mut self, - db_row: &Arc, + db_row: Arc, #[cfg(feature = "master-node")] set_last_write_moment: Option, ) -> (PartitionKey, Option>) { - self.avg_size.add(db_row); + self.avg_size.add(&db_row); - let db_partition = self.partitions.add_partition_if_not_exists(db_row); + let db_partition = self.partitions.add_partition_if_not_exists(&db_row); - let removed_db_row = db_partition.insert_or_replace_row(db_row.clone()); + let removed_db_row = db_partition.insert_or_replace_row(db_row); #[cfg(feature = "master-node")] if let Some(set_last_write_moment) = set_last_write_moment { @@ -194,7 +204,7 @@ impl DbTable { /// /// -impl DbTable { +impl DbTableInner { pub fn remove_row( &mut self, partition_key: &impl PartitionKeyParameter, diff --git a/my-no-sql-core/src/db/db_table/db_table_master_node.rs b/my-no-sql-core/src/db/db_table/db_table_master_node.rs index 45fe25d..eefd5af 100644 --- a/my-no-sql-core/src/db/db_table/db_table_master_node.rs +++ b/my-no-sql-core/src/db/db_table/db_table_master_node.rs @@ -5,7 +5,9 @@ use rust_extensions::{ use crate::db::PartitionKey; -use super::{AvgSize, DataToGc, DbPartitionsContainer, DbTable, DbTableAttributes}; +use super::{ + AvgSize, DataToGc, DbPartitionsContainer, DbTableAttributes, DbTableInner, DbTableName, +}; pub struct PartitionLastWriteMoment { pub partition_key: PartitionKey, @@ -18,8 +20,8 @@ impl EntityWithStrKey for PartitionLastWriteMoment { } } -impl DbTable { - pub fn new(name: String, attributes: DbTableAttributes) -> Self { +impl DbTableInner { + pub fn new(name: DbTableName, attributes: DbTableAttributes) -> Self { Self { name, partitions: DbPartitionsContainer::new(), @@ -105,7 +107,7 @@ mod tests { use std::sync::Arc; use crate::{ - db::DbTable, + db::DbTableInner, db_json_entity::{DbJsonEntity, JsonTimeStamp}, }; @@ -113,19 +115,17 @@ mod tests { #[test] fn test_insert_record() { - let mut db_table = DbTable::new( - "test-table".to_string(), - DbTableAttributes::create_default(), - ); + let mut db_table = + DbTableInner::new("test-table".into(), DbTableAttributes::create_default()); let now = JsonTimeStamp::now(); let test_json = r#"{ "PartitionKey": "test", - "RowKey": "test", + "RowKey": "test" }"#; - let db_row = 
DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &now).unwrap(); + let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &now).unwrap(); let db_row = Arc::new(db_row); @@ -137,19 +137,17 @@ mod tests { #[test] fn test_insert_and_insert_or_replace() { - let mut db_table = DbTable::new( - "test-table".to_string(), - DbTableAttributes::create_default(), - ); + let mut db_table = + DbTableInner::new("test-table".into(), DbTableAttributes::create_default()); let now = JsonTimeStamp::now(); let test_json = r#"{ "PartitionKey": "test", - "RowKey": "test", + "RowKey": "test" }"#; - let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &now).unwrap(); + let db_row = DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &now).unwrap(); let db_row = Arc::new(db_row); @@ -161,11 +159,11 @@ mod tests { "AAA": "111" }"#; - let db_row2 = DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &now).unwrap(); + let db_row2 = DbJsonEntity::parse_into_db_row(test_json.as_bytes().into(), &now).unwrap(); let db_row2 = Arc::new(db_row2); - db_table.insert_or_replace_row(&db_row2, None); + db_table.insert_or_replace_row(db_row2.clone(), None); assert_eq!(db_table.get_table_size(), db_row2.get_src_as_slice().len()); assert_eq!(db_table.get_partitions_amount(), 1); diff --git a/my-no-sql-core/src/db/db_table/db_table_name.rs b/my-no-sql-core/src/db/db_table/db_table_name.rs new file mode 100644 index 0000000..d7d85f6 --- /dev/null +++ b/my-no-sql-core/src/db/db_table/db_table_name.rs @@ -0,0 +1,32 @@ +use std::{fmt::Display, sync::Arc}; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct DbTableName(Arc); + +impl DbTableName { + pub fn as_str(&self) -> &str { + self.0.as_str() + } + + pub fn to_string(&self) -> String { + self.0.to_string() + } +} + +impl Display for DbTableName { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Into for String { + fn into(self) -> DbTableName { + DbTableName(Arc::new(self)) + } +} + +impl<'s> Into for &'s str { + fn into(self) -> DbTableName { + DbTableName(Arc::new(self.to_string())) + } +} diff --git a/my-no-sql-core/src/db/db_table/mod.rs b/my-no-sql-core/src/db/db_table/mod.rs index f64e387..419e67b 100644 --- a/my-no-sql-core/src/db/db_table/mod.rs +++ b/my-no-sql-core/src/db/db_table/mod.rs @@ -1,13 +1,13 @@ -mod db_table; #[cfg(feature = "master-node")] mod db_table_attributes; +mod db_table_inner; #[cfg(feature = "master-node")] pub mod db_table_master_node; #[cfg(feature = "master-node")] pub use db_table_attributes::*; -pub use db_table::*; +pub use db_table_inner::*; #[cfg(feature = "master-node")] mod data_to_gc; @@ -27,3 +27,5 @@ mod all_db_rows_iterator; pub use all_db_rows_iterator::*; mod by_row_key_iterator; pub use by_row_key_iterator::*; +mod db_table_name; +pub use db_table_name::*; diff --git a/my-no-sql-core/src/db_json_entity/db_json_entity.rs b/my-no-sql-core/src/db_json_entity/db_json_entity.rs index a4f6e4a..d2e57c4 100644 --- a/my-no-sql-core/src/db_json_entity/db_json_entity.rs +++ b/my-no-sql-core/src/db_json_entity/db_json_entity.rs @@ -1,6 +1,7 @@ use crate::db::DbRow; -use my_json::json_reader::array_parser::ArrayToJsonObjectsSplitter; +use my_json::json_reader::JsonArrayIterator; + use rust_extensions::date_time::DateTimeAsMicroseconds; use std::sync::Arc; @@ -11,7 +12,7 @@ use super::DbRowContentCompiler; use super::JsonKeyValuePosition; use super::JsonTimeStamp; use super::KeyValueContentPosition; -use 
my_json::json_reader::JsonFirstLineReader; +use my_json::json_reader::JsonFirstLineIterator; pub struct DbJsonEntity { pub partition_key: JsonKeyValuePosition, @@ -22,7 +23,10 @@ pub struct DbJsonEntity { } impl DbJsonEntity { - pub fn new(raw: &[u8]) -> Result { + pub fn from_slice(src: &[u8]) -> Result { + Self::new(JsonFirstLineIterator::new(src)) + } + pub fn new(json_first_line_reader: JsonFirstLineIterator) -> Result { let mut partition_key = None; let mut row_key = None; let mut expires = None; @@ -30,31 +34,40 @@ impl DbJsonEntity { let mut expires_value = None; - for line in JsonFirstLineReader::new(raw) { - let line = line?; + while let Some(line) = json_first_line_reader.get_next() { + let (name_ref, value_ref) = line?; - let name = line.get_name()?; + let name = name_ref.as_unescaped_str()?; match name { super::consts::PARTITION_KEY => { - partition_key = Some(JsonKeyValuePosition::new(&line)); + if value_ref.as_str().is_none() { + return Err(DbEntityParseFail::FieldPartitionKeyCanNotBeNull); + } + + partition_key = + Some(JsonKeyValuePosition::new(&name_ref.data, &value_ref.data)); } super::consts::ROW_KEY => { - row_key = Some(JsonKeyValuePosition::new(&line)); + if value_ref.as_str().is_none() { + return Err(DbEntityParseFail::FieldRowKeyCanNotBeNull); + } + row_key = Some(JsonKeyValuePosition::new(&name_ref.data, &value_ref.data)); } super::consts::EXPIRES => { - expires_value = line.get_value()?.as_date_time(); - expires = Some(JsonKeyValuePosition::new(&line)) + expires_value = value_ref.as_date_time(); + expires = Some(JsonKeyValuePosition::new(&name_ref.data, &value_ref.data)); } super::consts::TIME_STAMP => { - time_stamp = Some(JsonKeyValuePosition::new(&line)); + time_stamp = Some(JsonKeyValuePosition::new(&name_ref.data, &value_ref.data)); } _ => { if rust_extensions::str_utils::compare_strings_case_insensitive( name, super::consts::TIME_STAMP_LOWER_CASE, ) { - time_stamp = Some(JsonKeyValuePosition::new(&line)); + time_stamp = + Some(JsonKeyValuePosition::new(&name_ref.data, &value_ref.data)); } } } @@ -64,29 +77,13 @@ impl DbJsonEntity { return Err(DbEntityParseFail::FieldPartitionKeyIsRequired); } - let partition_key = partition_key.unwrap(); - - if partition_key.key.len() > 255 { - return Err(DbEntityParseFail::PartitionKeyIsTooLong); - } - - if partition_key.value.is_null(raw) { - return Err(DbEntityParseFail::FieldPartitionKeyCanNotBeNull); - } - if row_key.is_none() { return Err(DbEntityParseFail::FieldRowKeyIsRequired); } - let row_key = row_key.unwrap(); - - if row_key.value.is_null(raw) { - return Err(DbEntityParseFail::FieldRowKeyCanNotBeNull); - } - let result = Self { - partition_key, - row_key, + partition_key: partition_key.unwrap(), + row_key: row_key.unwrap(), expires, time_stamp, expires_value, @@ -99,7 +96,7 @@ impl DbJsonEntity { raw: &'s [u8], time_stamp_to_inject: &'s JsonTimeStamp, ) -> Result, DbEntityParseFail> { - let entity = Self::new(raw)?; + let entity = Self::new(JsonFirstLineIterator::new(raw))?; return Ok(DbJsonEntityWithContent::new( raw, @@ -108,33 +105,36 @@ impl DbJsonEntity { )); } - pub fn parse_into_db_row(src: &[u8], now: &JsonTimeStamp) -> Result { + pub fn parse_into_db_row( + json_first_line_reader: JsonFirstLineIterator, + now: &JsonTimeStamp, + ) -> Result { let mut partition_key = None; let mut row_key = None; let mut expires = None; let mut time_stamp = None; let mut expires_value = None; - let mut raw = DbRowContentCompiler::new(src.len()); + let mut raw = 
DbRowContentCompiler::new(json_first_line_reader.as_slice().len()); - for line in JsonFirstLineReader::new(&src) { - let line = line?; + while let Some(line) = json_first_line_reader.get_next() { + let (name_ref, value_ref) = line?; - let name = line.get_name()?; + let name = name_ref.as_unescaped_str().unwrap(); match name { super::consts::PARTITION_KEY => { - partition_key = Some(raw.append(src, &line)); + partition_key = Some(raw.append(&name_ref, &value_ref)); } super::consts::ROW_KEY => { - row_key = Some(raw.append(src, &line)); + row_key = Some(raw.append(&name_ref, &value_ref)); time_stamp = raw .append_str_value(super::consts::TIME_STAMP, now.as_str()) .into(); } super::consts::EXPIRES => { - expires_value = line.get_value()?.as_date_time(); - expires = Some(raw.append(src, &line)); + expires_value = value_ref.as_date_time(); + expires = Some(raw.append(&name_ref, &value_ref)); } super::consts::TIME_STAMP => {} _ => { @@ -143,12 +143,14 @@ impl DbJsonEntity { super::consts::TIME_STAMP_LOWER_CASE, ) { } else { - raw.append(src, &line); + raw.append(&name_ref, &value_ref); } } } } + let content = raw.into_vec(); + if partition_key.is_none() { return Err(DbEntityParseFail::FieldPartitionKeyIsRequired); } @@ -159,7 +161,7 @@ impl DbJsonEntity { return Err(DbEntityParseFail::PartitionKeyIsTooLong); } - if partition_key.value.is_null(src) { + if partition_key.value.is_null(content.as_slice()) { return Err(DbEntityParseFail::FieldPartitionKeyCanNotBeNull); } @@ -169,7 +171,7 @@ impl DbJsonEntity { let row_key = row_key.unwrap(); - if row_key.value.is_null(src) { + if row_key.value.is_null(content.as_slice()) { return Err(DbEntityParseFail::FieldRowKeyCanNotBeNull); } @@ -181,7 +183,7 @@ impl DbJsonEntity { expires_value, }; - let result = DbRow::new(db_json_entity, raw.into_vec()); + let result = DbRow::new(db_json_entity, content); Ok(result) } @@ -209,9 +211,10 @@ impl DbJsonEntity { None } - pub fn restore_into_db_row(raw: &[u8]) -> Result { - let db_row = Self::new(raw)?; - let result = DbRow::new(db_row, raw.to_vec()); + pub fn restore_into_db_row(raw: Vec) -> Result { + let json_first_line_reader = JsonFirstLineIterator::new(raw.as_slice()); + let db_row = Self::new(json_first_line_reader)?; + let result = DbRow::new(db_row, raw); Ok(result) } @@ -221,9 +224,14 @@ impl DbJsonEntity { ) -> Result>, DbEntityParseFail> { let mut result = Vec::new(); - for json in src.split_array_json_to_objects() { + let json_array_iterator = JsonArrayIterator::new(src)?; + + while let Some(json) = json_array_iterator.get_next() { let json = json?; - let db_row = DbJsonEntity::parse_into_db_row(json, inject_time_stamp)?; + let db_row = DbJsonEntity::parse_into_db_row( + json.unwrap_as_object().unwrap(), + inject_time_stamp, + )?; result.push(Arc::new(db_row)); } return Ok(result); @@ -232,9 +240,11 @@ impl DbJsonEntity { pub fn restore_as_vec(src: &[u8]) -> Result>, DbEntityParseFail> { let mut result = Vec::new(); - for json in src.split_array_json_to_objects() { + let json_array_iterator = JsonArrayIterator::new(src)?; + + while let Some(json) = json_array_iterator.get_next() { let json = json?; - let db_entity = DbJsonEntity::restore_into_db_row(json)?; + let db_entity = DbJsonEntity::restore_into_db_row(json.as_bytes().to_vec())?; result.push(Arc::new(db_entity)); } return Ok(result); @@ -246,9 +256,14 @@ impl DbJsonEntity { ) -> Result>)>, DbEntityParseFail> { let mut result = Vec::new(); - for json in src.split_array_json_to_objects() { + let json_array_iterator = JsonArrayIterator::new(src)?; + 
+ while let Some(json) = json_array_iterator.get_next() { let json = json?; - let db_row = DbJsonEntity::parse_into_db_row(json, inject_time_stamp)?; + let db_row = DbJsonEntity::parse_into_db_row( + json.unwrap_as_object().unwrap(), + inject_time_stamp, + )?; let partition_key = db_row.get_partition_key(); @@ -272,9 +287,11 @@ impl DbJsonEntity { ) -> Result>)>, DbEntityParseFail> { let mut result = Vec::new(); - for json in src.split_array_json_to_objects() { + let json_array_iterator = JsonArrayIterator::new(src)?; + + while let Some(json) = json_array_iterator.get_next() { let json = json?; - let db_row = DbJsonEntity::restore_into_db_row(json)?; + let db_row = DbJsonEntity::restore_into_db_row(json.as_bytes().to_vec())?; let partition_key = db_row.get_partition_key(); @@ -386,7 +403,7 @@ fn inject_time_stamp_key_value( pub fn get_the_end_of_the_json(data: &[u8]) -> usize { for i in (0..data.len()).rev() { - if data[i] == my_json::json_reader::consts::CLOSE_BRACKET { + if data[i] == my_json::consts::CLOSE_BRACKET { return i; } } @@ -397,12 +414,44 @@ pub fn get_the_end_of_the_json(data: &[u8]) -> usize { #[cfg(test)] mod tests { + use my_json::json_reader::{AsJsonSlice, JsonFirstLineIterator}; use rust_extensions::date_time::DateTimeAsMicroseconds; use crate::db_json_entity::{DbEntityParseFail, JsonTimeStamp}; use super::DbJsonEntity; + #[test] + pub fn test_partition_key_and_row_key_and_time_stamp_are_ok() { + let src_json = r#"{"TwoFaMethods": {}, + "PartitionKey": "ff95cdae9f7e4f1a847f6b83ad68b495", + "RowKey": "6c09c7f0e44d4ef79cfdd4252ebd54ab", + "TimeStamp": "2022-03-17T09:28:27.5923", + "Expires": "2022-03-17T13:28:29.6537478Z" + }"#; + + let json_first_line_reader = JsonFirstLineIterator::new(src_json.as_bytes()); + + let json_time = JsonTimeStamp::now(); + + let entity = DbJsonEntity::parse_into_db_row(json_first_line_reader, &json_time).unwrap(); + + let json_first_line_reader: JsonFirstLineIterator = entity.get_src_as_slice().into(); + + let dest_entity = + DbJsonEntity::parse_into_db_row(json_first_line_reader, &json_time).unwrap(); + + assert_eq!( + "ff95cdae9f7e4f1a847f6b83ad68b495", + dest_entity.get_partition_key() + ); + + assert_eq!( + "6c09c7f0e44d4ef79cfdd4252ebd54ab", + dest_entity.get_row_key() + ); + } + #[test] pub fn parse_expires_with_z() { let src_json = r#"{"TwoFaMethods": {}, @@ -412,7 +461,9 @@ mod tests { "Expires": "2022-03-17T13:28:29.6537478Z" }"#; - let entity = DbJsonEntity::new(src_json.as_bytes()).unwrap(); + let json_first_line_reader = JsonFirstLineIterator::new(src_json.as_bytes()); + + let entity = DbJsonEntity::new(json_first_line_reader).unwrap(); let expires = entity.expires_value.as_ref().unwrap(); @@ -443,7 +494,9 @@ mod tests { "Expires": "2022-03-17T13:28:29.6537478Z" }"#; - let result = DbJsonEntity::new(src_json.as_bytes()); + let json_first_line_reader = JsonFirstLineIterator::new(src_json.as_bytes()); + + let result = DbJsonEntity::new(json_first_line_reader); if let Err(DbEntityParseFail::FieldPartitionKeyCanNotBeNull) = result { } else { @@ -455,7 +508,9 @@ mod tests { let src_json = r#"{"value":{"is_enabled":true,"fee_percent":5.0,"min_balance_usd":100.0,"fee_period_days":30,"inactivity_period_days":90},"PartitionKey":"*","RowKey":"*"}"#; let time_stamp = JsonTimeStamp::now(); - let db_row = DbJsonEntity::parse_into_db_row(src_json.as_bytes(), &time_stamp).unwrap(); + + let json_first_line_reader = JsonFirstLineIterator::new(src_json.as_bytes()); + let db_row = DbJsonEntity::parse_into_db_row(json_first_line_reader, 
&time_stamp).unwrap();

        println!(
            "{:?}",
@@ -471,7 +526,9 @@ mod tests {

        let mut json = r#"{"PartitionKey":"PK", "RowKey":"RK"} "#.as_bytes().to_vec();

-        let mut db_json_entity = DbJsonEntity::new(json.as_slice()).unwrap();
+        let json_first_line_reader = JsonFirstLineIterator::new(json.as_slice());
+
+        let mut db_json_entity = DbJsonEntity::new(json_first_line_reader).unwrap();

        db_json_entity.inject_at_the_end_of_json(&mut json, &json_ts);

@@ -500,7 +557,9 @@ mod tests {

        let json = r#"{"PartitionKey":"Pk", "RowKey":"Rk", "timestamp":null}"#;

-        let db_row = DbJsonEntity::parse_into_db_row(json.as_bytes(), &json_ts).unwrap();
+        let json_first_line_reader = JsonFirstLineIterator::new(json.as_bytes());
+
+        let db_row = DbJsonEntity::parse_into_db_row(json_first_line_reader, &json_ts).unwrap();

        assert_eq!(db_row.get_partition_key(), "Pk",);
        assert_eq!(db_row.get_row_key(), "Rk",);
@@ -514,7 +573,10 @@ mod tests {

        let json = r#"{"PartitionKey":"Pk", "RowKey":"Rk", "timestamp":"12345678901234567890123456789012345678901234567890"}"#;

-        let db_json_entity = DbJsonEntity::parse_into_db_row(json.as_bytes(), &json_ts).unwrap();
+        let json_first_line_reader = JsonFirstLineIterator::new(json.as_bytes());
+
+        let db_json_entity =
+            DbJsonEntity::parse_into_db_row(json_first_line_reader, &json_ts).unwrap();

        assert_eq!(db_json_entity.get_partition_key(), "Pk",);
        assert_eq!(db_json_entity.get_row_key(), "Rk",);
@@ -532,8 +594,10 @@ mod tests {

        let inject_time_stamp = JsonTimeStamp::now();

+        let json_first_line_reader = JsonFirstLineIterator::new(test_json.as_bytes());
+
        let db_row =
-            DbJsonEntity::parse_into_db_row(test_json.as_bytes(), &inject_time_stamp).unwrap();
+            DbJsonEntity::parse_into_db_row(json_first_line_reader, &inject_time_stamp).unwrap();

        assert_eq!(db_row.get_partition_key(), "Pk");
        assert_eq!(db_row.get_row_key(), "Rk");
diff --git a/my-no-sql-core/src/db_json_entity/db_json_entity_with_content.rs b/my-no-sql-core/src/db_json_entity/db_json_entity_with_content.rs
index b75c4a3..e98ec23 100644
--- a/my-no-sql-core/src/db_json_entity/db_json_entity_with_content.rs
+++ b/my-no-sql-core/src/db_json_entity/db_json_entity_with_content.rs
@@ -1,3 +1,5 @@
+use my_json::json_reader::JsonFirstLineIterator;
+
 use crate::db::DbRow;

 use super::{DbEntityParseFail, DbJsonEntity, JsonTimeStamp};
@@ -34,6 +36,7 @@ impl<'s> DbJsonEntityWithContent<'s> {
     }

     pub fn into_db_row(self) -> Result<DbRow, DbEntityParseFail> {
-        DbJsonEntity::parse_into_db_row(self.raw, &self.time_stamp)
+        let first_line_reader = JsonFirstLineIterator::new(self.raw);
+        DbJsonEntity::parse_into_db_row(first_line_reader, &self.time_stamp)
     }
 }
diff --git a/my-no-sql-core/src/db_json_entity/db_row_content_compiler/db_row_content_compiler.rs b/my-no-sql-core/src/db_json_entity/db_row_content_compiler/db_row_content_compiler.rs
index e1ae670..db701cd 100644
--- a/my-no-sql-core/src/db_json_entity/db_row_content_compiler/db_row_content_compiler.rs
+++ b/my-no-sql-core/src/db_json_entity/db_row_content_compiler/db_row_content_compiler.rs
@@ -1,4 +1,4 @@
-use my_json::json_reader::JsonFirstLine;
+use my_json::json_reader::{JsonFieldNameRef, JsonValueRef};

 use crate::db_json_entity::{JsonKeyValuePosition, KeyValueContentPosition};

@@ -37,15 +37,18 @@ impl DbRowContentCompiler {
         }
     }

-    pub fn append(&mut self, src: &[u8], line: &JsonFirstLine) -> JsonKeyValuePosition {
+    pub fn append(
+        &mut self,
+        name: &JsonFieldNameRef,
+        the_value: &JsonValueRef,
+    ) -> JsonKeyValuePosition {
         self.append_first_line();

         let mut key = KeyValueContentPosition {
            start:
self.content.len(), end: 0, }; - self.content - .extend_from_slice(&src[line.name_start..line.name_end]); + self.content.extend_from_slice(name.as_slice()); key.end = self.content.len(); @@ -55,8 +58,7 @@ impl DbRowContentCompiler { start: self.content.len(), end: 0, }; - self.content - .extend_from_slice(&src[line.value_start..line.value_end]); + self.content.extend_from_slice(the_value.as_slice()); value.end = self.content.len(); diff --git a/my-no-sql-core/src/db_json_entity/json_key_value_position.rs b/my-no-sql-core/src/db_json_entity/json_key_value_position.rs index cc07434..648365d 100644 --- a/my-no-sql-core/src/db_json_entity/json_key_value_position.rs +++ b/my-no-sql-core/src/db_json_entity/json_key_value_position.rs @@ -1,4 +1,4 @@ -use my_json::json_reader::JsonFirstLine; +use my_json::json_reader::{JsonContentOffset, JsonValue}; #[derive(Debug, Clone)] pub struct KeyValueContentPosition { @@ -31,16 +31,16 @@ pub struct JsonKeyValuePosition { } impl JsonKeyValuePosition { - pub fn new(src: &JsonFirstLine) -> Self { + pub fn new(name: &JsonContentOffset, value: &JsonValue) -> Self { Self { key: KeyValueContentPosition { - start: src.name_start, - end: src.name_end, + start: name.start, + end: name.end, }, value: KeyValueContentPosition { - start: src.value_start, - end: src.value_end, + start: value.start, + end: value.end, }, } } diff --git a/my-no-sql-core/src/entity_serializer.rs b/my-no-sql-core/src/entity_serializer.rs index 5362c60..4f2fec2 100644 --- a/my-no-sql-core/src/entity_serializer.rs +++ b/my-no-sql-core/src/entity_serializer.rs @@ -1,3 +1,4 @@ +use my_json::json_reader::JsonFirstLineIterator; use my_no_sql_abstractions::MyNoSqlEntity; use serde::{de::DeserializeOwned, Serialize}; @@ -10,29 +11,33 @@ where serde_json::to_vec(&entity).unwrap() } -pub fn deserialize(data: &[u8]) -> TMyNoSqlEntity +pub fn deserialize(data: &[u8]) -> Result where TMyNoSqlEntity: MyNoSqlEntity + DeserializeOwned, { let parse_result: Result = serde_json::from_slice(&data); match parse_result { - Ok(el) => return el, + Ok(el) => return Ok(el), Err(err) => { - let db_entity = DbJsonEntity::new(data); + + let json_first_line_iterator = JsonFirstLineIterator::new(data); + let db_entity = DbJsonEntity::new(json_first_line_iterator); match db_entity { Ok(db_entity) => { - panic!( + return Err(format!( "Table: {}. Can not parse entity with PartitionKey: [{}] and RowKey: [{}]. Err: {:?}", TMyNoSqlEntity::TABLE_NAME, db_entity.get_partition_key(data), db_entity.get_row_key(data), err - ); + )) + ; } Err(err) => { - panic!( + return Err(format!( "Table: {}. Can not extract partitionKey and rowKey. Looks like entity broken at all. 
Err: {:?}", TMyNoSqlEntity::TABLE_NAME, err - ) + )) + } } } @@ -57,14 +62,14 @@ pub fn inject_partition_key_and_row_key( let to_insert = if let Some(row_key) = row_key { format!( "\"PartitionKey\":\"{}\",\"RowKey\":\"{}\",", - my_json::EscapedJsonString::new(partition_key).as_str(), - my_json::EscapedJsonString::new(row_key).as_str() + my_json::json_string_value::escape_json_string_value(partition_key).as_str(), + my_json::json_string_value::escape_json_string_value(row_key).as_str(), ) .into_bytes() } else { format!( "\"PartitionKey\":\"{}\",", - my_json::EscapedJsonString::new(partition_key).as_str(), + my_json::json_string_value::escape_json_string_value(partition_key).as_str(), ) .into_bytes() }; diff --git a/my-no-sql-core/src/expiration_index.rs b/my-no-sql-core/src/expiration_index.rs index 748e0d4..7c2278a 100644 --- a/my-no-sql-core/src/expiration_index.rs +++ b/my-no-sql-core/src/expiration_index.rs @@ -48,27 +48,43 @@ impl> ExpirationIndexContainer) { + pub fn add(&mut self, item: &impl ExpirationIndex) -> Option { let expiration_moment = item.get_expiration_moment(); if item.get_expiration_moment().is_none() { - return; + return None; } let expiration_moment = expiration_moment.unwrap(); - match self.find_index(expiration_moment) { + let added = match self.find_index(expiration_moment) { Ok(index) => { - self.index[index].items.push(item.to_owned()); + let items = &mut self.index[index].items; + + if items + .iter() + .any(|itm| item.get_id_as_str() == itm.get_id_as_str()) + { + false + } else { + self.index[index].items.push(item.to_owned()); + false + } } Err(index) => { self.index.insert( index, ExpirationIndexItem::new(expiration_moment, item.to_owned()), ); + + true } + }; + + if added { + self.amount += 1; } - self.amount += 1; + Some(added) } pub fn update( @@ -80,7 +96,7 @@ impl> ExpirationIndexContainer) { @@ -107,16 +123,23 @@ impl> ExpirationIndexContainer { + #[cfg(not(test))] + println!( + "Somehow we did not find the index for expiration moment {} of '{}'. Expiration moment as rfc3339 is {}", + expiration_moment.unix_microseconds, key_as_str, expiration_moment.to_rfc3339() + ); + + #[cfg(test)] panic!( "Somehow we did not find the index for expiration moment {} of '{}'. 
Expiration moment as rfc3339 is {}", expiration_moment.unix_microseconds, key_as_str, expiration_moment.to_rfc3339() ); } } - - self.amount -= 1; } pub fn get_items_to_expire( @@ -152,6 +175,19 @@ impl> ExpirationIndexContainer, + flurl: FlUrl, + url: &str, table_name: &'static str, - params: CreateTableParams, + params: &CreateTableParams, sync_period: DataSynchronizationPeriod, ) -> Result<(), DataWriterError> { - let url = settings.get_url().await; - let fl_url = FlUrl::new(url.clone()) + let fl_url = flurl .append_path_segment("Tables") .append_path_segment("CreateIfNotExists") .append_data_sync_period(&sync_period) @@ -34,20 +33,18 @@ pub async fn create_table_if_not_exists( let fl_url = params.populate_params(fl_url); - let mut response = fl_url.post(None).await?; + let mut response = fl_url.post(FlUrlBody::Empty).await?; - create_table_errors_handler(&mut response, "create_table_if_not_exists", url.as_str()).await + create_table_errors_handler(&mut response, "create_table_if_not_exists", url).await } pub async fn create_table( - settings: &Arc, + flurl: FlUrl, + url: &str, table_name: &str, params: CreateTableParams, sync_period: &DataSynchronizationPeriod, ) -> Result<(), DataWriterError> { - let url = settings.get_url().await; - let flurl = FlUrl::new(url.as_str()); - let fl_url = flurl .append_path_segment("Tables") .append_path_segment("Create") @@ -56,24 +53,22 @@ pub async fn create_table( let fl_url = params.populate_params(fl_url); - let mut response = fl_url.post(None).await?; + let mut response = fl_url.post(FlUrlBody::Empty).await?; - create_table_errors_handler(&mut response, "create_table", url.as_str()).await + create_table_errors_handler(&mut response, "create_table", url).await } pub async fn insert_entity( - settings: &Arc, + flurl: FlUrl, entity: &TEntity, sync_period: &DataSynchronizationPeriod, ) -> Result<(), DataWriterError> { - let flurl = get_fl_url(settings).await; - let response = flurl .append_path_segment(ROW_CONTROLLER) .append_path_segment("Insert") .append_data_sync_period(sync_period) .with_table_name_as_query_param(TEntity::TABLE_NAME) - .post(entity.serialize_entity().into()) + .post(FlUrlBody::Json(entity.serialize_entity())) .await?; if is_ok_result(&response) { @@ -88,33 +83,34 @@ pub async fn insert_entity( - settings: &Arc, + flurl: FlUrl, entity: &TEntity, sync_period: &DataSynchronizationPeriod, ) -> Result<(), DataWriterError> { - let flurl = get_fl_url(settings).await; + let entity = entity.serialize_entity(); let response = flurl .append_path_segment(ROW_CONTROLLER) .append_path_segment("InsertOrReplace") .append_data_sync_period(sync_period) .with_table_name_as_query_param(TEntity::TABLE_NAME) - .post(entity.serialize_entity().into()) + .post(FlUrlBody::Json(entity)) .await?; if is_ok_result(&response) { return Ok(()); } - let reason = response.receive_body().await?; - let reason = String::from_utf8(reason)?; - return Err(DataWriterError::Error(reason)); + let body = response.receive_body().await?; + let body = String::from_utf8(body)?; + + return Err(DataWriterError::Error(body)); } pub async fn bulk_insert_or_replace< TEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send, >( - settings: &Arc, + flurl: FlUrl, entities: &[TEntity], sync_period: &DataSynchronizationPeriod, ) -> Result<(), DataWriterError> { @@ -122,8 +118,6 @@ pub async fn bulk_insert_or_replace< return Ok(()); } - let flurl = get_fl_url(settings).await; - let response = flurl .append_path_segment(BULK_CONTROLLER) .append_path_segment("InsertOrReplace") @@ -142,13 
+136,11 @@ pub async fn bulk_insert_or_replace< } pub async fn get_entity( - settings: &Arc, + flurl: FlUrl, partition_key: &str, row_key: &str, update_read_statistics: Option<&UpdateReadStatistics>, ) -> Result, DataWriterError> { - let flurl = get_fl_url(settings).await; - let mut request = flurl .append_path_segment(ROW_CONTROLLER) .with_partition_key_as_query_param(partition_key) @@ -168,7 +160,7 @@ pub async fn get_entity( - settings: &Arc, + flurl: FlUrl, partition_key: &str, update_read_statistics: Option<&UpdateReadStatistics>, ) -> Result>, DataWriterError> { - let flurl = get_fl_url(settings).await; let mut request = flurl .append_path_segment(ROW_CONTROLLER) .with_partition_key_as_query_param(partition_key) @@ -217,11 +208,11 @@ pub async fn get_enum_case_models_by_partition_key< + Send + 'static, >( - settings: &Arc, + flurl: FlUrl, update_read_statistics: Option<&UpdateReadStatistics>, ) -> Result>, DataWriterError> { let result: Option> = - get_by_partition_key(settings, TResult::PARTITION_KEY, update_read_statistics).await?; + get_by_partition_key(flurl, TResult::PARTITION_KEY, update_read_statistics).await?; match result { Some(entities) => { @@ -246,11 +237,11 @@ pub async fn get_enum_case_model< + Send + 'static, >( - settings: &Arc, + flurl: FlUrl, update_read_statistics: Option<&UpdateReadStatistics>, ) -> Result, DataWriterError> { let entity: Option = get_entity( - settings, + flurl, TResult::PARTITION_KEY, TResult::ROW_KEY, update_read_statistics, @@ -264,12 +255,11 @@ pub async fn get_enum_case_model< } pub async fn get_by_row_key( - settings: &Arc, + flurl: FlUrl, row_key: &str, ) -> Result>, DataWriterError> { - let flurl = get_fl_url(settings).await; - let mut response = flurl + .append_path_segment(API_SEGMENT) .append_path_segment(ROW_CONTROLLER) .with_row_key_as_query_param(row_key) .with_table_name_as_query_param(TEntity::TABLE_NAME) @@ -290,6 +280,49 @@ pub async fn get_by_row_key, + limit: Option, +) -> Result, DataWriterError> { + #[derive(Serialize, Deserialize)] + pub struct GetPartitionsJsonResult { + pub amount: usize, + pub data: Vec, + } + let mut response = flurl + .append_path_segment(API_SEGMENT) + .append_path_segment(PARTITIONS_CONTROLLER) + .with_table_name_as_query_param(table_name) + .with_skip_as_query_param(skip) + .with_limit_as_query_param(limit) + .get() + .await?; + + if response.get_status_code() == 404 { + return Err(DataWriterError::TableNotFound(table_name.to_string())); + } + + check_error(&mut response).await?; + + if is_ok_result(&response) { + let result: Result = + serde_json::from_slice(response.get_body_as_slice().await?); + match result { + Ok(result) => return Ok(result.data), + Err(err) => { + return Err(DataWriterError::Error(format!( + "Failed to deserialize: {:?}", + err + ))) + } + } + } + + return Ok(vec![]); +} + pub async fn delete_enum_case< TEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send, TResult: MyNoSqlEntity @@ -299,10 +332,10 @@ pub async fn delete_enum_case< + Send + 'static, >( - settings: &Arc, + flurl: FlUrl, ) -> Result, DataWriterError> { let entity: Option = - delete_row(settings, TResult::PARTITION_KEY, TResult::ROW_KEY).await?; + delete_row(flurl, TResult::PARTITION_KEY, TResult::ROW_KEY).await?; match entity { Some(entity) => Ok(Some(entity.into())), @@ -319,10 +352,10 @@ pub async fn delete_enum_case_with_row_key< + Send + 'static, >( - settings: &Arc, + flurl: FlUrl, row_key: &str, ) -> Result, DataWriterError> { - let entity: Option = delete_row(settings, TResult::PARTITION_KEY, 
row_key).await?;
+    let entity: Option = delete_row(flurl, TResult::PARTITION_KEY, row_key).await?;

    match entity {
        Some(entity) => Ok(Some(entity.into())),
@@ -331,12 +364,12 @@ pub async fn delete_enum_case_with_row_key<
 }

 pub async fn delete_row(
-    settings: &Arc,
+    flurl: FlUrl,
     partition_key: &str,
     row_key: &str,
 ) -> Result, DataWriterError> {
-    let flurl = get_fl_url(settings).await;
     let mut response = flurl
+        .append_path_segment(API_SEGMENT)
         .append_path_segment(ROW_CONTROLLER)
         .with_partition_key_as_query_param(partition_key)
         .with_row_key_as_query_param(row_key)
@@ -351,7 +384,7 @@ pub async fn delete_row(
 }

 pub async fn delete_partitions(
-    settings: &Arc,
+    flurl: FlUrl,
     table_name: &str,
     partition_keys: &[&str],
 ) -> Result<(), DataWriterError> {
-    let flurl = get_fl_url(settings).await;
     let mut response = flurl
         .append_path_segment(ROWS_CONTROLLER)
         .with_table_name_as_query_param(table_name)
@@ -381,9 +413,8 @@ pub async fn delete_partitions(
 }

 pub async fn get_all(
-    settings: &Arc,
+    flurl: FlUrl,
 ) -> Result>, DataWriterError> {
-    let flurl = get_fl_url(settings).await;
     let mut response = flurl
         .append_path_segment(ROW_CONTROLLER)
         .with_table_name_as_query_param(TEntity::TABLE_NAME)
@@ -407,11 +438,10 @@ pub async fn get_all(
 }

 pub async fn clean_table_and_bulk_insert<
     TEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send,
 >(
-    settings: &Arc,
+    flurl: FlUrl,
     entities: &[TEntity],
     sync_period: &DataSynchronizationPeriod,
 ) -> Result<(), DataWriterError> {
-    let flurl = get_fl_url(settings).await;
     let mut response = flurl
         .append_path_segment(BULK_CONTROLLER)
         .append_path_segment("CleanAndBulkInsert")
@@ -428,12 +458,11 @@ pub async fn clean_table_and_bulk_insert<

 pub async fn clean_partition_and_bulk_insert<
     TEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send,
 >(
-    settings: &Arc,
+    flurl: FlUrl,
     partition_key: &str,
     entities: &[TEntity],
     sync_period: &DataSynchronizationPeriod,
 ) -> Result<(), DataWriterError> {
-    let flurl = get_fl_url(settings).await;
     let mut response = flurl
         .append_path_segment(BULK_CONTROLLER)
         .append_path_segment("CleanAndBulkInsert")
@@ -452,16 +481,11 @@ fn is_ok_result(response: &FlUrlResponse) -> bool {
     response.get_status_code() >= 200 && response.get_status_code() < 300
 }

-async fn get_fl_url(settings: &Arc) -> FlUrl {
-    let url = settings.get_url().await;
-    FlUrl::new(url)
-}
-
 fn serialize_entities_to_body(
     entities: &[TEntity],
-) -> Option<Vec<u8>> {
+) -> FlUrlBody {
     if entities.len() == 0 {
-        return Some(vec![b'[', b']']);
+        return FlUrlBody::Json(vec![b'[', b']']);
     }

     let mut json_array_writer = JsonArrayWriter::new();
@@ -469,10 +493,10 @@ fn serialize_entities_to_body(
     for entity in entities {
         let payload = entity.serialize_entity();
         let payload: RawJsonObject = payload.into();
-        json_array_writer.write(payload);
+        json_array_writer = json_array_writer.write(payload);
     }

-    Some(json_array_writer.build())
+    FlUrlBody::Json(json_array_writer.build().into_bytes())
 }

 async fn check_error(response: &mut FlUrlResponse) -> Result<(), DataWriterError> {
@@ -485,7 +509,7 @@ async fn check_error(response: &mut FlUrlResponse) -> Result<(), DataWriterError

     if let Err(err) = &result {
         my_logger::LOGGER.write_error(
-            format!("FlUrlRequest to {}", response.url.as_str()),
+            format!("FlUrlRequest to {}", response.url.to_string()),
             format!("{:?}", err),
             None.into(),
         );
@@ -527,12 +551,50 @@ fn deserialize_entities(
     src: &[u8],
 ) -> Result, DataWriterError> {
     let mut result = Vec::new();
+
+    let json_array_iterator = JsonArrayIterator::new(src);
+
+    if let Err(err) = &json_array_iterator {
+        panic!(
+            "Can not deserialize entities for table: {}. 
Err: {:?}", + TEntity::TABLE_NAME, + err + ); + } + + let json_array_iterator = json_array_iterator.unwrap(); + + while let Some(item) = json_array_iterator.get_next() { + let itm = item.unwrap(); + + match TEntity::deserialize_entity(itm.as_bytes()) { + Ok(entity) => { + result.push(entity); + } + Err(err) => { + println!( + "Table: '{}', Entity: {:?}", + TEntity::TABLE_NAME, + std::str::from_utf8(itm.as_bytes()) + ); + panic!("Can not deserialize entity: {}", err); + } + } + } + Ok(result) + + /* + let mut result = Vec::new(); + + + for itm in JsonArrayIterator::new(src) { let itm = itm.unwrap(); - result.push(TEntity::deserialize_entity(itm)); + result.push(TEntity::deserialize_entity(itm).unwrap()); } Ok(result) + */ } async fn create_table_errors_handler( @@ -557,7 +619,7 @@ async fn create_table_errors_handler( #[cfg(test)] mod tests { - use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; + use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer, Timestamp}; use serde::Serialize; use serde_derive::Deserialize; @@ -570,6 +632,7 @@ mod tests { impl MyNoSqlEntity for TestEntity { const TABLE_NAME: &'static str = "test"; + const LAZY_DESERIALIZATION: bool = false; fn get_partition_key(&self) -> &str { &self.partition_key @@ -579,8 +642,8 @@ mod tests { &self.row_key } - fn get_time_stamp(&self) -> i64 { - 0 + fn get_time_stamp(&self) -> Timestamp { + Timestamp::default() } } @@ -589,7 +652,7 @@ mod tests { my_no_sql_core::entity_serializer::serialize(self) } - fn deserialize_entity(src: &[u8]) -> Self { + fn deserialize_entity(src: &[u8]) -> Result { my_no_sql_core::entity_serializer::deserialize(src) } } @@ -615,8 +678,10 @@ mod tests { }, ]; - let as_json = super::serialize_entities_to_body(&entities).unwrap(); + let as_json = super::serialize_entities_to_body(&entities); + + let body = as_json.into_vec(); - println!("{}", std::str::from_utf8(&as_json).unwrap()); + println!("{}", std::str::from_utf8(&body).unwrap()); } } diff --git a/my-no-sql-data-writer/src/my_no_sql_data_writer/fl_url_ext.rs b/my-no-sql-data-writer/src/my_no_sql_data_writer/fl_url_ext.rs index fff9f34..d8528f4 100644 --- a/my-no-sql-data-writer/src/my_no_sql_data_writer/fl_url_ext.rs +++ b/my-no-sql-data-writer/src/my_no_sql_data_writer/fl_url_ext.rs @@ -3,14 +3,13 @@ use my_no_sql_abstractions::DataSynchronizationPeriod; pub trait FlUrlExt { fn with_table_name_as_query_param(self, table_name: &str) -> FlUrl; - fn append_data_sync_period(self, sync_period: &DataSynchronizationPeriod) -> FlUrl; - fn with_partition_key_as_query_param(self, partition_key: &str) -> FlUrl; fn with_partition_keys_as_query_param(self, partition_keys: &[&str]) -> FlUrl; fn with_row_key_as_query_param(self, partition_key: &str) -> FlUrl; - fn with_persist_as_query_param(self, persist: bool) -> FlUrl; + fn with_skip_as_query_param(self, skip: Option) -> FlUrl; + fn with_limit_as_query_param(self, limit: Option) -> FlUrl; } impl FlUrlExt for FlUrl { @@ -47,8 +46,20 @@ impl FlUrlExt for FlUrl { fn with_row_key_as_query_param(self, row_key: &str) -> FlUrl { self.append_query_param("rowKey", Some(row_key)) } - fn with_persist_as_query_param(self, persist: bool) -> FlUrl { - let value = if persist { "1" } else { "0" }; - self.append_query_param("persist", Some(value)) + + fn with_skip_as_query_param(self, skip: Option) -> FlUrl { + if let Some(skip) = skip { + self.append_query_param("skip", Some(skip.to_string())) + } else { + self + } + } + + fn with_limit_as_query_param(self, limit: Option) -> FlUrl { + if let 
Some(limit) = limit { + self.append_query_param("limit", Some(limit.to_string())) + } else { + self + } } } diff --git a/my-no-sql-data-writer/src/my_no_sql_data_writer/fl_url_factory.rs b/my-no-sql-data-writer/src/my_no_sql_data_writer/fl_url_factory.rs new file mode 100644 index 0000000..ab615b3 --- /dev/null +++ b/my-no-sql-data-writer/src/my_no_sql_data_writer/fl_url_factory.rs @@ -0,0 +1,83 @@ +use std::sync::Arc; + +use flurl::FlUrl; + +use rust_extensions::UnsafeValue; + +use super::{CreateTableParams, DataWriterError, MyNoSqlWriterSettings}; + +#[derive(Clone)] +pub struct FlUrlFactory { + settings: Arc, + auto_create_table_params: Option>, + + #[cfg(feature = "with-ssh")] + pub ssh_security_credentials_resolver: + Option>, + + create_table_is_called: Arc>, + table_name: &'static str, +} + +impl FlUrlFactory { + pub fn new( + settings: Arc, + auto_create_table_params: Option>, + table_name: &'static str, + ) -> Self { + Self { + auto_create_table_params, + + create_table_is_called: UnsafeValue::new(false).into(), + settings, + table_name, + + #[cfg(feature = "with-ssh")] + ssh_security_credentials_resolver: None, + } + } + + async fn create_fl_url(&self, url: &str) -> FlUrl { + let fl_url = flurl::FlUrl::new(url); + + #[cfg(feature = "with-ssh")] + if let Some(ssh_security_credentials_resolver) = &self.ssh_security_credentials_resolver { + return fl_url + .set_ssh_security_credentials_resolver(ssh_security_credentials_resolver.clone()); + } + + fl_url + } + + pub async fn get_fl_url(&self) -> Result<(FlUrl, String), DataWriterError> { + let url = self.settings.get_url().await; + if !self.create_table_is_called.get_value() { + if let Some(crate_table_params) = &self.auto_create_table_params { + self.create_table_if_not_exists(url.as_str(), crate_table_params) + .await?; + } + + self.create_table_is_called.set_value(true); + } + + let result = self.create_fl_url(url.as_str()).await; + + Ok((result, url)) + } + + pub async fn create_table_if_not_exists( + &self, + url: &str, + create_table_params: &CreateTableParams, + ) -> Result<(), DataWriterError> { + let fl_url = self.create_fl_url(url).await; + super::execution::create_table_if_not_exists( + fl_url, + url, + self.table_name, + create_table_params, + my_no_sql_abstractions::DataSynchronizationPeriod::Sec1, + ) + .await + } +} diff --git a/my-no-sql-data-writer/src/my_no_sql_data_writer/mod.rs b/my-no-sql-data-writer/src/my_no_sql_data_writer/mod.rs index fbff7eb..fcee8b0 100644 --- a/my-no-sql-data-writer/src/my_no_sql_data_writer/mod.rs +++ b/my-no-sql-data-writer/src/my_no_sql_data_writer/mod.rs @@ -10,3 +10,5 @@ mod execution; mod fl_url_ext; mod with_retries; pub use with_retries::*; +mod fl_url_factory; +pub use fl_url_factory::*; diff --git a/my-no-sql-data-writer/src/my_no_sql_data_writer/my_no_sql_data_writer.rs b/my-no-sql-data-writer/src/my_no_sql_data_writer/my_no_sql_data_writer.rs index 564c4ed..de2e743 100644 --- a/my-no-sql-data-writer/src/my_no_sql_data_writer/my_no_sql_data_writer.rs +++ b/my-no-sql-data-writer/src/my_no_sql_data_writer/my_no_sql_data_writer.rs @@ -1,13 +1,14 @@ -use std::{marker::PhantomData, sync::Arc, time::Duration}; +use std::{marker::PhantomData, sync::Arc}; use flurl::FlUrl; + use my_no_sql_abstractions::{DataSynchronizationPeriod, MyNoSqlEntity, MyNoSqlEntitySerializer}; use serde::{Deserialize, Serialize}; use crate::{MyNoSqlDataWriterWithRetries, MyNoSqlWriterSettings}; -use super::{DataWriterError, UpdateReadStatistics}; +use super::{fl_url_factory::FlUrlFactory, DataWriterError, 
UpdateReadStatistics}; pub struct CreateTableParams { pub persist: bool, @@ -40,9 +41,9 @@ impl CreateTableParams { } pub struct MyNoSqlDataWriter { - settings: Arc, sync_period: DataSynchronizationPeriod, phantom: PhantomData, + fl_url_factory: FlUrlFactory, } impl MyNoSqlDataWriter { @@ -51,25 +52,30 @@ impl MyNoSqlData auto_create_table_params: Option, sync_period: DataSynchronizationPeriod, ) -> Self { - if let Some(create_table_params) = auto_create_table_params { - tokio::spawn(super::execution::create_table_if_not_exists( - settings.clone(), - TEntity::TABLE_NAME, - create_table_params, - sync_period, - )); - } + let settings_cloned = settings.clone(); + tokio::spawn(async move { + crate::PING_POOL + .register(settings_cloned, TEntity::TABLE_NAME) + .await; + }); Self { - settings, phantom: PhantomData, sync_period, + fl_url_factory: FlUrlFactory::new( + settings, + auto_create_table_params.map(|itm| itm.into()), + TEntity::TABLE_NAME, + ), } } pub async fn create_table(&self, params: CreateTableParams) -> Result<(), DataWriterError> { + let (fl_url, url) = self.fl_url_factory.get_fl_url().await?; + super::execution::create_table( - &self.settings, + fl_url, + url.as_str(), TEntity::TABLE_NAME, params, &self.sync_period, @@ -77,12 +83,24 @@ impl MyNoSqlData .await } + #[cfg(feature = "with-ssh")] + pub fn set_ssh_security_credentials_resolver( + &mut self, + resolver: Arc< + dyn flurl::my_ssh::ssh_settings::SshSecurityCredentialsResolver + Send + Sync, + >, + ) { + self.fl_url_factory.ssh_security_credentials_resolver = Some(resolver); + } + pub async fn create_table_if_not_exists( &self, - params: CreateTableParams, + params: &CreateTableParams, ) -> Result<(), DataWriterError> { + let (fl_url, url) = self.fl_url_factory.get_fl_url().await?; super::execution::create_table_if_not_exists( - self.settings.clone(), + fl_url, + url.as_str(), TEntity::TABLE_NAME, params, self.sync_period, @@ -90,32 +108,30 @@ impl MyNoSqlData .await } - pub fn with_retries( - &self, - delay_between_attempts: Duration, - max_attempts: usize, - ) -> MyNoSqlDataWriterWithRetries { + pub fn with_retries(&self, max_attempts: usize) -> MyNoSqlDataWriterWithRetries { MyNoSqlDataWriterWithRetries::new( - self.settings.clone(), + self.fl_url_factory.clone(), self.sync_period, - delay_between_attempts, max_attempts, ) } pub async fn insert_entity(&self, entity: &TEntity) -> Result<(), DataWriterError> { - super::execution::insert_entity(&self.settings, entity, &self.sync_period).await + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::insert_entity(fl_url, entity, &self.sync_period).await } pub async fn insert_or_replace_entity(&self, entity: &TEntity) -> Result<(), DataWriterError> { - super::execution::insert_or_replace_entity(&self.settings, entity, &self.sync_period).await + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::insert_or_replace_entity(fl_url, entity, &self.sync_period).await } pub async fn bulk_insert_or_replace( &self, entities: &[TEntity], ) -> Result<(), DataWriterError> { - super::execution::bulk_insert_or_replace(&self.settings, entities, &self.sync_period).await + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::bulk_insert_or_replace(fl_url, entities, &self.sync_period).await } pub async fn get_entity( @@ -124,8 +140,9 @@ impl MyNoSqlData row_key: &str, update_read_statistics: Option, ) -> Result, DataWriterError> { + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; 
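The recurring first line in every rewritten method here, `let (fl_url, _) = self.fl_url_factory.get_fl_url().await?;`, is the heart of this refactor: URL resolution and one-time table auto-creation move out of the constructor and into a factory consulted per request. A reduced sketch of that lazy-setup idea, under stated assumptions: `LazySetupFactory` and its members are hypothetical names, and an `AtomicBool` stands in for the crate's `UnsafeValue` flag.

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

// Simplified model of the factory pattern: every request goes through
// get_client(), which runs one-time setup ("create table") before handing
// out a fresh client value together with the URL it was built from.
struct LazySetupFactory {
    url: Arc<String>,
    setup_done: AtomicBool,
}

impl LazySetupFactory {
    fn new(url: String) -> Self {
        Self {
            url: Arc::new(url),
            setup_done: AtomicBool::new(false),
        }
    }

    async fn ensure_table(&self) -> Result<(), String> {
        // Placeholder for a create-table-if-not-exists call.
        println!("creating table once at {}", self.url);
        Ok(())
    }

    async fn get_client(&self) -> Result<(String, Arc<String>), String> {
        if !self.setup_done.load(Ordering::SeqCst) {
            self.ensure_table().await?;
            self.setup_done.store(true, Ordering::SeqCst);
        }
        // A fresh "client" per call, mirroring the (FlUrl, String) tuple.
        Ok((format!("client({})", self.url), self.url.clone()))
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let factory = LazySetupFactory::new("http://127.0.0.1:5123".to_string());
    let (client, _url) = factory.get_client().await?;
    println!("{client}");
    let (client, _url) = factory.get_client().await?; // no second setup
    println!("{client}");
    Ok(())
}
```

As in the diff, the check-then-set is not atomic as a unit, so two concurrent first calls may both hit the create-if-not-exists endpoint; that is harmless because the operation is idempotent.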
super::execution::get_entity( - &self.settings, + fl_url, partition_key, row_key, update_read_statistics.as_ref(), @@ -138,8 +155,9 @@ impl MyNoSqlData partition_key: &str, update_read_statistics: Option, ) -> Result>, DataWriterError> { + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; super::execution::get_by_partition_key( - &self.settings, + fl_url, partition_key, update_read_statistics.as_ref(), ) @@ -157,8 +175,9 @@ impl MyNoSqlData &self, update_read_statistics: Option, ) -> Result>, DataWriterError> { + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; super::execution::get_enum_case_models_by_partition_key( - &self.settings, + fl_url, update_read_statistics.as_ref(), ) .await @@ -175,14 +194,25 @@ impl MyNoSqlData &self, update_read_statistics: Option, ) -> Result, DataWriterError> { - super::execution::get_enum_case_model(&self.settings, update_read_statistics.as_ref()).await + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::get_enum_case_model(fl_url, update_read_statistics.as_ref()).await } pub async fn get_by_row_key( &self, row_key: &str, ) -> Result>, DataWriterError> { - super::execution::get_by_row_key(&self.settings, row_key).await + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::get_by_row_key(fl_url, row_key).await + } + + pub async fn get_partition_keys( + &self, + skip: Option, + limit: Option, + ) -> Result, DataWriterError> { + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::get_partition_keys(fl_url, TEntity::TABLE_NAME, skip, limit).await } pub async fn delete_enum_case< @@ -195,7 +225,8 @@ impl MyNoSqlData >( &self, ) -> Result, DataWriterError> { - super::execution::delete_enum_case(&self.settings).await + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::delete_enum_case(fl_url).await } pub async fn delete_enum_case_with_row_key< @@ -209,7 +240,8 @@ impl MyNoSqlData &self, row_key: &str, ) -> Result, DataWriterError> { - super::execution::delete_enum_case_with_row_key(&self.settings, row_key).await + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::delete_enum_case_with_row_key(fl_url, row_key).await } pub async fn delete_row( @@ -217,24 +249,26 @@ impl MyNoSqlData partition_key: &str, row_key: &str, ) -> Result, DataWriterError> { - super::execution::delete_row(&self.settings, partition_key, row_key).await + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::delete_row(fl_url, partition_key, row_key).await } pub async fn delete_partitions(&self, partition_keys: &[&str]) -> Result<(), DataWriterError> { - super::execution::delete_partitions(&self.settings, TEntity::TABLE_NAME, partition_keys) - .await + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::delete_partitions(fl_url, TEntity::TABLE_NAME, partition_keys).await } pub async fn get_all(&self) -> Result>, DataWriterError> { - super::execution::get_all(&self.settings).await + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::get_all(fl_url).await } pub async fn clean_table_and_bulk_insert( &self, entities: &[TEntity], ) -> Result<(), DataWriterError> { - super::execution::clean_table_and_bulk_insert(&self.settings, entities, &self.sync_period) - .await + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::clean_table_and_bulk_insert(fl_url, entities, &self.sync_period).await } pub async fn clean_partition_and_bulk_insert( @@ 
-242,8 +276,9 @@ impl MyNoSqlData partition_key: &str, entities: &[TEntity], ) -> Result<(), DataWriterError> { + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; super::execution::clean_partition_and_bulk_insert( - &self.settings, + fl_url, partition_key, entities, &self.sync_period, diff --git a/my-no-sql-data-writer/src/my_no_sql_data_writer/settings.rs b/my-no-sql-data-writer/src/my_no_sql_data_writer/settings.rs index 70d62ef..651b764 100644 --- a/my-no-sql-data-writer/src/my_no_sql_data_writer/settings.rs +++ b/my-no-sql-data-writer/src/my_no_sql_data_writer/settings.rs @@ -1,4 +1,6 @@ #[async_trait::async_trait] pub trait MyNoSqlWriterSettings { async fn get_url(&self) -> String; + fn get_app_name(&self) -> &'static str; + fn get_app_version(&self) -> &'static str; } diff --git a/my-no-sql-data-writer/src/my_no_sql_data_writer/with_retries.rs b/my-no-sql-data-writer/src/my_no_sql_data_writer/with_retries.rs index 3e83836..76b28b3 100644 --- a/my-no-sql-data-writer/src/my_no_sql_data_writer/with_retries.rs +++ b/my-no-sql-data-writer/src/my_no_sql_data_writer/with_retries.rs @@ -1,13 +1,14 @@ -use std::{marker::PhantomData, sync::Arc, time::Duration}; +use std::marker::PhantomData; use my_no_sql_abstractions::{DataSynchronizationPeriod, MyNoSqlEntity, MyNoSqlEntitySerializer}; -use crate::{DataWriterError, MyNoSqlWriterSettings, UpdateReadStatistics}; +use crate::{DataWriterError, UpdateReadStatistics}; + +use super::fl_url_factory::FlUrlFactory; pub struct MyNoSqlDataWriterWithRetries { - settings: Arc, + fl_url_factory: FlUrlFactory, sync_period: DataSynchronizationPeriod, - attempt_delay: Duration, phantom: PhantomData, max_attempts: usize, } @@ -16,84 +17,38 @@ impl MyNoSqlDataWriterWithRetries { pub fn new( - settings: Arc, + fl_url_factory: FlUrlFactory, sync_period: DataSynchronizationPeriod, - attempt_delay: Duration, max_attempts: usize, ) -> Self { Self { - settings, phantom: PhantomData, sync_period, - attempt_delay, + max_attempts, + fl_url_factory, } } pub async fn insert_entity(&self, entity: &TEntity) -> Result<(), DataWriterError> { - let mut attempt_no = 0; - loop { - let result = - super::execution::insert_entity(&self.settings, entity, &self.sync_period).await; - - if result.is_ok() { - return result; - } - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::insert_entity(fl_url, entity, &self.sync_period).await } pub async fn insert_or_replace_entity(&self, entity: &TEntity) -> Result<(), DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::insert_or_replace_entity( - &self.settings, - entity, - &self.sync_period, - ) - .await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::insert_or_replace_entity(fl_url, entity, &self.sync_period).await } pub async fn bulk_insert_or_replace( &self, entities: &[TEntity], ) -> Result<(), DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::bulk_insert_or_replace( - &self.settings, - entities, - &self.sync_period, - ) 
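The minus lines throughout this file delete one and the same hand-rolled pattern: call, count the attempt, sleep, try again. A generic sketch of the removed pattern, with a hypothetical helper rather than the crate's API:

```rust
use std::time::Duration;

// A generic version of the manual retry loop this diff removes: run an async
// operation up to max_attempts extra times, sleeping between failures.
async fn with_manual_retries<T, E, F, Fut>(
    max_attempts: usize,
    delay: Duration,
    mut op: F,
) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    let mut attempt_no = 0;
    loop {
        match op().await {
            Ok(result) => return Ok(result),
            Err(err) => {
                // Mirrors the removed handle_retry_error: give up once the
                // attempt budget is exhausted, otherwise sleep and retry.
                if attempt_no >= max_attempts {
                    return Err(err);
                }
                tokio::time::sleep(delay).await;
                attempt_no += 1;
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let mut calls = 0;
    let result: Result<u32, &str> =
        with_manual_retries(3, Duration::from_millis(10), || {
            calls += 1;
            let ok = calls >= 2; // fail once, then succeed
            async move { if ok { Ok(42) } else { Err("transient") } }
        })
        .await;
    assert_eq!(result, Ok(42));
}
```

The plus lines drop this loop entirely and delegate to `fl_url.with_retries(self.max_attempts)`, pushing retry policy down into the transport; that is also why `delay_between_attempts` disappears from the constructor signature.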
- .await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::bulk_insert_or_replace(fl_url, entities, &self.sync_period).await } pub async fn get_entity( @@ -102,25 +57,15 @@ impl row_key: &str, update_read_statistics: Option, ) -> Result, DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::get_entity( - &self.settings, - partition_key, - row_key, - update_read_statistics.as_ref(), - ) - .await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::get_entity( + fl_url, + partition_key, + row_key, + update_read_statistics.as_ref(), + ) + .await } pub async fn get_by_partition_key( @@ -128,24 +73,14 @@ impl partition_key: &str, update_read_statistics: Option, ) -> Result>, DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::get_by_partition_key( - &self.settings, - partition_key, - update_read_statistics.as_ref(), - ) - .await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::get_by_partition_key( + fl_url, + partition_key, + update_read_statistics.as_ref(), + ) + .await } pub async fn get_enum_case_models_by_partition_key< @@ -159,23 +94,13 @@ impl &self, update_read_statistics: Option, ) -> Result>, DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::get_enum_case_models_by_partition_key( - &self.settings, - update_read_statistics.as_ref(), - ) - .await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::get_enum_case_models_by_partition_key( + fl_url, + update_read_statistics.as_ref(), + ) + .await } pub async fn get_enum_case_model< @@ -189,42 +114,18 @@ impl &self, update_read_statistics: Option, ) -> Result, DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::get_enum_case_model( - &self.settings, - update_read_statistics.as_ref(), - ) - .await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::get_enum_case_model(fl_url, update_read_statistics.as_ref()).await } pub async fn get_by_row_key( &self, row_key: &str, ) -> Result>, DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::get_by_row_key(&self.settings, row_key).await; - - match result { - Ok(result) => return 
Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::get_by_row_key(fl_url, row_key).await } pub async fn delete_enum_case< @@ -237,19 +138,9 @@ impl >( &self, ) -> Result, DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::delete_enum_case(&self.settings).await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::delete_enum_case(fl_url).await } pub async fn delete_enum_case_with_row_key< @@ -263,20 +154,9 @@ impl &self, row_key: &str, ) -> Result, DataWriterError> { - let mut attempt_no = 0; - loop { - let result = - super::execution::delete_enum_case_with_row_key(&self.settings, row_key).await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::delete_enum_case_with_row_key(fl_url, row_key).await } pub async fn delete_row( @@ -284,80 +164,30 @@ impl partition_key: &str, row_key: &str, ) -> Result, DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::delete_row(&self.settings, partition_key, row_key).await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::delete_row(fl_url, partition_key, row_key).await } pub async fn delete_partitions(&self, partition_keys: &[&str]) -> Result<(), DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::delete_partitions( - &self.settings, - TEntity::TABLE_NAME, - partition_keys, - ) - .await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::delete_partitions(fl_url, TEntity::TABLE_NAME, partition_keys).await } pub async fn get_all(&self) -> Result>, DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::get_all(&self.settings).await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::get_all(fl_url).await } pub async fn clean_table_and_bulk_insert( &self, entities: &[TEntity], ) -> Result<(), DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::clean_table_and_bulk_insert( - &self.settings, - entities, - &self.sync_period, - 
) - .await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::clean_table_and_bulk_insert(fl_url, entities, &self.sync_period).await } pub async fn clean_partition_and_bulk_insert( @@ -365,38 +195,23 @@ impl partition_key: &str, entities: &[TEntity], ) -> Result<(), DataWriterError> { - let mut attempt_no = 0; - loop { - let result = super::execution::clean_partition_and_bulk_insert( - &self.settings, - partition_key, - entities, - &self.sync_period, - ) - .await; - - match result { - Ok(result) => return Ok(result), - Err(err) => { - handle_retry_error(err, attempt_no, self.max_attempts, self.attempt_delay) - .await?; - attempt_no += 1 - } - } - } - } -} - -async fn handle_retry_error( - err: DataWriterError, - attempt_no: usize, - max_attempts: usize, - attempt_delay: Duration, -) -> Result<(), DataWriterError> { - if attempt_no < max_attempts { - tokio::time::sleep(attempt_delay).await; - return Ok(()); + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + let fl_url = fl_url.with_retries(self.max_attempts); + super::execution::clean_partition_and_bulk_insert( + fl_url, + partition_key, + entities, + &self.sync_period, + ) + .await + } + + pub async fn get_partition_keys( + &self, + skip: Option, + limit: Option, + ) -> Result, DataWriterError> { + let (fl_url, _) = self.fl_url_factory.get_fl_url().await?; + super::execution::get_partition_keys(fl_url, TEntity::TABLE_NAME, skip, limit).await } - - Err(err) } diff --git a/my-no-sql-data-writer/src/ping_pool.rs b/my-no-sql-data-writer/src/ping_pool.rs new file mode 100644 index 0000000..a5ca77a --- /dev/null +++ b/my-no-sql-data-writer/src/ping_pool.rs @@ -0,0 +1,132 @@ +use std::{collections::HashMap, sync::Arc, time::Duration}; + +use flurl::body::FlUrlBody; +use serde::{Deserialize, Serialize}; +use tokio::sync::Mutex; + +use crate::{FlUrlFactory, MyNoSqlWriterSettings}; + +pub struct PingDataItem { + pub name: &'static str, + pub version: &'static str, + + pub table_settings: Vec<( + String, + Arc, + )>, +} + +pub struct PingPoolInner { + items: Vec, + started: bool, +} + +impl PingPoolInner { + pub fn new() -> Self { + Self { + items: Vec::new(), + started: false, + } + } +} + +pub struct PingPool { + data: Mutex, +} + +impl PingPool { + pub fn new() -> Self { + Self { + data: Mutex::new(PingPoolInner::new()), + } + } + + pub async fn register( + &self, + + settings: Arc, + table: &str, + ) { + let mut data = self.data.lock().await; + if !data.started { + tokio::spawn(async move { ping_loop().await }); + data.started = true; + } + + let index = data.items.iter().position(|x| { + x.name == settings.get_app_name() && x.version == settings.get_app_version() + }); + + if let Some(index) = index { + let item = &mut data.items[index]; + item.table_settings.push((table.to_string(), settings)); + } else { + let item = PingDataItem { + name: settings.get_app_name(), + version: settings.get_app_version(), + + table_settings: vec![((table.to_string(), settings))], + }; + + data.items.push(item); + } + } +} + +async fn ping_loop() { + let delay = Duration::from_secs(30); + loop { + tokio::time::sleep(delay).await; + + let access = crate::PING_POOL.data.lock().await; + + for itm in access.items.iter() { + let mut url_to_ping = HashMap::new(); + for (table, settings) in 
itm.table_settings.iter() { + let url = settings.get_url().await; + let entry = url_to_ping + .entry(url) + .or_insert_with(|| (settings.clone(), Vec::new())); + entry.1.push(table.to_string()); + } + + for (_, (settings, tables)) in url_to_ping { + let factory = FlUrlFactory::new(settings, None, ""); + + let ping_model = PingModel { + name: itm.name.to_string(), + version: itm.version.to_string(), + tables, + }; + + let fl_url = factory.get_fl_url().await; + + if let Err(err) = &fl_url { + println!("{}:{} ping error: {:?}", itm.name, itm.version, err); + continue; + } + + let fl_url_response = fl_url + .unwrap() + .0 + .with_retries(3) + .append_path_segment("api") + .append_path_segment("ping") + .post(FlUrlBody::as_json(&ping_model)) + .await; + + if let Err(err) = &fl_url_response { + println!("{}:{} ping error: {:?}", itm.name, itm.version, err); + continue; + } + } + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PingModel { + pub name: String, + pub version: String, + pub tables: Vec, +} diff --git a/my-no-sql-macros/Cargo.toml b/my-no-sql-macros/Cargo.toml index 767db82..dff5d7b 100644 --- a/my-no-sql-macros/Cargo.toml +++ b/my-no-sql-macros/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "my-no-sql-macros" -version = "0.3.0" +version = "0.4.1" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -9,7 +9,7 @@ edition = "2021" proc-macro = true [dependencies] -types-reader = { tag = "0.5.0", git = "https://github.com/MyJetTools/types-reader.git" } +types-reader = { tag = "0.5.1", git = "https://github.com/MyJetTools/types-reader.git" } syn = { version = "*", features = ["extra-traits"] } diff --git a/my-no-sql-macros/src/entity_utils.rs b/my-no-sql-macros/src/entity_utils.rs index afa6437..c51300c 100644 --- a/my-no-sql-macros/src/entity_utils.rs +++ b/my-no-sql-macros/src/entity_utils.rs @@ -1,11 +1,15 @@ -use quote::ToTokens; +use std::collections::HashSet; -pub fn compile_struct_with_new_fields( +use quote::{quote, ToTokens}; +use syn::Ident; +use types_reader::StructProperty; + +pub fn compile_src_with_new_fields( ast: &proc_macro2::TokenStream, add_pk: bool, add_rk: bool, add_timestamp: bool, -) -> (syn::Ident, proc_macro2::TokenStream) { +) -> (Ident, proc_macro2::TokenStream) { let mut result: Vec = Vec::new(); let mut struct_name = None; let mut passed_struct_name = false; @@ -54,6 +58,94 @@ pub fn compile_struct_with_new_fields( (struct_name, quote::quote!(#(#result)*)) } +pub fn extract_attributes(ast: &proc_macro2::TokenStream) -> proc_macro2::TokenStream { + let mut derive_result = Vec::new(); + + for item in ast.into_token_stream() { + if let proc_macro2::TokenTree::Ident(item) = &item { + let to_str = item.to_string(); + if to_str == "pub" || to_str == "struct" { + break; + } + } + + derive_result.push(item); + } + + quote! 
{#(#derive_result)*} +} + +pub fn compile_struct_with_new_fields( + struct_name: &Ident, + derive: proc_macro2::TokenStream, + fields: &[StructProperty], + render_expires: bool, +) -> Result { + let mut structure_fields = Vec::new(); + + let mut serde_fields = HashSet::new(); + serde_fields.insert("PartitionKey"); + serde_fields.insert("RowKey"); + serde_fields.insert("TimeStamp"); + + for field in fields { + if field.name == "expires" { + if !field.ty.as_str().as_str().ends_with("Timestamp") { + return field.throw_error("Field must be a Timestamp"); + } + } + + if let Some(rename_attr) = field.attrs.try_get_attr("serde") { + let param_rename = rename_attr.try_get_named_param("rename"); + if let Some(param_rename) = param_rename { + let param_rename = param_rename.unwrap_any_value_as_str()?; + let param_rename = param_rename.as_str()?; + + if serde_fields.contains(param_rename) { + return field.throw_error("Field with the same Serde name exists"); + } + + serde_fields.insert(param_rename); + } + } + + if serde_fields.contains(field.name.as_str()) { + return field.throw_error("Field with the same Serde name exists"); + } + + serde_fields.insert(field.name.as_str()); + + let field = field.field; + + structure_fields.push(quote::quote! {#field,}); + } + + if render_expires { + structure_fields.push(quote::quote! { + #[serde(rename="Expires")] + pub expires: my_no_sql_sdk::abstractions::Timestamp, + }); + } + + // #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] + let result = quote! { + + #derive + pub struct #struct_name{ + #[serde(rename="PartitionKey")] + pub partition_key: String, + #[serde(rename="RowKey")] + pub row_key: String, + #[serde(rename="TimeStamp")] + #[serde(skip_serializing_if = "my_no_sql_sdk::abstractions::skip_timestamp_serializing")] + pub time_stamp: my_no_sql_sdk::abstractions::Timestamp, + #(#structure_fields)* + } + }; + + Ok(result) +} + fn populate_tokens( result_tokens: &mut Vec, add_pk: bool, @@ -89,16 +181,14 @@ pub fn get_row_key_token() -> proc_macro2::TokenStream { pub fn get_time_stamp_token() -> proc_macro2::TokenStream { quote::quote! { #[serde(rename = "TimeStamp")] - pub time_stamp: String, + pub time_stamp: my_no_sql_sdk::abstractions::Timestamp, } } pub fn get_fn_get_time_stamp_token() -> proc_macro2::TokenStream { quote::quote! 
{ - fn get_time_stamp(&self) -> i64 { - my_no_sql_sdk::core::rust_extensions::date_time::DateTimeAsMicroseconds::parse_iso_string(self.time_stamp.as_str()) - .unwrap() - .unix_microseconds + fn get_time_stamp(&self) -> my_no_sql_sdk::abstractions::Timestamp { + self.time_stamp } } } @@ -109,8 +199,9 @@ pub fn get_fn_standard_serialize_deserialize() -> proc_macro2::TokenStream { my_no_sql_sdk::core::entity_serializer::serialize(self) } - fn deserialize_entity(src: &[u8]) -> Self { - my_no_sql_sdk::core::entity_serializer::deserialize(src) + + fn deserialize_entity(src: &[u8]) -> Result { + my_no_sql_sdk::core::entity_serializer::deserialize(src) } } } diff --git a/my-no-sql-macros/src/enum_model/generate.rs b/my-no-sql-macros/src/enum_model/generate.rs index 2a370aa..d73e8a7 100644 --- a/my-no-sql-macros/src/enum_model/generate.rs +++ b/my-no-sql-macros/src/enum_model/generate.rs @@ -14,7 +14,7 @@ pub fn generate( let row_key = parameters.row_key; let (struct_name, new_struct) = - compile_struct_with_new_fields(&input, false, row_key.is_none(), true); + compile_src_with_new_fields(&input, false, row_key.is_none(), true); let fn_get_time_stamp = get_fn_get_time_stamp_token(); @@ -50,6 +50,8 @@ pub fn generate( impl my_no_sql_sdk::abstractions::MyNoSqlEntity for #struct_name { const TABLE_NAME: &'static str = ""; + const LAZY_DESERIALIZATION: bool = true; + fn get_partition_key(&self) -> &str { Self::PARTITION_KEY diff --git a/my-no-sql-macros/src/enum_of_my_no_sql_entity/generate.rs b/my-no-sql-macros/src/enum_of_my_no_sql_entity/generate.rs index d730fa1..ea4077a 100644 --- a/my-no-sql-macros/src/enum_of_my_no_sql_entity/generate.rs +++ b/my-no-sql-macros/src/enum_of_my_no_sql_entity/generate.rs @@ -50,6 +50,7 @@ pub fn generate( const TABLE_NAME: &'static str = #table_name; + const LAZY_DESERIALIZATION: bool = true; fn get_partition_key(&self) -> &str { use my_no_sql_sdk::abstractions::MyNoSqlEntity; @@ -65,7 +66,7 @@ pub fn generate( } } - fn get_time_stamp(&self) -> i64 { + fn get_time_stamp(&self) -> my_no_sql_sdk::abstractions::Timestamp { use my_no_sql_sdk::abstractions::MyNoSqlEntity; match self { #time_stamps @@ -86,7 +87,7 @@ pub fn generate( my_no_sql_sdk::core::entity_serializer::inject_partition_key_and_row_key(result, self.get_partition_key(), row_key) } - fn deserialize_entity(src: &[u8]) -> Self { + fn deserialize_entity(src: &[u8]) -> Result { #deserialize_cases } } @@ -162,7 +163,7 @@ fn get_deserialize_cases(enum_cases: &[EnumCase]) -> Result Result Result { #[default] pub table_name: &'s str, + pub with_expires: Option, } #[proc_macro_attribute] pub fn my_no_sql_entity(attr: TokenStream, input: TokenStream) -> TokenStream { - match crate::my_no_sql_entity::generate(attr.into(), input.into()) { + match crate::my_no_sql_entity::generate(attr.into(), input) { Ok(result) => result.into(), Err(err) => err.into_compile_error().into(), } diff --git a/my-no-sql-macros/src/my_no_sql_entity/generate.rs b/my-no-sql-macros/src/my_no_sql_entity/generate.rs index 3a19e14..ebea872 100644 --- a/my-no-sql-macros/src/my_no_sql_entity/generate.rs +++ b/my-no-sql-macros/src/my_no_sql_entity/generate.rs @@ -1,21 +1,36 @@ extern crate proc_macro; use proc_macro::TokenStream; -use types_reader::TokensObject; +use syn::DeriveInput; +use types_reader::{StructProperty, TokensObject}; use crate::MyNoSqlEntityParameters; pub fn generate( attr: proc_macro2::TokenStream, - input: proc_macro2::TokenStream, + input: proc_macro::TokenStream, ) -> Result { - let ast = 
proc_macro2::TokenStream::from(input); + let input_token_stream: proc_macro2::TokenStream = input.clone().into(); + + let derive = crate::entity_utils::extract_attributes(&input_token_stream); + + let input: DeriveInput = syn::parse(input).unwrap(); + + let struct_name = &input.ident; + + let fields = StructProperty::read(&input)?; let attr: TokensObject = attr.try_into()?; let params = MyNoSqlEntityParameters::try_from(&attr)?; - let result = super::generate_base_impl(&ast, params.table_name)?; + let result = super::generate_base_impl( + struct_name, + derive, + fields.as_slice(), + params.table_name, + params.with_expires.unwrap_or(false), + )?; Ok(result.into()) } diff --git a/my-no-sql-macros/src/my_no_sql_entity/generate_base_impl.rs b/my-no-sql-macros/src/my_no_sql_entity/generate_base_impl.rs index 7a8ba96..d5be979 100644 --- a/my-no-sql-macros/src/my_no_sql_entity/generate_base_impl.rs +++ b/my-no-sql-macros/src/my_no_sql_entity/generate_base_impl.rs @@ -1,10 +1,16 @@ +use syn::Ident; +use types_reader::StructProperty; + use crate::entity_utils::*; pub fn generate_base_impl( - ast: &proc_macro2::TokenStream, + struct_name: &Ident, + derive: proc_macro2::TokenStream, + fields: &[StructProperty], table_name: &str, + render_expires: bool, ) -> Result { - let (struct_name, new_struct) = compile_struct_with_new_fields(ast, true, true, true); + let new_struct = compile_struct_with_new_fields(struct_name, derive, fields, render_expires)?; let fn_get_time_stamp = get_fn_get_time_stamp_token(); @@ -18,6 +24,8 @@ pub fn generate_base_impl( const TABLE_NAME: &'static str = #table_name; + const LAZY_DESERIALIZATION: bool = false; + fn get_partition_key(&self) -> &str { &self.partition_key } diff --git a/my-no-sql-sdk/Cargo.toml b/my-no-sql-sdk/Cargo.toml index eee19e0..dc1b8f0 100644 --- a/my-no-sql-sdk/Cargo.toml +++ b/my-no-sql-sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "my-no-sql-sdk" -version = "0.3.0" +version = "0.4.1" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -11,18 +11,27 @@ data-writer = ["dep:my-no-sql-data-writer"] macros = ["dep:my-no-sql-macros", "dep:rust-extensions"] data-reader = ["dep:my-no-sql-tcp-reader"] tcp-contracts = ["dep:my-no-sql-tcp-shared"] -master-node = ["my-no-sql-core/master-node"] +master-node = [ + "my-no-sql-core/master-node", + "my-no-sql-server-core/master-node", +] + +read-node = ["my-no-sql-server-core"] + debug_db_row = ["my-no-sql-core/debug_db_row"] +with-ssh = ["my-no-sql-data-writer?/with-ssh"] + [dependencies] my-no-sql-abstractions = { path = "../my-no-sql-abstractions" } my-no-sql-core = { path = "../my-no-sql-core" } +my-no-sql-server-core = { path = "../my-no-sql-server-core", optional = true } my-no-sql-data-writer = { optional = true, path = "../my-no-sql-data-writer" } my-no-sql-macros = { optional = true, path = "../my-no-sql-macros" } my-no-sql-tcp-reader = { optional = true, path = "../my-no-sql-tcp-reader" } my-no-sql-tcp-shared = { optional = true, path = "../my-no-sql-tcp-shared" } -rust-extensions = { optional = true, tag = "0.1.4", git = "https://github.com/MyJetTools/rust-extensions.git" } +rust-extensions = { optional = true, tag = "0.1.5", git = "https://github.com/MyJetTools/rust-extensions.git" } # my_no_sql::macros::my_no_sql_entity diff --git a/my-no-sql-sdk/src/lib.rs b/my-no-sql-sdk/src/lib.rs index e77b4a7..f7ec51b 100644 --- a/my-no-sql-sdk/src/lib.rs +++ b/my-no-sql-sdk/src/lib.rs @@ -15,3 +15,6 @@ pub extern crate my_no_sql_tcp_reader as 
reader; #[cfg(feature = "tcp-contracts")] pub extern crate my_no_sql_tcp_shared as tcp_contracts; + +#[cfg(any(feature = "master-node", feature = "read-node"))] +pub extern crate my_no_sql_server_core as server; diff --git a/my-no-sql-server-core/Cargo.toml b/my-no-sql-server-core/Cargo.toml new file mode 100644 index 0000000..18df88f --- /dev/null +++ b/my-no-sql-server-core/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "my-no-sql-server-core" +version = "0.4.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +default = [] +master-node = ["my-no-sql-core/master-node"] + +[dependencies] +rust-extensions = { tag = "0.1.5", git = "https://github.com/MyJetTools/rust-extensions.git", features = [ + "with-tokio", + "base64", + +] } +tokio = { version = "*", features = ["full"] } +my-no-sql-core = { path = "../my-no-sql-core" } diff --git a/my-no-sql-server-core/src/db_instance.rs b/my-no-sql-server-core/src/db_instance.rs new file mode 100644 index 0000000..f3298a5 --- /dev/null +++ b/my-no-sql-server-core/src/db_instance.rs @@ -0,0 +1,50 @@ +use my_no_sql_core::db::DbTableName; +use tokio::sync::RwLock; + +use std::{collections::HashMap, sync::Arc}; + +use super::DbTable; + +pub struct DbInstance { + pub tables: RwLock>>, +} + +impl DbInstance { + pub fn new() -> DbInstance { + DbInstance { + tables: RwLock::new(HashMap::new()), + } + } + + pub async fn get_table_names(&self) -> Vec { + let read_access = self.tables.read().await; + + return read_access + .values() + .into_iter() + .map(|table| table.name.clone()) + .collect(); + } + + pub async fn get_tables(&self) -> Vec> { + let read_access = self.tables.read().await; + + return read_access + .values() + .into_iter() + .map(|table| table.clone()) + .collect(); + } + + pub async fn get_table(&self, table_name: &str) -> Option> { + let read_access = self.tables.read().await; + + let result = read_access.get(table_name)?; + return Some(result.clone()); + } + + pub async fn delete_table(&self, table_name: &str) -> Option> { + let mut write_access = self.tables.write().await; + write_access.remove(table_name) + } +} diff --git a/my-no-sql-server-core/src/db_snapshots/db_partition_snapshot.rs b/my-no-sql-server-core/src/db_snapshots/db_partition_snapshot.rs new file mode 100644 index 0000000..d2a6e88 --- /dev/null +++ b/my-no-sql-server-core/src/db_snapshots/db_partition_snapshot.rs @@ -0,0 +1,41 @@ +use my_no_sql_core::db::{DbPartition, PartitionKey}; + +use super::DbRowsSnapshot; + +pub struct DbPartitionSnapshot { + #[cfg(feature = "master-node")] + pub last_read_moment: rust_extensions::date_time::DateTimeAsMicroseconds, + #[cfg(feature = "master-node")] + pub last_write_moment: rust_extensions::date_time::DateTimeAsMicroseconds, + pub partition_key: PartitionKey, + pub db_rows_snapshot: DbRowsSnapshot, +} + +#[cfg(feature = "master-node")] +impl DbPartitionSnapshot { + pub fn has_to_persist( + &self, + written_in_blob: rust_extensions::date_time::DateTimeAsMicroseconds, + ) -> bool { + written_in_blob.unix_microseconds < self.last_write_moment.unix_microseconds + } +} + +impl Into for &DbPartition { + fn into(self) -> DbRowsSnapshot { + DbRowsSnapshot::new_from_snapshot(self.rows.get_all().map(|itm| itm.clone()).collect()) + } +} + +impl Into for &DbPartition { + fn into(self) -> DbPartitionSnapshot { + DbPartitionSnapshot { + #[cfg(feature = "master-node")] + last_read_moment: self.last_read_moment.as_date_time(), + #[cfg(feature = "master-node")] + 
last_write_moment: self.last_write_moment, + partition_key: self.partition_key.clone(), + db_rows_snapshot: self.into(), + } + } +} diff --git a/my-no-sql-server-core/src/db_snapshots/db_rows_by_partitions_snapshot.rs b/my-no-sql-server-core/src/db_snapshots/db_rows_by_partitions_snapshot.rs new file mode 100644 index 0000000..89d8539 --- /dev/null +++ b/my-no-sql-server-core/src/db_snapshots/db_rows_by_partitions_snapshot.rs @@ -0,0 +1,61 @@ +use std::sync::Arc; + +use my_no_sql_core::db::{DbRow, PartitionKey, PartitionKeyParameter}; +use my_no_sql_core::my_json::json_writer::JsonArrayWriter; + +pub struct DbRowsByPartitionsSnapshot { + pub partitions: Vec<(PartitionKey, Vec>)>, +} + +impl DbRowsByPartitionsSnapshot { + pub fn new() -> Self { + Self { + partitions: Vec::new(), + } + } + + pub fn has_elements(&self) -> bool { + self.partitions.len() > 0 + } + + fn get_or_create_partition( + &mut self, + partition_key: impl PartitionKeyParameter, + ) -> &mut Vec> { + let index = self + .partitions + .binary_search_by(|itm| itm.0.as_str().cmp(partition_key.as_str())); + + match index { + Ok(index) => self.partitions.get_mut(index).unwrap().1.as_mut(), + Err(index) => { + self.partitions + .insert(index, (partition_key.to_partition_key(), Vec::new())); + self.partitions.get_mut(index).unwrap().1.as_mut() + } + } + } + + pub fn add_row(&mut self, partition_key: impl PartitionKeyParameter, db_row: Arc) { + self.get_or_create_partition(partition_key).push(db_row); + } + + pub fn add_rows( + &mut self, + partition_key: impl PartitionKeyParameter, + db_rows: Vec>, + ) { + self.get_or_create_partition(partition_key).extend(db_rows); + } + + pub fn as_json_array(&self) -> JsonArrayWriter { + let mut json_array_writer = JsonArrayWriter::new(); + for (_, snapshot) in self.partitions.iter() { + for db_row in snapshot { + json_array_writer = json_array_writer.write(db_row.as_ref()); + } + } + + json_array_writer + } +} diff --git a/my-no-sql-server-core/src/db_snapshots/db_rows_snapshot.rs b/my-no-sql-server-core/src/db_snapshots/db_rows_snapshot.rs new file mode 100644 index 0000000..28288e5 --- /dev/null +++ b/my-no-sql-server-core/src/db_snapshots/db_rows_snapshot.rs @@ -0,0 +1,43 @@ +use std::sync::Arc; + +use my_no_sql_core::db::DbRow; +use my_no_sql_core::my_json::json_writer::JsonArrayWriter; + +pub struct DbRowsSnapshot { + pub db_rows: Vec>, +} + +impl DbRowsSnapshot { + pub fn new() -> Self { + Self { + db_rows: Vec::new(), + } + } + + pub fn new_from_snapshot(db_rows: Vec>) -> Self { + Self { db_rows } + } + + pub fn with_capacity(capacity: usize) -> Self { + Self { + db_rows: Vec::with_capacity(capacity), + } + } + + pub fn push(&mut self, db_row: Arc) { + self.db_rows.push(db_row); + } + + pub fn len(&self) -> usize { + self.db_rows.len() + } + + pub fn as_json_array(&self) -> JsonArrayWriter { + let mut json_array_writer = JsonArrayWriter::new(); + for db_row in &self.db_rows { + json_array_writer = json_array_writer.write(db_row.as_ref()); + } + + json_array_writer + } +} diff --git a/my-no-sql-server-core/src/db_snapshots/db_table_snapshot.rs b/my-no-sql-server-core/src/db_snapshots/db_table_snapshot.rs new file mode 100644 index 0000000..6267700 --- /dev/null +++ b/my-no-sql-server-core/src/db_snapshots/db_table_snapshot.rs @@ -0,0 +1,47 @@ +use my_no_sql_core::db::DbTableInner; +use my_no_sql_core::my_json::json_writer::JsonArrayWriter; +#[cfg(feature = "master-node")] +use rust_extensions::date_time::DateTimeAsMicroseconds; + +use super::DbPartitionSnapshot; + +pub struct 
DbTableSnapshot { + #[cfg(feature = "master-node")] + pub attr: my_no_sql_core::db::DbTableAttributes, + #[cfg(feature = "master-node")] + pub last_write_moment: DateTimeAsMicroseconds, + pub by_partition: Vec, +} + +impl DbTableSnapshot { + pub fn new( + #[cfg(feature = "master-node")] last_write_moment: DateTimeAsMicroseconds, + db_table: &DbTableInner, + ) -> Self { + let mut by_partition = Vec::new(); + + for db_partition in db_table.partitions.get_partitions() { + by_partition.push(db_partition.into()); + } + + Self { + #[cfg(feature = "master-node")] + attr: db_table.attributes.clone(), + #[cfg(feature = "master-node")] + last_write_moment, + by_partition, + } + } + + pub fn as_json_array(&self) -> JsonArrayWriter { + let mut json_array_writer = JsonArrayWriter::new(); + + for db_partition_snapshot in self.by_partition.iter() { + for db_row in &db_partition_snapshot.db_rows_snapshot.db_rows { + json_array_writer = json_array_writer.write(db_row.as_ref()); + } + } + + json_array_writer + } +} diff --git a/my-no-sql-server-core/src/db_snapshots/mod.rs b/my-no-sql-server-core/src/db_snapshots/mod.rs new file mode 100644 index 0000000..fdcc05a --- /dev/null +++ b/my-no-sql-server-core/src/db_snapshots/mod.rs @@ -0,0 +1,10 @@ +mod db_partition_snapshot; +mod db_rows_by_partitions_snapshot; +mod db_rows_snapshot; + +mod db_table_snapshot; +pub use db_partition_snapshot::DbPartitionSnapshot; +pub use db_rows_by_partitions_snapshot::DbRowsByPartitionsSnapshot; +pub use db_rows_snapshot::DbRowsSnapshot; + +pub use db_table_snapshot::DbTableSnapshot; diff --git a/my-no-sql-server-core/src/db_table.rs b/my-no-sql-server-core/src/db_table.rs new file mode 100644 index 0000000..e487f7e --- /dev/null +++ b/my-no-sql-server-core/src/db_table.rs @@ -0,0 +1,98 @@ +use std::{collections::VecDeque, sync::Arc}; + +use crate::db_snapshots::{DbPartitionSnapshot, DbTableSnapshot}; +use my_no_sql_core::db::*; +use my_no_sql_core::my_json::json_writer::JsonArrayWriter; +use tokio::sync::RwLock; + +#[cfg(feature = "master-node")] +use my_no_sql_core::db::DbTableAttributes; + +pub struct DbTable { + pub name: DbTableName, + pub data: RwLock, +} + +impl DbTable { + pub fn new(db_table: DbTableInner) -> Arc { + let result = Self { + name: db_table.name.clone(), + data: RwLock::new(db_table), + }; + + Arc::new(result) + } + + pub async fn get_table_as_json_array(&self) -> JsonArrayWriter { + let read_access = self.data.read().await; + read_access.get_table_as_json_array() + } + + pub async fn get_all_as_vec_dequeue(&self) -> VecDeque> { + let read_access = self.data.read().await; + + let mut result = VecDeque::new(); + + for (_, db_row) in read_access.get_all_rows(None, None) { + result.push_back(db_row.clone()); + } + + result + } + + pub async fn get_table_snapshot(&self) -> DbTableSnapshot { + let read_access = self.data.read().await; + + DbTableSnapshot { + #[cfg(feature = "master-node")] + last_write_moment: read_access.get_last_write_moment(), + by_partition: get_partitions_snapshot(&read_access), + #[cfg(feature = "master-node")] + attr: read_access.attributes.clone(), + } + } + + pub async fn get_partitions_amount(&self) -> usize { + let read_access = self.data.read().await; + read_access.partitions.len() + } + #[cfg(feature = "master-node")] + pub async fn get_persist_table(&self) -> bool { + let read_access = self.data.read().await; + read_access.attributes.persist + } + + pub async fn get_table_size(&self) -> usize { + let read_access = self.data.read().await; + read_access.get_table_size() + } + + 
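[annotation — not part of the patch: the new `DbTable` wrapper couples a cloned `DbTableName` with an `RwLock<DbTableInner>`, and each async accessor above takes its own short-lived read lock. A minimal usage sketch, built only from APIs visible in this diff (`DbInstance::get_tables`, `DbTable::get_partitions_amount`, `DbTable::get_table_size`); treating `DbTableName` as `Debug`-printable is an assumption:]

```rust
use my_no_sql_server_core::DbInstance;

// Sketch: walk every table registered in a DbInstance and print basic stats.
// Note the two awaits per table take separate read locks, so a writer could
// slip in between them; fine for diagnostics, not for invariants.
async fn print_table_stats(db: &DbInstance) {
    for table in db.get_tables().await {
        let partitions = table.get_partitions_amount().await;
        let size = table.get_table_size().await;
        // Assumes `DbTableName` implements `Debug`.
        println!("table {:?}: {} partitions, {} bytes", table.name, partitions, size);
    }
}
```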
#[cfg(feature = "master-node")] + pub async fn get_max_partitions_amount(&self) -> Option { + let read_access = self.data.read().await; + read_access.attributes.max_partitions_amount + } + + #[cfg(feature = "master-node")] + pub async fn get_attributes(&self) -> DbTableAttributes { + let read_access = self.data.read().await; + read_access.attributes.clone() + } + + #[cfg(feature = "master-node")] + pub async fn get_partition_snapshot(&self, partition_key: &str) -> Option { + let read_access = self.data.read().await; + let db_partition = read_access.get_partition(partition_key)?; + Some(db_partition.into()) + } +} + +fn get_partitions_snapshot(db_table: &DbTableInner) -> Vec { + let mut result = Vec::with_capacity(db_table.partitions.len()); + + for db_partition in db_table.partitions.get_partitions() { + result.push(db_partition.into()); + } + + result +} diff --git a/my-no-sql-server-core/src/lib.rs b/my-no-sql-server-core/src/lib.rs new file mode 100644 index 0000000..20a19fd --- /dev/null +++ b/my-no-sql-server-core/src/lib.rs @@ -0,0 +1,7 @@ +mod db_instance; +mod db_table; +pub use db_instance::*; +pub use db_table::*; + +pub mod db_snapshots; +pub extern crate rust_extensions; diff --git a/my-no-sql-tcp-reader/Cargo.toml b/my-no-sql-tcp-reader/Cargo.toml index 4103860..504a71d 100644 --- a/my-no-sql-tcp-reader/Cargo.toml +++ b/my-no-sql-tcp-reader/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "my-no-sql-tcp-reader" -version = "0.3.0" +version = "0.4.1" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -13,14 +13,14 @@ my-no-sql-tcp-shared = { path = "../my-no-sql-tcp-shared" } my-no-sql-abstractions = { path = "../my-no-sql-abstractions" } my-no-sql-core = { path = "../my-no-sql-core" } -rust-extensions = { tag = "0.1.4", git = "https://github.com/MyJetTools/rust-extensions.git" } -my-tcp-sockets = { tag = "0.1.9", git = "https://github.com/MyJetTools/my-tcp-sockets.git" } -my-logger = { tag = "1.1.0", git = "https://github.com/MyJetTools/my-logger.git" } -my-json = { tag = "0.2.2", git = "https://github.com/MyJetTools/my-json.git" } +rust-extensions = { tag = "0.1.5", git = "https://github.com/MyJetTools/rust-extensions.git" } +my-tcp-sockets = { tag = "0.1.12", git = "https://github.com/MyJetTools/my-tcp-sockets.git" } +my-logger = { tag = "1.2.1", git = "https://github.com/MyJetTools/my-logger.git" } +my-json = { tag = "0.3.2", git = "https://github.com/MyJetTools/my-json.git" } tokio = { version = "*", features = ["full"] } tokio-util = "*" -async-trait = "0.1.72" +async-trait = "*" serde = { version = "*", features = ["derive"] } serde_json = "*" serde_derive = "*" diff --git a/my-no-sql-tcp-reader/src/data_reader_entities_set.rs b/my-no-sql-tcp-reader/src/data_reader_entities_set.rs new file mode 100644 index 0000000..e3c880f --- /dev/null +++ b/my-no-sql-tcp-reader/src/data_reader_entities_set.rs @@ -0,0 +1,213 @@ +use std::{collections::BTreeMap, sync::Arc}; + +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; + +use crate::subscribers::{LazyMyNoSqlEntity, MyNoSqlDataReaderCallBacksPusher}; + +pub struct DataReaderEntitiesSet< + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, +> { + entities: Option>>>, + table_name: &'static str, +} + +impl + DataReaderEntitiesSet +{ + pub fn new(table_name: &'static str) -> Self { + Self { + entities: None, + table_name, + } + } + + pub fn is_initialized(&self) -> bool { + self.entities.is_some() + } + + pub fn as_ref( + 
&self, + ) -> Option<&BTreeMap>>> { + self.entities.as_ref() + } + + pub fn as_mut( + &mut self, + ) -> Option<&mut BTreeMap>>> { + self.entities.as_mut() + } + + fn init_and_get_table( + &mut self, + ) -> &mut BTreeMap>> { + if self.entities.is_none() { + println!("MyNoSqlTcpReader table {} is initialized", self.table_name); + self.entities = Some(BTreeMap::new()); + return self.entities.as_mut().unwrap(); + } + + return self.entities.as_mut().unwrap(); + } + + pub fn init_table<'s>( + &'s mut self, + data: BTreeMap>>, + ) -> InitTableResult<'s, TMyNoSqlEntity> { + let mut new_table: BTreeMap>> = + BTreeMap::new(); + + for (partition_key, src_entities_by_partition) in data { + new_table.insert(partition_key.to_string(), BTreeMap::new()); + + let by_partition = new_table.get_mut(partition_key.as_str()).unwrap(); + + for entity in src_entities_by_partition { + by_partition.insert(entity.get_row_key().to_string(), entity); + } + } + + let table_before = self.entities.replace(new_table); + + InitTableResult { + table_now: self.entities.as_ref().unwrap(), + table_before, + } + } + + pub fn init_partition<'s>( + &'s mut self, + partition_key: &str, + src_entities: BTreeMap>>, + ) -> InitPartitionResult<'s, TMyNoSqlEntity> { + let entities = self.init_and_get_table(); + + let mut new_partition = BTreeMap::new(); + + let before_partition = entities.remove(partition_key); + + for (row_key, entities) in src_entities { + for entity in entities { + new_partition.insert(row_key.clone(), entity); + } + } + + entities.insert(partition_key.to_string(), new_partition); + + InitPartitionResult { + partition_before: entities.get(partition_key).unwrap(), + partition_now: before_partition, + } + } + + pub fn update_rows( + &mut self, + src_data: BTreeMap>>, + callbacks: &Option>>, + ) { + let entities = self.init_and_get_table(); + + for (partition_key, src_entities) in src_data { + let mut updates = if callbacks.is_some() { + Some(Vec::new()) + } else { + None + }; + + if !entities.contains_key(partition_key.as_str()) { + entities.insert(partition_key.to_string(), BTreeMap::new()); + } + + let by_partition = entities.get_mut(partition_key.as_str()).unwrap(); + + for entity in src_entities { + if let Some(updates) = updates.as_mut() { + updates.push(entity.clone()); + } + by_partition.insert(entity.get_row_key().to_string(), entity); + } + + if let Some(callbacks) = callbacks { + if let Some(updates) = updates { + if updates.len() > 0 { + callbacks.inserted_or_replaced(partition_key.as_str(), updates); + } + } + } + } + } + + pub fn delete_rows( + &mut self, + rows_to_delete: Vec, + callbacks: &Option>>, + ) { + let mut deleted_rows = if callbacks.is_some() { + Some(BTreeMap::new()) + } else { + None + }; + + let entities = self.init_and_get_table(); + + for row_to_delete in &rows_to_delete { + let mut delete_partition = false; + if let Some(partition) = entities.get_mut(row_to_delete.partition_key.as_str()) { + if partition.remove(row_to_delete.row_key.as_str()).is_some() { + if let Some(deleted_rows) = deleted_rows.as_mut() { + if !deleted_rows.contains_key(row_to_delete.partition_key.as_str()) { + deleted_rows + .insert(row_to_delete.partition_key.to_string(), Vec::new()); + } + + deleted_rows + .get_mut(row_to_delete.partition_key.as_str()) + .unwrap() + .push( + partition + .get(row_to_delete.row_key.as_str()) + .unwrap() + .clone(), + ); + } + } + + delete_partition = partition.len() == 0; + } + + if delete_partition { + entities.remove(row_to_delete.partition_key.as_str()); + } + } + + if let 
Some(callbacks) = callbacks.as_ref() { + if let Some(partitions) = deleted_rows { + for (partition_key, rows) in partitions { + callbacks.deleted(partition_key.as_str(), rows); + } + } + } + } + + pub fn get_partition_keys(&self) -> Vec { + match self.entities.as_ref() { + Some(entities) => entities.keys().cloned().collect(), + None => Vec::new(), + } + } +} + +pub struct InitTableResult< + 's, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, +> { + pub table_now: &'s BTreeMap>>, + pub table_before: Option>>>, +} + +pub struct InitPartitionResult< + 's, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, +> { + pub partition_before: &'s BTreeMap>, + pub partition_now: Option>>, +} diff --git a/my-no-sql-tcp-reader/src/lib.rs b/my-no-sql-tcp-reader/src/lib.rs index 1776664..e781e1a 100644 --- a/my-no-sql-tcp-reader/src/lib.rs +++ b/my-no-sql-tcp-reader/src/lib.rs @@ -1,11 +1,16 @@ +mod data_reader_entities_set; mod my_no_sql_tcp_connection; mod settings; mod subscribers; mod tcp_events; +pub use data_reader_entities_set::*; pub use my_no_sql_tcp_connection::MyNoSqlTcpConnection; pub use settings::*; -pub use subscribers::{MyNoSqlDataReaderCallBacks, MyNoSqlDataReaderData, MyNoSqlDataReaderTcp, MyNoSqlDataReader}; +pub use subscribers::{ + LazyMyNoSqlEntity, MyNoSqlDataReader, MyNoSqlDataReaderCallBacks, MyNoSqlDataReaderData, + MyNoSqlDataReaderTcp, +}; #[cfg(feature = "mocks")] pub use subscribers::MyNoSqlDataReaderMock; diff --git a/my-no-sql-tcp-reader/src/my_no_sql_tcp_connection.rs b/my-no-sql-tcp-reader/src/my_no_sql_tcp_connection.rs index de05a83..1c73151 100644 --- a/my-no-sql-tcp-reader/src/my_no_sql_tcp_connection.rs +++ b/my-no-sql-tcp-reader/src/my_no_sql_tcp_connection.rs @@ -2,7 +2,7 @@ use std::{sync::Arc, time::Duration}; use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; use my_no_sql_tcp_shared::{sync_to_main::SyncToMainNodeHandler, MyNoSqlTcpSerializerFactory}; -use my_tcp_sockets::TcpClient; +use my_tcp_sockets::{TcpClient, TlsSettings}; use rust_extensions::{AppStates, StrOrString}; use crate::{ @@ -18,6 +18,10 @@ impl my_tcp_sockets::TcpClientSocketSettings for TcpConnectionSettings { async fn get_host_port(&self) -> Option { self.settings.get_host_port().await.into() } + + async fn get_tls_settings(&self) -> Option { + None + } } pub struct MyNoSqlTcpConnection { diff --git a/my-no-sql-tcp-reader/src/subscribers/callback_triggers.rs b/my-no-sql-tcp-reader/src/subscribers/callback_triggers.rs index b62448b..d3ba745 100644 --- a/my-no-sql-tcp-reader/src/subscribers/callback_triggers.rs +++ b/my-no-sql-tcp-reader/src/subscribers/callback_triggers.rs @@ -1,16 +1,16 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; -use my_no_sql_abstractions::MyNoSqlEntity; +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; -use super::MyNoSqlDataReaderCallBacks; +use super::{LazyMyNoSqlEntity, MyNoSqlDataReaderCallBacks}; pub async fn trigger_table_difference< - TMyNoSqlEntity: MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, TMyNoSqlDataReaderCallBacks: MyNoSqlDataReaderCallBacks, >( callbacks: &TMyNoSqlDataReaderCallBacks, - before: Option>>>, - now_entities: &BTreeMap>>, + before: Option>>>, + now_entities: &BTreeMap>>, ) { match before { Some(before) => { @@ -23,11 +23,11 @@ pub async fn trigger_table_difference< } pub async fn trigger_brand_new_table< - TMyNoSqlEntity: 
MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, TMyNoSqlDataReaderCallBacks: MyNoSqlDataReaderCallBacks, >( callbacks: &TMyNoSqlDataReaderCallBacks, - now_entities: &BTreeMap>>, + now_entities: &BTreeMap>>, ) { for (partition_key, now_partition) in now_entities { let mut added_entities = Vec::new(); @@ -44,12 +44,12 @@ pub async fn trigger_brand_new_table< } pub async fn trigger_old_and_new_table_difference< - TMyNoSqlEntity: MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, TMyNoSqlDataReaderCallBacks: MyNoSqlDataReaderCallBacks, >( callbacks: &TMyNoSqlDataReaderCallBacks, - mut before: BTreeMap>>, - now_entities: &BTreeMap>>, + mut before: BTreeMap>>, + now_entities: &BTreeMap>>, ) { for (now_partition_key, now_partition) in now_entities { let before_partition = before.remove(now_partition_key); @@ -79,13 +79,13 @@ pub async fn trigger_old_and_new_table_difference< } pub async fn trigger_partition_difference< - TMyNoSqlEntity: MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, TMyNoSqlDataReaderCallBacks: MyNoSqlDataReaderCallBacks, >( callbacks: &TMyNoSqlDataReaderCallBacks, partition_key: &str, - before_partition: Option>>, - now_partition: &BTreeMap>, + before_partition: Option>>, + now_partition: &BTreeMap>, ) { match before_partition { Some(mut before_partition) => { @@ -125,12 +125,12 @@ pub async fn trigger_partition_difference< } pub async fn trigger_brand_new_partition< - TMyNoSqlEntity: MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, TMyNoSqlDataReaderCallBacks: MyNoSqlDataReaderCallBacks, >( callbacks: &TMyNoSqlDataReaderCallBacks, partition_key: &str, - partition: &BTreeMap>, + partition: &BTreeMap>, ) { let mut inserted_or_replaced = Vec::new(); for entity in partition.values() { @@ -146,17 +146,17 @@ pub async fn trigger_brand_new_partition< #[cfg(test)] mod tests { - use std::{collections::BTreeMap, sync::Arc}; + use std::collections::BTreeMap; - use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; + use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer, Timestamp}; use serde_derive::{Deserialize, Serialize}; use tokio::sync::Mutex; - use crate::subscribers::MyNoSqlDataReaderCallBacks; + use crate::subscribers::{LazyMyNoSqlEntity, MyNoSqlDataReaderCallBacks}; struct TestCallbacksInner { - inserted_or_replaced_entities: BTreeMap>>, - deleted: BTreeMap>>, + inserted_or_replaced_entities: BTreeMap>>, + deleted: BTreeMap>>, } pub struct TestCallbacks { @@ -176,7 +176,11 @@ mod tests { #[async_trait::async_trait] impl MyNoSqlDataReaderCallBacks for TestCallbacks { - async fn inserted_or_replaced(&self, partition_key: &str, entities: Vec>) { + async fn inserted_or_replaced( + &self, + partition_key: &str, + entities: Vec>, + ) { let mut write_access = self.data.lock().await; match write_access .inserted_or_replaced_entities @@ -194,7 +198,7 @@ mod tests { } } - async fn deleted(&self, partition_key: &str, entities: Vec>) { + async fn deleted(&self, partition_key: &str, entities: Vec>) { let mut write_access = self.data.lock().await; match write_access.deleted.get_mut(partition_key) { Some(db_partition) => { @@ -229,6 +233,7 @@ mod tests { impl MyNoSqlEntity for TestRow { const TABLE_NAME: &'static str = "Test"; + const LAZY_DESERIALIZATION: bool = false; fn 
get_partition_key(&self) -> &str { self.partition_key.as_str() @@ -236,8 +241,8 @@ mod tests { fn get_row_key(&self) -> &str { self.row_key.as_str() } - fn get_time_stamp(&self) -> i64 { - self.timestamp + fn get_time_stamp(&self) -> Timestamp { + self.timestamp.into() } } @@ -246,7 +251,7 @@ mod tests { my_no_sql_core::entity_serializer::serialize(self) } - fn deserialize_entity(src: &[u8]) -> Self { + fn deserialize_entity(src: &[u8]) -> Result { my_no_sql_core::entity_serializer::deserialize(src) } } @@ -255,15 +260,15 @@ mod tests { pub async fn test_we_had_data_in_table_and_new_table_is_empty() { let test_callback = TestCallbacks::new(); - let mut before_rows = BTreeMap::new(); + let mut before_rows: BTreeMap> = BTreeMap::new(); before_rows.insert( "RK1".to_string(), - Arc::new(TestRow::new("PK1".to_string(), "RK1".to_string(), 1)), + TestRow::new("PK1".to_string(), "RK1".to_string(), 1).into(), ); before_rows.insert( "RK2".to_string(), - Arc::new(TestRow::new("PK1".to_string(), "RK2".to_string(), 1)), + TestRow::new("PK1".to_string(), "RK2".to_string(), 1).into(), ); let mut before = BTreeMap::new(); @@ -283,15 +288,15 @@ mod tests { pub async fn test_brand_new_table() { let test_callback = TestCallbacks::new(); - let mut after_rows = BTreeMap::new(); + let mut after_rows: BTreeMap> = BTreeMap::new(); after_rows.insert( "RK1".to_string(), - Arc::new(TestRow::new("PK1".to_string(), "RK1".to_string(), 1)), + TestRow::new("PK1".to_string(), "RK1".to_string(), 1).into(), ); after_rows.insert( "RK2".to_string(), - Arc::new(TestRow::new("PK1".to_string(), "RK2".to_string(), 1)), + TestRow::new("PK1".to_string(), "RK2".to_string(), 1).into(), ); let mut after = BTreeMap::new(); @@ -315,24 +320,24 @@ mod tests { pub async fn test_we_have_updates_in_table() { let test_callback = TestCallbacks::new(); - let mut before_partition = BTreeMap::new(); + let mut before_partition: BTreeMap> = BTreeMap::new(); before_partition.insert( "RK1".to_string(), - Arc::new(TestRow::new("PK1".to_string(), "RK1".to_string(), 1)), + TestRow::new("PK1".to_string(), "RK1".to_string(), 1).into(), ); before_partition.insert( "RK2".to_string(), - Arc::new(TestRow::new("PK1".to_string(), "RK2".to_string(), 1)), + TestRow::new("PK1".to_string(), "RK2".to_string(), 1).into(), ); let mut before = BTreeMap::new(); before.insert("PK1".to_string(), before_partition); - let mut after_partition = BTreeMap::new(); + let mut after_partition: BTreeMap> = BTreeMap::new(); after_partition.insert( "RK2".to_string(), - Arc::new(TestRow::new("PK1".to_string(), "RK2".to_string(), 2)), + TestRow::new("PK1".to_string(), "RK2".to_string(), 2).into(), ); let mut after = BTreeMap::new(); diff --git a/my-no-sql-tcp-reader/src/subscribers/entity_with_lazy_deserialization.rs b/my-no-sql-tcp-reader/src/subscribers/entity_with_lazy_deserialization.rs new file mode 100644 index 0000000..6843762 --- /dev/null +++ b/my-no-sql-tcp-reader/src/subscribers/entity_with_lazy_deserialization.rs @@ -0,0 +1,87 @@ +use std::{fmt::Debug, sync::Arc}; + +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; +use my_no_sql_core::db_json_entity::DbJsonEntity; + +pub struct EntityRawData { + pub db_json_entity: DbJsonEntity, + pub data: Vec, +} + +pub enum LazyMyNoSqlEntity< + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, +> { + Raw(Arc), + Deserialized(Arc), +} + +impl + LazyMyNoSqlEntity +{ + pub fn get_partition_key(&self) -> &str { + match self { + LazyMyNoSqlEntity::Deserialized(entity) => 
entity.get_partition_key(), + LazyMyNoSqlEntity::Raw(src) => src.db_json_entity.get_partition_key(&src.data), + } + } + + pub fn get_row_key(&self) -> &str { + match self { + LazyMyNoSqlEntity::Deserialized(entity) => entity.get_row_key(), + LazyMyNoSqlEntity::Raw(src) => src.db_json_entity.get_row_key(&src.data), + } + } + + pub fn get(&mut self) -> &Arc { + match self { + LazyMyNoSqlEntity::Deserialized(entity) => return entity, + LazyMyNoSqlEntity::Raw(src) => { + let entity = TMyNoSqlEntity::deserialize_entity(&src.data).unwrap(); + let entity = Arc::new(entity); + *self = LazyMyNoSqlEntity::Deserialized(entity.clone()); + } + } + + match self { + LazyMyNoSqlEntity::Deserialized(entity) => entity, + LazyMyNoSqlEntity::Raw(_) => panic!("We should have deserialized it"), + } + } + + pub fn clone(&self) -> Self { + match self { + LazyMyNoSqlEntity::Deserialized(entity) => { + LazyMyNoSqlEntity::Deserialized(entity.clone()) + } + LazyMyNoSqlEntity::Raw(src) => LazyMyNoSqlEntity::Raw(src.clone()), + } + } +} + +impl + From for LazyMyNoSqlEntity +{ + fn from(value: TMyNoSqlEntity) -> Self { + LazyMyNoSqlEntity::Deserialized(Arc::new(value)) + } +} + +impl + std::fmt::Debug for LazyMyNoSqlEntity +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + LazyMyNoSqlEntity::Deserialized(entity) => write!(f, "Deserialized({:?})", entity), + LazyMyNoSqlEntity::Raw(data) => { + write!( + f, + "Raw(PartitionKey: {}, RowKey: {}, Timestamp:{:?}, DataSize: {})", + self.get_partition_key(), + self.get_row_key(), + data.db_json_entity.get_time_stamp(data.data.as_slice()), + data.data.len() + ) + } + } + } +} diff --git a/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder.rs b/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder.rs index 8b2233e..a22b32c 100644 --- a/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder.rs +++ b/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use my_no_sql_abstractions::MyNoSqlEntity; +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; use rust_extensions::date_time::DateTimeAsMicroseconds; use super::{super::my_no_sql_data_reader_tcp::MyNoSqlDataReaderInner, GetEntitiesBuilderInner}; @@ -8,13 +8,17 @@ use super::{super::my_no_sql_data_reader_tcp::MyNoSqlDataReaderInner, GetEntitie #[cfg(feature = "mocks")] use super::GetEntitiesBuilderMock; -pub enum GetEntitiesBuilder { +pub enum GetEntitiesBuilder< + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static, +> { Inner(GetEntitiesBuilderInner), #[cfg(feature = "mocks")] Mock(GetEntitiesBuilderMock), } -impl GetEntitiesBuilder { +impl + GetEntitiesBuilder +{ pub fn new(partition_key: String, inner: Arc>) -> Self { Self::Inner(GetEntitiesBuilderInner::new(partition_key, inner)) } diff --git a/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder_inner.rs b/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder_inner.rs index d54d29c..ebe1074 100644 --- a/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder_inner.rs +++ b/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder_inner.rs @@ -1,18 +1,20 @@ use std::{collections::BTreeMap, sync::Arc}; -use my_no_sql_abstractions::MyNoSqlEntity; +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; use 
my_no_sql_tcp_shared::sync_to_main::UpdateEntityStatisticsData; use rust_extensions::date_time::DateTimeAsMicroseconds; use super::super::my_no_sql_data_reader_tcp::MyNoSqlDataReaderInner; -pub struct GetEntitiesBuilderInner { +pub struct GetEntitiesBuilderInner< + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static, +> { partition_key: String, update_statistic_data: UpdateEntityStatisticsData, inner: Arc>, } -impl +impl GetEntitiesBuilderInner { pub fn new(partition_key: String, inner: Arc>) -> Self { @@ -41,7 +43,7 @@ impl pub async fn get_as_vec(&self) -> Option>> { let db_rows = { - let reader = self.inner.get_data().read().await; + let mut reader = self.inner.get_data().lock().await; reader.get_by_partition_as_vec(self.partition_key.as_str()) }?; @@ -63,7 +65,7 @@ impl filter: impl Fn(&TMyNoSqlEntity) -> bool, ) -> Option>> { let db_rows = { - let reader = self.inner.get_data().read().await; + let mut reader = self.inner.get_data().lock().await; reader.get_by_partition_as_vec_with_filter(&self.partition_key, filter) }?; @@ -82,7 +84,7 @@ impl pub async fn get_as_btree_map(&self) -> Option>> { let db_rows = { - let reader = self.inner.get_data().read().await; + let mut reader = self.inner.get_data().lock().await; reader.get_by_partition(&self.partition_key) }?; @@ -104,7 +106,7 @@ impl filter: impl Fn(&TMyNoSqlEntity) -> bool, ) -> Option>> { let db_rows = { - let reader = self.inner.get_data().read().await; + let mut reader = self.inner.get_data().lock().await; reader.get_by_partition_with_filter(&self.partition_key, filter) }?; diff --git a/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder_mock.rs b/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder_mock.rs index 324fe77..10b6b2c 100644 --- a/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder_mock.rs +++ b/my-no-sql-tcp-reader/src/subscribers/get_entities_builder/get_entities_builder_mock.rs @@ -1,18 +1,22 @@ use std::{collections::BTreeMap, sync::Arc}; -use my_no_sql_abstractions::MyNoSqlEntity; +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; use my_no_sql_tcp_shared::sync_to_main::UpdateEntityStatisticsData; use rust_extensions::date_time::DateTimeAsMicroseconds; use crate::subscribers::MyNoSqlDataReaderMockInner; -pub struct GetEntitiesBuilderMock { +pub struct GetEntitiesBuilderMock< + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static, +> { partition_key: String, update_statistic_data: UpdateEntityStatisticsData, inner: Arc>, } -impl GetEntitiesBuilderMock { +impl + GetEntitiesBuilderMock +{ pub fn new( partition_key: String, inner: Arc>, diff --git a/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder.rs b/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder.rs index 598bb42..40b8072 100644 --- a/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder.rs +++ b/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder.rs @@ -1,18 +1,21 @@ use std::sync::Arc; -use my_no_sql_abstractions::MyNoSqlEntity; +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; use rust_extensions::date_time::DateTimeAsMicroseconds; #[cfg(feature = "mocks")] use super::GetEntityBuilderMock; use super::{super::my_no_sql_data_reader_tcp::MyNoSqlDataReaderInner, GetEntityBuilderInner}; -pub enum GetEntityBuilder<'s, TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> { +pub enum 
GetEntityBuilder< + 's, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static, +> { Inner(GetEntityBuilderInner<'s, TMyNoSqlEntity>), #[cfg(feature = "mocks")] Mock(GetEntityBuilderMock<'s, TMyNoSqlEntity>), } -impl<'s, TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> +impl<'s, TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static> GetEntityBuilder<'s, TMyNoSqlEntity> { pub fn new( diff --git a/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder_inner.rs b/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder_inner.rs index b68f4a7..91ef6a4 100644 --- a/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder_inner.rs +++ b/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder_inner.rs @@ -1,19 +1,22 @@ use std::sync::Arc; -use my_no_sql_abstractions::MyNoSqlEntity; +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; use my_no_sql_tcp_shared::sync_to_main::UpdateEntityStatisticsData; use rust_extensions::date_time::DateTimeAsMicroseconds; use super::super::my_no_sql_data_reader_tcp::MyNoSqlDataReaderInner; -pub struct GetEntityBuilderInner<'s, TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> { +pub struct GetEntityBuilderInner< + 's, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static, +> { partition_key: &'s str, row_key: &'s str, update_statistic_data: UpdateEntityStatisticsData, inner: Arc>, } -impl<'s, TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> +impl<'s, TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static> GetEntityBuilderInner<'s, TMyNoSqlEntity> { pub fn new( @@ -47,7 +50,7 @@ impl<'s, TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> pub async fn execute(&self) -> Option> { let result = { - let reader = self.inner.get_data().read().await; + let mut reader = self.inner.get_data().lock().await; reader.get_entity(self.partition_key, self.row_key) }; diff --git a/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder_mock.rs b/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder_mock.rs index 3037859..cab55b3 100644 --- a/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder_mock.rs +++ b/my-no-sql-tcp-reader/src/subscribers/get_entity_builder/get_entity_builder_mock.rs @@ -1,19 +1,22 @@ use std::sync::Arc; -use my_no_sql_abstractions::MyNoSqlEntity; +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; use my_no_sql_tcp_shared::sync_to_main::UpdateEntityStatisticsData; use rust_extensions::date_time::DateTimeAsMicroseconds; use crate::subscribers::MyNoSqlDataReaderMockInner; -pub struct GetEntityBuilderMock<'s, TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> { +pub struct GetEntityBuilderMock< + 's, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static, +> { partition_key: &'s str, row_key: &'s str, update_statistic_data: UpdateEntityStatisticsData, inner: Arc>, } -impl<'s, TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> +impl<'s, TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static> GetEntityBuilderMock<'s, TMyNoSqlEntity> { pub fn new( diff --git a/my-no-sql-tcp-reader/src/subscribers/mod.rs b/my-no-sql-tcp-reader/src/subscribers/mod.rs index 765aaf1..0fabee5 100644 --- a/my-no-sql-tcp-reader/src/subscribers/mod.rs +++ b/my-no-sql-tcp-reader/src/subscribers/mod.rs @@ -26,3 +26,5 @@ pub use 
my_no_sql_data_reader_mock::*; mod my_no_sql_data_reader_mock_inner; #[cfg(feature = "mocks")] pub use my_no_sql_data_reader_mock_inner::*; +mod entity_with_lazy_deserialization; +pub use entity_with_lazy_deserialization::*; diff --git a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader.rs b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader.rs index 7760ec5..461a9f7 100644 --- a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader.rs +++ b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader.rs @@ -1,13 +1,16 @@ use std::{collections::BTreeMap, sync::Arc}; -use my_no_sql_abstractions::MyNoSqlEntity; +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; use crate::MyNoSqlDataReaderCallBacks; use super::{GetEntitiesBuilder, GetEntityBuilder}; #[async_trait::async_trait] -pub trait MyNoSqlDataReader { +pub trait MyNoSqlDataReader< + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static, +> +{ async fn get_table_snapshot_as_vec(&self) -> Option>>; async fn get_by_partition_key( @@ -20,6 +23,8 @@ pub trait MyNoSqlDataReader Option>>; + async fn get_partition_keys(&self) -> Vec; + async fn get_entity(&self, partition_key: &str, row_key: &str) -> Option>; async fn get_enum_case_model< @@ -61,6 +66,7 @@ pub trait MyNoSqlDataReader> + Sync @@ -87,7 +93,7 @@ pub trait MyNoSqlDataReader GetEntityBuilder; + ) -> GetEntityBuilder<'s, TMyNoSqlEntity>; async fn has_partition(&self, partition_key: &str) -> bool; diff --git a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_callbacks.rs b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_callbacks.rs index 9921243..420c291 100644 --- a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_callbacks.rs +++ b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_callbacks.rs @@ -1,26 +1,37 @@ -use std::sync::Arc; +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; -use my_no_sql_abstractions::MyNoSqlEntity; +use super::LazyMyNoSqlEntity; #[async_trait::async_trait] -pub trait MyNoSqlDataReaderCallBacks { - async fn inserted_or_replaced(&self, partition_key: &str, entities: Vec>); - async fn deleted(&self, partition_key: &str, entities: Vec>); +pub trait MyNoSqlDataReaderCallBacks< + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, +> +{ + async fn inserted_or_replaced( + &self, + partition_key: &str, + entities: Vec>, + ); + async fn deleted(&self, partition_key: &str, entities: Vec>); } #[async_trait::async_trait] -impl +impl MyNoSqlDataReaderCallBacks for () { async fn inserted_or_replaced( &self, _partition_key: &str, - _entities: Vec>, + _entities: Vec>, ) { panic!("This is a dumb implementation") } - async fn deleted(&self, _partition_key: &str, _entities: Vec>) { + async fn deleted( + &self, + _partition_key: &str, + _entities: Vec>, + ) { panic!("This is a dumb implementation") } } diff --git a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_callbacks_pusher.rs b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_callbacks_pusher.rs index e260fda..002be1e 100644 --- a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_callbacks_pusher.rs +++ b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_callbacks_pusher.rs @@ -1,28 +1,30 @@ use std::sync::Arc; -use my_no_sql_abstractions::MyNoSqlEntity; +use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer}; use rust_extensions::{ events_loop::{EventsLoop, EventsLoopTick}, ApplicationStates, }; -use 
super::MyNoSqlDataReaderCallBacks; +use super::{LazyMyNoSqlEntity, MyNoSqlDataReaderCallBacks}; -pub enum PusherEvents { - InsertedOrReplaced(String, Vec>), - Deleted(String, Vec>), +pub enum PusherEvents< + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, +> { + InsertedOrReplaced(String, Vec>), + Deleted(String, Vec>), } pub struct MyNoSqlDataReaderCallBacksPusher where - TMyNoSqlEntity: MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, { events_loop: EventsLoop>, } impl MyNoSqlDataReaderCallBacksPusher where - TMyNoSqlEntity: MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, { pub async fn new< TMyNoSqlDataReaderCallBacks: MyNoSqlDataReaderCallBacks + Send + Sync + 'static, @@ -40,14 +42,22 @@ where Self { events_loop } } - pub fn inserted_or_replaced(&self, partition_key: &str, entities: Vec>) { + pub fn inserted_or_replaced( + &self, + partition_key: &str, + entities: Vec>, + ) { self.events_loop.send(PusherEvents::InsertedOrReplaced( partition_key.to_string(), entities, )); } - pub fn deleted(&self, partition_key: &str, entities: Vec>) { + pub fn deleted( + &self, + partition_key: &str, + entities: Vec>, + ) { self.events_loop .send(PusherEvents::Deleted(partition_key.to_string(), entities)); } @@ -57,23 +67,31 @@ where impl MyNoSqlDataReaderCallBacks for MyNoSqlDataReaderCallBacksPusher where - TMyNoSqlEntity: MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, { - async fn inserted_or_replaced(&self, partition_key: &str, entities: Vec>) { + async fn inserted_or_replaced( + &self, + partition_key: &str, + entities: Vec>, + ) { self.events_loop.send(PusherEvents::InsertedOrReplaced( partition_key.to_string(), entities, )); } - async fn deleted(&self, partition_key: &str, entities: Vec>) { + async fn deleted( + &self, + partition_key: &str, + entities: Vec>, + ) { self.events_loop .send(PusherEvents::Deleted(partition_key.to_string(), entities)); } } pub struct MyNoSqlDataReaderCallBacksSender< - TMyNoSqlEntity: MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, TMyNoSqlDataReaderCallBacks: MyNoSqlDataReaderCallBacks, > { callbacks: Arc, @@ -81,7 +99,7 @@ pub struct MyNoSqlDataReaderCallBacksSender< } impl< - TMyNoSqlEntity: MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, TMyNoSqlDataReaderCallBacks: MyNoSqlDataReaderCallBacks + Send + Sync + 'static, > MyNoSqlDataReaderCallBacksSender { @@ -92,7 +110,7 @@ impl< #[async_trait::async_trait] impl< - TMyNoSqlEntity: MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, TMyNoSqlDataReaderCallBacks: MyNoSqlDataReaderCallBacks + Send + Sync + 'static, > EventsLoopTick> for MyNoSqlDataReaderCallBacksSender diff --git a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_data.rs b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_data.rs index 61471dc..ad2b2b1 100644 --- a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_data.rs +++ b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_data.rs @@ -1,28 +1,30 @@ use std::{collections::BTreeMap, sync::Arc}; -use my_no_sql_abstractions::MyNoSqlEntity; +use my_no_sql_abstractions::{MyNoSqlEntity, 
MyNoSqlEntitySerializer}; use rust_extensions::ApplicationStates; -use super::{MyNoSqlDataReaderCallBacks, MyNoSqlDataReaderCallBacksPusher}; +use crate::DataReaderEntitiesSet; -pub struct MyNoSqlDataReaderData { - table_name: &'static str, - entities: Option>>>, +use super::{LazyMyNoSqlEntity, MyNoSqlDataReaderCallBacks, MyNoSqlDataReaderCallBacksPusher}; + +pub struct MyNoSqlDataReaderData< + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, +> { + entities: DataReaderEntitiesSet, callbacks: Option>>, app_states: Arc, } impl MyNoSqlDataReaderData where - TMyNoSqlEntity: MyNoSqlEntity + Send + Sync + 'static, + TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Send + Sync + 'static, { pub async fn new( table_name: &'static str, app_states: Arc, ) -> Self { Self { - table_name, - entities: None, + entities: DataReaderEntitiesSet::new(table_name), callbacks: None, app_states, } @@ -39,38 +41,18 @@ where self.callbacks = Some(Arc::new(pusher)); } - fn get_init_table(&mut self) -> &mut BTreeMap>> { - if self.entities.is_none() { - println!("Initialized data for table {}", self.table_name); - self.entities = Some(BTreeMap::new()); - return self.entities.as_mut().unwrap(); - } - - return self.entities.as_mut().unwrap(); - } - - pub async fn init_table(&mut self, data: BTreeMap>) { - let mut new_table: BTreeMap>> = - BTreeMap::new(); - - for (partition_key, src_entities_by_partition) in data { - new_table.insert(partition_key.to_string(), BTreeMap::new()); - - let by_partition = new_table.get_mut(partition_key.as_str()).unwrap(); - - for entity in src_entities_by_partition { - let entity = Arc::new(entity); - by_partition.insert(entity.get_row_key().to_string(), entity); - } - } - let before = self.entities.replace(new_table); + pub async fn init_table( + &mut self, + data: BTreeMap>>, + ) { + let init_table_result = self.entities.init_table(data); if let Some(callbacks) = self.callbacks.as_ref() { super::callback_triggers::trigger_table_difference( callbacks.as_ref(), - before, - self.entities.as_ref().unwrap(), + init_table_result.table_before, + init_table_result.table_now, ) .await; } @@ -79,131 +61,62 @@ where pub async fn init_partition( &mut self, partition_key: &str, - src_entities: BTreeMap>, + src_entities: BTreeMap>>, ) { - let callbacks = self.callbacks.clone(); + //let callbacks = self.callbacks.clone(); - let entities = self.get_init_table(); + let init_partition_result = self.entities.init_partition(partition_key, src_entities); - let mut new_partition = BTreeMap::new(); - - let before_partition = entities.remove(partition_key); - - for (row_key, entities) in src_entities { - for entity in entities { - new_partition.insert(row_key.clone(), Arc::new(entity)); - } - } - - entities.insert(partition_key.to_string(), new_partition); - - if let Some(callbacks) = callbacks { + if let Some(callbacks) = self.callbacks.as_ref() { super::callback_triggers::trigger_partition_difference( callbacks.as_ref(), partition_key, - before_partition, - entities.get(partition_key).unwrap(), + init_partition_result.partition_now, + init_partition_result.partition_before, ) .await; } } - pub fn update_rows(&mut self, src_data: BTreeMap>) { - let callbacks = self.callbacks.clone(); - - let entities = self.get_init_table(); - - for (partition_key, src_entities) in src_data { - let mut updates = if callbacks.is_some() { - Some(Vec::new()) - } else { - None - }; - - if !entities.contains_key(partition_key.as_str()) { - entities.insert(partition_key.to_string(), 
BTreeMap::new()); - } + pub fn update_rows( + &mut self, + src_data: BTreeMap>>, + ) { + self.entities.update_rows(src_data, &self.callbacks); + } - let by_partition = entities.get_mut(partition_key.as_str()).unwrap(); + pub fn delete_rows(&mut self, rows_to_delete: Vec) { + self.entities.delete_rows(rows_to_delete, &self.callbacks); + } - for entity in src_entities { - let entity = Arc::new(entity); - if let Some(updates) = updates.as_mut() { - updates.push(entity.clone()); - } - by_partition.insert(entity.get_row_key().to_string(), entity); - } + pub fn get_partition_keys(&self) -> Vec { + self.entities.get_partition_keys() + } - if let Some(callbacks) = callbacks.as_ref() { - if let Some(updates) = updates { - if updates.len() > 0 { - callbacks.inserted_or_replaced(partition_key.as_str(), updates); - } - } - } + pub fn get_table_snapshot( + &mut self, + ) -> Option>>> { + let entities = self.entities.as_mut()?; + if entities.len() == 0 { + return None; } - } - pub fn delete_rows(&mut self, rows_to_delete: Vec) { - let callbacks = self.callbacks.clone(); - - let mut deleted_rows = if callbacks.is_some() { - Some(BTreeMap::new()) - } else { - None - }; - - let entities = self.get_init_table(); - - for row_to_delete in &rows_to_delete { - let mut delete_partition = false; - if let Some(partition) = entities.get_mut(row_to_delete.partition_key.as_str()) { - if partition.remove(row_to_delete.row_key.as_str()).is_some() { - if let Some(deleted_rows) = deleted_rows.as_mut() { - if !deleted_rows.contains_key(row_to_delete.partition_key.as_str()) { - deleted_rows - .insert(row_to_delete.partition_key.to_string(), Vec::new()); - } - - deleted_rows - .get_mut(row_to_delete.partition_key.as_str()) - .unwrap() - .push( - partition - .get(row_to_delete.row_key.as_str()) - .unwrap() - .clone(), - ); - } - } - - delete_partition = partition.len() == 0; - } + let mut result: BTreeMap>> = BTreeMap::new(); + for (partition_key, entities) in entities.iter_mut() { + let mut to_insert = BTreeMap::new(); - if delete_partition { - entities.remove(row_to_delete.partition_key.as_str()); + for (row_key, entity) in entities.iter_mut() { + to_insert.insert(row_key.clone(), entity.get().clone()); } - } - if let Some(callbacks) = callbacks.as_ref() { - if let Some(partitions) = deleted_rows { - for (partition_key, rows) in partitions { - callbacks.deleted(partition_key.as_str(), rows); - } - } + result.insert(partition_key.clone(), to_insert); } - } - pub fn get_table_snapshot( - &self, - ) -> Option>>> { - let entities = self.entities.as_ref()?; - - return Some(entities.clone()); + return Some(result); } - pub fn get_table_snapshot_as_vec(&self) -> Option>> { - let entities = self.entities.as_ref()?; + pub fn get_table_snapshot_as_vec(&mut self) -> Option>> { + let entities = self.entities.as_mut()?; if entities.len() == 0 { return None; @@ -211,49 +124,60 @@ where let mut result = Vec::new(); - for partition in entities.values() { - for entity in partition.values() { - result.push(entity.clone()); + for partition in entities.values_mut() { + for entity in partition.values_mut() { + result.push(entity.get().clone()); } } Some(result) } - pub fn get_entity(&self, partition_key: &str, row_key: &str) -> Option> { - let entities = self.entities.as_ref()?; + pub fn get_entity( + &mut self, + partition_key: &str, + row_key: &str, + ) -> Option> { + let entities = self.entities.as_mut()?; - let partition = entities.get(partition_key)?; + let partition = entities.get_mut(partition_key)?; - let row = partition.get(row_key)?; 
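[annotation — not part of the patch: the lookups below switch from `get` to `get_mut` because rows are now stored as `LazyMyNoSqlEntity`: the first access may deserialize the raw JSON and cache the resulting `Arc<T>` back into the slot, mutating the map. That is also why the reader's data holder moved from `RwLock` to `Mutex`. A hedged sketch of the caller-visible behavior; `MyEntity`, the table name, and the key values are placeholders:]

```rust
use std::sync::Arc;

use my_no_sql_tcp_reader::MyNoSqlDataReaderTcp;
use serde::{Deserialize, Serialize};

// Placeholder entity; any type generated by the my-no-sql-macros entity
// attribute satisfies the MyNoSqlEntity + MyNoSqlEntitySerializer bounds.
#[my_no_sql_macros::my_no_sql_entity("example-table")]
#[derive(Serialize, Deserialize, Clone)]
pub struct MyEntity {
    pub value: String,
}

// `reader` is assumed to come from a MyNoSqlTcpConnection subscription.
async fn lookup(reader: &MyNoSqlDataReaderTcp<MyEntity>) -> Option<Arc<MyEntity>> {
    // Locks the inner Mutex; the first access may deserialize the raw row
    // and cache the Arc, so repeated lookups return the cached value.
    reader.get_entity("PK1", "RK1").await
}
```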
+        let row = partition.get_mut(row_key)?;

-        Some(row.clone())
+        Some(row.get().clone())
     }

     pub fn get_by_partition(
-        &self,
+        &mut self,
         partition_key: &str,
     ) -> Option<BTreeMap<String, Arc<TMyNoSqlEntity>>> {
-        let entities = self.entities.as_ref()?;
+        let entities = self.entities.as_mut()?;
+
+        let partition = entities.get_mut(partition_key)?;
+
+        let mut result = BTreeMap::new();

-        let partition = entities.get(partition_key)?;
+        for itm in partition.iter_mut() {
+            result.insert(itm.0.clone(), itm.1.get().clone());
+        }

-        Some(partition.clone())
+        Some(result)
     }

     pub fn get_by_partition_with_filter(
-        &self,
+        &mut self,
         partition_key: &str,
         filter: impl Fn(&TMyNoSqlEntity) -> bool,
     ) -> Option<BTreeMap<String, Arc<TMyNoSqlEntity>>> {
-        let entities = self.entities.as_ref()?;
+        let entities = self.entities.as_mut()?;

-        let partition = entities.get(partition_key)?;
+        let partition = entities.get_mut(partition_key)?;

         let mut result = BTreeMap::new();

-        for db_row in partition.values() {
-            if filter(db_row) {
+        for db_row in partition.values_mut() {
+            let db_row = db_row.get();
+            if filter(&db_row) {
                 result.insert(db_row.get_row_key().to_string(), db_row.clone());
             }
         }
@@ -261,6 +185,15 @@ where
         Some(result)
     }

+    pub fn iter_entities<'s>(
+        &'s mut self,
+        partition_key: &str,
+    ) -> Option<std::collections::btree_map::ValuesMut<'s, String, LazyMyNoSqlEntity<TMyNoSqlEntity>>> {
+        let entities = self.entities.as_mut()?;
+        let partition = entities.get_mut(partition_key)?;
+        Some(partition.values_mut())
+    }
+
     pub fn has_partition(&self, partition_key: &str) -> bool {
         let entities = self.entities.as_ref();
@@ -273,10 +206,13 @@ where
         entities.contains_key(partition_key)
     }

-    pub fn get_by_partition_as_vec(&self, partition_key: &str) -> Option<Vec<Arc<TMyNoSqlEntity>>> {
-        let entities = self.entities.as_ref()?;
+    pub fn get_by_partition_as_vec(
+        &mut self,
+        partition_key: &str,
+    ) -> Option<Vec<Arc<TMyNoSqlEntity>>> {
+        let entities = self.entities.as_mut()?;

-        let partition = entities.get(partition_key)?;
+        let partition = entities.get_mut(partition_key)?;

         if partition.len() == 0 {
             return None;
@@ -284,21 +220,21 @@ where

         let mut result = Vec::with_capacity(partition.len());

-        for db_row in partition.values() {
-            result.push(db_row.clone());
+        for db_row in partition.values_mut() {
+            result.push(db_row.get().clone());
         }

         Some(result)
     }

     pub fn get_by_partition_as_vec_with_filter(
-        &self,
+        &mut self,
         partition_key: &str,
         filter: impl Fn(&TMyNoSqlEntity) -> bool,
     ) -> Option<Vec<Arc<TMyNoSqlEntity>>> {
-        let entities = self.entities.as_ref()?;
+        let entities = self.entities.as_mut()?;

-        let partition = entities.get(partition_key)?;
+        let partition = entities.get_mut(partition_key)?;

         if partition.len() == 0 {
             return None;
@@ -306,7 +242,8 @@ where

         let mut result = Vec::with_capacity(partition.len());

-        for db_row in partition.values() {
+        for db_row in partition.values_mut() {
+            let db_row = db_row.get();
             if filter(db_row.as_ref()) {
                 result.push(db_row.clone());
             }
@@ -316,6 +253,6 @@ where
     }

     pub async fn has_entities_at_all(&self) -> bool {
-        self.entities.is_some()
+        self.entities.is_initialized()
     }
 }
diff --git a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_mock.rs b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_mock.rs
index f012fae..c0a72c3 100644
--- a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_mock.rs
+++ b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_mock.rs
@@ -1,18 +1,20 @@
 use std::{collections::BTreeMap, sync::Arc};

-use my_no_sql_abstractions::MyNoSqlEntity;
+use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer};

 use crate::MyNoSqlDataReaderCallBacks;

 use super::{GetEntitiesBuilder, GetEntityBuilder, MyNoSqlDataReader, MyNoSqlDataReaderMockInner};

-pub struct MyNoSqlDataReaderMock<TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> {
+pub struct MyNoSqlDataReaderMock<
+    TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static,
+> {
     pub inner: Arc<MyNoSqlDataReaderMockInner<TMyNoSqlEntity>>,
 }

 impl<TMyNoSqlEntity> MyNoSqlDataReaderMock<TMyNoSqlEntity>
 where
-    TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static,
+    TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static,
 {
     pub fn new() -> Self {
         Self {
@@ -31,7 +33,7 @@ where
 #[async_trait::async_trait]
 impl<TMyNoSqlEntity> MyNoSqlDataReader<TMyNoSqlEntity> for MyNoSqlDataReaderMock<TMyNoSqlEntity>
 where
-    TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static,
+    TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static,
 {
     async fn get_table_snapshot_as_vec(&self) -> Option<Vec<Arc<TMyNoSqlEntity>>> {
         let result = self.inner.get_table_snapshot_as_vec().await;
@@ -50,6 +52,10 @@ where
         self.inner.get_by_partition_key(partition_key).await
     }

+    async fn get_partition_keys(&self) -> Vec<String> {
+        self.inner.get_partition_keys().await
+    }
+
     async fn get_by_partition_key_as_vec(
         &self,
         partition_key: &str,
@@ -69,7 +75,7 @@ where
         &'s self,
         partition_key: &'s str,
         row_key: &'s str,
-    ) -> GetEntityBuilder<TMyNoSqlEntity> {
+    ) -> GetEntityBuilder<'s, TMyNoSqlEntity> {
         GetEntityBuilder::new_mock(partition_key, row_key, self.inner.clone())
     }
diff --git a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_mock_inner.rs b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_mock_inner.rs
index c806717..ec62d29 100644
--- a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_mock_inner.rs
+++ b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_mock_inner.rs
@@ -3,7 +3,7 @@ use std::{
     sync::Arc,
 };

-use my_no_sql_abstractions::MyNoSqlEntity;
+use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer};
 use rust_extensions::{lazy::LazyVec, AppStates};
 use tokio::sync::RwLock;

@@ -11,12 +11,14 @@ use crate::MyNoSqlDataReaderCallBacks;

 use super::MyNoSqlDataReaderCallBacksPusher;

-pub struct MyNoSqlDataReaderMockInnerData<TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> {
+pub struct MyNoSqlDataReaderMockInnerData<
+    TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static,
+> {
     pub items: BTreeMap<String, BTreeMap<String, Arc<TMyNoSqlEntity>>>,
     pub callbacks: Option<Arc<MyNoSqlDataReaderCallBacksPusher<TMyNoSqlEntity>>>,
 }

-impl<TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static>
+impl<TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static>
     MyNoSqlDataReaderMockInnerData<TMyNoSqlEntity>
 {
     pub fn new() -> Self {
@@ -27,14 +29,16 @@ impl
     }
 }

-pub struct MyNoSqlDataReaderMockInner<TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> {
+pub struct MyNoSqlDataReaderMockInner<
+    TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static,
+> {
     pub inner: RwLock<MyNoSqlDataReaderMockInnerData<TMyNoSqlEntity>>,
     app_states: Arc<AppStates>,
 }

 impl<TMyNoSqlEntity> MyNoSqlDataReaderMockInner<TMyNoSqlEntity>
 where
-    TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static,
+    TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static,
 {
     pub fn new() -> Self {
         Self {
@@ -110,6 +114,11 @@ where
         read_access.items.get(partition_key).cloned()
     }

+    pub async fn get_partition_keys(&self) -> Vec<String> {
+        let read_access = self.inner.read().await;
+        read_access.items.keys().cloned().collect()
+    }
+
     pub async fn get_by_partition_key_as_vec(
         &self,
         partition_key: &str,
diff --git a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_tcp.rs b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_tcp.rs
index 43c5761..10d892e 100644
--- a/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_tcp.rs
+++ b/my-no-sql-tcp-reader/src/subscribers/my_no_sql_data_reader_tcp.rs
@@ -1,25 +1,29 @@
 use std::{collections::BTreeMap, sync::Arc, time::Duration};

 use async_trait::async_trait;
-use my_json::json_reader::array_parser::JsonArrayIterator;
+use my_json::json_reader::JsonArrayIterator;
 use my_no_sql_abstractions::{MyNoSqlEntity, MyNoSqlEntitySerializer};
 use my_no_sql_tcp_shared::sync_to_main::SyncToMainNodeHandler;
 use rust_extensions::{ApplicationStates, StrOrString};
 use serde::de::DeserializeOwned;
-use tokio::sync::RwLock;
+use tokio::sync::Mutex;

 use super::{
-    GetEntitiesBuilder, GetEntityBuilder, MyNoSqlDataReader, MyNoSqlDataReaderCallBacks,
-    MyNoSqlDataReaderData, UpdateEvent,
+    EntityRawData, GetEntitiesBuilder, GetEntityBuilder, LazyMyNoSqlEntity, MyNoSqlDataReader,
+    MyNoSqlDataReaderCallBacks, MyNoSqlDataReaderData, UpdateEvent,
 };

-pub struct MyNoSqlDataReaderInner<TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> {
-    data: RwLock<MyNoSqlDataReaderData<TMyNoSqlEntity>>,
+pub struct MyNoSqlDataReaderInner<
+    TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static,
+> {
+    data: Mutex<MyNoSqlDataReaderData<TMyNoSqlEntity>>,
     sync_handler: Arc<SyncToMainNodeHandler>,
 }

-impl<TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> MyNoSqlDataReaderInner<TMyNoSqlEntity> {
-    pub fn get_data(&self) -> &RwLock<MyNoSqlDataReaderData<TMyNoSqlEntity>> {
+impl<TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static>
+    MyNoSqlDataReaderInner<TMyNoSqlEntity>
+{
+    pub fn get_data(&self) -> &Mutex<MyNoSqlDataReaderData<TMyNoSqlEntity>> {
         &self.data
     }

@@ -28,7 +32,9 @@ impl MyNoSqlDataReaderInn
     }
 }

-pub struct MyNoSqlDataReaderTcp<TMyNoSqlEntity: MyNoSqlEntity + Sync + Send + 'static> {
+pub struct MyNoSqlDataReaderTcp<
+    TMyNoSqlEntity: MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + 'static,
+> {
     inner: Arc<MyNoSqlDataReaderInner<TMyNoSqlEntity>>,
 }

@@ -42,7 +48,7 @@ where
     ) -> Self {
         Self {
             inner: Arc::new(MyNoSqlDataReaderInner {
-                data: RwLock::new(
+                data: Mutex::new(
                     MyNoSqlDataReaderData::new(TMyNoSqlEntity::TABLE_NAME, app_states).await,
                 ),
                 sync_handler,
@@ -53,12 +59,12 @@ where
     pub async fn get_table_snapshot(
         &self,
     ) -> Option<BTreeMap<String, BTreeMap<String, Arc<TMyNoSqlEntity>>>> {
-        let reader = self.inner.data.read().await;
+        let mut reader = self.inner.data.lock().await;
         return reader.get_table_snapshot();
     }

     pub async fn get_table_snapshot_as_vec(&self) -> Option<Vec<Arc<TMyNoSqlEntity>>> {
-        let reader = self.inner.data.read().await;
+        let mut reader = self.inner.data.lock().await;
         reader.get_table_snapshot_as_vec()
     }

@@ -66,7 +72,7 @@ where
         &self,
         partition_key: &str,
     ) -> Option<BTreeMap<String, Arc<TMyNoSqlEntity>>> {
-        let reader = self.inner.data.read().await;
+        let mut reader = self.inner.data.lock().await;
         reader.get_by_partition(partition_key)
     }

@@ -74,7 +80,7 @@ where
         &self,
         partition_key: &str,
     ) -> Option<Vec<Arc<TMyNoSqlEntity>>> {
-        let reader = self.inner.data.read().await;
+        let mut reader = self.inner.data.lock().await;
         reader.get_by_partition_as_vec(partition_key)
     }

@@ -83,7 +89,7 @@ where
         partition_key: &str,
         row_key: &str,
     ) -> Option<Arc<TMyNoSqlEntity>> {
-        let reader = self.inner.data.read().await;
+        let mut reader = self.inner.data.lock().await;
         reader.get_entity(partition_key, row_key)
     }

@@ -98,20 +104,53 @@ where
         &'s self,
         partition_key: &'s str,
         row_key: &'s str,
-    ) -> GetEntityBuilder<TMyNoSqlEntity> {
+    ) -> GetEntityBuilder<'s, TMyNoSqlEntity> {
         GetEntityBuilder::new(partition_key, row_key, self.inner.clone())
     }

     pub async fn has_partition(&self, partition_key: &str) -> bool {
-        let reader: tokio::sync::RwLockReadGuard<'_, MyNoSqlDataReaderData<TMyNoSqlEntity>> =
-            self.inner.data.read().await;
+        let reader = self.inner.data.lock().await;
         reader.has_partition(partition_key)
     }

-    pub fn deserialize_array(&self, data: &[u8]) -> BTreeMap<String, Vec<TMyNoSqlEntity>> {
+    pub async fn iter_and_find_entity_inside_partition(
+        &self,
+        partition_key: &str,
+        predicate: impl Fn(&TMyNoSqlEntity) -> bool,
+    ) -> Option<Arc<TMyNoSqlEntity>> {
+        let mut reader = self.inner.data.lock().await;
+
+        if let Some(entities) = reader.iter_entities(partition_key) {
+            for entity in entities {
+                let entity = entity.get();
+
+                if predicate(&entity) {
+                    return Some(entity.clone());
+                }
+            }
+        }
+
+        None
+    }
+
+    pub fn deserialize_array(
+        &self,
+        data: &[u8],
+    ) -> BTreeMap<String, Vec<LazyMyNoSqlEntity<TMyNoSqlEntity>>> {
+        let json_array_iterator = JsonArrayIterator::new(data);
+
+        if let Err(err) = &json_array_iterator {
+            panic!(
+                "Table: {}. The whole array of json entities is broken. Err: {:?}",
+                TMyNoSqlEntity::TABLE_NAME,
+                err
+            );
+        }
+
+        let json_array_iterator = json_array_iterator.unwrap();
         let mut result = BTreeMap::new();

-        for db_entity in JsonArrayIterator::new(data) {
+        while let Some(db_entity) = json_array_iterator.get_next() {
             if let Err(err) = &db_entity {
                 panic!(
                     "Table: {}. The whole array of json entities is broken. Err: {:?}",
@@ -122,14 +161,41 @@ where

             let db_entity_data = db_entity.unwrap();

-            let el = TMyNoSqlEntity::deserialize_entity(db_entity_data);
+            let item_to_insert = if TMyNoSqlEntity::LAZY_DESERIALIZATION {
+                let data = db_entity_data.as_bytes().to_vec();
+                let db_json_entity =
+                    my_no_sql_core::db_json_entity::DbJsonEntity::from_slice(&data).unwrap();
+
+                LazyMyNoSqlEntity::Raw(
+                    EntityRawData {
+                        db_json_entity,
+                        data,
+                    }
+                    .into(),
+                )
+            } else {
+                let result = TMyNoSqlEntity::deserialize_entity(db_entity_data.as_bytes());
+
+                match result {
+                    Ok(result) => LazyMyNoSqlEntity::Deserialized(Arc::new(result)),
+                    Err(err) => {
+                        println!(
+                            "Invalid entity to deserialize. Table: {}. Content: {:?}",
+                            TMyNoSqlEntity::TABLE_NAME,
+                            db_entity_data.as_str()
+                        );
+
+                        panic!("Can not lazy deserialize entity. Err: {}", err);
+                    }
+                }
+            };

-            let partition_key = el.get_partition_key();
+            let partition_key = item_to_insert.get_partition_key();
             if !result.contains_key(partition_key) {
                 result.insert(partition_key.to_string(), Vec::new());
             }

-            result.get_mut(partition_key).unwrap().push(el);
+            result.get_mut(partition_key).unwrap().push(item_to_insert);
         }

         result
@@ -175,6 +241,11 @@ where

         Some(TResult::from(entity))
     }
+
+    pub async fn get_partition_keys(&self) -> Vec<String> {
+        let write_access = self.inner.data.lock().await;
+        write_access.get_partition_keys()
+    }
 }

 #[async_trait]
@@ -184,26 +255,26 @@ impl<TMyNoSqlEntity> Upda
     async fn init_table(&self, data: Vec<u8>) {
         let data = self.deserialize_array(data.as_slice());

-        let mut write_access = self.inner.data.write().await;
+        let mut write_access = self.inner.data.lock().await;
         write_access.init_table(data).await;
     }

     async fn init_partition(&self, partition_key: &str, data: Vec<u8>) {
         let data = self.deserialize_array(data.as_slice());

-        let mut write_access = self.inner.data.write().await;
+        let mut write_access = self.inner.data.lock().await;
         write_access.init_partition(partition_key, data).await;
     }

     async fn update_rows(&self, data: Vec<u8>) {
         let data = self.deserialize_array(data.as_slice());

-        let mut write_access = self.inner.data.write().await;
+        let mut write_access = self.inner.data.lock().await;
         write_access.update_rows(data);
     }

     async fn delete_rows(&self, rows_to_delete: Vec<DeleteRowTcpContract>) {
-        let mut write_access = self.inner.data.write().await;
+        let mut write_access = self.inner.data.lock().await;
         write_access.delete_rows(rows_to_delete);
     }
 }
@@ -214,6 +285,9 @@ where
     TMyNoSqlEntity:
         MyNoSqlEntity + MyNoSqlEntitySerializer + Sync + Send + DeserializeOwned + 'static,
 {
+    async fn get_partition_keys(&self) -> Vec<String> {
+        self.get_partition_keys().await
+    }
     async fn get_table_snapshot_as_vec(&self) -> Option<Vec<Arc<TMyNoSqlEntity>>> {
         self.get_table_snapshot_as_vec().await
     }
@@ -244,7 +318,7 @@ where
         &'s self,
         partition_key: &'s str,
         row_key: &'s str,
-    ) -> GetEntityBuilder<TMyNoSqlEntity> {
+    ) -> GetEntityBuilder<'s, TMyNoSqlEntity> {
         self.get_entity_with_callback_to_server(partition_key, row_key)
     }

@@ -255,7 +329,7 @@ where
     async fn wait_until_first_data_arrives(&self) {
         loop {
             {
-                let reader = self.inner.data.read().await;
+                let reader = self.inner.data.lock().await;
                 if reader.has_entities_at_all().await {
                     return;
                 }
@@ -271,7 +345,7 @@ where
         &self,
         callbacks: Arc<TMyNoSqlDataReaderCallBacks>,
     ) {
-        let mut write_access = self.inner.data.write().await;
+        let mut write_access = self.inner.data.lock().await;
         write_access.assign_callback(callbacks).await;
     }
 }
diff --git a/my-no-sql-tcp-reader/src/tcp_events.rs b/my-no-sql-tcp-reader/src/tcp_events.rs
index 0b00fe4..e532b7d 100644
--- a/my-no-sql-tcp-reader/src/tcp_events.rs
+++ b/my-no-sql-tcp-reader/src/tcp_events.rs
@@ -7,7 +7,8 @@ use my_tcp_sockets::{tcp_connection::TcpSocketConnection, SocketEventCallback};

 use crate::subscribers::Subscribers;

-pub type TcpConnection = TcpSocketConnection<MyNoSqlTcpContract, MyNoSqlReaderTcpSerializer>;
+pub type MyNoSqlTcpConnection =
+    TcpSocketConnection<MyNoSqlTcpContract, MyNoSqlReaderTcpSerializer, ()>;

 pub struct TcpEvents {
     app_name: String,
     pub subscribers: Subscribers,
@@ -22,20 +23,11 @@ impl TcpEvents {
             sync_handler,
         }
     }
-    pub async fn handle_incoming_packet(
-        &self,
-        _tcp_contract: MyNoSqlTcpContract,
-        _connection: Arc<TcpConnection>,
-    ) {
-    }
 }

 #[async_trait::async_trait]
 impl SocketEventCallback<MyNoSqlTcpContract, MyNoSqlReaderTcpSerializer, ()> for TcpEvents {
-    async fn connected(
-        &self,
-        connection: Arc<TcpSocketConnection<MyNoSqlTcpContract, MyNoSqlReaderTcpSerializer>>,
-    ) {
+    async fn connected(&self, connection: Arc<MyNoSqlTcpConnection>) {
         let contract = MyNoSqlTcpContract::Greeting {
             name: self.app_name.to_string(),
         };
@@ -54,19 +46,12 @@ impl SocketEventCallback<MyNoSqlTcpContract, MyNoSqlReaderTcpSerializer, ()> for
             .tcp_events_pusher_new_connection_established(connection);
     }

-    async fn disconnected(
-        &self,
-        connection: Arc<TcpSocketConnection<MyNoSqlTcpContract, MyNoSqlReaderTcpSerializer>>,
-    ) {
+    async fn disconnected(&self, connection: Arc<MyNoSqlTcpConnection>) {
         self.sync_handler
             .tcp_events_pusher_connection_disconnected(connection);
     }

-    async fn payload(
-        &self,
-        _connection: &Arc<TcpSocketConnection<MyNoSqlTcpContract, MyNoSqlReaderTcpSerializer>>,
-        contract: MyNoSqlTcpContract,
-    ) {
+    async fn payload(&self, _connection: &Arc<MyNoSqlTcpConnection>, contract: MyNoSqlTcpContract) {
         match contract {
             MyNoSqlTcpContract::Ping => {}
             MyNoSqlTcpContract::Pong => {}
diff --git a/my-no-sql-tcp-shared/Cargo.toml b/my-no-sql-tcp-shared/Cargo.toml
index ec134ab..7fad6da 100644
--- a/my-no-sql-tcp-shared/Cargo.toml
+++ b/my-no-sql-tcp-shared/Cargo.toml
@@ -1,13 +1,13 @@
 [package]
 name = "my-no-sql-tcp-shared"
-version = "0.3.0"
+version = "0.4.1"
 edition = "2021"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-my-tcp-sockets = { tag = "0.1.9", git = "https://github.com/MyJetTools/my-tcp-sockets.git" }
-rust-extensions = { tag = "0.1.4", git = "https://github.com/MyJetTools/rust-extensions.git" }
+my-tcp-sockets = { tag = "0.1.12", git = "https://github.com/MyJetTools/my-tcp-sockets.git" }
+rust-extensions = { tag = "0.1.5", git = "https://github.com/MyJetTools/rust-extensions.git" }

 tokio = { version = "*", features = ["full"] }
 tokio-util = "*"
diff --git a/my-no-sql-tcp-shared/src/payload_compressor.rs b/my-no-sql-tcp-shared/src/payload_compressor.rs
index fef72c6..06f233e 100644
--- a/my-no-sql-tcp-shared/src/payload_compressor.rs
+++ b/my-no-sql-tcp-shared/src/payload_compressor.rs
@@ -7,8 +7,8 @@ pub fn compress(payload: &[u8]) -> Result<Vec<u8>, zip::result::ZipError> {
     {
         let mut zip = zip::ZipWriter::new(&mut writer);

-        let options =
-            zip::write::FileOptions::default().compression_method(zip::CompressionMethod::Deflated);
+        let options = zip::write::SimpleFileOptions::default()
+            .compression_method(zip::CompressionMethod::Deflated);

         zip.start_file("d", options)?;
diff --git a/my-no-sql-tcp-shared/src/tcp_contracts.rs b/my-no-sql-tcp-shared/src/tcp_contracts.rs
index 59b5b36..e73c7c0 100644
--- a/my-no-sql-tcp-shared/src/tcp_contracts.rs
+++ b/my-no-sql-tcp-shared/src/tcp_contracts.rs
@@ -1,6 +1,6 @@
 use my_tcp_sockets::{
     socket_reader::{ReadingTcpContractFail, SocketReader, SocketReaderInMem},
-    TcpWriteBuffer,
+    TcpSerializerState, TcpWriteBuffer,
 };
 use rust_extensions::date_time::DateTimeAsMicroseconds;
@@ -496,3 +496,11 @@ impl my_tcp_sockets::TcpContract for MyNoSqlTcpContract {
         }
     }
 }
+
+impl TcpSerializerState<MyNoSqlTcpContract> for () {
+    fn is_tcp_contract_related_to_metadata(&self, _contract: &MyNoSqlTcpContract) -> bool {
+        false
+    }
+
+    fn apply_tcp_contract(&mut self, _contract: &MyNoSqlTcpContract) {}
+}
diff --git a/my-no-sql-tcp-shared/src/tcp_serializer.rs b/my-no-sql-tcp-shared/src/tcp_serializer.rs
index b93b99d..af861f1 100644
--- a/my-no-sql-tcp-shared/src/tcp_serializer.rs
+++ b/my-no-sql-tcp-shared/src/tcp_serializer.rs
@@ -1,6 +1,6 @@
 use my_tcp_sockets::{
     socket_reader::{ReadingTcpContractFail, SocketReader},
-    TcpSerializerFactory, TcpSerializerState, TcpSocketSerializer, TcpWriteBuffer,
+    TcpSerializerFactory, TcpSocketSerializer, TcpWriteBuffer,
 };

 use crate::MyNoSqlTcpContract;
@@ -38,13 +38,6 @@ impl TcpSocketSerializer<MyNoSqlTcpContract, ()> for MyNoSqlReaderTcpSerializer
     }
 }

-impl TcpSerializerState<MyNoSqlTcpContract> for () {
-    fn is_tcp_contract_related_to_metadata(&self, _: &MyNoSqlTcpContract) -> bool {
-        false
-    }
-    fn apply_tcp_contract(&mut self, _: &MyNoSqlTcpContract) {}
-}
-
 pub struct MyNoSqlTcpSerializerFactory;

 #[async_trait::async_trait]
diff --git a/my-no-sql-tests/Cargo.toml b/my-no-sql-tests/Cargo.toml
index 9b3b80c..9a3129b 100644
--- a/my-no-sql-tests/Cargo.toml
+++ b/my-no-sql-tests/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "my-no-sql-tests"
-version = "0.3.0"
+version = "0.4.1"
 edition = "2021"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -12,3 +12,5 @@ async-trait = "*"
 my-no-sql-macros = { path = "../my-no-sql-macros" }
 serde = { version = "*", features = ["derive"] }
 my-no-sql-sdk = { path = "../my-no-sql-sdk" }
+
+my-no-sql-tcp-reader = { path = "../my-no-sql-tcp-reader" }
diff --git a/my-no-sql-tests/src/lib.rs b/my-no-sql-tests/src/lib.rs
index 8e24695..ab36453 100644
--- a/my-no-sql-tests/src/lib.rs
+++ b/my-no-sql-tests/src/lib.rs
@@ -1,2 +1,8 @@
 #[cfg(test)]
 mod macros_tests;
+#[cfg(test)]
+mod test_new_enum_cases_added;
+#[cfg(test)]
+mod test_same_timestamp;
+#[cfg(test)]
+mod tests_from_real_life;
diff --git a/my-no-sql-tests/src/macros_tests/enum_test.rs b/my-no-sql-tests/src/macros_tests/enum_test.rs
index 747d377..3810336 100644
--- a/my-no-sql-tests/src/macros_tests/enum_test.rs
+++ b/my-no-sql-tests/src/macros_tests/enum_test.rs
@@ -25,7 +25,7 @@ pub struct Struct2 {
 #[test]
 fn test_serialize_deserialize_case_1() {
     let src_model = Struct1 {
-        time_stamp: "".to_string(),
+        time_stamp: Default::default(),
         field1: "test".to_string(),
         field2: 123,
     };
@@ -33,7 +33,7 @@ fn test_serialize_deserialize_case_1() {

     let vec = entity.serialize_entity();

-    let dest = MyNoSqlEnumEntityTest::deserialize_entity(&vec);
+    let dest = MyNoSqlEnumEntityTest::deserialize_entity(&vec).unwrap();

     let model = dest.unwrap_case1();

@@ -45,7 +45,7 @@ fn test_serialize_deserialize_case_1() {
 fn test_serialize_deserialize_case_2() {
     let src_model = Struct2 {
         row_key: "rk2".to_string(),
-        time_stamp: "".to_string(),
+        time_stamp: Default::default(),
         field3: "test3".to_string(),
         field4: 1234,
     };
@@ -53,7 +53,7 @@ fn test_serialize_deserialize_case_2() {

     let vec = entity.serialize_entity();

-    let dest = MyNoSqlEnumEntityTest::deserialize_entity(&vec);
+    let dest = MyNoSqlEnumEntityTest::deserialize_entity(&vec).unwrap();

     let model = dest.unwrap_case2();
diff --git a/my-no-sql-tests/src/test_new_enum_cases_added/mod.rs b/my-no-sql-tests/src/test_new_enum_cases_added/mod.rs
new file mode 100644
index 0000000..a1a46e3
--- /dev/null
+++ b/my-no-sql-tests/src/test_new_enum_cases_added/mod.rs
@@ -0,0 +1 @@
+mod test_enum_with_added_new_type;
diff --git a/my-no-sql-tests/src/test_new_enum_cases_added/test_enum_with_added_new_type.rs b/my-no-sql-tests/src/test_new_enum_cases_added/test_enum_with_added_new_type.rs
new file mode 100644
index 0000000..c34f96f
--- /dev/null
+++ b/my-no-sql-tests/src/test_new_enum_cases_added/test_enum_with_added_new_type.rs
@@ -0,0 +1,55 @@
+use my_no_sql_macros::*;
+
+use serde::*;
+
+#[enum_of_my_no_sql_entity(table_name:"Test", generate_unwraps)]
+pub enum MyNoSqlEnumEntityTestVer1 {
+    Case1(Struct1),
+}
+
+#[enum_of_my_no_sql_entity(table_name:"Test", generate_unwraps)]
+pub enum MyNoSqlEnumEntityTestVer2 {
+    Case1(Struct1),
+    Case2(Struct2),
+}
+
+#[enum_model(partition_key:"pk1", row_key: "rk1")]
+#[derive(Serialize, Deserialize, Clone)]
+pub struct Struct1 {
+    pub field1: String,
+    pub field2: i32,
+}
+
+#[enum_model(partition_key:"pk2", row_key: "rk2")]
+#[derive(Serialize, Deserialize, Clone)]
+pub struct Struct2 {
+    pub field3: String,
+    pub field4: i32,
+}
+
+#[cfg(test)]
+mod tests {
+
+    use my_no_sql_sdk::abstractions::MyNoSqlEntitySerializer;
+
+    use super::*;
+
+    #[test]
+    fn test() {
+        let model_ver2 = MyNoSqlEnumEntityTestVer2::Case2(Struct2 {
+            time_stamp: Default::default(),
+            field3: "field3".to_string(),
+            field4: 4,
+        });
+
+        let result = model_ver2.serialize_entity();
+
+        let model_ver1 = MyNoSqlEnumEntityTestVer1::deserialize_entity(result.as_slice());
+
+        assert!(model_ver1.is_err());
+
+        if let Err(err) = model_ver1 {
+            println!("{}", err);
+        }
+    }
+}
diff --git a/my-no-sql-tests/src/test_same_timestamp.rs b/my-no-sql-tests/src/test_same_timestamp.rs
new file mode 100644
index 0000000..e61af0b
--- /dev/null
+++ b/my-no-sql-tests/src/test_same_timestamp.rs
@@ -0,0 +1,42 @@
+use my_no_sql_macros::my_no_sql_entity;
+use serde::*;
+
+#[my_no_sql_entity(table_name:"test-table", with_expires:true)]
+#[derive(Debug, Serialize, Deserialize)]
+pub struct MyEntity {
+    pub ts: String,
+}
+
+#[cfg(test)]
+mod tests {
+    use std::time::Duration;
+
+    use my_no_sql_sdk::{
+        abstractions::MyNoSqlEntitySerializer,
+        core::rust_extensions::date_time::DateTimeAsMicroseconds,
+    };
+
+    use super::MyEntity;
+
+    #[test]
+    fn test() {
+        let entity = MyEntity {
+            partition_key: "test".to_string(),
+            row_key: "test".to_string(),
+            time_stamp: Default::default(),
+            expires: DateTimeAsMicroseconds::now()
+                .add(Duration::from_secs(5))
+                .into(),
+            ts: "str".to_string(),
+        };
+
+        let result = entity.serialize_entity();
+
+        let result = MyEntity::deserialize_entity(&result).unwrap();
+
+        assert_eq!(entity.partition_key.as_str(), result.partition_key.as_str());
+        assert_eq!(entity.row_key.as_str(), result.row_key.as_str());
+        assert_eq!(entity.time_stamp, result.time_stamp);
+        assert_eq!(entity.ts, result.ts);
+    }
+}
diff --git a/my-no-sql-tests/src/tests_from_real_life.rs b/my-no-sql-tests/src/tests_from_real_life.rs
new file mode 100644
index 0000000..1a1788d
--- /dev/null
+++ b/my-no-sql-tests/src/tests_from_real_life.rs
@@ -0,0 +1,44 @@
+use my_no_sql_macros::my_no_sql_entity;
+use serde::*;
+
+#[my_no_sql_entity("payout-withdrawal-settings")]
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(rename_all = "PascalCase")]
+pub struct PayoutWithdrawalSettingsMyNoSqlEntity {
+    pub value: f64,
+    pub currency: String,
+}
+
+#[cfg(test)]
+mod tests {
+
+    use my_no_sql_sdk::{
+        abstractions::MyNoSqlEntitySerializer, core::rust_extensions::date_time::DateTimeStruct,
+    };
+
+    use super::PayoutWithdrawalSettingsMyNoSqlEntity;
+
+    #[test]
+    fn test() {
+        let src = "{\"PartitionKey\":\"bank-transfer\",\"RowKey\":\"max\",\"TimeStamp\":\"2024-11-29T14:59:15.6145\",\"Value\":15000.0,\"Currency\":\"USD\"}";
+
+        let entity =
+            PayoutWithdrawalSettingsMyNoSqlEntity::deserialize_entity(src.as_bytes()).unwrap();
+
+        assert_eq!(entity.value, 15000.0);
+        assert_eq!(entity.currency, "USD");
+        assert_eq!(entity.partition_key, "bank-transfer");
+        assert_eq!(entity.row_key, "max");
+
+        let dt_struct: DateTimeStruct = entity.time_stamp.to_date_time().into();
+
+        assert_eq!(dt_struct.year, 2024);
+        assert_eq!(dt_struct.month, 11);
+        assert_eq!(dt_struct.day, 29);
+
+        assert_eq!(dt_struct.time.hour, 14);
+        assert_eq!(dt_struct.time.min, 59);
+        assert_eq!(dt_struct.time.sec, 15);
+        assert_eq!(dt_struct.time.micros, 614500);
+    }
+}