Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 45 additions & 24 deletions codegen/src/worktable/generator/queries/delete.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,21 +22,62 @@ impl Generator {
quote! {}
};
let full_row_delete = self.gen_full_row_delete();
let full_row_delete_without_lock = self.gen_full_row_delete_without_lock();

Ok(quote! {
impl #table_ident {
#full_row_delete
#full_row_delete_without_lock
#custom_deletes
}
})
}

/// Emits the locking `delete` method for the generated table type.
///
/// The produced method awaits any lock already held on the primary key,
/// installs a fresh lock for the duration of the removal, runs the shared
/// delete logic, and finally releases the lock and drops its map entry.
fn gen_full_row_delete(&mut self) -> TokenStream {
    let name_gen = WorktableNameGenerator::from_table_name(self.name.to_string());
    let pk_type = name_gen.get_primary_key_type_ident();
    let lock_type = name_gen.get_lock_type_ident();
    let body = self.gen_delete_logic();

    quote! {
        pub async fn delete(&self, pk: #pk_type) -> core::result::Result<(), WorkTableError> {
            // Wait until every lock currently held on this key is released.
            if let Some(lock) = self.0.lock_map.get(&pk) {
                lock.lock_await().await;
            }

            // Register a fresh lock (with `None` state) for this delete.
            let lock_id = self.0.lock_map.next_id();
            let lock = std::sync::Arc::new(#lock_type::with_lock(lock_id.into()));
            self.0.lock_map.insert(pk.clone(), lock.clone());

            #body

            // Release the lock and remove its entry from the lock map.
            lock.unlock();
            self.0.lock_map.remove(&pk);

            core::result::Result::Ok(())
        }
    }
}

/// Emits `delete_without_lock`, a variant of `delete` that bypasses the
/// lock-map protocol entirely and runs only the shared delete logic.
fn gen_full_row_delete_without_lock(&mut self) -> TokenStream {
    let name_gen = WorktableNameGenerator::from_table_name(self.name.to_string());
    let pk_type = name_gen.get_primary_key_type_ident();
    let body = self.gen_delete_logic();

    quote! {
        pub async fn delete_without_lock(&self, pk: #pk_type) -> core::result::Result<(), WorkTableError> {
            #body
            core::result::Result::Ok(())
        }
    }
}

fn gen_delete_logic(&self) -> TokenStream {
let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string());
let pk_ident = name_generator.get_primary_key_type_ident();
let secondary_events_ident = name_generator.get_space_secondary_index_events_ident();

let delete_logic = if self.is_persist {
let process = if self.is_persist {
quote! {
let secondary_keys_events = self.0.indexes.delete_row_cdc(row, link)?;
let (_, primary_key_events) = TableIndexCdc::remove_cdc(&self.0.pk_map, pk.clone(), link);
Expand All @@ -60,34 +101,14 @@ impl Generator {
}
};

let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string());
let lock_ident = name_generator.get_lock_type_ident();

quote! {
pub async fn delete(&self, pk: #pk_ident) -> core::result::Result<(), WorkTableError> {

if let Some(lock) = self.0.lock_map.get(&pk) {
lock.lock_await().await; // Waiting for all locks released
}

let lock_id = self.0.lock_map.next_id();
let lock = std::sync::Arc::new(#lock_ident::with_lock(lock_id.into())); //Creates new LockType with None
self.0.lock_map.insert(pk.clone(), lock.clone()); // adds LockType to LockMap

let link = self.0
let link = self.0
.pk_map
.get(&pk)
.map(|v| v.get().value)
.ok_or(WorkTableError::NotFound)?;

let row = self.select(pk.clone()).unwrap();
#delete_logic

lock.unlock(); // Releases locks
self.0.lock_map.remove(&pk); // Removes locks

core::result::Result::Ok(())
}
let row = self.select(pk.clone()).unwrap();
#process
}
}

Expand Down
1 change: 1 addition & 0 deletions codegen/src/worktable/generator/queries/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,5 @@ mod delete;
mod locks;
mod select;
pub mod r#type;
mod unsized_;
mod update;
100 changes: 100 additions & 0 deletions codegen/src/worktable/generator/queries/unsized_.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
use proc_macro2::{Ident, Span, TokenStream};
use quote::quote;

use crate::name_generator::WorktableNameGenerator;
use crate::worktable::generator::Generator;

impl Generator {
    /// Generates the impls that are only needed for tables containing
    /// unsized (`String`) columns. For fully sized tables this expands
    /// to an empty token stream.
    pub fn gen_unsized_impls(&self) -> TokenStream {
        if self.columns.is_sized {
            quote! {}
        } else {
            let unsized_field_len_fns = self.gen_get_unsized_field_len_wt_fn();
            let unsized_field_len_query_fns = self.gen_get_unsized_field_len_query_fn();
            quote! {
                #unsized_field_len_fns
                #unsized_field_len_query_fns
            }
        }
    }

    /// Emits a `get_<field>_size` accessor on the worktable for every
    /// `String` column. Each accessor resolves `link` against the data
    /// pages and returns the stored field's length.
    fn gen_get_unsized_field_len_wt_fn(&self) -> TokenStream {
        let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string());
        let table_ident = name_generator.get_work_table_ident();

        // One accessor per unsized (`String`) column.
        let unsized_fields: Vec<_> = self
            .columns
            .columns_map
            .iter()
            .filter(|(_, ty)| ty.to_string() == "String")
            .map(|(field, _)| {
                let fn_ident = Ident::new(&format!("get_{}_size", field), Span::call_site());
                quote! {
                    fn #fn_ident(&self, link: Link) -> core::result::Result<usize, WorkTableError> {
                        self.0.data
                            .with_ref(link, |row_ref| row_ref.inner.#field.len())
                            .map_err(WorkTableError::PagesError)
                    }
                }
            })
            .collect();

        quote! {
            impl #table_ident {
                #(#unsized_fields)*
            }
        }
    }

    /// Emits `get_<column>_size` helpers on every archived update-query
    /// type that writes at least one `String` column, so the incoming
    /// value's length can be inspected before the update is applied.
    /// Returns an empty stream when the table declares no queries.
    fn gen_get_unsized_field_len_query_fn(&self) -> TokenStream {
        let queries = match &self.queries {
            Some(q) => q,
            None => return quote! {},
        };

        let query_impls: Vec<_> = queries
            .updates
            .iter()
            // Only update queries touching at least one `String` column
            // need size helpers.
            .filter(|(_, op)| {
                op.columns.iter().any(|c| {
                    self.columns
                        .columns_map
                        .get(c)
                        .expect("update query must only reference declared columns")
                        .to_string()
                        == "String"
                })
            })
            .map(|(name, op)| {
                let archived_ident =
                    Ident::new(&format!("Archived{}Query", name), Span::call_site());
                let unsized_fields: Vec<_> = op
                    .columns
                    .iter()
                    .filter(|c| {
                        self.columns
                            .columns_map
                            .get(c)
                            .expect("update query must only reference declared columns")
                            .to_string()
                            == "String"
                    })
                    .map(|c| {
                        let fn_ident =
                            Ident::new(&format!("get_{}_size", c), Span::call_site());
                        quote! {
                            pub fn #fn_ident(&self) -> usize {
                                self.#c.len()
                            }
                        }
                    })
                    .collect();

                quote! {
                    impl #archived_ident {
                        #(#unsized_fields)*
                    }
                }
            })
            .collect();

        quote! {
            #(#query_impls)*
        }
    }
}
Loading
Loading