diff --git a/README.md b/README.md index a631fb93..9a17a0df 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,10 @@ KERIOX is an open-source Rust implementation of the [ Key Event Receipt Infrastr KERI provides the same security and verifiability properties for transactions as a blockchain or distributed ledger can, without the overhead of requiring an absolute global ordering of transactions. Because of this, there is no need for a canonical chain and thus there is no "KERI Chain" or "KERI Network". KERI Identifiers can be generated independently in a self-sovereign and privacy-preserving manner and are secured via a self-certifying post-quantum resistant key management scheme based on blinded pre-rotation, auditable and flexible key events and a distributed conflict resolution algorithm called KAACE. +## Architecture + +KERIOX is designed around pluggable abstractions that allow it to run in diverse environments. The `EventDatabase` trait abstracts storage so that backends can be swapped at compile time; `redb` is the default (feature-flagged as `storage-redb`), and an in-memory implementation is available for testing or custom backends. Notification dispatch is also pluggable via `NotificationBus`, which supports injectable dispatch strategies — such as SQS-backed message routing — suitable for serverless environments. The `KeriRuntime` struct bundles the processor, storage, escrows, and notification bus into a single composable unit, enabling thin Lambda handlers or other lightweight entry points. + ## License EUPL 1.2 @@ -39,3 +43,4 @@ This repository provides the implementation of the KERI protocol. 
[`keriox_core` - [Witness](./components/witness): the KERI Witness - [Watcher](./components/watcher): the KERI Watcher - [Controller](./components/controller): the client for accessing the infrastructure +- [SDK](./keriox_sdk): high-level SDK providing `KeriRuntime` and `Controller` for KERI+TEL operations diff --git a/components/controller/Cargo.toml b/components/controller/Cargo.toml index f03d92b1..457b1daf 100644 --- a/components/controller/Cargo.toml +++ b/components/controller/Cargo.toml @@ -10,6 +10,9 @@ repository.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] +default = ["storage-redb"] +storage-redb = ["keri-core/storage-redb", "teliox/storage-redb"] +storage-postgres = ["keri-core/storage-postgres", "teliox/storage-postgres"] query_cache = ["rusqlite"] [dependencies] @@ -30,6 +33,23 @@ async-trait = "0.1.57" [dev-dependencies] witness = { path = "../witness" } tempfile = { version = "3.1" } +sqlx = { version = "0.8", features = ["postgres", "runtime-tokio"] } + +[[test]] +name = "test_kel_managing_postgres" +required-features = ["storage-postgres"] + +[[test]] +name = "test_tel_managing_postgres" +required-features = ["storage-postgres"] + +[[test]] +name = "test_group_incept_postgres" +required-features = ["storage-postgres"] + +[[test]] +name = "test_delegated_incept_postgres" +required-features = ["storage-postgres", "query_cache"] [package.metadata.release] publish = false diff --git a/components/controller/src/communication.rs b/components/controller/src/communication.rs index fe938f0e..17101ffd 100644 --- a/components/controller/src/communication.rs +++ b/components/controller/src/communication.rs @@ -3,8 +3,10 @@ use std::sync::Arc; use futures::future::join_all; use keri_core::{ actor::{error::ActorError, parse_event_stream, possible_response::PossibleResponse}, + database::{EscrowCreator, EventDatabase}, event_message::signed_event_message::{Message, Notice, Op, 
SignedEventMessage}, oobi::{EndRole, LocationScheme, Oobi, Scheme}, + oobi_manager::storage::OobiStorageBackend, prefix::{BasicPrefix, IdentifierPrefix}, query::{ mailbox::SignedMailboxQuery, @@ -12,7 +14,7 @@ use keri_core::{ }, transport::{Transport, TransportError}, }; -use teliox::{event::verifiable_event::VerifiableEvent, query::SignedTelQuery}; +use teliox::{database::TelEventDatabase, event::verifiable_event::VerifiableEvent, query::SignedTelQuery}; use crate::{ error::ControllerError, @@ -53,15 +55,25 @@ impl From for SendingError { } } -pub struct Communication { - pub events: Arc, +pub struct Communication +where + D: EventDatabase + EscrowCreator + 'static, + T: TelEventDatabase + 'static, + S: OobiStorageBackend, +{ + pub events: Arc>, pub transport: Box, pub tel_transport: Box, } -impl Communication { +impl Communication +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ pub fn new( - known_events: Arc, + known_events: Arc>, transport: Box + Send + Sync>, tel_transport: Box, ) -> Self { diff --git a/components/controller/src/controller/mod.rs b/components/controller/src/controller/mod.rs index ab4106c7..376d3e9a 100644 --- a/components/controller/src/controller/mod.rs +++ b/components/controller/src/controller/mod.rs @@ -1,12 +1,15 @@ use std::sync::Arc; use keri_core::{ + database::{EscrowCreator, EventDatabase}, event_message::signature::Signature, oobi::LocationScheme, + oobi_manager::storage::OobiStorageBackend, prefix::{BasicPrefix, IdentifierPrefix, SelfSigningPrefix}, processor::validator::VerificationError, state::IdentifierState, }; +use teliox::database::TelEventDatabase; #[cfg(feature = "query_cache")] use crate::identifier::mechanics::cache::IdentifierCache; @@ -19,48 +22,24 @@ use crate::{ }; pub mod verifying; -pub struct Controller { - pub known_events: Arc, - pub communication: Arc, +pub struct Controller +where + D: EventDatabase + EscrowCreator 
+ 'static, + T: TelEventDatabase + 'static, + S: OobiStorageBackend, +{ + pub known_events: Arc>, + pub communication: Arc>, #[cfg(feature = "query_cache")] pub cache: Arc, } -impl Controller { - pub fn new(config: ControllerConfig) -> Result { - let ControllerConfig { - db_path, - initial_oobis, - escrow_config, - transport, - tel_transport, - } = config; - std::fs::create_dir_all(&db_path).unwrap(); - let mut query_db_path = db_path.clone(); - query_db_path.push("query_cache"); - - let events = Arc::new(KnownEvents::new(db_path, escrow_config)?); - - #[cfg(feature = "query_cache")] - let query_cache = Arc::new(IdentifierCache::new(&query_db_path)?); - let comm = Arc::new(Communication { - events: events.clone(), - transport, - tel_transport, - }); - - let controller = Self { - known_events: events.clone(), - communication: comm, - #[cfg(feature = "query_cache")] - cache: query_cache, - }; - if !initial_oobis.is_empty() { - async_std::task::block_on(controller.setup_witnesses(&initial_oobis)).unwrap(); - } - Ok(controller) - } - +impl Controller +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ pub async fn incept( &self, public_keys: Vec, @@ -77,7 +56,7 @@ impl Controller { &self, event: &[u8], sig: &SelfSigningPrefix, - ) -> Result { + ) -> Result, ControllerError> { let initialized_id = self.known_events.finalize_inception(event, sig).unwrap(); Ok(Identifier::new( initialized_id, @@ -111,3 +90,13 @@ impl Controller { self.known_events.get_state(id) } } + +#[cfg(feature = "storage-redb")] +mod redb; +#[cfg(feature = "storage-redb")] +pub use redb::{RedbController, RedbIdentifier}; + +#[cfg(feature = "storage-postgres")] +mod postgres; +#[cfg(feature = "storage-postgres")] +pub use postgres::{PostgresController, PostgresIdentifier}; diff --git a/components/controller/src/controller/postgres.rs b/components/controller/src/controller/postgres.rs new file mode 100644 index 
00000000..2710b7f5 --- /dev/null +++ b/components/controller/src/controller/postgres.rs @@ -0,0 +1,64 @@ +use std::sync::Arc; + +use keri_core::database::postgres::PostgresDatabase; +use keri_core::database::postgres::oobi_storage::PostgresOobiStorage; +use teliox::database::postgres::PostgresTelDatabase; + +#[cfg(feature = "query_cache")] +use crate::identifier::mechanics::cache::IdentifierCache; +use crate::{ + communication::Communication, + config::ControllerConfig, + error::ControllerError, + identifier::Identifier, + known_events::PostgresKnownEvents, +}; + +use super::Controller; + +pub type PostgresController = Controller; +pub type PostgresIdentifier = Identifier; + +impl PostgresController { + pub async fn new_postgres( + database_url: &str, + config: ControllerConfig, + ) -> Result { + let ControllerConfig { + db_path: _db_path, + initial_oobis, + escrow_config, + transport, + tel_transport, + } = config; + + #[cfg(feature = "query_cache")] + let mut query_db_path = _db_path.clone(); + #[cfg(feature = "query_cache")] + query_db_path.push("query_cache"); + + let events = Arc::new( + PostgresKnownEvents::with_postgres(database_url, escrow_config).await?, + ); + + #[cfg(feature = "query_cache")] + let query_cache = Arc::new(IdentifierCache::new(&query_db_path)?); + + let comm = Arc::new(Communication { + events: events.clone(), + transport, + tel_transport, + }); + + let controller = Self { + known_events: events.clone(), + communication: comm, + #[cfg(feature = "query_cache")] + cache: query_cache, + }; + if !initial_oobis.is_empty() { + controller.setup_witnesses(&initial_oobis).await.unwrap(); + } + Ok(controller) + } +} diff --git a/components/controller/src/controller/redb.rs b/components/controller/src/controller/redb.rs new file mode 100644 index 00000000..05467544 --- /dev/null +++ b/components/controller/src/controller/redb.rs @@ -0,0 +1,58 @@ +use std::sync::Arc; + +use keri_core::database::redb::RedbDatabase; +use 
keri_core::oobi_manager::storage::RedbOobiStorage; +use teliox::database::redb::RedbTelDatabase; + +#[cfg(feature = "query_cache")] +use crate::identifier::mechanics::cache::IdentifierCache; +use crate::{ + communication::Communication, + config::ControllerConfig, + error::ControllerError, + identifier::Identifier, + known_events::RedbKnownEvents, +}; + +use super::Controller; + +pub type RedbController = Controller; +pub type RedbIdentifier = Identifier; + +impl RedbController { + pub fn new(config: ControllerConfig) -> Result { + let ControllerConfig { + db_path, + initial_oobis, + escrow_config, + transport, + tel_transport, + } = config; + std::fs::create_dir_all(&db_path).unwrap(); + #[cfg(feature = "query_cache")] + let mut query_db_path = db_path.clone(); + #[cfg(feature = "query_cache")] + query_db_path.push("query_cache"); + + let events = Arc::new(RedbKnownEvents::with_redb(db_path, escrow_config)?); + + #[cfg(feature = "query_cache")] + let query_cache = Arc::new(IdentifierCache::new(&query_db_path)?); + let comm = Arc::new(Communication { + events: events.clone(), + transport, + tel_transport, + }); + + let controller = Self { + known_events: events.clone(), + communication: comm, + #[cfg(feature = "query_cache")] + cache: query_cache, + }; + if !initial_oobis.is_empty() { + async_std::task::block_on(controller.setup_witnesses(&initial_oobis)).unwrap(); + } + Ok(controller) + } +} diff --git a/components/controller/src/controller/verifying.rs b/components/controller/src/controller/verifying.rs index 67c66310..209058b8 100644 --- a/components/controller/src/controller/verifying.rs +++ b/components/controller/src/controller/verifying.rs @@ -1,14 +1,22 @@ use cesrox::{parse_many, payload::Payload, ParsedData}; use itertools::Itertools; use keri_core::{ + database::{EscrowCreator, EventDatabase}, event_message::signature::{get_signatures, Signature}, oobi::Oobi, + oobi_manager::storage::OobiStorageBackend, processor::validator::{EventValidator, 
VerificationError}, }; +use teliox::database::TelEventDatabase; use crate::{error::ControllerError, known_events::KnownEvents}; -impl KnownEvents { +impl KnownEvents +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ pub fn verify(&self, data: &[u8], signature: &Signature) -> Result<(), VerificationError> { let verifier = EventValidator::new(self.storage.events_db.clone()); verifier.verify(data, signature) diff --git a/components/controller/src/error.rs b/components/controller/src/error.rs index 957afa1d..b7f936b5 100644 --- a/components/controller/src/error.rs +++ b/components/controller/src/error.rs @@ -1,7 +1,7 @@ use keri_core::{ - actor::prelude::VersionError, database::redb::RedbError, - event_message::cesr_adapter::ParseError, oobi::Scheme, prefix::IdentifierPrefix, - processor::validator::VerificationError, + actor::prelude::VersionError, + event_message::cesr_adapter::ParseError, oobi::Scheme, oobi::error::OobiError, + prefix::IdentifierPrefix, processor::validator::VerificationError, }; use thiserror::Error; @@ -12,8 +12,8 @@ use crate::{ #[derive(Error, Debug)] pub enum ControllerError { - #[error("Redb error: {0}")] - RedbError(#[from] RedbError), + #[error("Database error: {0}")] + DatabaseError(String), #[cfg(feature = "query_cache")] #[error("SQL error: {0}")] @@ -58,9 +58,18 @@ pub enum ControllerError { #[error("Error: {0}")] OtherError(String), + #[error("Oobi error: {0}")] + OobiError(String), + #[error(transparent)] Mechanic(#[from] MechanicsError), #[error("Watcher response error: {0}")] WatcherResponseError(#[from] WatcherResponseError), } + +impl From for ControllerError { + fn from(e: OobiError) -> Self { + ControllerError::OobiError(e.to_string()) + } +} diff --git a/components/controller/src/identifier/mechanics/broadcast.rs b/components/controller/src/identifier/mechanics/broadcast.rs index e132af4c..ef819090 100644 --- 
a/components/controller/src/identifier/mechanics/broadcast.rs +++ b/components/controller/src/identifier/mechanics/broadcast.rs @@ -1,11 +1,13 @@ use futures::future::join_all; use keri_core::{ actor::prelude::SelfAddressingIdentifier, - database::EventDatabase, + database::{EscrowCreator, EventDatabase}, event_message::signed_event_message::{Message, Notice}, oobi::Scheme, + oobi_manager::storage::OobiStorageBackend, prefix::IdentifierPrefix, }; +use teliox::database::TelEventDatabase; use crate::{communication::SendingError, identifier::Identifier}; @@ -19,7 +21,12 @@ pub enum BroadcastingError { CacheSavingError, } -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ /// Send new receipts obtained via [`Self::finalize_query`] to specified witnesses. /// Returns number of new receipts sent per witness or first error. pub async fn broadcast_receipts( @@ -94,7 +101,7 @@ mod test { let seed = "AK8F6AAiYDpXlWdj2O5F5-6wNCCNJh2A4XOlqwR_HwwH"; let witness_root = Builder::new().prefix("test-wit1-db").tempdir().unwrap(); Arc::new( - WitnessListener::setup( + WitnessListener::setup_with_redb( url::Url::parse("http://witness1/").unwrap(), witness_root.path(), Some(seed.to_string()), @@ -107,7 +114,7 @@ mod test { let seed = "AJZ7ZLd7unQ4IkMUwE69NXcvDO9rrmmRH_Xk3TPu9BpP"; let witness_root = Builder::new().prefix("test-wit2-db").tempdir().unwrap(); Arc::new( - WitnessListener::setup( + WitnessListener::setup_with_redb( url::Url::parse("http://witness2/").unwrap(), witness_root.path(), Some(seed.to_string()), diff --git a/components/controller/src/identifier/mechanics/delegate.rs b/components/controller/src/identifier/mechanics/delegate.rs index 250788d8..0546ab58 100644 --- a/components/controller/src/identifier/mechanics/delegate.rs +++ b/components/controller/src/identifier/mechanics/delegate.rs @@ -1,4 +1,6 @@ use 
keri_core::actor::prelude::HashFunctionCode; +use keri_core::database::{EscrowCreator, EventDatabase}; +use keri_core::oobi_manager::storage::OobiStorageBackend; use keri_core::{ actor::prelude::SerializationFormats, event::{ @@ -8,12 +10,18 @@ use keri_core::{ event_message::msg::KeriEvent, mailbox::exchange::{Exchange, ExchangeMessage, ForwardTopic, FwdArgs}, }; +use teliox::database::TelEventDatabase; use crate::identifier::Identifier; use super::MechanicsError; -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ /// Generates delegating event (ixn) and exchange event that contains /// delegated event which will be send to delegate after ixn finalization. pub fn delegate( diff --git a/components/controller/src/identifier/mechanics/group.rs b/components/controller/src/identifier/mechanics/group.rs index f4299392..0df68a69 100644 --- a/components/controller/src/identifier/mechanics/group.rs +++ b/components/controller/src/identifier/mechanics/group.rs @@ -1,5 +1,6 @@ use keri_core::{ actor::{event_generator, MaterialPath}, + database::{EscrowCreator, EventDatabase}, event::{sections::threshold::SignatureThreshold, KeyEvent}, event_message::{ cesr_adapter::{parse_event_type, EventType}, @@ -9,14 +10,21 @@ use keri_core::{ EventTypeTag, }, mailbox::exchange::{Exchange, ForwardTopic, SignedExchange}, + oobi_manager::storage::OobiStorageBackend, prefix::{BasicPrefix, IdentifierPrefix, IndexedSignature, SelfSigningPrefix}, }; +use teliox::database::TelEventDatabase; use crate::identifier::Identifier; use super::MechanicsError; -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ /// Init group identifier /// /// Returns serialized group icp and list of exchange messages to sign. 
diff --git a/components/controller/src/identifier/mechanics/kel_managing.rs b/components/controller/src/identifier/mechanics/kel_managing.rs index c3bc2a9e..f797235c 100644 --- a/components/controller/src/identifier/mechanics/kel_managing.rs +++ b/components/controller/src/identifier/mechanics/kel_managing.rs @@ -1,5 +1,6 @@ use keri_core::{ actor::{event_generator, prelude::SelfAddressingIdentifier}, + database::{EscrowCreator, EventDatabase}, event::{ event_data::EventData, sections::{seal::Seal, KeyConfig}, @@ -11,16 +12,23 @@ use keri_core::{ signed_event_message::{Message, Notice}, }, oobi::{LocationScheme, Scheme}, + oobi_manager::storage::OobiStorageBackend, prefix::{BasicPrefix, IdentifierPrefix, IndexedSignature, SelfSigningPrefix}, }; use keri_core::prefix::CesrPrimitive; +use teliox::database::TelEventDatabase; use crate::identifier::Identifier; use super::MechanicsError; -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ /// Generate and return rotation event for Identifier pub async fn rotate( &self, diff --git a/components/controller/src/identifier/mechanics/mailbox.rs b/components/controller/src/identifier/mechanics/mailbox.rs index 1c774a7b..de750f37 100644 --- a/components/controller/src/identifier/mechanics/mailbox.rs +++ b/components/controller/src/identifier/mechanics/mailbox.rs @@ -1,5 +1,6 @@ use keri_core::{ actor::event_generator, + database::{EscrowCreator, EventDatabase}, error::Error, event::{ event_data::EventData, @@ -9,14 +10,21 @@ use keri_core::{ Message, Notice, SignedEventMessage, SignedNontransferableReceipt, }, mailbox::{exchange::ForwardTopic, MailboxResponse}, + oobi_manager::storage::OobiStorageBackend, prefix::IdentifierPrefix, }; +use teliox::database::TelEventDatabase; use crate::{error::ControllerError, identifier::Identifier, mailbox_updating::ActionRequired}; use super::{MechanicsError, 
ResponseProcessingError}; -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ pub(crate) async fn mailbox_response( &self, recipient: &IdentifierPrefix, diff --git a/components/controller/src/identifier/mechanics/notify_witness.rs b/components/controller/src/identifier/mechanics/notify_witness.rs index 6761f5e0..6dcda1b9 100644 --- a/components/controller/src/identifier/mechanics/notify_witness.rs +++ b/components/controller/src/identifier/mechanics/notify_witness.rs @@ -1,10 +1,18 @@ use futures::future::join_all; +use keri_core::database::{EscrowCreator, EventDatabase}; +use keri_core::oobi_manager::storage::OobiStorageBackend; +use teliox::database::TelEventDatabase; use crate::identifier::Identifier; use super::MechanicsError; -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ pub async fn notify_witnesses(&mut self) -> Result { let mut n = 0; let to_notify = self.to_notify.iter().filter_map(|ev| { diff --git a/components/controller/src/identifier/mechanics/query_mailbox.rs b/components/controller/src/identifier/mechanics/query_mailbox.rs index bde87bed..8f0f0f10 100644 --- a/components/controller/src/identifier/mechanics/query_mailbox.rs +++ b/components/controller/src/identifier/mechanics/query_mailbox.rs @@ -1,5 +1,7 @@ use keri_core::actor::possible_response::PossibleResponse; use keri_core::actor::prelude::HashFunctionCode; +use keri_core::database::{EscrowCreator, EventDatabase}; +use keri_core::oobi_manager::storage::OobiStorageBackend; use keri_core::{ actor::prelude::SerializationFormats, oobi::Scheme, @@ -9,6 +11,7 @@ use keri_core::{ query_event::SignedQuery, }, }; +use teliox::database::TelEventDatabase; #[cfg(not(feature = "query_cache"))] use crate::mailbox_updating::MailboxReminder; @@ -32,7 +35,12 
@@ pub enum ResponseProcessingError { Delegate(keri_core::error::Error), } -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ /// Generates query message of route `mbx` to query own identifier mailbox. pub fn query_mailbox( &self, diff --git a/components/controller/src/identifier/mechanics/tel_managing.rs b/components/controller/src/identifier/mechanics/tel_managing.rs index ec2259a8..6edf545d 100644 --- a/components/controller/src/identifier/mechanics/tel_managing.rs +++ b/components/controller/src/identifier/mechanics/tel_managing.rs @@ -1,12 +1,15 @@ use keri_core::{ + database::{EscrowCreator, EventDatabase}, event::{ sections::seal::{EventSeal, Seal}, KeyEvent, }, event_message::{msg::TypedEvent, EventTypeTag}, + oobi_manager::storage::OobiStorageBackend, prefix::{IdentifierPrefix, SelfSigningPrefix}, }; use teliox::{ + database::TelEventDatabase, event::verifiable_event::VerifiableEvent, seal::{AttachedSourceSeal, EventSourceSeal}, }; @@ -15,7 +18,12 @@ use crate::{error::ControllerError, identifier::Identifier}; use super::MechanicsError; -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ /// Generate `vcp` event and `ixn` event with seal to `vcp`. To finalize /// the process, `ixn` need to be signed confirmed with `finalize_event` /// function. 
diff --git a/components/controller/src/identifier/mechanics/watcher_configuration.rs b/components/controller/src/identifier/mechanics/watcher_configuration.rs index 5fa85a28..a8113ecc 100644 --- a/components/controller/src/identifier/mechanics/watcher_configuration.rs +++ b/components/controller/src/identifier/mechanics/watcher_configuration.rs @@ -1,16 +1,24 @@ use keri_core::{ actor::event_generator, + database::{EscrowCreator, EventDatabase}, event_message::cesr_adapter::{parse_event_type, EventType}, oobi::{Role, Scheme}, + oobi_manager::storage::OobiStorageBackend, prefix::{IdentifierPrefix, SelfSigningPrefix}, query::reply_event::{ReplyEvent, ReplyRoute}, }; +use teliox::database::TelEventDatabase; use crate::identifier::Identifier; use super::MechanicsError; -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ /// Generates reply event with `end_role_add` route. 
pub fn add_watcher(&self, watcher_id: IdentifierPrefix) -> Result { String::from_utf8( diff --git a/components/controller/src/identifier/mod.rs b/components/controller/src/identifier/mod.rs index 25e83160..aac979b9 100644 --- a/components/controller/src/identifier/mod.rs +++ b/components/controller/src/identifier/mod.rs @@ -5,15 +5,17 @@ use std::{ use keri_core::{ actor::prelude::SelfAddressingIdentifier, + database::{EscrowCreator, EventDatabase}, event::{event_data::EventData, sections::seal::EventSeal}, event_message::signed_event_message::{Notice, SignedEventMessage}, oobi::Oobi, + oobi_manager::storage::OobiStorageBackend, prefix::{BasicPrefix, IdentifierPrefix}, state::IdentifierState, }; #[cfg(feature = "query_cache")] use mechanics::cache::IdentifierCache; -use teliox::state::{vc_state::TelState, ManagerTelState}; +use teliox::{database::TelEventDatabase, state::{vc_state::TelState, ManagerTelState}}; use crate::{communication::Communication, error::ControllerError, known_events::KnownEvents}; @@ -25,11 +27,16 @@ pub mod query; pub mod signing; pub mod tel; -pub struct Identifier { +pub struct Identifier +where + D: EventDatabase + EscrowCreator + 'static, + T: TelEventDatabase + 'static, + S: OobiStorageBackend, +{ id: IdentifierPrefix, registry_id: Option, - pub known_events: Arc, - communication: Arc, + pub known_events: Arc>, + communication: Arc>, pub to_notify: Vec, #[cfg(feature = "query_cache")] query_cache: Arc, @@ -40,12 +47,17 @@ pub struct Identifier { cached_identifiers: Mutex>, } -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ pub fn new( id: IdentifierPrefix, registry_id: Option, - known_events: Arc, - communication: Arc, + known_events: Arc>, + communication: Arc>, #[cfg(feature = "query_cache")] db: Arc, ) -> Self { // Load events that need to be notified to witnesses diff --git 
a/components/controller/src/identifier/nontransferable.rs b/components/controller/src/identifier/nontransferable.rs index 15a572b1..bce26da9 100644 --- a/components/controller/src/identifier/nontransferable.rs +++ b/components/controller/src/identifier/nontransferable.rs @@ -9,25 +9,37 @@ use keri_core::{ possible_response::PossibleResponse, prelude::{HashFunctionCode, SerializationFormats}, }, + database::{EscrowCreator, EventDatabase}, event_message::{ msg::KeriEvent, signature::{Nontransferable, Signature}, timestamped::Timestamped, }, oobi::Scheme, + oobi_manager::storage::OobiStorageBackend, query::query_event::{LogsQueryArgs, QueryEvent, QueryRoute, SignedKelQuery}, }; -use teliox::query::{SignedTelQuery, TelQueryArgs, TelQueryEvent, TelQueryRoute}; +use teliox::{database::TelEventDatabase, query::{SignedTelQuery, TelQueryArgs, TelQueryEvent, TelQueryRoute}}; use super::mechanics::MechanicsError; -pub struct NontransferableIdentifier { +pub struct NontransferableIdentifier +where + D: EventDatabase + EscrowCreator + 'static, + T: TelEventDatabase + 'static, + S: OobiStorageBackend, +{ id: BasicPrefix, - communication: Arc, + communication: Arc>, } -impl NontransferableIdentifier { - pub fn new(public_key: BasicPrefix, communication: Arc) -> Self { +impl NontransferableIdentifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ + pub fn new(public_key: BasicPrefix, communication: Arc>) -> Self { Self { id: public_key, communication, diff --git a/components/controller/src/identifier/query.rs b/components/controller/src/identifier/query.rs index 5c1929bc..41d64fe9 100644 --- a/components/controller/src/identifier/query.rs +++ b/components/controller/src/identifier/query.rs @@ -6,8 +6,10 @@ use futures::future::join_all; use keri_core::actor::error::ActorError; use keri_core::actor::possible_response::PossibleResponse; use keri_core::actor::prelude::HashFunctionCode; +use 
keri_core::database::{EscrowCreator, EventDatabase}; use keri_core::error::Error; use keri_core::oobi::Scheme; +use keri_core::oobi_manager::storage::OobiStorageBackend; use keri_core::prefix::IndexedSignature; use keri_core::query::query_event::SignedKelQuery; use keri_core::{ @@ -16,6 +18,7 @@ use keri_core::{ prefix::{IdentifierPrefix, SelfSigningPrefix}, query::query_event::{LogsQueryArgs, QueryEvent, QueryRoute}, }; +use teliox::database::TelEventDatabase; use super::Identifier; @@ -39,7 +42,12 @@ pub enum WatcherResponseError { PoisonError, } -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ pub fn query_watchers( &self, about_who: &EventSeal, diff --git a/components/controller/src/identifier/signing.rs b/components/controller/src/identifier/signing.rs index 33dcc09f..e4bbcc60 100644 --- a/components/controller/src/identifier/signing.rs +++ b/components/controller/src/identifier/signing.rs @@ -1,15 +1,23 @@ use cesrox::ParsedData; use keri_core::{ + database::{EscrowCreator, EventDatabase}, event::sections::seal::EventSeal, event_message::signature::{Signature, SignerData}, + oobi_manager::storage::OobiStorageBackend, prefix::{IndexedSignature, SelfSigningPrefix}, }; +use teliox::database::TelEventDatabase; use crate::error::ControllerError; use super::Identifier; -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ pub fn sign_with_index( &self, signature: SelfSigningPrefix, diff --git a/components/controller/src/identifier/tel.rs b/components/controller/src/identifier/tel.rs index d73f135c..da39d8b6 100644 --- a/components/controller/src/identifier/tel.rs +++ b/components/controller/src/identifier/tel.rs @@ -1,10 +1,13 @@ use keri_core::actor::prelude::{HashFunctionCode, SelfAddressingIdentifier, 
SerializationFormats}; +use keri_core::database::{EscrowCreator, EventDatabase}; use keri_core::event::sections::seal::{EventSeal, Seal}; use keri_core::event::KeyEvent; use keri_core::event_message::msg::{KeriEvent, TypedEvent}; use keri_core::event_message::timestamped::Timestamped; use keri_core::event_message::EventTypeTag; +use keri_core::oobi_manager::storage::OobiStorageBackend; use keri_core::prefix::{IdentifierPrefix, IndexedSignature, SelfSigningPrefix}; +use teliox::database::TelEventDatabase; use teliox::event::verifiable_event::VerifiableEvent; use teliox::query::{SignedTelQuery, TelQueryArgs, TelQueryEvent, TelQueryRoute}; use teliox::seal::{AttachedSourceSeal, EventSourceSeal}; @@ -14,7 +17,12 @@ use crate::error::ControllerError; use super::mechanics::MechanicsError; use super::Identifier; -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ /// Generate `iss` event and `ixn` event with seal to `iss`. To finalize /// the process, `ixn` need to be signed confirmed with `finalize_event` /// function. 
diff --git a/components/controller/src/known_events.rs b/components/controller/src/known_events.rs index 5acfa3bc..aac24b98 100644 --- a/components/controller/src/known_events.rs +++ b/components/controller/src/known_events.rs @@ -2,10 +2,11 @@ use std::path::PathBuf; use std::sync::Arc; use keri_core::actor::parse_event_stream; -use keri_core::database::redb::{RedbDatabase, RedbError}; +use keri_core::database::{EscrowCreator, EventDatabase}; use keri_core::error::Error; use keri_core::event_message::signed_event_message::SignedNontransferableReceipt; use keri_core::oobi::LocationScheme; +use keri_core::oobi_manager::storage::OobiStorageBackend; use keri_core::prefix::{BasicPrefix, IdentifierPrefix, IndexedSignature, SelfSigningPrefix}; use keri_core::processor::escrow::partially_witnessed_escrow::PartiallyWitnessedEscrow; @@ -29,8 +30,8 @@ use keri_core::{ }, query::reply_event::{ReplyEvent, ReplyRoute, SignedReply}, }; -use teliox::database::redb::RedbTelDatabase; -use teliox::database::{EscrowDatabase, TelEventDatabase}; +use teliox::database::TelEventDatabase; +use teliox::database::TelEscrowDatabase; use teliox::processor::escrow::default_escrow_bus as tel_escrow_bus; use teliox::processor::storage::TelEventStorage; use teliox::tel::Tel; @@ -42,75 +43,64 @@ use crate::identifier::mechanics::MechanicsError; pub enum OobiRetrieveError { #[error("No oobi for {0} identifier")] MissingOobi(IdentifierPrefix, Option), - #[error(transparent)] - DbError(#[from] RedbError), + #[error("Database error: {0}")] + DbError(String), } -pub struct KnownEvents { - processor: BasicProcessor, - pub storage: Arc>, - pub oobi_manager: OobiManager, - pub partially_witnessed_escrow: Arc>, - pub tel: Arc>, +impl From for OobiRetrieveError { + fn from(e: keri_core::oobi::error::OobiError) -> Self { + OobiRetrieveError::DbError(e.to_string()) + } } -impl KnownEvents { - pub fn new(db_path: PathBuf, escrow_config: EscrowConfig) -> Result { - let event_database = { - let mut path = 
db_path.clone(); - path.push("events_database"); - Arc::new(RedbDatabase::new(&path)?) - }; - - let oobi_manager = OobiManager::new(event_database.clone()); +pub struct KnownEvents +where + D: EventDatabase + EscrowCreator + 'static, + T: TelEventDatabase + 'static, + S: OobiStorageBackend, +{ + processor: BasicProcessor, + pub storage: Arc>, + pub oobi_manager: OobiManager, + pub partially_witnessed_escrow: Arc>, + pub tel: Arc>, +} +impl KnownEvents +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ + pub fn new( + event_db: Arc, + oobi_storage: S, + tel_db: Arc, + tel_escrow_db: impl TelEscrowDatabase + 'static, + escrow_config: EscrowConfig, + ) -> Result { + let oobi_manager = OobiManager::with_storage(oobi_storage); let ( mut notification_bus, - ( - _out_of_order_escrow, - _partially_signed_escrow, - partially_witnessed_escrow, - _delegation_escrow, - _duplicates, - ), - ) = default_escrow_bus(event_database.clone(), escrow_config); - - let kel_storage = Arc::new(EventStorage::new(event_database.clone())); - - // Initiate tel and it's escrows - let tel_events_db = { - let mut path = db_path.clone(); - path.push("tel"); - path.push("events"); - Arc::new(RedbTelDatabase::new(&path)?) - }; - - let tel_escrow_db = { - let mut path = db_path.clone(); - path.push("tel"); - path.push("escrow"); - EscrowDatabase::new(&path).map_err(|e| ControllerError::OtherError(e.to_string()))? 
- }; - let (tel_bus, missing_issuer, _out_of_order, _missing_registy) = - tel_escrow_bus(tel_events_db.clone(), kel_storage.clone(), tel_escrow_db)?; - - let tel_storage = Arc::new(TelEventStorage::new(tel_events_db.clone())); + escrow_set, + ) = default_escrow_bus(event_db.clone(), escrow_config, None); + let kel_storage = Arc::new(EventStorage::new(event_db.clone())); + let (tel_bus, missing_issuer, _out_of_order, _missing_registry) = + tel_escrow_bus(tel_db.clone(), kel_storage.clone(), tel_escrow_db) + .map_err(|e| ControllerError::OtherError(e.to_string()))?; + let tel_storage = Arc::new(TelEventStorage::new(tel_db.clone())); let tel = Arc::new(Tel::new(tel_storage, kel_storage.clone(), Some(tel_bus))); - notification_bus.register_observer( missing_issuer.clone(), vec![JustNotification::KeyEventAdded], ); - - let controller = Self { - processor: BasicProcessor::new(event_database.clone(), Some(notification_bus)), + Ok(Self { + processor: BasicProcessor::new(event_db.clone(), Some(notification_bus)), storage: kel_storage, oobi_manager, - partially_witnessed_escrow, + partially_witnessed_escrow: escrow_set.partially_witnessed, tel, - }; - - Ok(controller) + }) } pub fn save(&self, message: &Message) -> Result<(), MechanicsError> { @@ -319,59 +309,6 @@ impl KnownEvents { } } - /// Generate and return rotation event for given identifier data - // pub fn rotate( - // &self, - // id: IdentifierPrefix, - // current_keys: Vec, - // new_next_keys: Vec, - // new_next_threshold: u64, - // witness_to_add: Vec, - // witness_to_remove: Vec, - // witness_threshold: u64, - // ) -> Result { - // let witnesses_to_add = witness_to_add - // .iter() - // .map(|wit| { - // if let IdentifierPrefix::Basic(bp) = &wit.eid { - // Ok(bp.clone()) - // } else { - // Err(ControllerError::WrongWitnessPrefixError) - // } - // }) - // .collect::, _>>()?; - - // let state = self - // .storage - // .get_state(&id) - // .ok_or(ControllerError::UnknownIdentifierError)?; - - // 
event_generator::rotate( - // state, - // current_keys, - // new_next_keys, - // new_next_threshold, - // witnesses_to_add, - // witness_to_remove, - // witness_threshold, - // ) - // .map_err(|e| ControllerError::EventGenerationError(e.to_string())) - // } - - /// Generate and return interaction event for given identifier data - // pub fn anchor( - // &self, - // id: IdentifierPrefix, - // payload: &[SelfAddressingIdentifier], - // ) -> Result { - // let state = self - // .storage - // .get_state(&id) - // .ok_or(ControllerError::UnknownIdentifierError)?; - // event_generator::anchor(state, payload) - // .map_err(|e| ControllerError::EventGenerationError(e.to_string())) - // } - /// Generate and return interaction event for given identifier data pub fn anchor_with_seal( &self, @@ -511,3 +448,73 @@ impl KnownEvents { .ok_or(MechanicsError::UnknownIdentifierError(id.clone())) } } + +#[cfg(feature = "storage-redb")] +use keri_core::database::redb::RedbDatabase; +#[cfg(feature = "storage-redb")] +use keri_core::oobi_manager::storage::RedbOobiStorage; +#[cfg(feature = "storage-redb")] +use teliox::database::redb::RedbTelDatabase; +#[cfg(feature = "storage-redb")] +use teliox::database::EscrowDatabase; + +#[cfg(feature = "storage-redb")] +pub type RedbKnownEvents = KnownEvents; + +#[cfg(feature = "storage-redb")] +impl RedbKnownEvents { + pub fn with_redb(db_path: PathBuf, escrow_config: EscrowConfig) -> Result { + let event_database = { + let mut path = db_path.clone(); + path.push("events_database"); + Arc::new(RedbDatabase::new(&path).map_err(|e| ControllerError::DatabaseError(e.to_string()))?) + }; + let oobi_storage = RedbOobiStorage::new(event_database.raw_db()) + .map_err(|e| ControllerError::DatabaseError(e.to_string()))?; + let tel_db = { + let mut path = db_path.clone(); + path.push("tel"); + path.push("events"); + Arc::new(RedbTelDatabase::new(&path).map_err(|e| ControllerError::OtherError(e.to_string()))?) 
+ }; + let tel_escrow_db = { + let mut path = db_path.clone(); + path.push("tel"); + path.push("escrow"); + EscrowDatabase::new(&path).map_err(|e| ControllerError::OtherError(e.to_string()))? + }; + Self::new(event_database, oobi_storage, tel_db, tel_escrow_db, escrow_config) + } +} + +#[cfg(feature = "storage-postgres")] +use keri_core::database::postgres::PostgresDatabase; +#[cfg(feature = "storage-postgres")] +use keri_core::database::postgres::oobi_storage::PostgresOobiStorage; +#[cfg(feature = "storage-postgres")] +use teliox::database::postgres::{PostgresTelDatabase, PostgresTelEscrowDatabase}; + +#[cfg(feature = "storage-postgres")] +pub type PostgresKnownEvents = KnownEvents; + +#[cfg(feature = "storage-postgres")] +impl PostgresKnownEvents { + pub async fn with_postgres( + database_url: &str, + escrow_config: EscrowConfig, + ) -> Result { + let event_db = Arc::new( + PostgresDatabase::new(database_url) + .await + .map_err(|e| ControllerError::DatabaseError(e.to_string()))?, + ); + event_db + .run_migrations() + .await + .map_err(|e| ControllerError::DatabaseError(e.to_string()))?; + let oobi_storage = PostgresOobiStorage::new(event_db.pool.clone()); + let tel_db = Arc::new(PostgresTelDatabase::new(event_db.pool.clone())); + let tel_escrow_db = PostgresTelEscrowDatabase::new(event_db.pool.clone()); + Self::new(event_db, oobi_storage, tel_db, tel_escrow_db, escrow_config) + } +} diff --git a/components/controller/src/lib.rs b/components/controller/src/lib.rs index cde2fd73..cd162e00 100644 --- a/components/controller/src/lib.rs +++ b/components/controller/src/lib.rs @@ -16,3 +16,13 @@ pub use keri_core::signer::{CryptoBox, KeyManager}; pub use teliox::{ event::parse_tel_query_stream, state::vc_state::TelState, state::ManagerTelState, }; + +#[cfg(feature = "storage-redb")] +pub use known_events::RedbKnownEvents; +#[cfg(feature = "storage-redb")] +pub use controller::{RedbController, RedbIdentifier}; + +#[cfg(feature = "storage-postgres")] +pub use 
known_events::PostgresKnownEvents; +#[cfg(feature = "storage-postgres")] +pub use controller::{PostgresController, PostgresIdentifier}; diff --git a/components/controller/src/oobi.rs b/components/controller/src/oobi.rs index ad91dad8..6cc83270 100644 --- a/components/controller/src/oobi.rs +++ b/components/controller/src/oobi.rs @@ -1,12 +1,20 @@ use keri_core::{ + database::{EscrowCreator, EventDatabase}, oobi::{EndRole, LocationScheme, Role}, + oobi_manager::storage::OobiStorageBackend, prefix::IdentifierPrefix, query::reply_event::ReplyRoute, }; +use teliox::database::TelEventDatabase; use crate::{error::ControllerError, identifier::Identifier, known_events::OobiRetrieveError}; -impl Identifier { +impl Identifier +where + D: EventDatabase + EscrowCreator + Send + Sync + 'static, + T: TelEventDatabase + Send + Sync + 'static, + S: OobiStorageBackend, +{ pub fn get_location( &self, identifier: &IdentifierPrefix, diff --git a/components/controller/tests/common/mod.rs b/components/controller/tests/common/mod.rs new file mode 100644 index 00000000..9f337926 --- /dev/null +++ b/components/controller/tests/common/mod.rs @@ -0,0 +1,64 @@ +use sqlx::postgres::PgPoolOptions; + +/// Returns the base postgres URL (without database name) from DATABASE_URL, or a default. +fn base_url() -> String { + let url = std::env::var("DATABASE_URL") + .unwrap_or_else(|_| "postgres://postgres:postgres@localhost:5432/placeholder".to_string()); + let (base, _) = url.rsplit_once('/').expect("Invalid DATABASE_URL"); + base.to_string() +} + +/// Returns a database name unique to this test binary so parallel test runs never share a database. +fn binary_db_name() -> String { + // Use the explicit DATABASE_URL db name when set, otherwise derive from the binary name. 
+ if let Ok(url) = std::env::var("DATABASE_URL") { + let (_, db_name) = url.rsplit_once('/').expect("Invalid DATABASE_URL"); + return db_name.to_string(); + } + let binary_name = std::env::current_exe() + .ok() + .and_then(|p| p.file_stem().map(|s| s.to_string_lossy().into_owned())) + .unwrap_or_else(|| "unknown".to_string()); + // Sanitize: keep only alphanumeric and underscores; any name longer than Postgres's 63-char identifier limit is truncated by the server consistently for both DROP and CREATE. + let safe: String = binary_name + .chars() + .map(|c| if c.is_alphanumeric() || c == '_' { c } else { '_' }) + .take(55) + .collect(); + format!("keri_test_{}", safe) +} + +pub fn get_database_url() -> String { + format!("{}/{}", base_url(), binary_db_name()) +} + +/// Drops and recreates this binary's test database once per process so each run starts fresh. +/// Each test binary gets its own database, so concurrent `cargo test` runs don't race. +pub fn ensure_clean_db() { + static INIT: std::sync::Mutex<bool> = std::sync::Mutex::new(false); + let mut done = INIT.lock().unwrap(); + if *done { + return; + } + let result = std::panic::catch_unwind(|| { + async_std::task::block_on(async { + let db_name = binary_db_name(); + let admin = PgPoolOptions::new() + .max_connections(2) + .connect(&format!("{}/postgres", base_url())) + .await + .expect("Failed to connect to admin db"); + let _ = sqlx::query(&format!("DROP DATABASE IF EXISTS \"{}\" WITH (FORCE)", db_name)) + .execute(&admin) + .await; + sqlx::query(&format!("CREATE DATABASE \"{}\"", db_name)) + .execute(&admin) + .await + .expect("Failed to create test database"); + }); + }); + if result.is_err() { + panic!("ensure_clean_db failed — check DATABASE_URL and postgres connection"); + } + *done = true; +} diff --git a/components/controller/tests/test_delegated_incept.rs b/components/controller/tests/test_delegated_incept.rs index 5b43c66a..640b03c2 100644 --- a/components/controller/tests/test_delegated_incept.rs +++ b/components/controller/tests/test_delegated_incept.rs @@ -27,7 +27,7 @@ mod test_delegated_incept
{ let seed = "AK8F6AAiYDpXlWdj2O5F5-6wNCCNJh2A4XOlqwR_HwwH"; let witness_root = Builder::new().prefix("test-wit1-db").tempdir().unwrap(); Arc::new( - WitnessListener::setup( + WitnessListener::setup_with_redb( url::Url::parse("http://witness1:3232/").unwrap(), witness_root.path(), Some(seed.to_string()), diff --git a/components/controller/tests/test_delegated_incept_postgres.rs b/components/controller/tests/test_delegated_incept_postgres.rs new file mode 100644 index 00000000..d0740833 --- /dev/null +++ b/components/controller/tests/test_delegated_incept_postgres.rs @@ -0,0 +1,270 @@ +#[cfg(feature = "query_cache")] +mod test_delegated_incept_postgres { + mod common { + include!("common/mod.rs"); + } + + use std::{collections::HashMap, sync::Arc}; + + use keri_controller::{ + config::ControllerConfig, + controller::PostgresController, + error::ControllerError, + mailbox_updating::ActionRequired, + LocationScheme, + }; + use keri_core::{ + event_message::signed_event_message::Message, + prefix::{BasicPrefix, IdentifierPrefix, IndexedSignature, SelfSigningPrefix}, + signer::{CryptoBox, KeyManager}, + transport::test::{TestActorMap, TestTransport}, + }; + use tempfile::Builder; + use url::Host; + use witness::{WitnessEscrowConfig, WitnessListener}; + + #[async_std::test] + async fn test_delegated_incept_postgres() -> Result<(), ControllerError> { + use url::Url; + + common::ensure_clean_db(); + + // Setup test witness (redb-backed — witness storage backend is independent) + let witness = { + let seed = "AK8F6AAiYDpXlWdj2O5F5-6wNCCNJh2A4XOlqwR_HwwH"; + let witness_root = Builder::new().prefix("test-wit1-db").tempdir().unwrap(); + Arc::new( + WitnessListener::setup_with_redb( + Url::parse("http://witness1:3232/").unwrap(), + witness_root.path(), + Some(seed.to_string()), + WitnessEscrowConfig::default(), + ) + .unwrap(), + ) + }; + + let witness_id_basic = witness.get_prefix(); + let witness_id = IdentifierPrefix::Basic(witness_id_basic.clone()); + assert_eq!( + 
witness_id.to_string(), + "BErocgXD2RGSyvn3MObcx59jeOsEQhv2TqHirVkzrp0Q" + ); + let wit_location = LocationScheme { + eid: witness_id, + scheme: keri_core::oobi::Scheme::Http, + url: Url::parse("http://witness1:3232").unwrap(), + }; + + let mut actors: TestActorMap = HashMap::new(); + actors.insert((Host::Domain("witness1".to_string()), 3232), witness); + let transport = TestTransport::new(actors); + + let delegatee_root = Builder::new().prefix("test-db").tempdir().unwrap(); + let delegator_root = Builder::new().prefix("test-db2").tempdir().unwrap(); + + // Setup delegatee identifier + let delegatee_controller = Arc::new( + PostgresController::new_postgres( + &common::get_database_url(), + ControllerConfig { + db_path: delegatee_root.path().to_owned(), + transport: Box::new(transport.clone()), + ..Default::default() + }, + ) + .await?, + ); + + let delegatee_keypair = CryptoBox::new()?; + let pk = BasicPrefix::Ed25519(delegatee_keypair.public_key()); + let npk = BasicPrefix::Ed25519(delegatee_keypair.next_public_key()); + + let icp_event = delegatee_controller + .incept(vec![pk], vec![npk], vec![wit_location.clone()], 1) + .await?; + let signature = + SelfSigningPrefix::Ed25519Sha512(delegatee_keypair.sign(icp_event.as_bytes())?); + + let mut delegatee_identifier = + delegatee_controller.finalize_incept(icp_event.as_bytes(), &signature)?; + delegatee_identifier.notify_witnesses().await?; + + let query = delegatee_identifier + .query_mailbox(delegatee_identifier.id(), &[witness_id_basic.clone()])?; + for qry in query { + let signature = + SelfSigningPrefix::Ed25519Sha512(delegatee_keypair.sign(&qry.encode()?)?); + delegatee_identifier + .finalize_query_mailbox(vec![(qry, signature)]) + .await?; + } + + // Setup delegator identifier + let delegator_controller = Arc::new( + PostgresController::new_postgres( + &common::get_database_url(), + ControllerConfig { + db_path: delegator_root.path().to_owned(), + transport: Box::new(transport.clone()), + ..Default::default() + 
}, + ) + .await?, + ); + + let delegator_keypair = CryptoBox::new()?; + let pk = BasicPrefix::Ed25519(delegator_keypair.public_key()); + let npk = BasicPrefix::Ed25519(delegator_keypair.next_public_key()); + + let icp_event = delegator_controller + .incept(vec![pk], vec![npk], vec![wit_location], 1) + .await?; + let signature = + SelfSigningPrefix::Ed25519Sha512(delegator_keypair.sign(icp_event.as_bytes())?); + + let mut delegator = + delegator_controller.finalize_incept(icp_event.as_bytes(), &signature)?; + delegator.notify_witnesses().await?; + + let query = delegator.query_mailbox(&delegator.id(), &[witness_id_basic.clone()])?; + for qry in query { + let signature = + SelfSigningPrefix::Ed25519Sha512(delegator_keypair.sign(&qry.encode()?)?); + let ar = delegator + .finalize_query_mailbox(vec![(qry, signature)]) + .await?; + assert!(ar.is_empty()); + } + + // Generate delegated inception + let (delegated_inception, exn_messages) = delegatee_identifier.incept_group( + vec![], + 1, + Some(1), + Some(vec![witness_id_basic.clone()]), + Some(1), + Some(delegator.id().clone()), + )?; + + let signature_icp = SelfSigningPrefix::Ed25519Sha512( + delegatee_keypair.sign(delegated_inception.as_bytes())?, + ); + let signature_exn = + SelfSigningPrefix::Ed25519Sha512(delegatee_keypair.sign(exn_messages[0].as_bytes())?); + let exn_index_signature = delegatee_identifier.sign_with_index(signature_exn, 0)?; + + let delegate_id = delegatee_identifier + .finalize_group_incept( + delegated_inception.as_bytes(), + signature_icp.clone(), + vec![(exn_messages[0].as_bytes().to_vec(), exn_index_signature)], + ) + .await?; + + let kel = delegatee_controller.get_kel_with_receipts(&delegate_id); + assert!(kel.is_none()); + + let query = delegator.query_mailbox(delegator.id(), &[witness_id_basic.clone()])?; + for qry in query { + let signature = + SelfSigningPrefix::Ed25519Sha512(delegator_keypair.sign(&qry.encode()?)?); + let ar = delegator + .finalize_query_mailbox(vec![(qry, signature)]) + 
.await?; + + assert_eq!(ar.len(), 1); + match &ar[0] { + ActionRequired::MultisigRequest(_, _) => unreachable!(), + ActionRequired::DelegationRequest(delegating_event, exn) => { + let signature_ixn = SelfSigningPrefix::Ed25519Sha512( + delegator_keypair.sign(&delegating_event.encode()?)?, + ); + let signature_exn = SelfSigningPrefix::Ed25519Sha512( + delegator_keypair.sign(&exn.encode()?)?, + ); + let exn_index_signature = + delegator.sign_with_index(signature_exn, 0).unwrap(); + delegator + .finalize_group_event( + &delegating_event.encode()?, + signature_ixn.clone(), + vec![], + ) + .await?; + delegator.notify_witnesses().await?; + + let query = + delegator.query_mailbox(delegator.id(), &[witness_id_basic.clone()])?; + for qry in query { + let signature = SelfSigningPrefix::Ed25519Sha512( + delegator_keypair.sign(&qry.encode()?)?, + ); + let action_required = delegator + .finalize_query_mailbox(vec![(qry, signature)]) + .await?; + assert!(action_required.is_empty()); + } + + let data_signature = IndexedSignature::new_both_same(signature_ixn, 0); + delegator + .finalize_exchange(&exn.encode()?, exn_index_signature, data_signature) + .await?; + + let delegators_state = delegator_controller.find_state(delegator.id())?; + assert_eq!(delegators_state.sn, 1); + } + }; + } + + let query = delegator.query_mailbox(delegator.id(), &[witness_id_basic.clone()])?; + for qry in query { + let signature = + SelfSigningPrefix::Ed25519Sha512(delegator_keypair.sign(&qry.encode()?)?); + let ar = delegator + .finalize_query_mailbox(vec![(qry, signature)]) + .await?; + assert_eq!(ar.len(), 0); + } + + let delegators_kel = delegator_controller + .get_kel_with_receipts(&delegator.id()) + .unwrap(); + delegatee_controller + .known_events + .save(&Message::Notice(delegators_kel[0].clone()))?; + + let query = + delegatee_identifier.query_mailbox(&delegate_id, &[witness_id_basic.clone()])?; + for qry in query { + let signature = + 
SelfSigningPrefix::Ed25519Sha512(delegatee_keypair.sign(&qry.encode()?)?); + let ar = delegatee_identifier + .finalize_query_mailbox(vec![(qry, signature)]) + .await?; + assert!(ar.is_empty()) + } + + let state = delegatee_identifier.find_state(delegator.id())?; + assert_eq!(state.sn, 1); + + let state = delegatee_identifier.find_state(&delegate_id); + assert!(state.is_err()); + + let query = + delegatee_identifier.query_mailbox(&delegate_id, &[witness_id_basic.clone()])?; + for qry in query { + let signature = + SelfSigningPrefix::Ed25519Sha512(delegatee_keypair.sign(&qry.encode()?)?); + let ar = delegatee_identifier + .finalize_query_mailbox(vec![(qry, signature)]) + .await?; + assert!(ar.is_empty()); + } + + let state = delegatee_identifier.find_state(&delegate_id)?; + assert_eq!(state.sn, 0); + + Ok(()) + } +} diff --git a/components/controller/tests/test_group_incept_postgres.rs b/components/controller/tests/test_group_incept_postgres.rs new file mode 100644 index 00000000..a2da154d --- /dev/null +++ b/components/controller/tests/test_group_incept_postgres.rs @@ -0,0 +1,75 @@ +mod common; + +use std::sync::Arc; + +use keri_controller::{config::ControllerConfig, controller::PostgresController, error::ControllerError}; +use keri_core::{ + prefix::{BasicPrefix, SelfSigningPrefix}, + signer::{CryptoBox, KeyManager}, +}; +use tempfile::Builder; + +#[async_std::test] +async fn test_group_incept_postgres() -> Result<(), ControllerError> { + common::ensure_clean_db(); + + let root = Builder::new().prefix("test-db").tempdir().unwrap(); + let controller = Arc::new( + PostgresController::new_postgres( + &common::get_database_url(), + ControllerConfig { + db_path: root.path().to_owned(), + ..Default::default() + }, + ) + .await?, + ); + + let km1 = CryptoBox::new()?; + let km2 = CryptoBox::new()?; + + let pk = BasicPrefix::Ed25519(km1.public_key()); + let npk = BasicPrefix::Ed25519(km1.next_public_key()); + + let icp_event = controller.incept(vec![pk], vec![npk], vec![], 
0).await?; + let signature = SelfSigningPrefix::Ed25519Sha512(km1.sign(icp_event.as_bytes())?); + let mut identifier1 = controller.finalize_incept(icp_event.as_bytes(), &signature)?; + + let pk = BasicPrefix::Ed25519(km2.public_key()); + let npk = BasicPrefix::Ed25519(km2.next_public_key()); + + let icp_event = controller.incept(vec![pk], vec![npk], vec![], 0).await?; + let signature = SelfSigningPrefix::Ed25519Sha512(km2.sign(icp_event.as_bytes())?); + let mut identifier2 = controller.finalize_incept(icp_event.as_bytes(), &signature)?; + + let (group_inception, exn_messages) = + identifier1.incept_group(vec![identifier2.id().clone()], 2, Some(2), None, None, None)?; + + let signature_icp = SelfSigningPrefix::Ed25519Sha512(km1.sign(group_inception.as_bytes())?); + let signature_exn = SelfSigningPrefix::Ed25519Sha512(km1.sign(exn_messages[0].as_bytes())?); + let exn_index_signature = identifier1.sign_with_index(signature_exn, 0)?; + + // Group initiator uses `finalize_group_incept` to send multisig request to other participants. + let group_id = identifier1 + .finalize_group_incept( + group_inception.as_bytes(), + signature_icp, + vec![(exn_messages[0].as_bytes().to_vec(), exn_index_signature)], + ) + .await?; + + let kel = controller.get_kel_with_receipts(&group_id); + // Event is not yet accepted — needs both signatures. + assert!(kel.is_none()); + + // identifier2 receives the group icp from identifier1's mailbox (shared controller). 
+ let signature_icp = SelfSigningPrefix::Ed25519Sha512(km2.sign(group_inception.as_bytes())?); + identifier2 + .finalize_group_event(group_inception.as_bytes(), signature_icp, vec![]) + .await?; + + let kel = controller.get_kel_with_receipts(&group_id); + assert!(kel.is_some()); + + Ok(()) +} diff --git a/components/controller/tests/test_kel_managing_postgres.rs b/components/controller/tests/test_kel_managing_postgres.rs new file mode 100644 index 00000000..4a615097 --- /dev/null +++ b/components/controller/tests/test_kel_managing_postgres.rs @@ -0,0 +1,85 @@ +mod common; + +use cesrox::primitives::codes::self_addressing::SelfAddressing; +use keri_core::{ + actor::prelude::HashFunction, + prefix::{BasicPrefix, SelfSigningPrefix}, + signer::{CryptoBox, KeyManager}, +}; + +use keri_controller::{ + config::ControllerConfig, + controller::PostgresController, + error::ControllerError, +}; +use tempfile::Builder; + +#[async_std::test] +async fn test_kel_managing_postgres() -> Result<(), ControllerError> { + common::ensure_clean_db(); + + let root = Builder::new().prefix("test-db").tempdir().unwrap(); + let controller = PostgresController::new_postgres( + &common::get_database_url(), + ControllerConfig { + db_path: root.path().to_owned(), + ..Default::default() + }, + ) + .await?; + + let mut km = CryptoBox::new()?; + + let first_pk = BasicPrefix::Ed25519(km.public_key()); + let first_next_npk = BasicPrefix::Ed25519(km.next_public_key()); + let inception_event = controller + .incept(vec![first_pk.clone()], vec![first_next_npk], vec![], 0) + .await?; + + let signature = SelfSigningPrefix::Ed25519Sha512(km.sign(inception_event.as_bytes())?); + let mut identifier = controller.finalize_incept(inception_event.as_bytes(), &signature)?; + + let keys = identifier.current_public_keys()?; + assert_eq!(keys, vec![first_pk.clone()]); + + // Keys rotation + km.rotate()?; + let second_pk = BasicPrefix::Ed25519(km.public_key()); + let second_next_pk = 
BasicPrefix::Ed25519(km.next_public_key()); + let rotation_event = identifier + .rotate( + vec![second_pk.clone()], + vec![second_next_pk], + 1, + vec![], + vec![], + 0, + ) + .await?; + + let signature = SelfSigningPrefix::Ed25519Sha512(km.sign(rotation_event.as_bytes())?); + identifier + .finalize_rotate(rotation_event.as_bytes(), signature) + .await?; + + let keys = identifier.current_public_keys()?; + assert_ne!(keys, vec![first_pk]); + assert_eq!(keys, vec![second_pk.clone()]); + + let data_to_anchor = b"Hello world"; + let said = HashFunction::from(SelfAddressing::Blake3_256).derive(data_to_anchor); + let interaction_event = identifier.anchor(&[said])?; + + let signature = SelfSigningPrefix::Ed25519Sha512(km.sign(interaction_event.as_bytes())?); + identifier + .finalize_anchor(interaction_event.as_bytes(), signature) + .await?; + + let keys = identifier.current_public_keys()?; + assert_eq!(keys, vec![second_pk]); + + let state = identifier.find_state(identifier.id()); + assert_eq!(state.unwrap().sn, 2); + + Ok(()) +} diff --git a/components/controller/tests/test_tel_managing_postgres.rs b/components/controller/tests/test_tel_managing_postgres.rs new file mode 100644 index 00000000..8b49fcda --- /dev/null +++ b/components/controller/tests/test_tel_managing_postgres.rs @@ -0,0 +1,108 @@ +mod common; + +use std::sync::Arc; + +use keri_controller::{ + config::ControllerConfig, + controller::PostgresController, + error::ControllerError, + BasicPrefix, CryptoBox, KeyManager, SelfSigningPrefix, +}; +use keri_core::actor::prelude::{HashFunction, HashFunctionCode}; +use tempfile::Builder; + +#[async_std::test] +async fn test_tel_postgres() -> Result<(), ControllerError> { + common::ensure_clean_db(); + + let root = Builder::new().prefix("test-db").tempdir().unwrap(); + let controller1 = Arc::new( + PostgresController::new_postgres( + &common::get_database_url(), + ControllerConfig { + db_path: root.path().to_owned(), + ..Default::default() + }, + ) + .await?, + ); + + 
let km1 = CryptoBox::new().unwrap(); + let pk = BasicPrefix::Ed25519(km1.public_key()); + let npk = BasicPrefix::Ed25519(km1.next_public_key()); + + let icp_event = controller1 + .incept(vec![pk], vec![npk], vec![], 0) + .await + .unwrap(); + let signature = SelfSigningPrefix::Ed25519Sha512(km1.sign(icp_event.as_bytes()).unwrap()); + + let mut identifier1 = controller1 + .finalize_incept(icp_event.as_bytes(), &signature) + .unwrap(); + + let issuer_prefix = identifier1.id().clone(); + + // Incept management TEL + let (_registry_id, ixn) = identifier1.incept_registry().unwrap(); + let ixn_encoded = ixn.encode().unwrap(); + let signature = SelfSigningPrefix::Ed25519Sha512(km1.sign(&ixn_encoded).unwrap()); + + identifier1 + .finalize_incept_registry(&ixn_encoded, signature) + .await + .unwrap(); + + let mana = identifier1 + .find_management_tel_state(identifier1.registry_id().unwrap()) + .unwrap() + .unwrap(); + assert_eq!(mana.sn, 0); + + // Issue a credential + let credential = r#"message"#.to_string(); + let credential_said = + HashFunction::from(HashFunctionCode::Blake3_256).derive(credential.as_bytes()); + + let (vc_id, issuance_ixn) = identifier1.issue(credential_said).unwrap(); + let issuance_ixn_cesr = issuance_ixn.encode().unwrap(); + let vc_hash = match vc_id { + keri_controller::IdentifierPrefix::SelfAddressing(sai) => sai.said.clone(), + _ => unreachable!(), + }; + let signature = SelfSigningPrefix::Ed25519Sha512(km1.sign(&issuance_ixn_cesr).unwrap()); + + identifier1 + .finalize_issue(&issuance_ixn_cesr, signature) + .await + .unwrap(); + + let state = identifier1.find_state(&issuer_prefix)?; + assert_eq!(state.sn, 2); + + let iss = identifier1.find_vc_state(&vc_hash).unwrap(); + assert!(matches!( + iss, + Some(teliox::state::vc_state::TelState::Issued(_)) + )); + + // Revoke the credential + let revocation_ixn = identifier1.revoke(&vc_hash).unwrap(); + let signature = SelfSigningPrefix::Ed25519Sha512(km1.sign(&revocation_ixn).unwrap()); + + identifier1 + 
.finalize_revoke(&revocation_ixn, signature) + .await + .unwrap(); + + let state = identifier1.find_state(&issuer_prefix)?; + assert_eq!(state.sn, 3); + + let rev = identifier1.find_vc_state(&vc_hash).unwrap(); + assert!(matches!( + rev, + Some(teliox::state::vc_state::TelState::Revoked) + )); + + Ok(()) +} diff --git a/components/watcher/src/http_routing.rs b/components/watcher/src/http_routing.rs index dbdda09c..83467148 100644 --- a/components/watcher/src/http_routing.rs +++ b/components/watcher/src/http_routing.rs @@ -4,35 +4,35 @@ use actix_web::web; pub fn configure_routes(cfg: &mut web::ServiceConfig) { cfg.route( "/introduce", - actix_web::web::get().to(http_handlers::introduce), + actix_web::web::get().to(http_handlers::introduce_redb), ) .route( "/oobi/{id}", - actix_web::web::get().to(http_handlers::resolve_location), + actix_web::web::get().to(http_handlers::resolve_location_redb), ) .route( "/oobi/{cid}/{role}/{eid}", - actix_web::web::get().to(http_handlers::resolve_role), + actix_web::web::get().to(http_handlers::resolve_role_redb), ) .route( "/process", - actix_web::web::post().to(http_handlers::process_notice), + actix_web::web::post().to(http_handlers::process_notice_redb), ) .route( "/query", - actix_web::web::post().to(http_handlers::process_query), + actix_web::web::post().to(http_handlers::process_query_redb), ) .route( "/register", - actix_web::web::post().to(http_handlers::process_reply), + actix_web::web::post().to(http_handlers::process_reply_redb), ) .route( "/resolve", - actix_web::web::post().to(http_handlers::resolve_oobi), + actix_web::web::post().to(http_handlers::resolve_oobi_redb), ) .route( "/query/tel", - actix_web::web::post().to(http_handlers::process_tel_query), + actix_web::web::post().to(http_handlers::process_tel_query_redb), ) .route("info", actix_web::web::get().to(http_handlers::info)); } diff --git a/components/watcher/src/main.rs b/components/watcher/src/main.rs index ac6205a7..efcdf382 100644 --- 
a/components/watcher/src/main.rs +++ b/components/watcher/src/main.rs @@ -129,7 +129,7 @@ async fn main() -> anyhow::Result<()> { .extract::() .context("Failed to load config")?; - let watcher_listener = WatcherListener::new(WatcherConfig { + let watcher_listener = WatcherListener::setup_with_redb(WatcherConfig { public_address: cfg.public_url.clone(), db_path: cfg.db_path.clone(), priv_key: cfg.seed, diff --git a/components/watcher/src/test.rs b/components/watcher/src/test.rs index 0f5d53cc..a3a1d8fe 100644 --- a/components/watcher/src/test.rs +++ b/components/watcher/src/test.rs @@ -70,7 +70,7 @@ async fn test_watcher_access() -> Result<(), ActorError> { let url = Url::parse("http://some/dummy/url").unwrap(); let root = Builder::new().prefix("cont-test-db").tempdir().unwrap(); - let watcher = Watcher::new(crate::WatcherConfig { + let watcher = Watcher::setup_with_redb(crate::WatcherConfig { public_address: url, db_path: root.path().to_owned(), tel_storage_path: watcher_tel_path, @@ -107,7 +107,7 @@ pub async fn watcher_forward_ksn() -> Result<(), ActorError> { let root_witness = Builder::new().prefix("test-wit").tempdir().unwrap(); Arc::new( - WitnessListener::setup( + WitnessListener::setup_with_redb( witness_url, root_witness.path(), Some("ArwXoACJgOleVZ2PY7kXn7rA0II0mHYDhc6WrBH8fDAc".into()), @@ -167,7 +167,7 @@ pub async fn watcher_forward_ksn() -> Result<(), ActorError> { let watcher_tel_dir = Builder::new().prefix("cont-test-tel-db").tempdir().unwrap(); let watcher_tel_path = watcher_tel_dir.path().join("tel_storage"); - let watcher = Watcher::new(WatcherConfig { + let watcher = Watcher::setup_with_redb(WatcherConfig { public_address: url, db_path: root.path().to_owned(), transport: Box::new(transport), diff --git a/components/watcher/src/watcher/mod.rs b/components/watcher/src/watcher/mod.rs index e6984935..2a2c016e 100644 --- a/components/watcher/src/watcher/mod.rs +++ b/components/watcher/src/watcher/mod.rs @@ -16,6 +16,7 @@ use keri_core::{ 
error::Error, event_message::signed_event_message::Message, oobi::{error::OobiError, EndRole, LocationScheme}, + oobi_manager::{OobiManager, RedbOobiManager, RedbOobiStorage, storage::OobiStorageBackend}, prefix::{BasicPrefix, IdentifierPrefix}, query::reply_event::{ReplyRoute, SignedReply}, }; @@ -36,16 +37,16 @@ enum WitnessResp { Tel(Vec), } -pub struct Watcher { - pub(crate) watcher_data: Arc, +pub struct Watcher { + pub(crate) watcher_data: Arc>, recv: Mutex>, tel_recv: Mutex>, // Maps registry id to witness id provided by oobi registry_id_mapping: RegistryMapping, } -impl Watcher { - pub fn new(config: WatcherConfig) -> Result { +impl Watcher { + pub fn new(config: WatcherConfig, oobi_manager: OobiManager) -> Result { let (tx, rx) = channel::(100); let (tel_tx, tel_rx) = channel::<(IdentifierPrefix, IdentifierPrefix)>(100); let tel_storage_path = config.tel_storage_path.clone(); @@ -53,7 +54,7 @@ impl Watcher { let mut registry_ids_storage_path = tel_storage_path.clone(); registry_ids_storage_path.push("registry"); Ok(Watcher { - watcher_data: WatcherData::new(config, tx, tel_tx)?, + watcher_data: WatcherData::new(config, tx, tel_tx, oobi_manager)?, recv: Mutex::new(rx), tel_recv: Mutex::new(tel_rx), registry_id_mapping: RegistryMapping::new(®istry_ids_storage_path) @@ -104,6 +105,22 @@ impl Watcher { self.watcher_data.address.clone(), ) } +} + +impl Watcher { + pub fn setup_with_redb(config: WatcherConfig) -> Result { + use std::path::PathBuf; + + // Create oobi manager database in a separate location + let mut oobi_db_path = config.db_path.clone(); + oobi_db_path.push("oobi_database"); + let oobi_db = Arc::new(RedbDatabase::new(&oobi_db_path).unwrap()); + let oobi_manager = RedbOobiManager::new(oobi_db)?; + Self::new(config, oobi_manager) + } +} + +impl Watcher { pub async fn resolve_end_role(&self, er: EndRole) -> Result<(), ActorError> { // find endpoint data of endpoint provider identifier let loc_scheme = self diff --git 
a/components/watcher/src/watcher/watcher_data.rs b/components/watcher/src/watcher/watcher_data.rs index f1311be5..5fe2bb7f 100644 --- a/components/watcher/src/watcher/watcher_data.rs +++ b/components/watcher/src/watcher/watcher_data.rs @@ -3,8 +3,8 @@ use std::{fs::File, sync::Arc}; use futures::future::join_all; use itertools::Itertools; use keri_core::actor::possible_response::PossibleResponse; -use keri_core::database::redb::RedbError; use keri_core::error::Error; +use keri_core::oobi::error::OobiError; use keri_core::oobi::LocationScheme; use keri_core::prefix::{BasicPrefix, IdentifierPrefix, SelfSigningPrefix}; use keri_core::processor::escrow::default_escrow_bus; @@ -31,7 +31,7 @@ use keri_core::{ }, }; use keri_core::{ - oobi_manager::OobiManager, + oobi_manager::{OobiManager, storage::OobiStorageBackend}, processor::{basic_processor::BasicProcessor, event_storage::EventStorage}, signer::Signer, transport::Transport, @@ -49,12 +49,12 @@ use crate::transport::WatcherTelTransport; use super::{config::WatcherConfig, tel_providing::TelToForward}; -pub struct WatcherData { +pub struct WatcherData { pub address: url::Url, pub prefix: BasicPrefix, pub processor: BasicProcessor, pub event_storage: Arc>, - pub oobi_manager: OobiManager, + pub oobi_manager: OobiManager, pub signer: Arc, pub transport: Box, pub tel_transport: Box, @@ -66,11 +66,12 @@ pub struct WatcherData { reply_escrow: Arc>, } -impl WatcherData { +impl WatcherData { pub fn new( config: WatcherConfig, tx: Sender, tel_tx: Sender<(IdentifierPrefix, IdentifierPrefix)>, + oobi_manager: OobiManager, ) -> Result, ActorError> { let WatcherConfig { public_address, @@ -97,9 +98,7 @@ impl WatcherData { Arc::new(RedbDatabase::new(&path).unwrap()) }; - let oobi_manager = OobiManager::new(events_db.clone()); - - let (mut notification_bus, _) = default_escrow_bus(events_db.clone(), escrow_config); + let (notification_bus, _escrows) = default_escrow_bus(events_db.clone(), escrow_config, None); let reply_escrow = 
Arc::new(ReplyEscrow::new(events_db.clone())); notification_bus.register_observer( reply_escrow.clone(), @@ -112,7 +111,7 @@ impl WatcherData { let prefix = BasicPrefix::Ed25519NT(signer.public_key()); // watcher uses non transferable key let processor = BasicProcessor::new(events_db.clone(), Some(notification_bus)); - let storage = Arc::new(EventStorage::new(events_db)); + let storage = Arc::new(EventStorage::new_redb(events_db)); // construct witness loc scheme oobi let loc_scheme = LocationScheme::new( @@ -529,7 +528,7 @@ impl WatcherData { } /// Query roles in oobi manager to check if controller with given ID is allowed to communicate with us. - fn check_role(&self, cid: &IdentifierPrefix) -> Result { + fn check_role(&self, cid: &IdentifierPrefix) -> Result { Ok(self .oobi_manager .get_end_role(cid, Role::Watcher)? diff --git a/components/watcher/src/watcher_listener.rs b/components/watcher/src/watcher_listener.rs index b91c5b0f..32f6c383 100644 --- a/components/watcher/src/watcher_listener.rs +++ b/components/watcher/src/watcher_listener.rs @@ -2,20 +2,20 @@ use crate::http_routing::configure_routes; use std::{net::ToSocketAddrs, sync::Arc}; use actix_web::{dev::Server, rt::spawn, web, App, HttpServer}; -use keri_core::{actor::error::ActorError, oobi::LocationScheme, prefix::BasicPrefix}; +use keri_core::{actor::error::ActorError, oobi::LocationScheme, oobi_manager::RedbOobiStorage, oobi_manager::storage::OobiStorageBackend, prefix::BasicPrefix}; use crate::{watcher::Watcher, WatcherConfig}; use self::http_handlers::ApiError; -pub struct WatcherListener { - pub watcher: Arc, +pub struct WatcherListener { + pub watcher: Arc>, } -impl WatcherListener { - pub fn new(config: WatcherConfig) -> Result { +impl WatcherListener { + pub fn new(config: WatcherConfig, oobi_manager: keri_core::oobi_manager::OobiManager) -> Result { Ok(Self { - watcher: Arc::new(Watcher::new(config)?), + watcher: Arc::new(Watcher::new(config, oobi_manager)?), }) } @@ -34,7 +34,23 @@ impl 
WatcherListener { .unwrap() .run() } +} +impl WatcherListener { + pub fn setup_with_redb(config: WatcherConfig) -> Result { + use std::path::PathBuf; + use keri_core::{database::redb::RedbDatabase, oobi_manager::RedbOobiManager}; + + // Create oobi manager database in a separate location + let mut oobi_db_path = config.db_path.clone(); + oobi_db_path.push("oobi_database"); + let oobi_db = Arc::new(RedbDatabase::new(&oobi_db_path).unwrap()); + let oobi_manager = RedbOobiManager::new(oobi_db)?; + Self::new(config, oobi_manager) + } +} + +impl WatcherListener { pub async fn resolve_initial_oobis( &self, initial_oobis: &[LocationScheme], @@ -51,11 +67,11 @@ impl WatcherListener { } } -pub async fn update_checking(data: Arc) { +pub async fn update_checking(data: Arc>) { data.process_update_requests().await; } -pub async fn update_tel_checking(data: Arc) { +pub async fn update_tel_checking(data: Arc>) { let _ = data.process_update_tel_requests().await; } @@ -72,19 +88,21 @@ pub mod http_handlers { actor::{error::ActorError, prelude::Message}, event_message::signed_event_message::Op, oobi::{error::OobiError, EndRole, LocationScheme, Role}, + oobi_manager::RedbOobiStorage, + oobi_manager::storage::OobiStorageBackend, prefix::IdentifierPrefix, }; use serde::Deserialize; use crate::watcher::Watcher; - pub async fn introduce(data: web::Data>) -> Result { + pub async fn introduce(data: web::Data>>) -> Result { Ok(HttpResponse::Ok().json(data.oobi())) } - pub async fn process_notice( + pub async fn process_notice( body: web::Bytes, - data: web::Data>, + data: web::Data>>, ) -> Result { println!( "\nGot events to process: \n{}", @@ -98,9 +116,9 @@ pub mod http_handlers { .body(())) } - pub async fn process_query( + pub async fn process_query( body: web::Bytes, - data: web::Data>, + data: web::Data>>, ) -> Result { println!( "\nGot queries to process: \n{}", @@ -119,9 +137,9 @@ pub mod http_handlers { .body(resp)) } - pub async fn process_reply( + pub async fn process_reply( 
body: web::Bytes, - data: web::Data>, + data: web::Data>>, ) -> Result { println!( "\nGot replies to process: \n{}", @@ -135,9 +153,9 @@ pub mod http_handlers { .body(())) } - pub async fn resolve_oobi( + pub async fn resolve_oobi( body: web::Bytes, - data: web::Data>, + data: web::Data>>, ) -> Result { println!( "\nGot oobi to resolve: \n{}", @@ -165,9 +183,9 @@ pub mod http_handlers { Ok(HttpResponse::Ok().finish()) } - pub async fn resolve_location( + pub async fn resolve_location( eid: web::Path, - data: web::Data>, + data: web::Data>>, ) -> Result { let loc_scheme = data.signed_location(&eid)?; let oobis = loc_scheme @@ -185,9 +203,9 @@ pub mod http_handlers { .body(String::from_utf8(oobis).unwrap())) } - pub async fn resolve_role( + pub async fn resolve_role( path: web::Path<(IdentifierPrefix, Role, IdentifierPrefix)>, - data: web::Data>, + data: web::Data>>, ) -> Result { let (cid, role, eid) = path.into_inner(); @@ -209,9 +227,9 @@ pub mod http_handlers { .body(String::from_utf8(oobis).unwrap())) } - pub async fn process_tel_query( + pub async fn process_tel_query( post_data: String, - data: web::Data>, + data: web::Data>>, ) -> Result { println!("\nGot tel query to process: \n{}", post_data); let resp = data @@ -240,6 +258,60 @@ pub mod http_handlers { } } + // Concrete wrapper functions for Redb backend (used for HTTP routing) + pub async fn introduce_redb(data: web::Data>>) -> Result { + introduce(data).await + } + + pub async fn process_notice_redb( + body: web::Bytes, + data: web::Data>>, + ) -> Result { + process_notice(body, data).await + } + + pub async fn process_query_redb( + body: web::Bytes, + data: web::Data>>, + ) -> Result { + process_query(body, data).await + } + + pub async fn process_reply_redb( + body: web::Bytes, + data: web::Data>>, + ) -> Result { + process_reply(body, data).await + } + + pub async fn resolve_oobi_redb( + body: web::Bytes, + data: web::Data>>, + ) -> Result { + resolve_oobi(body, data).await + } + + pub async fn 
resolve_location_redb( + eid: web::Path, + data: web::Data>>, + ) -> Result { + resolve_location(eid, data).await + } + + pub async fn resolve_role_redb( + path: web::Path<(IdentifierPrefix, Role, IdentifierPrefix)>, + data: web::Data>>, + ) -> Result { + resolve_role(path, data).await + } + + pub async fn process_tel_query_redb( + post_data: String, + data: web::Data>>, + ) -> Result { + process_tel_query(post_data, data).await + } + pub async fn info() -> impl Responder { let version = option_env!("CARGO_PKG_VERSION"); if let Some(version) = version { @@ -260,12 +332,13 @@ mod test { }, event_message::signed_event_message::{Message, Op}, oobi::{Oobi, Role}, + oobi_manager::storage::OobiStorageBackend, prefix::IdentifierPrefix, query::query_event::{QueryRoute, SignedQueryMessage}, }; #[async_trait::async_trait] - impl keri_core::transport::test::TestActor for super::WatcherListener { + impl keri_core::transport::test::TestActor for super::WatcherListener { async fn send_message(&self, msg: Message) -> Result<(), ActorError> { let payload = String::from_utf8(msg.to_cesr().unwrap()).unwrap(); let data = actix_web::web::Data::new(self.watcher.clone()); diff --git a/components/witness/src/main.rs b/components/witness/src/main.rs index 613e76f9..2fec020b 100644 --- a/components/witness/src/main.rs +++ b/components/witness/src/main.rs @@ -108,7 +108,7 @@ async fn main() -> Result<()> { .extract::() .context("Failed to load config")?; - let witness_listener = WitnessListener::setup( + let witness_listener = WitnessListener::setup_with_redb( cfg.public_url.clone(), cfg.db_path.as_path(), cfg.seed, diff --git a/components/witness/src/tests.rs b/components/witness/src/tests.rs index 6dc5afbf..4f173d78 100644 --- a/components/witness/src/tests.rs +++ b/components/witness/src/tests.rs @@ -29,6 +29,7 @@ use tempfile::Builder; use url::Url; use crate::{witness::Witness, witness_processor::WitnessEscrowConfig}; +use keri_core::oobi_manager::{OobiManager, RedbOobiManager}; 
#[test] fn test_not_fully_witnessed() -> Result<(), Error> { @@ -59,7 +60,7 @@ fn test_not_fully_witnessed() -> Result<(), Error> { let first_witness = { let root_witness = Builder::new().prefix("test-db1").tempdir().unwrap(); std::fs::create_dir_all(root_witness.path()).unwrap(); - Witness::setup( + Witness::setup_with_redb( url::Url::parse("http://some/url").unwrap(), root_witness.path(), Some(seed1.into()), @@ -71,7 +72,7 @@ fn test_not_fully_witnessed() -> Result<(), Error> { let second_witness = { let root_witness = Builder::new().prefix("test-db1").tempdir().unwrap(); std::fs::create_dir_all(root_witness.path()).unwrap(); - Witness::setup( + Witness::setup_with_redb( url::Url::parse("http://some/url").unwrap(), root_witness.path(), Some(seed2.into()), @@ -99,7 +100,7 @@ fn test_not_fully_witnessed() -> Result<(), Error> { let not = Notice::Event(inception_event.clone()); w.process_notice(not).unwrap(); w.event_storage - .mailbox_data + .mailbox_data.as_ref().unwrap() .get_mailbox_receipts(controller.prefix(), 0) .into_iter() .flatten() @@ -185,7 +186,7 @@ fn test_not_fully_witnessed() -> Result<(), Error> { // first_witness.respond(signer_arc.clone())?; let first_receipt = first_witness .event_storage - .mailbox_data + .mailbox_data.as_ref().unwrap() .get_mailbox_receipts(controller.prefix(), 0) .unwrap() .map(Notice::NontransferableRct) @@ -237,11 +238,19 @@ fn test_qry_rpy() -> Result<(), ActorError> { let witness_root = Builder::new().prefix("test-db").tempdir().unwrap(); let signer = Signer::new(); let signer_arc = Arc::new(signer); + + // Create oobi manager database in a separate location + let mut oobi_database_path = witness_root.path().to_path_buf(); + oobi_database_path.push("oobi_database"); + let oobi_db = Arc::new(RedbDatabase::new(&oobi_database_path).unwrap()); + let oobi_manager = RedbOobiManager::new(oobi_db).unwrap(); + let witness = Witness::new( Url::parse("http://example.com").unwrap(), signer_arc, witness_root.path(), 
WitnessEscrowConfig::default(), + oobi_manager, ) .unwrap(); @@ -280,7 +289,7 @@ fn test_qry_rpy() -> Result<(), ActorError> { // send receipts to alice let receipt_to_alice = witness .event_storage - .mailbox_data + .mailbox_data.as_ref().unwrap() .get_mailbox_receipts(alice.prefix(), 0) .unwrap() .map(Notice::NontransferableRct) @@ -402,11 +411,19 @@ pub fn test_key_state_notice() -> Result<(), Error> { let witness_root = Builder::new().prefix("test-db").tempdir().unwrap(); let path = witness_root.path(); std::fs::create_dir_all(path).unwrap(); + + // Create oobi manager database in a separate location + let mut oobi_database_path = witness_root.path().to_path_buf(); + oobi_database_path.push("oobi_database"); + let oobi_db = Arc::new(RedbDatabase::new(&oobi_database_path).unwrap()); + let oobi_manager = RedbOobiManager::new(oobi_db).unwrap(); + Witness::new( Url::parse("http://example.com").unwrap(), signer_arc.clone(), path, WitnessEscrowConfig::default(), + oobi_manager, ) .unwrap() }; @@ -547,11 +564,19 @@ fn test_mbx() { .tempdir() .unwrap(); std::fs::create_dir_all(root.path()).unwrap(); + + // Create oobi manager database in a separate location + let mut oobi_database_path = root.path().to_path_buf(); + oobi_database_path.push("oobi_database"); + let oobi_db = Arc::new(RedbDatabase::new(&oobi_database_path).unwrap()); + let oobi_manager = RedbOobiManager::new(oobi_db).unwrap(); + Witness::new( Url::parse("http://example.com").unwrap(), signer, root.path(), WitnessEscrowConfig::default(), + oobi_manager, ) .unwrap() }; @@ -614,11 +639,19 @@ fn test_invalid_notice() { if !std::fs::exists(root.path()).unwrap() { std::fs::create_dir_all(root.path()).unwrap(); } + + // Create oobi manager database in a separate location + let mut oobi_database_path = root.path().to_path_buf(); + oobi_database_path.push("oobi_database"); + let oobi_db = Arc::new(RedbDatabase::new(&oobi_database_path).unwrap()); + let oobi_manager = RedbOobiManager::new(oobi_db).unwrap(); + 
Witness::new( Url::parse("http://example.com").unwrap(), signer, root.path(), WitnessEscrowConfig::default(), + oobi_manager, ) .unwrap() }; @@ -666,15 +699,12 @@ fn test_invalid_notice() { #[test] pub fn test_multisig() -> Result<(), ActorError> { - let signer = Signer::new(); - let signer_arc = Arc::new(signer); let witness = { let witness_root = Builder::new().prefix("test-db").tempdir().unwrap(); - let path = witness_root.path(); - Witness::new( + Witness::setup_with_redb( Url::parse("http://example.com").unwrap(), - signer_arc, - path, + witness_root.path(), + None, WitnessEscrowConfig::default(), ) .unwrap() @@ -818,7 +848,7 @@ pub fn test_multisig() -> Result<(), ActorError> { } // Helper function that creates controller, makes and publish its inception event. -fn setup_controller(witness: &Witness) -> Result, Error> { +fn setup_controller(witness: &Witness) -> Result, Error> { let mut cont1 = { // Create test db and event processor. let cont1_key_manager = Arc::new(Mutex::new(CryptoBox::new()?)); @@ -845,15 +875,12 @@ fn setup_controller(witness: &Witness) -> Result Result<(), ActorError> { - let signer = Signer::new(); - let signer_arc = Arc::new(signer); let witness = { let witness_root = Builder::new().prefix("test-db").tempdir().unwrap(); - let path = witness_root.path(); - Witness::new( + Witness::setup_with_redb( Url::parse("http://example.com").unwrap(), - signer_arc, - path, + witness_root.path(), + None, WitnessEscrowConfig::default(), ) .unwrap() @@ -1097,15 +1124,12 @@ pub fn test_delegated_multisig() -> Result<(), ActorError> { #[test] pub fn test_delegating_multisig() -> Result<(), ActorError> { - let signer = Signer::new(); - let signer_arc = Arc::new(signer); let witness = { let witness_root = Builder::new().prefix("test-db").tempdir().unwrap(); - let path = witness_root.path(); - Witness::new( + Witness::setup_with_redb( Url::parse("http://example.com").unwrap(), - signer_arc, - path, + witness_root.path(), + None, 
WitnessEscrowConfig::default(), ) .unwrap() diff --git a/components/witness/src/witness.rs b/components/witness/src/witness.rs index f6b8eec5..5c3234e6 100644 --- a/components/witness/src/witness.rs +++ b/components/witness/src/witness.rs @@ -23,7 +23,7 @@ use keri_core::{ }, mailbox::MailboxResponse, oobi::LocationScheme, - oobi_manager::OobiManager, + oobi_manager::{OobiManager, RedbOobiManager, RedbOobiStorage, storage::OobiStorageBackend}, prefix::{BasicPrefix, IdentifierPrefix, SelfSigningPrefix}, processor::notification::{Notification, NotificationBus, Notifier}, query::{ @@ -86,7 +86,7 @@ impl Notifier for WitnessReceiptGenerator { impl WitnessReceiptGenerator { pub fn new(signer: Arc, events_db: Arc) -> Self { - let storage = EventStorage::new(events_db.clone()); + let storage = EventStorage::new_redb(events_db.clone()); let prefix = BasicPrefix::Ed25519NT(signer.public_key()); Self { prefix, @@ -137,23 +137,30 @@ impl From for WitnessError { } } -pub struct Witness { +impl From for WitnessError { + fn from(err: keri_core::oobi::error::OobiError) -> Self { + WitnessError::DatabaseError(err.to_string()) + } +} + +pub struct Witness { pub address: Url, pub prefix: BasicPrefix, pub processor: WitnessProcessor, pub event_storage: Arc>, - pub oobi_manager: OobiManager, + pub oobi_manager: OobiManager, pub signer: Arc, pub receipt_generator: Arc, pub tel: Arc>, } -impl Witness { +impl Witness { pub fn new( address: Url, signer: Arc, event_path: &Path, escrow_config: WitnessEscrowConfig, + oobi_manager: OobiManager, ) -> Result { use keri_core::processor::notification::JustNotification; let mut events_path = PathBuf::new(); @@ -171,8 +178,8 @@ impl Witness { let events_db = Arc::new(RedbDatabase::new(&events_database_path).map_err(|_| Error::DbError)?); - let mut witness_processor = WitnessProcessor::new(events_db.clone(), escrow_config); - let event_storage = Arc::new(EventStorage::new(events_db.clone())); + let witness_processor = 
WitnessProcessor::new(events_db.clone(), escrow_config); + let event_storage = Arc::new(EventStorage::new_redb(events_db.clone())); let receipt_generator = Arc::new(WitnessReceiptGenerator::new( signer.clone(), @@ -217,30 +224,45 @@ impl Witness { signer, event_storage, receipt_generator, - oobi_manager: OobiManager::new(events_db.clone()), + oobi_manager, tel, }) } +} - pub fn setup( +impl Witness { + pub fn setup_with_redb( public_address: url::Url, event_db_path: &Path, priv_key: Option, escrow_config: WitnessEscrowConfig, - ) -> Result { + ) -> Result, WitnessError> { let signer = Arc::new( priv_key .map(|key| Signer::new_with_seed(&key.parse()?)) .unwrap_or_else(|| Ok(Signer::new()))?, ); let prefix = BasicPrefix::Ed25519NT(signer.public_key()); + + // Create oobi manager database in a separate location + let mut oobi_database_path = PathBuf::from(event_db_path); + oobi_database_path.push("oobi_database"); + let oobi_db = + Arc::new(RedbDatabase::new(&oobi_database_path).map_err(|_| Error::DbError)?); + // construct witness loc scheme oobi let loc_scheme = LocationScheme::new( IdentifierPrefix::Basic(prefix.clone()), public_address.scheme().parse().unwrap(), public_address.clone(), ); - let witness = Witness::new(public_address, signer.clone(), event_db_path, escrow_config)?; + let witness = Witness::new( + public_address, + signer.clone(), + event_db_path, + escrow_config, + RedbOobiManager::new(oobi_db)?, + )?; let reply = ReplyEvent::new_reply( ReplyRoute::LocScheme(loc_scheme), HashFunctionCode::Blake3_256, @@ -258,7 +280,9 @@ impl Witness { witness.oobi_manager.save_oobi(&signed_reply)?; Ok(witness) } +} +impl Witness { pub fn oobi(&self) -> LocationScheme { LocationScheme::new( IdentifierPrefix::Basic(self.prefix.clone()), diff --git a/components/witness/src/witness_listener.rs b/components/witness/src/witness_listener.rs index 8c2f4b4e..bcea8709 100644 --- a/components/witness/src/witness_listener.rs +++ b/components/witness/src/witness_listener.rs @@ 
-6,33 +6,39 @@ use std::{ use actix_web::{dev::Server, web::Data, App, HttpServer}; use anyhow::Result; -use keri_core::{self, prefix::BasicPrefix}; +use keri_core::{self, oobi_manager::RedbOobiStorage, oobi_manager::storage::OobiStorageBackend, prefix::BasicPrefix}; use crate::{ witness::{Witness, WitnessError}, witness_processor::WitnessEscrowConfig, }; -pub struct WitnessListener { - pub witness_data: Arc, +pub struct WitnessListener { + pub witness_data: Arc>, } -impl WitnessListener { +impl WitnessListener { pub fn setup( pub_addr: url::Url, event_db_path: &Path, priv_key: Option, escrow_config: WitnessEscrowConfig, + oobi_manager: keri_core::oobi_manager::OobiManager, ) -> Result { let mut oobi_path = PathBuf::new(); oobi_path.push(event_db_path); oobi_path.push("oobi"); + let signer = match priv_key { + Some(key) => Arc::new(keri_core::signer::Signer::new_with_seed(&key.parse().unwrap()).unwrap()), + None => Arc::new(keri_core::signer::Signer::new()), + }; Ok(Self { - witness_data: Arc::new(Witness::setup( + witness_data: Arc::new(Witness::new( pub_addr, + signer, event_db_path, - priv_key, escrow_config, + oobi_manager, )?), }) } @@ -44,39 +50,39 @@ impl WitnessListener { .app_data(state.clone()) .route( "/introduce", - actix_web::web::get().to(http_handlers::introduce), + actix_web::web::get().to(http_handlers::introduce_redb), ) .route( "/oobi/{id}", - actix_web::web::get().to(http_handlers::resolve_location), + actix_web::web::get().to(http_handlers::resolve_location_redb), ) .route( "/oobi/{cid}/{role}/{eid}", - actix_web::web::get().to(http_handlers::resolve_role), + actix_web::web::get().to(http_handlers::resolve_role_redb), ) .route( "/process", - actix_web::web::post().to(http_handlers::process_notice), + actix_web::web::post().to(http_handlers::process_notice_redb), ) .route( "/query", - actix_web::web::post().to(http_handlers::process_query), + actix_web::web::post().to(http_handlers::process_query_redb), ) .route( "/query/tel", - 
actix_web::web::post().to(http_handlers::process_tel_query), + actix_web::web::post().to(http_handlers::process_tel_query_redb), ) .route( "/process/tel", - actix_web::web::post().to(http_handlers::process_tel_events), + actix_web::web::post().to(http_handlers::process_tel_events_redb), ) .route( "/register", - actix_web::web::post().to(http_handlers::process_reply), + actix_web::web::post().to(http_handlers::process_reply_redb), ) .route( "/forward", - actix_web::web::post().to(http_handlers::process_exchange), + actix_web::web::post().to(http_handlers::process_exchange_redb), ) .route("/info", actix_web::web::get().to(http_handlers::info)) }) @@ -90,6 +96,56 @@ impl WitnessListener { } } +impl WitnessListener { + pub fn setup_with_redb( + pub_addr: url::Url, + event_db_path: &Path, + priv_key: Option, + escrow_config: WitnessEscrowConfig, + ) -> Result { + use keri_core::{database::redb::RedbDatabase, oobi_manager::RedbOobiManager}; + + // Create oobi manager database in a separate location + let oobi_db_path = event_db_path.join("oobi_database"); + let oobi_db = Arc::new(RedbDatabase::new(&oobi_db_path).unwrap()); + let oobi_manager = RedbOobiManager::new(oobi_db)?; + + let signer = Arc::new( + priv_key + .as_ref() + .map(|key| keri_core::signer::Signer::new_with_seed(&key.parse().unwrap())) + .unwrap_or_else(|| Ok(keri_core::signer::Signer::new()))?, + ); + let prefix = keri_core::prefix::BasicPrefix::Ed25519NT(signer.public_key()); + + // construct witness loc scheme oobi + let loc_scheme = keri_core::oobi::LocationScheme::new( + keri_core::prefix::IdentifierPrefix::Basic(prefix.clone()), + pub_addr.scheme().parse().unwrap(), + pub_addr.clone(), + ); + let witness = Self::setup(pub_addr, event_db_path, priv_key, escrow_config, oobi_manager)?; + + let reply = keri_core::query::reply_event::ReplyEvent::new_reply( + keri_core::query::reply_event::ReplyRoute::LocScheme(loc_scheme), + keri_core::actor::prelude::HashFunctionCode::Blake3_256, + 
keri_core::actor::prelude::SerializationFormats::JSON, + ); + let signed_reply = keri_core::query::reply_event::SignedReply::new_nontrans( + reply.clone(), + prefix, + keri_core::prefix::SelfSigningPrefix::Ed25519Sha512( + signer + .sign(reply.encode().unwrap()) + .map_err(|_e| WitnessError::SigningError)?, + ), + ); + witness.witness_data.oobi_manager.save_oobi(&signed_reply)?; + + Ok(witness) + } +} + mod test { use actix_web::body::MessageBody; use keri_core::{ @@ -100,6 +156,7 @@ mod test { }, event_message::signed_event_message::{Message, Op}, oobi::Role, + oobi_manager::storage::OobiStorageBackend, prefix::IdentifierPrefix, query::{ self, @@ -108,7 +165,7 @@ mod test { }; #[async_trait::async_trait] - impl keri_core::transport::test::TestActor for super::WitnessListener { + impl keri_core::transport::test::TestActor for super::WitnessListener { async fn send_message(&self, msg: Message) -> Result<(), ActorError> { let payload = String::from_utf8(msg.to_cesr().unwrap()).unwrap(); let data = actix_web::web::Data::new(self.witness_data.clone()); @@ -221,19 +278,21 @@ pub mod http_handlers { error::Error, event_message::signed_event_message::Op, oobi::Role, + oobi_manager::RedbOobiStorage, + oobi_manager::storage::OobiStorageBackend, prefix::{CesrPrimitive, IdentifierPrefix}, }; use teliox::event::verifiable_event::VerifiableEvent; use crate::witness::Witness; - pub async fn introduce(data: web::Data>) -> Result { + pub async fn introduce(data: web::Data>>) -> Result { Ok(HttpResponse::Ok().json(data.oobi())) } - pub async fn resolve_location( + pub async fn resolve_location( eid: web::Path, - data: web::Data>, + data: web::Data>>, ) -> Result { let loc_scheme = data .get_loc_scheme_for_id(&eid) @@ -253,9 +312,9 @@ pub mod http_handlers { .body(String::from_utf8(oobis).unwrap())) } - pub async fn resolve_role( + pub async fn resolve_role( path: web::Path<(IdentifierPrefix, Role, IdentifierPrefix)>, - data: web::Data>, + data: web::Data>>, ) -> Result { let (cid, 
role, eid) = path.into_inner(); let out = if role == Role::Witness { @@ -332,9 +391,9 @@ pub mod http_handlers { .body(String::from_utf8(out?).unwrap())) } - pub async fn process_notice( + pub async fn process_notice( post_data: String, - data: web::Data>, + data: web::Data>>, ) -> Result { println!( "\nWitness {} got notice to process: \n{}", @@ -348,9 +407,9 @@ pub mod http_handlers { .body(())) } - pub async fn process_query( + pub async fn process_query( post_data: String, - data: web::Data>, + data: web::Data>>, ) -> Result { println!( "\nWitness {} got query to process: \n{}", @@ -369,9 +428,9 @@ pub mod http_handlers { .body(resp)) } - pub async fn process_tel_query( + pub async fn process_tel_query( post_data: String, - data: web::Data>, + data: web::Data>>, ) -> Result { println!("\nGot tel query to process: \n{}", post_data); let resp = data @@ -386,9 +445,9 @@ pub mod http_handlers { .body(resp)) } - pub async fn process_reply( + pub async fn process_reply( post_data: String, - data: web::Data>, + data: web::Data>>, ) -> Result { println!("\nGot reply to process: \n{}", post_data); data.parse_and_process_replies(post_data.as_bytes())?; @@ -398,9 +457,9 @@ pub mod http_handlers { .body(())) } - pub async fn process_exchange( + pub async fn process_exchange( post_data: String, - data: web::Data>, + data: web::Data>>, ) -> Result { println!("\nGot exchange to process: \n{}", post_data); data.parse_and_process_exchanges(post_data.as_bytes())?; @@ -410,9 +469,9 @@ pub mod http_handlers { .body(())) } - pub async fn process_tel_events( + pub async fn process_tel_events( post_data: String, - data: web::Data>, + data: web::Data>>, ) -> Result { println!("\nGot tel event to process: \n{}", post_data); let parsed = VerifiableEvent::parse(post_data.as_bytes()).unwrap(); @@ -445,4 +504,65 @@ pub mod http_handlers { HttpResponse::build(self.status_code()).json(&self.0) } } + + // Concrete wrapper functions for Redb backend (used for HTTP routing) + pub async fn 
introduce_redb(data: web::Data>>) -> Result { + introduce(data).await + } + + pub async fn process_notice_redb( + body: String, + data: web::Data>>, + ) -> Result { + process_notice(body, data).await + } + + pub async fn process_query_redb( + body: String, + data: web::Data>>, + ) -> Result { + process_query(body, data).await + } + + pub async fn process_tel_query_redb( + post_data: String, + data: web::Data>>, + ) -> Result { + process_tel_query(post_data, data).await + } + + pub async fn process_reply_redb( + body: String, + data: web::Data>>, + ) -> Result { + process_reply(body, data).await + } + + pub async fn process_exchange_redb( + post_data: String, + data: web::Data>>, + ) -> Result { + process_exchange(post_data, data).await + } + + pub async fn process_tel_events_redb( + post_data: String, + data: web::Data>>, + ) -> Result { + process_tel_events(post_data, data).await + } + + pub async fn resolve_location_redb( + eid: web::Path, + data: web::Data>>, + ) -> Result { + resolve_location(eid, data).await + } + + pub async fn resolve_role_redb( + path: web::Path<(IdentifierPrefix, Role, IdentifierPrefix)>, + data: web::Data>>, + ) -> Result { + resolve_role(path, data).await + } } diff --git a/components/witness/src/witness_processor.rs b/components/witness/src/witness_processor.rs index 88fe9e19..57fb1755 100644 --- a/components/witness/src/witness_processor.rs +++ b/components/witness/src/witness_processor.rs @@ -22,7 +22,7 @@ pub struct WitnessProcessor { impl Processor for WitnessProcessor { type Database = RedbDatabase; fn register_observer( - &mut self, + &self, observer: Arc, notifications: &[JustNotification], ) -> Result<(), Error> { @@ -62,7 +62,7 @@ impl Default for WitnessEscrowConfig { impl WitnessProcessor { pub fn new(redb: Arc, escrow_config: WitnessEscrowConfig) -> Self { - let mut bus = NotificationBus::new(); + let bus = NotificationBus::new(); let partially_signed_escrow = Arc::new(PartiallySignedEscrow::new( redb.clone(), 
escrow_config.partially_signed_timeout, diff --git a/keriox_core/Cargo.toml b/keriox_core/Cargo.toml index 6bc66b3b..1162a28a 100644 --- a/keriox_core/Cargo.toml +++ b/keriox_core/Cargo.toml @@ -13,11 +13,13 @@ repository.workspace = true crate-type = ["cdylib", "rlib"] [features] -default = [] +default = ["storage-redb"] +storage-redb = ["redb"] +storage-postgres = ["sqlx", "async-std"] query = ["serde_cbor"] oobi = ["url", "strum_macros", "strum"] -oobi-manager = ["oobi", "query", "reqwest", "async-trait", "serde_cbor"] -mailbox = ["query", "serde_cbor"] +oobi-manager = ["oobi", "query", "storage-redb", "reqwest", "async-trait", "serde_cbor"] +mailbox = ["query", "storage-redb", "serde_cbor"] [dependencies] bytes = "1.3.0" @@ -43,7 +45,11 @@ chrono = { version = "0.4.18", features = ["serde"] } arrayref = "0.3.6" zeroize = "1.3.0" fraction = { version = "0.9", features = ["with-serde-support"] } -redb = "2.3.0" +redb = { version = "2.3.0", optional = true } + +# postgres db deps +sqlx = { version = "0.8", features = ["runtime-async-std", "postgres"], optional = true } +async-std = { version = "1", features = ["attributes"], optional = true } # oobis dependecies async-trait = { version = "0.1.57", optional = true } diff --git a/keriox_core/README.md b/keriox_core/README.md index e558dbeb..0d6eec0d 100644 --- a/keriox_core/README.md +++ b/keriox_core/README.md @@ -10,7 +10,35 @@ To use this library, a third-party key provider that derives public-private key ## Available Features +- `storage-redb` *(default)*: enables [redb](https://github.com/cberner/redb) as the persistent storage backend. Without this feature, an in-memory `MemoryDatabase` is available for testing or plugging in custom backends. - `query`: enables query messages and their processing logic. - `oobi`: provides events and logic for the [oobi discovery mechanism](https://weboftrust.github.io/ietf-oobi/draft-ssmith-oobi.html). 
-- `mailbox`: enables the storing of messages intended for other identifiers and provide them to recipient later. This feature is meant for witnesses and watchers. +- `oobi-manager`: high-level OOBI management. Implies `oobi`, `query`, and `storage-redb`. +- `mailbox`: enables the storing of messages intended for other identifiers and provides them to recipients later. This feature is meant for witnesses and watchers. Implies `query` and `storage-redb`. + +## Architecture + +### NotificationBus + +`NotificationBus` is a pluggable dispatch abstraction for event notifications. The default implementation dispatches in-process, but custom implementations (e.g. SQS for serverless environments) can be injected: + +```rust +// Use the default in-process dispatch: +let bus = NotificationBus::new(); + +// Or provide a custom dispatch: +let bus = NotificationBus::from_dispatch(my_custom_dispatch); +``` + +You can also pass an existing bus to `default_escrow_bus` via `Some(bus)` to share a single dispatch across escrows. + +### EscrowSet + +`EscrowSet` is a named struct (replacing the previous anonymous tuple) returned by `default_escrow_bus`. 
It provides typed access to each escrow: + +- `out_of_order` -- events received before their dependencies +- `partially_signed` -- events awaiting additional signatures +- `partially_witnessed` -- events awaiting additional witness receipts +- `delegation` -- delegated events awaiting approval +- `duplicitous` -- detected duplicitous events diff --git a/keriox_core/benches/bench.rs b/keriox_core/benches/bench.rs index ad2a49f7..5639149d 100644 --- a/keriox_core/benches/bench.rs +++ b/keriox_core/benches/bench.rs @@ -20,8 +20,8 @@ fn setup_processor() -> ( let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let (not_bus, (_ooo_escrow, _, _, _, _)) = - default_escrow_bus(events_db.clone(), EscrowConfig::default()); + let (not_bus, _escrows) = + default_escrow_bus(events_db.clone(), EscrowConfig::default(), None); let (processor, storage) = ( BasicProcessor::new(events_db.clone(), Some(not_bus)), diff --git a/keriox_core/src/actor/error.rs b/keriox_core/src/actor/error.rs index 06f4c7a6..15d2ad96 100644 --- a/keriox_core/src/actor/error.rs +++ b/keriox_core/src/actor/error.rs @@ -1,5 +1,6 @@ use http::StatusCode; +#[cfg(feature = "storage-redb")] use crate::database::redb::RedbError; use crate::event_message::cesr_adapter::ParseError; use crate::keys::KeysError; @@ -74,6 +75,7 @@ impl From for ActorError { } } +#[cfg(feature = "storage-redb")] impl From for ActorError { fn from(err: RedbError) -> Self { ActorError::DbError(err.to_string()) diff --git a/keriox_core/src/actor/mod.rs b/keriox_core/src/actor/mod.rs index d58ddce2..023f0b5c 100644 --- a/keriox_core/src/actor/mod.rs +++ b/keriox_core/src/actor/mod.rs @@ -3,7 +3,7 @@ use std::convert::TryFrom; use serde::{Deserialize, Serialize}; #[cfg(feature = "oobi-manager")] -use crate::oobi_manager::OobiManager; +use crate::oobi_manager::{storage::OobiStorageBackend, OobiManager}; #[cfg(feature = "query")] use crate::{ 
database::EventDatabase, @@ -91,9 +91,9 @@ pub fn process_notice(msg: Notice, processor: &P) -> Result<(), Er } #[cfg(feature = "query")] -pub fn process_reply( +pub fn process_reply( sr: SignedReply, - #[cfg(feature = "oobi-manager")] oobi_manager: &OobiManager, + #[cfg(feature = "oobi-manager")] oobi_manager: &OobiManager, processor: &P, event_storage: &EventStorage, ) -> Result<(), Error> { @@ -108,9 +108,9 @@ pub fn process_reply( } #[cfg(feature = "oobi-manager")] -pub fn process_signed_oobi( +pub fn process_signed_oobi( signed_oobi: &SignedReply, - oobi_manager: &OobiManager, + oobi_manager: &OobiManager, event_storage: &EventStorage, ) -> Result<(), Error> { use crate::processor::validator::EventValidator; diff --git a/keriox_core/src/actor/simple_controller.rs b/keriox_core/src/actor/simple_controller.rs index 7ac2298a..e25ed96b 100644 --- a/keriox_core/src/actor/simple_controller.rs +++ b/keriox_core/src/actor/simple_controller.rs @@ -3,8 +3,10 @@ use std::{ sync::{Arc, Mutex}, }; +#[cfg(feature = "storage-redb")] +use crate::database::redb::RedbDatabase; use crate::{ - database::{redb::RedbDatabase, EscrowCreator, EventDatabase}, + database::{EscrowCreator, EventDatabase}, processor::escrow::{ maybe_out_of_order_escrow::MaybeOutOfOrderEscrow, partially_witnessed_escrow::PartiallyWitnessedEscrow, @@ -49,7 +51,11 @@ use crate::{ #[cfg(feature = "oobi-manager")] use crate::oobi::Role; #[cfg(feature = "oobi-manager")] -use crate::oobi_manager::OobiManager; +use crate::oobi_manager::{OobiManager, RedbOobiManager}; +#[cfg(feature = "oobi-manager")] +use crate::oobi_manager::storage::OobiStorageBackend; +#[cfg(feature = "oobi-manager")] +use crate::oobi_manager::storage::RedbOobiStorage; #[cfg(feature = "query")] use crate::query::{ @@ -59,11 +65,24 @@ use crate::query::{ /// Helper struct for events generation, signing and processing. /// Used in tests. 
+#[cfg(feature = "oobi-manager")] +pub struct SimpleController { + prefix: IdentifierPrefix, + pub key_manager: Arc>, + processor: BasicProcessor, + oobi_manager: OobiManager, + pub storage: EventStorage, + pub groups: Vec, + pub not_fully_witnessed_escrow: Arc>, + pub ooo_escrow: Arc>, + pub delegation_escrow: Arc>, +} + +#[cfg(not(feature = "oobi-manager"))] pub struct SimpleController { prefix: IdentifierPrefix, pub key_manager: Arc>, processor: BasicProcessor, - oobi_manager: OobiManager, pub storage: EventStorage, pub groups: Vec, pub not_fully_witnessed_escrow: Arc>, @@ -72,27 +91,29 @@ pub struct SimpleController SimpleController { -impl SimpleController { +#[cfg(feature = "storage-redb")] +#[cfg(feature = "oobi-manager")] +impl SimpleController { // incept a state and keys pub fn new( event_db: Arc, key_manager: Arc>, escrow_config: EscrowConfig, - ) -> Result, Error> { - let (not_bus, (ooo, _, partially_witnesses, del_escrow, _duplicates)) = - default_escrow_bus(event_db.clone(), escrow_config); + ) -> Result, Error> { + let (not_bus, escrows) = + default_escrow_bus(event_db.clone(), escrow_config, None); let processor = BasicProcessor::new(event_db.clone(), Some(not_bus)); Ok(SimpleController { prefix: IdentifierPrefix::default(), key_manager, - oobi_manager: OobiManager::new(event_db.clone()), + oobi_manager: RedbOobiManager::new(event_db.clone())?, processor, storage: EventStorage::new(event_db.clone()), groups: vec![], - not_fully_witnessed_escrow: partially_witnesses, - ooo_escrow: ooo, - delegation_escrow: del_escrow, + not_fully_witnessed_escrow: escrows.partially_witnessed, + ooo_escrow: escrows.out_of_order, + delegation_escrow: escrows.delegation, }) } diff --git a/keriox_core/src/database/memory.rs b/keriox_core/src/database/memory.rs new file mode 100644 index 00000000..3fc101b8 --- /dev/null +++ b/keriox_core/src/database/memory.rs @@ -0,0 +1,650 @@ +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +use 
said::SelfAddressingIdentifier; + +#[cfg(feature = "query")] +use crate::query::reply_event::SignedReply; +use crate::{ + database::{ + timestamped::{Timestamped, TimestampedSignedEventMessage}, + EscrowCreator, EscrowDatabase, EventDatabase, LogDatabase, QueryParameters, + SequencedEventDatabase, + }, + error::Error, + event::KeyEvent, + event_message::{ + msg::KeriEvent, + signature::{Nontransferable, Transferable}, + signed_event_message::{ + SignedEventMessage, SignedNontransferableReceipt, SignedTransferableReceipt, + }, + }, + prefix::{IdentifierPrefix, IndexedSignature}, + state::IdentifierState, +}; + +/// In-memory implementation of EventDatabase for testing and validation. +pub struct MemoryDatabase { + /// Events stored by identifier prefix, ordered by sn + events: RwLock>>, + /// Key state per identifier + states: RwLock>, + /// Transferable receipts by (id, sn) + receipts_t: RwLock>>, + /// Non-transferable receipts by (id, sn) + receipts_nt: RwLock>>, + /// Log database + log_db: Arc, + /// Escrow counter for creating unique table names + escrow_db: Arc>>>, + #[cfg(feature = "query")] + replies: RwLock>, +} + +impl MemoryDatabase { + pub fn new() -> Self { + Self { + events: RwLock::new(HashMap::new()), + states: RwLock::new(HashMap::new()), + receipts_t: RwLock::new(HashMap::new()), + receipts_nt: RwLock::new(HashMap::new()), + log_db: Arc::new(MemoryLogDatabase::new()), + escrow_db: Arc::new(RwLock::new(HashMap::new())), + #[cfg(feature = "query")] + replies: RwLock::new(HashMap::new()), + } + } +} + +impl EventDatabase for MemoryDatabase { + type Error = Error; + type LogDatabaseType = MemoryLogDatabase; + + fn get_log_db(&self) -> Arc { + self.log_db.clone() + } + + fn add_kel_finalized_event( + &self, + event: SignedEventMessage, + id: &IdentifierPrefix, + ) -> Result<(), Self::Error> { + // Update key state + let current_state = self + .states + .read() + .unwrap() + .get(id) + .cloned() + .unwrap_or_default(); + let new_state = 
current_state.apply(&event.event_message)?; + self.states.write().unwrap().insert(id.clone(), new_state); + + // Log the event + self.log_db.log_event_internal(&event); + + // Store in KEL + let timestamped = Timestamped::new(event); + self.events + .write() + .unwrap() + .entry(id.clone()) + .or_default() + .push(timestamped); + + Ok(()) + } + + fn add_receipt_t( + &self, + receipt: SignedTransferableReceipt, + id: &IdentifierPrefix, + ) -> Result<(), Self::Error> { + let sn = receipt.body.sn; + let transferable = Transferable::Seal(receipt.validator_seal, receipt.signatures); + self.receipts_t + .write() + .unwrap() + .entry((id.clone(), sn)) + .or_default() + .push(transferable); + Ok(()) + } + + fn add_receipt_nt( + &self, + receipt: SignedNontransferableReceipt, + id: &IdentifierPrefix, + ) -> Result<(), Self::Error> { + let sn = receipt.body.sn; + self.receipts_nt + .write() + .unwrap() + .entry((id.clone(), sn)) + .or_default() + .push(receipt); + Ok(()) + } + + fn get_key_state(&self, id: &IdentifierPrefix) -> Option { + self.states.read().unwrap().get(id).cloned() + } + + fn get_kel_finalized_events( + &self, + params: QueryParameters, + ) -> Option> { + let events = self.events.read().unwrap(); + match params { + QueryParameters::All { id } => { + events.get(id).cloned().map(|v| v.into_iter()) + } + QueryParameters::BySn { ref id, sn } => { + events.get(id).map(|evts| { + evts.iter() + .filter(move |e| e.signed_event_message.event_message.data.get_sn() == sn) + .cloned() + .collect::>() + .into_iter() + }) + } + QueryParameters::Range { + ref id, + start, + limit, + } => events.get(id).map(|evts| { + evts.iter() + .filter(move |e| { + let sn = e.signed_event_message.event_message.data.get_sn(); + sn >= start && sn < start + limit + }) + .cloned() + .collect::>() + .into_iter() + }), + } + } + + fn get_receipts_t( + &self, + params: QueryParameters, + ) -> Option> { + let receipts = self.receipts_t.read().unwrap(); + match params { + QueryParameters::BySn 
{ ref id, sn } => { + receipts.get(&(id.clone(), sn)).cloned().map(|v| v.into_iter()) + } + _ => None, + } + } + + fn get_receipts_nt( + &self, + params: QueryParameters, + ) -> Option> { + let receipts = self.receipts_nt.read().unwrap(); + match params { + QueryParameters::BySn { ref id, sn } => { + receipts.get(&(id.clone(), sn)).cloned().map(|v| v.into_iter()) + } + _ => None, + } + } + + fn accept_to_kel(&self, _event: &KeriEvent) -> Result<(), Self::Error> { + // In redb, this saves the event to KEL tables. For memory, events + // are already in the events map from add_kel_finalized_event. + Ok(()) + } + + #[cfg(feature = "query")] + fn save_reply(&self, reply: SignedReply) -> Result<(), Self::Error> { + let id = reply.reply.get_prefix(); + let signer = reply + .signature + .get_signer() + .ok_or_else(|| Error::SemanticError("Missing signer".into()))?; + self.replies + .write() + .unwrap() + .insert((id, signer), reply); + Ok(()) + } + + #[cfg(feature = "query")] + fn get_reply( + &self, + id: &IdentifierPrefix, + from_who: &IdentifierPrefix, + ) -> Option { + self.replies + .read() + .unwrap() + .get(&(id.clone(), from_who.clone())) + .cloned() + } +} + +/// In-memory log database for storing events by digest. 
+pub struct MemoryLogDatabase { + events: RwLock>, + signatures: RwLock>>, + nontrans_couplets: RwLock>>, + trans_receipts: RwLock>>, +} + +impl MemoryLogDatabase { + pub fn new() -> Self { + Self { + events: RwLock::new(HashMap::new()), + signatures: RwLock::new(HashMap::new()), + nontrans_couplets: RwLock::new(HashMap::new()), + trans_receipts: RwLock::new(HashMap::new()), + } + } + + fn log_event_internal(&self, event: &SignedEventMessage) { + if let Ok(digest) = event.event_message.digest() { + let timestamped = Timestamped::new(event.clone()); + self.events.write().unwrap().insert(digest.clone(), timestamped); + self.signatures + .write() + .unwrap() + .insert(digest, event.signatures.clone()); + } + } + + fn log_receipt_internal(&self, receipt: &SignedNontransferableReceipt) { + let digest = receipt.body.receipted_event_digest.clone(); + self.nontrans_couplets + .write() + .unwrap() + .entry(digest) + .or_default() + .extend(receipt.signatures.clone()); + } +} + +impl LogDatabase<'static> for MemoryLogDatabase { + type DatabaseType = (); + type Error = Error; + type TransactionType = (); + + fn new(_db: Arc) -> Result { + Ok(Self::new()) + } + + fn log_event( + &self, + _txn: &Self::TransactionType, + signed_event: &SignedEventMessage, + ) -> Result<(), Self::Error> { + self.log_event_internal(signed_event); + Ok(()) + } + + fn log_event_with_new_transaction( + &self, + signed_event: &SignedEventMessage, + ) -> Result<(), Self::Error> { + self.log_event_internal(signed_event); + Ok(()) + } + + fn log_receipt( + &self, + _txn: &Self::TransactionType, + signed_receipt: &SignedNontransferableReceipt, + ) -> Result<(), Self::Error> { + self.log_receipt_internal(signed_receipt); + Ok(()) + } + + fn log_receipt_with_new_transaction( + &self, + signed_receipt: &SignedNontransferableReceipt, + ) -> Result<(), Self::Error> { + self.log_receipt_internal(signed_receipt); + Ok(()) + } + + fn get_signed_event( + &self, + said: &SelfAddressingIdentifier, + ) -> Result, 
Self::Error> { + Ok(self.events.read().unwrap().get(said).cloned()) + } + + fn get_event( + &self, + said: &SelfAddressingIdentifier, + ) -> Result>, Self::Error> { + Ok(self + .events + .read() + .unwrap() + .get(said) + .map(|t| t.signed_event_message.event_message.clone())) + } + + fn get_signatures( + &self, + said: &SelfAddressingIdentifier, + ) -> Result>, Self::Error> { + Ok(self + .signatures + .read() + .unwrap() + .get(said) + .cloned() + .map(|v| v.into_iter())) + } + + fn get_nontrans_couplets( + &self, + said: &SelfAddressingIdentifier, + ) -> Result>, Self::Error> { + Ok(self + .nontrans_couplets + .read() + .unwrap() + .get(said) + .cloned() + .map(|v| v.into_iter())) + } + + fn get_trans_receipts( + &self, + said: &SelfAddressingIdentifier, + ) -> Result, Self::Error> { + Ok(self + .trans_receipts + .read() + .unwrap() + .get(said) + .cloned() + .unwrap_or_default() + .into_iter()) + } + + fn remove_nontrans_receipt( + &self, + _txn_mode: &Self::TransactionType, + said: &SelfAddressingIdentifier, + nontrans: impl IntoIterator, + ) -> Result<(), Self::Error> { + let to_remove: Vec<_> = nontrans.into_iter().collect(); + if let Some(existing) = self.nontrans_couplets.write().unwrap().get_mut(said) { + existing.retain(|n| !to_remove.contains(n)); + } + Ok(()) + } + + fn remove_nontrans_receipt_with_new_transaction( + &self, + said: &SelfAddressingIdentifier, + nontrans: impl IntoIterator, + ) -> Result<(), Self::Error> { + self.remove_nontrans_receipt(&(), said, nontrans) + } +} + +/// In-memory sequenced event database for escrow storage. 
+pub struct MemorySequencedEventDb { + data: RwLock>>, +} + +impl MemorySequencedEventDb { + pub fn new() -> Self { + Self { + data: RwLock::new(HashMap::new()), + } + } +} + +impl SequencedEventDatabase for MemorySequencedEventDb { + type DatabaseType = (); + type Error = Error; + type DigestIter = Box>; + + fn new(_db: Arc, _table_name: &'static str) -> Result { + Ok(Self::new()) + } + + fn insert( + &self, + identifier: &IdentifierPrefix, + sn: u64, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Self::Error> { + self.data + .write() + .unwrap() + .entry((identifier.clone(), sn)) + .or_default() + .push(digest.clone()); + Ok(()) + } + + fn get( + &self, + identifier: &IdentifierPrefix, + sn: u64, + ) -> Result { + let data = self.data.read().unwrap(); + let items = data + .get(&(identifier.clone(), sn)) + .cloned() + .unwrap_or_default(); + Ok(Box::new(items.into_iter())) + } + + fn get_greater_than( + &self, + identifier: &IdentifierPrefix, + sn: u64, + ) -> Result { + let data = self.data.read().unwrap(); + let items: Vec<_> = data + .iter() + .filter(|((id, s), _)| id == identifier && *s >= sn) + .flat_map(|(_, v)| v.clone()) + .collect(); + Ok(Box::new(items.into_iter())) + } + + fn remove( + &self, + identifier: &IdentifierPrefix, + sn: u64, + said: &SelfAddressingIdentifier, + ) -> Result<(), Self::Error> { + if let Some(v) = self.data.write().unwrap().get_mut(&(identifier.clone(), sn)) { + v.retain(|d| d != said); + } + Ok(()) + } +} + +/// In-memory escrow database. 
+pub struct MemoryEscrowDb { + sequenced: Arc, + log: Arc, +} + +impl EscrowDatabase for MemoryEscrowDb { + type EscrowDatabaseType = (); + type LogDatabaseType = MemoryLogDatabase; + type Error = Error; + type EventIter = std::vec::IntoIter; + + fn new( + _escrow: Arc< + dyn SequencedEventDatabase< + DatabaseType = Self::EscrowDatabaseType, + Error = Self::Error, + DigestIter = Box>, + >, + >, + log: Arc, + ) -> Self { + // We won't use this constructor in practice; use from_parts instead + Self { + sequenced: Arc::new(MemorySequencedEventDb::new()), + log, + } + } + + fn save_digest( + &self, + id: &IdentifierPrefix, + sn: u64, + event_digest: &SelfAddressingIdentifier, + ) -> Result<(), Self::Error> { + self.sequenced.insert(id, sn, event_digest) + } + + fn insert(&self, event: &SignedEventMessage) -> Result<(), Self::Error> { + let digest = event.event_message.digest()?; + let sn = event.event_message.data.get_sn(); + let id = event.event_message.data.get_prefix(); + self.sequenced.insert(&id, sn, &digest)?; + self.log.log_event_internal(event); + Ok(()) + } + + fn insert_key_value( + &self, + id: &IdentifierPrefix, + sn: u64, + event: &SignedEventMessage, + ) -> Result<(), Self::Error> { + let digest = event.event_message.digest()?; + self.sequenced.insert(id, sn, &digest)?; + self.log.log_event_internal(event); + Ok(()) + } + + fn get( + &self, + identifier: &IdentifierPrefix, + sn: u64, + ) -> Result { + let digests = self.sequenced.get(identifier, sn)?; + let events: Vec<_> = digests + .filter_map(|d| { + self.log + .get_signed_event(&d) + .ok() + .flatten() + .map(|t| t.signed_event_message) + }) + .collect(); + Ok(events.into_iter()) + } + + fn get_from_sn( + &self, + identifier: &IdentifierPrefix, + sn: u64, + ) -> Result { + let digests = self.sequenced.get_greater_than(identifier, sn)?; + let events: Vec<_> = digests + .filter_map(|d| { + self.log + .get_signed_event(&d) + .ok() + .flatten() + .map(|t| t.signed_event_message) + }) + .collect(); + 
Ok(events.into_iter()) + } + + fn remove(&self, event: &KeriEvent) { + if let Ok(digest) = event.digest() { + let sn = event.data.get_sn(); + let id = event.data.get_prefix(); + let _ = self.sequenced.remove(&id, sn, &digest); + } + } + + fn contains( + &self, + id: &IdentifierPrefix, + sn: u64, + digest: &SelfAddressingIdentifier, + ) -> Result { + let digests = self.sequenced.get(id, sn)?; + Ok(digests.collect::>().contains(digest)) + } +} + +impl EscrowCreator for MemoryDatabase { + type EscrowDatabaseType = MemoryEscrowDb; + + fn create_escrow_db(&self, table_name: &'static str) -> Self::EscrowDatabaseType { + let seq = Arc::new(MemorySequencedEventDb::new()); + self.escrow_db + .write() + .unwrap() + .insert(table_name, seq.clone()); + MemoryEscrowDb { + sequenced: seq, + log: self.log_db.clone(), + } + } +} + +#[cfg(test)] +mod tests { + use std::{convert::TryFrom, sync::Arc}; + + use cesrox::parse; + + use super::MemoryDatabase; + use crate::{ + error::Error, + event_message::signed_event_message::{Message, Notice}, + processor::{ + basic_processor::BasicProcessor, event_storage::EventStorage, Processor, + }, + }; + + #[test] + fn test_memory_db_process_icp() -> Result<(), Error> { + let db = Arc::new(MemoryDatabase::new()); + let processor = BasicProcessor::new(db.clone(), None); + let storage = EventStorage::new(db.clone()); + + // Inception event from keripy test_multisig_digprefix + let icp_raw = 
br#"{"v":"KERI10JSON0001e7_","t":"icp","d":"EBfxc4RiVY6saIFmUfEtETs1FcqmktZW88UkbnOg0Qen","i":"EBfxc4RiVY6saIFmUfEtETs1FcqmktZW88UkbnOg0Qen","s":"0","kt":"2","k":["DErocgXD2RGSyvn3MObcx59jeOsEQhv2TqHirVkzrp0Q","DFXLiTjiRdSBPLL6hLa0rskIxk3dh4XwJLfctkJFLRSS","DE9YgIQVgpLwocTVrG8tidKScsQSMWwLWywNC48fhq4f"],"nt":"2","n":["EDJk5EEpC4-tQ7YDwBiKbpaZahh1QCyQOnZRF7p2i8k8","EAXfDjKvUFRj-IEB_o4y-Y_qeJAjYfZtOMD9e7vHNFss","EN8l6yJC2PxribTN0xfri6bLz34Qvj-x3cNwcV3DvT2m"],"bt":"0","b":[],"c":[],"a":[]}-AADAAD4SyJSYlsQG22MGXzRGz2PTMqpkgOyUfq7cS99sC2BCWwdVmEMKiTEeWe5kv-l_d9auxdadQuArLtAGEArW8wEABD0z_vQmFImZXfdR-0lclcpZFfkJJJNXDcUNrf7a-mGsxNLprJo-LROwDkH5m7tVrb-a1jcor2dHD9Jez-r4bQIACBFeU05ywfZycLdR0FxCvAR9BfV9im8tWe1DglezqJLf-vHRQSChY1KafbYNc96hYYpbuN90WzuCRMgV8KgRsEC"#; + let parsed = parse(icp_raw).unwrap().1; + let deserialized_icp = Message::try_from(parsed).unwrap(); + + let id = match &deserialized_icp { + Message::Notice(Notice::Event(e)) => e.event_message.data.get_prefix(), + _ => panic!("unexpected message type"), + }; + + // Process inception event + processor.process(&deserialized_icp)?; + + // Verify state was created + let state = storage.get_state(&id); + assert!(state.is_some()); + let state = state.unwrap(); + assert_eq!(state.sn, 0); + assert_eq!(state.current.public_keys.len(), 3); + + // Verify KEL has the event + let kel = storage.get_kel_messages(&id)?; + assert!(kel.is_some()); + assert_eq!(kel.unwrap().len(), 1); + + Ok(()) + } +} diff --git a/keriox_core/src/database/mod.rs b/keriox_core/src/database/mod.rs index b667d134..17c8f96a 100644 --- a/keriox_core/src/database/mod.rs +++ b/keriox_core/src/database/mod.rs @@ -19,7 +19,12 @@ use crate::{ #[cfg(feature = "mailbox")] pub mod mailbox; +pub mod memory; +#[cfg(feature = "storage-postgres")] +pub mod postgres; +#[cfg(feature = "storage-redb")] pub mod redb; +pub(crate) mod rkyv_adapter; pub mod timestamped; pub enum QueryParameters<'a> { diff --git a/keriox_core/src/database/postgres/error.rs 
b/keriox_core/src/database/postgres/error.rs new file mode 100644 index 00000000..90fb0ee7 --- /dev/null +++ b/keriox_core/src/database/postgres/error.rs @@ -0,0 +1,19 @@ +#[derive(Debug, thiserror::Error)] +pub enum PostgresError { + #[error("Database error: {0}")] + Sqlx(#[from] sqlx::Error), + #[error("Migration error: {0}")] + Migration(#[from] sqlx::migrate::MigrateError), + #[error("Rkyv serialization error: {0}")] + Rkyv(#[from] rkyv::rancor::Error), + #[error("CBOR error: {0}")] + Cbor(#[from] serde_cbor::Error), + #[error("JSON error: {0}")] + Json(#[from] serde_json::Error), + #[error("No event for digest {0} found")] + NotFound(said::SelfAddressingIdentifier), + #[error("No digest in provided event")] + MissingDigest, + #[error("Already saved: {0}")] + AlreadySaved(said::SelfAddressingIdentifier), +} diff --git a/keriox_core/src/database/postgres/escrow_database.rs b/keriox_core/src/database/postgres/escrow_database.rs new file mode 100644 index 00000000..bb6509ed --- /dev/null +++ b/keriox_core/src/database/postgres/escrow_database.rs @@ -0,0 +1,286 @@ +use std::sync::Arc; + +use said::SelfAddressingIdentifier; +use sqlx::{PgPool, Row}; + +use crate::{ + database::{ + postgres::{ + error::PostgresError, loging::PostgresWriteTxnMode, PostgresDatabase, + PostgresLogDatabase, + }, + rkyv_adapter::{self, serialize_said}, + EscrowCreator, EscrowDatabase, LogDatabase as _, SequencedEventDatabase, + }, + event::KeyEvent, + event_message::{msg::KeriEvent, signed_event_message::SignedEventMessage}, + prefix::IdentifierPrefix, +}; + +impl EscrowCreator for PostgresDatabase { + type EscrowDatabaseType = PostgresSnKeyEscrow; + + fn create_escrow_db(&self, table_name: &'static str) -> Self::EscrowDatabaseType { + PostgresSnKeyEscrow::new( + Arc::new(PostgresSnKeyDatabase::new(self.pool.clone(), table_name)), + self.log_db.clone(), + ) + } +} + +pub struct PostgresSnKeyEscrow { + escrow: Arc< + dyn SequencedEventDatabase< + DatabaseType = PgPool, + Error = 
PostgresError, + DigestIter = Box>, + >, + >, + log: Arc, +} + +impl EscrowDatabase for PostgresSnKeyEscrow { + type EscrowDatabaseType = PgPool; + + type LogDatabaseType = PostgresLogDatabase; + + type Error = PostgresError; + + type EventIter = Box + Send>; + + fn new( + escrow: Arc< + dyn SequencedEventDatabase< + DatabaseType = Self::EscrowDatabaseType, + Error = Self::Error, + DigestIter = Box>, + >, + >, + log: Arc, + ) -> Self + where + Self: Sized, + { + Self { escrow, log } + } + + fn save_digest( + &self, + id: &IdentifierPrefix, + sn: u64, + event_digest: &said::SelfAddressingIdentifier, + ) -> Result<(), Self::Error> { + self.escrow.insert(id, sn, event_digest) + } + + fn insert(&self, event: &SignedEventMessage) -> Result<(), Self::Error> { + self.log + .log_event(&PostgresWriteTxnMode::CreateNew, event)?; + let said = event.event_message.digest().unwrap(); + let id = event.event_message.data.get_prefix(); + let sn = event.event_message.data.sn; + self.escrow.insert(&id, sn, &said)?; + + Ok(()) + } + + fn insert_key_value( + &self, + id: &IdentifierPrefix, + sn: u64, + event: &SignedEventMessage, + ) -> Result<(), Self::Error> { + self.log + .log_event(&PostgresWriteTxnMode::CreateNew, event)?; + let said = event.event_message.digest().unwrap(); + + self.escrow.insert(id, sn, &said)?; + + Ok(()) + } + + fn get(&self, identifier: &IdentifierPrefix, sn: u64) -> Result { + let saids = self.escrow.get(identifier, sn)?; + let saids_vec: Vec<_> = saids.collect(); + + let log = Arc::clone(&self.log); + + let events = saids_vec.into_iter().filter_map(move |said| { + log.get_signed_event(&said) + .ok() + .flatten() + .map(|el| el.signed_event_message) + }); + + Ok(Box::new(events)) + } + + fn get_from_sn( + &self, + identifier: &IdentifierPrefix, + sn: u64, + ) -> Result { + let saids: Vec<_> = self.escrow.get_greater_than(identifier, sn)?.collect(); + let log = Arc::clone(&self.log); + + let events = saids.into_iter().filter_map(move |said| { + 
log.get_signed_event(&said) + .ok() + .flatten() + .map(|el| el.signed_event_message) + }); + + Ok(Box::new(events)) + } + + fn remove(&self, event: &KeriEvent) { + let said = event.digest().unwrap(); + let id = event.data.get_prefix(); + let sn = event.data.sn; + self.escrow.remove(&id, sn, &said).unwrap(); + } + + fn contains( + &self, + id: &IdentifierPrefix, + sn: u64, + digest: &said::SelfAddressingIdentifier, + ) -> Result { + Ok(self + .escrow + .get(id, sn)? + .find(|said| said == digest) + .is_some()) + } +} + +pub struct PostgresSnKeyDatabase { + pool: PgPool, + escrow_type: &'static str, +} + +impl PostgresSnKeyDatabase { + pub fn new(pool: PgPool, escrow_type: &'static str) -> Self { + Self { pool, escrow_type } + } +} + +impl SequencedEventDatabase for PostgresSnKeyDatabase { + type DatabaseType = PgPool; + + type Error = PostgresError; + + type DigestIter = Box>; + + fn new(db: Arc, table_name: &'static str) -> Result + where + Self: Sized, + { + Ok(Self { + pool: (*db).clone(), + escrow_type: table_name, + }) + } + + fn insert( + &self, + identifier: &IdentifierPrefix, + sn: u64, + digest: &said::SelfAddressingIdentifier, + ) -> Result<(), PostgresError> { + let id_str = identifier.to_string(); + let digest_bytes = rkyv_adapter::serialize_said(digest)?; + async_std::task::block_on( + sqlx::query( + "INSERT INTO escrow_events (escrow_type, identifier, sn, digest) \ + VALUES ($1, $2, $3, $4) \ + ON CONFLICT DO NOTHING", + ) + .bind(self.escrow_type) + .bind(&id_str) + .bind(sn as i64) + .bind(digest_bytes.as_ref()) + .execute(&self.pool), + )?; + Ok(()) + } + + fn get( + &self, + identifier: &IdentifierPrefix, + sn: u64, + ) -> Result { + let id_str = identifier.to_string(); + let rows = async_std::task::block_on( + sqlx::query( + "SELECT digest FROM escrow_events \ + WHERE escrow_type = $1 AND identifier = $2 AND sn = $3", + ) + .bind(self.escrow_type) + .bind(&id_str) + .bind(sn as i64) + .fetch_all(&self.pool), + )?; + + let saids: Vec = rows + 
.into_iter() + .filter_map(|row| { + let bytes: Vec = row.get("digest"); + rkyv_adapter::deserialize_said(&bytes).ok() + }) + .collect(); + + Ok(Box::new(saids.into_iter())) + } + + fn get_greater_than( + &self, + identifier: &IdentifierPrefix, + sn: u64, + ) -> Result { + let id_str = identifier.to_string(); + let rows = async_std::task::block_on( + sqlx::query( + "SELECT digest FROM escrow_events \ + WHERE escrow_type = $1 AND identifier = $2 AND sn >= $3 \ + ORDER BY sn ASC", + ) + .bind(self.escrow_type) + .bind(&id_str) + .bind(sn as i64) + .fetch_all(&self.pool), + )?; + + let saids: Vec = rows + .into_iter() + .filter_map(|row| { + let bytes: Vec = row.get("digest"); + rkyv_adapter::deserialize_said(&bytes).ok() + }) + .collect(); + + Ok(Box::new(saids.into_iter())) + } + + fn remove( + &self, + identifier: &IdentifierPrefix, + sn: u64, + said: &said::SelfAddressingIdentifier, + ) -> Result<(), PostgresError> { + let id_str = identifier.to_string(); + let digest_bytes = serialize_said(said)?; + async_std::task::block_on( + sqlx::query( + "DELETE FROM escrow_events \ + WHERE escrow_type = $1 AND identifier = $2 AND sn = $3 AND digest = $4", + ) + .bind(self.escrow_type) + .bind(&id_str) + .bind(sn as i64) + .bind(digest_bytes.as_ref()) + .execute(&self.pool), + )?; + Ok(()) + } +} diff --git a/keriox_core/src/database/postgres/ksn_log.rs b/keriox_core/src/database/postgres/ksn_log.rs new file mode 100644 index 00000000..5ae5d894 --- /dev/null +++ b/keriox_core/src/database/postgres/ksn_log.rs @@ -0,0 +1,144 @@ +use std::sync::Arc; + +use said::SelfAddressingIdentifier; +use sqlx::{PgPool, Row}; + +use crate::{ + database::{ + postgres::error::PostgresError, + rkyv_adapter, + }, + prefix::IdentifierPrefix, + query::reply_event::{ReplyRoute, SignedReply}, +}; + +pub struct KsnLogDatabase { + pool: PgPool, +} + +pub struct AcceptedKsn { + ksn_log: Arc, + pool: PgPool, +} + +impl AcceptedKsn { + pub fn new(pool: PgPool) -> Self { + let ksn_log = 
Arc::new(KsnLogDatabase::new(pool.clone())); + Self { ksn_log, pool } + } + + pub fn insert(&self, reply: SignedReply) -> Result<(), PostgresError> { + let (from_who, about_who) = match reply.reply.get_route() { + ReplyRoute::Ksn(id, ksn) => (id, ksn.state.prefix), + _ => panic!("Wrong event type"), + }; + + let digest = reply + .reply + .digest() + .map_err(|_| PostgresError::MissingDigest)?; + let serialized_digest = rkyv_adapter::serialize_said(&digest)?; + + async_std::task::block_on(async { + let mut tx = self.pool.begin().await?; + + // Store the KSN event itself + let value = serde_cbor::to_vec(&reply).unwrap(); + sqlx::query( + "INSERT INTO ksns (digest, ksn_data) VALUES ($1, $2) \ + ON CONFLICT (digest) DO NOTHING", + ) + .bind(serialized_digest.as_ref()) + .bind(value.as_slice()) + .execute(&mut *tx) + .await?; + + // Update the accepted index + sqlx::query( + "INSERT INTO accepted_ksns (about_who, from_who, digest) VALUES ($1, $2, $3) \ + ON CONFLICT (about_who, from_who) DO UPDATE SET digest = $3", + ) + .bind(about_who.to_string()) + .bind(from_who.to_string()) + .bind(serialized_digest.as_ref()) + .execute(&mut *tx) + .await?; + + tx.commit().await?; + Ok(()) + }) + } + + pub fn get_all(&self, id: &IdentifierPrefix) -> Result, PostgresError> { + async_std::task::block_on(async { + let rows = sqlx::query("SELECT digest FROM accepted_ksns WHERE about_who = $1") + .bind(id.to_string()) + .fetch_all(&self.pool) + .await?; + + let mut replies = Vec::new(); + for row in rows { + let digest_bytes: Vec = row.get("digest"); + let said = rkyv_adapter::deserialize_said(&digest_bytes)?; + if let Some(reply) = self.ksn_log.get_signed_reply(&said)? 
{ + replies.push(reply); + } + } + Ok(replies) + }) + } + + pub fn get( + &self, + id: &IdentifierPrefix, + from_who: &IdentifierPrefix, + ) -> Result, PostgresError> { + async_std::task::block_on(async { + let row = sqlx::query( + "SELECT digest FROM accepted_ksns WHERE about_who = $1 AND from_who = $2", + ) + .bind(id.to_string()) + .bind(from_who.to_string()) + .fetch_optional(&self.pool) + .await?; + + match row { + Some(row) => { + let digest_bytes: Vec = row.get("digest"); + let said = rkyv_adapter::deserialize_said(&digest_bytes)?; + self.ksn_log.get_signed_reply(&said) + } + None => Ok(None), + } + }) + } +} + +impl KsnLogDatabase { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } + + pub fn get_signed_reply( + &self, + said: &SelfAddressingIdentifier, + ) -> Result, PostgresError> { + let key = rkyv_adapter::serialize_said(said)?; + + async_std::task::block_on(async { + let row = sqlx::query("SELECT ksn_data FROM ksns WHERE digest = $1") + .bind(key.as_ref()) + .fetch_optional(&self.pool) + .await?; + + match row { + Some(row) => { + let bytes: Vec = row.get("ksn_data"); + let reply: SignedReply = serde_cbor::from_slice(&bytes).unwrap(); + Ok(Some(reply)) + } + None => Ok(None), + } + }) + } +} diff --git a/keriox_core/src/database/postgres/loging.rs b/keriox_core/src/database/postgres/loging.rs new file mode 100644 index 00000000..bfab4617 --- /dev/null +++ b/keriox_core/src/database/postgres/loging.rs @@ -0,0 +1,508 @@ +use crate::{ + database::{ + postgres::error::PostgresError, rkyv_adapter, timestamped::TimestampedSignedEventMessage, + LogDatabase as LogDatabaseTrait, + }, + event::KeyEvent, + event_message::{ + msg::KeriEvent, + signature::{Nontransferable, Transferable}, + signed_event_message::SignedEventMessage, + }, + prefix::IndexedSignature, +}; + +use rkyv::{ + api::high::HighSerializer, rancor::Failure, ser::allocator::ArenaHandle, util::AlignedVec, +}; +use said::SelfAddressingIdentifier; +use sqlx::{PgPool, Row}; + +pub struct 
PostgresLogDatabase { + pool: PgPool, +} + +/// Transaction mode for PostgreSQL operations +pub enum PostgresWriteTxnMode { + /// Create a new transaction + CreateNew, + /// Operations are executed without explicit transaction management + /// (caller is responsible for transaction handling) + NoTransaction, +} + +impl PostgresLogDatabase { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } + + pub fn pool(&self) -> &PgPool { + &self.pool + } + + fn insert_with_digest_key< + V: for<'a> rkyv::Serialize, rkyv::rancor::Error>>, + >( + &self, + table: &str, + value_column: &str, + said: &SelfAddressingIdentifier, + values: &[V], + ) -> Result<(), PostgresError> { + let serialized_said = rkyv_adapter::serialize_said(said)?; + let query = format!( + "INSERT INTO {table} (digest, {value_column}) VALUES ($1, $2) \ + ON CONFLICT (digest, {value_column}) DO NOTHING" + ); + + async_std::task::block_on(async { + let mut tx = self.pool.begin().await?; + + for value in values { + let bytes = rkyv::to_bytes::(value)?; + sqlx::query(&query) + .bind(serialized_said.as_ref()) + .bind(bytes.as_ref()) + .execute(&mut *tx) + .await?; + } + + tx.commit().await?; + Ok(()) + }) + } + + pub(super) fn insert_nontrans_receipt( + &self, + said: &SelfAddressingIdentifier, + nontrans: &[Nontransferable], + ) -> Result<(), PostgresError> { + self.insert_with_digest_key("nontrans_receipts", "receipt_data", said, nontrans) + } + + pub(super) fn insert_trans_receipt( + &self, + said: &SelfAddressingIdentifier, + trans: &[Transferable], + ) -> Result<(), PostgresError> { + self.insert_with_digest_key("trans_receipts", "receipt_data", said, trans) + } + + /// Workaround: The `LogDatabase` trait takes `&Self::TransactionType` (immutable), + /// which prevents passing a `&mut sqlx::Transaction` through the trait's `log_event`. + /// This async method accepts an existing transaction directly so callers like + /// `add_kel_finalized_event` can log events within the same transaction. 
+ pub async fn log_event_with_tx( + &self, + tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, + signed_event: &SignedEventMessage, + ) -> Result<(), PostgresError> { + let digest = signed_event + .event_message + .digest() + .map_err(|_| PostgresError::MissingDigest)?; + let serialized_digest = rkyv_adapter::serialize_said(&digest)?; + + // 1. Store the event (digest -> event_data) + let event_bytes = rkyv::to_bytes::(&signed_event.event_message)?; + sqlx::query( + "INSERT INTO events (digest, event_data) VALUES ($1, $2) \ + ON CONFLICT (digest) DO NOTHING", + ) + .bind(serialized_digest.as_ref()) + .bind(event_bytes.as_ref()) + .execute(&mut **tx) + .await?; + + // 2. Store signatures (digest -> signature_data) + for sig in &signed_event.signatures { + let sig_bytes = rkyv::to_bytes::(sig)?; + sqlx::query( + "INSERT INTO signatures (digest, signature_data) VALUES ($1, $2) \ + ON CONFLICT (digest, signature_data) DO NOTHING", + ) + .bind(serialized_digest.as_ref()) + .bind(sig_bytes.as_ref()) + .execute(&mut **tx) + .await?; + } + + // 3. Store witness receipts (nontransferable) + if let Some(receipts) = &signed_event.witness_receipts { + for receipt in receipts { + let receipt_bytes = rkyv::to_bytes::(receipt)?; + sqlx::query( + "INSERT INTO nontrans_receipts (digest, receipt_data) VALUES ($1, $2) \ + ON CONFLICT (digest, receipt_data) DO NOTHING", + ) + .bind(serialized_digest.as_ref()) + .bind(receipt_bytes.as_ref()) + .execute(&mut **tx) + .await?; + } + } + + // 4. 
Store delegator seal + if let Some(seal) = &signed_event.delegator_seal { + let seal_bytes = rkyv::to_bytes::(seal)?; + sqlx::query( + "INSERT INTO seals (digest, seal_data) VALUES ($1, $2) \ + ON CONFLICT (digest) DO NOTHING", + ) + .bind(serialized_digest.as_ref()) + .bind(seal_bytes.as_ref()) + .execute(&mut **tx) + .await?; + } + + Ok(()) + } + + pub(super) fn get_nontrans_couplets_by_key( + &self, + key: &[u8], + ) -> Result>, PostgresError> { + async_std::task::block_on(async { + let rows = sqlx::query("SELECT receipt_data FROM nontrans_receipts WHERE digest = $1") + .bind(key) + .fetch_all(&self.pool) + .await?; + + let nontrans = rows + .into_iter() + .map(|row| { + let bytes: Vec = row.get("receipt_data"); + rkyv_adapter::deserialize_nontransferable(&bytes).map_err(PostgresError::from) + }) + .collect::, _>>()?; + + Ok(if nontrans.is_empty() { + None + } else { + Some(nontrans.into_iter()) + }) + }) + } + + fn get_trans_receipts_by_serialized_key( + &self, + key: &[u8], + ) -> Result, PostgresError> { + async_std::task::block_on(async { + let rows = sqlx::query("SELECT receipt_data FROM trans_receipts WHERE digest = $1") + .bind(key) + .fetch_all(&self.pool) + .await?; + + let trans = rows + .into_iter() + .map(|row| { + let bytes: Vec = row.get("receipt_data"); + rkyv_adapter::deserialize_transferable(&bytes).map_err(PostgresError::from) + }) + .collect::, _>>()?; + + Ok(trans.into_iter()) + }) + } + + fn get_event_by_serialized_key( + &self, + as_slice: &[u8], + ) -> Result>, PostgresError> { + async_std::task::block_on(async { + let row = sqlx::query("SELECT event_data FROM events WHERE digest = $1") + .bind(as_slice) + .fetch_optional(&self.pool) + .await?; + + match row { + Some(row) => { + let bytes: Vec = row.get("event_data"); + let event = rkyv::from_bytes::<_, Failure>(&bytes).unwrap(); + Ok(Some(event)) + } + None => Ok(None), + } + }) + } + + fn get_signatures_by_serialized_key( + &self, + key: &[u8], + ) -> Result>, PostgresError> { + 
async_std::task::block_on(async { + let rows = sqlx::query("SELECT signature_data FROM signatures WHERE digest = $1") + .bind(key) + .fetch_all(&self.pool) + .await?; + + let sigs = rows + .into_iter() + .map(|row| { + let bytes: Vec = row.get("signature_data"); + rkyv_adapter::deserialize_indexed_signatures(&bytes) + .map_err(PostgresError::from) + }) + .collect::, _>>()?; + + Ok(if sigs.is_empty() { + None + } else { + Some(sigs.into_iter()) + }) + }) + } +} + +impl<'db> LogDatabaseTrait<'db> for PostgresLogDatabase { + type DatabaseType = PgPool; + type Error = PostgresError; + type TransactionType = PostgresWriteTxnMode; + + fn new(db: std::sync::Arc) -> Result + where + Self: Sized, + { + Ok(Self { + pool: (*db).clone(), + }) + } + + fn log_event( + &self, + _txn: &Self::TransactionType, + signed_event: &SignedEventMessage, + ) -> Result<(), Self::Error> { + let digest = signed_event + .event_message + .digest() + .map_err(|_| PostgresError::MissingDigest)?; + let serialized_digest = rkyv_adapter::serialize_said(&digest)?; + + // 1. Store the event (digest -> event_data) + let event_bytes = rkyv::to_bytes::(&signed_event.event_message)?; + async_std::task::block_on(async { + sqlx::query( + "INSERT INTO events (digest, event_data) VALUES ($1, $2) \ + ON CONFLICT (digest) DO NOTHING", + ) + .bind(serialized_digest.as_ref()) + .bind(event_bytes.as_ref()) + .execute(&self.pool) + .await?; + + // 2. Store signatures (digest -> signature_data) + for sig in &signed_event.signatures { + let sig_bytes = rkyv::to_bytes::(sig)?; + sqlx::query( + "INSERT INTO signatures (digest, signature_data) VALUES ($1, $2) \ + ON CONFLICT (digest, signature_data) DO NOTHING", + ) + .bind(serialized_digest.as_ref()) + .bind(sig_bytes.as_ref()) + .execute(&self.pool) + .await?; + } + + // 3. 
Store witness receipts (nontransferable) + if let Some(receipts) = &signed_event.witness_receipts { + for receipt in receipts { + let receipt_bytes = rkyv::to_bytes::(receipt)?; + sqlx::query( + "INSERT INTO nontrans_receipts (digest, receipt_data) VALUES ($1, $2) \ + ON CONFLICT (digest, receipt_data) DO NOTHING", + ) + .bind(serialized_digest.as_ref()) + .bind(receipt_bytes.as_ref()) + .execute(&self.pool) + .await?; + } + } + + // 4. Store delegator seal + if let Some(seal) = &signed_event.delegator_seal { + let seal_bytes = rkyv::to_bytes::(seal)?; + sqlx::query( + "INSERT INTO seals (digest, seal_data) VALUES ($1, $2) \ + ON CONFLICT (digest) DO NOTHING", + ) + .bind(serialized_digest.as_ref()) + .bind(seal_bytes.as_ref()) + .execute(&self.pool) + .await?; + } + Ok(()) + }) + } + + fn log_event_with_new_transaction( + &self, + signed_event: &SignedEventMessage, + ) -> Result<(), Self::Error> { + self.log_event(&PostgresWriteTxnMode::CreateNew, signed_event) + } + + fn log_receipt( + &self, + _txn: &Self::TransactionType, + signed_receipt: &crate::event_message::signed_event_message::SignedNontransferableReceipt, + ) -> Result<(), Self::Error> { + let digest = &signed_receipt.body.receipted_event_digest; + self.insert_nontrans_receipt(digest, &signed_receipt.signatures)?; + Ok(()) + } + + fn log_receipt_with_new_transaction( + &self, + signed_receipt: &crate::event_message::signed_event_message::SignedNontransferableReceipt, + ) -> Result<(), Self::Error> { + self.log_receipt(&PostgresWriteTxnMode::CreateNew, signed_receipt) + } + + fn get_signed_event( + &self, + said: &said::SelfAddressingIdentifier, + ) -> Result, Self::Error> { + let key = rkyv_adapter::serialize_said(said)?; + + async_std::task::block_on(async { + // 1. 
Fetch event + let event_row = sqlx::query("SELECT event_data FROM events WHERE digest = $1") + .bind(key.as_ref()) + .fetch_optional(&self.pool) + .await?; + + let event_row = match event_row { + Some(row) => row, + None => return Ok(None), + }; + + let event_bytes: Vec = event_row.get("event_data"); + let event: KeriEvent = rkyv::from_bytes::<_, Failure>(&event_bytes).unwrap(); + + // 2. Fetch signatures + let sig_rows = sqlx::query("SELECT signature_data FROM signatures WHERE digest = $1") + .bind(key.as_ref()) + .fetch_all(&self.pool) + .await?; + + let signatures: Vec = sig_rows + .iter() + .filter_map(|row| { + let bytes: Vec = row.get("signature_data"); + rkyv_adapter::deserialize_indexed_signatures(&bytes).ok() + }) + .collect(); + + // 3. Fetch nontransferable receipts + let receipt_rows = + sqlx::query("SELECT receipt_data FROM nontrans_receipts WHERE digest = $1") + .bind(key.as_ref()) + .fetch_all(&self.pool) + .await?; + + let receipts: Vec = receipt_rows + .iter() + .filter_map(|row| { + let bytes: Vec = row.get("receipt_data"); + rkyv_adapter::deserialize_nontransferable(&bytes).ok() + }) + .collect(); + + let witness_receipts = if receipts.is_empty() { + None + } else { + Some(receipts) + }; + + // 4. 
Fetch delegator seal + let seal_row = sqlx::query("SELECT seal_data FROM seals WHERE digest = $1") + .bind(key.as_ref()) + .fetch_optional(&self.pool) + .await?; + + let delegator_seal = seal_row.and_then(|row| { + let bytes: Vec = row.get("seal_data"); + rkyv_adapter::deserialize_source_seal(&bytes).ok() + }); + + Ok(Some(TimestampedSignedEventMessage::new( + SignedEventMessage::new(&event, signatures, witness_receipts, delegator_seal), + ))) + }) + } + + fn get_event( + &self, + said: &said::SelfAddressingIdentifier, + ) -> Result>, Self::Error> { + let key = rkyv_adapter::serialize_said(said)?; + self.get_event_by_serialized_key(key.as_slice()) + } + + fn get_signatures( + &self, + said: &said::SelfAddressingIdentifier, + ) -> Result>, PostgresError> { + let key = rkyv_adapter::serialize_said(said)?; + self.get_signatures_by_serialized_key(key.as_ref()) + } + + fn get_nontrans_couplets( + &self, + said: &said::SelfAddressingIdentifier, + ) -> Result>, PostgresError> { + let serialized_said = rkyv_adapter::serialize_said(said)?; + self.get_nontrans_couplets_by_key(serialized_said.as_ref()) + } + + fn get_trans_receipts( + &self, + said: &said::SelfAddressingIdentifier, + ) -> Result, PostgresError> { + let key = rkyv_adapter::serialize_said(said)?; + self.get_trans_receipts_by_serialized_key(key.as_slice()) + } + + fn remove_nontrans_receipt( + &self, + _txn_mode: &Self::TransactionType, + said: &said::SelfAddressingIdentifier, + nontrans: impl IntoIterator, + ) -> Result<(), Self::Error> { + async_std::task::block_on(async { + let serialized_said = rkyv_adapter::serialize_said(said)?; + + let receipt_bytes: Result>, _> = nontrans + .into_iter() + .map(|receipt| { + rkyv::to_bytes::(&receipt) + .map(|b| b.to_vec()) + .map_err(PostgresError::from) + }) + .collect(); + let receipt_bytes = receipt_bytes?; + + if !receipt_bytes.is_empty() { + let receipt_refs: Vec<&[u8]> = receipt_bytes.iter().map(Vec::as_slice).collect(); + sqlx::query( + "DELETE FROM 
nontrans_receipts WHERE digest = $1 AND receipt_data = ANY($2)", + ) + .bind(serialized_said.as_ref()) + .bind(receipt_refs.as_slice()) + .execute(&self.pool) + .await?; + } + Ok(()) + }) + } + + fn remove_nontrans_receipt_with_new_transaction( + &self, + said: &said::SelfAddressingIdentifier, + nontrans: impl IntoIterator, + ) -> Result<(), PostgresError> { + self.remove_nontrans_receipt(&PostgresWriteTxnMode::CreateNew, said, nontrans) + } +} diff --git a/keriox_core/src/database/postgres/migrations/001_initial_schema.sql b/keriox_core/src/database/postgres/migrations/001_initial_schema.sql new file mode 100644 index 00000000..94a147c1 --- /dev/null +++ b/keriox_core/src/database/postgres/migrations/001_initial_schema.sql @@ -0,0 +1,175 @@ +-- =========================================== +-- KEL Tables (EventDatabase) +-- =========================================== + +-- Maps (identifier, sn) -> event_digest +-- ReDB: KELS: TableDefinition<(&str, u64), &[u8]> +CREATE TABLE kels ( + identifier TEXT NOT NULL, + sn BIGINT NOT NULL, + digest BYTEA NOT NULL, + PRIMARY KEY (identifier, sn) -- UNIQUE constraint prevents duplicate sn +); +CREATE INDEX idx_kels_identifier ON kels(identifier); + +-- Maps identifier -> serialized IdentifierState +-- ReDB: KEY_STATES: TableDefinition<&str, &[u8]> +CREATE TABLE key_states ( + identifier TEXT PRIMARY KEY, + state_data BYTEA NOT NULL -- rkyv serialized IdentifierState +); + +-- Maps digest -> serialized event +-- ReDB: EVENTS: TableDefinition<&[u8], &[u8]> +CREATE TABLE events ( + digest BYTEA PRIMARY KEY, + event_data BYTEA NOT NULL -- rkyv serialized KeriEvent +); + +-- Multimap: digest -> multiple signatures +-- ReDB: SIGS: MultimapTableDefinition<&[u8], &[u8]> +CREATE TABLE signatures ( + digest BYTEA NOT NULL, + signature_data BYTEA NOT NULL, -- rkyv serialized IndexedSignature + PRIMARY KEY (digest, signature_data) -- Prevents duplicate signatures +); + +-- Multimap: digest -> multiple non-transferable receipts +-- ReDB: 
-- (cont.) NONTRANS_RCTS: MultimapTableDefinition<&[u8], &[u8]>
CREATE TABLE nontrans_receipts (
    digest BYTEA NOT NULL,
    receipt_data BYTEA NOT NULL, -- rkyv serialized Nontransferable
    PRIMARY KEY (digest, receipt_data)
);

-- Multimap: digest -> multiple transferable receipts
-- ReDB: TRANS_RCTS: MultimapTableDefinition<&[u8], &[u8]>
CREATE TABLE trans_receipts (
    digest BYTEA NOT NULL,
    receipt_data BYTEA NOT NULL, -- rkyv serialized Transferable
    PRIMARY KEY (digest, receipt_data)
);

-- Maps digest -> seal data
-- ReDB: SEALS: TableDefinition<&[u8], &[u8]>
CREATE TABLE seals (
    digest BYTEA PRIMARY KEY,
    seal_data BYTEA NOT NULL -- rkyv serialized SourceSeal
);

-- ===========================================
-- TEL Tables (TelEventDatabase)
-- ===========================================

-- TEL events storage: digest -> CBOR serialized VerifiableEvent
CREATE TABLE tel_events (
    digest TEXT PRIMARY KEY,
    event_data BYTEA NOT NULL
);

-- VC TEL index: (vc_identifier, sn) -> event_digest
CREATE TABLE vc_tels (
    identifier TEXT NOT NULL,
    sn BIGINT NOT NULL,
    digest TEXT NOT NULL,
    PRIMARY KEY (identifier, sn)
);
CREATE INDEX idx_vc_tels_identifier ON vc_tels(identifier);

-- Management TEL index: (registry_identifier, sn) -> event_digest
CREATE TABLE management_tels (
    identifier TEXT NOT NULL,
    sn BIGINT NOT NULL,
    digest TEXT NOT NULL,
    PRIMARY KEY (identifier, sn)
);
CREATE INDEX idx_management_tels_identifier ON management_tels(identifier);

-- ===========================================
-- TEL Escrow Tables (TelEscrowDatabase)
-- ===========================================

-- Missing KEL issuer event escrow: kel_digest -> [tel_digest]
CREATE TABLE tel_missing_issuer_escrow (
    kel_digest TEXT NOT NULL,
    tel_digest TEXT NOT NULL,
    PRIMARY KEY (kel_digest, tel_digest)
);

-- Out-of-order TEL events escrow: (identifier, sn) -> [tel_digest]
CREATE TABLE tel_out_of_order_escrow (
    identifier TEXT NOT NULL,
    sn BIGINT NOT NULL,
    tel_digest TEXT NOT NULL,
    PRIMARY KEY (identifier, sn, tel_digest)
);

-- Missing registry TEL events escrow: registry_id -> [tel_digest]
CREATE TABLE tel_missing_registry_escrow (
    registry_id TEXT NOT NULL,
    tel_digest TEXT NOT NULL,
    PRIMARY KEY (registry_id, tel_digest)
);

-- ===========================================
-- Escrow Tables
-- ===========================================

-- Unified escrow table (replaces dynamic MultimapTableDefinition per escrow type)
-- ReDB: sn_key_table: MultimapTableDefinition<(&str, u64), &[u8]>
CREATE TABLE escrow_events (
    escrow_type TEXT NOT NULL, -- 'partially_signed', 'out_of_order', 'partially_witnessed', etc.
    identifier TEXT NOT NULL,
    sn BIGINT NOT NULL,
    digest BYTEA NOT NULL,
    PRIMARY KEY (escrow_type, identifier, sn, digest) -- Allows multiple digests per (type, id, sn)
);
CREATE INDEX idx_escrow_lookup ON escrow_events(escrow_type, identifier, sn);

-- Escrow timestamps
-- ReDB: dts_table: TableDefinition<&[u8], u64>
CREATE TABLE escrow_timestamps (
    digest BYTEA PRIMARY KEY,
    timestamp_secs BIGINT NOT NULL -- seconds since UNIX_EPOCH
);

-- ===========================================
-- OOBI Tables
-- ===========================================

-- Location scheme OOBIs: (eid, scheme) -> OOBI data
CREATE TABLE location_oobis (
    eid TEXT NOT NULL,
    scheme TEXT NOT NULL,
    oobi_data BYTEA NOT NULL,
    PRIMARY KEY (eid, scheme)
);

-- End role OOBIs: (cid, role) -> multiple OOBIs
CREATE TABLE end_role_oobis (
    id SERIAL PRIMARY KEY,
    cid TEXT NOT NULL,
    role TEXT NOT NULL,
    eid TEXT NOT NULL,
    oobi_data BYTEA NOT NULL
);
CREATE INDEX idx_end_role_lookup ON end_role_oobis(cid, role);


-- ===========================================
-- KSN Tables
-- ===========================================

-- Maps digest -> serialized SignedReply (KSN log)
CREATE TABLE ksns (
    digest BYTEA PRIMARY KEY,
    ksn_data BYTEA NOT NULL -- CBOR serialized
SignedReply +); + +-- Maps (about_who, from_who) -> digest (accepted KSN index) +CREATE TABLE accepted_ksns ( + about_who TEXT NOT NULL, + from_who TEXT NOT NULL, + digest BYTEA NOT NULL, + PRIMARY KEY (about_who, from_who) +); diff --git a/keriox_core/src/database/postgres/mod.rs b/keriox_core/src/database/postgres/mod.rs new file mode 100644 index 00000000..08f0a4ad --- /dev/null +++ b/keriox_core/src/database/postgres/mod.rs @@ -0,0 +1,541 @@ +use std::sync::Arc; + +use cesrox::primitives::CesrPrimitive; +use said::{sad::SerializationFormats, SelfAddressingIdentifier}; +use sqlx::{postgres::PgPoolOptions, PgPool, Row}; + +#[cfg(feature = "query")] +use crate::query::reply_event::SignedReply; +use crate::{ + database::{postgres::error::PostgresError, rkyv_adapter, EventDatabase, LogDatabase}, + event::{receipt::Receipt, KeyEvent}, + event_message::{ + msg::KeriEvent, + signature::{Nontransferable, Transferable}, + signed_event_message::{ + SignedEventMessage, SignedNontransferableReceipt, SignedTransferableReceipt, + }, + }, + prefix::IdentifierPrefix, + state::IdentifierState, +}; + +#[cfg(feature = "query")] +use ksn_log::AcceptedKsn; + +mod error; +mod escrow_database; +#[cfg(feature = "query")] +mod ksn_log; +mod loging; +#[cfg(feature = "oobi-manager")] +pub mod oobi_storage; + +pub use loging::PostgresLogDatabase; +#[cfg(feature = "oobi-manager")] +pub use oobi_storage::PostgresOobiStorage; + +use super::{timestamped::TimestampedSignedEventMessage, QueryParameters}; + +/// Configuration for the PostgreSQL connection pool. +pub struct PostgresConfig { + /// Maximum number of connections in the pool. 
+ pub max_connections: u32, +} + +impl Default for PostgresConfig { + fn default() -> Self { + Self { + max_connections: 10, + } + } +} + +pub struct PostgresDatabase { + pub pool: PgPool, + pub(crate) log_db: Arc, + #[cfg(feature = "query")] + accepted_rpy: Arc, +} + +impl PostgresDatabase { + pub async fn new(database_url: &str) -> Result { + Self::new_with_config(database_url, PostgresConfig::default()).await + } + + pub async fn new_with_config( + database_url: &str, + config: PostgresConfig, + ) -> Result { + let pool = PgPoolOptions::new() + .max_connections(config.max_connections) + .connect(database_url) + .await?; + + let log_db = Arc::new(PostgresLogDatabase::new(pool.clone())); + + #[cfg(feature = "query")] + let accepted_rpy = Arc::new(AcceptedKsn::new(pool.clone())); + + Ok(Self { + pool, + log_db, + #[cfg(feature = "query")] + accepted_rpy, + }) + } + + pub async fn run_migrations(&self) -> Result<(), PostgresError> { + sqlx::migrate!("src/database/postgres/migrations") + .run(&self.pool) + .await?; + Ok(()) + } + + async fn update_key_state( + &self, + tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, + event: &KeriEvent, + ) -> Result<(), PostgresError> { + let prefix = event.data.prefix.to_str(); + + let row = sqlx::query("SELECT state_data FROM key_states WHERE identifier = $1") + .bind(&prefix) + .fetch_optional(&mut **tx) + .await?; + + let current_state = match row { + Some(row) => { + let bytes: Vec = row.get("state_data"); + rkyv_adapter::deserialize_identifier_state(&bytes)? 
+ } + None => IdentifierState::default(), + }; + + let new_state = current_state + .apply(event) + .map_err(|_| PostgresError::AlreadySaved(event.digest().unwrap()))?; + let state_bytes = rkyv::to_bytes::(&new_state)?; + + sqlx::query( + "INSERT INTO key_states (identifier, state_data) VALUES ($1, $2) \ + ON CONFLICT (identifier) DO UPDATE SET state_data = $2", + ) + .bind(&prefix) + .bind(state_bytes.as_ref()) + .execute(&mut **tx) + .await?; + + Ok(()) + } + + fn get_event_digest( + &self, + identifier: &IdentifierPrefix, + sn: u64, + ) -> Result, PostgresError> { + async_std::task::block_on(async { + let row = sqlx::query("SELECT digest FROM kels WHERE identifier = $1 AND sn = $2") + .bind(identifier.to_str()) + .bind(sn as i64) + .fetch_optional(&self.pool) + .await?; + + if let Some(row) = row { + let digest_bytes: Vec = row.get("digest"); + let digest = rkyv_adapter::deserialize_said(&digest_bytes)?; + Ok(Some(digest)) + } else { + Ok(None) + } + }) + } + + fn get_nontrans_receipts_range( + &self, + id: &str, + start: u64, + limit: u64, + ) -> Result, PostgresError> { + async_std::task::block_on(async { + let rows = if limit == u64::MAX { + sqlx::query( + "SELECT k.sn, k.digest, nr.receipt_data \ + FROM kels k \ + LEFT JOIN nontrans_receipts nr ON k.digest = nr.digest \ + WHERE k.identifier = $1 AND k.sn >= $2 \ + ORDER BY k.sn ASC", + ) + .bind(id) + .bind(start as i64) + .fetch_all(&self.pool) + .await? + } else { + let end_sn = start.saturating_add(limit) as i64; + sqlx::query( + "SELECT k.sn, k.digest, nr.receipt_data \ + FROM kels k \ + LEFT JOIN nontrans_receipts nr ON k.digest = nr.digest \ + WHERE k.identifier = $1 AND k.sn >= $2 AND k.sn < $3 \ + ORDER BY k.sn ASC", + ) + .bind(id) + .bind(start as i64) + .bind(end_sn) + .fetch_all(&self.pool) + .await? 
+ }; + + let mut grouped: std::collections::BTreeMap, Vec)> = + std::collections::BTreeMap::new(); + + for row in rows { + let sn: i64 = row.get("sn"); + let digest_bytes: Vec = row.get("digest"); + let receipt_data: Option> = row.get("receipt_data"); + + let entry = grouped + .entry(sn as u64) + .or_insert_with(|| (digest_bytes, Vec::new())); + + if let Some(bytes) = receipt_data { + if let Ok(nt) = rkyv_adapter::deserialize_nontransferable(&bytes) { + entry.1.push(nt); + } + } + } + + let identifier: IdentifierPrefix = id.parse().unwrap(); + let receipts = grouped + .into_iter() + .map(|(sn, (digest_bytes, nontrans))| { + let said = rkyv_adapter::deserialize_said(&digest_bytes).unwrap(); + let rct = + Receipt::new(SerializationFormats::JSON, said, identifier.clone(), sn); + SignedNontransferableReceipt { + body: rct, + signatures: nontrans, + } + }) + .collect(); + + Ok(receipts) + }) + } + + async fn save_to_kel( + &self, + tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, + event: &KeriEvent, + ) -> Result<(), PostgresError> { + let prefix = event.data.prefix.to_str(); + let digest = event.digest().map_err(|_| PostgresError::MissingDigest)?; + let sn = event.data.sn as i64; + let serialized_digest = rkyv_adapter::serialize_said(&digest)?; + + sqlx::query( + "INSERT INTO kels (identifier, sn, digest) VALUES ($1, $2, $3) \ + ON CONFLICT (identifier, sn) DO NOTHING", + ) + .bind(&prefix) + .bind(sn) + .bind(serialized_digest.as_ref()) + .execute(&mut **tx) + .await?; + + Ok(()) + } + + fn get_kel( + &self, + id: &IdentifierPrefix, + from: u64, + limit: u64, + ) -> Result, PostgresError> { + let prefix = id.to_str(); + let from_sn = from as i64; + + async_std::task::block_on(async { + let rows = if limit == u64::MAX { + sqlx::query( + "SELECT digest FROM kels WHERE identifier = $1 AND sn >= $2 ORDER BY sn ASC", + ) + .bind(&prefix) + .bind(from_sn) + .fetch_all(&self.pool) + .await? 
+ } else { + let end_sn = from.saturating_add(limit) as i64; + sqlx::query( + "SELECT digest FROM kels WHERE identifier = $1 AND sn >= $2 AND sn < $3 ORDER BY sn ASC", + ) + .bind(&prefix) + .bind(from_sn) + .bind(end_sn) + .fetch_all(&self.pool) + .await? + }; + + let mut events = Vec::new(); + for row in rows { + let digest_bytes: Vec = row.get("digest"); + let said = rkyv_adapter::deserialize_said(&digest_bytes)?; + if let Some(timestamped_event) = self.log_db.get_signed_event(&said)? { + events.push(timestamped_event); + } + } + Ok(events) + }) + } +} + +impl EventDatabase for PostgresDatabase { + type Error = PostgresError; + type LogDatabaseType = PostgresLogDatabase; + + fn get_log_db(&self) -> Arc { + self.log_db.clone() + } + + fn add_kel_finalized_event( + &self, + signed_event: SignedEventMessage, + _id: &IdentifierPrefix, + ) -> Result<(), Self::Error> { + async_std::task::block_on(async { + let mut tx = self.pool.begin().await?; + + self.update_key_state(&mut tx, &signed_event.event_message) + .await?; + self.save_to_kel(&mut tx, &signed_event.event_message) + .await?; + self.log_db + .log_event_with_tx(&mut tx, &signed_event) + .await?; + + tx.commit().await?; + Ok(()) + }) + } + + fn add_receipt_t( + &self, + receipt: SignedTransferableReceipt, + _id: &IdentifierPrefix, + ) -> Result<(), Self::Error> { + let digest = receipt.body.receipted_event_digest; + let transferable = Transferable::Seal(receipt.validator_seal, receipt.signatures); + self.log_db.insert_trans_receipt(&digest, &[transferable]) + } + + fn add_receipt_nt( + &self, + receipt: SignedNontransferableReceipt, + _id: &IdentifierPrefix, + ) -> Result<(), Self::Error> { + let receipted_event_digest = receipt.body.receipted_event_digest; + let receipts = receipt.signatures; + self.log_db + .insert_nontrans_receipt(&receipted_event_digest, &receipts) + } + + fn get_key_state(&self, id: &IdentifierPrefix) -> Option { + let key = id.to_str(); + let row = async_std::task::block_on( + 
sqlx::query("SELECT state_data FROM key_states WHERE identifier = $1") + .bind(&key) + .fetch_optional(&self.pool), + ) + .ok()??; + + let bytes: Vec = row.get("state_data"); + Some(rkyv_adapter::deserialize_identifier_state(&bytes).ok()?) + } + + fn get_kel_finalized_events( + &self, + params: QueryParameters, + ) -> Option> { + let result = match params { + QueryParameters::BySn { id, sn } => self.get_kel(&id, sn, 1), + QueryParameters::Range { id, start, limit } => self.get_kel(&id, start, limit), + QueryParameters::All { id } => self.get_kel(id, 0, u64::MAX), + }; + + match result { + Ok(kel) if kel.is_empty() => None, + Ok(kel) => Some(kel.into_iter()), + Err(_) => None::>, + } + } + + fn get_receipts_t( + &self, + params: QueryParameters, + ) -> Option> { + match params { + QueryParameters::BySn { id, sn } => { + if let Ok(Some(said)) = self.get_event_digest(&id, sn) { + let receipts = self.log_db.get_trans_receipts(&said).ok()?; + Some(receipts.collect::>().into_iter()) + } else { + None + } + } + QueryParameters::Range { .. } => todo!(), + QueryParameters::All { .. 
} => todo!(), + } + } + + fn get_receipts_nt( + &self, + params: QueryParameters, + ) -> Option> { + match params { + QueryParameters::BySn { id, sn } => self + .get_nontrans_receipts_range(&id.to_str(), sn, 1) + .ok() + .map(|e| e.into_iter()), + QueryParameters::Range { id, start, limit } => self + .get_nontrans_receipts_range(&id.to_str(), start, limit) + .ok() + .map(|e| e.into_iter()), + QueryParameters::All { id } => self + .get_nontrans_receipts_range(&id.to_str(), 0, u64::MAX) + .ok() + .map(|e| e.into_iter()), + } + } + + fn accept_to_kel(&self, event: &KeriEvent) -> Result<(), Self::Error> { + async_std::task::block_on(async { + let mut tx = self.pool.begin().await?; + + self.update_key_state(&mut tx, event).await?; + self.save_to_kel(&mut tx, event).await?; + + tx.commit().await?; + Ok(()) + }) + } + + #[cfg(feature = "query")] + fn save_reply(&self, reply: SignedReply) -> Result<(), Self::Error> { + self.accepted_rpy.insert(reply) + } + + #[cfg(feature = "query")] + fn get_reply(&self, id: &IdentifierPrefix, from_who: &IdentifierPrefix) -> Option { + self.accepted_rpy.get(id, from_who).unwrap() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + actor::event_generator, + database::QueryParameters, + event_message::{ + cesr_adapter::{parse_event_type, EventType}, + EventTypeTag, + }, + prefix::{BasicPrefix, IndexedSignature, SelfSigningPrefix}, + signer::{CryptoBox, KeyManager}, + }; + + fn get_database_url() -> String { + std::env::var("DATABASE_URL") + .unwrap_or_else(|_| "postgres://postgres:postgres@localhost:5432/keri_test".to_string()) + } + + async fn setup_db() -> Arc { + let db = Arc::new( + PostgresDatabase::new(&get_database_url()) + .await + .expect("Failed to connect to database"), + ); + db.run_migrations().await.expect("Failed to run migrations"); + db + } + + fn make_signed_icp() -> ( + crate::prefix::IdentifierPrefix, + crate::event_message::signed_event_message::SignedEventMessage, + ) { + let key_manager = 
CryptoBox::new().unwrap(); + let pk = BasicPrefix::Ed25519(key_manager.public_key()); + let npk = BasicPrefix::Ed25519(key_manager.next_public_key()); + + let icp_str = event_generator::incept(vec![pk], vec![npk], vec![], 0, None).unwrap(); + let sig = SelfSigningPrefix::Ed25519Sha512(key_manager.sign(icp_str.as_bytes()).unwrap()); + let ke = match parse_event_type(icp_str.as_bytes()).unwrap() { + EventType::KeyEvent(ke) => ke, + _ => panic!("Expected key event"), + }; + let signed = ke.sign(vec![IndexedSignature::new_both_same(sig, 0)], None, None); + let prefix = signed.event_message.data.get_prefix(); + (prefix, signed) + } + + #[async_std::test] + #[ignore] + async fn test_postgres_migrations() { + setup_db().await; + println!("Migrations completed successfully!"); + } + + #[async_std::test] + #[ignore] + async fn test_postgres_incept() { + let db = setup_db().await; + let (prefix, signed_event) = make_signed_icp(); + + db.add_kel_finalized_event(signed_event, &prefix) + .expect("Failed to store inception event"); + + let state = db + .get_key_state(&prefix) + .expect("State should exist after inception"); + assert_eq!(state.sn, 0); + } + + #[async_std::test] + #[ignore] + async fn test_postgres_get_kel() { + let db = setup_db().await; + let (prefix, signed_event) = make_signed_icp(); + + db.add_kel_finalized_event(signed_event, &prefix) + .expect("Failed to store inception event"); + + let kel = db.get_kel(&prefix, 0, 1).expect("get_kel failed"); + assert_eq!(kel.len(), 1); + assert_eq!( + kel[0].signed_event_message.event_message.event_type, + EventTypeTag::Icp + ); + + let full: Vec<_> = db + .get_kel_finalized_events(QueryParameters::All { id: &prefix }) + .expect("Full KEL should exist") + .collect(); + assert_eq!(full.len(), 1); + + let by_sn: Vec<_> = db + .get_kel_finalized_events(QueryParameters::BySn { + id: prefix.clone(), + sn: 0, + }) + .expect("BySn should return event") + .collect(); + assert_eq!(by_sn.len(), 1); + + assert!(db + 
.get_kel_finalized_events(QueryParameters::BySn { + id: prefix.clone(), + sn: 99 + }) + .is_none()); + } +} diff --git a/keriox_core/src/database/postgres/oobi_storage.rs b/keriox_core/src/database/postgres/oobi_storage.rs new file mode 100644 index 00000000..65997806 --- /dev/null +++ b/keriox_core/src/database/postgres/oobi_storage.rs @@ -0,0 +1,127 @@ +use super::error::PostgresError; +use crate::oobi::{Role, Scheme}; +use crate::oobi_manager::storage::OobiStorageBackend; +use crate::prefix::IdentifierPrefix; +use crate::query::reply_event::{ReplyRoute, SignedReply}; +use sqlx::PgPool; + +pub struct PostgresOobiStorage { + pool: PgPool, +} + +impl PostgresOobiStorage { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } +} + +impl OobiStorageBackend for PostgresOobiStorage { + type Error = PostgresError; + + fn get_oobis_for_eid(&self, id: &IdentifierPrefix) -> Result, Self::Error> { + async_std::task::block_on(async { + let rows: Vec<(Vec,)> = + sqlx::query_as(r#"SELECT oobi_data FROM location_oobis WHERE eid = $1"#) + .bind(id.to_string()) + .fetch_all(&self.pool) + .await?; + + rows.into_iter() + .map(|(oobi_data,)| serde_cbor::from_slice(&oobi_data).map_err(Into::into)) + .collect::, Self::Error>>() + }) + } + + fn get_last_loc_scheme( + &self, + eid: &IdentifierPrefix, + scheme: &Scheme, + ) -> Result, Self::Error> { + async_std::task::block_on(async { + let row: Option<(Vec,)> = sqlx::query_as( + r#"SELECT oobi_data FROM location_oobis WHERE eid = $1 AND scheme = $2"#, + ) + .bind(eid.to_string()) + .bind(serde_json::to_string(scheme)?) 
+ .fetch_optional(&self.pool) + .await?; + + row.map(|(oobi_data,)| serde_cbor::from_slice(&oobi_data).map_err(Into::into)) + .transpose() + }) + } + + fn get_end_role( + &self, + cid: &IdentifierPrefix, + role: Role, + ) -> Result>, Self::Error> { + async_std::task::block_on(async { + let rows: Vec<(Vec,)> = sqlx::query_as( + r#"SELECT oobi_data FROM end_role_oobis WHERE cid = $1 AND role = $2"#, + ) + .bind(cid.to_string()) + .bind(serde_json::to_string(&role)?) + .fetch_all(&self.pool) + .await?; + + if rows.is_empty() { + return Ok(None); + } + + let replies = rows + .into_iter() + .map(|(oobi_data,)| serde_cbor::from_slice(&oobi_data).map_err(Into::into)) + .collect::, Self::Error>>()?; + Ok(Some(replies)) + }) + } + + fn save_oobi(&self, signed_reply: &SignedReply) -> Result<(), Self::Error> { + async_std::task::block_on(async { + match signed_reply.reply.get_route() { + ReplyRoute::LocScheme(loc_scheme) => { + sqlx::query( + r#"INSERT INTO location_oobis (eid, scheme, oobi_data) + VALUES ($1, $2, $3) + ON CONFLICT (eid, scheme) DO UPDATE SET oobi_data = $3"#, + ) + .bind(loc_scheme.get_eid().to_string()) + .bind(serde_json::to_string(&loc_scheme.scheme)?) + .bind(serde_cbor::to_vec(signed_reply)?) + .execute(&self.pool) + .await?; + } + ReplyRoute::EndRoleAdd(end_role) => { + sqlx::query( + r#"INSERT INTO end_role_oobis (cid, role, eid, oobi_data) + VALUES ($1, $2, $3, $4)"#, + ) + .bind(end_role.cid.to_string()) + .bind(serde_json::to_string(&end_role.role)?) + .bind(end_role.eid.to_string()) + .bind(serde_cbor::to_vec(signed_reply)?) + .execute(&self.pool) + .await?; + } + ReplyRoute::EndRoleCut(end_role) => { + // TODO: EndRoleCut should DELETE the role from storage. + // Currently inserts the Cut event, matching redb behaviour, + // pending a proper removal implementation. 
+ sqlx::query( + r#"INSERT INTO end_role_oobis (cid, role, eid, oobi_data) + VALUES ($1, $2, $3, $4)"#, + ) + .bind(end_role.cid.to_string()) + .bind(serde_json::to_string(&end_role.role)?) + .bind(end_role.eid.to_string()) + .bind(serde_cbor::to_vec(signed_reply)?) + .execute(&self.pool) + .await?; + } + ReplyRoute::Ksn(_, _) => todo!(), + } + Ok(()) + }) + } +} diff --git a/keriox_core/src/database/redb/mod.rs b/keriox_core/src/database/redb/mod.rs index cbebe810..366e1945 100644 --- a/keriox_core/src/database/redb/mod.rs +++ b/keriox_core/src/database/redb/mod.rs @@ -2,7 +2,7 @@ pub mod escrow_database; #[cfg(feature = "query")] pub(crate) mod ksn_log; pub mod loging; -pub(crate) mod rkyv_adapter; +pub(crate) use super::rkyv_adapter; /// Kel storage. (identifier, sn) -> event digest /// The `KELS` table links an identifier and sequence number to the digest of an event, @@ -108,6 +108,10 @@ impl RedbDatabase { accepted_rpy: Arc::new(AcceptedKsn::new(db.clone())?), }) } + + pub fn raw_db(&self) -> Arc { + self.db.clone() + } } impl EventDatabase for RedbDatabase { diff --git a/keriox_core/src/database/rkyv_adapter/mod.rs b/keriox_core/src/database/rkyv_adapter/mod.rs new file mode 100644 index 00000000..e721e9d4 --- /dev/null +++ b/keriox_core/src/database/rkyv_adapter/mod.rs @@ -0,0 +1,58 @@ +use rkyv::{util::AlignedVec, with::With}; +use said::SelfAddressingIdentifier; +use said_wrapper::{ArchivedSAIDef, SAIDef}; + +use crate::{ + event::sections::seal::{ArchivedSourceSeal, SourceSeal}, + event_message::signature::{ + ArchivedNontransferable, ArchivedTransferable, Nontransferable, Transferable, + }, + prefix::{attached_signature::ArchivedIndexedSignature, IndexedSignature}, + state::IdentifierState, +}; + +pub(crate) mod said_wrapper; +pub(crate) mod serialization_info_wrapper; + +pub fn serialize_said(said: &SelfAddressingIdentifier) -> Result { + Ok(rkyv::to_bytes( + With::::cast(said), + )?) 
+} + +pub fn deserialize_said(bytes: &[u8]) -> Result { + let archived: &ArchivedSAIDef = rkyv::access(&bytes)?; + let deserialized: SelfAddressingIdentifier = + rkyv::deserialize(With::::cast(archived))?; + Ok(deserialized) +} + +pub fn deserialize_nontransferable(bytes: &[u8]) -> Result { + let archived = rkyv::access::(&bytes).unwrap(); + rkyv::deserialize::(archived) +} + +pub fn deserialize_transferable(bytes: &[u8]) -> Result { + let archived = rkyv::access::(&bytes).unwrap(); + rkyv::deserialize::(archived) +} + +pub fn deserialize_indexed_signatures( + bytes: &[u8], +) -> Result { + let archived = rkyv::access::(&bytes).unwrap(); + rkyv::deserialize::(archived) +} + +pub fn deserialize_source_seal(bytes: &[u8]) -> Result { + let archived = rkyv::access::(&bytes).unwrap(); + rkyv::deserialize::(archived) +} + +pub fn deserialize_identifier_state(bytes: &[u8]) -> Result { + let mut aligned_bytes = + AlignedVec::<{ std::mem::align_of::() }>::with_capacity(bytes.len()); + aligned_bytes.extend_from_slice(bytes); + + rkyv::from_bytes::(&aligned_bytes) +} diff --git a/keriox_core/src/database/rkyv_adapter/said_wrapper.rs b/keriox_core/src/database/rkyv_adapter/said_wrapper.rs new file mode 100644 index 00000000..268add1b --- /dev/null +++ b/keriox_core/src/database/rkyv_adapter/said_wrapper.rs @@ -0,0 +1,135 @@ +use said::{ + derivation::{HashFunction, HashFunctionCode}, + SelfAddressingIdentifier, +}; + +use rkyv::{Archive, Deserialize, Serialize}; + +#[derive( + Debug, Clone, Default, Eq, Hash, Archive, rkyv::Serialize, rkyv::Deserialize, PartialEq, +)] +#[rkyv(derive(Debug))] +pub struct SaidValue { + #[rkyv(with = SAIDef)] + pub said: SelfAddressingIdentifier, +} + +impl From for SaidValue { + fn from(value: SelfAddressingIdentifier) -> Self { + Self { said: value } + } +} + +impl From for SelfAddressingIdentifier { + fn from(value: SaidValue) -> Self { + value.said + } +} + +impl serde::Serialize for SaidValue { + fn serialize(&self, serializer: S) -> Result 
+ where + S: serde::Serializer, + { + self.said.serialize(serializer) + } +} + +impl<'de> serde::Deserialize<'de> for SaidValue { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + SelfAddressingIdentifier::deserialize(deserializer).map(|said| SaidValue { said }) + } +} + +#[derive(Archive, Serialize, Deserialize)] +#[rkyv(remote = SelfAddressingIdentifier)] +#[rkyv(derive(Debug))] +pub(crate) struct SAIDef { + #[rkyv(with = HashFunctionDef)] + pub derivation: HashFunction, + pub digest: Vec, +} + +impl From for SelfAddressingIdentifier { + fn from(value: SAIDef) -> Self { + Self::new(value.derivation, value.digest) + } +} + +#[derive(Archive, Serialize, Deserialize, PartialEq)] +#[rkyv(remote = HashFunction)] +#[rkyv(derive(Debug))] +struct HashFunctionDef { + #[rkyv(getter = HashFunctionDef::get_code, with = HashFunctionCodeDef)] + pub f: HashFunctionCode, +} + +impl HashFunctionDef { + fn get_code(foo: &HashFunction) -> HashFunctionCode { + foo.into() + } +} + +impl From for HashFunction { + fn from(value: HashFunctionDef) -> Self { + value.f.into() + } +} + +#[derive(Archive, Serialize, Deserialize, PartialEq)] +#[rkyv(remote = HashFunctionCode)] +#[rkyv(compare(PartialEq), derive(Debug))] +enum HashFunctionCodeDef { + Blake3_256, + Blake2B256(Vec), + Blake2S256(Vec), + SHA3_256, + SHA2_256, + Blake3_512, + SHA3_512, + Blake2B512, + SHA2_512, +} + +impl From for HashFunctionCode { + fn from(value: HashFunctionCodeDef) -> Self { + match value { + HashFunctionCodeDef::Blake3_256 => HashFunctionCode::Blake3_256, + HashFunctionCodeDef::Blake2B256(vec) => HashFunctionCode::Blake2B256(vec), + HashFunctionCodeDef::Blake2S256(vec) => HashFunctionCode::Blake2S256(vec), + HashFunctionCodeDef::SHA3_256 => HashFunctionCode::SHA3_256, + HashFunctionCodeDef::SHA2_256 => HashFunctionCode::SHA2_256, + HashFunctionCodeDef::Blake3_512 => HashFunctionCode::Blake3_512, + HashFunctionCodeDef::SHA3_512 => HashFunctionCode::SHA3_512, + 
HashFunctionCodeDef::Blake2B512 => HashFunctionCode::Blake2B512, + HashFunctionCodeDef::SHA2_512 => HashFunctionCode::SHA2_512, + } + } +} + +#[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] +struct OptionalSaid { + value: SaidValue, +} + +#[test] +fn test_rkyv_said_serialization() -> Result<(), rkyv::rancor::Failure> { + use rkyv::with::With; + let value: SelfAddressingIdentifier = "EJe_sKQb1otKrz6COIL8VFvBv3DEFvtKaVFGn1vm0IlL" + .parse() + .unwrap(); + + let bytes = rkyv::to_bytes(With::::cast(&value))?; + dbg!(&bytes); + let archived: &ArchivedSAIDef = rkyv::access(&bytes)?; + + let deserialized: SelfAddressingIdentifier = + rkyv::deserialize(With::::cast(archived))?; + + assert_eq!(value, deserialized); + + Ok(()) +} diff --git a/keriox_core/src/database/rkyv_adapter/serialization_info_wrapper.rs b/keriox_core/src/database/rkyv_adapter/serialization_info_wrapper.rs new file mode 100644 index 00000000..333c3138 --- /dev/null +++ b/keriox_core/src/database/rkyv_adapter/serialization_info_wrapper.rs @@ -0,0 +1,58 @@ +use said::{sad::SerializationFormats, version::SerializationInfo}; + +#[derive( + Debug, + Clone, + serde::Serialize, + serde::Deserialize, + Default, + rkyv::Archive, + rkyv::Serialize, + rkyv::Deserialize, + PartialEq, +)] +pub(crate) struct SerializationInfoValue { + #[rkyv(with = SerializationInfoDef)] + info: SerializationInfo, +} + +#[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize, PartialEq)] +#[rkyv(remote = SerializationInfo)] +pub(crate) struct SerializationInfoDef { + pub protocol_code: String, + pub major_version: u8, + pub minor_version: u8, + pub size: usize, + #[rkyv(with = SerializationFormatsDef)] + pub kind: SerializationFormats, +} + +impl From for SerializationInfo { + fn from(value: SerializationInfoDef) -> Self { + SerializationInfo { + protocol_code: value.protocol_code, + major_version: value.major_version, + minor_version: value.minor_version, + size: value.size, + kind: value.kind, + } + } +} + 
+#[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize, PartialEq)] +#[rkyv(remote = SerializationFormats)] +pub enum SerializationFormatsDef { + JSON, + MGPK, + CBOR, +} + +impl From for SerializationFormats { + fn from(value: SerializationFormatsDef) -> Self { + match value { + SerializationFormatsDef::JSON => Self::JSON, + SerializationFormatsDef::MGPK => Self::MGPK, + SerializationFormatsDef::CBOR => Self::CBOR, + } + } +} diff --git a/keriox_core/src/error/mod.rs b/keriox_core/src/error/mod.rs index f44594c9..ae054998 100644 --- a/keriox_core/src/error/mod.rs +++ b/keriox_core/src/error/mod.rs @@ -2,10 +2,11 @@ use said::version::error::Error as VersionError; use serde::{Deserialize, Serialize}; use thiserror::Error; +#[cfg(feature = "storage-redb")] +use crate::database::redb::RedbError; use crate::{ - database::redb::RedbError, event::sections::key_config::SignatureError, - event_message::cesr_adapter::ParseError, prefix::IdentifierPrefix, - processor::validator::VerificationError, + event::sections::key_config::SignatureError, event_message::cesr_adapter::ParseError, + prefix::IdentifierPrefix, processor::validator::VerificationError, }; pub mod serializer_error; @@ -78,6 +79,9 @@ pub enum Error { #[error("mutex is poisoned")] MutexPoisoned, + #[error("RwLock poisoned")] + RwLockingError, + #[error("Incorrect event digest")] IncorrectDigest, @@ -128,12 +132,20 @@ impl From for Error { } } +#[cfg(feature = "storage-redb")] impl From for Error { fn from(_: RedbError) -> Self { Error::DbError } } +#[cfg(feature = "oobi")] +impl From for Error { + fn from(e: crate::oobi::error::OobiError) -> Self { + Error::SemanticError(e.to_string()) + } +} + impl From for Error { fn from(_: crate::keys::KeysError) -> Self { Error::SigningError diff --git a/keriox_core/src/event/event_data/interaction.rs b/keriox_core/src/event/event_data/interaction.rs index 9d6169dd..761c2a10 100644 --- a/keriox_core/src/event/event_data/interaction.rs +++ 
b/keriox_core/src/event/event_data/interaction.rs @@ -1,5 +1,5 @@ use super::super::sections::seal::*; -use crate::database::redb::rkyv_adapter::said_wrapper::SaidValue; +use crate::database::rkyv_adapter::said_wrapper::SaidValue; use crate::error::Error; use crate::state::{EventSemantics, IdentifierState}; use said::SelfAddressingIdentifier; diff --git a/keriox_core/src/event/event_data/rotation.rs b/keriox_core/src/event/event_data/rotation.rs index e42fac61..d91e9218 100644 --- a/keriox_core/src/event/event_data/rotation.rs +++ b/keriox_core/src/event/event_data/rotation.rs @@ -1,6 +1,6 @@ use super::super::sections::{seal::*, KeyConfig, RotationWitnessConfig}; use crate::{ - database::redb::rkyv_adapter::said_wrapper::SaidValue, + database::rkyv_adapter::said_wrapper::SaidValue, error::Error, prefix::BasicPrefix, state::{EventSemantics, IdentifierState, LastEstablishmentData, WitnessConfig}, diff --git a/keriox_core/src/event/sections/key_config.rs b/keriox_core/src/event/sections/key_config.rs index 0a4d662f..9d63105a 100644 --- a/keriox_core/src/event/sections/key_config.rs +++ b/keriox_core/src/event/sections/key_config.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use super::threshold::SignatureThreshold; use crate::{ - database::redb::rkyv_adapter::said_wrapper::SaidValue, + database::rkyv_adapter::said_wrapper::SaidValue, prefix::{attached_signature::Index, BasicPrefix, IndexedSignature}, }; diff --git a/keriox_core/src/event/sections/seal.rs b/keriox_core/src/event/sections/seal.rs index 445c15b5..e918ce35 100644 --- a/keriox_core/src/event/sections/seal.rs +++ b/keriox_core/src/event/sections/seal.rs @@ -1,6 +1,6 @@ use std::fmt::{self, Display}; -use crate::{database::redb::rkyv_adapter::said_wrapper::SaidValue, prefix::IdentifierPrefix}; +use crate::{database::rkyv_adapter::said_wrapper::SaidValue, prefix::IdentifierPrefix}; use said::SelfAddressingIdentifier; use serde::{Deserialize, Serialize}; use serde_hex::{Compact, SerHex}; diff --git 
a/keriox_core/src/event_message/msg.rs b/keriox_core/src/event_message/msg.rs index a8eb4436..64198564 100644 --- a/keriox_core/src/event_message/msg.rs +++ b/keriox_core/src/event_message/msg.rs @@ -5,8 +5,8 @@ use said::{ use serde::{Deserialize, Serialize}; use super::{EventTypeTag, Typeable}; -use crate::database::redb::rkyv_adapter::said_wrapper::SaidValue; -use crate::database::redb::rkyv_adapter::serialization_info_wrapper::SerializationInfoDef; +use crate::database::rkyv_adapter::said_wrapper::SaidValue; +use crate::database::rkyv_adapter::serialization_info_wrapper::SerializationInfoDef; use crate::error::Error; pub type KeriEvent = TypedEvent; diff --git a/keriox_core/src/oobi_manager/mod.rs b/keriox_core/src/oobi_manager/mod.rs index fbbbcce9..7cc74820 100644 --- a/keriox_core/src/oobi_manager/mod.rs +++ b/keriox_core/src/oobi_manager/mod.rs @@ -1,42 +1,42 @@ -use std::{convert::TryFrom, sync::Arc}; -use crate::oobi::{Role, error::OobiError}; +use std::convert::TryFrom; use cesrox::parse_many; use crate::{ - database::redb::{RedbDatabase, RedbError}, error::Error, event_message::signed_event_message::{Message, Op}, + oobi::{error::OobiError, Role}, prefix::IdentifierPrefix, query::reply_event::{bada_logic, ReplyEvent, ReplyRoute, SignedReply}, }; pub mod storage; -use self::storage::OobiStorage; +pub use self::storage::OobiStorageBackend; +#[cfg(feature = "storage-redb")] +pub use self::storage::RedbOobiStorage; +#[cfg(feature = "storage-postgres")] +pub use crate::database::postgres::oobi_storage::PostgresOobiStorage; -pub struct OobiManager { - store: OobiStorage, +pub struct OobiManager { + store: S, } -impl OobiManager { - pub fn new(events_db: Arc) -> Self { - Self { - store: OobiStorage::new(events_db.db.clone()).unwrap(), - } - } +#[cfg(feature = "storage-redb")] +pub type RedbOobiManager = OobiManager; - pub fn new_from_db(db: Arc) -> Self { - Self { - store: OobiStorage::new(db.clone()).unwrap(), - } +#[cfg(feature = "storage-postgres")] +pub 
type PostgresOobiManager = + OobiManager; + +impl OobiManager { + pub fn with_storage(store: S) -> Self { + Self { store } } - /// Checks oobi signer and bada logic. Assumes signatures already - /// verified. + /// Checks oobi signer and bada logic. Assumes signatures already verified. pub fn check_oobi_reply(&self, rpy: &SignedReply) -> Result<(), OobiError> { match rpy.reply.get_route() { - // check if signature was made by oobi creator ReplyRoute::LocScheme(lc) => { if rpy.signature.get_signer().ok_or(Error::MissingSigner)? != lc.get_eid() { return Err(OobiError::SignerMismatch); @@ -45,7 +45,7 @@ impl OobiManager { if let Some(old_rpy) = self .store .get_last_loc_scheme(&lc.eid, &lc.scheme) - .map_err(|err| OobiError::Db(err.to_string()))? + .map_err(|e| OobiError::Db(e.to_string()))? { bada_logic(rpy, &old_rpy)?; }; @@ -58,7 +58,7 @@ impl OobiManager { if let Some(old_rpy) = self .store .get_end_role(&er.cid, er.role) - .map_err(|err| OobiError::Db(err.to_string()))? + .map_err(|e| OobiError::Db(e.to_string()))? .and_then(|rpys| rpys.last().cloned()) { bada_logic(rpy, &old_rpy)?; @@ -79,7 +79,9 @@ impl OobiManager { match msg { Message::Op(Op::Reply(oobi_rpy)) => { self.check_oobi_reply(&oobi_rpy)?; - self.store.save_oobi(&oobi_rpy)?; + self.store + .save_oobi(&oobi_rpy) + .map_err(|e| OobiError::Db(e.to_string()))?; Ok(()) } _ => Err(OobiError::InvalidMessageType), @@ -87,14 +89,18 @@ impl OobiManager { })?; Ok(()) } - pub fn save_oobi(&self, signed_oobi: &SignedReply) -> Result<(), RedbError> { - self.store.save_oobi(signed_oobi) + + pub fn save_oobi(&self, signed_oobi: &SignedReply) -> Result<(), OobiError> { + self.store + .save_oobi(signed_oobi) + .map_err(|e| OobiError::Db(e.to_string())) } - pub fn get_loc_scheme(&self, id: &IdentifierPrefix) -> Result, RedbError> { + pub fn get_loc_scheme(&self, id: &IdentifierPrefix) -> Result, OobiError> { Ok(self .store - .get_oobis_for_eid(id)? + .get_oobis_for_eid(id) + .map_err(|e| OobiError::Db(e.to_string()))? 
.into_iter() .map(|e| e.reply) .collect()) @@ -104,29 +110,50 @@ impl OobiManager { &self, id: &IdentifierPrefix, role: Role, - ) -> Result>, RedbError> { - self.store.get_end_role(id, role) + ) -> Result>, OobiError> { + self.store + .get_end_role(id, role) + .map_err(|e| OobiError::Db(e.to_string())) } /// Assumes that signatures were verified. pub fn process_oobi(&self, oobi_rpy: &SignedReply) -> Result<(), OobiError> { - println!("\nProcessing oobi"); self.check_oobi_reply(oobi_rpy)?; - println!("Checked\n"); - self.store.save_oobi(oobi_rpy)?; + self.store + .save_oobi(oobi_rpy) + .map_err(|e| OobiError::Db(e.to_string()))?; Ok(()) } } -impl From for OobiError { - fn from(err: RedbError) -> Self { - OobiError::Db(err.to_string()) +#[cfg(feature = "storage-redb")] +impl OobiManager { + /// Create a redb-backed OobiManager from a `RedbDatabase` wrapper. + pub fn new(events_db: std::sync::Arc) -> Result { + let store = self::storage::RedbOobiStorage::new(events_db.db.clone()) + .map_err(|e| OobiError::Db(e.to_string()))?; + Ok(Self { store }) + } + + /// Create a redb-backed OobiManager directly from a raw redb `Database`. + pub fn new_redb(db: std::sync::Arc) -> Result { + let store = self::storage::RedbOobiStorage::new(db).map_err(|e| OobiError::Db(e.to_string()))?; + Ok(Self { store }) + } +} + +#[cfg(feature = "storage-postgres")] +impl OobiManager { + /// Create a postgres-backed OobiManager from an existing `PgPool`. 
+ pub fn new_postgres(pool: sqlx::PgPool) -> Self { + Self { + store: crate::database::postgres::oobi_storage::PostgresOobiStorage::new(pool), + } } } #[cfg(test)] mod tests { - use std::sync::Arc; use cesrox::parse_many; @@ -134,27 +161,28 @@ mod tests { use crate::{ oobi::error::OobiError, - oobi_manager::OobiManager, prefix::IdentifierPrefix, query::reply_event::ReplyRoute, + oobi_manager::{OobiManager, RedbOobiManager}, + prefix::IdentifierPrefix, + query::reply_event::ReplyRoute, }; - fn setup_oobi_manager() -> OobiManager { + fn setup_oobi_manager() -> RedbOobiManager { let tmp_path = NamedTempFile::new().unwrap(); let redb = Arc::new(redb::Database::create(tmp_path.path()).unwrap()); - - OobiManager::new_from_db(redb) + OobiManager::new_redb(redb).unwrap() } #[test] fn test_obi_save() -> Result<(), OobiError> { let oobi_manager = setup_oobi_manager(); - let body = r#"{"v":"KERI10JSON0000fa_","t":"rpy","d":"EJq4dQQdqg8aK7VyGnfSibxPyW8Zk2zO1qbVRD6flOvE","dt":"2022-02-28T17:23:20.336207+00:00","r":"/loc/scheme","a":{"eid":"BuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw","scheme":"http","url":"http://127.0.0.1:5643/"}}-VAi-CABBuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw0BAPJ5p_IpUFdmq8uupehsL8DzxWDeaU_SjeiwfmRZ6i9pqddraItmCOAysdXdTEQZ1hEM60iDEWvK16g68TrcAw{"v":"KERI10JSON0000f8_","t":"rpy","d":"ExSR01j5noF2LnGcGFUbLnq-U8JuYBr9WWEMt8d2fb1Y","dt":"2022-02-28T17:23:20.337272+00:00","r":"/loc/scheme","a":{"eid":"BuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw","scheme":"tcp","url":"tcp://127.0.0.1:5633/"}}-VAi-CABBuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw0BZtIhK6Nh6Zk1zPmkJYiFVz0RimQRiubshmSmqAzxzhT4KpGMAH7sbNlFP-0-lKjTawTReKv4L7N3TR7jxXaEBg"#; 
//{"v":"KERI10JSON000116_","t":"rpy","d":"EcZ1I4nKy6gIkWxjq1LmIivoPGv32lvlSuMVsWnOPwSc","dt":"2022-02-28T17:23:20.338355+00:00","r":"/end/role/add","a":{"cid":"BuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw","role":"controller","eid":"BuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw"}}-VAi-CABBuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw0B9ccIiMxdwurRjGvUUUdXsxhseo58onhE4bJddKuyPaSpBHXdRKKuiFE0SmLAogMQGJ0iN6f1V_2E_MVfMc3sAA"#; + let body = r#"{"v":"KERI10JSON0000fa_","t":"rpy","d":"EJq4dQQdqg8aK7VyGnfSibxPyW8Zk2zO1qbVRD6flOvE","dt":"2022-02-28T17:23:20.336207+00:00","r":"/loc/scheme","a":{"eid":"BuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw","scheme":"http","url":"http://127.0.0.1:5643/"}}-VAi-CABBuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw0BAPJ5p_IpUFdmq8uupehsL8DzxWDeaU_SjeiwfmRZ6i9pqddraItmCOAysdXdTEQZ1hEM60iDEWvK16g68TrcAw{"v":"KERI10JSON0000f8_","t":"rpy","d":"ExSR01j5noF2LnGcGFUbLnq-U8JuYBr9WWEMt8d2fb1Y","dt":"2022-02-28T17:23:20.337272+00:00","r":"/loc/scheme","a":{"eid":"BuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw","scheme":"tcp","url":"tcp://127.0.0.1:5633/"}}-VAi-CABBuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw0BZtIhK6Nh6Zk1zPmkJYiFVz0RimQRiubshmSmqAzxzhT4KpGMAH7sbNlFP-0-lKjTawTReKv4L7N3TR7jxXaEBg"#; let stream = parse_many(body.as_bytes()); assert_eq!(stream.unwrap().1.len(), 2); oobi_manager.parse_and_save(body)?; - let res = oobi_manager.store.get_oobis_for_eid( + let res = oobi_manager.get_loc_scheme( &"BuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw" .parse::() .unwrap(), @@ -162,12 +190,12 @@ mod tests { assert!(!res.is_empty()); assert_eq!( - res.iter().map(|oobi| oobi.reply.get_route()).collect::>(), - vec![ - ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"BuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw","scheme":"http","url":"http://127.0.0.1:5643/"}"#).unwrap()), - ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"BuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw","scheme":"tcp","url":"tcp://127.0.0.1:5633/"}"#).unwrap()), - ] - ); + 
res.iter().map(|oobi| oobi.get_route()).collect::>(), + vec![ + ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"BuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw","scheme":"http","url":"http://127.0.0.1:5643/"}"#).unwrap()), + ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"BuyRFMideczFZoapylLIyCjSdhtqVb31wZkRKvPfNqkw","scheme":"tcp","url":"tcp://127.0.0.1:5633/"}"#).unwrap()), + ] + ); Ok(()) } @@ -176,57 +204,50 @@ mod tests { pub fn test_oobi_update() -> Result<(), OobiError> { let oobi_manager = setup_oobi_manager(); - let body = r#"{"v":"KERI10JSON0000fa_","t":"rpy","d":"Elxbk-5h8a2PhoserezofHRXEDgAEwhrW0wvhXqyupmY","dt":"2022-04-08T15:00:29.163849+00:00","r":"/loc/scheme","a":{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"http","url":"http://127.0.0.1:5644/"}}-VAi-CABBgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c0BezpFQMVxodb7WMUBL4aLeQW1CUTUYbcFNPGohh02cKl7kSajyRZAentI-MkconvyI8-QfaO1in5mexYF-1ZPBg{"v":"KERI10JSON0000f8_","t":"rpy","d":"EfJP2Mkp_2UZJoWoNCWZHMgU7uWMIkzih19Nvit36Cho","dt":"2022-04-08T15:00:29.165103+00:00","r":"/loc/scheme","a":{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"tcp","url":"tcp://127.0.0.1:5634/"}}-VAi-CABBgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c0BFcwrcL7Hc8HYLSPvzMGAAEn5QyY76QWY1l2RotQqsX01HgDh4UZYU5GpiVY2A-AbsRIsUpfIKnQi7r4dc0o0DA"#; //{"v":"KERI10JSON000116_","t":"rpy","d":"EXhq-JsyKmr7PJq7luQ0Psd1linhiL6yI4iiDStKPYSw","dt":"2022-04-08T15:00:29.166115+00:00","r":"/end/role/add","a":{"cid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","role":"controller","eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c"}}-VAi-CABBgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c0BJwAp49PBodHj42HlBoStigxsgGEWmdaMOyaY6_q1msdS5pi66SWFCNuLqPWX6p1YWXDmq97MgKZmTRJ3g7mPCg"#; - let body2 = 
r#"{"v":"KERI10JSON0000fa_","t":"rpy","d":"EhmRb98IbAp7xqttLe-knTcT0pg5xbkFdU-D8FMi2NTE","dt":"2022-04-08T15:02:55.382713+00:00","r":"/loc/scheme","a":{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"http","url":"http://127.0.0.1:5644/"}}-VAi-CABBgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c0BQ2LHGCoTDzGTU4qnAKvnZocjUEwWfpILfngi5Ej3z_7SGJ5q4ciQSZ2uyBONGNqDeOsyrI4vV5LvrQUxg0vLCg{"v":"KERI10JSON0000f8_","t":"rpy","d":"EQqXdsemACUttgKUOiCYTs9JyXIjbio1itQdA2TeKF0I","dt":"2022-04-08T15:02:55.384117+00:00","r":"/loc/scheme","a":{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"tcp","url":"tcp://127.0.0.1:5634/"}}-VAi-CABBgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c0BD1uIyxgm1MFqhkwlbwarxOdNghWIrs_ClHLrHVj-qpGpS2cM1T1Y8E3GUsfvpsvkHNWUFCBZmaQHoSI4WE2cAw"#; //{"v":"KERI10JSON000116_","t":"rpy","d":"E2P4sXDFiU5MnLCk7pMm7IHWOu9UNrqLqnKZJWjdcvuo","dt":"2022-04-08T15:02:55.385191+00:00","r":"/end/role/add","a":{"cid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","role":"controller","eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c"}}-VAi-CABBgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c0B66IhoBb_nIQjY6wlNHwZHicm2Yf4Ioxbm5cnfSvPLQHFjhE7ROXTDlNfZIjyXMmmboHRtpLrCfHO5kz90PF6CA"#; + let body = 
r#"{"v":"KERI10JSON0000fa_","t":"rpy","d":"Elxbk-5h8a2PhoserezofHRXEDgAEwhrW0wvhXqyupmY","dt":"2022-04-08T15:00:29.163849+00:00","r":"/loc/scheme","a":{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"http","url":"http://127.0.0.1:5644/"}}-VAi-CABBgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c0BezpFQMVxodb7WMUBL4aLeQW1CUTUYbcFNPGohh02cKl7kSajyRZAentI-MkconvyI8-QfaO1in5mexYF-1ZPBg{"v":"KERI10JSON0000f8_","t":"rpy","d":"EfJP2Mkp_2UZJoWoNCWZHMgU7uWMIkzih19Nvit36Cho","dt":"2022-04-08T15:00:29.165103+00:00","r":"/loc/scheme","a":{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"tcp","url":"tcp://127.0.0.1:5634/"}}-VAi-CABBgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c0BFcwrcL7Hc8HYLSPvzMGAAEn5QyY76QWY1l2RotQqsX01HgDh4UZYU5GpiVY2A-AbsRIsUpfIKnQi7r4dc0o0DA"#; + let body2 = r#"{"v":"KERI10JSON0000fa_","t":"rpy","d":"EhmRb98IbAp7xqttLe-knTcT0pg5xbkFdU-D8FMi2NTE","dt":"2022-04-08T15:02:55.382713+00:00","r":"/loc/scheme","a":{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"http","url":"http://127.0.0.1:5644/"}}-VAi-CABBgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c0BQ2LHGCoTDzGTU4qnAKvnZocjUEwWfpILfngi5Ej3z_7SGJ5q4ciQSZ2uyBONGNqDeOsyrI4vV5LvrQUxg0vLCg{"v":"KERI10JSON0000f8_","t":"rpy","d":"EQqXdsemACUttgKUOiCYTs9JyXIjbio1itQdA2TeKF0I","dt":"2022-04-08T15:02:55.384117+00:00","r":"/loc/scheme","a":{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"tcp","url":"tcp://127.0.0.1:5634/"}}-VAi-CABBgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c0BD1uIyxgm1MFqhkwlbwarxOdNghWIrs_ClHLrHVj-qpGpS2cM1T1Y8E3GUsfvpsvkHNWUFCBZmaQHoSI4WE2cAw"#; oobi_manager.parse_and_save(body)?; - let res = oobi_manager.store.get_oobis_for_eid( + let res = oobi_manager.get_loc_scheme( &"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c" .parse::() .unwrap(), )?; assert!(!res.is_empty()); - // Save timestamps of last accepted oobis. 
let timestamps = res - .clone() .iter() - .map(|reply| reply.reply.get_timestamp()) + .map(|reply| reply.get_timestamp()) .collect::>(); assert_eq!( - res.iter().map(|oobi| oobi.reply.get_route()).collect::>(), - vec![ - ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"http","url":"http://127.0.0.1:5644/"}"#).unwrap()), - ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"tcp","url":"tcp://127.0.0.1:5634/"}"#).unwrap()) - ] - ); - - // process the same oobis but with new timestamp + res.iter().map(|oobi| oobi.get_route()).collect::>(), + vec![ + ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"http","url":"http://127.0.0.1:5644/"}"#).unwrap()), + ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"tcp","url":"tcp://127.0.0.1:5634/"}"#).unwrap()) + ] + ); + oobi_manager.parse_and_save(body2)?; - let res = oobi_manager.store.get_oobis_for_eid( + let res = oobi_manager.get_loc_scheme( &"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c" .parse::() .unwrap(), )?; assert!(!res.is_empty()); - // Save timestamps of last accepted oobis. let timestamps2 = res - .clone() .iter() - .map(|reply| reply.reply.get_timestamp()) + .map(|reply| reply.get_timestamp()) .collect::>(); - // The same oobis should be in database. assert_eq!( - res.iter().map(|oobi| oobi.reply.get_route()).collect::>(), - vec![ - ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"http","url":"http://127.0.0.1:5644/"}"#).unwrap()), - ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"tcp","url":"tcp://127.0.0.1:5634/"}"#).unwrap()) - ] - ); - // But timestamps should be updated. 
+ res.iter().map(|oobi| oobi.get_route()).collect::>(), + vec![ + ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"http","url":"http://127.0.0.1:5644/"}"#).unwrap()), + ReplyRoute::LocScheme(serde_json::from_str(r#"{"eid":"Bgoq68HCmYNUDgOz4Skvlu306o_NY-NrYuKAVhk3Zh9c","scheme":"tcp","url":"tcp://127.0.0.1:5634/"}"#).unwrap()) + ] + ); assert_ne!(timestamps, timestamps2); Ok(()) diff --git a/keriox_core/src/oobi_manager/storage.rs b/keriox_core/src/oobi_manager/storage.rs index 7cefa329..7ab3394c 100644 --- a/keriox_core/src/oobi_manager/storage.rs +++ b/keriox_core/src/oobi_manager/storage.rs @@ -1,150 +1,208 @@ -use std::sync::Arc; - -use redb::{MultimapTableDefinition, TableDefinition}; - -use super::Role; -use crate::oobi::Scheme; use crate::{ - database::redb::RedbError, prefix::IdentifierPrefix, - query::reply_event::{ReplyRoute, SignedReply}, + query::reply_event::SignedReply, }; -/// Location OOBIs store (eid, scheme) -> Signed oobi -const LOCATION: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("location"); +use super::Role; +use crate::oobi::Scheme; + +pub trait OobiStorageBackend: Send + Sync { + type Error: std::error::Error + Send + Sync + 'static; -/// End role OOBIs store (cid, role) -> Signed oobi -const END_ROLE: MultimapTableDefinition<(&[u8], &[u8]), &[u8]> = - MultimapTableDefinition::new("end_role"); + fn get_oobis_for_eid(&self, id: &IdentifierPrefix) -> Result, Self::Error>; + + fn get_last_loc_scheme( + &self, + eid: &IdentifierPrefix, + scheme: &Scheme, + ) -> Result, Self::Error>; + + fn get_end_role( + &self, + cid: &IdentifierPrefix, + role: Role, + ) -> Result>, Self::Error>; -pub struct OobiStorage { - db: Arc, + fn save_oobi(&self, signed_reply: &SignedReply) -> Result<(), Self::Error>; } -impl OobiStorage { - pub fn new(db: Arc) -> Result { - // Create tables - let write_txn = db.begin_write()?; - { - write_txn.open_table(LOCATION)?; - 
write_txn.open_multimap_table(END_ROLE)?; + +#[cfg(feature = "storage-redb")] +mod redb_backend { + use std::sync::Arc; + + use redb::{MultimapTableDefinition, TableDefinition}; + + use crate::{ + database::redb::RedbError, + oobi::Scheme, + prefix::IdentifierPrefix, + query::reply_event::{ReplyRoute, SignedReply}, + }; + + use super::{super::Role, OobiStorageBackend}; + + /// Location OOBIs store (eid, scheme) -> Signed oobi + const LOCATION: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("location"); + + /// End role OOBIs store (cid, role) -> Signed oobi + const END_ROLE: MultimapTableDefinition<(&[u8], &[u8]), &[u8]> = + MultimapTableDefinition::new("end_role"); + + pub struct RedbOobiStorage { + pub(super) db: Arc, + } + + impl RedbOobiStorage { + pub fn new(db: Arc) -> Result { + let write_txn = db.begin_write()?; + { + write_txn.open_table(LOCATION)?; + write_txn.open_multimap_table(END_ROLE)?; + } + write_txn.commit()?; + Ok(Self { db }) } - write_txn.commit()?; - Ok(Self { db }) } - pub fn get_oobis_for_eid(&self, id: &IdentifierPrefix) -> Result, RedbError> { - let str_id = id.to_string(); - let start = (str_id.as_str(), ""); + impl OobiStorageBackend for RedbOobiStorage { + type Error = RedbError; + + fn get_oobis_for_eid( + &self, + id: &IdentifierPrefix, + ) -> Result, Self::Error> { + let str_id = id.to_string(); + let start = (str_id.as_str(), ""); - // End of the range: ("apple\u{FFFD}", "") - // Adding a character greater than any normal Unicode character ensures the end is exclusive - let mut end_prefix = str_id.to_owned(); - end_prefix.push('\u{FFFD}'); // or use '\u{10FFFF}' for max valid Unicode scalar - let end = (end_prefix.as_str(), ""); + let mut end_prefix = str_id.to_owned(); + end_prefix.push('\u{FFFD}'); + let end = (end_prefix.as_str(), ""); - let signed_oobis = { + let signed_oobis = { + let read_txn = self.db.begin_read().unwrap(); + let table = read_txn.open_table(LOCATION).unwrap(); + table.range(start..end) + } + 
.unwrap(); + + let out = signed_oobis + .filter_map(|entry| { + let (_, value) = entry.unwrap(); + serde_cbor::from_slice::(value.value()).ok() + }) + .collect(); + Ok(out) + } + + fn get_last_loc_scheme( + &self, + eid: &IdentifierPrefix, + scheme: &Scheme, + ) -> Result, Self::Error> { let read_txn = self.db.begin_read().unwrap(); let table = read_txn.open_table(LOCATION).unwrap(); - table.range(start..end) + let el = table + .get(( + eid.to_string().as_str(), + serde_json::to_string(scheme).unwrap().as_str(), + )) + .unwrap(); + + let out = + el.and_then(|entry| serde_cbor::from_slice::(entry.value()).ok()); + Ok(out) } - .unwrap(); - let out = signed_oobis - .filter_map(|entry| { - let (_, value) = entry.unwrap(); - serde_cbor::from_slice::(value.value()).ok() + fn get_end_role( + &self, + cid: &IdentifierPrefix, + role: Role, + ) -> Result>, Self::Error> { + let read_txn = self.db.begin_read().unwrap(); + let table = read_txn.open_multimap_table(END_ROLE).unwrap(); + let entries: Vec = table + .get(( + cid.to_string().as_bytes(), + serde_json::to_vec(&role).unwrap().as_slice(), + )) + .unwrap() + .filter_map(|e| { + let value = e.unwrap(); + serde_cbor::from_slice::(value.value()).ok() + }) + .collect(); + Ok(if entries.is_empty() { + None + } else { + Some(entries) }) - .collect(); - Ok(out) - } - - pub fn get_last_loc_scheme( - &self, - eid: &IdentifierPrefix, - scheme: &Scheme, - ) -> Result, RedbError> { - let read_txn = self.db.begin_read().unwrap(); - let table = read_txn.open_table(LOCATION).unwrap(); - let el = table - .get(( - eid.to_string().as_str(), - serde_json::to_string(scheme).unwrap().as_str(), - )) - .unwrap(); + } - let out = el.and_then(|entry| { - // let (_, value) = entry; - serde_cbor::from_slice::(entry.value()).ok() - }); - Ok(out) - } + fn save_oobi(&self, signed_reply: &SignedReply) -> Result<(), Self::Error> { + match signed_reply.reply.get_route() { + ReplyRoute::Ksn(_, _) => todo!(), + ReplyRoute::LocScheme(loc_scheme) => { + let 
(cid, scheme) = ( + loc_scheme.get_eid().to_string(), + serde_json::to_string(&loc_scheme.scheme).unwrap(), + ); - pub fn get_end_role( - &self, - cid: &IdentifierPrefix, - role: Role, - ) -> Result>, RedbError> { - let read_txn = self.db.begin_read().unwrap(); - let table = read_txn.open_multimap_table(END_ROLE).unwrap(); - let entry = table - .get(( - cid.to_string().as_bytes(), - serde_json::to_vec(&role).unwrap().as_slice(), - )) - .unwrap(); - let out: Option> = entry - .map(|entry| { - let value = entry.unwrap(); - serde_cbor::from_slice::(value.value()).ok() - }) - .collect(); - Ok(out) - } + let write_txn = self.db.begin_write().unwrap(); + { + let mut table = (&write_txn).open_table(LOCATION).unwrap(); + table + .insert( + (cid.as_str(), scheme.as_str()), + serde_cbor::to_vec(signed_reply).unwrap().as_slice(), + ) + .unwrap(); + } + write_txn.commit().unwrap(); + } + ReplyRoute::EndRoleAdd(end_role) => { + let (eid, role) = ( + end_role.cid.to_string(), + serde_json::to_vec(&end_role.role).unwrap(), + ); - pub fn save_oobi(&self, signed_reply: &SignedReply) -> Result<(), RedbError> { - println!( - "\n\nSaving oobi for route: {:?}\n", - signed_reply.reply.get_route() - ); - match signed_reply.reply.get_route() { - ReplyRoute::Ksn(_, _) => todo!(), - ReplyRoute::LocScheme(loc_scheme) => { - let (cid, scheme) = ( - loc_scheme.get_eid().to_string(), - serde_json::to_string(&loc_scheme.scheme).unwrap(), - ); - - let write_txn = self.db.begin_write().unwrap(); - { - let mut table = (&write_txn).open_table(LOCATION).unwrap(); - table - .insert( - (cid.as_str(), scheme.as_str()), - serde_cbor::to_vec(signed_reply).unwrap().as_slice(), - ) - .unwrap(); + let write_txn = self.db.begin_write().unwrap(); + { + let mut table = (&write_txn).open_multimap_table(END_ROLE).unwrap(); + table + .insert( + (eid.as_bytes(), role.as_slice()), + serde_cbor::to_vec(signed_reply).unwrap().as_slice(), + ) + .unwrap(); + } + write_txn.commit().unwrap(); } - 
write_txn.commit().unwrap(); - } - ReplyRoute::EndRoleAdd(end_role) | ReplyRoute::EndRoleCut(end_role) => { - let (eid, role) = ( - end_role.cid.to_string(), - serde_json::to_vec(&end_role.role).unwrap(), - ); - - let write_txn = self.db.begin_write().unwrap(); - { - let mut table = (&write_txn).open_multimap_table(END_ROLE).unwrap(); - table - .insert( - (eid.as_bytes(), role.as_slice()), - serde_cbor::to_vec(signed_reply).unwrap().as_slice(), - ) - .unwrap(); + ReplyRoute::EndRoleCut(end_role) => { + // TODO: EndRoleCut should remove the role from storage, not insert. + // Currently mirrors redb's behaviour (inserting the Cut event) pending + // a proper removal implementation. + let (eid, role) = ( + end_role.cid.to_string(), + serde_json::to_vec(&end_role.role).unwrap(), + ); + + let write_txn = self.db.begin_write().unwrap(); + { + let mut table = (&write_txn).open_multimap_table(END_ROLE).unwrap(); + table + .insert( + (eid.as_bytes(), role.as_slice()), + serde_cbor::to_vec(signed_reply).unwrap().as_slice(), + ) + .unwrap(); + } + write_txn.commit().unwrap(); } - write_txn.commit().unwrap(); } + Ok(()) } - Ok(()) } } + +#[cfg(feature = "storage-redb")] +pub use redb_backend::RedbOobiStorage; diff --git a/keriox_core/src/prefix/mod.rs b/keriox_core/src/prefix/mod.rs index a14826dd..4b6983ec 100644 --- a/keriox_core/src/prefix/mod.rs +++ b/keriox_core/src/prefix/mod.rs @@ -1,5 +1,5 @@ use crate::{ - database::redb::rkyv_adapter::said_wrapper::SaidValue, + database::rkyv_adapter::said_wrapper::SaidValue, event::sections::key_config::SignatureError, }; diff --git a/keriox_core/src/processor/basic_processor.rs b/keriox_core/src/processor/basic_processor.rs index 66311249..13931563 100644 --- a/keriox_core/src/processor/basic_processor.rs +++ b/keriox_core/src/processor/basic_processor.rs @@ -18,7 +18,7 @@ pub struct BasicProcessor(EventProcessor); impl Processor for BasicProcessor { type Database = D; fn register_observer( - &mut self, + &self, observer: Arc, 
notification: &[JustNotification], ) -> Result<(), Error> { diff --git a/keriox_core/src/processor/escrow/maybe_out_of_order_escrow.rs b/keriox_core/src/processor/escrow/maybe_out_of_order_escrow.rs index 74308d96..6b6c3231 100644 --- a/keriox_core/src/processor/escrow/maybe_out_of_order_escrow.rs +++ b/keriox_core/src/processor/escrow/maybe_out_of_order_escrow.rs @@ -111,7 +111,7 @@ fn test_out_of_order() -> Result<(), Error> { let events_db_path = NamedTempFile::new().unwrap(); let redb = RedbDatabase::new(events_db_path.path()).unwrap(); let events_db = Arc::new(redb); - let mut processor = BasicProcessor::new(events_db.clone(), None); + let processor = BasicProcessor::new(events_db.clone(), None); // Register out of order escrow, to save and reprocess out of order events let new_ooo = Arc::new(MaybeOutOfOrderEscrow::new( diff --git a/keriox_core/src/processor/escrow/mod.rs b/keriox_core/src/processor/escrow/mod.rs index e908f54a..365ce291 100644 --- a/keriox_core/src/processor/escrow/mod.rs +++ b/keriox_core/src/processor/escrow/mod.rs @@ -38,23 +38,23 @@ impl Default for EscrowConfig { } } +pub struct EscrowSet { + pub out_of_order: Arc>, + pub partially_signed: Arc>, + pub partially_witnessed: Arc>, + pub delegation: Arc>, + pub duplicitous: Arc>, +} + pub fn default_escrow_bus( event_db: Arc, escrow_config: EscrowConfig, -) -> ( - NotificationBus, - ( - Arc>, - Arc>, - Arc>, - Arc>, - Arc>, - ), -) + notification_bus: Option, +) -> (NotificationBus, EscrowSet) where D: EventDatabase + EscrowCreator + Sync + Send + 'static, { - let mut bus = NotificationBus::new(); + let bus = notification_bus.unwrap_or_default(); // Register out of order escrow, to save and reprocess out of order events let ooo_escrow = Arc::new(MaybeOutOfOrderEscrow::new( @@ -105,6 +105,12 @@ where ( bus, - (ooo_escrow, ps_escrow, pw_escrow, delegation_escrow, dup), + EscrowSet { + out_of_order: ooo_escrow, + partially_signed: ps_escrow, + partially_witnessed: pw_escrow, + delegation: 
delegation_escrow, + duplicitous: dup, + }, ) } diff --git a/keriox_core/src/processor/escrow/partially_signed_escrow.rs b/keriox_core/src/processor/escrow/partially_signed_escrow.rs index 66909e62..6501d4ee 100644 --- a/keriox_core/src/processor/escrow/partially_signed_escrow.rs +++ b/keriox_core/src/processor/escrow/partially_signed_escrow.rs @@ -199,7 +199,7 @@ mod tests { let path = witness_root.path(); let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let mut processor = BasicProcessor::new(events_db.clone(), None); + let processor = BasicProcessor::new(events_db.clone(), None); // Register out of order escrow, to save and reprocess out of order events let ooo_escrow = Arc::new(MaybeOutOfOrderEscrow::new( @@ -271,7 +271,7 @@ mod tests { std::fs::create_dir_all(path).unwrap(); let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let mut processor = BasicProcessor::new(events_db.clone(), None); + let processor = BasicProcessor::new(events_db.clone(), None); // Register partially signed escrow, to save and reprocess partially signed events let ps_escrow = Arc::new(PartiallySignedEscrow::new( diff --git a/keriox_core/src/processor/escrow/partially_witnessed_escrow.rs b/keriox_core/src/processor/escrow/partially_witnessed_escrow.rs index 124abe96..b3a6b08a 100644 --- a/keriox_core/src/processor/escrow/partially_witnessed_escrow.rs +++ b/keriox_core/src/processor/escrow/partially_witnessed_escrow.rs @@ -383,7 +383,7 @@ mod tests { let redb = RedbDatabase::new(events_db_path.path()).unwrap(); let log_db = redb.log_db.clone(); let events_db = Arc::new(redb); - let mut event_processor = BasicProcessor::new(events_db.clone(), None); + let event_processor = BasicProcessor::new(events_db.clone(), None); let event_storage = EventStorage::new(Arc::clone(&events_db)); // Register not fully witnessed escrow, to save and 
reprocess events @@ -528,7 +528,7 @@ mod tests { let redb = RedbDatabase::new(events_db_path.path()).unwrap(); let log_db = redb.log_db.clone(); let events_db = Arc::new(redb); - let mut event_processor = BasicProcessor::new(events_db.clone(), None); + let event_processor = BasicProcessor::new(events_db.clone(), None); let event_storage = EventStorage::new(Arc::clone(&events_db)); // Register not fully witnessed escrow, to save and reprocess events @@ -671,7 +671,7 @@ mod tests { let redb = RedbDatabase::new(events_db_path.path()).unwrap(); let log_db = redb.log_db.clone(); let events_db = Arc::new(redb); - let mut event_processor = BasicProcessor::new(events_db.clone(), None); + let event_processor = BasicProcessor::new(events_db.clone(), None); let event_storage = EventStorage::new(Arc::clone(&events_db)); // Register not fully witnessed escrow, to save and reprocess events @@ -775,7 +775,7 @@ mod tests { let redb = RedbDatabase::new(events_db_path.path()).unwrap(); let log_db = redb.log_db.clone(); let events_db = Arc::new(redb); - let mut event_processor = BasicProcessor::new(events_db.clone(), None); + let event_processor = BasicProcessor::new(events_db.clone(), None); let event_storage = EventStorage::new(Arc::clone(&events_db)); // Register not fully witnessed escrow, to save and reprocess events diff --git a/keriox_core/src/processor/escrow/reply_escrow.rs b/keriox_core/src/processor/escrow/reply_escrow.rs index 2dc9be4b..889b221f 100644 --- a/keriox_core/src/processor/escrow/reply_escrow.rs +++ b/keriox_core/src/processor/escrow/reply_escrow.rs @@ -2,15 +2,14 @@ use std::sync::Arc; use said::SelfAddressingIdentifier; +#[cfg(feature = "storage-redb")] +use crate::database::redb::{ + escrow_database::SnKeyDatabase, + ksn_log::{AcceptedKsn, KsnLogDatabase}, + RedbDatabase, RedbError, +}; use crate::{ - database::{ - redb::{ - escrow_database::SnKeyDatabase, - ksn_log::{AcceptedKsn, KsnLogDatabase}, - RedbDatabase, RedbError, - }, - EventDatabase, 
SequencedEventDatabase, - }, + database::{EventDatabase, SequencedEventDatabase}, error::Error, prefix::IdentifierPrefix, processor::{ @@ -20,6 +19,7 @@ use crate::{ query::reply_event::{ReplyEvent, ReplyRoute, SignedReply}, }; +#[cfg(feature = "storage-redb")] #[derive(Clone)] pub struct ReplyEscrow { events_db: Arc, @@ -27,6 +27,7 @@ pub struct ReplyEscrow { escrowed_reply: Arc, } +#[cfg(feature = "storage-redb")] impl ReplyEscrow { pub fn new(events_db: Arc) -> Self { let acc = Arc::new(AcceptedKsn::new(events_db.db.clone()).unwrap()); @@ -41,6 +42,7 @@ impl ReplyEscrow { } } } +#[cfg(feature = "storage-redb")] impl Notifier for ReplyEscrow { fn notify(&self, notification: &Notification, bus: &NotificationBus) -> Result<(), Error> { match notification { @@ -63,6 +65,7 @@ impl Notifier for ReplyEscrow { } } +#[cfg(feature = "storage-redb")] impl ReplyEscrow { pub fn process_reply_escrow( &self, @@ -100,11 +103,13 @@ impl ReplyEscrow { } } +#[cfg(feature = "storage-redb")] pub struct SnKeyReplyEscrow { escrow: Arc, log: Arc, } +#[cfg(feature = "storage-redb")] impl SnKeyReplyEscrow { pub(crate) fn new(escrow: Arc, log: Arc) -> Self { Self { escrow, log } @@ -191,7 +196,7 @@ mod tests { fs::create_dir_all(root.path()).unwrap(); let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let mut event_processor = BasicProcessor::new(events_db.clone(), None); + let event_processor = BasicProcessor::new(events_db.clone(), None); let rpy_escrow = Arc::new(ReplyEscrow::new(events_db.clone())); event_processor.register_observer( rpy_escrow.clone(), diff --git a/keriox_core/src/processor/escrow_tests.rs b/keriox_core/src/processor/escrow_tests.rs index 1baa45aa..e1619de9 100644 --- a/keriox_core/src/processor/escrow_tests.rs +++ b/keriox_core/src/processor/escrow_tests.rs @@ -50,7 +50,7 @@ fn test_out_of_order_cleanup() -> Result<(), Error> { let events_db_path = NamedTempFile::new().unwrap(); let redb 
= RedbDatabase::new(events_db_path.path()).unwrap(); let events_db = Arc::new(redb); - let mut processor = BasicProcessor::new(events_db.clone(), None); + let processor = BasicProcessor::new(events_db.clone(), None); // Register out of order escrow, to save and reprocess out of order events let ooo_escrow = Arc::new(MaybeOutOfOrderEscrow::new( @@ -145,7 +145,7 @@ fn test_partially_sign_escrow_cleanup() -> Result<(), Error> { std::fs::create_dir_all(path).unwrap(); let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let mut processor = BasicProcessor::new(events_db.clone(), None); + let processor = BasicProcessor::new(events_db.clone(), None); // Register partially signed escrow, to save and reprocess partially signed events let ps_escrow = Arc::new(PartiallySignedEscrow::new( @@ -230,7 +230,7 @@ pub fn test_partially_witnessed_escrow_cleanup() -> Result<(), Error> { let redb = RedbDatabase::new(events_db_path.path()).unwrap(); let log_db = redb.log_db.clone(); let events_db = Arc::new(redb); - let mut event_processor = BasicProcessor::new(events_db.clone(), None); + let event_processor = BasicProcessor::new(events_db.clone(), None); let event_storage = EventStorage::new(Arc::clone(&events_db)); // Register not fully witnessed escrow, to save and reprocess events let partially_witnessed_escrow = Arc::new(PartiallyWitnessedEscrow::new( diff --git a/keriox_core/src/processor/event_storage.rs b/keriox_core/src/processor/event_storage.rs index 666afd48..dea15067 100644 --- a/keriox_core/src/processor/event_storage.rs +++ b/keriox_core/src/processor/event_storage.rs @@ -20,7 +20,7 @@ use crate::{ }; #[cfg(feature = "mailbox")] use crate::{ - database::{mailbox::MailboxData, redb::RedbDatabase}, + database::mailbox::MailboxData, query::mailbox::QueryArgsMbx, }; use crate::{ @@ -37,27 +37,37 @@ use crate::mailbox::MailboxResponse; pub struct EventStorage { pub events_db: Arc, #[cfg(feature = 
"mailbox")] - pub mailbox_data: MailboxData, + pub mailbox_data: Option, } -impl EventStorage { +impl EventStorage { pub fn new(events_db: Arc) -> Self { - #[cfg(feature = "mailbox")] - { - if let Some(redb_db) = - (events_db.as_ref() as &dyn std::any::Any).downcast_ref::() - { - let mailbox_data = MailboxData::new(redb_db.db.clone()).unwrap(); - Self { - events_db, - mailbox_data, - } - } else { - panic!("Expected RedbDatabase for mailbox feature"); - } + Self { + events_db, + #[cfg(feature = "mailbox")] + mailbox_data: None, + } + } +} + +#[cfg(feature = "mailbox")] +impl EventStorage { + pub fn new_redb(events_db: Arc) -> Self { + let mailbox_data = MailboxData::new(events_db.db.clone()).unwrap(); + Self { + events_db, + mailbox_data: Some(mailbox_data), + } + } +} + +#[cfg(feature = "mailbox")] +impl EventStorage { + pub fn new_with_mailbox(events_db: Arc, mailbox_data: MailboxData) -> Self { + Self { + events_db, + mailbox_data: Some(mailbox_data), } - #[cfg(not(feature = "mailbox"))] - Self { events_db } } } @@ -173,13 +183,20 @@ impl EventStorage { } } + #[cfg(feature = "mailbox")] + fn mailbox(&self) -> Result<&MailboxData, Error> { + self.mailbox_data + .as_ref() + .ok_or_else(|| Error::SemanticError("Mailbox not initialized".into())) + } + #[cfg(feature = "mailbox")] pub fn add_mailbox_multisig( &self, receipient: &IdentifierPrefix, to_forward: SignedEventMessage, ) -> Result<(), Error> { - self.mailbox_data + self.mailbox()? .add_mailbox_multisig(receipient, to_forward)?; Ok(()) @@ -191,7 +208,7 @@ impl EventStorage { receipient: &IdentifierPrefix, to_forward: SignedEventMessage, ) -> Result<(), Error> { - self.mailbox_data + self.mailbox()? 
.add_mailbox_delegate(receipient, to_forward)?; Ok(()) @@ -200,7 +217,7 @@ impl EventStorage { #[cfg(feature = "mailbox")] pub fn add_mailbox_receipt(&self, receipt: SignedNontransferableReceipt) -> Result<(), Error> { let id = receipt.body.prefix.clone(); - self.mailbox_data.add_mailbox_receipt(&id, receipt)?; + self.mailbox()?.add_mailbox_receipt(&id, receipt)?; Ok(()) } @@ -208,34 +225,32 @@ impl EventStorage { #[cfg(feature = "mailbox")] pub fn add_mailbox_reply(&self, reply: SignedEventMessage) -> Result<(), Error> { let id = reply.event_message.data.get_prefix(); - self.mailbox_data.add_mailbox_reply(&id, reply)?; + self.mailbox()?.add_mailbox_reply(&id, reply)?; Ok(()) } #[cfg(feature = "mailbox")] pub fn get_mailbox_messages(&self, args: &QueryArgsMbx) -> Result { + let mailbox = self.mailbox()?; let id = args.i.clone(); // query receipts - let receipt = match self - .mailbox_data + let receipt = match mailbox .get_mailbox_receipts(&id, args.topics.receipt as u64) { Some(receipts) => receipts.collect(), None => vec![], }; - let multisig = match self - .mailbox_data + let multisig = match mailbox .get_mailbox_multisig(&id, args.topics.multisig as u64) { Some(multisig) => multisig.collect(), None => vec![], }; - let delegate = match self - .mailbox_data + let delegate = match mailbox .get_mailbox_delegate(&id, args.topics.delegate as u64) { Some(delegate) => delegate.collect(), diff --git a/keriox_core/src/processor/mod.rs b/keriox_core/src/processor/mod.rs index e5d250ff..9c81cf7f 100644 --- a/keriox_core/src/processor/mod.rs +++ b/keriox_core/src/processor/mod.rs @@ -38,7 +38,7 @@ pub trait Processor { fn process_op_reply(&self, reply: &SignedReply) -> Result<(), Error>; fn register_observer( - &mut self, + &self, observer: Arc, notifications: &[JustNotification], ) -> Result<(), Error>; @@ -92,7 +92,7 @@ impl EventProcessor { } pub fn register_observer( - &mut self, + &self, observer: Arc, notifications: Vec, ) -> Result<(), Error> { diff --git 
a/keriox_core/src/processor/notification.rs b/keriox_core/src/processor/notification.rs index 41e77069..4658214c 100644 --- a/keriox_core/src/processor/notification.rs +++ b/keriox_core/src/processor/notification.rs @@ -1,4 +1,7 @@ -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::HashMap, + sync::{Arc, OnceLock, RwLock}, +}; #[cfg(feature = "query")] use crate::query::reply_event::SignedReply; @@ -10,36 +13,108 @@ use crate::{ }, }; +/// Internal dispatch strategy — the swappable part. +/// Implement this trait to change how notifications are delivered +/// (e.g. in-process HashMap, SQS queue, etc.). +pub trait NotificationDispatch: Send + Sync { + fn dispatch(&self, notification: &Notification) -> Result<(), Error>; + fn register_observer( + &self, + observer: Arc, + notifications: Vec, + ) -> Result<(), Error>; +} + +/// In-process dispatch: preserves the original HashMap-based behavior. +/// Uses `RwLock` for interior mutability so `register_observer` takes `&self`. +struct InProcessDispatch { + observers: RwLock>>>, + /// Back-reference to the owning `NotificationBus` so we can pass it + /// to `Notifier::notify()` callbacks. 
+ bus: OnceLock, +} + +impl InProcessDispatch { + fn new() -> Self { + Self { + observers: RwLock::new(HashMap::new()), + bus: OnceLock::new(), + } + } +} + +impl NotificationDispatch for InProcessDispatch { + fn dispatch(&self, notification: &Notification) -> Result<(), Error> { + let observers = self + .observers + .read() + .map_err(|_| Error::RwLockingError)?; + let bus = self.bus.get().ok_or_else(|| { + Error::SemanticError("InProcessDispatch: bus back-reference not set".into()) + })?; + if let Some(obs) = observers.get(¬ification.into()) { + for esc in obs.iter() { + esc.notify(notification, bus)?; + } + } + Ok(()) + } + + fn register_observer( + &self, + observer: Arc, + notifications: Vec, + ) -> Result<(), Error> { + let mut observers = self + .observers + .write() + .map_err(|_| Error::RwLockingError)?; + for notification in notifications { + observers + .entry(notification) + .or_default() + .push(observer.clone()); + } + Ok(()) + } +} + +/// Clone-able notification bus that delegates to an internal dispatch strategy. +#[derive(Clone)] pub struct NotificationBus { - observers: HashMap>>, + inner: Arc, } impl NotificationBus { + /// Create a new bus with the default in-process dispatch. pub fn new() -> Self { - Self { - observers: HashMap::new(), - } + let dispatch = Arc::new(InProcessDispatch::new()); + let bus = Self { + inner: dispatch.clone(), + }; + // Set the back-reference so InProcessDispatch can pass &NotificationBus + // to Notifier::notify() callbacks. + let _ = dispatch.bus.set(bus.clone()); + bus } + + /// Create a bus backed by a custom dispatch implementation. 
+ pub fn from_dispatch(dispatch: Arc) -> Self { + Self { inner: dispatch } + } + pub fn register_observer( - &mut self, + &self, escrow: Arc, notification: Vec, ) { - notification.into_iter().for_each(|notification| { - self.observers - .entry(notification) - .or_default() - .push(escrow.clone()); - }); + // register_observer on InProcessDispatch should not fail in practice, + // but if it does we silently ignore to preserve the existing API signature. + let _ = self.inner.register_observer(escrow, notification); } pub fn notify(&self, notification: &Notification) -> Result<(), Error> { - if let Some(obs) = self.observers.get(¬ification.into()) { - for esc in obs.iter() { - esc.notify(notification, self)?; - } - }; - Ok(()) + self.inner.dispatch(notification) } } diff --git a/keriox_core/src/processor/processor_tests.rs b/keriox_core/src/processor/processor_tests.rs index 719b134a..af2dd5ee 100644 --- a/keriox_core/src/processor/processor_tests.rs +++ b/keriox_core/src/processor/processor_tests.rs @@ -33,8 +33,8 @@ fn test_process() -> Result<(), Error> { let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let (not_bus, (ooo_escrow, ps_escrow, _pw_escrow, _, duplicates)) = - default_escrow_bus(events_db.clone(), EscrowConfig::default()); + let (not_bus, escrows) = + default_escrow_bus(events_db.clone(), EscrowConfig::default(), None); let event_processor = BasicProcessor::new(Arc::clone(&events_db), Some(not_bus)); let event_storage = EventStorage::new(Arc::clone(&events_db)); // Events and sigs are from keripy `test_multisig_digprefix` test. @@ -75,7 +75,7 @@ fn test_process() -> Result<(), Error> { // Process the same rotation event one more time. 
event_processor.process(&deserialized_rot)?; // should be saved as duplicious event - assert_eq!(duplicates.get(&id).unwrap().len(), 1); + assert_eq!(escrows.duplicitous.get(&id).unwrap().len(), 1); let ixn_raw = br#"{"v":"KERI10JSON0000cb_","t":"ixn","d":"EL6Dpm72KXayaUHYvVHlhPplg69fBvRt1P3YzuOGVpmz","i":"EBfxc4RiVY6saIFmUfEtETs1FcqmktZW88UkbnOg0Qen","s":"2","p":"EHjzZj4i_-RpTN2Yh-NocajFROJ_GkBtlByhRykqiXgz","a":[]}-AADAABgep0kbpgl91vvcXziJ7tHY1WVTAcUJyYCBNqTcNuK9AfzLHfKHhJeSC67wFRU845qjLSAC-XwWaqWgyAgw_8MABD5wTnqqJcnLWMA7NZ1vLOTzDspInJrly7O4Kt6Jwzue9z2TXkDXi1jr69JeKbzUQ6c2Ka1qPXAst0JzrOiyuAPACAcLHnOz1Owtgq8mcR_-PpAr91zOTK_Zj9r0V-9P47vzGsYwAxcVshclfhCMhu73aZuZbvQhy9Rxcj-qRz96cIL"#; let parsed = parse(ixn_raw).unwrap().1; @@ -113,7 +113,7 @@ fn test_process() -> Result<(), Error> { if let Notice::Event(ev) = partially_signed_deserialized_ixn { // should be saved in partially signed escrow assert_eq!( - ps_escrow.get_partially_signed_for_event(ev.event_message.clone()), + escrows.partially_signed.get_partially_signed_for_event(ev.event_message.clone()), Some(ev) ); } else { @@ -132,7 +132,7 @@ fn test_process() -> Result<(), Error> { event_processor.process(&out_of_order_rot)?; // should be saved in out of order escrow assert_eq!( - ooo_escrow + escrows.out_of_order .escrowed_out_of_order .get_from_sn(&id, 0) .unwrap() @@ -162,7 +162,7 @@ fn test_process_delegated() -> Result<(), Error> { let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let (not_bus, _ooo_escrow) = default_escrow_bus(events_db.clone(), EscrowConfig::default()); + let (not_bus, _escrows) = default_escrow_bus(events_db.clone(), EscrowConfig::default(), None); let event_processor = BasicProcessor::new(Arc::clone(&events_db), Some(not_bus)); let event_storage = EventStorage::new(Arc::clone(&events_db)); @@ -260,7 +260,7 @@ fn test_compute_state_at_sn() -> Result<(), Error> { let events_db_path = 
NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let (not_bus, _ooo_escrow) = default_escrow_bus(events_db.clone(), EscrowConfig::default()); + let (not_bus, _escrows) = default_escrow_bus(events_db.clone(), EscrowConfig::default(), None); let event_processor = BasicProcessor::new(events_db.clone(), Some(not_bus)); let event_storage = EventStorage::new(Arc::clone(&events_db)); @@ -306,8 +306,8 @@ pub fn test_partial_rotation_simple_threshold() -> Result<(), Error> { let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let (not_bus, (_, ps_escrow, _, _, _)) = - default_escrow_bus(events_db.clone(), EscrowConfig::default()); + let (not_bus, escrows) = + default_escrow_bus(events_db.clone(), EscrowConfig::default(), None); let processor = BasicProcessor::new(events_db.clone(), Some(not_bus)); let storage = EventStorage::new(events_db.clone()); @@ -435,7 +435,7 @@ pub fn test_partial_rotation_simple_threshold() -> Result<(), Error> { let signed_rotation = rotation.sign(signatures[..3].to_vec(), None, None); processor.process_notice(&Notice::Event(signed_rotation.clone()))?; // rotation should be stored in partially signed events escrow. - let escrow_contents = ps_escrow + let escrow_contents = escrows.partially_signed .escrowed_partially_signed .get_from_sn(&id_prefix, 0) .unwrap() @@ -458,7 +458,7 @@ pub fn test_partial_rotation_simple_threshold() -> Result<(), Error> { processor.process_notice(&Notice::Event(signed_rotation.clone()))?; // rotation should be removed from partially signed events escrow. 
assert_eq!( - ps_escrow + escrows.partially_signed .escrowed_partially_signed .get_from_sn(&id_prefix, 0) .unwrap() @@ -482,7 +482,7 @@ pub fn test_partial_rotation_weighted_threshold() -> Result<(), Error> { let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let (not_bus, _ooo_escrow) = default_escrow_bus(events_db.clone(), EscrowConfig::default()); + let (not_bus, _escrows) = default_escrow_bus(events_db.clone(), EscrowConfig::default(), None); ( BasicProcessor::new(events_db.clone(), Some(not_bus)), EventStorage::new(events_db.clone()), @@ -648,7 +648,7 @@ pub fn test_reserve_rotation() -> Result<(), Error> { let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let (not_bus, _ooo_escrow) = default_escrow_bus(events_db.clone(), EscrowConfig::default()); + let (not_bus, _escrows) = default_escrow_bus(events_db.clone(), EscrowConfig::default(), None); ( BasicProcessor::new(events_db.clone(), Some(not_bus)), EventStorage::new(events_db.clone()), @@ -829,7 +829,7 @@ pub fn test_custorial_rotation() -> Result<(), Error> { let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let (not_bus, _ooo_escrow) = default_escrow_bus(events_db.clone(), EscrowConfig::default()); + let (not_bus, _escrows) = default_escrow_bus(events_db.clone(), EscrowConfig::default(), None); ( BasicProcessor::new(events_db.clone(), Some(not_bus)), EventStorage::new(events_db.clone()), diff --git a/keriox_core/src/processor/validator.rs b/keriox_core/src/processor/validator.rs index 5982ee1a..140bbe81 100644 --- a/keriox_core/src/processor/validator.rs +++ b/keriox_core/src/processor/validator.rs @@ -60,7 +60,7 @@ pub struct EventValidator { event_storage: EventStorage, } -impl EventValidator { +impl EventValidator { pub fn new(event_database: Arc) -> Self { Self { 
event_storage: EventStorage::new(event_database), diff --git a/keriox_core/src/state/mod.rs b/keriox_core/src/state/mod.rs index d687d9a2..e0eefd15 100644 --- a/keriox_core/src/state/mod.rs +++ b/keriox_core/src/state/mod.rs @@ -1,7 +1,7 @@ use std::collections::HashSet; use crate::{ - database::redb::rkyv_adapter::said_wrapper::SaidValue, + database::rkyv_adapter::said_wrapper::SaidValue, error::Error, event::{ event_data::EventData, diff --git a/keriox_core/tests/test_oobi_manager.rs b/keriox_core/tests/test_oobi_manager.rs index cccb2506..2db1252c 100644 --- a/keriox_core/tests/test_oobi_manager.rs +++ b/keriox_core/tests/test_oobi_manager.rs @@ -20,7 +20,7 @@ mod test_oobi_manager { let (processor, storage, oobi_manager) = ( BasicProcessor::new(events_db.clone(), None), EventStorage::new(events_db.clone()), - OobiManager::new(events_db.clone()), + OobiManager::new(events_db.clone()).unwrap(), ); let events = parse_event_stream(oobi_rpy.as_bytes()).unwrap(); for event in events { diff --git a/keriox_core/tests/test_query.rs b/keriox_core/tests/test_query.rs index f9f40cbe..777eb395 100644 --- a/keriox_core/tests/test_query.rs +++ b/keriox_core/tests/test_query.rs @@ -18,8 +18,8 @@ mod test_query { let events_db_path = NamedTempFile::new().unwrap(); let events_db = Arc::new(RedbDatabase::new(events_db_path.path()).unwrap()); - let (notification_bus, (_ooo_escrow, _ps_esrow, _pw_escrow, _, _)) = - default_escrow_bus(events_db.clone(), EscrowConfig::default()); + let (notification_bus, _escrows) = + default_escrow_bus(events_db.clone(), EscrowConfig::default(), None); let (processor, storage) = ( BasicProcessor::new(events_db.clone(), Some(notification_bus)), diff --git a/keriox_sdk/Cargo.toml b/keriox_sdk/Cargo.toml index e30eef7e..7f23d350 100644 --- a/keriox_sdk/Cargo.toml +++ b/keriox_sdk/Cargo.toml @@ -8,21 +8,25 @@ license.workspace = true repository.workspace = true [dependencies] +keri-controller = { path = "../components/controller", version = "0.17.13" } 
keri-core = { path = "../keriox_core", version = "0.17.13", features = ["query", "oobi"] } cesrox = { version = "0.1.8", features = ["cesr-proof"] } serde_json = "1" serde_cbor = { version = "0.11" } said = { version = "0.4.3", features = ["macros"]} teliox = { path = "../support/teliox", version = "0.17.13", default-features = false } +thiserror = "1.0" log = "0.4" +rand = "0.8.5" +ed25519-dalek = { version = "2.1.0", features = ["rand_core"] } +base64 = "0.13" [dev-dependencies] +keri-core = { path = "../keriox_core", version = "0.17.13" } tempfile = { version = "3.20" } -ed25519-dalek = {version = "2.1.0", features = ["rand_core"] } -rand = "0.8.5" +tokio = { version = "1", features = ["full"] } reqwest = { version = "0.11", features = ["json"] } url = { version = "2.2.2", features = ["serde"] } -tokio = { version = "1", features = ["full"] } [package.metadata.release] publish = false diff --git a/keriox_sdk/README.md b/keriox_sdk/README.md new file mode 100644 index 00000000..92c29a7a --- /dev/null +++ b/keriox_sdk/README.md @@ -0,0 +1,28 @@ +# keriox_sdk + +High-level SDK for KERI and TEL operations, providing composable building blocks for both traditional and serverless deployments. + +## Key Types + +- **`KeriRuntime`**: The shared KERI processing stack -- bundles `BasicProcessor`, `EventStorage`, `EscrowSet`, and `NotificationBus`. Use `KeriRuntime::new(db)` for defaults or `KeriRuntime::with_config(db, config, Some(bus))` to inject a custom notification bus (e.g. SQS-backed dispatch for Lambda handlers). +- **`Controller`**: Composes `KeriRuntime` with a TEL layer (`Tel`). Provides methods for inception, KEL/TEL processing, and state queries. Access the KERI runtime via the public `kel` field. +- **`Identifier`**: Manages a specific identifier's Key Event Log, including event generation and state retrieval. 
+ +## Usage + +```rust +use keri_sdk::{Controller, KeriRuntime}; + +// Standalone KERI runtime (no TEL) +let runtime = KeriRuntime::new(event_db.clone()); +runtime.processor.process_notice(¬ice)?; + +// Full Controller with TEL +let controller = Controller::new(event_db, tel_db); +``` + +## Re-exports + +This crate re-exports commonly used types from `keri-core` and `teliox`: +- `database` module and `Signer` from keri-core +- `TelEventDatabase` and `TelEventStorage` from teliox diff --git a/keriox_sdk/examples/create_identifier.rs b/keriox_sdk/examples/create_identifier.rs new file mode 100644 index 00000000..46d27f6d --- /dev/null +++ b/keriox_sdk/examples/create_identifier.rs @@ -0,0 +1,45 @@ +//! Create a new KERI identifier with witnesses and watchers. +//! +//! This example requires a running KERI witness (`keria` or `keripy`) +//! and watcher to be reachable. Adjust the OOBI URLs for your environment. + +use keri_sdk::{types::IdentifierConfig, KeriStore}; +use std::path::PathBuf; + +#[tokio::main] +async fn main() -> keri_sdk::Result<()> { + // ── 1. Open a store ─────────────────────────────────────────────────────── + + let store = KeriStore::open(PathBuf::from("/tmp/keri-example"))?; + + // ── 2. Build config ─────────────────────────────────────────────────────── + // + // In a real scenario, witnesses/watchers are obtained from OOBI resolution. + // Here we show the structure; replace with real OOBIs for your environment. + + let config = IdentifierConfig { + // Witnesses are KERI nodes that countersign inception events. + // Provide their LocationScheme OOBIs here. + witnesses: vec![ + // Example: LocationScheme from a real witness OOBI URL. + // "http://witness-host:5631/oobi/BAAAAAAA.../controller/witness" + // .parse::().unwrap() + ], + witness_threshold: 0, // require 0 witnesses (offline mode) + // Watchers observe your KEL and help with key-event discovery. + watchers: vec![], + }; + + // ── 3. 
Create the identifier ────────────────────────────────────────────── + + let (identifier, _signer) = store.create("my-did", config).await?; + println!("Created identifier: {}", identifier.id()); + + // ── 4. Reload from disk (demonstrates KeriStore::load) ─────────────────── + + let reloaded = store.load("my-did")?; + assert_eq!(identifier.id(), reloaded.id()); + println!("Reloaded from disk: {}", reloaded.id()); + + Ok(()) +} diff --git a/keriox_sdk/examples/issue_credential.rs b/keriox_sdk/examples/issue_credential.rs new file mode 100644 index 00000000..339ce673 --- /dev/null +++ b/keriox_sdk/examples/issue_credential.rs @@ -0,0 +1,58 @@ +//! Issue and check a credential via the TEL. +//! +//! This example requires a running KERI witness because the registry inception +//! event must be anchored and receipted. Run with: +//! +//! cargo run --example issue_credential +//! +//! The identifier is created with no witnesses for offline demonstration; +//! a real deployment would add witnesses before calling `incept_registry`. + +use keri_sdk::{types::IdentifierConfig, KeriStore}; +use std::path::PathBuf; + +#[tokio::main] +async fn main() -> keri_sdk::Result<()> { + // ── 1. Create an identifier ─────────────────────────────────────────────── + + let tmp = tempfile::tempdir().expect("tempdir"); + let store = KeriStore::open(PathBuf::from(tmp.path()))?; + + // No witnesses — offline demo only. Real deployments need at least one. + let (mut identifier, signer) = store.create("issuer", IdentifierConfig::default()).await?; + println!("Issuer: {}", identifier.id()); + + // ── 2. Incept a credential registry ────────────────────────────────────── + // + // Without witnesses, this will fail at the `notify_witnesses` step because + // there are no witnesses to send to. In a real deployment with witnesses + // configured this call succeeds and returns the registry identifier. + // + // For a fully offline demo we skip this call. 
Uncomment in real use: + // + // let registry_id = incept_registry(&mut identifier, signer.clone()).await?; + // store.save_registry("issuer", ®istry_id)?; + + // ── 3. Issue a credential ───────────────────────────────────────────────── + // + // The credential SAID is the Blake3-256 self-addressing identifier of the + // ACDC body. You compute this outside the SDK using the `acdc` crate. + // + // let cred_said: SelfAddressingIdentifier = + // "EBdXt3gIXOf2BBWNHdSXCJnFJL5OuQPyM5K0neuniccM".parse().unwrap(); + // + // issue(&mut identifier, signer.clone(), cred_said.clone()).await?; + + // ── 4. Check credential status ──────────────────────────────────────────── + // + // Without a registry or witnesses, status is always Unknown locally. + // + // In a real deployment after issuing: + // let status = check_credential_status( + // &identifier, &signer, ®istry_id, &cred_said).await?; + // assert_eq!(status, CredentialStatus::Issued); + + println!("Example complete (offline mode — no registry or witnesses configured)"); + + Ok(()) +} diff --git a/keriox_sdk/examples/rotate_keys.rs b/keriox_sdk/examples/rotate_keys.rs new file mode 100644 index 00000000..587d7298 --- /dev/null +++ b/keriox_sdk/examples/rotate_keys.rs @@ -0,0 +1,65 @@ +//! Rotate keys for an existing identifier. +//! +//! After rotation the old key is no longer valid for signing. This example +//! shows the full rotation flow including persisting the new key state via +//! `KeriStore::save_rotation`. +//! +//! Rotation requires witnesses to be configured so that the rotation event +//! can be receipted. This example runs offline (no witnesses) for demo +//! purposes and will print an error at the `notify_witnesses` step. + +use keri_sdk::{ + operations::rotate, + types::{IdentifierConfig, RotationConfig}, + BasicPrefix, KeriStore, SeedPrefix, +}; +use std::path::PathBuf; + +#[tokio::main] +async fn main() -> keri_sdk::Result<()> { + // ── 1. 
Create an identifier ─────────────────────────────────────────────── + + let tmp = tempfile::tempdir().expect("tempdir"); + let store = KeriStore::open(PathBuf::from(tmp.path()))?; + let (_identifier, _signer) = store.create("alice", IdentifierConfig::default()).await?; + println!("Created identifier: {}", _identifier.id()); + + // ── 2. Load signer and identifier for rotation ──────────────────────────── + + let current_signer = store.load_signer("alice")?; + let mut identifier = store.load("alice")?; + + // ── 3. Generate a fresh "new next" key ──────────────────────────────────── + // + // In a real system, generate this from a random seed. Here we use a + // well-known test seed string for reproducibility. + let fresh_next_seed: SeedPrefix = + "ACrmDHtPQjnM8H9pyKA-QBNdfZ-xixTlRZTS8WXCrrMH".parse().unwrap(); + let (fresh_next_pub_key, _) = fresh_next_seed.derive_key_pair() + .expect("derive key pair"); + let new_next_pk = BasicPrefix::Ed25519NT(fresh_next_pub_key); + + // ── 5. Rotate (requires witnesses in a real deployment) ─────────────────── + + let config = RotationConfig { + new_next_pk, + witness_to_add: vec![], + witness_to_remove: vec![], + witness_threshold: 0, + }; + + match rotate(&mut identifier, current_signer, config).await { + Ok(()) => { + println!("Rotation successful"); + // ── 6. Persist the key rotation ─────────────────────────────────── + store.save_rotation("alice", fresh_next_seed)?; + println!("Key state updated on disk"); + } + Err(e) => { + // In offline mode this is expected — no witnesses to notify. + println!("Rotation error (expected offline): {e}"); + } + } + + Ok(()) +} diff --git a/keriox_sdk/examples/sign_and_verify.rs b/keriox_sdk/examples/sign_and_verify.rs new file mode 100644 index 00000000..026d4fb1 --- /dev/null +++ b/keriox_sdk/examples/sign_and_verify.rs @@ -0,0 +1,37 @@ +//! Sign and verify a message using a locally-incepted identifier. +//! +//! 
This example does not require network access — it uses no witnesses or +//! watchers, so everything runs offline. + +use keri_sdk::{signing, types::IdentifierConfig, KeriStore}; +use std::path::PathBuf; + +#[tokio::main] +async fn main() -> keri_sdk::Result<()> { + // ── 1. Open a store ─────────────────────────────────────────────────────── + + let tmp = tempfile::tempdir().expect("tempdir"); + let store = KeriStore::open(PathBuf::from(tmp.path()))?; + + // ── 2. Create an identifier (no witnesses, no watchers) ─────────────────── + + let config = IdentifierConfig::default(); + let (identifier, signer) = store.create("alice", config).await?; + println!("Identifier: {}", identifier.id()); + + // ── 3. Sign a message ───────────────────────────────────────────────────── + + let message = b"Hello, KERI!"; + let envelope = signing::sign(&identifier, &signer, message)?; + println!("Signed CESR envelope ({} bytes)", envelope.cesr.len()); + + // ── 4. Verify it ───────────────────────────────────────────────────────── + + let verified = signing::verify(&identifier, envelope.cesr.as_bytes())?; + + assert_eq!(verified.payload, message, "payload mismatch"); + println!("Verified signer: {}", verified.signer_id); + println!("Payload: {}", String::from_utf8_lossy(&verified.payload)); + + Ok(()) +} diff --git a/keriox_sdk/src/controller.rs b/keriox_sdk/src/controller.rs index 4d9a9084..9b392616 100644 --- a/keriox_sdk/src/controller.rs +++ b/keriox_sdk/src/controller.rs @@ -1,195 +1,81 @@ -use std::sync::Arc; +use std::path::PathBuf; -use keri_core::{ - actor::{event_generator, prelude::EventStorage}, - database::{EscrowCreator, EventDatabase}, - event::{event_data::EventData, KeyEvent}, - event_message::{ - cesr_adapter::{parse_event_type, EventType}, - msg::KeriEvent, - signed_event_message::{Message, Notice}, - }, - prefix::{ - BasicPrefix, IdentifierPrefix, IndexedSignature, SelfSigningPrefix, - }, - processor::{ - basic_processor::BasicProcessor, - 
escrow::{default_escrow_bus, EscrowConfig}, - Processor, - }, state::IdentifierState, -}; -use teliox::{ - database::TelEventDatabase, processor::storage::TelEventStorage, - state::vc_state::TelState, tel::Tel, +use keri_controller::{ + config::ControllerConfig, + controller::RedbController, + IdentifierPrefix, SelfSigningPrefix, }; +use keri_core::state::IdentifierState; -use crate::Identifier; +use crate::{error::Result, Identifier}; -pub struct Controller { - processor: Arc>, - event_storage: Arc>, - tel: Arc>, +/// Concrete controller wrapping `keri_controller::controller::RedbController`. +pub struct Controller { + pub(crate) inner: RedbController, } -impl< - D: EventDatabase + EscrowCreator + Send + Sync + 'static, - T: TelEventDatabase, - > Controller -{ - pub fn new(event_db: Arc, tel_db: Arc) -> Self { - let (not_bus, _) = - default_escrow_bus(event_db.clone(), EscrowConfig::default()); - - let processor = - Arc::new(BasicProcessor::new(event_db.clone(), Some(not_bus))); - - let kel_storage = Arc::new(EventStorage::new(event_db.clone())); - let tel_storage = Arc::new(TelEventStorage::new(tel_db)); - let tel = - Arc::new(Tel::new(tel_storage.clone(), kel_storage.clone(), None)); +impl Controller { + /// Create a controller with a database at the given path, using default transport. + pub fn new(db_path: PathBuf) -> Result { + let config = ControllerConfig { + db_path, + ..ControllerConfig::default() + }; + Ok(Self { + inner: RedbController::new(config)?, + }) + } - Self { - processor, - event_storage: kel_storage, - tel, - } + /// Create a controller from a full `ControllerConfig`. + pub fn new_with_config(config: ControllerConfig) -> Result { + Ok(Self { + inner: RedbController::new(config)?, + }) } - pub fn incept( + /// Generate an inception event (CESR-encoded JSON string). 
+ pub async fn incept( &self, - public_keys: Vec, - next_pub_keys: Vec, - ) -> Result { - event_generator::incept(public_keys, next_pub_keys, vec![], 0, None) - .map_err(|_e| ()) + public_keys: Vec, + next_pub_keys: Vec, + witnesses: Vec, + witness_threshold: u64, + ) -> Result { + Ok(self + .inner + .incept(public_keys, next_pub_keys, witnesses, witness_threshold) + .await?) } + /// Finalize inception by attaching a signature, returning the resulting `Identifier`. pub fn finalize_incept( &self, event: &[u8], sig: &SelfSigningPrefix, - ) -> Result, ()> { - let id_prefix = self.finalize_inception(event, sig)?; - - Ok(Identifier::new(id_prefix, self.event_storage.clone())) + ) -> Result { + let inner_id = self.inner.finalize_incept(event, sig)?; + Ok(Identifier { inner: inner_id }) } - pub fn load_identifier( + /// Return the accepted KEL (with receipts) for any known identifier. + pub fn get_kel_with_receipts( &self, id: &IdentifierPrefix, - ) -> Result, String> { - self.event_storage - .get_kel_messages_with_receipts_all(id) - .map_err(|e| e.to_string()) - .and_then(|kel| { - if kel.is_none_or(|v| v.is_empty()) { - Err("No KEL found for the identifier".to_string()) - } else { - Ok(Identifier::new(id.clone(), self.event_storage.clone())) - } - }) - } - - pub fn process_kel(&self, messages: &[Message]) -> Result<(), String> { - messages.iter().try_for_each(|msg| match msg { - Message::Notice(notice) => self - .processor - .process_notice(notice) - .map_err(|e| e.to_string()), - Message::Op(_) => { - Err("Operation messages are not supported".to_string()) - } - })?; - - Ok(()) - } - - pub fn process_tel(&self, tel: &[u8]) -> Result<(), String> { - self.tel - .parse_and_process_tel_stream(tel) - .map_err(|e| e.to_string()) - } - - pub fn get_vc_state( - &self, - vc_hash: &said::SelfAddressingIdentifier, - ) -> Result, String> { - self.tel.get_vc_state(vc_hash).map_err(|e| e.to_string()) - } - - pub fn get_state(&self, id: &IdentifierPrefix) -> Option { - 
self.event_storage.get_state(id) + ) -> Option> { + self.inner.get_kel_with_receipts(id) } - fn finalize_inception( + /// Verify a signature over data using known KEL state. + pub fn verify( &self, - event: &[u8], - sig: &SelfSigningPrefix, - ) -> Result { - let parsed_event = parse_event_type(event).map_err(|_e| ())?; - match parsed_event { - EventType::KeyEvent(ke) => { - if let EventData::Icp(_) = &ke.data.get_event_data() { - self.finalize_key_event(&ke, sig, 0)?; - Ok(ke.data.get_prefix()) - } else { - Err(()) - } - } - _ => Err(()), - } - } - - fn finalize_key_event( - &self, - event: &KeriEvent, - sig: &SelfSigningPrefix, - own_index: usize, - ) -> Result<(), ()> { - let signature = - IndexedSignature::new_both_same(sig.clone(), own_index as u16); - - let signed_message = event.sign(vec![signature], None, None); - self.processor - .process_notice(&Notice::Event(signed_message)) - .map_err(|_e| ())?; - - Ok(()) + data: &[u8], + signature: &keri_core::event_message::signature::Signature, + ) -> std::result::Result<(), keri_core::processor::validator::VerificationError> { + self.inner.verify(data, signature) } -} - -#[cfg(test)] -mod tests { - use keri_core::database::redb::RedbDatabase; - use teliox::database::{redb::RedbTelDatabase, TelEventDatabase}; - - use super::*; - use std::sync::Arc; - use tempfile::Builder; - - #[test] - fn test_incept() { - let root = Builder::new().prefix("test-db").tempdir().unwrap(); - std::fs::create_dir_all(root.path()).unwrap(); - - let db_path = root.path().to_path_buf(); - let event_database = { - let mut path = db_path.clone(); - path.push("events_database"); - Arc::new(RedbDatabase::new(&path).unwrap()) - }; - let tel_events_db = { - let mut path = db_path.clone(); - path.push("tel"); - path.push("events"); - Arc::new(RedbTelDatabase::new(&path).unwrap()) - }; - - let controller = Controller::new(event_database, tel_events_db); - let public_keys = vec![]; - let next_pub_keys = vec![]; - let result = 
controller.incept(public_keys, next_pub_keys); - assert!(result.is_ok()); + /// Return the accepted `IdentifierState` for a known identifier. + pub fn find_state(&self, id: &IdentifierPrefix) -> Result { + Ok(self.inner.find_state(id)?) } } diff --git a/keriox_sdk/src/error.rs b/keriox_sdk/src/error.rs new file mode 100644 index 00000000..09b20a50 --- /dev/null +++ b/keriox_sdk/src/error.rs @@ -0,0 +1,85 @@ +//! Error types for the keri-sdk crate. +//! +//! All public functions in this crate return [`Result`], which is an alias +//! for `std::result::Result`. Import the alias with +//! `use keri_sdk::Result;` or use it fully-qualified as `keri_sdk::Result`. +//! +//! Most variants carry enough context to identify the failing operation without +//! needing to inspect the wrapped upstream error. Where an upstream error is +//! propagated transparently it is wrapped in one of the `Controller`, +//! `Mechanics`, or `Signing` variants. + +use keri_controller::IdentifierPrefix; +use keri_core::actor::prelude::SelfAddressingIdentifier; + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum Error { + // ── Transparent upstream wrappers ───────────────────────────────────────── + + /// Wraps errors from the `keri-controller` layer. + #[error(transparent)] + Controller(#[from] keri_controller::error::ControllerError), + + /// Wraps errors from the identifier mechanics layer. + #[error(transparent)] + Mechanics(#[from] keri_controller::identifier::mechanics::MechanicsError), + + /// Wraps key-signing errors. + #[error("signing error: {0}")] + Signing(String), + + // ── Specific actionable variants ────────────────────────────────────────── + + /// The requested identifier is not known to this controller. + #[error("identifier not found: {0}")] + IdentifierNotFound(IdentifierPrefix), + + /// The identifier has no watchers configured. 
+ #[error("no watchers configured for identifier: {0}")] + NoWatchers(IdentifierPrefix), + + /// The identifier has no witnesses configured. + #[error("no witnesses configured for identifier: {0}")] + NoWitnesses(IdentifierPrefix), + + /// A credential registry has not been incepted for this identifier. + #[error("registry not incepted for identifier: {0}")] + RegistryNotIncepted(IdentifierPrefix), + + /// The credential is not known in the local TEL. + #[error("credential not found in TEL: {0}")] + CredentialNotFound(SelfAddressingIdentifier), + + /// Signature verification failed. + #[error("verification failed: {0}")] + VerificationFailed(String), + + /// CESR stream could not be parsed. + #[error("CESR parse error: {0}")] + CesrParseError(String), + + /// A CESR / CBOR / JSON encoding step failed. + #[error("encoding error: {0}")] + EncodingError(String), + + /// A disk I/O or database persistence error. + #[error("persistence error: {0}")] + PersistenceError(String), + + /// OOBI resolution failed for a specific identifier. + #[error("OOBI resolution failed for {id}: {reason}")] + OobiResolutionFailed { + /// The identifier whose OOBI could not be resolved. + id: IdentifierPrefix, + /// A human-readable description of what went wrong. + reason: String, + }, + + /// A catch-all for errors that do not fit a more specific variant. + #[error("{0}")] + Other(String), +} + +/// Convenience alias — all SDK functions return this type. 
+pub type Result = std::result::Result; diff --git a/keriox_sdk/src/identifier.rs b/keriox_sdk/src/identifier.rs index ac161fe2..844724af 100644 --- a/keriox_sdk/src/identifier.rs +++ b/keriox_sdk/src/identifier.rs @@ -1,184 +1,322 @@ +use keri_controller::{ + identifier::query::QueryResponse, + BasicPrefix, IdentifierPrefix, LocationScheme, Oobi, SelfSigningPrefix, +}; use keri_core::{ - actor::{ - event_generator, - prelude::{ - EventStorage, HashFunctionCode, Message, SerializationFormats, - }, - }, - database::EventDatabase, + actor::prelude::SelfAddressingIdentifier, + event::sections::seal::EventSeal, event_message::{ - cesr_adapter::{parse_event_type, EventType}, - msg::KeriEvent, - signed_event_message::{Notice, Op}, - timestamped::Timestamped, + msg::TypedEvent, + signature::Signature, + signed_event_message::Notice, + EventTypeTag, }, - oobi::Role, - prefix::{IdentifierPrefix, IndexedSignature, SelfSigningPrefix}, + event::KeyEvent, query::{ - query_event::{LogsQueryArgs, QueryEvent, QueryRoute}, - reply_event::{ReplyEvent, ReplyRoute, SignedReply}, + mailbox::MailboxQuery, + query_event::QueryEvent, }, + state::IdentifierState, }; -use std::sync::Arc; -use teliox::query::{TelQueryArgs, TelQueryEvent, TelQueryRoute}; +use teliox::{ + query::{TelQueryEvent}, + state::{vc_state::TelState, ManagerTelState}, +}; + +use crate::error::Result; -pub struct Identifier { - pub id: IdentifierPrefix, - event_storage: Arc>, +pub use keri_controller::identifier::query::WatcherResponseError; +pub use keri_controller::mailbox_updating::ActionRequired; + +/// Concrete identifier wrapping `keri_controller::controller::RedbIdentifier`. 
+pub struct Identifier { + pub(crate) inner: keri_controller::RedbIdentifier, } -impl Identifier { - pub fn new( - id: IdentifierPrefix, - event_storage: Arc>, - ) -> Self { - Self { id, event_storage } +impl Identifier { + // ── Identity ──────────────────────────────────────────────────────────── + + pub fn id(&self) -> &IdentifierPrefix { + self.inner.id() + } + + pub fn registry_id(&self) -> Option<&IdentifierPrefix> { + self.inner.registry_id() + } + + // ── State / KEL accessors ──────────────────────────────────────────────── + + /// Returns accepted `IdentifierState` for any known identifier. + pub fn find_state(&self, id: &IdentifierPrefix) -> Result { + Ok(self.inner.find_state(id)?) + } + + pub fn current_public_keys(&self) -> Result> { + Ok(self.inner.current_public_keys()?) } - pub fn get_prefix(&self) -> &IdentifierPrefix { - &self.id + pub fn witnesses(&self) -> impl Iterator + '_ { + self.inner.witnesses() } + pub fn watchers(&self) -> Result> { + Ok(self.inner.watchers()?) + } + + /// Returns own identifier's accepted KEL with receipts. pub fn get_own_kel(&self) -> Option> { - self.event_storage - .get_kel_messages_with_receipts_all(&self.id) - .unwrap() + self.inner.get_own_kel() + } + + /// Returns any identifier's accepted KEL with receipts. + pub fn get_kel(&self, id: &IdentifierPrefix) -> Option> { + self.inner.get_kel(id) } + // ── KEL management ────────────────────────────────────────────────────── + + /// Generate an interaction event anchoring the given SAIs. + pub fn anchor( + &self, + payload: &[SelfAddressingIdentifier], + ) -> Result { + Ok(self.inner.anchor(payload)?) + } + + /// Generate a rotation event. 
+ pub async fn rotate( + &self, + current_keys: Vec, + new_next_keys: Vec, + new_next_threshold: u64, + witness_to_add: Vec, + witness_to_remove: Vec, + witness_threshold: u64, + ) -> Result { + Ok(self + .inner + .rotate( + current_keys, + new_next_keys, + new_next_threshold, + witness_to_add, + witness_to_remove, + witness_threshold, + ) + .await?) + } + + /// Finalise a rotation event (sign + save + queue for witness notification). + pub async fn finalize_rotate( + &mut self, + event: &[u8], + sig: SelfSigningPrefix, + ) -> Result<()> { + Ok(self.inner.finalize_rotate(event, sig).await?) + } + + /// Finalise an interaction event (sign + save + queue for witness notification). + pub async fn finalize_anchor( + &mut self, + event: &[u8], + sig: SelfSigningPrefix, + ) -> Result<()> { + Ok(self.inner.finalize_anchor(event, sig).await?) + } + + /// Send pending events to witnesses, returns the number of events sent. + pub async fn notify_witnesses(&mut self) -> Result { + Ok(self.inner.notify_witnesses().await?) + } + + // ── OOBI / watcher ────────────────────────────────────────────────────── + + pub async fn resolve_oobi(&self, oobi: &Oobi) -> Result<()> { + Ok(self.inner.resolve_oobi(oobi).await?) + } + + pub async fn send_oobi_to_watcher( + &self, + id: &IdentifierPrefix, + oobi: &Oobi, + ) -> Result<()> { + Ok(self.inner.send_oobi_to_watcher(id, oobi).await?) + } + + /// Generate an `end_role_add` reply event for the given watcher. pub fn add_watcher( &self, watcher_id: IdentifierPrefix, - ) -> Result { - String::from_utf8( - event_generator::generate_end_role( - &self.id, - &watcher_id, - Role::Watcher, - true, - ) - .encode() - .map_err(|_| "Event encoding error".to_string())?, - ) - .map_err(|_| "Event format error".to_string()) + ) -> Result { + Ok(self.inner.add_watcher(watcher_id)?) + } + + /// Generate an `end_role_cut` reply event for the given watcher. 
+ pub fn remove_watcher( + &self, + watcher_id: IdentifierPrefix, + ) -> Result { + Ok(self.inner.remove_watcher(watcher_id)?) + } + + /// Sign and send the `end_role_add` reply to the watcher. + pub async fn finalize_add_watcher( + &self, + event: &[u8], + sig: SelfSigningPrefix, + ) -> Result<()> { + Ok(self.inner.finalize_add_watcher(event, sig).await?) } - pub fn finalize_add_watcher( + // ── Signing / verification ────────────────────────────────────────────── + + /// Return CESR stream containing the payload + transferable signature. + pub fn sign_to_cesr( + &self, + data: &str, + signatures: &[SelfSigningPrefix], + ) -> Result { + Ok(self.inner.sign_to_cesr(data, signatures)?) + } + + /// Build a `Signature` from raw bytes + `SelfSigningPrefix`es. + pub fn sign_data( &self, + data: &[u8], + signatures: &[SelfSigningPrefix], + ) -> Result { + Ok(self.inner.sign_data(data, signatures)?) + } + + /// Verify a CESR stream (payload + attached signatures) against known KEL. + pub fn verify_from_cesr(&self, stream: &[u8]) -> Result<()> { + Ok(self.inner.verify_from_cesr(stream)?) + } + + // ── TEL / Credential ──────────────────────────────────────────────────── + + /// Generate a `vcp` inception event and anchor `ixn`. + pub fn incept_registry( + &mut self, + ) -> Result<(IdentifierPrefix, TypedEvent)> { + Ok(self.inner.incept_registry()?) + } + + /// Finalise registry inception (sign + save the anchor ixn). 
+ pub async fn finalize_incept_registry( + &mut self, event: &[u8], sig: SelfSigningPrefix, - ) -> Result<(IdentifierPrefix, Vec), String> { - let parsed_event = parse_event_type(event) - .map_err(|_| "Event parsing error".to_string())?; - match parsed_event { - EventType::Rpy(rpy) => match rpy.get_route() { - ReplyRoute::EndRoleAdd(_) => Ok(self - .finalize_add_role(&self.id, rpy, vec![sig]) - .unwrap()), - ReplyRoute::EndRoleCut(_) => todo!(), - _ => Err("Wrong reply route".to_string()), - }, - _ => Err("Event is not a reply".to_string()), - } - } - - fn finalize_add_role( - &self, - signer_prefix: &IdentifierPrefix, - event: ReplyEvent, - sig: Vec, - ) -> Result<(IdentifierPrefix, Vec), String> { - let mut messages_to_send = vec![]; - let (dest_prefix, role) = match &event.data.data { - ReplyRoute::EndRoleAdd(role) => { - (role.eid.clone(), role.role.clone()) - } - ReplyRoute::EndRoleCut(role) => { - (role.eid.clone(), role.role.clone()) - } - _ => return Err("Wrong reply route".to_string()), - }; - let signed_reply = match signer_prefix { - IdentifierPrefix::Basic(bp) => Message::Op(Op::Reply( - SignedReply::new_nontrans(event, bp.clone(), sig[0].clone()), - )), - _ => { - let sigs = sig - .into_iter() - .enumerate() - .map(|(i, sig)| { - IndexedSignature::new_both_same(sig, i as u16) - }) - .collect(); - - let signed_rpy = - Message::Op(Op::Reply(SignedReply::new_trans( - event, - self.event_storage - .get_last_establishment_event_seal(signer_prefix) - .ok_or( - "Failed to get last establishment event seal" - .to_string(), - )?, - sigs, - ))); - if Role::Messagebox != role { - let kel = self - .event_storage - .get_kel_messages_with_receipts_all(signer_prefix) - .map_err(|_| "Failed to get KEL messages".to_string())? 
- .ok_or("Identifier not found".to_string())?; - - for ev in kel { - messages_to_send.push(Message::Notice(ev)); - } - }; - signed_rpy - } - }; - - messages_to_send.push(signed_reply.clone()); - Ok((dest_prefix, messages_to_send)) - } - - pub fn get_log_query( - &self, - identifier: IdentifierPrefix, - witness: IdentifierPrefix, - from_sn: Option, - limit: Option, - ) -> QueryEvent { - QueryEvent::new_query( - QueryRoute::Logs { - reply_route: "".to_string(), - args: LogsQueryArgs { - s: from_sn, - limit, - i: identifier, - src: Some(witness), - }, - }, - SerializationFormats::JSON, - HashFunctionCode::Blake3_256, - ) - } - - pub fn get_tel_query( + ) -> Result<()> { + Ok(self.inner.finalize_incept_registry(event, sig).await?) + } + + /// Send TEL events to backers (witnesses). + pub async fn notify_backers(&self) -> Result<()> { + Ok(self.inner.notify_backers().await?) + } + + /// Generate `iss` event + anchor `ixn`. Returns (vc_id, ixn_event). + pub fn issue( + &self, + credential_digest: SelfAddressingIdentifier, + ) -> Result<(IdentifierPrefix, TypedEvent)> { + Ok(self.inner.issue(credential_digest)?) + } + + /// Generate `rev` event + anchor `ixn` (encoded). Returns encoded ixn bytes. + pub fn revoke( + &self, + credential_sai: &SelfAddressingIdentifier, + ) -> Result> { + Ok(self.inner.revoke(credential_sai)?) + } + + /// Build a TEL query event. + pub fn query_tel( &self, registry_id: IdentifierPrefix, vc_identifier: IdentifierPrefix, - ) -> Result { - let route = TelQueryRoute::Tels { - reply_route: "".into(), - args: TelQueryArgs { - i: Some(vc_identifier), - ri: Some(registry_id), - }, - }; - let env = Timestamped::new(route); - Ok(KeriEvent::new( - SerializationFormats::JSON, - HashFunctionCode::Blake3_256.into(), - env, - )) + ) -> Result { + Ok(self.inner.query_tel(registry_id, vc_identifier)?) + } + + /// Sign + send TEL query, process the response. 
+ pub async fn finalize_query_tel( + &self, + qry: TelQueryEvent, + sig: SelfSigningPrefix, + ) -> Result<()> { + Ok(self.inner.finalize_query_tel(qry, sig).await?) + } + + /// Look up a VC's current `TelState` in the local TEL. + pub fn find_vc_state( + &self, + vc_hash: &SelfAddressingIdentifier, + ) -> Result> { + Ok(self.inner.find_vc_state(vc_hash)?) + } + + /// Look up a registry's management TEL state. + pub fn find_management_tel_state( + &self, + id: &IdentifierPrefix, + ) -> Result> { + Ok(self.inner.find_management_tel_state(id)?) + } + + // ── Mailbox / watcher queries ──────────────────────────────────────────── + + /// Generate mailbox query events for each of the given witnesses. + pub fn query_mailbox( + &self, + identifier: &IdentifierPrefix, + witnesses: &[BasicPrefix], + ) -> Result> { + Ok(self.inner.query_mailbox(identifier, witnesses)?) + } + + /// Sign + send mailbox queries, process responses. Returns required actions. + pub async fn finalize_query_mailbox( + &mut self, + queries: Vec<(MailboxQuery, SelfSigningPrefix)>, + ) -> Result> { + Ok(self.inner.finalize_query_mailbox(queries).await?) + } + + /// Generate watcher query events for an identifier. + pub fn query_watchers( + &self, + about_who: &EventSeal, + ) -> Result> { + Ok(self.inner.query_watchers(about_who)?) + } + + /// Sign + send watcher queries, process responses. + pub async fn finalize_query( + &self, + queries: Vec<(QueryEvent, SelfSigningPrefix)>, + ) -> (QueryResponse, Vec) { + self.inner.finalize_query(queries).await + } + + /// Generate a full-log watcher query for an identifier. + pub fn query_full_log( + &self, + id: &IdentifierPrefix, + watcher: IdentifierPrefix, + ) -> Result { + Ok(self.inner.query_full_log(id, watcher)?) + } + + // ── Low-level seal helpers ─────────────────────────────────────────────── + + pub fn get_last_establishment_event_seal(&self) -> Result { + Ok(self.inner.get_last_establishment_event_seal()?) 
+ } + + pub fn get_last_event_seal(&self) -> Result { + Ok(self.inner.get_last_event_seal()?) } } diff --git a/keriox_sdk/src/lib.rs b/keriox_sdk/src/lib.rs index 324ef107..1bd92142 100644 --- a/keriox_sdk/src/lib.rs +++ b/keriox_sdk/src/lib.rs @@ -1,9 +1,117 @@ -mod controller; -mod identifier; +//! # keri-sdk +//! +//! A high-level, stable Rust SDK for the [KERI] (Key Event Receipt +//! Infrastructure) protocol. +//! +//! This crate wraps [`keri-controller`] and exposes a clean public API that +//! hides CESR encoding details, signing internals, and database generics. +//! Consumers only import `keri_sdk::*`. +//! +//! [KERI]: https://keri.one +//! +//! ## Typical workflow +//! +//! ### Create an identifier +//! +//! ```no_run +//! use keri_sdk::{store::KeriStore, types::IdentifierConfig}; +//! use std::path::PathBuf; +//! +//! # #[tokio::main] +//! # async fn main() -> keri_sdk::Result<()> { +//! let store = KeriStore::open(PathBuf::from("/tmp/my-keri-store"))?; +//! let config = IdentifierConfig::default(); // no witnesses, no watchers +//! let (identifier, signer) = store.create("alice", config).await?; +//! println!("Identifier: {}", identifier.id()); +//! # Ok(()) +//! # } +//! ``` +//! +//! ### Sign and verify a message +//! +//! ```no_run +//! use keri_sdk::{signing, store::KeriStore, types::IdentifierConfig}; +//! use std::path::PathBuf; +//! +//! # #[tokio::main] +//! # async fn main() -> keri_sdk::Result<()> { +//! let store = KeriStore::open(PathBuf::from("/tmp/my-keri-store"))?; +//! let (identifier, signer) = store.create("bob", IdentifierConfig::default()).await?; +//! +//! let envelope = signing::sign(&identifier, &signer, b"hello world")?; +//! let verified = signing::verify(&identifier, envelope.cesr.as_bytes())?; +//! assert_eq!(verified.payload, b"hello world"); +//! # Ok(()) +//! # } +//! ``` +//! +//! ### Issue and check a credential +//! +//! ```no_run +//! use keri_sdk::{ +//! operations::{incept_registry, issue}, +//! 
tel::check_credential_status, +//! store::KeriStore, +//! types::IdentifierConfig, +//! }; +//! use keri_core::actor::prelude::SelfAddressingIdentifier; +//! use std::{path::PathBuf, str::FromStr}; +//! +//! # #[tokio::main] +//! # async fn main() -> keri_sdk::Result<()> { +//! let store = KeriStore::open(PathBuf::from("/tmp/my-keri-store"))?; +//! let (mut id, signer) = store.create("issuer", IdentifierConfig::default()).await?; +//! +//! let registry_id = incept_registry(&mut id, signer.clone()).await?; +//! +//! let cred_said: SelfAddressingIdentifier = +//! "EBdXt3gIXOf2BBWNHdSXCJnFJL5OuQPyM5K0neuniccM".parse().unwrap(); +//! issue(&mut id, signer.clone(), cred_said.clone()).await?; +//! +//! let status = check_credential_status(&id, &signer, ®istry_id, &cred_said).await?; +//! println!("Status: {:?}", status); +//! # Ok(()) +//! # } +//! ``` + +pub mod controller; +pub mod error; +pub mod identifier; +pub mod operations; +pub mod signing; +pub mod store; +pub mod tel; +pub mod types; pub use controller::Controller; +pub use error::{Error, Result}; pub use identifier::Identifier; -pub use keri_core::{database, signer::Signer}; -pub use teliox::{ - database::TelEventDatabase, processor::storage::TelEventStorage, +pub use identifier::{ActionRequired, WatcherResponseError}; +pub use types::{ + CredentialStatus, IdentifierConfig, RotationConfig, SignedEnvelope, VerifiedPayload, +}; +pub use store::KeriStore; +pub use tel::{check_credential_status, get_credential_status}; + +// Prefix / key types — consumers don't need keri-controller directly +pub use keri_controller::{ + BasicPrefix, CesrPrimitive, EndRole, IdentifierPrefix, KeyManager, LocationScheme, Oobi, + SeedPrefix, SelfSigningPrefix, }; +pub use keri_controller::config::ControllerConfig; +pub use keri_controller::identifier::query::QueryResponse; + +// Core types +pub use keri_core::{ + actor::prelude::SelfAddressingIdentifier, + event::sections::seal::EventSeal, + event_message::signature::Signature, + 
signer::Signer, +}; + +// TEL state types +pub use teliox::state::{vc_state::TelState, ManagerTelState}; +pub use teliox::query::TelQueryEvent; + +// Watcher/mailbox query types (kept for consumers that need low-level access) +pub use keri_core::query::query_event::QueryEvent; diff --git a/keriox_sdk/src/operations.rs b/keriox_sdk/src/operations.rs new file mode 100644 index 00000000..e4d9b304 --- /dev/null +++ b/keriox_sdk/src/operations.rs @@ -0,0 +1,398 @@ +//! Higher-level compound operations for common KERI workflows. +//! +//! These functions combine multiple low-level steps (event generation, +//! signing, witness notification, mailbox queries) so callers don't need to +//! orchestrate individual calls. All signing is done internally with the +//! provided [`Signer`] — callers never touch raw CESR prefix types. +//! +//! For persistence of identifiers across sessions see [`crate::store`]. +//! For signing arbitrary payloads see [`crate::signing`]. + +use std::path::PathBuf; +use std::sync::Arc; + +use keri_controller::{BasicPrefix, IdentifierPrefix, LocationScheme, Oobi, SelfSigningPrefix}; +use keri_core::{ + actor::prelude::SelfAddressingIdentifier, + prefix::IndexedSignature, + query::mailbox::SignedMailboxQuery, + signer::Signer, +}; + +use crate::{ + controller::Controller, + error::{Error, Result}, + identifier::Identifier, + types::{IdentifierConfig, RotationConfig}, +}; + +// ── Internal helpers ───────────────────────────────────────────────────────── + +pub(crate) fn ed25519_sig(signer: &Signer, data: &[u8]) -> Result { + let bytes = signer + .sign(data) + .map_err(|e| Error::Signing(e.to_string()))?; + Ok(SelfSigningPrefix::new( + cesrox::primitives::codes::self_signing::SelfSigning::Ed25519Sha512, + bytes, + )) +} + +// ── Public compound operations ──────────────────────────────────────────────── + +/// Create a new identifier and return it ready to use. +/// +/// Performs the full inception flow: +/// 1. 
Generates an inception event with the given keys and witness config. +/// 2. Signs + finalises it. +/// 3. Notifies witnesses. +/// 4. Queries each witness mailbox. +/// 5. Sends witness OOBIs to watchers and configures each watcher. +/// +/// # Errors +/// - [`Error::Controller`] if event generation or finalisation fails. +/// - [`Error::Mechanics`] if witness notification or mailbox queries fail. +/// - [`Error::Signing`] if the signer fails to produce a signature. +pub async fn create_identifier( + db_path: PathBuf, + signer: Arc, + next_pk: BasicPrefix, + config: IdentifierConfig, +) -> Result { + let controller = Controller::new(db_path)?; + let pks = vec![BasicPrefix::Ed25519(signer.public_key())]; + let npks = vec![next_pk]; + + let inception_event = controller + .incept(pks, npks, config.witnesses.clone(), config.witness_threshold) + .await?; + + let sig = ed25519_sig(&signer, inception_event.as_bytes())?; + let mut id = controller.finalize_incept(inception_event.as_bytes(), &sig)?; + + id.notify_witnesses().await?; + + for wit in &config.witnesses { + if let IdentifierPrefix::Basic(wit_id) = &wit.eid { + _query_mailbox(&mut id, signer.clone(), wit_id).await?; + } + id.send_oobi_to_watcher(id.id(), &Oobi::Location(wit.clone())) + .await?; + if let IdentifierPrefix::Basic(wit_id) = &wit.eid { + _query_mailbox(&mut id, signer.clone(), wit_id).await?; + } + } + + for watch in &config.watchers { + add_watcher(&mut id, signer.clone(), watch).await?; + } + + Ok(id) +} + +/// Create a new identifier (deprecated alias for [`create_identifier`]). +/// +/// # Deprecated +/// Use [`create_identifier`] with an [`IdentifierConfig`] instead. 
+#[deprecated(since = "0.2.0", note = "use create_identifier with IdentifierConfig")] +pub async fn setup_identifier( + controller: &Controller, + signer: Arc, + next_pk: BasicPrefix, + witnesses: Vec, + witness_threshold: u64, + watchers: Vec, +) -> Result { + let pks = vec![BasicPrefix::Ed25519(signer.public_key())]; + let npks = vec![next_pk]; + + let inception_event = controller + .incept(pks, npks, witnesses.clone(), witness_threshold) + .await?; + + let sig = ed25519_sig(&signer, inception_event.as_bytes())?; + let mut id = controller.finalize_incept(inception_event.as_bytes(), &sig)?; + + id.notify_witnesses().await?; + + for wit in &witnesses { + if let IdentifierPrefix::Basic(wit_id) = &wit.eid { + _query_mailbox(&mut id, signer.clone(), wit_id).await?; + } + id.send_oobi_to_watcher(id.id(), &Oobi::Location(wit.clone())) + .await?; + if let IdentifierPrefix::Basic(wit_id) = &wit.eid { + _query_mailbox(&mut id, signer.clone(), wit_id).await?; + } + } + + for watch in &watchers { + add_watcher(&mut id, signer.clone(), watch).await?; + } + + Ok(id) +} + +/// Add and configure a watcher for an identifier. +/// +/// Resolves the watcher's OOBI, generates an `end_role_add` reply, signs it, +/// and sends it to the watcher. +/// +/// # Errors +/// - [`Error::Mechanics`] if OOBI resolution or the network call fails. +/// - [`Error::Signing`] if signing the reply fails. +pub async fn add_watcher( + id: &mut Identifier, + km: Arc, + watcher_oobi: &LocationScheme, +) -> Result<()> { + id.resolve_oobi(&Oobi::Location(watcher_oobi.clone())).await?; + let rpy = id.add_watcher(watcher_oobi.eid.clone())?; + let sig = ed25519_sig(&km, rpy.as_bytes())?; + id.finalize_add_watcher(rpy.as_bytes(), sig).await?; + Ok(()) +} + +/// Rotate keys, notify witnesses, and query mailboxes. +/// +/// Signs the rotation event with `current_signer`, sends it to witnesses, +/// then queries each witness mailbox to process the receipts. 
+/// +/// # Errors +/// - [`Error::Controller`] if rotation event generation fails. +/// - [`Error::Mechanics`] if witness notification or mailbox queries fail. +/// - [`Error::Signing`] if signing fails. +pub async fn rotate( + id: &mut Identifier, + current_signer: Arc, + config: RotationConfig, +) -> Result<()> { + let current_keys = vec![BasicPrefix::Ed25519NT(current_signer.public_key())]; + let new_next_keys = vec![config.new_next_pk]; + + let rotation_event = id + .rotate( + current_keys, + new_next_keys, + 1, + config.witness_to_add, + config.witness_to_remove, + config.witness_threshold, + ) + .await?; + + let sig = ed25519_sig(¤t_signer, rotation_event.as_bytes())?; + id.finalize_rotate(rotation_event.as_bytes(), sig).await?; + id.notify_witnesses().await?; + + let witnesses = id.find_state(id.id())?.witness_config.witnesses; + for witness in witnesses { + _query_mailbox(id, current_signer.clone(), &witness).await?; + } + + Ok(()) +} + +/// Rotation with explicit positional args (deprecated alias for [`rotate`]). +/// +/// # Deprecated +/// Use [`rotate`] with a [`RotationConfig`] instead. 
+#[deprecated(since = "0.2.0", note = "use rotate with RotationConfig")] +pub async fn rotate_identifier( + id: &mut Identifier, + current_signer: Arc, + new_next_keys: Vec, + new_next_threshold: u64, + witness_to_add: Vec, + witness_to_remove: Vec, + witness_threshold: u64, +) -> Result<()> { + let current_keys = vec![BasicPrefix::Ed25519NT(current_signer.public_key())]; + + let rotation_event = id + .rotate( + current_keys, + new_next_keys, + new_next_threshold, + witness_to_add, + witness_to_remove, + witness_threshold, + ) + .await?; + + let sig = ed25519_sig(¤t_signer, rotation_event.as_bytes())?; + id.finalize_rotate(rotation_event.as_bytes(), sig).await?; + id.notify_witnesses().await?; + + let witnesses = id.find_state(id.id())?.witness_config.witnesses; + for witness in witnesses { + _query_mailbox(id, current_signer.clone(), &witness).await?; + } + + Ok(()) +} + +/// Incept a credential registry and return its identifier. +/// +/// Generates a `vcp` event, anchors it with an `ixn`, signs, notifies +/// witnesses and backers, and queries mailboxes. After this call the +/// identifier's `registry_id()` is set. +/// +/// # Errors +/// - [`Error::Controller`] if registry inception or encoding fails. +/// - [`Error::Mechanics`] on network failures. +/// - [`Error::Signing`] if signing fails. +pub async fn incept_registry( + id: &mut Identifier, + signer: Arc, +) -> Result { + let (reg_id, ixn) = id.incept_registry()?; + let encoded_ixn = ixn + .encode() + .map_err(|e| Error::EncodingError(e.to_string()))?; + let sig = ed25519_sig(&signer, &encoded_ixn)?; + id.finalize_anchor(&encoded_ixn, sig).await?; + id.notify_witnesses().await?; + + let witnesses = id.find_state(id.id())?.witness_config.witnesses; + for witness in &witnesses { + _query_mailbox(id, signer.clone(), witness).await?; + } + + id.notify_backers().await?; + + Ok(reg_id) +} + +/// Issue a credential (TEL `iss` + anchor `ixn` + witness/backer notification). 
+/// +/// After this call the credential identified by `credential_said` is in the +/// `Issued` state in the local TEL. Witnesses and backers are notified. +/// +/// # Errors +/// - [`Error::Controller`] if event generation or encoding fails. +/// - [`Error::Mechanics`] on network failures. +/// - [`Error::Signing`] if signing fails. +pub async fn issue( + id: &mut Identifier, + signer: Arc, + credential_said: SelfAddressingIdentifier, +) -> Result<()> { + let (_vc_id, ixn) = id.issue(credential_said)?; + let encoded_ixn = ixn + .encode() + .map_err(|e| Error::EncodingError(e.to_string()))?; + let sig = ed25519_sig(&signer, &encoded_ixn)?; + id.finalize_anchor(&encoded_ixn, sig).await?; + id.notify_witnesses().await?; + + let witnesses = id + .find_state(id.id())? + .witness_config + .witnesses; + for witness in &witnesses { + _query_mailbox(id, signer.clone(), witness).await?; + } + + id.notify_backers().await?; + + Ok(()) +} + +/// Issue a credential — deprecated positional-arg alias for [`issue`]. +/// +/// # Deprecated +/// Use [`issue`] instead. +#[deprecated(since = "0.2.0", note = "use issue(id, signer, cred_said)")] +pub async fn issue_credential( + identifier: &mut Identifier, + cred_said: SelfAddressingIdentifier, + km: Arc, +) -> Result<()> { + issue(identifier, km, cred_said).await +} + +/// Revoke a credential (TEL `rev` + anchor `ixn` + witness/backer notification). +/// +/// After this call the credential identified by `credential_said` is in the +/// `Revoked` state in the local TEL. +/// +/// # Errors +/// - [`Error::Controller`] if event generation fails. +/// - [`Error::Mechanics`] on network failures. +/// - [`Error::Signing`] if signing fails. 
+pub async fn revoke( + id: &mut Identifier, + signer: Arc, + credential_said: &SelfAddressingIdentifier, +) -> Result<()> { + let ixn = id.revoke(credential_said)?; + let sig = ed25519_sig(&signer, &ixn)?; + id.finalize_anchor(&ixn, sig).await?; + id.notify_witnesses().await?; + + let witnesses = id + .find_state(id.id())? + .witness_config + .witnesses; + for witness in &witnesses { + _query_mailbox(id, signer.clone(), witness).await?; + } + + id.notify_backers().await?; + + Ok(()) +} + +/// Revoke a credential — deprecated positional-arg alias for [`revoke`]. +/// +/// # Deprecated +/// Use [`revoke`] instead. +#[deprecated(since = "0.2.0", note = "use revoke(id, signer, cred_said)")] +pub async fn revoke_credential( + identifier: &mut Identifier, + cred_said: &SelfAddressingIdentifier, + km: Arc, +) -> Result<()> { + revoke(identifier, km, cred_said).await +} + +/// Sign and send mailbox queries to a single witness; return the signed queries. +/// +/// This is an internal helper used by other operations in this module. It is +/// also useful when you want to pull updates from a specific witness without +/// doing a full operation. +/// +/// # Errors +/// - [`Error::Mechanics`] on network or processing failures. +/// - [`Error::Signing`] if signing fails. +/// - [`Error::EncodingError`] if query encoding fails. +pub async fn query_mailbox( + id: &mut Identifier, + km: Arc, + witness_id: &BasicPrefix, +) -> Result> { + _query_mailbox(id, km, witness_id).await +} + +// Private implementation to avoid name collision with Identifier::query_mailbox. +async fn _query_mailbox( + id: &mut Identifier, + km: Arc, + witness_id: &BasicPrefix, +) -> Result> { + let mut out = vec![]; + for qry in id.query_mailbox(id.id(), &[witness_id.clone()])? 
{ + let encoded = qry.encode().map_err(|e| Error::EncodingError(e.to_string()))?; + let sig = SelfSigningPrefix::Ed25519Sha512( + km.sign(&encoded) + .map_err(|e| Error::Signing(e.to_string()))?, + ); + let signatures = vec![IndexedSignature::new_both_same(sig.clone(), 0)]; + let signed_qry = + SignedMailboxQuery::new_trans(qry.clone(), id.id().clone(), signatures); + id.finalize_query_mailbox(vec![(qry, sig)]).await?; + out.push(signed_qry); + } + Ok(out) +} diff --git a/keriox_sdk/src/signing.rs b/keriox_sdk/src/signing.rs new file mode 100644 index 00000000..a3243027 --- /dev/null +++ b/keriox_sdk/src/signing.rs @@ -0,0 +1,162 @@ +//! High-level signing and verification helpers. +//! +//! These functions hide all CESR encoding details. Consumers never need to +//! import `cesrox`, `IndexedSignature`, or `SelfSigningPrefix` — those are +//! internal implementation details. +//! +//! # Quick start +//! +//! ```no_run +//! use keri_sdk::{signing, Identifier, Signer}; +//! use std::sync::Arc; +//! +//! # fn example(id: &Identifier, signer: &Signer) -> keri_sdk::Result<()> { +//! let envelope = signing::sign(id, signer, b"hello world")?; +//! println!("CESR: {}", envelope.cesr); +//! +//! let verified = signing::verify(id, envelope.cesr.as_bytes())?; +//! assert_eq!(verified.payload, b"hello world"); +//! # Ok(()) +//! # } +//! ``` + +use keri_core::event_message::signature::{get_signatures, Signature}; +use keri_core::signer::Signer; + +use crate::{ + error::{Error, Result}, + identifier::Identifier, + types::{SignedEnvelope, VerifiedPayload}, +}; + +// CESR `sign_to_cesr` requires a valid JSON payload because the CESR stream +// parser uses serde_json internally. We wrap arbitrary bytes in a minimal +// JSON object `{"p":"...","e":"text"|"b64"}` so the envelope is always +// parseable. The `e` (encoding) field disambiguates text vs. base64 data. 
+ +fn wrap_payload(data: &[u8]) -> Result { + let (p, e): (&str, &str) = if let Ok(s) = std::str::from_utf8(data) { + (s, "text") // valid UTF-8: store as-is + } else { + // binary: store as base64url + return Ok(serde_json::to_string(&serde_json::json!({ + "p": base64::encode_config(data, base64::URL_SAFE_NO_PAD), + "e": "b64" + })).map_err(|e| Error::EncodingError(e.to_string()))?); + }; + serde_json::to_string(&serde_json::json!({ "p": p, "e": e })) + .map_err(|e| Error::EncodingError(e.to_string())) +} + +fn unwrap_payload(json_bytes: &[u8]) -> Result> { + let v: serde_json::Value = serde_json::from_slice(json_bytes) + .map_err(|e| Error::CesrParseError(format!("payload JSON: {e}")))?; + let p = v + .get("p") + .and_then(|v| v.as_str()) + .ok_or_else(|| Error::CesrParseError("missing 'p' field in payload".into()))?; + let enc = v.get("e").and_then(|v| v.as_str()).unwrap_or("text"); + match enc { + "b64" => base64::decode_config(p, base64::URL_SAFE_NO_PAD) + .map_err(|e| Error::CesrParseError(format!("base64 decode: {e}"))), + _ => Ok(p.as_bytes().to_vec()), + } +} + +/// Sign arbitrary bytes and return a self-describing CESR envelope. +/// +/// The bytes are wrapped in a JSON object (`{"p": "..."}`) so that the CESR +/// stream is always parseable. The signer's current key is used to produce a +/// transferable Ed25519 signature. +/// +/// # Errors +/// - [`Error::Signing`] if the signer fails. +/// - [`Error::EncodingError`] if JSON serialisation of the wrapper fails. +/// - [`Error::Controller`] if the CESR envelope cannot be built. 
+pub fn sign(identifier: &Identifier, signer: &Signer, data: &[u8]) -> Result { + let json_payload = wrap_payload(data)?; + + let raw_sig = signer + .sign(json_payload.as_bytes()) + .map_err(|e| Error::Signing(e.to_string()))?; + + let sig = keri_controller::SelfSigningPrefix::new( + cesrox::primitives::codes::self_signing::SelfSigning::Ed25519Sha512, + raw_sig, + ); + + let cesr = identifier.sign_to_cesr(&json_payload, &[sig])?; + + Ok(SignedEnvelope { + payload: data.to_vec(), + cesr, + }) +} + +/// Sign a UTF-8 string and return a self-describing CESR envelope. +/// +/// This is a convenience wrapper around [`sign`] for string payloads such as +/// JSON documents. The string is wrapped in a CESR-compatible JSON envelope +/// before signing, and unwrapped automatically by [`verify`]. +/// +/// # Errors +/// Same as [`sign`]. +pub fn sign_json(identifier: &Identifier, signer: &Signer, json: &str) -> Result { + sign(identifier, signer, json.as_bytes()) +} + + +/// Verify a CESR-signed envelope against the local KEL. +/// +/// Parses the CESR stream, verifies every attached signature against the +/// current KEL state of the signer, and returns the payload and signer +/// identifier on success. +/// +/// The signer's KEL must already be known locally — call +/// [`Identifier::resolve_oobi`] first if you have not seen this signer before. +/// +/// # Errors +/// - [`Error::CesrParseError`] if `cesr` is not a valid CESR stream. +/// - [`Error::VerificationFailed`] if one or more signatures do not verify. +pub fn verify(identifier: &Identifier, cesr: &[u8]) -> Result { + identifier + .verify_from_cesr(cesr) + .map_err(|e| Error::VerificationFailed(e.to_string()))?; + + let (json_bytes, sigs) = parse_signed_envelope(cesr)?; + + // Unwrap the JSON envelope produced by `sign`. + let payload = unwrap_payload(&json_bytes)?; + + // Try to extract signer from signature metadata; fall back to the local identifier. 
+ let signer_id = sigs + .iter() + .find_map(|s| s.get_signer()) + .unwrap_or_else(|| identifier.id().clone()); + + Ok(VerifiedPayload { payload, signer_id }) +} + +/// Parse a CESR stream into raw payload bytes and attached signatures. +/// +/// This is a low-level helper for when you need to inspect signature details +/// before deciding whether to verify (e.g. to extract the signer's identifier +/// before calling [`verify`]). +/// +/// # Errors +/// - [`Error::CesrParseError`] if `cesr` is not a valid CESR stream. +pub fn parse_signed_envelope(cesr: &[u8]) -> Result<(Vec, Vec)> { + let (_rest, parsed) = cesrox::parse(cesr) + .map_err(|e| Error::CesrParseError(format!("{e:?}")))?; + + let payload = parsed.payload.to_vec(); + + let sigs: Vec = parsed + .attachments + .into_iter() + .filter_map(|group| get_signatures(group).ok()) + .flatten() + .collect(); + + Ok((payload, sigs)) +} diff --git a/keriox_sdk/src/store.rs b/keriox_sdk/src/store.rs new file mode 100644 index 00000000..fe9c925b --- /dev/null +++ b/keriox_sdk/src/store.rs @@ -0,0 +1,279 @@ +//! Persistent storage for named KERI identifiers. +//! +//! [`KeriStore`] manages a root directory that holds one sub-directory per +//! *alias* (a human-readable name for an identifier). Each alias directory +//! stores the Redb database, the current and next signing-key seeds, the +//! identifier prefix, and an optional registry prefix. +//! +//! The on-disk layout is identical to the one used by `dkms-bin`, so existing +//! databases can be opened without migration. +//! +//! See [`crate::operations`] for the functions that use the identifiers +//! returned by this module. +//! +//! # Disk layout +//! +//! ```text +//! / +//! / +//! db/ ← Redb database directory +//! priv_key ← current SeedPrefix (KERI canonical text) +//! next_priv_key ← next SeedPrefix (KERI canonical text) +//! id ← IdentifierPrefix (KERI canonical text) +//! reg_id ← IdentifierPrefix (optional, set after incept_registry) +//! 
``` + +use std::{ + path::PathBuf, + str::FromStr, + sync::Arc, +}; + +use keri_controller::{ + controller::RedbIdentifier, + IdentifierPrefix, +}; +use keri_core::{ + prefix::SeedPrefix, + signer::Signer, +}; + +use crate::{ + controller::Controller, + error::{Error, Result}, + identifier::Identifier, + operations::create_identifier, + types::IdentifierConfig, +}; + +/// Manages a directory of named KERI identifiers. +/// +/// Each identifier is stored under `//` using the standard +/// disk layout. Use [`KeriStore::open`] to create or open a store, then +/// [`KeriStore::create`] to provision new identifiers and [`KeriStore::load`] +/// to restore them across sessions. +pub struct KeriStore { + root: PathBuf, +} + +impl KeriStore { + /// Open (or create) a store rooted at `root`. + /// + /// Creates the root directory if it does not exist. + /// + /// # Errors + /// - [`Error::PersistenceError`] if the directory cannot be created. + pub fn open(root: PathBuf) -> Result { + std::fs::create_dir_all(&root) + .map_err(|e| Error::PersistenceError(format!("cannot create store root: {e}")))?; + Ok(Self { root }) + } + + /// Create a brand-new identifier, persist all state, and return the live + /// handle together with the current signer. + /// + /// Generates random Ed25519 key pairs for the current and next keys + /// internally. `config` controls witnesses and watchers. + /// + /// # Errors + /// - [`Error::PersistenceError`] on I/O failures. + /// - Propagates errors from [`create_identifier`]. 
+    pub async fn create(
+        &self,
+        alias: &str,
+        config: IdentifierConfig,
+    ) -> Result<(Identifier, Arc<Signer>)> {
+        use cesrox::primitives::codes::seed::SeedCode;
+        use rand::rngs::OsRng;
+
+        let current_ed = ed25519_dalek::SigningKey::generate(&mut OsRng);
+        let next_ed = ed25519_dalek::SigningKey::generate(&mut OsRng);
+
+        let current_seed = SeedPrefix::new(
+            SeedCode::RandomSeed256Ed25519,
+            current_ed.as_bytes().to_vec(),
+        );
+        let next_seed = SeedPrefix::new(
+            SeedCode::RandomSeed256Ed25519,
+            next_ed.as_bytes().to_vec(),
+        );
+
+        self.create_with_seeds(alias, current_seed, next_seed, config).await
+    }
+
+    /// Create a brand-new identifier with caller-provided seeds.
+    ///
+    /// Useful for deterministic key derivation (e.g. from a mnemonic). The
+    /// seeds are persisted to disk and the identifier is fully incepted before
+    /// returning.
+    ///
+    /// # Errors
+    /// - [`Error::PersistenceError`] on I/O failures.
+    /// - [`Error::Signing`] if the seed cannot produce a key pair.
+    /// - Propagates errors from [`create_identifier`].
+    pub async fn create_with_seeds(
+        &self,
+        alias: &str,
+        current_seed: SeedPrefix,
+        next_seed: SeedPrefix,
+        config: IdentifierConfig,
+    ) -> Result<(Identifier, Arc<Signer>)> {
+        let alias_dir = self.alias_dir(alias);
+        std::fs::create_dir_all(&alias_dir)
+            .map_err(|e| Error::PersistenceError(format!("cannot create alias dir: {e}")))?;
+
+        let db_path = alias_dir.join("db");
+
+        let signer = Arc::new(
+            Signer::new_with_seed(&current_seed)
+                .map_err(|e| Error::Signing(e.to_string()))?,
+        );
+
+        let (next_pub_key, _) = next_seed.derive_key_pair()
+            .map_err(|e| Error::Signing(e.to_string()))?;
+
+        let next_pk = keri_controller::BasicPrefix::Ed25519NT(next_pub_key);
+
+        let id = create_identifier(db_path, signer.clone(), next_pk, config).await?;
+
+        // Persist seeds and identifier prefix.
+        use keri_core::prefix::CesrPrimitive;
+        self.write_file(alias, "priv_key", &current_seed.to_str())?;
+        self.write_file(alias, "next_priv_key", &next_seed.to_str())?;
+        self.write_file(alias, "id", &id.id().to_str())?;
+
+        Ok((id, signer))
+    }
+
+    /// Load an existing identifier from disk.
+    ///
+    /// Reconstructs the `Identifier` by opening the Redb database and reading
+    /// the persisted identifier prefix. The signing key is **not** loaded here
+    /// — use [`KeriStore::load_signer`] for that.
+    ///
+    /// # Errors
+    /// - [`Error::PersistenceError`] if the alias directory or files are missing.
+    /// - [`Error::IdentifierNotFound`] if the `id` file cannot be parsed.
+    /// - [`Error::Controller`] if the database cannot be opened.
+    pub fn load(&self, alias: &str) -> Result<Identifier> {
+        let alias_dir = self.alias_dir(alias);
+        let db_path = alias_dir.join("db");
+
+        let id_str = self.read_file(alias, "id")?;
+        let id_prefix = IdentifierPrefix::from_str(id_str.trim())
+            .map_err(|_| Error::IdentifierNotFound(
+                IdentifierPrefix::SelfAddressing(Default::default())
+            ))?;
+
+        let reg_id = self.read_file(alias, "reg_id").ok()
+            .and_then(|s| IdentifierPrefix::from_str(s.trim()).ok());
+
+        let controller = Controller::new(db_path)?;
+
+        // Reconstruct the inner RedbIdentifier using the controller's shared state.
+        let inner = RedbIdentifier::new(
+            id_prefix,
+            reg_id,
+            controller.inner.known_events.clone(),
+            controller.inner.communication.clone(),
+        );
+
+        Ok(Identifier { inner })
+    }
+
+    /// Load the current signing key for an alias.
+    ///
+    /// # Errors
+    /// - [`Error::PersistenceError`] if the `priv_key` file is missing or invalid.
+    /// - [`Error::Signing`] if the seed cannot produce a signer.
+    pub fn load_signer(&self, alias: &str) -> Result<Arc<Signer>> {
+        let seed = self.load_seed(alias, "priv_key")?;
+        let signer = Signer::new_with_seed(&seed)
+            .map_err(|e| Error::Signing(e.to_string()))?;
+        Ok(Arc::new(signer))
+    }
+
+    /// Load the next signing key (used as the current key after rotation).
+    ///
+    /// # Errors
+    /// - [`Error::PersistenceError`] if the `next_priv_key` file is missing or invalid.
+    /// - [`Error::Signing`] if the seed cannot produce a signer.
+    pub fn load_next_signer(&self, alias: &str) -> Result<Arc<Signer>> {
+        let seed = self.load_seed(alias, "next_priv_key")?;
+        let signer = Signer::new_with_seed(&seed)
+            .map_err(|e| Error::Signing(e.to_string()))?;
+        Ok(Arc::new(signer))
+    }
+
+    /// Commit a rotation: promote `next_priv_key` → `priv_key`, persist a new
+    /// next seed, and save the updated identifier prefix.
+    ///
+    /// Call this after [`crate::operations::rotate`] has succeeded.
+    ///
+    /// # Errors
+    /// - [`Error::PersistenceError`] on I/O failures.
+    pub fn save_rotation(&self, alias: &str, new_next_seed: SeedPrefix) -> Result<()> {
+        // Promote: next becomes current.
+        let next_content = self.read_file(alias, "next_priv_key")?;
+        self.write_file(alias, "priv_key", &next_content)?;
+
+        use keri_core::prefix::CesrPrimitive;
+        self.write_file(alias, "next_priv_key", &new_next_seed.to_str())?;
+
+        Ok(())
+    }
+
+    /// Persist a registry identifier after [`crate::operations::incept_registry`].
+    ///
+    /// # Errors
+    /// - [`Error::PersistenceError`] on I/O failures.
+    pub fn save_registry(&self, alias: &str, registry_id: &IdentifierPrefix) -> Result<()> {
+        use keri_core::prefix::CesrPrimitive;
+        self.write_file(alias, "reg_id", &registry_id.to_str())
+    }
+
+    /// List all stored aliases in this store.
+    ///
+    /// # Errors
+    /// - [`Error::PersistenceError`] if the root directory cannot be read.
+    pub fn list_aliases(&self) -> Result<Vec<String>> {
+        let mut aliases = vec![];
+        for entry in std::fs::read_dir(&self.root)
+            .map_err(|e| Error::PersistenceError(format!("cannot read store root: {e}")))?
+        {
+            let entry = entry
+                .map_err(|e| Error::PersistenceError(format!("directory entry error: {e}")))?;
+            if entry.path().is_dir() {
+                if let Some(name) = entry.file_name().to_str() {
+                    aliases.push(name.to_owned());
+                }
+            }
+        }
+        aliases.sort();
+        Ok(aliases)
+    }
+
+    // ── Private helpers ───────────────────────────────────────────────────────
+
+    fn alias_dir(&self, alias: &str) -> PathBuf {
+        self.root.join(alias)
+    }
+
+    fn write_file(&self, alias: &str, filename: &str, content: &str) -> Result<()> {
+        let path = self.alias_dir(alias).join(filename);
+        std::fs::write(&path, content)
+            .map_err(|e| Error::PersistenceError(format!("cannot write {path:?}: {e}")))
+    }
+
+    fn read_file(&self, alias: &str, filename: &str) -> Result<String> {
+        let path = self.alias_dir(alias).join(filename);
+        std::fs::read_to_string(&path)
+            .map_err(|e| Error::PersistenceError(format!("cannot read {path:?}: {e}")))
+    }
+
+    fn load_seed(&self, alias: &str, filename: &str) -> Result<SeedPrefix> {
+        let s = self.read_file(alias, filename)?;
+        SeedPrefix::from_str(s.trim())
+            .map_err(|e| Error::PersistenceError(format!("invalid seed in {filename}: {e}")))
+    }
+}
diff --git a/keriox_sdk/src/tel.rs b/keriox_sdk/src/tel.rs
new file mode 100644
index 00000000..b4ebbd16
--- /dev/null
+++ b/keriox_sdk/src/tel.rs
@@ -0,0 +1,69 @@
+//! TEL credential status queries.
+//!
+//! These functions provide a simple interface for checking whether a
+//! credential is currently `Issued`, `Revoked`, or `Unknown` without
+//! requiring callers to interact with `TelState` or `ManagerTelState` directly.
+//!
+//! For issuing and revoking credentials see [`crate::operations`]. For
+//! creating a registry see [`crate::operations::incept_registry`].
+ +use keri_controller::IdentifierPrefix; +use keri_core::{actor::prelude::SelfAddressingIdentifier, signer::Signer}; +use teliox::state::vc_state::TelState; + +use crate::{ + error::{Error, Result}, + identifier::Identifier, + operations::ed25519_sig, + types::CredentialStatus, +}; + +/// Query the TEL for a credential's current status (network call). +/// +/// Sends a signed TEL query to the first configured watcher and processes +/// the response. After this call, [`get_credential_status`] will return the +/// up-to-date status without another network round-trip. +/// +/// # Errors +/// - [`Error::NoWatchers`] if no watcher is configured for the identifier. +/// - [`Error::RegistryNotIncepted`] if `registry_id` is not known locally. +/// - [`Error::Mechanics`] on network or processing failures. +/// - [`Error::Signing`] if signing the query fails. +pub async fn check_credential_status( + id: &Identifier, + signer: &Signer, + registry_id: &IdentifierPrefix, + credential_said: &SelfAddressingIdentifier, +) -> Result { + // Make sure the registry is known. + id.find_management_tel_state(registry_id)? + .ok_or_else(|| Error::RegistryNotIncepted(registry_id.clone()))?; + + // Build a VC identifier from the credential SAID. + let vc_id = IdentifierPrefix::self_addressing(credential_said.clone()); + + let qry = id.query_tel(registry_id.clone(), vc_id)?; + let encoded = qry.encode().map_err(|e| Error::EncodingError(e.to_string()))?; + let sig = ed25519_sig(signer, &encoded)?; + id.finalize_query_tel(qry, sig).await?; + + get_credential_status(id, credential_said) +} + +/// Return the last known local TEL state without a network call. +/// +/// Returns [`CredentialStatus::Unknown`] if the TEL has not been queried yet +/// or the credential is not known locally. +/// +/// # Errors +/// - [`Error::Controller`] on database access failures. 
+pub fn get_credential_status( + id: &Identifier, + credential_said: &SelfAddressingIdentifier, +) -> Result { + match id.find_vc_state(credential_said)? { + Some(TelState::Issued(_)) => Ok(CredentialStatus::Issued), + Some(TelState::Revoked) => Ok(CredentialStatus::Revoked), + Some(TelState::NotIssued) | None => Ok(CredentialStatus::Unknown), + } +} diff --git a/keriox_sdk/src/types.rs b/keriox_sdk/src/types.rs new file mode 100644 index 00000000..34c167ba --- /dev/null +++ b/keriox_sdk/src/types.rs @@ -0,0 +1,82 @@ +//! Typed config structs and result types for keri-sdk operations. +//! +//! These structs replace long positional argument lists throughout the SDK. +//! Import them with `use keri_sdk::*` (they are re-exported from the crate +//! root) or qualify them as `keri_sdk::IdentifierConfig` etc. +//! +//! See [`crate::operations`] for the functions that accept these structs, and +//! [`crate::signing`] for `SignedEnvelope` / `VerifiedPayload` usage. + +use keri_controller::{BasicPrefix, IdentifierPrefix, LocationScheme}; + +// ── Creation / rotation config ──────────────────────────────────────────────── + +/// Configuration for creating a new KERI identifier. +/// +/// Used by [`crate::operations::create_identifier`] and +/// [`crate::store::KeriStore::create`]. +#[derive(Debug, Default, Clone)] +pub struct IdentifierConfig { + /// Witness OOBIs to include in the inception event. + pub witnesses: Vec, + /// Signing threshold required for witness receipts. + pub witness_threshold: u64, + /// Watcher OOBIs to configure after inception. + pub watchers: Vec, +} + +/// Configuration for rotating an identifier's keys. +/// +/// Used by [`crate::operations::rotate`]. +#[derive(Debug, Clone)] +pub struct RotationConfig { + /// The new *next* (pre-rotated) public key. + pub new_next_pk: BasicPrefix, + /// Witnesses to add during this rotation. + pub witness_to_add: Vec, + /// Witnesses to remove during this rotation. 
+ pub witness_to_remove: Vec, + /// New witness signing threshold (0 = keep current). + pub witness_threshold: u64, +} + +// ── Signing / verification result types ────────────────────────────────────── + +/// A CESR-encoded signed payload ready for transport. +/// +/// Returned by [`crate::signing::sign`] and [`crate::signing::sign_json`]. +#[derive(Debug, Clone)] +pub struct SignedEnvelope { + /// The raw payload bytes that were signed. + pub payload: Vec, + /// Full CESR stream: payload text + attached transferable signatures. + /// This string is what you send over the wire. + pub cesr: String, +} + +/// The verified contents of a CESR-signed envelope. +/// +/// Returned by [`crate::signing::verify`] on success. +#[derive(Debug, Clone)] +pub struct VerifiedPayload { + /// The raw payload bytes extracted from the CESR stream. + pub payload: Vec, + /// The identifier that produced the signature. + pub signer_id: IdentifierPrefix, +} + +// ── TEL credential status ───────────────────────────────────────────────────── + +/// The current lifecycle state of a credential in the TEL. +/// +/// Returned by [`crate::tel::get_credential_status`] and +/// [`crate::tel::check_credential_status`]. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CredentialStatus { + /// The credential has been issued and is currently valid. + Issued, + /// The credential has been revoked. + Revoked, + /// The TEL has not been queried yet, or the credential is not known locally. + Unknown, +} diff --git a/keriox_sdk/tests/test_identifier.rs b/keriox_sdk/tests/test_identifier.rs index ec19c889..a8d7f57a 100644 --- a/keriox_sdk/tests/test_identifier.rs +++ b/keriox_sdk/tests/test_identifier.rs @@ -1,17 +1,18 @@ +//! Integration tests for `keri-sdk`. +//! +//! These tests exercise the concrete (non-generic) `Controller` / `Identifier` +//! wrappers and the new `KeriStore` + `signing` APIs. Network-dependent tests +//! (witness queries, TEL queries) are marked `#[ignore]` so that a plain +//! 
`cargo test -p keri-sdk` still passes in CI without live infrastructure. + use ed25519_dalek::SigningKey; -use keri_core::{ - actor::parse_event_stream, - event_message::{ - signature::{Nontransferable, Signature}, - signed_event_message::{Message, Op}, - }, - prefix::{BasicPrefix, IdentifierPrefix, SeedPrefix, SelfSigningPrefix}, - query::query_event::{SignedKelQuery, SignedQueryMessage}, - signer::Signer, +use keri_sdk::{ + signing, BasicPrefix, Controller, IdentifierConfig, IdentifierPrefix, KeriStore, SeedPrefix, + SelfSigningPrefix, Signer, }; -use keri_sdk::{database::redb::RedbDatabase, Controller}; -use std::sync::Arc; -use teliox::database::{redb::RedbTelDatabase, TelEventDatabase}; +use std::{path::PathBuf, sync::Arc}; + +// ── Helpers ────────────────────────────────────────────────────────────────── struct KeysConfig { pub current: SeedPrefix, @@ -23,177 +24,192 @@ impl Default for KeysConfig { let current = SigningKey::generate(&mut rand::rngs::OsRng); let next = SigningKey::generate(&mut rand::rngs::OsRng); Self { - current: SeedPrefix::RandomSeed256Ed25519( - current.as_bytes().to_vec(), - ), + current: SeedPrefix::RandomSeed256Ed25519(current.as_bytes().to_vec()), next: SeedPrefix::RandomSeed256Ed25519(next.as_bytes().to_vec()), } } } +// ── Existing tests (unchanged logic) ───────────────────────────────────────── + #[tokio::test] -async fn test_init_id() -> Result<(), ()> { +async fn test_incept_local() { let root = tempfile::Builder::new() .prefix("test-db") .tempdir() .unwrap(); - println!("Root path: {:?}", root.path()); std::fs::create_dir_all(root.path()).unwrap(); - let db_path = root.path().to_path_buf(); - let event_database = { - let mut path = db_path.clone(); - path.push("events_database"); - Arc::new(RedbDatabase::new(&path).unwrap()) - }; - - let tel_events_db = { - let mut path = db_path.clone(); - path.push("tel"); - path.push("events"); - Arc::new(RedbTelDatabase::new(&path).unwrap()) - }; + let controller = 
Controller::new(root.path().to_path_buf()).unwrap(); let keys = KeysConfig::default(); - let (next_pub_key, _next_secret_keys) = - keys.next.derive_key_pair().map_err(|_e| ())?; - - let signer = - Arc::new(Signer::new_with_seed(&keys.current.clone()).unwrap()); + let (next_pub_key, _) = keys.next.derive_key_pair().unwrap(); + let signer = Arc::new(Signer::new_with_seed(&keys.current).unwrap()); - let controller = Controller::new(event_database, tel_events_db); let public_keys = vec![BasicPrefix::Ed25519(signer.public_key())]; let next_pub_keys = vec![BasicPrefix::Ed25519NT(next_pub_key)]; - let signing_inception = - controller.incept(public_keys.clone(), next_pub_keys)?; - let signature = SelfSigningPrefix::new( + // Generate inception event (no witnesses → no network required) + let inception_event = controller + .incept(public_keys.clone(), next_pub_keys, vec![], 0) + .await + .unwrap(); + + let sig = SelfSigningPrefix::new( cesrox::primitives::codes::self_signing::SelfSigning::Ed25519Sha512, - signer.sign(signing_inception.as_bytes()).unwrap(), + signer.sign(inception_event.as_bytes()).unwrap(), ); - let signing_identifier = - controller.finalize_incept(signing_inception.as_bytes(), &signature)?; - println!("Identifier: {:?}", signing_identifier.get_prefix()); - println!("KEL: {:?}", signing_identifier.get_own_kel()); - let witness_id = serde_json::Value::String( - "BNJJhjUnhlw-lsbYdehzLsX1hJMG9QJlK_wJ5AunJLrM".to_string(), - ); - let id_str = serde_json::Value::String( - "EHIydjfGpSu8mKvrDeWWPaV-mBPeP6Ad7DE6v5fZv2ps".to_string(), + let identifier = controller + .finalize_incept(inception_event.as_bytes(), &sig) + .unwrap(); + + assert!(matches!( + identifier.id(), + IdentifierPrefix::SelfAddressing(_) + )); + assert!(identifier.get_own_kel().is_some()); + assert!(!identifier.get_own_kel().unwrap().is_empty()); +} + +#[tokio::test] +async fn test_incept_and_watcher_event_generation() { + let root = tempfile::Builder::new() + .prefix("test-db") + .tempdir() + 
.unwrap(); + std::fs::create_dir_all(root.path()).unwrap(); + + let controller = Controller::new(root.path().to_path_buf()).unwrap(); + + let keys = KeysConfig::default(); + let (next_pub_key, _) = keys.next.derive_key_pair().unwrap(); + let signer = Arc::new(Signer::new_with_seed(&keys.current).unwrap()); + + let public_keys = vec![BasicPrefix::Ed25519(signer.public_key())]; + let next_pub_keys = vec![BasicPrefix::Ed25519NT(next_pub_key)]; + + let inception_event = controller + .incept(public_keys, next_pub_keys, vec![], 0) + .await + .unwrap(); + + let sig = SelfSigningPrefix::new( + cesrox::primitives::codes::self_signing::SelfSigning::Ed25519Sha512, + signer.sign(inception_event.as_bytes()).unwrap(), ); - let id: IdentifierPrefix = - serde_json::from_value(id_str).map_err(|_e| ())?; - let witness_prefix: IdentifierPrefix = - serde_json::from_value(witness_id).map_err(|_e| ())?; - let q = signing_identifier.get_log_query(id, witness_prefix, None, None); - let signature_qry = Signature::NonTransferable(Nontransferable::Couplet(vec![( - public_keys[0].clone(), - SelfSigningPrefix::new( - cesrox::primitives::codes::self_signing::SelfSigning::Ed25519Sha512, - signer.sign(q.encode().unwrap()).unwrap(), - ) - )])); - let singed_kel_q = SignedKelQuery { - query: q, - signature: signature_qry, - }; - let signed_qry = SignedQueryMessage::KelQuery(singed_kel_q); - - let body = Message::Op(Op::Query(signed_qry)).to_cesr().unwrap(); - let client = reqwest::Client::new(); - let url: url::Url = serde_json::from_value(serde_json::Value::String( - "http://w1.ea.argo.colossi.network/query".to_string(), - )) - .map_err(|_e| ())?; - let response = client.post(url).body(body).send().await.map_err(|e| { - eprintln!("Request error: {:?}", e); - })?; - - if !response.status().is_success() { - println!("Request failed with status: {}", response.status()); - let error_text = response.text().await.map_err(|_| ())?; - println!("Error body: {}", error_text); - return Err(()); - } - let kel 
= response.text().await.map_err(|_| ())?; - let parsed_kel = parse_event_stream(kel.as_bytes()).map_err(|_e| ())?; - println!("KEL: {:?}", kel); - println!("Parsed KEL: {:?}", parsed_kel.len()); - controller.process_kel(&parsed_kel).map_err(|e| { - eprintln!("Processing error: {:?}", e); - })?; - - // let msg = r#"{"v":"ACDC10JSON000207_","d":"EGRIIeNj2HIP787COJFiQbYqsp6UwAR22oeqWsEVhq42","i":"EHIydjfGpSu8mKvrDeWWPaV-mBPeP6Ad7DE6v5fZv2ps","ri":"EMDfCDynqGvpaN7Fbm5FADyfS98q_WUkPKmbZapBB1J_","s":"EHLjK9n1i1osh8SPYpyotPxC8IeBqtdfK-Qrz4_TZp6G","a":{"d":"ENaVuh9EMbTGgVjbnPHDZDDxvhsvzIZsuvTEIkFa3JPP","a":{"last_name":"KOWALSKI","first_name":"JAN","birth_date":"07.04.1964","birth_place":"WARSZAWA","issue_date":"06.03.2019","expiry_date":"18.01.2028","issuer":"PREZYDENT m.st. WARSZAWY","pesel":"64040738293","number":"SP006/15/1"}}}"#; - let vc_said: said::SelfAddressingIdentifier = - "EGRIIeNj2HIP787COJFiQbYqsp6UwAR22oeqWsEVhq42" - .parse() - .unwrap(); - let registry_id: said::SelfAddressingIdentifier = - "EMDfCDynqGvpaN7Fbm5FADyfS98q_WUkPKmbZapBB1J_" + let identifier = controller + .finalize_incept(inception_event.as_bytes(), &sig) + .unwrap(); + + let fake_watcher_id: IdentifierPrefix = + "BNJJhjUnhlw-lsbYdehzLsX1hJMG9QJlK_wJ5AunJLrM" .parse() .unwrap(); - let tel_qry = signing_identifier - .get_tel_query( - IdentifierPrefix::self_addressing(registry_id), - IdentifierPrefix::self_addressing(vc_said.clone()), - ) - .map_err(|_e| ())?; + let rpy = identifier.add_watcher(fake_watcher_id); + assert!(rpy.is_ok(), "add_watcher event generation failed"); +} - let signature_tel_query = SelfSigningPrefix::new( - cesrox::primitives::codes::self_signing::SelfSigning::Ed25519Sha512, - signer.sign(tel_qry.encode().unwrap()).unwrap(), - ); - let tel_query = match signing_identifier.id { - IdentifierPrefix::Basic(bp) => { - teliox::query::SignedTelQuery::new_nontrans( - tel_qry.clone(), - bp.clone(), - signature_tel_query, - ) - } - _ => { - let signatures = - 
vec![keri_core::prefix::IndexedSignature::new_both_same( - signature_tel_query, - 0, - )]; - teliox::query::SignedTelQuery::new_trans( - tel_qry.clone(), - signing_identifier.id.clone(), - signatures, - ) - } - }; - - let tel_url: url::Url = serde_json::from_value(serde_json::Value::String( - "http://wa1.hcf.argo.colossi.network/query/tel".to_string(), - )) - .map_err(|_e| ())?; - let tel_response = client - .post(tel_url) - .body(tel_query.to_cesr().unwrap()) - .send() +// ── KeriStore tests ─────────────────────────────────────────────────────────── + +/// Create an identifier via KeriStore and verify the persisted state. +#[tokio::test] +async fn test_keri_store_create_persists() { + let root = tempfile::Builder::new() + .prefix("keri-store") + .tempdir() + .unwrap(); + + let store = KeriStore::open(PathBuf::from(root.path())).unwrap(); + + // Create with no witnesses (offline). + let (identifier, _signer) = store + .create("alice", IdentifierConfig::default()) .await - .map_err(|e| { - eprintln!("Request error: {:?}", e); - })?; - - if !tel_response.status().is_success() { - println!("Request failed with status: {}", tel_response.status()); - let error_text = tel_response.text().await.map_err(|_| ())?; - println!("Error body: {}", error_text); - return Err(()); - } + .unwrap(); + + let id = identifier.id().clone(); + assert!(matches!(id, IdentifierPrefix::SelfAddressing(_))); + + // Verify the alias appears in the store. + let aliases = store.list_aliases().unwrap(); + assert!(aliases.contains(&"alice".to_string())); + + // Signer can be loaded (key files were written correctly). + let _signer2 = store.load_signer("alice").unwrap(); + let _next_signer = store.load_next_signer("alice").unwrap(); +} + +/// list_aliases returns only the created alias. 
+#[tokio::test] +async fn test_keri_store_list_aliases() { + let root = tempfile::Builder::new() + .prefix("keri-store") + .tempdir() + .unwrap(); + + let store = KeriStore::open(PathBuf::from(root.path())).unwrap(); + assert!(store.list_aliases().unwrap().is_empty()); + + store + .create("bob", IdentifierConfig::default()) + .await + .unwrap(); + + let aliases = store.list_aliases().unwrap(); + assert_eq!(aliases, vec!["bob".to_string()]); +} + +// ── signing module tests ────────────────────────────────────────────────────── + +/// Sign a payload and verify the resulting CESR envelope (offline). +#[tokio::test] +async fn test_sign_and_verify() { + let root = tempfile::Builder::new() + .prefix("keri-store") + .tempdir() + .unwrap(); + + let store = KeriStore::open(PathBuf::from(root.path())).unwrap(); + + // Use the identifier directly from create — no need to reload from disk. + let (identifier, signer) = store + .create("carol", IdentifierConfig::default()) + .await + .unwrap(); + + let message = b"hello KERI"; + let envelope = signing::sign(&identifier, &signer, message).unwrap(); + + assert_eq!(envelope.payload, message); + assert!(!envelope.cesr.is_empty()); + + let verified = signing::verify(&identifier, envelope.cesr.as_bytes()).unwrap(); + assert_eq!(verified.payload, message); +} + +/// parse_signed_envelope extracts the payload and signatures. 
+#[tokio::test] +async fn test_parse_signed_envelope() { + let root = tempfile::Builder::new() + .prefix("keri-store") + .tempdir() + .unwrap(); + + let store = KeriStore::open(PathBuf::from(root.path())).unwrap(); + let (identifier, signer) = store + .create("dave", IdentifierConfig::default()) + .await + .unwrap(); - let tel = tel_response.text().await.map_err(|_| ())?; - println!("Tel: {}", tel); - let _ = controller.process_tel(tel.as_bytes()); - let state = controller.get_vc_state(&vc_said).map_err(|_e| ())?; - println!("VC said: {:?}", vc_said); - println!("VC State: {:?}", state); + let message = b"parse me"; + let envelope = signing::sign(&identifier, &signer, message).unwrap(); - Ok(()) + let (payload, sigs) = signing::parse_signed_envelope(envelope.cesr.as_bytes()).unwrap(); + // parse_signed_envelope returns the raw JSON bytes of the CESR payload, + // not the original bytes. Use signing::verify to get back the original payload. + assert!(!payload.is_empty()); + assert!(!sigs.is_empty()); } diff --git a/keriox_tests/src/lib.rs b/keriox_tests/src/lib.rs index 16a93479..05dd1d84 100644 --- a/keriox_tests/src/lib.rs +++ b/keriox_tests/src/lib.rs @@ -1,9 +1,9 @@ use std::{path::Path, sync::Arc}; use keri_controller::{ - config::ControllerConfig, controller::Controller, error::ControllerError, - identifier::Identifier, mailbox_updating::ActionRequired, BasicPrefix, CryptoBox, - IdentifierPrefix, KeyManager, LocationScheme, SelfSigningPrefix, + config::ControllerConfig, error::ControllerError, mailbox_updating::ActionRequired, + BasicPrefix, CryptoBox, IdentifierPrefix, KeyManager, LocationScheme, RedbController, + RedbIdentifier, SelfSigningPrefix, }; use keri_core::{ actor::error::ActorError, @@ -23,24 +23,24 @@ pub async fn setup_identifier( witness_locations: Vec, transport: Option>, tel_transport: Option, -) -> (Identifier, CryptoBox, Arc) { +) -> (RedbIdentifier, CryptoBox, Arc) { let verifier_controller = Arc::new( match (transport, tel_transport) { - 
(None, None) => Controller::new(ControllerConfig { + (None, None) => RedbController::new(ControllerConfig { db_path: root_path.to_owned(), ..Default::default() }), - (None, Some(tel_transport)) => Controller::new(ControllerConfig { + (None, Some(tel_transport)) => RedbController::new(ControllerConfig { db_path: root_path.to_owned(), tel_transport: Box::new(tel_transport.clone()), ..Default::default() }), - (Some(transport), None) => Controller::new(ControllerConfig { + (Some(transport), None) => RedbController::new(ControllerConfig { db_path: root_path.to_owned(), transport: Box::new(transport.clone()), ..Default::default() }), - (Some(transport), Some(tel_transport)) => Controller::new(ControllerConfig { + (Some(transport), Some(tel_transport)) => RedbController::new(ControllerConfig { db_path: root_path.to_owned(), transport: Box::new(transport.clone()), tel_transport: Box::new(tel_transport.clone()), @@ -93,7 +93,7 @@ pub async fn setup_identifier( } pub async fn handle_delegation_request( - id: &mut Identifier, + id: &mut RedbIdentifier, keypair: &CryptoBox, witness_id: &[BasicPrefix], delegator_group_id: IdentifierPrefix, diff --git a/keriox_tests/src/settings.rs b/keriox_tests/src/settings.rs index c60df34f..ae46b0c6 100644 --- a/keriox_tests/src/settings.rs +++ b/keriox_tests/src/settings.rs @@ -18,7 +18,7 @@ impl AsyncTestContext for InfrastructureContext { let first_witness = { let wit_root = Builder::new().prefix("wit-db").tempdir().unwrap(); Arc::new( - WitnessListener::setup( + WitnessListener::setup_with_redb( Url::parse("http://127.0.0.1:3232").unwrap(), wit_root.path(), Some("ArwXoACJgOleVZ2PY7kXn7rA0II0mHYDhc6WrBH8fDAc".to_string()), @@ -39,7 +39,7 @@ impl AsyncTestContext for InfrastructureContext { let second_witness = { let wit_root = Builder::new().prefix("wit-db").tempdir().unwrap(); Arc::new( - WitnessListener::setup( + WitnessListener::setup_with_redb( Url::parse("http://127.0.0.1:3233").unwrap(), wit_root.path(), 
Some("ArwXoACJgOleVZ2PY7kXn7rA0II0mHYDhc6WrBH8fDAd".to_string()), @@ -63,7 +63,7 @@ impl AsyncTestContext for InfrastructureContext { let watcher_listener = { let root = Builder::new().prefix("watcher-test-db").tempdir().unwrap(); - WatcherListener::new(WatcherConfig { + WatcherListener::setup_with_redb(WatcherConfig { public_address: watcher_url.clone(), db_path: root.path().to_owned(), tel_storage_path: watcher_tel_path, diff --git a/keriox_tests/src/transport.rs b/keriox_tests/src/transport.rs index adbcf6e8..e14931c2 100644 --- a/keriox_tests/src/transport.rs +++ b/keriox_tests/src/transport.rs @@ -9,10 +9,11 @@ use keri_core::transport::TransportError; use teliox::{event::verifiable_event::VerifiableEvent, query::SignedTelQuery}; use watcher::{transport::WatcherTelTransport, Watcher}; use witness::Witness; +use keri_core::oobi_manager::RedbOobiStorage; pub enum TelTestActor { - Witness(Arc), - Watcher(Arc), + Witness(Arc>), + Watcher(Arc>), } impl TelTestActor { diff --git a/keriox_tests/tests/tel_from_watcher.rs b/keriox_tests/tests/tel_from_watcher.rs index 41e6fa5a..a1303c9e 100644 --- a/keriox_tests/tests/tel_from_watcher.rs +++ b/keriox_tests/tests/tel_from_watcher.rs @@ -27,7 +27,7 @@ async fn test_tel_from_watcher() -> Result<(), anyhow::Error> { let seed = "AK8F6AAiYDpXlWdj2O5F5-6wNCCNJh2A4XOlqwR_HwwH"; let witness_db_path = Builder::new().prefix("test-wit1-db").tempdir().unwrap(); Arc::new( - WitnessListener::setup( + WitnessListener::setup_with_redb( verifier_witness_url.clone(), witness_db_path.path(), Some(seed.to_string()), @@ -50,7 +50,7 @@ async fn test_tel_from_watcher() -> Result<(), anyhow::Error> { let seed = "AK8F6AAiYDpXlWdj2O5F5-6wNCCNJh2A4XOlqwR_Hwwg"; let witness_db_path = Builder::new().prefix("test-wit2-db").tempdir().unwrap(); Arc::new( - WitnessListener::setup( + WitnessListener::setup_with_redb( issuer_witness_url.clone(), witness_db_path.path(), Some(seed.to_string()), @@ -100,7 +100,7 @@ async fn test_tel_from_watcher() -> 
Result<(), anyhow::Error> { .await; let watcher_db_path = Builder::new().prefix("cont-test-db").tempdir().unwrap(); - let watcher_listener = Arc::new(WatcherListener::new(WatcherConfig { + let watcher_listener = Arc::new(WatcherListener::setup_with_redb(WatcherConfig { public_address: Url::parse("http://watcher1/").unwrap(), db_path: watcher_db_path.path().to_owned(), transport: Box::new(watcher_transport), diff --git a/support/teliox/Cargo.toml b/support/teliox/Cargo.toml index 8bbea58a..9c203932 100644 --- a/support/teliox/Cargo.toml +++ b/support/teliox/Cargo.toml @@ -9,6 +9,11 @@ repository.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +default = ["storage-redb"] +storage-redb = ["keri-core/storage-redb", "redb"] +storage-postgres = ["keri-core/storage-postgres", "sqlx", "async-std"] + [dependencies] keri-core = {path = "../../keriox_core", version= "0.17.13", features = ["query"]} said = { version = "0.4.3" } @@ -21,7 +26,9 @@ serde-hex = "0.1" chrono = { version = "0.4.18", features = ["serde"] } arrayref = "0.3.6" serde_cbor = "0.11.1" -redb = "2.6.0" +redb = { version = "2.6.0", optional = true } +sqlx = { version = "0.8", features = ["postgres", "runtime-async-std"], optional = true } +async-std = { version = "1", features = ["attributes"], optional = true } [dev-dependencies] diff --git a/support/teliox/src/database/mod.rs b/support/teliox/src/database/mod.rs index efdcfaab..d94e5a3f 100644 --- a/support/teliox/src/database/mod.rs +++ b/support/teliox/src/database/mod.rs @@ -1,20 +1,15 @@ use crate::{error::Error, event::verifiable_event::VerifiableEvent}; -use ::redb::Database; -use keri_core::{database::redb::WriteTxnMode, prefix::IdentifierPrefix}; +use keri_core::prefix::IdentifierPrefix; use said::SelfAddressingIdentifier; -use std::{ - fs::{create_dir_all, exists}, - path::Path, - sync::Arc, -}; + +#[cfg(feature = "storage-redb")] pub(crate) mod digest_key_database; 
+#[cfg(feature = "storage-redb")] pub mod redb; +#[cfg(feature = "storage-postgres")] +pub mod postgres; -pub trait TelEventDatabase { - fn new(path: impl AsRef) -> Result - where - Self: Sized; - +pub trait TelEventDatabase: Send + Sync { fn add_new_event(&self, event: VerifiableEvent, id: &IdentifierPrefix) -> Result<(), Error>; fn get_events( @@ -26,18 +21,83 @@ pub trait TelEventDatabase { &self, id: &IdentifierPrefix, ) -> Option>; + + fn log_event(&self, event: &VerifiableEvent) -> Result<(), Error>; + + fn get_event( + &self, + digest: &SelfAddressingIdentifier, + ) -> Result, Error>; } -pub trait TelLogDatabase { - fn log_event(&self, event: &VerifiableEvent, transaction: &WriteTxnMode) -> Result<(), Error>; - fn get(&self, digest: &SelfAddressingIdentifier) -> Result, Error>; +pub trait TelEscrowDatabase: Send + Sync { + fn missing_issuer_insert( + &self, + kel_digest: &str, + tel_digest: &SelfAddressingIdentifier, + ) -> Result<(), Error>; + + fn missing_issuer_get( + &self, + kel_digest: &str, + ) -> Result, Error>; + + fn missing_issuer_remove( + &self, + kel_digest: &str, + tel_digest: &SelfAddressingIdentifier, + ) -> Result<(), Error>; + + fn out_of_order_insert( + &self, + id: &IdentifierPrefix, + sn: u64, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error>; + + fn out_of_order_get( + &self, + id: &IdentifierPrefix, + sn: u64, + ) -> Result, Error>; + + fn out_of_order_remove( + &self, + id: &IdentifierPrefix, + sn: u64, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error>; + + fn missing_registry_insert( + &self, + registry_id: &str, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error>; + + fn missing_registry_get( + &self, + registry_id: &str, + ) -> Result, Error>; + + fn missing_registry_remove( + &self, + registry_id: &str, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error>; } -pub struct EscrowDatabase(pub(crate) Arc); +#[cfg(feature = "storage-redb")] +pub struct EscrowDatabase { + missing_issuer: 
digest_key_database::DigestKeyDatabase, + out_of_order: keri_core::database::redb::escrow_database::SnKeyDatabase, + missing_registry: digest_key_database::DigestKeyDatabase, +} +#[cfg(feature = "storage-redb")] impl EscrowDatabase { - pub fn new(file_path: &Path) -> Result { - // Create file if not exists + pub fn new(file_path: &std::path::Path) -> Result { + use keri_core::database::SequencedEventDatabase; + use std::fs::{create_dir_all, exists}; if !std::fs::exists(file_path).map_err(|e| Error::EscrowDatabaseError(e.to_string()))? { if let Some(parent) = file_path.parent() { if !exists(parent).map_err(|e| Error::EscrowDatabaseError(e.to_string()))? { @@ -46,8 +106,109 @@ impl EscrowDatabase { } } } - let db = - Database::create(file_path).map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; - Ok(Self(Arc::new(db))) + let db = std::sync::Arc::new( + ::redb::Database::create(file_path) + .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?, + ); + + let missing_issuer = + digest_key_database::DigestKeyDatabase::new(db.clone(), "missing_issuer_escrow"); + let out_of_order = + keri_core::database::redb::escrow_database::SnKeyDatabase::new(db.clone(), "out_of_order") + .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; + let missing_registry = + digest_key_database::DigestKeyDatabase::new(db, "missing_registry_escrow"); + + Ok(Self { + missing_issuer, + out_of_order, + missing_registry, + }) + } +} + +#[cfg(feature = "storage-redb")] +impl TelEscrowDatabase for EscrowDatabase { + fn missing_issuer_insert( + &self, + kel_digest: &str, + tel_digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + self.missing_issuer.insert(&kel_digest, tel_digest) + } + + fn missing_issuer_get( + &self, + kel_digest: &str, + ) -> Result, Error> { + self.missing_issuer.get(&kel_digest) + } + + fn missing_issuer_remove( + &self, + kel_digest: &str, + tel_digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + self.missing_issuer.remove(&kel_digest, 
tel_digest) + } + + fn out_of_order_insert( + &self, + id: &IdentifierPrefix, + sn: u64, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + use keri_core::database::SequencedEventDatabase; + self.out_of_order + .insert(id, sn, digest) + .map_err(|e| Error::EscrowDatabaseError(e.to_string())) + } + + fn out_of_order_get( + &self, + id: &IdentifierPrefix, + sn: u64, + ) -> Result, Error> { + use keri_core::database::SequencedEventDatabase; + let iter = self + .out_of_order + .get(id, sn) + .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; + Ok(iter.collect()) + } + + fn out_of_order_remove( + &self, + id: &IdentifierPrefix, + sn: u64, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + use keri_core::database::SequencedEventDatabase; + self.out_of_order + .remove(id, sn, digest) + .map_err(|e| Error::EscrowDatabaseError(e.to_string())) + } + + fn missing_registry_insert( + &self, + registry_id: &str, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + self.missing_registry.insert(®istry_id, digest) + } + + fn missing_registry_get( + &self, + registry_id: &str, + ) -> Result, Error> { + self.missing_registry.get(®istry_id) + } + + fn missing_registry_remove( + &self, + registry_id: &str, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + self.missing_registry.remove(®istry_id, digest) } } diff --git a/support/teliox/src/database/postgres.rs b/support/teliox/src/database/postgres.rs new file mode 100644 index 00000000..e259de67 --- /dev/null +++ b/support/teliox/src/database/postgres.rs @@ -0,0 +1,586 @@ +use crate::{ + database::{TelEscrowDatabase, TelEventDatabase}, + error::Error, + event::{Event, verifiable_event::VerifiableEvent}, +}; +use keri_core::prefix::IdentifierPrefix; +use said::SelfAddressingIdentifier; +use sqlx::PgPool; + +pub struct PostgresTelDatabase { + pool: PgPool, +} + +impl PostgresTelDatabase { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } + + fn get_events_from_index( 
+ &self, + index_table: &'static str, + id: &IdentifierPrefix, + ) -> Result, Error> { + let id_str = id.to_string(); + let pool = self.pool.clone(); + async_std::task::block_on(async move { + let query = + format!("SELECT digest FROM {index_table} WHERE identifier = $1 ORDER BY sn ASC"); + let rows: Vec<(String,)> = sqlx::query_as(&query) + .bind(&id_str) + .fetch_all(&pool) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + + let mut events = Vec::new(); + for (digest,) in rows { + let maybe: Option<(Vec,)> = + sqlx::query_as("SELECT event_data FROM tel_events WHERE digest = $1") + .bind(&digest) + .fetch_optional(&pool) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + + if let Some((data,)) = maybe { + let event = serde_cbor::from_slice::(&data) + .map_err(|e| Error::Generic(format!("Deserialization error: {}", e)))?; + events.push(event); + } + } + Ok(events) + }) + } +} + +impl TelEventDatabase for PostgresTelDatabase { + fn add_new_event(&self, event: VerifiableEvent, _id: &IdentifierPrefix) -> Result<(), Error> { + let pool = self.pool.clone(); + async_std::task::block_on(async move { + let digest = event + .event + .get_digest() + .map_err(|_| Error::Generic("Event has no digest".to_string()))?; + let digest_str = digest.to_string(); + let event_data = serde_cbor::to_vec(&event) + .map_err(|e| Error::Generic(format!("Serialization error: {}", e)))?; + + let mut tx = pool + .begin() + .await + .map_err(|e| Error::Generic(e.to_string()))?; + + sqlx::query( + "INSERT INTO tel_events (digest, event_data) VALUES ($1, $2) \ + ON CONFLICT (digest) DO NOTHING", + ) + .bind(&digest_str) + .bind(&event_data) + .execute(&mut *tx) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + + let id_str = event.event.get_prefix().to_string(); + let sn = event.event.get_sn() as i64; + + match &event.event { + Event::Management(_) => { + sqlx::query( + "INSERT INTO management_tels (identifier, sn, digest) \ + VALUES ($1, $2, $3) ON CONFLICT (identifier, 
sn) DO NOTHING", + ) + .bind(&id_str) + .bind(sn) + .bind(&digest_str) + .execute(&mut *tx) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + } + Event::Vc(_) => { + sqlx::query( + "INSERT INTO vc_tels (identifier, sn, digest) \ + VALUES ($1, $2, $3) ON CONFLICT (identifier, sn) DO NOTHING", + ) + .bind(&id_str) + .bind(sn) + .bind(&digest_str) + .execute(&mut *tx) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + } + } + + tx.commit() + .await + .map_err(|e| Error::Generic(e.to_string()))?; + Ok(()) + }) + } + + fn get_events( + &self, + id: &IdentifierPrefix, + ) -> Option> { + let events = self.get_events_from_index("vc_tels", id).ok()?; + (!events.is_empty()).then(|| events.into_iter()) + } + + fn get_management_events( + &self, + id: &IdentifierPrefix, + ) -> Option> { + let events = self.get_events_from_index("management_tels", id).ok()?; + (!events.is_empty()).then(|| events.into_iter()) + } + + fn log_event(&self, event: &VerifiableEvent) -> Result<(), Error> { + let pool = self.pool.clone(); + let digest = event + .event + .get_digest() + .map_err(|_| Error::Generic("Event has no digest".to_string()))?; + let digest_str = digest.to_string(); + let event_data = serde_cbor::to_vec(event) + .map_err(|e| Error::Generic(format!("Serialization error: {}", e)))?; + + async_std::task::block_on(async move { + sqlx::query( + "INSERT INTO tel_events (digest, event_data) VALUES ($1, $2) \ + ON CONFLICT (digest) DO NOTHING", + ) + .bind(&digest_str) + .bind(&event_data) + .execute(&pool) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + Ok(()) + }) + } + + fn get_event( + &self, + digest: &SelfAddressingIdentifier, + ) -> Result, Error> { + let pool = self.pool.clone(); + let digest_str = digest.to_string(); + async_std::task::block_on(async move { + let maybe: Option<(Vec,)> = + sqlx::query_as("SELECT event_data FROM tel_events WHERE digest = $1") + .bind(&digest_str) + .fetch_optional(&pool) + .await + .map_err(|e| 
Error::Generic(e.to_string()))?; + + match maybe { + None => Ok(None), + Some((data,)) => { + let event = serde_cbor::from_slice::(&data) + .map_err(|e| Error::Generic(format!("Deserialization error: {}", e)))?; + Ok(Some(event)) + } + } + }) + } +} + +pub struct PostgresTelEscrowDatabase { + pool: PgPool, +} + +impl PostgresTelEscrowDatabase { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } +} + +impl TelEscrowDatabase for PostgresTelEscrowDatabase { + fn missing_issuer_insert( + &self, + kel_digest: &str, + tel_digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + let pool = self.pool.clone(); + let kel = kel_digest.to_string(); + let tel = tel_digest.to_string(); + async_std::task::block_on(async move { + sqlx::query( + "INSERT INTO tel_missing_issuer_escrow (kel_digest, tel_digest) \ + VALUES ($1, $2) ON CONFLICT DO NOTHING", + ) + .bind(&kel) + .bind(&tel) + .execute(&pool) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + Ok(()) + }) + } + + fn missing_issuer_get( + &self, + kel_digest: &str, + ) -> Result, Error> { + let pool = self.pool.clone(); + let kel = kel_digest.to_string(); + async_std::task::block_on(async move { + let rows: Vec<(String,)> = sqlx::query_as( + "SELECT tel_digest FROM tel_missing_issuer_escrow WHERE kel_digest = $1", + ) + .bind(&kel) + .fetch_all(&pool) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + + rows.into_iter() + .map(|(s,)| { + s.parse::() + .map_err(|e| Error::Generic(format!("Invalid digest: {}", e))) + }) + .collect() + }) + } + + fn missing_issuer_remove( + &self, + kel_digest: &str, + tel_digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + let pool = self.pool.clone(); + let kel = kel_digest.to_string(); + let tel = tel_digest.to_string(); + async_std::task::block_on(async move { + sqlx::query( + "DELETE FROM tel_missing_issuer_escrow \ + WHERE kel_digest = $1 AND tel_digest = $2", + ) + .bind(&kel) + .bind(&tel) + .execute(&pool) + .await + .map_err(|e| 
Error::Generic(e.to_string()))?; + Ok(()) + }) + } + + fn out_of_order_insert( + &self, + id: &IdentifierPrefix, + sn: u64, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + let pool = self.pool.clone(); + let id_str = id.to_string(); + let sn_i = sn as i64; + let dig = digest.to_string(); + async_std::task::block_on(async move { + sqlx::query( + "INSERT INTO tel_out_of_order_escrow (identifier, sn, tel_digest) \ + VALUES ($1, $2, $3) ON CONFLICT DO NOTHING", + ) + .bind(&id_str) + .bind(sn_i) + .bind(&dig) + .execute(&pool) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + Ok(()) + }) + } + + fn out_of_order_get( + &self, + id: &IdentifierPrefix, + sn: u64, + ) -> Result, Error> { + let pool = self.pool.clone(); + let id_str = id.to_string(); + let sn_i = sn as i64; + async_std::task::block_on(async move { + let rows: Vec<(String,)> = sqlx::query_as( + "SELECT tel_digest FROM tel_out_of_order_escrow \ + WHERE identifier = $1 AND sn = $2", + ) + .bind(&id_str) + .bind(sn_i) + .fetch_all(&pool) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + + rows.into_iter() + .map(|(s,)| { + s.parse::() + .map_err(|e| Error::Generic(format!("Invalid digest: {}", e))) + }) + .collect() + }) + } + + fn out_of_order_remove( + &self, + id: &IdentifierPrefix, + sn: u64, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + let pool = self.pool.clone(); + let id_str = id.to_string(); + let sn_i = sn as i64; + let dig = digest.to_string(); + async_std::task::block_on(async move { + sqlx::query( + "DELETE FROM tel_out_of_order_escrow \ + WHERE identifier = $1 AND sn = $2 AND tel_digest = $3", + ) + .bind(&id_str) + .bind(sn_i) + .bind(&dig) + .execute(&pool) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + Ok(()) + }) + } + + fn missing_registry_insert( + &self, + registry_id: &str, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + let pool = self.pool.clone(); + let reg = registry_id.to_string(); + let dig = 
digest.to_string(); + async_std::task::block_on(async move { + sqlx::query( + "INSERT INTO tel_missing_registry_escrow (registry_id, tel_digest) \ + VALUES ($1, $2) ON CONFLICT DO NOTHING", + ) + .bind(®) + .bind(&dig) + .execute(&pool) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + Ok(()) + }) + } + + fn missing_registry_get( + &self, + registry_id: &str, + ) -> Result, Error> { + let pool = self.pool.clone(); + let reg = registry_id.to_string(); + async_std::task::block_on(async move { + let rows: Vec<(String,)> = sqlx::query_as( + "SELECT tel_digest FROM tel_missing_registry_escrow WHERE registry_id = $1", + ) + .bind(®) + .fetch_all(&pool) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + + rows.into_iter() + .map(|(s,)| { + s.parse::() + .map_err(|e| Error::Generic(format!("Invalid digest: {}", e))) + }) + .collect() + }) + } + + fn missing_registry_remove( + &self, + registry_id: &str, + digest: &SelfAddressingIdentifier, + ) -> Result<(), Error> { + let pool = self.pool.clone(); + let reg = registry_id.to_string(); + let dig = digest.to_string(); + async_std::task::block_on(async move { + sqlx::query( + "DELETE FROM tel_missing_registry_escrow \ + WHERE registry_id = $1 AND tel_digest = $2", + ) + .bind(®) + .bind(&dig) + .execute(&pool) + .await + .map_err(|e| Error::Generic(e.to_string()))?; + Ok(()) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + database::{TelEscrowDatabase, TelEventDatabase}, + event::verifiable_event::VerifiableEvent, + }; + use keri_core::database::postgres::PostgresDatabase; + use sqlx::postgres::PgPoolOptions; + + // CESR stream with 3 TEL events: vcp (Management), bis (Vc issuance), brv (Vc revocation) + const TEL_EVENTS: &str = 
r#"{"v":"KERI10JSON0000e0_","t":"vcp","d":"EJPLd0ZMdbusC-nEQgXfVDcNWPkaZfhPAYH43ZqIrOOA","i":"EPafIvNeW6xYZZhmXBO3hc3GtCHv-8jDgdZsKAFffhLN","s":"0","ii":"EPyhGnPEzI1OjbmvNCEsiQfinmwxGcJgyDK_Nx9hnI2l","c":["NB"],"bt":"0","b":[]}-GAB0AAAAAAAAAAAAAAAAAAAAAABENMILl_3-wbKmzOR5IC4rOjwwXE-LFafC34vzduBn2O1{"v":"KERI10JSON000162_","t":"bis","d":"EH--8AOVXFyZ5HdshHVUjYIgrxqIRczzzbTZiZRzl6v8","i":"EEvXZtq623byRrE7h34J7sosXnSlXT5oKMuvntyqTgVa","s":"0","ii":"EPyhGnPEzI1OjbmvNCEsiQfinmwxGcJgyDK_Nx9hnI2l","ra":{"i":"EPafIvNeW6xYZZhmXBO3hc3GtCHv-8jDgdZsKAFffhLN","s":"0","d":"EJPLd0ZMdbusC-nEQgXfVDcNWPkaZfhPAYH43ZqIrOOA"},"dt":"2023-06-30T08:04:23.180342+00:00"}-GAB0AAAAAAAAAAAAAAAAAAAAAACEPBB-kmu3NQkuDUijczDscu6SMkOq_XznhufG2DFiveh{"v":"KERI10JSON000161_","t":"brv","d":"EBr1rgUjzKeGKRijXUkc-Sx_LzB1HUxyd3qB6zc8Jaga","i":"EEvXZtq623byRrE7h34J7sosXnSlXT5oKMuvntyqTgVa","s":"1","p":"EH--8AOVXFyZ5HdshHVUjYIgrxqIRczzzbTZiZRzl6v8","ra":{"i":"EPafIvNeW6xYZZhmXBO3hc3GtCHv-8jDgdZsKAFffhLN","s":"0","d":"EJPLd0ZMdbusC-nEQgXfVDcNWPkaZfhPAYH43ZqIrOOA"},"dt":"2023-06-30T08:04:23.186687+00:00"}-GAB0AAAAAAAAAAAAAAAAAAAAAADEKtt7vosEnv-Y0QVRfZq5HFmRZ1e_l5NeJq-zq_wd2ht"#; + + fn get_database_url() -> String { + std::env::var("DATABASE_URL") + .unwrap_or_else(|_| "postgres://postgres:postgres@localhost:5432/keri_test".to_string()) + } + + /// Ensures the test database exists with a fresh schema, serialized across parallel tests. 
+ fn ensure_schema() { + static INIT: std::sync::Mutex = std::sync::Mutex::new(false); + let mut done = INIT.lock().unwrap(); + if *done { + return; + } + let result = std::panic::catch_unwind(|| { + async_std::task::block_on(async { + let url = get_database_url(); + let (base, db_name) = url.rsplit_once('/').expect("Invalid DATABASE_URL"); + let admin = PgPoolOptions::new() + .max_connections(2) + .connect(&format!("{}/postgres", base)) + .await + .expect("Failed to connect to admin db"); + // Drop and recreate to get a clean schema + let _ = sqlx::query(&format!("DROP DATABASE IF EXISTS \"{}\" WITH (FORCE)", db_name)) + .execute(&admin) + .await; + sqlx::query(&format!("CREATE DATABASE \"{}\"", db_name)) + .execute(&admin) + .await + .expect("Failed to create test database"); + + let db = PostgresDatabase::new(&url) + .await + .expect("Failed to connect to database"); + db.run_migrations() + .await + .expect("Failed to run migrations"); + }); + }); + if result.is_err() { + panic!("ensure_schema failed — check DATABASE_URL and postgres connection"); + } + *done = true; + } + + async fn setup_pool() -> PgPool { + ensure_schema(); + PgPoolOptions::new() + .max_connections(5) + .connect(&get_database_url()) + .await + .expect("Failed to connect to database") + } + + fn parse_tel_events() -> (VerifiableEvent, VerifiableEvent, VerifiableEvent) { + let parsed = VerifiableEvent::parse(TEL_EVENTS.as_bytes()).unwrap(); + (parsed[0].clone(), parsed[1].clone(), parsed[2].clone()) + } + + #[async_std::test] + async fn test_add_and_get_management_event() { + let db = PostgresTelDatabase::new(setup_pool().await); + let (vcp, _, _) = parse_tel_events(); + + let id = vcp.event.get_prefix(); + db.add_new_event(vcp.clone(), &id).unwrap(); + + let events: Vec<_> = db.get_management_events(&id).unwrap().collect(); + assert_eq!(events.len(), 1); + assert_eq!(events[0], vcp); + } + + #[async_std::test] + async fn test_add_and_get_vc_events() { + let db = 
PostgresTelDatabase::new(setup_pool().await); + let (_, iss, rev) = parse_tel_events(); + + let id = iss.event.get_prefix(); + db.add_new_event(iss.clone(), &id).unwrap(); + db.add_new_event(rev.clone(), &id).unwrap(); + + let events: Vec<_> = db.get_events(&id).unwrap().collect(); + assert_eq!(events.len(), 2); + assert_eq!(events[0], iss); + assert_eq!(events[1], rev); + } + + #[async_std::test] + async fn test_log_event_and_get_by_digest() { + let db = PostgresTelDatabase::new(setup_pool().await); + let (vcp, _, _) = parse_tel_events(); + + db.log_event(&vcp).unwrap(); + + let digest = vcp.event.get_digest().unwrap(); + let result = db.get_event(&digest).unwrap(); + assert_eq!(result, Some(vcp)); + } + + #[async_std::test] + async fn test_get_event_missing_returns_none() { + let db = PostgresTelDatabase::new(setup_pool().await); + // Valid SAI format (E prefix = Blake3-256, 44 chars) that is never inserted + let digest: said::SelfAddressingIdentifier = + "EAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA".parse().unwrap(); + let result = db.get_event(&digest).unwrap(); + assert_eq!(result, None); + } + + #[async_std::test] + async fn test_missing_issuer_escrow_insert_get_remove() { + let db = PostgresTelEscrowDatabase::new(setup_pool().await); + let (vcp, _, _) = parse_tel_events(); + let tel_digest = vcp.event.get_digest().unwrap(); + let kel_digest = "EKel_test_digest_insert_get_remove"; + + db.missing_issuer_insert(kel_digest, &tel_digest).unwrap(); + + let results = db.missing_issuer_get(kel_digest).unwrap(); + assert!(results.contains(&tel_digest)); + + db.missing_issuer_remove(kel_digest, &tel_digest).unwrap(); + + let results = db.missing_issuer_get(kel_digest).unwrap(); + assert!(!results.contains(&tel_digest)); + } + + #[async_std::test] + async fn test_out_of_order_escrow_insert_get_remove() { + let db = PostgresTelEscrowDatabase::new(setup_pool().await); + let (_, iss, _) = parse_tel_events(); + let tel_digest = iss.event.get_digest().unwrap(); + let id = 
iss.event.get_prefix(); + let sn = iss.event.get_sn(); + + db.out_of_order_insert(&id, sn, &tel_digest).unwrap(); + + let results = db.out_of_order_get(&id, sn).unwrap(); + assert!(results.contains(&tel_digest)); + + db.out_of_order_remove(&id, sn, &tel_digest).unwrap(); + + let results = db.out_of_order_get(&id, sn).unwrap(); + assert!(!results.contains(&tel_digest)); + } + + #[async_std::test] + async fn test_missing_registry_escrow_insert_get_remove() { + let db = PostgresTelEscrowDatabase::new(setup_pool().await); + let (vcp, _, _) = parse_tel_events(); + let tel_digest = vcp.event.get_digest().unwrap(); + let registry_id = "EPafIvNeW6xYZZhmXBO3hc3GtCHv-8jDgdZsKAFffhLN"; + + db.missing_registry_insert(registry_id, &tel_digest).unwrap(); + + let results = db.missing_registry_get(registry_id).unwrap(); + assert!(results.contains(&tel_digest)); + + db.missing_registry_remove(registry_id, &tel_digest).unwrap(); + + let results = db.missing_registry_get(registry_id).unwrap(); + assert!(!results.contains(&tel_digest)); + } +} diff --git a/support/teliox/src/database/redb.rs b/support/teliox/src/database/redb.rs index 900db65b..5c849b3e 100644 --- a/support/teliox/src/database/redb.rs +++ b/support/teliox/src/database/redb.rs @@ -1,5 +1,5 @@ use crate::{ - database::{TelEventDatabase, TelLogDatabase}, + database::TelEventDatabase, error::Error, event::{ manager_event::ManagerTelEventMessage, vc_event::VCEventMessage, @@ -11,6 +11,7 @@ use keri_core::{ prefix::IdentifierPrefix, }; use redb::{Database, ReadTransaction, TableDefinition}; +use said::SelfAddressingIdentifier; use std::{fs, path::Path, sync::Arc}; /// Events store. (event digest) -> tel event @@ -179,22 +180,8 @@ impl LogTelDb { } } -impl TelLogDatabase for RedbTelDatabase { - /// Saves provided event. Key is it's digest and value is event. 
- fn log_event(&self, event: &VerifiableEvent, transaction: &WriteTxnMode) -> Result<(), Error> { - self.events_log.log_event(event, transaction) - } - - fn get( - &self, - digest: &said::SelfAddressingIdentifier, - ) -> Result, Error> { - self.events_log.get(digest) - } -} - -impl TelEventDatabase for RedbTelDatabase { - fn new(db_path: impl AsRef) -> Result { +impl RedbTelDatabase { + pub fn new(db_path: impl AsRef) -> Result { if let Some(parent) = db_path.as_ref().parent() { fs::create_dir_all(parent).unwrap(); } @@ -207,7 +194,9 @@ impl TelEventDatabase for RedbTelDatabase { db, }) } +} +impl TelEventDatabase for RedbTelDatabase { fn add_new_event(&self, event: VerifiableEvent, id: &IdentifierPrefix) -> Result<(), Error> { let write_txn = self.db.begin_write()?; let txn_mode = WriteTxnMode::UseExisting(&write_txn); @@ -261,4 +250,15 @@ impl TelEventDatabase for RedbTelDatabase { Some(out_iter.collect::>().into_iter()) } } + + fn log_event(&self, event: &VerifiableEvent) -> Result<(), Error> { + self.events_log.log_event(event, &WriteTxnMode::CreateNew) + } + + fn get_event( + &self, + digest: &SelfAddressingIdentifier, + ) -> Result, Error> { + self.events_log.get(digest) + } } diff --git a/support/teliox/src/error.rs b/support/teliox/src/error.rs index 678aa1fc..5d5716c8 100644 --- a/support/teliox/src/error.rs +++ b/support/teliox/src/error.rs @@ -44,24 +44,28 @@ pub enum Error { RwLockingError, } +#[cfg(feature = "storage-redb")] impl From for Error { fn from(_: redb::TransactionError) -> Self { Error::RedbError } } +#[cfg(feature = "storage-redb")] impl From for Error { fn from(_: redb::TableError) -> Self { Error::RedbError } } +#[cfg(feature = "storage-redb")] impl From for Error { fn from(_: redb::CommitError) -> Self { Error::RedbError } } +#[cfg(feature = "storage-redb")] impl From for Error { fn from(_: redb::StorageError) -> Self { Error::RedbError diff --git a/support/teliox/src/lib.rs b/support/teliox/src/lib.rs index 2014682e..1d394d63 100644 --- 
a/support/teliox/src/lib.rs +++ b/support/teliox/src/lib.rs @@ -6,3 +6,7 @@ pub mod query; pub mod seal; pub mod state; pub mod tel; + +pub use database::{TelEscrowDatabase, TelEventDatabase}; +#[cfg(feature = "storage-postgres")] +pub use database::postgres::{PostgresTelDatabase, PostgresTelEscrowDatabase}; diff --git a/support/teliox/src/processor/escrow/missing_issuer.rs b/support/teliox/src/processor/escrow/missing_issuer.rs index 38debb07..f9578ac1 100644 --- a/support/teliox/src/processor/escrow/missing_issuer.rs +++ b/support/teliox/src/processor/escrow/missing_issuer.rs @@ -1,7 +1,7 @@ use std::{sync::Arc, time::Duration}; use keri_core::{ - database::redb::{RedbDatabase, WriteTxnMode}, + database::EventDatabase, processor::{ event_storage::EventStorage, notification::{Notification, NotificationBus, Notifier}, @@ -10,9 +10,7 @@ use keri_core::{ use said::SelfAddressingIdentifier; use crate::{ - database::{ - digest_key_database::DigestKeyDatabase, EscrowDatabase, TelEventDatabase, TelLogDatabase, - }, + database::{TelEscrowDatabase, TelEventDatabase}, error::Error, event::Event, processor::{ @@ -22,33 +20,36 @@ use crate::{ }, }; -pub struct MissingIssuerEscrow { - kel_reference: Arc>, +pub struct MissingIssuerEscrow { + kel_reference: Arc>, tel_reference: Arc>, publisher: TelNotificationBus, - escrowed_missing_issuer: DigestKeyDatabase, + escrow_db: Arc, } -impl MissingIssuerEscrow { +impl + MissingIssuerEscrow +{ pub fn new( db: Arc, - escrow_db: &EscrowDatabase, - duration: Duration, - kel_reference: Arc>, + escrow_db: Arc, + _duration: Duration, + kel_reference: Arc>, bus: TelNotificationBus, ) -> Self { - let escrow = DigestKeyDatabase::new(escrow_db.0.clone(), "missing_issuer_escrow"); - - let tel_event_storage = Arc::new(TelEventStorage::new(db.clone())); + let tel_event_storage = Arc::new(TelEventStorage::new(db)); Self { tel_reference: tel_event_storage, - escrowed_missing_issuer: escrow, + escrow_db, kel_reference, publisher: bus, } } } -impl 
Notifier for MissingIssuerEscrow { + +impl Notifier + for MissingIssuerEscrow +{ fn notify( &self, notification: &Notification, @@ -57,7 +58,6 @@ impl Notifier for MissingIssuerEscrow { match notification { Notification::KeyEventAdded(ev_message) => { let digest = ev_message.event_message.digest()?; - self.process_missing_issuer_escrow(&digest).unwrap(); } _ => { @@ -66,12 +66,13 @@ impl Notifier for MissingIssuerEscrow { )) } } - Ok(()) } } -impl TelNotifier for MissingIssuerEscrow { +impl TelNotifier + for MissingIssuerEscrow +{ fn notify( &self, notification: &TelNotification, @@ -80,12 +81,10 @@ impl TelNotifier for MissingIssuerEscrow { let tel_event_digest = event.event.get_digest()?; - self.tel_reference - .db - .log_event(&event, &WriteTxnMode::CreateNew)?; + self.tel_reference.db.log_event(event)?; let missing_event_digest = event.seal.seal.digest.clone().to_string(); - self.escrowed_missing_issuer - .insert(&missing_event_digest.as_str(), &tel_event_digest) + self.escrow_db + .missing_issuer_insert(&missing_event_digest, &tel_event_digest) .map_err(|e| Error::EscrowDatabaseError(e.to_string())) } _ => return Err(Error::Generic("Wrong notification".into())), @@ -93,15 +92,16 @@ impl TelNotifier for MissingIssuerEscrow MissingIssuerEscrow { - /// Reprocess escrowed events that need issuer event of given digest for acceptance. 
+impl + MissingIssuerEscrow +{ pub fn process_missing_issuer_escrow( &self, said: &SelfAddressingIdentifier, ) -> Result<(), Error> { - if let Ok(esc) = self.escrowed_missing_issuer.get(&said.to_string().as_str()) { + if let Ok(esc) = self.escrow_db.missing_issuer_get(&said.to_string()) { for digest in esc { - let event = self.tel_reference.db.get(&digest)?.unwrap(); + let event = self.tel_reference.db.get_event(&digest)?.unwrap(); let kel_event_digest = event.event.get_digest()?; let validator = TelEventValidator::new(self.tel_reference.clone(), self.kel_reference.clone()); @@ -111,40 +111,35 @@ impl MissingIssuerEscrow { }; match result { Ok(_) => { - // remove from escrow - self.escrowed_missing_issuer - .remove(said, &event.event.get_digest()?) + self.escrow_db + .missing_issuer_remove(&said.to_string(), &event.event.get_digest()?) .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; - // accept tel event self.tel_reference.add_event(event.clone())?; - self.publisher .notify(&TelNotification::TelEventAdded(event))?; } Err(Error::MissingSealError) => { - // remove from escrow - self.escrowed_missing_issuer - .remove(said, &kel_event_digest) - .unwrap(); + self.escrow_db + .missing_issuer_remove(&said.to_string(), &kel_event_digest) + .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; } Err(Error::OutOfOrderError) => { - self.escrowed_missing_issuer - .remove(said, &kel_event_digest) - .unwrap(); + self.escrow_db + .missing_issuer_remove(&said.to_string(), &kel_event_digest) + .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; self.publisher.notify(&TelNotification::OutOfOrder(event))?; } Err(Error::MissingRegistryError) => { - self.escrowed_missing_issuer - .remove(said, &kel_event_digest) - .unwrap(); + self.escrow_db + .missing_issuer_remove(&said.to_string(), &kel_event_digest) + .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; self.publisher .notify(&TelNotification::MissingRegistry(event))?; } - Err(_e) => (), // keep in escrow, 
+ Err(_e) => (), } } }; - Ok(()) } } @@ -162,10 +157,9 @@ mod tests { notification::JustNotification, Processor, }, }; - use redb::Database; use crate::{ - database::{redb::RedbTelDatabase, EscrowDatabase, TelEventDatabase}, + database::{redb::RedbTelDatabase, EscrowDatabase}, error::Error, event::{manager_event, verifiable_event::VerifiableEvent}, processor::{ @@ -181,10 +175,9 @@ mod tests { pub fn test_missing_issuer_escrow() -> Result<(), Error> { use tempfile::Builder; - // Setup issuer key event log. Without ixn events tel event's can't be validated. let keri_root = Builder::new().prefix("test-db").tempfile().unwrap(); let keri_db = Arc::new(RedbDatabase::new(keri_root.path()).unwrap()); - let mut keri_processor = BasicProcessor::new(keri_db.clone(), None); + let keri_processor = BasicProcessor::new(keri_db.clone(), None); let keri_storage = Arc::new(EventStorage::new(keri_db.clone())); let issuer_kel = r#"{"v":"KERI10JSON00012b_","t":"icp","d":"EETk5xW-rl2TgHTTXr8m5kGXiC30m3gMgsYcBAjOE9eI","i":"EETk5xW-rl2TgHTTXr8m5kGXiC30m3gMgsYcBAjOE9eI","s":"0","kt":"1","k":["DHdoiqT1iac2HI6-HfCYcc01Piz2FTTPvZDFt6vADioD"],"nt":"1","n":["EH8IzIWeQFiUr3rr2dh8xAiW9Akwl6EooDt8iduQYyq_"],"bt":"0","b":[],"c":[],"a":[]}-AABAABvFFeXb9uW2G16o3C9xJZvY3a_utMPxd4NIUcGWRTqykMO1NzKwjsA_AQrOEwgO5jselWHREcK6vcAxRfv6-QC{"v":"KERI10JSON00013a_","t":"ixn","d":"EMOzEVoFjbkS3ZS5JtmJO4LeZ4gydbr8iXNrEQAt1OR2","i":"EETk5xW-rl2TgHTTXr8m5kGXiC30m3gMgsYcBAjOE9eI","s":"1","p":"EETk5xW-rl2TgHTTXr8m5kGXiC30m3gMgsYcBAjOE9eI","a":[{"i":"EF3TVac5quxrbLGLKAHF21laISjMgjYQAIg3OsTen969","s":"0","d":"ENIKpuUkjM-1K2Sv_TZwF_k8FTVkefAgy8sIpiFp0uWh"}]}-AABAACvrSS_EZUMKQ6Ax8FaB_Sf99O0y6MmfoRDBKMphVWWtuCOlFQm6N0XrTwtYxO3pO0AEZkJ1vzu52-RDK-w3YAN{"v":"KERI10JSON00013a_","t":"ixn","d":"EDvnfU2yMZUXEy9D_22YOkeSZOq6YG9zfItawvx3GR_6","i":"EETk5xW-rl2TgHTTXr8m5kGXiC30m3gMgsYcBAjOE9eI","s":"2","p":"EMOzEVoFjbkS3ZS5JtmJO4LeZ4gydbr8iXNrEQAt1OR2","a":[{"i":"EC8Oej-3HAUpBY_kxzBK3B-0RV9j4dXw1H0NRKxJg7g-","s":"0","d":"EDBM1ys50vE
JxRzvBjTOrmOhokELjVtozXy3ZbJ8-KFk"}]}-AABAAABtEQ7SoGt2IcZBMX0GaEaMqGdMsrGpj1fABDKgE5dA7s7AGXTkWrZjzA4GXkGXuOspi6upqBhpxr6d5ySeKQH"#; @@ -193,14 +186,12 @@ mod tests { let issuer_icp = kel[0].clone(); let issuer_vcp_ixn = kel[1].clone(); - // Incept identifier keri_processor.process(&issuer_icp)?; - // Initiate tel and it's escrows let tel_root = Builder::new().prefix("test-db").tempfile().unwrap(); let tel_escrow_root = Builder::new().prefix("test-db").tempfile().unwrap(); - let db = EscrowDatabase::new(tel_escrow_root.path()).unwrap(); + let db = Arc::new(EscrowDatabase::new(tel_escrow_root.path()).unwrap()); let tel_events_db = Arc::new(RedbTelDatabase::new(&tel_root.path()).unwrap()); let tel_storage = Arc::new(TelEventStorage::new(tel_events_db.clone())); @@ -208,7 +199,7 @@ mod tests { let missing_issuer_escrow = Arc::new(MissingIssuerEscrow::new( tel_events_db, - &db, + db, Duration::from_secs(100), keri_storage.clone(), tel_bus.clone(), @@ -224,7 +215,7 @@ mod tests { &vec![JustNotification::KeyEventAdded], )?; - let processor = TelEventProcessor::new(keri_storage, tel_storage.clone(), Some(tel_bus)); // TelEventProcessor{database: TelEventDatabase::new(db, db_escrow)}; + let processor = TelEventProcessor::new(keri_storage, tel_storage.clone(), Some(tel_bus)); let issuer_prefix: IdentifierPrefix = "EETk5xW-rl2TgHTTXr8m5kGXiC30m3gMgsYcBAjOE9eI" .parse() @@ -246,23 +237,16 @@ mod tests { )?; let management_tel_prefix = vcp.get_prefix(); - - // before applying vcp to management tel, insert anchor event seal with proper ixn event data. let verifiable_vcp = VerifiableEvent::new(vcp.clone(), dummy_source_seal.clone().into()); processor.process(verifiable_vcp.clone())?; - // Check management state. Vcp event should't be accepted, because of - // missing issuer event. It should be in missing issuer escrow. let st = tel_storage.compute_management_tel_state(&management_tel_prefix)?; assert_eq!(st, None); - // check if vcp event is in db. 
let man_event_from_db = tel_storage.get_management_event_at_sn(&management_tel_prefix, 0)?; assert!(man_event_from_db.is_none()); - // Process missing ixn in issuer's kel. Now escrowed vcp event should be - // accepted. keri_processor.process(&issuer_vcp_ixn)?; let management_state = tel_storage @@ -270,7 +254,6 @@ mod tests { .unwrap(); assert_eq!(management_state.sn, 0); - // check if vcp event is in db. let man_event_from_db = tel_storage.get_management_event_at_sn(&management_tel_prefix, 0)?; assert!(man_event_from_db.is_some()); diff --git a/support/teliox/src/processor/escrow/missing_registry.rs b/support/teliox/src/processor/escrow/missing_registry.rs index dda3ee91..f9582543 100644 --- a/support/teliox/src/processor/escrow/missing_registry.rs +++ b/support/teliox/src/processor/escrow/missing_registry.rs @@ -1,15 +1,13 @@ use std::{sync::Arc, time::Duration}; use keri_core::{ - database::redb::{RedbDatabase, WriteTxnMode}, + database::EventDatabase, prefix::IdentifierPrefix, processor::event_storage::EventStorage, }; use crate::{ - database::{ - digest_key_database::DigestKeyDatabase, EscrowDatabase, TelEventDatabase, TelLogDatabase, - }, + database::{TelEscrowDatabase, TelEventDatabase}, error::Error, processor::{ notification::{TelNotification, TelNotificationBus, TelNotifier}, @@ -18,31 +16,33 @@ use crate::{ }, }; -pub struct MissingRegistryEscrow { +pub struct MissingRegistryEscrow { tel_reference: Arc>, - kel_reference: Arc>, - // Key is the registry id, value is the escrowed tel events digests - escrowed_missing_registry: DigestKeyDatabase, + kel_reference: Arc>, + escrow_db: Arc, } -impl MissingRegistryEscrow { +impl + MissingRegistryEscrow +{ pub fn new( tel_reference: Arc, - kel_reference: Arc>, - escrow_db: &EscrowDatabase, - duration: Duration, + kel_reference: Arc>, + escrow_db: Arc, + _duration: Duration, ) -> Self { - let escrow = DigestKeyDatabase::new(escrow_db.0.clone(), "missing_registry_escrow"); - let tel_event_storage = 
Arc::new(TelEventStorage::new(tel_reference.clone())); + let tel_event_storage = Arc::new(TelEventStorage::new(tel_reference)); Self { tel_reference: tel_event_storage, kel_reference, - escrowed_missing_registry: escrow, + escrow_db, } } } -impl TelNotifier for MissingRegistryEscrow { +impl TelNotifier + for MissingRegistryEscrow +{ fn notify( &self, notification: &TelNotification, @@ -52,11 +52,9 @@ impl TelNotifier for MissingRegistryEscrow TelNotification::MissingRegistry(signed_event) => { let registry_id = signed_event.event.get_registry_id()?; let value = signed_event.event.get_digest()?; - self.tel_reference - .db - .log_event(signed_event, &WriteTxnMode::CreateNew)?; - self.escrowed_missing_registry - .insert(®istry_id.to_string().as_str(), &value) + self.tel_reference.db.log_event(signed_event)?; + self.escrow_db + .missing_registry_insert(®istry_id.to_string(), &value) .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; Ok(()) } @@ -69,43 +67,43 @@ impl TelNotifier for MissingRegistryEscrow } } -impl MissingRegistryEscrow { +impl + MissingRegistryEscrow +{ pub fn process_missing_registry( &self, bus: &TelNotificationBus, id: &IdentifierPrefix, ) -> Result<(), Error> { - if let Ok(esc) = self.escrowed_missing_registry.get(&id.to_string().as_str()) { + if let Ok(esc) = self.escrow_db.missing_registry_get(&id.to_string()) { for digest in esc { - let event = self.tel_reference.db.get(&digest)?.unwrap(); + let event = self.tel_reference.db.get_event(&digest)?.unwrap(); let validator = TelEventValidator::new(self.tel_reference.clone(), self.kel_reference.clone()); match validator.validate(&event) { Ok(_) => { - // remove from escrow - self.escrowed_missing_registry - .remove(id, &digest) + self.escrow_db + .missing_registry_remove(&id.to_string(), &digest) .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; - // accept tel event self.tel_reference.add_event(event.clone())?; - bus.notify(&TelNotification::TelEventAdded(event.clone()))?; - // stop 
processing the escrow if tel was updated. It needs to start again. break; } Err(Error::MissingSealError) => { - // remove from escrow - self.escrowed_missing_registry.remove(id, &digest).unwrap(); + self.escrow_db + .missing_registry_remove(&id.to_string(), &digest) + .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; } Err(Error::MissingIssuerEventError) => { - self.escrowed_missing_registry.remove(id, &digest).unwrap(); + self.escrow_db + .missing_registry_remove(&id.to_string(), &digest) + .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; bus.notify(&TelNotification::MissingIssuer(event.clone()))?; } - Err(_e) => {} // keep in escrow, + Err(_e) => {} } } }; - Ok(()) } } @@ -120,10 +118,9 @@ mod tests { prefix::IdentifierPrefix, processor::{basic_processor::BasicProcessor, event_storage::EventStorage, Processor}, }; - use redb::Database; use crate::{ - database::{redb::RedbTelDatabase, EscrowDatabase, TelEventDatabase}, + database::{redb::RedbTelDatabase, EscrowDatabase}, error::Error, event::verifiable_event::VerifiableEvent, processor::{ @@ -138,7 +135,6 @@ mod tests { pub fn test_out_of_order_escrow() -> Result<(), Error> { use tempfile::Builder; - // Setup issuer key event log. Without ixn events tel event's can't be validated. 
let keri_root = Builder::new().prefix("test-db").tempfile().unwrap(); let keri_db = Arc::new(RedbDatabase::new(keri_root.path()).unwrap()); let keri_processor = BasicProcessor::new(keri_db.clone(), None); @@ -151,12 +147,11 @@ mod tests { keri_processor.process(&event)?; } - // Initiate tel and it's escrows let tel_root = Builder::new().prefix("test-db").tempfile().unwrap(); let tel_escrow_root = Builder::new().prefix("test-db2").tempfile().unwrap(); let tel_events_db = Arc::new(RedbTelDatabase::new(&tel_root.path()).unwrap()); - let escrow_db = EscrowDatabase::new(tel_escrow_root.path()).unwrap(); + let escrow_db = Arc::new(EscrowDatabase::new(tel_escrow_root.path()).unwrap()); let tel_storage = Arc::new(TelEventStorage::new(tel_events_db.clone())); let tel_bus = TelNotificationBus::new(); @@ -164,7 +159,7 @@ mod tests { let missing_registry_escrow = Arc::new(MissingRegistryEscrow::new( tel_events_db.clone(), keri_storage.clone(), - &escrow_db, + escrow_db, Duration::from_secs(100), )); @@ -178,7 +173,6 @@ mod tests { let tel_events = 
r#"{"v":"KERI10JSON0000e0_","t":"vcp","d":"EJPLd0ZMdbusC-nEQgXfVDcNWPkaZfhPAYH43ZqIrOOA","i":"EPafIvNeW6xYZZhmXBO3hc3GtCHv-8jDgdZsKAFffhLN","s":"0","ii":"EPyhGnPEzI1OjbmvNCEsiQfinmwxGcJgyDK_Nx9hnI2l","c":["NB"],"bt":"0","b":[]}-GAB0AAAAAAAAAAAAAAAAAAAAAABENMILl_3-wbKmzOR5IC4rOjwwXE-LFafC34vzduBn2O1{"v":"KERI10JSON000162_","t":"bis","d":"EH--8AOVXFyZ5HdshHVUjYIgrxqIRczzzbTZiZRzl6v8","i":"EEvXZtq623byRrE7h34J7sosXnSlXT5oKMuvntyqTgVa","s":"0","ii":"EPyhGnPEzI1OjbmvNCEsiQfinmwxGcJgyDK_Nx9hnI2l","ra":{"i":"EPafIvNeW6xYZZhmXBO3hc3GtCHv-8jDgdZsKAFffhLN","s":"0","d":"EJPLd0ZMdbusC-nEQgXfVDcNWPkaZfhPAYH43ZqIrOOA"},"dt":"2023-06-30T08:04:23.180342+00:00"}-GAB0AAAAAAAAAAAAAAAAAAAAAACEPBB-kmu3NQkuDUijczDscu6SMkOq_XznhufG2DFiveh{"v":"KERI10JSON000161_","t":"brv","d":"EBr1rgUjzKeGKRijXUkc-Sx_LzB1HUxyd3qB6zc8Jaga","i":"EEvXZtq623byRrE7h34J7sosXnSlXT5oKMuvntyqTgVa","s":"1","p":"EH--8AOVXFyZ5HdshHVUjYIgrxqIRczzzbTZiZRzl6v8","ra":{"i":"EPafIvNeW6xYZZhmXBO3hc3GtCHv-8jDgdZsKAFffhLN","s":"0","d":"EJPLd0ZMdbusC-nEQgXfVDcNWPkaZfhPAYH43ZqIrOOA"},"dt":"2023-06-30T08:04:23.186687+00:00"}-GAB0AAAAAAAAAAAAAAAAAAAAAADEKtt7vosEnv-Y0QVRfZq5HFmRZ1e_l5NeJq-zq_wd2ht"#; let parsed_tel = VerifiableEvent::parse(tel_events.as_bytes())?; - let vcp = parsed_tel[0].clone(); let iss = parsed_tel[1].clone(); let rev = parsed_tel[2].clone(); @@ -192,17 +186,11 @@ mod tests { let st = tel_storage.compute_vc_state(&vc_hash)?; assert!(st.is_none()); - let st = tel_storage.compute_vc_state(&vc_hash)?; - assert!(st.is_none()); processor.process(iss)?; - - // Check vc tel state. Iss event should't be accepted, because of - // missing issuer management tel event. It should be in out of order escrow. 
let st = tel_storage.compute_vc_state(&vc_hash)?; assert!(st.is_none()); - // Process missing vcp event processor.process(vcp)?; let st = tel_storage.compute_vc_state(&vc_hash)?; diff --git a/support/teliox/src/processor/escrow/mod.rs b/support/teliox/src/processor/escrow/mod.rs index 36424a61..f2c5534d 100644 --- a/support/teliox/src/processor/escrow/mod.rs +++ b/support/teliox/src/processor/escrow/mod.rs @@ -1,9 +1,9 @@ use std::{sync::Arc, time::Duration}; -use keri_core::{database::redb::RedbDatabase, processor::event_storage::EventStorage}; +use keri_core::{database::EventDatabase, processor::event_storage::EventStorage}; use crate::{ - database::{redb::RedbTelDatabase, EscrowDatabase}, + database::{TelEscrowDatabase, TelEventDatabase}, error::Error, processor::notification::TelNotificationKind, }; @@ -19,36 +19,42 @@ pub mod missing_issuer; pub mod missing_registry; pub mod out_of_order; -pub fn default_escrow_bus( - tel_storage: Arc, - kel_storage: Arc>, - tel_escrow_db: EscrowDatabase, +pub fn default_escrow_bus< + D: TelEventDatabase + Send + Sync + 'static, + K: EventDatabase + Send + Sync + 'static, + E: TelEscrowDatabase + 'static, +>( + tel_storage: Arc, + kel_storage: Arc>, + tel_escrow_db: E, ) -> Result< ( TelNotificationBus, - Arc>, - Arc>, - Arc>, + Arc>, + Arc>, + Arc>, ), Error, > { + let escrow_db = Arc::new(tel_escrow_db); + let out_of_order_escrow = Arc::new(OutOfOrderEscrow::new( tel_storage.clone(), kel_storage.clone(), - &tel_escrow_db, + escrow_db.clone(), Duration::from_secs(100), )); let missing_registry_escrow = Arc::new(MissingRegistryEscrow::new( tel_storage.clone(), kel_storage.clone(), - &tel_escrow_db, + escrow_db.clone(), Duration::from_secs(100), )); let tel_bus = TelNotificationBus::new(); let missing_issuer_escrow = Arc::new(MissingIssuerEscrow::new( tel_storage.clone(), - &tel_escrow_db, + escrow_db, Duration::from_secs(100), kel_storage.clone(), tel_bus.clone(), diff --git 
a/support/teliox/src/processor/escrow/out_of_order.rs b/support/teliox/src/processor/escrow/out_of_order.rs index 44f0650c..1224598e 100644 --- a/support/teliox/src/processor/escrow/out_of_order.rs +++ b/support/teliox/src/processor/escrow/out_of_order.rs @@ -1,16 +1,13 @@ use std::{sync::Arc, time::Duration}; use keri_core::{ - database::{ - redb::{escrow_database::SnKeyDatabase, RedbDatabase, WriteTxnMode}, - SequencedEventDatabase, - }, + database::EventDatabase, prefix::IdentifierPrefix, processor::event_storage::EventStorage, }; use crate::{ - database::{EscrowDatabase, TelEventDatabase, TelLogDatabase}, + database::{TelEscrowDatabase, TelEventDatabase}, error::Error, processor::{ notification::{TelNotification, TelNotificationBus, TelNotifier}, @@ -19,32 +16,33 @@ use crate::{ }, }; -pub struct OutOfOrderEscrow { +pub struct OutOfOrderEscrow { tel_reference: Arc>, - kel_reference: Arc>, - tel_log: Arc, - escrowed_out_of_order: SnKeyDatabase, + kel_reference: Arc>, + escrow_db: Arc, } -impl OutOfOrderEscrow { +impl + OutOfOrderEscrow +{ pub fn new( tel_reference: Arc, - kel_reference: Arc>, - escrow_db: &EscrowDatabase, - duration: Duration, + kel_reference: Arc>, + escrow_db: Arc, + _duration: Duration, ) -> Self { - let escrow = SnKeyDatabase::new(escrow_db.0.clone(), "out_of_order").unwrap(); - let tel_event_storage = Arc::new(TelEventStorage::new(tel_reference.clone())); + let tel_event_storage = Arc::new(TelEventStorage::new(tel_reference)); Self { tel_reference: tel_event_storage, kel_reference, - escrowed_out_of_order: escrow, - tel_log: tel_reference, + escrow_db, } } } -impl TelNotifier for OutOfOrderEscrow { +impl TelNotifier + for OutOfOrderEscrow +{ fn notify( &self, notification: &TelNotification, @@ -54,13 +52,11 @@ impl TelNotifier for OutOfOrderEscrow { TelNotification::OutOfOrder(signed_event) => { let event = signed_event.get_event(); let key_id = event.get_prefix(); - self.tel_log - .log_event(signed_event, &WriteTxnMode::CreateNew)?; + 
self.tel_reference.db.log_event(signed_event)?; let sn = event.get_sn(); let digest = event.get_digest()?; - - self.escrowed_out_of_order - .insert(&key_id, sn, &digest) + self.escrow_db + .out_of_order_insert(&key_id, sn, &digest) .map_err(|e| Error::EscrowDatabaseError(e.to_string())) } TelNotification::TelEventAdded(event) => { @@ -72,18 +68,21 @@ impl TelNotifier for OutOfOrderEscrow { } } -impl OutOfOrderEscrow { +impl + OutOfOrderEscrow +{ pub fn process_out_of_order_events( &self, bus: &TelNotificationBus, id: &IdentifierPrefix, sn: u64, ) -> Result<(), Error> { - if let Ok(esc) = self.escrowed_out_of_order.get(id, sn + 1) { + if let Ok(esc) = self.escrow_db.out_of_order_get(id, sn + 1) { for said in esc { let event = self - .tel_log - .get(&said) + .tel_reference + .db + .get_event(&said) .map_err(|e| Error::EscrowDatabaseError(e.to_string()))? .ok_or(Error::Generic(format!( "Event of digest {} not found in out of order escrow", @@ -93,28 +92,22 @@ impl OutOfOrderEscrow { TelEventValidator::new(self.tel_reference.clone(), self.kel_reference.clone()); match validator.validate(&event) { Ok(_) => { - // remove from escrow - self.escrowed_out_of_order - .remove(id, sn, &said) + self.escrow_db + .out_of_order_remove(id, sn, &said) .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; - // accept tel event self.tel_reference.add_event(event.clone())?; - bus.notify(&TelNotification::TelEventAdded(event.clone()))?; - // stop processing the escrow if tel was updated. It needs to start again. 
break; } Err(Error::MissingSealError) => { - // remove from escrow - self.escrowed_out_of_order - .remove(id, sn, &said) + self.escrow_db + .out_of_order_remove(id, sn, &said) .map_err(|e| Error::EscrowDatabaseError(e.to_string()))?; } - Err(_e) => {} // keep in escrow, + Err(_e) => {} } } }; - Ok(()) } } @@ -129,10 +122,9 @@ mod tests { prefix::IdentifierPrefix, processor::{basic_processor::BasicProcessor, event_storage::EventStorage, Processor}, }; - use redb::Database; use crate::{ - database::{redb::RedbTelDatabase, EscrowDatabase, TelEventDatabase}, + database::{redb::RedbTelDatabase, EscrowDatabase}, error::Error, event::verifiable_event::VerifiableEvent, processor::{ @@ -147,7 +139,6 @@ mod tests { pub fn test_out_of_order_escrow() -> Result<(), Error> { use tempfile::Builder; - // Setup issuer key event log. Without ixn events tel event's can't be validated. let keri_root = Builder::new().prefix("test-db").tempfile().unwrap(); let keri_db = Arc::new(RedbDatabase::new(keri_root.path()).unwrap()); let keri_processor = BasicProcessor::new(keri_db.clone(), None); @@ -160,12 +151,11 @@ mod tests { keri_processor.process(&event)?; } - // Initiate tel and it's escrows let tel_root = Builder::new().prefix("test-db").tempfile().unwrap(); let tel_escrow_root = Builder::new().prefix("test-db").tempfile().unwrap(); let tel_events_db = Arc::new(RedbTelDatabase::new(&tel_root.path()).unwrap()); - let escrow_db = EscrowDatabase::new(&tel_escrow_root.path()).unwrap(); + let escrow_db = Arc::new(EscrowDatabase::new(&tel_escrow_root.path()).unwrap()); let tel_storage = Arc::new(TelEventStorage::new(tel_events_db.clone())); let tel_bus = TelNotificationBus::new(); @@ -173,7 +163,7 @@ mod tests { let out_of_order_escrow = Arc::new(OutOfOrderEscrow::new( tel_events_db, keri_storage.clone(), - &escrow_db, + escrow_db, Duration::from_secs(100), )); @@ -193,24 +183,19 @@ mod tests { let rev = parsed_tel[2].clone(); let processor = TelEventProcessor::new(keri_storage, 
tel_storage.clone(), Some(tel_bus)); - // Incept registry processor.process(vcp)?; - // Process out of order event. processor.process(rev)?; let vc_hash: IdentifierPrefix = "EEvXZtq623byRrE7h34J7sosXnSlXT5oKMuvntyqTgVa" .parse() .unwrap(); - // Check vc tel state. Iss event should't be accepted, because of - // missing issuance event. It should be in out of order escrow. let st = tel_storage.compute_vc_state(&vc_hash)?; assert!(st.is_none()); let st = tel_storage.compute_vc_state(&vc_hash)?; assert!(st.is_none()); - // Process missing event processor.process(iss)?; let st = tel_storage.compute_vc_state(&vc_hash)?; diff --git a/support/teliox/src/processor/mod.rs b/support/teliox/src/processor/mod.rs index cf88b7dc..12e10521 100644 --- a/support/teliox/src/processor/mod.rs +++ b/support/teliox/src/processor/mod.rs @@ -15,6 +15,7 @@ use self::{ validator::TelEventValidator, }; +#[cfg(feature = "storage-redb")] pub mod escrow; pub mod notification; pub mod storage; diff --git a/support/teliox/src/tel/mod.rs b/support/teliox/src/tel/mod.rs index 1de31bfc..e63d0acf 100644 --- a/support/teliox/src/tel/mod.rs +++ b/support/teliox/src/tel/mod.rs @@ -12,7 +12,7 @@ use crate::{ state::{vc_state::TelState, ManagerTelState}, }; use keri_core::{ - database::{redb::RedbDatabase, EventDatabase}, prefix::IdentifierPrefix, processor::event_storage::EventStorage, + database::EventDatabase, prefix::IdentifierPrefix, processor::event_storage::EventStorage, }; use said::SelfAddressingIdentifier; diff --git a/watcher.Dockerfile b/watcher.Dockerfile index 844b24e6..d64dc0e7 100644 --- a/watcher.Dockerfile +++ b/watcher.Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.88 as build +FROM rust:1.91 AS build WORKDIR /app RUN echo '[workspace] \n\ diff --git a/witness.Dockerfile b/witness.Dockerfile index 78c6fb34..940aa04a 100644 --- a/witness.Dockerfile +++ b/witness.Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.88 as build +FROM rust:1.91 AS build WORKDIR /app RUN echo '[workspace] \n\