diff --git a/src/keri/db/__init__.py b/src/keri/db/__init__.py index 38f1a0091..423778aed 100644 --- a/src/keri/db/__init__.py +++ b/src/keri/db/__init__.py @@ -6,7 +6,7 @@ from . import basing, dbing, escrowing, koming, subing, webdbing -from .basing import Baser, BaserDoer, openDB, reopenDB, statedict +from .basing import Baser, BaserDoer, openDB, reopenDB, statedict, BaserBase from .dbing import (LMDBer, clearDatabaserDir, openLMDB, onKey, snKey, fnKey, dgKey, dtKey, splitKey, splitOnKey, splitKeyDT, fetchTsgs, suffix, unsuffix, diff --git a/src/keri/db/basing.py b/src/keri/db/basing.py index ca42d1adc..d9a59d7e5 100644 --- a/src/keri/db/basing.py +++ b/src/keri/db/basing.py @@ -31,23 +31,6 @@ logger = ogler.getLogger() -def _strip_prerelease(version_str): - """Strip prerelease and build metadata from a semver string. - - Semver compares alphanumeric prerelease identifiers lexicographically, - so 'dev4' > 'dev10' (because '4' > '1'). Stripping prerelease ensures - dev releases within the same version cycle compare as equal. - See: https://github.com/WebOfTrust/keripy/issues/820 - """ - ver = semver.VersionInfo.parse(version_str) - return str(semver.Version(ver.major, ver.minor, ver.patch)) - - -MIGRATIONS = [ - ("0.6.8", ["hab_data_rename"]), - ("1.0.0", ["add_key_and_reg_state_schemas"]), - ("1.2.0", ["rekey_habs"]) -] # ToDo XXXX maybe @@ -126,1881 +109,1934 @@ def get(self, k, default=None): else: return self.__getitem__(k) +def _strip_prerelease(version_str): + """Strip prerelease and build metadata from a semver string. -def openDB(*, cls=None, name="test", **kwa): + Semver compares alphanumeric prerelease identifiers lexicographically, + so 'dev4' > 'dev10' (because '4' > '1'). Stripping prerelease ensures + dev releases within the same version cycle compare as equal. 
+ See: https://github.com/WebOfTrust/keripy/issues/820 """ - Returns contextmanager generated by openLMDB but with Baser instance as default + ver = semver.VersionInfo.parse(version_str) + return str(semver.Version(ver.major, ver.minor, ver.patch)) + + +MIGRATIONS = [ + ("0.6.8", ["hab_data_rename"]), + ("1.0.0", ["add_key_and_reg_state_schemas"]), + ("1.2.0", ["rekey_habs"]) +] + +class BaserBase: """ - if cls == None: # can't reference class before its defined below - cls = Baser - return openLMDB(cls=cls, name=name, **kwa) + Base class for Baser and WebBaser. + BaserBase provides minimal, non‑persistent structures like + prefixes, groups, kevers and db as well as common functions. -@contextmanager -def reopenDB(db, clear=False, **kwa): + Attributes: + - prefixes (oset): set of local prefix identifiers (`prefixes`) + - groups (oset): set of group identifiers (`groups`) + - _kevers (statedict): in‑memory mapping of prefix to Kever + - db: db where `_kevers.db` points back to this instance so that kever + lookups and read‑through caching work correctly + + This class must be initialized *before* any LMDB‑backed components + so that reload() has valid in‑memory targets to populate. """ - Context manager wrapper LMDB DB instances. - Repens and closes db.path and db.env LMDB - Parameters: - db (LMDBer): instance with LMDB environment at .env - clear (bool): True means clear directory after close + def __init__(self, **kwa): - Usage: + self.prefixes = oset() # should change to hids for hab ids + self.groups = oset() # group hab ids + self._kevers = statedict() + self._kevers.db = self # assign db for read through cache of kevers - with reopenDB(baser) as env: - env. .... 
+ @property + def kevers(self): + """ + Returns .db.kevers + """ + return self._kevers - """ - try: - db.reopen(clear=clear, **kwa) - yield db.env - finally: - db.close(clear=clear) + def migrate(self): + """ Run all migrations required + Run all migrations that are required from the current version of database up to the current version + of the software that have not already been run. -KERIBaserMapSizeKey = "KERI_BASER_MAP_SIZE" + Sets the version of the database to the current version of the software after successful completion + of required migrations + """ + from ..core import coring -class Baser(LMDBer): - """ - Baser sets up named sub databases with Keri Event Logs within main database + escrows_cleared = False - Attributes: - see superclass LMDBer for inherited attributes + for (version, migrations) in MIGRATIONS: + # Only run migration if current source code version is at or below the migration version + ver = semver.VersionInfo.parse(__version__) + ver_no_prerelease = semver.Version(ver.major, ver.minor, ver.patch) + if self.version is not None and semver.compare(version, str(ver_no_prerelease)) > 0: + print( + f"Skipping migration {version} as higher than the current KERI version {__version__}") + continue + # Skip migrations already run - where version less than (-1) or equal to (0) database version + # Strip prerelease from DB version to avoid lexicographic comparison bugs (#820) + if self.version is not None and semver.compare(version, _strip_prerelease(self.version)) != 1: + continue - kevers (dbdict): read-through cache of Kever instances indexed by - identifier prefix qb64 - prefixes (OrderedSet): local prefixes corresponding to habitats for - this db - groups (OrderedSet): group hab identifier prefixes for this db + # Clear all escrows before first migration to prevent old key + # format crashes (e.g. qnfs keys without insertion-order suffix). + # Uses .trim() which bypasses key parsing. See #863. 
+ if not escrows_cleared: + self._trimAllEscrows() + escrows_cleared = True - .evts is named subDB instance of SerderSuber whose values are serialized - key events - subkey 'evts.' - dgKey (prefix + digest) - DB is keyed by identifier prefix plus digest of serialized event - Only one value per DB key is allowed + print(f"Migrating database v{self.version} --> v{version}") + for migration in migrations: + modName = f"keri.db.migrations.{migration}" + if self.migs.get(keys=(migration,)) is not None: + continue - .fels is named subDB instance of OnSuber for first seen event logs (FEL) - as indices mapping first-seen ordinal fn to event digests. - Actual serialized key events are stored in .evts by SAID digest - Indexed in first-seen accepted order for replay and cloning. - subkey 'fels.' - Key: identifier prefix + monotonically increasing fn. - Value: qb64 str of event digest used to lookup event in .evts. - Only one value per DB key is allowed. - Append-only ordering of accepted first-seen events. + mod = importlib.import_module(modName) + try: + print(f"running migration {modName}") + mod.migrate(self) + except Exception as e: + print(f"\nAbandoning migration {migration} at version {version} with error: {e}") + return - .kels is named subDB instance of OnIoDupSuber for key event logs as indices - mapping composite key "
" to serialized key event digests.
-            Actual serialized key events are stored in .evts by SAID digest.
-            subkey 'kels.'
-            Key: identifier prefix + sequence number.
-            Value: qb64 digest used to lookup event in .evts.
-            More than one value per DB key is allowed.
+                self.migs.pin(keys=(migration,), val=coring.Dater())
 
-        .dtss is named subDB instance of CesrSuber (klas=Dater) for datetime
-            stamps of when the event was first escrowed and then later first
-            seen by log. Used for escrow timeouts and extended validation.
-            subkey 'dtss.'
-            dgKey (prefix + digest)
-            Value: Dater instance
-            Only one value per DB key is allowed.
+            # update database version after successful migration
+            self.version = version
 
-        .aess is named subDB instance of CatCesrSuber (klas=(Number, Diger))
-            for authorizing event source seal couples that map digest of key
-            event to seal source couple of authorizer's (delegator or issuer)
-            event.
-            subkey 'aess.'
-            dgKey (prefix + digest)
-            Value: (Number, Diger) tuple; Number serialized as Huge
-            (fixed 24-char), used to lookup authorizer's source event in .kels.
-            Only one value per DB key is allowed.
+        self.version = __version__
 
-        .sigs is named subDB instance of CesrIoSetSuber (klas=Siger) for
-            fully qualified indexed event signatures from the controller.
-            subkey 'sigs.'
-            dgKey (prefix + digest)
-            More than one value per DB key is allowed.
 
-        .wigs is named subDB instance of CesrIoSetSuber (klas=Siger) for
-            indexed witness signatures of events that may come directly or be
-            derived from a witness receipt message. Witnesses always have
-            nontransferable identifier prefixes. The index is the offset of
-            the witness into the witness list of the most recent establishment
-            event wrt the receipted event.
-            subkey 'wigs.'
-            dgKey (prefix + digest)
-            More than one value per DB key is allowed.
+    def _trimAllEscrows(self):
+        """Trim all escrow databases via low-level .trim().
 
-        .rcts is named subDB instance of CatCesrIoSetSuber (klas=(Prefixer, Cigar))
-            for event receipt couplets from nontransferable signers.
-            These are endorsements from nontransferable signers who are not witnesses
-            May be watchers or other
-            Each entry is a (Prefixer, Cigar) duple.
-            subkey 'rcts.'
-            dgKey (prefix + digest)
-            Multiple values per key stored as ordered set (duplicates ignored,
-            insertion order preserved).
+        Safe for old key formats that would crash higher-level iterators
+        (e.g., qnfs keys without insertion-order suffix from pre-1.2.0).
+        Called at the beginning of migration per spec call guidance.
+        See: https://github.com/WebOfTrust/keripy/issues/863
+        """
+        escrows = [
+            self.ures, self.vres, self.pses, self.pwes, self.ooes,
+            self.qnfs, self.uwes, self.misfits, self.delegables,
+            self.pdes, self.udes, self.rpes, self.ldes, self.epsd,
+            self.eoobi, self.dpub, self.gpwe, self.gdee, self.dpwe,
+            self.gpse, self.epse, self.dune,
+        ]
+        total = 0
+        for escrow in escrows:
+            count = escrow.cnt()
+            if count > 0:
+                escrow.trim()
+                total += count
+        if total > 0:
+            print(f"Cleared {total} escrow entries before migration")
 
-        .ures is named subDB instance of CatCesrIoSetSuber
-            (klas=(Diger, Prefixer, Cigar)) for unverified event receipt
-            escrowed triples from nontransferable signers. Each triple is
-            (receipted event digest, receiptor prefix, receipt signature).
-            Used to escrow receipt couples until the receipted event appears.
-            subkey 'ures.'
-            snKey (prefix + sequence number)
-            More than one value per DB key is allowed.
 
-        .vrcs is named subDB instance of CatCesrIoSetSuber
-            (klas=(Prefixer, Number, Diger, Siger)) for verified transferable-
-            validator receipt quadruples. Each stored value is a typed CESR
-            tuple (Prefixer, Number, Diger, Siger) representing a validator's
-            AID, its latest establishment-event sequence number, digest, and
-            its indexed signature over the event. Values preserved in insertion
-            order. Represents fully validated receipts moved out of escrow.
-            subkey 'vrcs.'
-            dgKey (prefix + digest)
-            Multiple values per key stored as ordered set.
+    def clearEscrows(self):
+        """
+        Clear all escrows
+        """
+        for escrow in [self.ures, self.vres, self.pses, self.pwes, self.ooes,
+                       self.qnfs, self.uwes,
+                       self.misfits, self.delegables, self.pdes,
+                       self.udes, self.rpes, self.ldes, self.epsd, self.eoobi,
+                       self.dpub, self.gpwe, self.gdee, self.dpwe, self.gpse,
+                       self.epse, self.dune]:
+            count = escrow.cntAll()
+            escrow.trim()
+            logger.info(f"KEL: Cleared {count} escrows from ({escrow})")
 
-        .vres is named subDB instance of CatCesrIoSetSuber for escrowed
-            transferable-receipt quintuples. Each value is a typed CESR tuple
-            (Diger, Prefixer, Number, Diger, Siger) representing a validator's
-            receipt escrow entry. Holds unverified transferable receipts until
-            validated and moved into .vrcs.
-            subkey 'vres.'
-            snKey (prefix + sequence number)
-            Values stored in insertion order.
+    @property
+    def current(self):
+        """ Current property determines if we are at the current database migration state.
 
-        .pses is named subDB instance of OnIoDupSuber for partially-signed
-            event escrows under composite keys "
". Tracks events
-            with at least one verified signature but not yet fully validated
-            due to missing signatures or dependent events.
-            subkey 'pses.'
-            Key: identifier prefix + sequence number.
-            Values stored in insertion order.
+         If the database version matches the library version return True
+         If the current database version is behind the current library version, check for migrations
+            - If there are migrations to run, return False
+            - If there are no migrations to run, reset database version to library version and return True
+         If the current database version is ahead of the current library version, raise exception
 
-        .pwes is named subDB instance of OnIoDupSuber for partially witnessed
-            key event escrows under composite keys "
" to
-            serialized event digest. Escrows events with verified signatures
-            but not yet verified witness receipts.
-            subkey 'pwes.'
-            Key: identifier prefix + sequence number.
-            More than one value per DB key is allowed.
+         """
+        if self.version == __version__:
+            return True
 
-        .pdes is named subDB instance of OnIoDupSuber for partially delegated
-            key event escrows that map prefix + sequence number to serialized
-            event digest. Used in conjunction with .udes which escrows the
-            associated seal source couple.
-            subkey 'pdes.'
-            snKey (prefix + sequence number)
-            More than one value per DB key is allowed.
+        ver = semver.VersionInfo.parse(__version__)
+        ver_no_prerelease = semver.Version(ver.major, ver.minor, ver.patch)
+        # Strip prerelease from DB version to avoid lexicographic comparison bugs (#820)
+        if self.version is not None and semver.compare(_strip_prerelease(self.version), str(ver_no_prerelease)) == 1:
+            raise ConfigurationError(
+                f"Database version={self.version} is ahead of library version={__version__}")
 
-        .udes is named subDB instance of CatCesrSuber (klas=(Number, Diger))
-            for unverified delegation seal source couple escrows that map
-            (prefix, digest) of delegated event to delegating seal source
-            couple (sn, dig) that provides the source delegator event seal.
-            Each couple is (Number(num=sn).qb64b, Diger.qb64b) used to lookup
-            the source event in the delegator's KEL. Once accepted, entries
-            move into .aess.
-            subkey 'udes.'
-            dgKey (prefix + digest)
-            Only one value per DB key is allowed.
+        last = MIGRATIONS[-1]
+        # If we aren't at latest version, but there are no outstanding migrations,
+        # reset version to latest (rightmost (-1) migration is latest)
+        if self.migs.get(keys=(last[1][-1],)) is not None:
+            return True
 
-        .uwes is named subDB instance of B64OnIoSetSuber for unverified event
-            indexed escrowed couples from witness signers. Each couple is
-            (edig, wig) where edig is receipted event digest and wig is the
-            indexed witness signature derived from the witness nontrans prefix
-            and offset into the witness list of the latest establishment event.
-            subkey 'uwes.'
-            Key: receipted event controller prefix + sequence number.
-            Multiple values per key are stored in insertion order as a set.
+        # We have migrations to run
+        return False
 
-        .ooes is named subDB instance of OnIoDupSuber for out-of-order event
-            escrows under composite keys "
". Tracks events whose
-            prior event has not yet been accepted into the KEL.
-            subkey 'ooes.'
-            Key: identifier prefix + sequence number.
-            Values stored in insertion order.
 
-        .dels is named subDB instance of OnIoDupSuber for duplicitous event
-            log tables that map identifier prefix plus sequence number to
-            serialized event digests.
-            subkey 'dels.'
-            snKey (prefix + sequence number)
-            Values are qb64 digests used to lookup event in .evts.
-            More than one value per DB key is allowed (insertion ordered).
+    def complete(self, name=None):
+        """ Returns list of tuples of migrations completed with date of completion
 
-        .ldes is named subDB instance of OnIoDupSuber for likely duplicitous
-            escrowed event tables that map identifier prefix plus sequence
-            number to serialized event digests.
-            subkey 'ldes.'
-            snKey (prefix + sequence number)
-            Values are qb64 digests used to lookup event in .evts.
-            More than one value per DB key is allowed (insertion ordered).
+        Parameters:
+            name(str): optional name of migration to check completeness
 
-        .qnfs is named subDB instance of IoSetSuber for queued not-first-seen
-            event escrows. Maps (prefix, said) to event digest.
-            subkey 'qnfs.'
-            dupsort=True
-            More than one value per DB key is allowed.
+        Returns:
+            list: tuples of (migration name, completion date) for completed migrations
 
-        .fons is named subDB instance of CesrSuber (klas=Number) mapping
-            prefix and digest to fn value (first seen ordinal number) of the
-            associated event. Given pre and event digest, retrieve fn here then
-            fetch event from .fels. Ensures any event looked up this way was
-            first seen at some point, even if later superseded by a recovery
-            rotation. Direct lookup in .evts could return escrowed events that
-            may never have been accepted as first seen.
-            subkey 'fons.'
-            dgKey (prefix + digest)
-            Only one value per DB key is allowed.
+        """
+        migrations = []
+        if not name:
+            for version, migs in MIGRATIONS:
+                # Print entries only for migrations that have been run
+                # Strip prerelease from DB version to avoid lexicographic comparison bugs (#820)
+                if self.version is not None and semver.compare(version, _strip_prerelease(self.version)) <= 0:
+                    for mig in migs:
+                        dater = self.migs.get(keys=(mig,))
+                        migrations.append((mig, dater))
+        else:
+            for version, migs in MIGRATIONS:  # check all migrations for each version
+                if name not in migs or not self.migs.get(keys=(name,)):
+                    raise ValueError(f"No migration named {name}")
+            migrations.append((name, self.migs.get(keys=(name,))))
 
-        .migs is named subDB instance of CesrSuber (klas=Dater) tracking
-            completed migrations. Maps migration module name to the Dater
-            timestamp of when it was run.
-            subkey 'migs.'
-            Key: migration name str.
-            Only one value per DB key is allowed.
+        return migrations
 
-        .vers is named subDB instance of Suber storing the current database
-            schema version string.
-            subkey 'vers.'
 
-        .esrs is named subDB instance of Komer (schema=EventSourceRecord)
-            tracking the source of each event. When .local is Truthy the event
-            was sourced in a protected way (generated locally or via a protected
-            path). When .local is Falsey the event was NOT sourced in a
-            protected way. The value of .local determines what validation logic
-            to run. Used to track source when processing escrows that would
-            otherwise be decoupled from the original source of the event.
-            subkey 'esrs.'
-            dgKey (prefix + digest)
-            Only one value per DB key is allowed.
+    def clonePreIter(self, pre, fn=0):
+        """
+        Returns iterator of first seen event messages with attachments for the
+        identifier prefix pre starting at first seen order number, fn.
+        Essentially a replay in first seen order with attachments
 
-        .misfits is named subDB instance of OnIoSetSuber for misfit escrows.
-            Events with remote (nonlocal) sources that are inappropriate (i.e.
-            would be dropped) unless promoted to local source via extra
-            after-the-fact authentication. Escrow processing determines if and
-            how to promote event source to local and then reprocess.
-            subkey 'mfes.'
-            snKey (prefix + sequence number)
-            Value: qb64b digest of event.
+        Parameters:
+            pre is bytes of identifier prefix
+            fn is int fn to resume replay. Earliest is fn=0
 
-        .delegables is named subDB instance of IoSetSuber for delegable event
-            escrows of key events with a local delegator that need approval.
-            Approval is via anchoring of the delegated event seal in the
-            delegator's KEL. Event source must be local. A nonlocal (remote)
-            source must first pass through .misfits and be promoted to local.
-            subkey 'dees.'
-            snKey (prefix + sequence number)
-            Value: qb64b digest of event.
+        Returns:
+           msgs (Iterator): over all items with pre starting at fn
+        """
+        if hasattr(pre, 'encode'):
+            pre = pre.encode("utf-8")
 
-        .states is named subDB instance of Komer (schema=KeyStateRecord)
-            mapping a prefix to its latest key state. Used as read-through
-            cache backing .kevers to reload Kever instances from persistent
-            storage.
-            subkey 'stts.'
-            Key: identifier prefix.
-            Only one value per DB key is allowed.
+        for keys, fn, dig in self.fels.getAllItemIter(keys=pre, on=fn):
+            try:
+                msg = self.cloneEvtMsg(pre=pre, fn=fn, dig=dig)
+            except Exception:
+                continue  # skip this event
+            yield msg
 
-        .wits is named subDB instance of CesrIoSetSuber (klas=Prefixer)
-            storing the current witness set for an identifier.
-            subkey 'wits.'
-            Key: identifier prefix.
-            Multiple values per key (one per witness).
 
-        .habs is named subDB instance of Komer (schema=HabitatRecord) mapping
-            habitat names to habitat application state including identifier
-            prefix.
-            subkey 'habs.'
-            Key: habitat name str.
-            Only one value per DB key is allowed.
+    def cloneAllPreIter(self):
+        """
+        Returns iterator of first seen event messages with attachments for all
+        identifier prefixes starting at key. If key == b'' then start at first
+        key in database. Use key to resume replay.
+        Essentially a replay in first seen order with attachments of entire
+        set of FELs.
 
-        .names is named subDB instance of Suber (sep='^') mapping
-            (namespace, name) to identifier prefix. Provides namespace-scoped
-            name lookup for habitats.
-            subkey 'names.'
-            Key: namespace + '^' + name.
+        Returns:
+           msgs (Iterator): over all items in db
 
-        .sdts is named subDB instance of CesrSuber (klas=Dater) mapping SAD
-            SAID to Dater CESR serialization of ISO-8601 datetime
-            (sad date-time stamp).
-            subkey 'sdts.'
-            Key: said (bytes) of SAD.
-            Only one value per DB key is allowed.
+        """
+        for keys, fn, dig in self.fels.getAllItemIter(keys=b'', on=0):
+            pre = keys[0].encode() if isinstance(keys[0], str) else keys[0]
+            try:
+                msg = self.cloneEvtMsg(pre=pre, fn=fn, dig=dig)
+            except Exception:
+                continue  # skip this event
+            yield msg
+
+
+    def cloneEvtMsg(self, pre, fn, dig):
+        """
+        Clones Event as Serialized CESR Message with Body and attached Foot
+
+        Parameters:
+            pre (bytes): identifier prefix of event
+            fn (int): first seen number (ordinal) of event
+            dig (bytes): digest of event
+
+        Returns:
+            bytearray: message body with attachments
+        """
+        from ..core import coring
+        from ..core.counting import Counter, Codens
+
+        msg = bytearray()  # message
+        atc = bytearray()  # attachments
+        dgkey = dgKey(pre, dig)  # get message
+        if not (serder := self.evts.get(keys=(pre, dig))):
+            raise MissingEntryError("Missing event for dig={}.".format(dig))
+        msg.extend(serder.raw)
+
+        # add indexed signatures to attachments
+        if not (sigers := self.sigs.get(keys=dgkey)):
+            raise MissingEntryError("Missing sigs for dig={}.".format(dig))
+        atc.extend(Counter(code=Codens.ControllerIdxSigs,
+                           count=len(sigers), version=Vrsn_1_0).qb64b)
+        for siger in sigers:
+            atc.extend(siger.qb64b)
+
+        # add indexed witness signatures to attachments
+        if wigers := self.wigs.get(keys=dgkey):
+            atc.extend(Counter(code=Codens.WitnessIdxSigs,
+                               count=len(wigers), version=Vrsn_1_0).qb64b)
+            for wiger in wigers:
+                atc.extend(wiger.qb64b)
+
+        # add authorizer (delegator/issuer) source seal event couple to attachments
+        if (duple := self.aess.get(keys=(pre, dig))) is not None:
+            number, diger = duple
+            atc.extend(Counter(code=Codens.SealSourceCouples,
+                               count=1, version=Vrsn_1_0).qb64b)
+            atc.extend(number.qb64b + diger.qb64b)
+
+        # add trans endorsement quadruples to attachments not controller
+        # may have been originally key event attachments or receipted endorsements
+        if quads := self.vrcs.get(keys=dgkey):
+            atc.extend(Counter(code=Codens.TransReceiptQuadruples,
+                               count=len(quads), version=Vrsn_1_0).qb64b)
+            for pre, snu, diger, siger in quads:    # adapt to CESR
+                atc.extend(pre.qb64b)
+                atc.extend(snu.qb64b)
+                atc.extend(diger.qb64b)
+                atc.extend(siger.qb64b)
+
+        # add nontrans endorsement couples to attachments not witnesses
+        # may have been originally key event attachments or receipted endorsements
+        if coups := self.rcts.get(keys=dgkey):
+            atc.extend(Counter(code=Codens.NonTransReceiptCouples,
+                               count=len(coups), version=Vrsn_1_0).qb64b)
+            for prefixer, cigar in coups:
+                atc.extend(prefixer.qb64b)
+                atc.extend(cigar.qb64b)
+
+        # add first seen replay couple to attachments
+        if not (dater := self.dtss.get(keys=dgkey)):
+            raise MissingEntryError("Missing datetime for dig={}.".format(dig))
+        atc.extend(Counter(code=Codens.FirstSeenReplayCouples,
+                           count=1, version=Vrsn_1_0).qb64b)
+        atc.extend(coring.Number(num=fn, code=coring.NumDex.Huge).qb64b)  # may not need to be Huge
+        atc.extend(dater.qb64b)
 
-        .ssgs is named subDB instance of CesrIoSetSuber (klas=Siger) for SAD
-            transferable indexed signatures. Maps quadruple key
-            (diger.qb64, prefixer.qb64, number.qb64, diger.qb64) to Siger
-            of the transferable signer's signature. Diger is the SAID of the
-            SAD; prefixer, number, and diger indicate the key state
-            establishment event for the signer.
-            subkey 'ssgs.'
-            Key: join(diger.qb64b, prefixer.qb64b, number.qb64b, diger.qb64b)
-            Multiple values per key (one per signer, insertion ordered).
+        # prepend pipelining counter to attachments
+        if len(atc) % 4:
+            raise ValueError("Invalid attachments size={}, nonintegral"
+                             " quadlets.".format(len(atc)))
+        pcnt = Counter(code=Codens.AttachmentGroup,
+                       count=(len(atc) // 4), version=Vrsn_1_0).qb64b
+        msg.extend(pcnt)
+        msg.extend(atc)
+        return msg
 
-        .scgs is named subDB instance of CatCesrIoSetSuber
-            (klas=(Verfer, Cigar)) for SAD nontransferable signatures. Maps
-            SAD SAID to (Verfer, Cigar) couple for each nontransferable signer.
-            For nontransferable signers, qb64 of Verfer equals Prefixer.
-            subkey 'scgs.'
-            Key: said (bytes) of SAD.
-            Multiple values per key (one per nontransferable signer, insertion
-            ordered).
 
-        .rpys is named subDB instance of SerderSuber for reply messages. Maps
-            reply SAID to serialization of the reply message (versioned SAD).
-            Use .sdts, .ssgs, and .scgs for associated datetimes and
-            signatures.
-            subkey 'rpys.'
-            Key: said bytes.
-            Only one value per DB key is allowed.
+    def cloneDelegation(self, kever):
+        """
+        Recursively clone delegation chain from AID of Kever if one exists.
 
-        .rpes is named subDB instance of CesrIoSetSuber (klas=Diger) for
-            reply escrows. Maps reply route to Diger of the escrowed reply
-            message. Routes such as '/end/role' and '/loc/scheme'.
-            subkey 'rpes.'
-            Key: route bytes.
-            Multiple values per key.
+        Parameters:
+            kever (Kever): Kever from which to clone the delegator's AID.
 
-        .eans is named subDB instance of CesrSuber (klas=Diger) for endpoint
-            role authorizations. Maps cid.role.eid to SAID of the reply SAD
-            that authN by controller cid authZ endpoint provider eid in the
-            given role. Routes /end/role/add and /end/role/cut to nullify.
-            subkey 'eans.'
-            Key: cid.role.eid.
-            Only one value per DB key is allowed.
+        """
+        if kever.delegated and kever.delpre in self.kevers:
+            dkever = self.kevers[kever.delpre]
+            yield from self.cloneDelegation(dkever)
 
-        .lans is named subDB instance of CesrSuber (klas=Diger) for location
-            authorizations. Maps eid.scheme to SAID of the reply SAD that
-            authN by endpoint provider eid designates scheme URL.
-            Route /loc/scheme; null URL nullifies.
-            subkey 'lans.'
-            Key: eid.scheme.
-            Only one value per DB key is allowed.
+            for dmsg in self.clonePreIter(pre=kever.delpre, fn=0):
+                yield dmsg
 
-        .ends is named subDB instance of Komer (schema=EndpointRecord) mapping
-            (cid, role, eid) to EndpointRecord attributes about endpoint
-            authorization. cid is controller prefix, role is endpoint role
-            (e.g. watcher), eid is controller prefix of endpoint provider.
-            Data extracted from reply /end/role/add or /end/role/cut.
-            subkey 'ends.'
-            Key: cid.role.eid.
 
-        .locs is named subDB instance of Komer (schema=LocationRecord) mapping
-            endpoint prefix eid and network location scheme to endpoint
-            location details. Data extracted from reply /loc/scheme.
-            subkey 'locs.'
-            Key: eid.scheme.
+    def fetchAllSealingEventByEventSeal(self, pre, seal, sn=0):
+        """
+        Search through a KEL for the event that contains a specific anchored
+        SealEvent type of provided seal but in dict form and is also fully
+            witnessed. Searches from sn forward (default = 0). Searches all events in
+        KEL of pre including disputed and/or superseded events.
+        Returns the Serder of the first event with the anchored SealEvent seal,
+            None if not found
 
-        .obvs is named subDB instance of Komer (schema=ObservedRecord) for
-            observed OIDs by watcher. Maps (cid, aid, oid) to ObservedRecord.
-            subkey 'obvs.'
-            Key: cid.aid.oid.
 
-        .tops is named subDB instance of Komer (schema=TopicsRecord) mapping
-            witness identifier prefix to the topic index of the last recieved
-            mailbox message.
-            subkey 'witm.'
-            Key: witness prefix identifier.
+        Parameters:
+            pre (bytes|str): identifier of the KEL to search
+            seal (dict): dict form of Seal of any type SealEvent to find in anchored
+                seals list of each event
+            sn (int): beginning sn to search
 
-        .gpse is named subDB instance of CatCesrIoSetSuber
-            (klas=(Number, Diger)) for group multisig partial signature
-            escrows.
-            subkey 'gpse.'
-            Multiple values per key.
+        """
+        from ..core.structing import SealEvent
 
-        .gdee is named subDB instance of CatCesrIoSetSuber
-            (klas=(Number, Diger)) for group multisig delegate escrows.
-            subkey 'gdee.'
-            Multiple values per key.
+        if tuple(seal) != SealEvent._fields:  # wrong type of seal
+            return None
 
-        .gpwe is named subDB instance of CatCesrIoSetSuber
-            (klas=(Number, Diger)) for group multisig partial witness escrows.
-            subkey 'gdwe.'
-            Multiple values per key.
+        seal = SealEvent(**seal)  #convert to namedtuple
 
-        .cgms is named subDB instance of CesrSuber (klas=Diger) for completed
-            group multisig events. Maps key to Diger of completed event.
-            subkey 'cgms.'
-            Only one value per DB key is allowed.
+        for srdr in self.getEvtPreIter(pre=pre, sn=sn):  # includes disputed & superseded
+            for eseal in srdr.seals or []:  # or [] for seals 'a' field missing
+                if tuple(eseal) == SealEvent._fields:
+                    eseal = SealEvent(**eseal)  # convert to namedtuple
+                    if seal == eseal and self.fullyWitnessed(srdr):
+                        return srdr
+        return None
 
-        .epse is named subDB instance of SerderSuber for exchange message
-            partial signature escrows. Maps key to serialized Serder of the
-            escrowed exchange message.
-            subkey 'epse.'
 
-        .epsd is named subDB instance of CesrSuber (klas=Dater) for exchange
-            message partial signature escrow datetimes. Maps key to Dater
-            timestamp of the escrowed message.
-            subkey 'epsd.'
+    # use alias here until can change everywhere for  backwards compatibility
+    findAnchoringSealEvent = fetchAllSealingEventByEventSeal  # alias
 
-        .exns is named subDB instance of SerderSuber for accepted exchange
-            messages. Maps key to serialized Serder of the exchange message.
-            subkey 'exns.'
 
-        .erpy is named subDB instance of CesrSuber (klas=Saider) as a forward
-            pointer to a provided reply message associated with an exchange
-            message.
-            subkey 'erpy.'
-            Only one value per DB key is allowed.
+    def fetchLastSealingEventByEventSeal(self, pre, seal, sn=0):
+        """
+        Search through a KEL for the last event at any sn but that contains a
+        specific anchored event seal of namedtuple SealEvent type that matches
+        the provided seal in dict form and is also fully witnessed.
+        Searches from provided sn forward (default = 0).
+        Searches only last events in KEL of pre so does not include disputed
+        and/or superseded events.
 
-        .esigs is named subDB instance of CesrIoSetSuber (klas=Siger) for
-            exchange message transferable indexed signatures.
-            subkey 'esigs.'
-            Multiple values per key.
+        Returns:
+            srdr (Serder): instance of the first event with the matching
+                           anchoring SealEvent seal,
+                        None if not found
 
-        .ecigs is named subDB instance of CatCesrIoSetSuber
-            (klas=(Verfer, Cigar)) for exchange message nontransferable
-            signatures. Maps key to (Verfer, Cigar) couples.
-            subkey 'ecigs.'
-            Multiple values per key.
+        Parameters:
+            pre (bytes|str): identifier of the KEL to search
+            seal (dict): dict form of Seal of any type SealEvent to find in anchored
+                seals list of each event
+            sn (int): beginning sn to search
 
-        .epath is named subDB instance of IoSetSuber for exchange message
-            pathed attachments.
-            subkey '.epath'
-            Multiple values per key.
+        """
+        from ..core.structing import SealEvent
 
-        .essrs is named subDB instance of CesrIoSetSuber (klas=Texter) for
-            exchange message event source records.
-            subkey '.essrs'
-            Multiple values per key.
+        if tuple(seal) != SealEvent._fields:  # wrong type of seal
+            return None
 
-        .chas is named subDB instance of CesrIoSetSuber (klas=Diger) for
-            accepted signed 12-word challenge response exn messages. Keyed by
-            prefix of signer.
-            subkey 'chas.'
-            Multiple values per key.
+        seal = SealEvent(**seal)  #convert to namedtuple
 
-        .reps is named subDB instance of CesrIoSetSuber (klas=Diger) for
-            successful signed 12-word challenge response exn messages. Keyed
-            by prefix of signer.
-            subkey 'reps.'
-            Multiple values per key.
+        for srdr in self.getEvtLastPreIter(pre=pre, sn=sn):  # no disputed or superseded
+            for eseal in srdr.seals or []:  # or [] for seals 'a' field missing
+                if tuple(eseal) == SealEvent._fields:
+                    eseal = SealEvent(**eseal)  # convert to namedtuple
+                    if seal == eseal and self.fullyWitnessed(srdr):
+                        return srdr
+        return None
 
-        .wkas is named subDB instance of IoSetKomer (schema=WellKnownAuthN)
-            for authorized well-known OOBIs.
-            subkey 'wkas.'
-            Multiple values per key.
 
-        .kdts is named subDB instance of CesrSuber (klas=Dater) mapping key
-            state SAID to ISO-8601 datetime stamp (ksn date-time stamp).
-            subkey 'kdts.'
-            Key: said (bytes).
-            Only one value per DB key is allowed.
+    def fetchLastSealingEventBySeal(self, pre, seal, sn=0):
+        """Only searches last event at any sn therefore does not search
+        any disputed or superseded events.
+        Search through last event at each sn in KEL for the event that contains
+        an anchored Seal with same Seal type as provided seal but in dict form.
+        Searches from sn forward (default = 0).
+        Returns the Serder of the first found event with the anchored Seal seal,
+            None if not found
 
-        .ksns is named subDB instance of Komer (schema=KeyStateRecord) for
-            key state notice messages. Maps key state SAID to KeyStateRecord.
-            Use .kdts for associated datetimes and signatures.
-            subkey 'ksns.'
+        Parameters:
+            pre (bytes|str): identifier of the KEL to search
+            seal (dict): dict form of Seal of any type to find in anchored
+                seals list of each event
+            sn (int): beginning sn to search
 
-        .knas is named subDB instance of CesrSuber (klas=Diger) for key state
-            SAID records of successfully saved key state notices. Maps
-            (prefix, aid) to SAID of key state.
-            subkey 'knas.'
-            Only one value per DB key is allowed.
+        """
+        # create generic Seal namedtuple class using keys from provided seal dict
+        Seal = namedtuple('Seal', list(seal))  # matching type
 
-        .wwas is named subDB instance of CesrSuber (klas=Diger) for watcher
-            watched SAID records. Maps (cid, aid, oid) to SAID of the reply
-            message for successfully saved watched AIDs.
-            subkey 'wwas.'
-            Only one value per DB key is allowed.
+        for srdr in self.getEvtLastPreIter(pre=pre, sn=sn):  # only last evt at sn
+            for eseal in srdr.seals or []:  # or [] for seals 'a' field missing
+                if tuple(eseal) == Seal._fields:  # same type of seal
+                    eseal = Seal(**eseal)  #convert to namedtuple
+                    if seal == eseal and self.fullyWitnessed(srdr):
+                        return srdr
+        return None
 
-        .oobis is named subDB instance of Komer (schema=OobiRecord, sep='>')
-            for configured OOBIs to be processed asynchronously. Keyed by
-            OOBI URL. sep='>' prevents splitting as '>' is not valid in URLs.
-            subkey 'oobis.'
 
-        .eoobi is named subDB instance of Komer (schema=OobiRecord, sep='>')
-            for OOBIs that failed to load and are pending retry. Keyed by
-            OOBI URL.
-            subkey 'eoobi.'
+    def signingMembers(self, pre: str):
+        """ Find signing members of a multisig group aid.
 
-        .coobi is named subDB instance of Komer (schema=OobiRecord, sep='>')
-            for OOBIs with outstanding client requests. Keyed by OOBI URL.
-            subkey 'coobi.'
+        Using the pubs index to find members of a signing group
 
-        .roobi is named subDB instance of Komer (schema=OobiRecord, sep='>')
-            for resolved OOBIs that have been successfully processed. Keyed
-            by OOBI URL.
-            subkey 'roobi.'
+        Parameters:
+            pre (str): qb64 identifier prefix to find members
 
-        .woobi is named subDB instance of Komer (schema=OobiRecord, sep='>')
-            for well-known OOBIs used for MFA against a resolved OOBI. Keyed
-            by OOBI URL.
-            subkey 'woobi.'
+        Returns:
+            list: qb64 identifier prefixes of signing members for provided aid
 
-        .moobi is named subDB instance of Komer (schema=OobiRecord, sep='>')
-            for multifactor well-known OOBI records. Keyed by OOBI URL.
-            subkey 'moobi.'
+        """
+        if (habord := self.habs.get(keys=(pre,))) is None:
+            return None
 
-        .mfa is named subDB instance of Komer (schema=OobiRecord, sep='>')
-            for multifactor well-known OOBI auth records pending processing.
-            Keyed by controller URL.
-            subkey 'mfa.'
+        return habord.smids
 
-        .rmfa is named subDB instance of Komer (schema=OobiRecord, sep='>')
-            for resolved multifactor well-known OOBI auth records. Keyed by
-            controller URL.
-            subkey 'rmfa.'
 
-        .schema is named subDB instance of SchemerSuber storing JSON schema
-            SADs keyed by SAID of the schema.
-            subkey 'schema.'
+    def rotationMembers(self, pre: str):
+        """ Find rotation members of a multisig group aid.
 
-        .cfld is named subDB instance of Suber for contact field values for
-            remote identifiers. Keyed by prefix/field.
-            subkey 'cfld.'
+        Using the digs index to lookup member pres of a group aid
 
-        .hbys is named subDB instance of Suber for global settings of the
-            Habery environment.
-            subkey 'hbys.'
+        Parameters:
+            pre (str): qb64 identifier prefix to find members
 
-        .cons is named subDB instance of Suber for signed contact data. Keyed
-            by prefix.
-            subkey 'cons.'
+        Returns:
+            list: qb64 identifier prefixes of rotation members for provided aid
+        """
+        if (habord := self.habs.get(keys=(pre,))) is None:
+            return None
 
-        .ccigs is named subDB instance of CesrSuber (klas=Cigar) for
-            transferable signatures on contact data. Keyed by prefix.
-            subkey 'ccigs.'
-            Only one value per DB key is allowed.
+        return habord.rmids
 
-        .imgs is raw LMDB sub database for chunked image data for contact
-            information of remote identifiers. Keyed by prefix/chunk-index.
-            subkey b'imgs.'
-            Raw bytes values; accessed directly via env.open_db.
 
-        .ifld is named subDB instance of Suber for identifier field values
-            for local identifiers. Keyed by prefix/field.
-            subkey 'ifld.'
+    def fullyWitnessed(self, serder):
+        """ Verify the witness threshold on the event
 
-        .sids is named subDB instance of Suber for signed local identifier
-            data. Keyed by prefix.
-            subkey 'sids.'
+        Parameters:
+            serder (Serder): event serder to validate witness threshold
 
-        .icigs is named subDB instance of CesrSuber (klas=Cigar) for
-            transferable signatures on local identifier data. Keyed by prefix.
-            subkey 'icigs.'
-            Only one value per DB key is allowed.
+        Returns:
 
-        .iimgs is raw LMDB sub database for chunked image data for local
-            identifier information. Keyed by prefix/chunk-index.
-            subkey b'iimgs.'
-            Raw bytes values; accessed directly via env.open_db.
+        """
+        # Verify fully receipted, because this witness may have persisted before all receipts
+        # have been gathered if this is a witness for serder.pre
+        # get unique verified wigers and windices lists from wigers list
+        wigers = self.wigs.get(keys=(serder.preb, serder.saidb))
+        kever = self.kevers[serder.pre]
+        toad = kever.toader.num
 
-        .dpwe is named subDB instance of SerderSuber for delegated partial
-            witness escrows. Maps key to serialized Serder of the escrowed
-            delegated event.
-            subkey 'dpwe.'
+        return not len(wigers) < toad
 
-        .dune is named subDB instance of SerderSuber for delegated unanchored
-            escrows. Maps key to serialized Serder of the unanchored delegated
-            event awaiting delegation anchor.
-            subkey 'dune.'
 
-        .dpub is named subDB instance of SerderSuber for delegate publication
-            escrows used to send delegator info to the delegate's witnesses.
-            subkey 'dpub.'
+    def resolveVerifiers(self, pre=None, sn=0, dig=None):
+        """
+        Returns the Tholder and Verfers for the provided identifier prefix.
+        Default pre is own .pre
 
-        .cdel is named subDB instance of CesrOnSuber (klas=Diger) for
-            completed group delegated AIDs. Maps ordinal key to Diger of the
-            completed delegation event.
-            subkey 'cdel.'
+        Parameters:
+            pre(str) is qb64 str of bytes of identifier prefix.
+            sn(int) is the sequence number of the est event
+            dig(str) is qb64 str of digest of est event
 
-        .meids is named subDB instance of CesrIoSetSuber (klas=Diger) mapping
-            multisig embed payload SAID to the SAIDs of the exn messages that
-            contained it. Aggregates identical message bodies across group
-            multisig participants reaching consensus on events or credentials.
-            subkey 'meids.'
-            Multiple values per key.
+        """
+        from ..core import coring
 
-        .maids is named subDB instance of CesrIoSetSuber (klas=Prefixer)
-            mapping multisig embed payload SAID to the AIDs of the group
-            multisig participants that contributed it.
-            subkey 'maids.'
-            Multiple values per key.
+        prefixer = coring.Prefixer(qb64=pre)
+        if prefixer.transferable:
+            # receipted event and receipter in database so get receipter est evt
+            # retrieve dig of last event at sn of est evt of receipter.
+            sdig = self.kels.getLast(keys=prefixer.qb64b, on=sn)
+            if sdig is None:
+                # receipter's est event not yet in receipters's KEL
+                raise ValidationError("key event sn {} for pre {} is not yet in KEL"
+                                             "".format(sn, pre))
+            sdig = sdig.encode("utf-8")
+            # retrieve last event itself of receipter est evt from sdig
+            sserder = self.evts.get(keys=(prefixer.qb64b, bytes(sdig)))
+            # assumes db ensures that sserder must not be None because sdig was in KEL
+            if dig is not None and not sserder.compare(said=dig):  # endorser's dig not match event
+                raise ValidationError("Bad proof sig group at sn = {}"
+                                             " for ksn = {}."
+                                             "".format(sn, sserder.sad))
 
-        .kramCTYP is named subDB instance of Komer (schema=CacheTypeRecord) for
-            KRAM cache type records. Maps expression string to drift and lag
-            parameters.
-            subkey 'ctyp.'
+            verfers = sserder.verfers
+            tholder = sserder.tholder
 
-        .kramMSGC is named subDB instance of Komer (schema=MsgCacheRecord) for
-            KRAM message cache. Maps (AID, MID) to message datetime, drift,
-            and lag values.
-            subkey 'msgc.'
+        else:
+            verfers = [coring.Verfer(qb64=pre)]
+            tholder = coring.Tholder(sith="1")
 
-        .kramTMSC is named subDB instance of Komer (schema=TxnMsgCacheRecord) for
-            KRAM transactioned message cache. Maps (AID, XID, MID) to
-            datetimes, drift, and lag values.
-            subkey 'tmsc.'
+        return tholder, verfers
 
-        .kramPMKM is named subDB instance of SerderSuber for KRAM partially signed
-            multi-key messages. Maps (AID, MID) key to the associated
-            SerderKERI message.
-            subkey 'pmkm.'
 
-        .kramPMKS is named subDB instance of CesrIoSetSuber (klas=Siger) for
-            KRAM partially signed multi-key signatures. Maps (AID, MID) key
-            to associated Siger instances.
-            subkey 'pmks.'
-            Multiple values per key.
+    def getEvtPreIter(self, pre, sn=0):
+        """
+        Returns iterator of event messages without attachments
+        in sn order from the KEL of identifier prefix pre.
+        Essentially a replay of all event messages without attachments
+        for each sn from the KEL of pre including superseded duplicates
 
-        .kramPMSK is named subDB instance of CatCesrSuber (klas=(Number, Diger))
-            for KRAM partially signed multi-key sender key state records. Maps
-            (AID, MID) key to (sn, event SAID) couple identifying the sender's
-            key state.
-            subkey 'pmsk.'
-            Only one value per DB key is allowed.
+        Parameters:
+            pre (bytes|str): identifier prefix
+            sn (int): sequence number (default 0) to begin iteration
+        """
+        if hasattr(pre, 'encode'):
+            pre = pre.encode("utf-8")
 
-        .kramTRQS is named subDB instance of CatCesrIoSetSuber for KRAM partially
-            signed multi-key trans receipt quadruple attachments.
-            subkey 'trqs.'
-            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
-            Value is (Prefixer, Number, Diger, Siger) tuple. Sourced from
-            parser kwa key 'trqs'.
-            Multiple values per key stored as ordered set (duplicates ignored).
-            Entries persist until removed by the KRAM pruner.
+        for dig in self.kels.getAllIter(keys=pre, on=sn):
+            try:
+                if not (serder := self.evts.get(keys=(pre, dig))):
+                    raise MissingEntryError("Missing event for dig={}.".format(dig))
 
-        .kramTSGS is named subDB instance of CatCesrIoSetSuber for KRAM partially
-            signed multi-key trans last sig group attachments. Each group is
-            stored per-siger as a flat (Prefixer, Seqner, Saider, Siger) tuple.
-            subkey 'tsgs.'
-            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
-            Value is (Prefixer, Number, Diger, Siger) tuple. Sourced from
-            parser kwa key 'tsgs'.
-            Multiple values per key stored as ordered set (duplicates ignored).
-            Entries persist until removed by the KRAM pruner.
+            except Exception:
+                continue  # skip this event
 
-        .kramSSCS is named subDB instance of CatCesrIoSetSuber for KRAM partially
-            signed multi-key first seen seal couple attachments from issuing or
-            delegating events.
-            subkey 'sscs.'
-            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
-            Value is (Number, Diger) tuple. Sourced from parser kwa key 'sscs'.
-            Multiple values per key stored as ordered set (duplicates ignored).
-            Entries persist until removed by the KRAM pruner.
+            yield serder  # event as Serder
 
-        .kramSSTS is named subDB instance of CatCesrIoSetSuber for KRAM partially
-            signed multi-key source seal triple attachments from issued or
-            delegated events.
-            subkey 'ssts.'
-            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
-            Value is (Prefixer, Number, Diger) tuple. Sourced from parser kwa
-            key 'ssts'.
-            Multiple values per key stored as ordered set (duplicates ignored).
-            Entries persist until removed by the KRAM pruner.
 
-        .kramFRCS is named subDB instance of CatCesrIoSetSuber for KRAM partially
-            signed multi-key first seen replay couple attachments.
-            subkey 'frcs.'
-            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
-            Value is (Number, Dater) tuple. Sourced from parser kwa key 'frcs'.
-            Multiple values per key stored as ordered set (duplicates ignored).
-            Entries persist until removed by the KRAM pruner.
+    def getEvtLastPreIter(self, pre, sn=0):
+        """
+        Returns iterator of event messages without attachments
+        in sn order from the KEL of identifier prefix pre.
+        Essentially a replay of only the last event message without attachments
+        at each sn from the KEL of pre, excluding superseded duplicates
 
-        .kramTDCS is named subDB instance of CatCesrIoSetSuber for KRAM partially
-            signed multi-key typed digest seal couple attachments.
-            subkey 'tdcs.'
-            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
-            Value is (Verser, Diger) tuple. Sourced from parser kwa key 'tdcs'.
-            Multiple values per key stored as ordered set (duplicates ignored).
-            Entries persist until removed by the KRAM pruner.
+        Parameters:
+            pre (bytes|str): identifier prefix
+            sn (int): sequence number (default 0) to begin iteration
+        """
+        if hasattr(pre, 'encode'):
+            pre = pre.encode("utf-8")
 
-        .kramPTDS is named subDB instance of IoSetSuber for KRAM partially signed
-            multi-key pathed stream attachments.
-            subkey 'ptds.'
-            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
-            Value is raw bytes of pathed CESR stream. Sourced from parser kwa
-            key 'ptds'.
-            Multiple values per key stored as ordered set (duplicates ignored).
-            Entries persist until removed by the KRAM pruner.
+        for dig in self.kels.getLastIter(keys=pre, on=sn):
+            try:
 
-        .kramBSQS is named subDB instance of CatCesrIoSetSuber for KRAM partially
-            signed multi-key blind state quadruple attachments.
-            subkey 'bsqs.'
-            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
-            Value is (Diger, Noncer, Noncer, Labeler) tuple. Sourced from
-            parser kwa key 'bsqs'.
-            Multiple values per key stored as ordered set (duplicates ignored).
-            Entries persist until removed by the KRAM pruner.
+                if not (serder := self.evts.get(keys=(pre, dig) )):
+                    raise MissingEntryError("Missing event for dig={}.".format(dig))
 
-        .kramBSSS is named subDB instance of CatCesrIoSetSuber for KRAM partially
-            signed multi-key bound state sextuple attachments.
-            subkey 'bsss.'
-            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
-            Value is (Diger, Noncer, Noncer, Labeler, Number, Noncer) tuple.
-            Sourced from parser kwa key 'bsss'.
-            Multiple values per key stored as ordered set (duplicates ignored).
-            Entries persist until removed by the KRAM pruner.
+            except Exception:
+                continue  # skip this event
+
+            yield serder  # event as Serder
 
-        .kramTMQS is named subDB instance of CatCesrIoSetSuber for KRAM partially
-            signed multi-key type media quadruple attachments.
-            subkey 'tmqs.'
-            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
-            Value is (Diger, Noncer, Labeler, Texter) tuple. Sourced from
-            parser kwa key 'tmqs'.
-            Multiple values per key stored as ordered set (duplicates ignored).
-            Entries persist until removed by the KRAM pruner.
 
-    Properties:
-        kevers (statedict): read through cache of kevers of states for KELs in db
 
+def openDB(*, cls=None, name="test", **kwa):
+    """
+    Returns contextmanager generated by openLMDB with Baser as the default cls.
+    """
+    if cls is None:  # can't reference class before it is defined below
+        cls = Baser
+    return openLMDB(cls=cls, name=name, **kwa)
 
-    def __init__(self, headDirPath=None, reopen=False, **kwa):
-        """
-        Setup named sub databases.
 
-        Parameters:
-            name is str directory path name differentiator for main database
-                When system employs more than one keri database, name allows
-                differentiating each instance by name
-            temp is boolean, assign to .temp
-                True then open in temporary directory, clear on close
-                Othewise then open persistent directory, do not clear on close
-            headDirPath is optional str head directory pathname for main database
-                If not provided use default .HeadDirpath
-            mode is int numeric os dir permissions for database directory
-            reopen (bool): True means database will be reopened by this init
+@contextmanager
+def reopenDB(db, clear=False, **kwa):
+    """
+    Context manager wrapper LMDB DB instances.
+    Reopens and closes db.path and db.env LMDB
 
+    Parameters:
+        db (LMDBer): instance with LMDB environment at .env
+        clear (bool): True means clear directory after close
 
-        """
-        self.prefixes = oset()  # should change to hids for hab ids
-        self.groups = oset()  # group hab ids
-        self._kevers = statedict()
-        self._kevers.db = self  # assign db for read through cache of kevers
+    Usage:
 
-        if (mapSize := os.getenv(KERIBaserMapSizeKey)) is not None:
-            try:
-                self.MapSize = int(mapSize)
-            except ValueError:
-                logger.error("KERI_BASER_MAP_SIZE must be an integer value >1!")
-                raise
+    with reopenDB(baser) as env:
+        env.  ....
 
-        super(Baser, self).__init__(headDirPath=headDirPath, reopen=reopen, **kwa)
+    """
+    try:
+        db.reopen(clear=clear, **kwa)
+        yield db.env
 
-    @property
-    def kevers(self):
-        """
-        Returns .db.kevers
-        """
-        return self._kevers
+    finally:
+        db.close(clear=clear)
 
-    def reopen(self, **kwa):
-        """
-        Open sub databases
 
-        Notes:
+KERIBaserMapSizeKey = "KERI_BASER_MAP_SIZE"
 
-        dupsort=True for sub DB means allow unique (key,pair) duplicates at a key.
-        Duplicate means that is more than one value at a key but not a redundant
-        copies a (key,value) pair per key. In other words the pair (key,value)
-        must be unique both key and value in combination.
-        Attempting to put the same (key,value) pair a second time does
-        not add another copy.
 
-        Duplicates are inserted in lexocographic order by value, insertion order.
+class Baser(BaserBase, LMDBer):
+    """
+    Baser sets up named sub databases with Keri Event Logs within main database
 
-        """
-        from . import koming, subing
-        from ..core import coring, indexing
+    Attributes:
+        see superclass LMDBer for inherited attributes
 
-        super(Baser, self).reopen(**kwa)
+        kevers (dbdict): read-through cache of Kever instances indexed by
+            identifier prefix qb64
+        prefixes (OrderedSet): local prefixes corresponding to habitats for
+            this db
+        groups (OrderedSet): group hab identifier prefixes for this db
 
-        # Create by opening first time named sub DBs within main DB instance
-        # Names end with "." as sub DB name must include a non Base64 character
-        # to avoid namespace collisions with Base64 identifier prefixes.
+        .evts is named subDB instance of SerderSuber whose values are serialized
+            key events
+            subkey 'evts.'
+            dgKey (prefix + digest)
+            DB is keyed by identifier prefix plus digest of serialized event
+            Only one value per DB key is allowed
 
-        self.evts = subing.SerderSuber(db=self, subkey='evts.')
-        self.fels = subing.OnSuber(db=self, subkey='fels.')
-        self.kels = subing.OnIoDupSuber(db=self, subkey='kels.')
-        self.dtss = subing.CesrSuber(db=self, subkey='dtss.', klas=coring.Dater)
-        self.aess = subing.CatCesrSuber(db=self, subkey='aess.',
-                                        klas=(coring.Number, coring.Diger))
-        self.sigs = subing.CesrIoSetSuber(db=self, subkey='sigs.',
-                                        klas=(indexing.Siger))
-        self.wigs = subing.CesrIoSetSuber(db=self, subkey='wigs.', klas=indexing.Siger)
-        self.rcts = subing.CatCesrIoSetSuber(db=self, subkey="rcts.",
-                                             klas=(coring.Prefixer, coring.Cigar))
-        self.ures = subing.CatCesrIoSetSuber(db=self, subkey='ures.',
-                                             klas=(coring.Diger, coring.Prefixer, coring.Cigar))
-        self.vrcs = subing.CatCesrIoSetSuber(db=self, subkey='vrcs.',
-                             klas=(coring.Prefixer, coring.Number, coring.Diger, indexing.Siger))
-        self.vres = subing.CatCesrIoSetSuber(db=self, subkey='vres.',
-                             klas=(coring.Diger, coring.Prefixer, coring.Number, coring.Diger, indexing.Siger))
-        self.pses = subing.OnIoDupSuber(db=self, subkey='pses.')
-        self.pwes = subing.OnIoDupSuber(db=self, subkey='pwes.')
-        self.pdes = subing.OnIoDupSuber(db=self, subkey='pdes.')
-        self.udes = subing.CatCesrSuber(db=self, subkey='udes.', klas=(coring.Number, coring.Diger))
-        self.uwes = subing.B64OnIoSetSuber(db=self, subkey='uwes.')
-        self.ooes = subing.OnIoDupSuber(db=self, subkey='ooes.')
-        self.dels = subing.OnIoDupSuber(db=self, subkey='dels.')
-        self.ldes = subing.OnIoDupSuber(db=self, subkey='ldes.')
-        self.qnfs = subing.IoSetSuber(db=self, subkey="qnfs.", dupsort=True)
+        .fels is named subDB instance of OnSuber for first seen event logs (FEL)
+            as indices mapping first-seen ordinal fn to event digests.
+            Actual serialized key events are stored in .evts by SAID digest
+            Indexed in first-seen accepted order for replay and cloning.
+            subkey 'fels.'
+            Key: identifier prefix + monotonically increasing fn.
+            Value: qb64 str of event digest used to lookup event in .evts.
+            Only one value per DB key is allowed.
+            Append-only ordering of accepted first-seen events.
 
-        # events as ordered by first seen ordinals
-        self.fons = subing.CesrSuber(db=self, subkey='fons.', klas=coring.Number)
+        .kels is named subDB instance of OnIoDupSuber for key event logs as indices
+            mapping composite keys (prefix + sn) to serialized key event digests.
+            Actual serialized key events are stored in .evts by SAID digest.
+            subkey 'kels.'
+            Key: identifier prefix + sequence number.
+            Value: qb64 digest used to lookup event in .evts.
+            More than one value per DB key is allowed.
 
-        self.migs = subing.CesrSuber(db=self, subkey="migs.", klas=coring.Dater)
-        self.vers = subing.Suber(db=self, subkey="vers.")
+        .dtss is named subDB instance of CesrSuber (klas=Dater) for datetime
+            stamps of when the event was first escrowed and then later first
+            seen by log. Used for escrow timeouts and extended validation.
+            subkey 'dtss.'
+            dgKey (prefix + digest)
+            Value: Dater instance
+            Only one value per DB key is allowed.
 
-        # event source local (protected) or non-local (remote not protected)
-        self.esrs = koming.Komer(db=self,
-                                   klas=EventSourceRecord,
-                                   subkey='esrs.')
+        .aess is named subDB instance of CatCesrSuber (klas=(Number, Diger))
+            for authorizing event source seal couples that map digest of key
+            event to seal source couple of authorizer's (delegator or issuer)
+            event.
+            subkey 'aess.'
+            dgKey (prefix + digest)
+            Value: (Number, Diger) tuple; Number serialized as Huge
+            (fixed 24-char), used to lookup authorizer's source event in .kels.
+            Only one value per DB key is allowed.
 
-        # misfit escrows whose processing may change the .esrs event source record
-        self.misfits = subing.OnIoSetSuber(db=self, subkey='mfes.')
+        .sigs is named subDB instance of CesrIoSetSuber (klas=Siger) for
+            fully qualified indexed event signatures from the controller.
+            subkey 'sigs.'
+            dgKey (prefix + digest)
+            More than one value per DB key is allowed.
 
-        # delegable events escrows. events with local delegator that need approval
-        self.delegables = subing.IoSetSuber(db=self, subkey='dees.')
+        .wigs is named subDB instance of CesrIoSetSuber (klas=Siger) for
+            indexed witness signatures of events that may come directly or be
+            derived from a witness receipt message. Witnesses always have
+            nontransferable identifier prefixes. The index is the offset of
+            the witness into the witness list of the most recent establishment
+            event wrt the receipted event.
+            subkey 'wigs.'
+            dgKey (prefix + digest)
+            More than one value per DB key is allowed.
 
-        # Kever state made of KeyStateRecord key states
-        # TODO: clean
-        self.states = koming.Komer(db=self,
-                                   klas=KeyStateRecord,
-                                   subkey='stts.')
+        .rcts is named subDB instance of CatCesrIoSetSuber (klas=(Prefixer, Cigar))
+            for event receipt couplets from nontransferable signers.
+            These are endorsements from nontransferable signers who are not witnesses.
+            May be watchers or others.
+            Each entry is a (Prefixer, Cigar) duple.
+            subkey 'rcts.'
+            dgKey (prefix + digest)
+            Multiple values per key stored as ordered set (duplicates ignored,
+            insertion order preserved).
 
-        self.wits = subing.CesrIoSetSuber(db=self, subkey="wits.", klas=coring.Prefixer)
+        .ures is named subDB instance of CatCesrIoSetSuber
+            (klas=(Diger, Prefixer, Cigar)) for unverified event receipt
+            escrowed triples from nontransferable signers. Each triple is
+            (receipted event digest, receiptor prefix, receipt signature).
+            Used to escrow receipt couples until the receipted event appears.
+            subkey 'ures.'
+            snKey (prefix + sequence number)
+            More than one value per DB key is allowed.
 
-        # habitat application state keyed by habitat name, includes prefix
-        self.habs = koming.Komer(db=self,
-                                 subkey='habs.',
-                                 klas=HabitatRecord, )
-        # habitat name database mapping (domain,name) as key to Prefixer
-        self.names = subing.Suber(db=self, subkey='names.', sep="^")
+        .vrcs is named subDB instance of CatCesrIoSetSuber
+            (klas=(Prefixer, Number, Diger, Siger)) for verified transferable-
+            validator receipt quadruples. Each stored value is a typed CESR
+            tuple (Prefixer, Number, Diger, Siger) representing a validator's
+            AID, its latest establishment-event sequence number, digest, and
+            its indexed signature over the event. Values preserved in insertion
+            order. Represents fully validated receipts moved out of escrow.
+            subkey 'vrcs.'
+            dgKey (prefix + digest)
+            Multiple values per key stored as ordered set.
 
-        # SAD support datetime stamps and signatures indexed and not-indexed
-        # all sad  sdts (sad datetime serializations) maps said to date-time
-        self.sdts = subing.CesrSuber(db=self, subkey='sdts.', klas=coring.Dater)
+        .vres is named subDB instance of CatCesrIoSetSuber for escrowed
+            transferable-receipt quintuples. Each value is a typed CESR tuple
+            (Diger, Prefixer, Number, Diger, Siger) representing a validator's
+            receipt escrow entry. Holds unverified transferable receipts until
+            validated and moved into .vrcs.
+            subkey 'vres.'
+            snKey (prefix + sequence number)
+            Values stored in insertion order.
 
-        # all sad ssgs (sad indexed signature serializations) maps SAD quadkeys
-        # given by quadruple (diger.qb64, prefixer.qb64, seqner.q64, diger.qb64)
-        #  of reply and trans signer's key state est evt to val Siger for each
-        # signature.
-        self.ssgs = subing.CesrIoSetSuber(db=self, subkey='ssgs.', klas=indexing.Siger)
+        .pses is named subDB instance of OnIoDupSuber for partially-signed
+            event escrows under composite keys (prefix + sn). Tracks events
+            with at least one verified signature but not yet fully validated
+            due to missing signatures or dependent events.
+            subkey 'pses.'
+            Key: identifier prefix + sequence number.
+            Values stored in insertion order.
 
-        # all sad scgs  (sad non-indexed signature serializations) maps SAD SAID
-        # to couple (Verfer, Cigar) of nontrans signer of signature in Cigar
-        # nontrans qb64 of Prefixer is same as Verfer
-        self.scgs = subing.CatCesrIoSetSuber(db=self, subkey='scgs.',
-                                             klas=(coring.Verfer, coring.Cigar))
+        .pwes is named subDB instance of OnIoDupSuber for partially witnessed
+            key event escrows under composite keys (prefix + sn) to
+            serialized event digest. Escrows events with verified signatures
+            but not yet verified witness receipts.
+            subkey 'pwes.'
+            Key: identifier prefix + sequence number.
+            More than one value per DB key is allowed.
 
-        # all reply messages. Maps reply said to serialization. Replys are
-        # versioned sads ( with version string) so use Serder to deserialize and
-        # use  .sdts, .ssgs, and .scgs for datetimes and signatures
-        # TODO: clean
-        self.rpys = subing.SerderSuber(db=self, subkey='rpys.')
+        .pdes is named subDB instance of OnIoDupSuber for partially delegated
+            key event escrows that map prefix + sequence number to serialized
+            event digest. Used in conjunction with .udes which escrows the
+            associated seal source couple.
+            subkey 'pdes.'
+            snKey (prefix + sequence number)
+            More than one value per DB key is allowed.
 
-        # all reply escrows indices of partially signed reply messages. Maps
-        # route in reply to single (Diger,)  of escrowed reply.
-        # Routes such as /end/role  /loc/schema
-        self.rpes = subing.CesrIoSetSuber(db=self, subkey='rpes.',
-                                          klas=coring.Diger)
+        .udes is named subDB instance of CatCesrSuber (klas=(Number, Diger))
+            for unverified delegation seal source couple escrows that map
+            (prefix, digest) of delegated event to delegating seal source
+            couple (sn, dig) that provides the source delegator event seal.
+            Each couple is (Number(num=sn).qb64b, Diger.qb64b) used to lookup
+            the source event in the delegator's KEL. Once accepted, entries
+            move into .aess.
+            subkey 'udes.'
+            dgKey (prefix + digest)
+            Only one value per DB key is allowed.
 
-        # auth AuthN/AuthZ by controller at cid of endpoint provider at eid
-        # maps key=cid.role.eid to val=diger of end reply
-        self.eans = subing.CesrSuber(db=self, subkey='eans.', klas=coring.Diger)
+        .uwes is named subDB instance of B64OnIoSetSuber for unverified event
+            indexed escrowed couples from witness signers. Each couple is
+            (edig, wig) where edig is receipted event digest and wig is the
+            indexed witness signature derived from the witness nontrans prefix
+            and offset into the witness list of the latest establishment event.
+            subkey 'uwes.'
+            Key: receipted event controller prefix + sequence number.
+            Multiple values per key are stored in insertion order as a set.
 
-        # auth AuthN/AuthZ by endpoint provider at eid of location at scheme url
-        # maps key=cid.role.eid to val=diger of end reply
-        self.lans = subing.CesrSuber(db=self, subkey='lans.', klas=coring.Diger)
+        .ooes is named subDB instance of OnIoDupSuber for out-of-order event
+            escrows under composite keys (prefix + sn). Tracks events whose
+            prior event has not yet been accepted into the KEL.
+            subkey 'ooes.'
+            Key: identifier prefix + sequence number.
+            Values stored in insertion order.
 
-        # service endpoint identifier (eid) auths keyed by controller cid.role.eid
-        # data extracted from reply /end/role/add or /end/role/cut
-        self.ends = koming.Komer(db=self, subkey='ends.',
-                                 klas=EndpointRecord, )
+        .dels is named subDB instance of OnIoDupSuber for duplicitous event
+            log tables that map identifier prefix plus sequence number to
+            serialized event digests.
+            subkey 'dels.'
+            snKey (prefix + sequence number)
+            Values are qb64 digests used to lookup event in .evts.
+            More than one value per DB key is allowed (insertion ordered).
 
-        # service endpoint locations keyed by eid.scheme  (endpoint identifier)
-        # data extracted from reply loc
-        self.locs = koming.Komer(db=self,
-                                 subkey='locs.',
-                                 klas=LocationRecord, )
-        # observed oids by watcher by cid.aid.oid  (endpoint identifier)
-        # data extracted from reply loc
-        self.obvs = koming.Komer(db=self,
-                                 subkey='obvs.',
-                                 klas=ObservedRecord, )
+        .ldes is named subDB instance of OnIoDupSuber for likely duplicitous
+            escrowed event tables that map identifier prefix plus sequence
+            number to serialized event digests.
+            subkey 'ldes.'
+            snKey (prefix + sequence number)
+            Values are qb64 digests used to lookup event in .evts.
+            More than one value per DB key is allowed (insertion ordered).
 
-        # index of last retrieved message from witness mailbox
-        # TODO: clean
-        self.tops = koming.Komer(db=self,
-                                 subkey='witm.',
-                                 klas=TopicsRecord, )
+        .qnfs is named subDB instance of IoSetSuber for queued not-first-seen
+            event escrows. Maps (prefix, said) to event digest.
+            subkey 'qnfs.'
+            dupsort=True
+            More than one value per DB key is allowed.
 
-        # group partial signature escrow
-        self.gpse = subing.CatCesrIoSetSuber(db=self, subkey='gpse.',
-                                             klas=(coring.Number, coring.Diger))
+        .fons is named subDB instance of CesrSuber (klas=Number) mapping
+            prefix and digest to fn value (first seen ordinal number) of the
+            associated event. Given pre and event digest, retrieve fn here then
+            fetch event from .fels. Ensures any event looked up this way was
+            first seen at some point, even if later superseded by a recovery
+            rotation. Direct lookup in .evts could return escrowed events that
+            may never have been accepted as first seen.
+            subkey 'fons.'
+            dgKey (prefix + digest)
+            Only one value per DB key is allowed.
 
-        # group delegate escrow
-        self.gdee = subing.CatCesrIoSetSuber(db=self, subkey='gdee.',
-                                             klas=(coring.Number, coring.Diger))
+        .migs is named subDB instance of CesrSuber (klas=Dater) tracking
+            completed migrations. Maps migration module name to the Dater
+            timestamp of when it was run.
+            subkey 'migs.'
+            Key: migration name str.
+            Only one value per DB key is allowed.
 
-        # group partial witness escrow
-        self.gpwe = subing.CatCesrIoSetSuber(db=self, subkey='gdwe.',
-                                             klas=(coring.Number, coring.Diger))
+        .vers is named subDB instance of Suber storing the current database
+            schema version string.
+            subkey 'vers.'
 
-        # completed group multisig
-        # TODO: clean
-        self.cgms = subing.CesrSuber(db=self, subkey='cgms.',
-                                     klas=coring.Diger)
+        .esrs is named subDB instance of Komer (schema=EventSourceRecord)
+            tracking the source of each event. When .local is Truthy the event
+            was sourced in a protected way (generated locally or via a protected
+            path). When .local is Falsey the event was NOT sourced in a
+            protected way. The value of .local determines what validation logic
+            to run. Used to track source when processing escrows that would
+            otherwise be decoupled from the original source of the event.
+            subkey 'esrs.'
+            dgKey (prefix + digest)
+            Only one value per DB key is allowed.
 
-        # exchange message partial signature escrow
-        self.epse = subing.SerderSuber(db=self, subkey="epse.")
+        .misfits is named subDB instance of OnIoSetSuber for misfit escrows.
+            Events with remote (nonlocal) sources that are inappropriate (i.e.
+            would be dropped) unless promoted to local source via extra
+            after-the-fact authentication. Escrow processing determines if and
+            how to promote event source to local and then reprocess.
+            subkey 'mfes.'
+            snKey (prefix + sequence number)
+            Value: qb64b digest of event.
 
-        # exchange message PS escrow date time of message
-        self.epsd = subing.CesrSuber(db=self, subkey="epsd.",
-                                     klas=coring.Dater)
+        .delegables is named subDB instance of IoSetSuber for delegable event
+            escrows of key events with a local delegator that need approval.
+            Approval is via anchoring of the delegated event seal in the
+            delegator's KEL. Event source must be local. A nonlocal (remote)
+            source must first pass through .misfits and be promoted to local.
+            subkey 'dees.'
+            snKey (prefix + sequence number)
+            Value: qb64b digest of event.
 
-        # exchange messages
-        # TODO: clean
-        self.exns = subing.SerderSuber(db=self, subkey="exns.")
+        .states is named subDB instance of Komer (schema=KeyStateRecord)
+            mapping a prefix to its latest key state. Used as read-through
+            cache backing .kevers to reload Kever instances from persistent
+            storage.
+            subkey 'stts.'
+            Key: identifier prefix.
+            Only one value per DB key is allowed.
 
-        # Forward pointer to a provided reply message
-        # TODO: clean
-        self.erpy = subing.CesrSuber(db=self, subkey="erpy.", klas=coring.Saider)
+        .wits is named subDB instance of CesrIoSetSuber (klas=Prefixer)
+            storing the current witness set for an identifier.
+            subkey 'wits.'
+            Key: identifier prefix.
+            Multiple values per key (one per witness).
 
-        # exchange message signatures
-        # TODO: clean
-        self.esigs = subing.CesrIoSetSuber(db=self, subkey='esigs.', klas=indexing.Siger)
+        .habs is named subDB instance of Komer (schema=HabitatRecord) mapping
+            habitat names to habitat application state including identifier
+            prefix.
+            subkey 'habs.'
+            Key: habitat name str.
+            Only one value per DB key is allowed.
 
-        # exchange message signatures
-        # TODO: clean
-        self.ecigs = subing.CatCesrIoSetSuber(db=self, subkey='ecigs.',
-                                              klas=(coring.Verfer, coring.Cigar))
+        .names is named subDB instance of Suber (sep='^') mapping
+            (namespace, name) to identifier prefix. Provides namespace-scoped
+            name lookup for habitats.
+            subkey 'names.'
+            Key: namespace + '^' + name.
 
-        # exchange pathed attachments
-        # TODO: clean
-        self.epath = subing.IoSetSuber(db=self, subkey=".epath")
+        .sdts is named subDB instance of CesrSuber (klas=Dater) mapping SAD
+            SAID to Dater CESR serialization of ISO-8601 datetime
+            (sad date-time stamp).
+            subkey 'sdts.'
+            Key: said (bytes) of SAD.
+            Only one value per DB key is allowed.
 
-        self.essrs = subing.CesrIoSetSuber(db=self, subkey=".essrs", klas=coring.Texter)
+        .ssgs is named subDB instance of CesrIoSetSuber (klas=Siger) for SAD
+            transferable indexed signatures. Maps quadruple key
+            (diger.qb64, prefixer.qb64, number.qb64, diger.qb64) to Siger
+            of the transferable signer's signature. Diger is the SAID of the
+            SAD; prefixer, number, and diger indicate the key state
+            establishment event for the signer.
+            subkey 'ssgs.'
+            Key: join(diger.qb64b, prefixer.qb64b, number.qb64b, diger.qb64b)
+            Multiple values per key (one per signer, insertion ordered).
 
-        # accepted signed 12-word challenge response exn messages keys by prefix of signer
-        # TODO: clean
-        self.chas = subing.CesrIoSetSuber(db=self, subkey='chas.', klas=coring.Diger)
+        .scgs is named subDB instance of CatCesrIoSetSuber
+            (klas=(Verfer, Cigar)) for SAD nontransferable signatures. Maps
+            SAD SAID to (Verfer, Cigar) couple for each nontransferable signer.
+            For nontransferable signers, qb64 of Verfer equals Prefixer.
+            subkey 'scgs.'
+            Key: said (bytes) of SAD.
+            Multiple values per key (one per nontransferable signer, insertion
+            ordered).
+
+        .rpys is named subDB instance of SerderSuber for reply messages. Maps
+            reply SAID to serialization of the reply message (versioned SAD).
+            Use .sdts, .ssgs, and .scgs for associated datetimes and
+            signatures.
+            subkey 'rpys.'
+            Key: said bytes.
+            Only one value per DB key is allowed.
 
-        # successfull signed 12-word challenge response exn messages keys by prefix of signer
-        # TODO: clean
-        self.reps = subing.CesrIoSetSuber(db=self, subkey='reps.', klas=coring.Diger)
+        .rpes is named subDB instance of CesrIoSetSuber (klas=Diger) for
+            reply escrows. Maps reply route to Diger of the escrowed reply
+            message. Routes such as '/end/role' and '/loc/scheme'.
+            subkey 'rpes.'
+            Key: route bytes.
+            Multiple values per key.
 
-        # authorzied well known OOBIs
-        # TODO: clean
-        self.wkas = koming.IoSetKomer(db=self, subkey='wkas.', klas=WellKnownAuthN)
+        .eans is named subDB instance of CesrSuber (klas=Diger) for endpoint
+            role authorizations. Maps cid.role.eid to SAID of the reply SAD
+            that authN by controller cid authZ endpoint provider eid in the
+            given role. Routes /end/role/add and /end/role/cut to nullify.
+            subkey 'eans.'
+            Key: cid.role.eid.
+            Only one value per DB key is allowed.
 
-        # KSN support datetime stamps and signatures indexed and not-indexed
-        # all ksn  kdts (key state datetime serializations) maps said to date-time
-        # TODO: clean
-        self.kdts = subing.CesrSuber(db=self, subkey='kdts.', klas=coring.Dater)
+        .lans is named subDB instance of CesrSuber (klas=Diger) for location
+            authorizations. Maps eid.scheme to SAID of the reply SAD that
+            authN by endpoint provider eid designates scheme URL.
+            Route /loc/scheme; null URL nullifies.
+            subkey 'lans.'
+            Key: eid.scheme.
+            Only one value per DB key is allowed.
 
-        # all key state messages. Maps key state said to serialization. ksns are
-        # KeyStateRecords so use ._asdict or ._asjson as appropriate
-        # use  .kdts, .ksgs, and .kcgs for datetimes and signatures
-        # TODO: clean
-        self.ksns = koming.Komer(db=self,
-                                klas=KeyStateRecord,
-                                subkey='ksns.')
+        .ends is named subDB instance of Komer (schema=EndpointRecord) mapping
+            (cid, role, eid) to EndpointRecord attributes about endpoint
+            authorization. cid is controller prefix, role is endpoint role
+            (e.g. watcher), eid is controller prefix of endpoint provider.
+            Data extracted from reply /end/role/add or /end/role/cut.
+            subkey 'ends.'
+            Key: cid.role.eid.
 
-        # key state SAID database for successfully saved key state notices
-        # maps key=(prefix, aid) to val=said of key state
-        # TODO: clean
-        self.knas = subing.CesrSuber(db=self, subkey='knas.', klas=coring.Diger)
+        .locs is named subDB instance of Komer (schema=LocationRecord) mapping
+            endpoint prefix eid and network location scheme to endpoint
+            location details. Data extracted from reply /loc/scheme.
+            subkey 'locs.'
+            Key: eid.scheme.
 
-        # Watcher watched SAID database for successfully saved watched AIDs for a watcher
-        # maps key=(cid, aid, oid) to val=said of rpy message
-        # TODO: clean
-        self.wwas = subing.CesrSuber(db=self, subkey='wwas.', klas=coring.Diger)
+        .obvs is named subDB instance of Komer (schema=ObservedRecord) for
+            observed OIDs by watcher. Maps (cid, aid, oid) to ObservedRecord.
+            subkey 'obvs.'
+            Key: cid.aid.oid.
 
-        # config loaded oobis to be processed asynchronously, keyed by oobi URL
-        # TODO: clean
-        self.oobis = koming.Komer(db=self,
-                                  subkey='oobis.',
-                                  klas=OobiRecord,
-                                  sep=">")  # Use seperator not allowed in URLs so no splitting occurs.
+        .tops is named subDB instance of Komer (schema=TopicsRecord) mapping
+            witness identifier prefix to the topic index of the last received
+            mailbox message.
+            subkey 'witm.'
+            Key: witness prefix identifier.
 
-        # escrow OOBIs that failed to load, retriable, keyed by oobi URL
-        self.eoobi = koming.Komer(db=self,
-                                  subkey='eoobi.',
-                                  klas=OobiRecord,
-                                  sep=">")  # Use seperator not allowed in URLs so no splitting occurs.
+        .gpse is named subDB instance of CatCesrIoSetSuber
+            (klas=(Number, Diger)) for group multisig partial signature
+            escrows.
+            subkey 'gpse.'
+            Multiple values per key.
 
-        # OOBIs with outstand client requests.
-        self.coobi = koming.Komer(db=self,
-                                  subkey='coobi.',
-                                  klas=OobiRecord,
-                                  sep=">")  # Use seperator not allowed in URLs so no splitting occurs.
+        .gdee is named subDB instance of CatCesrIoSetSuber
+            (klas=(Number, Diger)) for group multisig delegate escrows.
+            subkey 'gdee.'
+            Multiple values per key.
 
-        # Resolved OOBIs (those that have been processed successfully for this database.
-        # TODO: clean
-        self.roobi = koming.Komer(db=self,
-                                  subkey='roobi.',
-                                  klas=OobiRecord,
-                                  sep=">")  # Use seperator not allowed in URLs so no splitting occurs.
+        .gpwe is named subDB instance of CatCesrIoSetSuber
+            (klas=(Number, Diger)) for group multisig partial witness escrows.
+            subkey 'gdwe.'
+            Multiple values per key.
 
-        # Well known OOBIs that are to be used for mfa against a resolved OOBI.
-        # TODO: clean
-        self.woobi = koming.Komer(db=self,
-                                  subkey='woobi.',
-                                  klas=OobiRecord,
-                                  sep=">")  # Use seperator not allowed in URLs so no splitting occurs.
+        .cgms is named subDB instance of CesrSuber (klas=Diger) for completed
+            group multisig events. Maps key to Diger of completed event.
+            subkey 'cgms.'
+            Only one value per DB key is allowed.
 
-        # Well known OOBIs that are to be used for mfa against a resolved OOBI.
-        # TODO: clean
-        self.moobi = koming.Komer(db=self,
-                                  subkey='moobi.',
-                                  klas=OobiRecord,
-                                  sep=">")  # Use seperator not allowed in URLs so no splitting occurs.
+        .epse is named subDB instance of SerderSuber for exchange message
+            partial signature escrows. Maps key to serialized Serder of the
+            escrowed exchange message.
+            subkey 'epse.'
 
-        # Multifactor well known OOBI auth records to process.  Keys by controller URL
-        # TODO: clean
-        self.mfa = koming.Komer(db=self,
-                                subkey='mfa.',
-                                klas=OobiRecord,
-                                sep=">")  # Use seperator not allowed in URLs so no splitting occurs.
+        .epsd is named subDB instance of CesrSuber (klas=Dater) for exchange
+            message partial signature escrow datetimes. Maps key to Dater
+            timestamp of the escrowed message.
+            subkey 'epsd.'
 
-        # Resolved multifactor well known OOBI auth records.  Keys by controller URL
-        # TODO: clean
-        self.rmfa = koming.Komer(db=self,
-                                 subkey='rmfa.',
-                                 klas=OobiRecord,
-                                 sep=">")  # Use seperator not allowed in URLs so no splitting occurs.
+        .exns is named subDB instance of SerderSuber for accepted exchange
+            messages. Maps key to serialized Serder of the exchange message.
+            subkey 'exns.'
 
-        # JSON schema SADs keys by the SAID
-        # TODO: clean
-        self.schema = subing.SchemerSuber(db=self,
-                                          subkey='schema.')
+        .erpy is named subDB instance of CesrSuber (klas=Saider) as a forward
+            pointer to a provided reply message associated with an exchange
+            message.
+            subkey 'erpy.'
+            Only one value per DB key is allowed.
 
-        # Field values for contact information for remote identifiers.  Keyed by prefix/field
-        # TODO: clean
-        self.cfld = subing.Suber(db=self,
-                                 subkey="cfld.")
+        .esigs is named subDB instance of CesrIoSetSuber (klas=Siger) for
+            exchange message transferable indexed signatures.
+            subkey 'esigs.'
+            Multiple values per key.
 
-        # Global settings for the Habery environment
-        self.hbys = subing.Suber(db=self, subkey='hbys.')
+        .ecigs is named subDB instance of CatCesrIoSetSuber
+            (klas=(Verfer, Cigar)) for exchange message nontransferable
+            signatures. Maps key to (Verfer, Cigar) couples.
+            subkey 'ecigs.'
+            Multiple values per key.
 
-        # Signed contact data, keys by prefix
-        # TODO: clean
-        self.cons = subing.Suber(db=self,
-                                 subkey="cons.")
+        .epath is named subDB instance of IoSetSuber for exchange message
+            pathed attachments.
+            subkey 'epath.'
+            Multiple values per key.
 
-        # Transferable signatures on contact data
-        # TODO: clean
-        self.ccigs = subing.CesrSuber(db=self, subkey='ccigs.', klas=coring.Cigar)
+        .essrs is named subDB instance of CesrIoSetSuber (klas=Texter) for
+            exchange message event source records.
+            subkey 'essrs.'
+            Multiple values per key.
 
-        # Blinded media for contact information for remote identifiers.
-        # CatCesrSuber with TypeMedia format: (Noncer=SAID, Noncer=UUID, Labeler=MIME, Texter=data)
-        self.imgs = subing.CatCesrSuber(db=self, subkey='imgs.',
-                                         klas=(coring.Noncer, coring.Noncer,
-                                               coring.Labeler, coring.Texter))
+        .chas is named subDB instance of CesrIoSetSuber (klas=Diger) for
+            accepted signed 12-word challenge response exn messages. Keyed by
+            prefix of signer.
+            subkey 'chas.'
+            Multiple values per key.
 
-        # Field values for identifier information for local identifiers. Keyed by prefix/field
-        # TODO: clean
-        self.ifld = subing.Suber(db=self,
-                                 subkey="ifld.")
+        .reps is named subDB instance of CesrIoSetSuber (klas=Diger) for
+            successful signed 12-word challenge response exn messages. Keyed
+            by prefix of signer.
+            subkey 'reps.'
+            Multiple values per key.
 
-        # Signed identifier data, keys by prefix
-        # TODO: clean
-        self.sids = subing.Suber(db=self,
-                                  subkey="sids.")
+        .wkas is named subDB instance of IoSetKomer (schema=WellKnownAuthN)
+            for authorized well-known OOBIs.
+            subkey 'wkas.'
+            Multiple values per key.
 
-        # Transferable signatures on identifier data
-        # TODO: clean
-        self.icigs = subing.CesrSuber(db=self, subkey='icigs.', klas=coring.Cigar)
+        .kdts is named subDB instance of CesrSuber (klas=Dater) mapping key
+            state SAID to ISO-8601 datetime stamp (ksn date-time stamp).
+            subkey 'kdts.'
+            Key: said (bytes).
+            Only one value per DB key is allowed.
 
-        # Blinded media for identifier information for local identifiers.
-        # CatCesrSuber with TypeMedia format: (Noncer=SAID, Noncer=UUID, Labeler=MIME, Texter=data)
-        self.iimgs = subing.CatCesrSuber(db=self, subkey='iimgs.',
-                                          klas=(coring.Noncer, coring.Noncer,
-                                                coring.Labeler, coring.Texter))
+        .ksns is named subDB instance of Komer (schema=KeyStateRecord) for
+            key state notice messages. Maps key state SAID to KeyStateRecord.
+            Use .kdts for associated datetimes and signatures.
+            subkey 'ksns.'
 
-        # Delegation escrow dbs #
-        # delegated partial witness escrow
-        self.dpwe = subing.SerderSuber(db=self, subkey='dpwe.')
+        .knas is named subDB instance of CesrSuber (klas=Diger) for key state
+            SAID records of successfully saved key state notices. Maps
+            (prefix, aid) to SAID of key state.
+            subkey 'knas.'
+            Only one value per DB key is allowed.
 
-        # delegated unanchored escrow
-        self.dune = subing.SerderSuber(db=self, subkey='dune.')
+        .wwas is named subDB instance of CesrSuber (klas=Diger) for watcher
+            watched SAID records. Maps (cid, aid, oid) to SAID of the reply
+            message for successfully saved watched AIDs.
+            subkey 'wwas.'
+            Only one value per DB key is allowed.
 
-        # delegate publication escrow for sending delegator info to my witnesses
-        self.dpub = subing.SerderSuber(db=self, subkey='dpub.')
+        .oobis is named subDB instance of Komer (schema=OobiRecord, sep='>')
+            for configured OOBIs to be processed asynchronously. Keyed by
+            OOBI URL. sep='>' prevents splitting as '>' is not valid in URLs.
+            subkey 'oobis.'
 
-        # completed group delegated AIDs
-        # TODO: clean
-        self.cdel = subing.CesrOnSuber(db=self, subkey='cdel.',
-                                     klas=coring.Diger)
+        .eoobi is named subDB instance of Komer (schema=OobiRecord, sep='>')
+            for OOBIs that failed to load and are pending retry. Keyed by
+            OOBI URL.
+            subkey 'eoobi.'
 
-        # multisig sig embed payload SAID mapped to containing exn messages across group multisig participants
-        # TODO: clean
-        self.meids = subing.CesrIoSetSuber(db=self, subkey="meids.", klas=coring.Diger)
+        .coobi is named subDB instance of Komer (schema=OobiRecord, sep='>')
+            for OOBIs with outstanding client requests. Keyed by OOBI URL.
+            subkey 'coobi.'
 
-        # multisig sig embed payload SAID mapped to group multisig participants AIDs
-        # TODO: clean
-        self.maids = subing.CesrIoSetSuber(db=self, subkey="maids.", klas=coring.Prefixer)
+        .roobi is named subDB instance of Komer (schema=OobiRecord, sep='>')
+            for resolved OOBIs that have been successfully processed. Keyed
+            by OOBI URL.
+            subkey 'roobi.'
 
-        # KRAM cache type — key: expression string, value: drift and lag params
-        self.kramCTYP = koming.Komer(db=self, subkey='ctyp.',
-                                 klas=CacheTypeRecord)
+        .woobi is named subDB instance of Komer (schema=OobiRecord, sep='>')
+            for well-known OOBIs used for MFA against a resolved OOBI. Keyed
+            by OOBI URL.
+            subkey 'woobi.'
 
-        # KRAM message cache — key: (AID, MID), value: msg datetime, drift, lags
-        self.kramMSGC = koming.Komer(db=self, subkey='msgc.',
-                                 klas=MsgCacheRecord)
+        .moobi is named subDB instance of Komer (schema=OobiRecord, sep='>')
+            for multifactor well-known OOBI records. Keyed by OOBI URL.
+            subkey 'moobi.'
 
-        # KRAM transactioned message cache — key: (AID, XID, MID), value: datetimes, drift, lags
-        self.kramTMSC = koming.Komer(db=self, subkey='tmsc.',
-                                 klas=TxnMsgCacheRecord)
+        .mfa is named subDB instance of Komer (schema=OobiRecord, sep='>')
+            for multifactor well-known OOBI auth records pending processing.
+            Keyed by controller URL.
+            subkey 'mfa.'
 
-        # KRAM partially signed multi-key message key (AID.MID) mapped to associated message (SerderKERI)
-        self.kramPMKM = subing.SerderSuber(db=self, subkey='pmkm.')
+        .rmfa is named subDB instance of Komer (schema=OobiRecord, sep='>')
+            for resolved multifactor well-known OOBI auth records. Keyed by
+            controller URL.
+            subkey 'rmfa.'
 
-        # KRAM partially signed multi-key signature key (AID.MID) mapped to associated signatures
-        self.kramPMKS = subing.CesrIoSetSuber(db=self, subkey='pmks.', klas=indexing.Siger)
+        .schema is named subDB instance of SchemerSuber storing JSON schema
+            SADs keyed by SAID of the schema.
+            subkey 'schema.'
 
-        # KRAM partially signed multi-key sender key state key (AID.MID) mapped to SN and event SAID
-        self.kramPMSK = subing.CatCesrSuber(db=self, subkey='pmsk.', klas=(coring.Number, coring.Diger))
+        .cfld is named subDB instance of Suber for contact field values for
+            remote identifiers. Keyed by prefix/field.
+            subkey 'cfld.'
 
-        # KRAM partially signed multi-key non-authenticator attachments
+        .hbys is named subDB instance of Suber for global settings of the
+            Habery environment.
+            subkey 'hbys.'
 
-        # trqs: trans receipt quadruples (prefixer, number, diger, siger)
-        self.kramTRQS = subing.CatCesrIoSetSuber(db=self, subkey='trqs.',
-                                                  klas=(coring.Prefixer, coring.Number,
-                                                        coring.Diger, indexing.Siger))
+        .cons is named subDB instance of Suber for signed contact data. Keyed
+            by prefix.
+            subkey 'cons.'
 
-        # tsgs: trans last sig groups (prefixer, number, diger, siger) — stored per-siger
-        self.kramTSGS = subing.CatCesrIoSetSuber(db=self, subkey='tsgs.',
-                                                  klas=(coring.Prefixer, coring.Number,
-                                                        coring.Diger, indexing.Siger))
+        .ccigs is named subDB instance of CesrSuber (klas=Cigar) for
+            transferable signatures on contact data. Keyed by prefix.
+            subkey 'ccigs.'
+            Only one value per DB key is allowed.
 
-        # sscs: first seen seal couples (number, diger) issuing or delegating
-        self.kramSSCS = subing.CatCesrIoSetSuber(db=self, subkey='sscs.',
-                                                  klas=(coring.Number, coring.Diger))
+        .imgs is raw LMDB sub database for chunked image data for contact
+            information of remote identifiers. Keyed by prefix/chunk-index.
+            subkey b'imgs.'
+            Raw bytes values; accessed directly via env.open_db.
 
-        # ssts: source seal triples (prefixer, number, diger) issued or delegated
-        self.kramSSTS = subing.CatCesrIoSetSuber(db=self, subkey='ssts.',
-                                                  klas=(coring.Prefixer, coring.Number,
-                                                        coring.Diger))
+        .ifld is named subDB instance of Suber for identifier field values
+            for local identifiers. Keyed by prefix/field.
+            subkey 'ifld.'
 
-        # frcs: first seen replay couples (number, dater)
-        self.kramFRCS = subing.CatCesrIoSetSuber(db=self, subkey='frcs.',
-                                                  klas=(coring.Number, coring.Dater))
+        .sids is named subDB instance of Suber for signed local identifier
+            data. Keyed by prefix.
+            subkey 'sids.'
 
-        # tdcs: typed digest seal couples (verser, diger)
-        self.kramTDCS = subing.CatCesrIoSetSuber(db=self, subkey='tdcs.',
-                                                  klas=(coring.Verser, coring.Diger))
+        .icigs is named subDB instance of CesrSuber (klas=Cigar) for
+            transferable signatures on local identifier data. Keyed by prefix.
+            subkey 'icigs.'
+            Only one value per DB key is allowed.
 
-        # ptds: pathed streams (raw bytes)
-        self.kramPTDS = subing.IoSetSuber(db=self, subkey='ptds.')
+        .iimgs is raw LMDB sub database for chunked image data for local
+            identifier information. Keyed by prefix/chunk-index.
+            subkey b'iimgs.'
+            Raw bytes values; accessed directly via env.open_db.
 
-        # bsqs: blind state quadruples (diger, noncer, noncer, labeler)
-        self.kramBSQS = subing.CatCesrIoSetSuber(db=self, subkey='bsqs.',
-                                                  klas=(coring.Diger, coring.Noncer,
-                                                        coring.Noncer, coring.Labeler))
+        .dpwe is named subDB instance of SerderSuber for delegated partial
+            witness escrows. Maps key to serialized Serder of the escrowed
+            delegated event.
+            subkey 'dpwe.'
 
-        # bsss: bound state sextuples (diger, noncer, noncer, labeler, number, noncer)
-        self.kramBSSS = subing.CatCesrIoSetSuber(db=self, subkey='bsss.',
-                                                  klas=(coring.Diger, coring.Noncer,
-                                                        coring.Noncer, coring.Labeler,
-                                                        coring.Number, coring.Noncer))
+        .dune is named subDB instance of SerderSuber for delegated unanchored
+            escrows. Maps key to serialized Serder of the unanchored delegated
+            event awaiting delegation anchor.
+            subkey 'dune.'
 
-        # tmqs: type media quadruples (diger, noncer, labeler, texter)
-        self.kramTMQS = subing.CatCesrIoSetSuber(db=self, subkey='tmqs.',
-                                                  klas=(coring.Diger, coring.Noncer,
-                                                        coring.Labeler, coring.Texter))
+        .dpub is named subDB instance of SerderSuber for delegate publication
+            escrows used to send delegator info to the delegate's witnesses.
+            subkey 'dpub.'
 
-        self.reload()
+        .cdel is named subDB instance of CesrOnSuber (klas=Diger) for
+            completed group delegated AIDs. Maps ordinal key to Diger of the
+            completed delegation event.
+            subkey 'cdel.'
 
-        return self.env
+        .meids is named subDB instance of CesrIoSetSuber (klas=Diger) mapping
+            multisig embed payload SAID to the SAIDs of the exn messages that
+            contained it. Aggregates identical message bodies across group
+            multisig participants reaching consensus on events or credentials.
+            subkey 'meids.'
+            Multiple values per key.
 
-    def reload(self):
-        """
-        Reload stored prefixes and Kevers from .habs
+        .maids is named subDB instance of CesrIoSetSuber (klas=Prefixer)
+            mapping multisig embed payload SAID to the AIDs of the group
+            multisig participants that contributed it.
+            subkey 'maids.'
+            Multiple values per key.
 
-        """
-        # Check migrations to see if this database is up to date.  Error otherwise
-        if not self.current:
-            raise DatabaseError(f"Database migrations must be run. DB version {self.version}; current {__version__}")
+        .kramCTYP is named subDB instance of Komer (schema=CacheTypeRecord) for
+            KRAM cache type records. Maps expression string to drift and lag
+            parameters.
+            subkey 'ctyp.'
 
-        removes = []
-        for keys, data in self.habs.getTopItemIter():
-            if (ksr := self.states.get(keys=data.hid)) is not None:
-                try:
-                    from ..core.eventing import Kever
-                    kever = Kever(state=ksr,
-                                           db=self,
-                                           local=True)
-                except MissingEntryError as ex:  # no kel event for keystate
-                    removes.append(keys)  # remove from .habs
-                    continue
-                self.kevers[kever.prefixer.qb64] = kever
-                self.prefixes.add(kever.prefixer.qb64)
-                if data.mid:  # group hab
-                    self.groups.add(data.hid)
+        .kramMSGC is named subDB instance of Komer (schema=MsgCacheRecord) for
+            KRAM message cache. Maps (AID, MID) to message datetime, drift,
+            and lag values.
+            subkey 'msgc.'
 
-            elif data.mid is None:  # in .habs but no corresponding key state and not a group so remove
-                removes.append(keys)  # no key state or KEL event for .hab record
+        .kramTMSC is named subDB instance of Komer (schema=TxnMsgCacheRecord) for
+            KRAM transactioned message cache. Maps (AID, XID, MID) to
+            datetimes, drift, and lag values.
+            subkey 'tmsc.'
 
-        for keys in removes:  # remove bare .habs records
-            self.habs.rem(keys=keys)
+        .kramPMKM is named subDB instance of SerderSuber for KRAM partially signed
+            multi-key messages. Maps (AID, MID) key to the associated
+            SerderKERI message.
+            subkey 'pmkm.'
 
-    def migrate(self):
-        """ Run all migrations required
+        .kramPMKS is named subDB instance of CesrIoSetSuber (klas=Siger) for
+            KRAM partially signed multi-key signatures. Maps (AID, MID) key
+            to associated Siger instances.
+            subkey 'pmks.'
+            Multiple values per key.
 
-        Run all migrations  that are required from the current version of database up to the current version
-         of the software that have not already been run.
+        .kramPMSK is named subDB instance of CatCesrSuber (klas=(Number, Diger))
+            for KRAM partially signed multi-key sender key state records. Maps
+            (AID, MID) key to (sn, event SAID) couple identifying the sender's
+            key state.
+            subkey 'pmsk.'
+            Only one value per DB key is allowed.
 
-         Sets the version of the database to the current version of the software after successful completion
-         of required migrations
+        .kramTRQS is named subDB instance of CatCesrIoSetSuber for KRAM partially
+            signed multi-key trans receipt quadruple attachments.
+            subkey 'trqs.'
+            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
+            Value is (Prefixer, Number, Diger, Siger) tuple. Sourced from
+            parser kwa key 'trqs'.
+            Multiple values per key stored as ordered set (duplicates ignored).
+            Entries persist until removed by the KRAM pruner.
 
-        """
-        from ..core import coring
+        .kramTSGS is named subDB instance of CatCesrIoSetSuber for KRAM partially
+            signed multi-key trans last sig group attachments. Each group is
+            stored per-siger as a flat (Prefixer, Number, Diger, Siger) tuple.
+            subkey 'tsgs.'
+            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
+            Value is (Prefixer, Number, Diger, Siger) tuple. Sourced from
+            parser kwa key 'tsgs'.
+            Multiple values per key stored as ordered set (duplicates ignored).
+            Entries persist until removed by the KRAM pruner.
 
-        escrows_cleared = False
+        .kramSSCS is named subDB instance of CatCesrIoSetSuber for KRAM partially
+            signed multi-key first seen seal couple attachments from issuing or
+            delegating events.
+            subkey 'sscs.'
+            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
+            Value is (Number, Diger) tuple. Sourced from parser kwa key 'sscs'.
+            Multiple values per key stored as ordered set (duplicates ignored).
+            Entries persist until removed by the KRAM pruner.
 
-        for (version, migrations) in MIGRATIONS:
-            # Only run migration if current source code version is at or below the migration version
-            ver = semver.VersionInfo.parse(__version__)
-            ver_no_prerelease = semver.Version(ver.major, ver.minor, ver.patch)
-            if self.version is not None and semver.compare(version, str(ver_no_prerelease)) > 0:
-                print(
-                    f"Skipping migration {version} as higher than the current KERI version {__version__}")
-                continue
-            # Skip migrations already run - where version less than (-1) or equal to (0) database version
-            # Strip prerelease from DB version to avoid lexicographic comparison bugs (#820)
-            if self.version is not None and semver.compare(version, _strip_prerelease(self.version)) != 1:
-                continue
+        .kramSSTS is named subDB instance of CatCesrIoSetSuber for KRAM partially
+            signed multi-key source seal triple attachments from issued or
+            delegated events.
+            subkey 'ssts.'
+            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
+            Value is (Prefixer, Number, Diger) tuple. Sourced from parser kwa
+            key 'ssts'.
+            Multiple values per key stored as ordered set (duplicates ignored).
+            Entries persist until removed by the KRAM pruner.
 
-            # Clear all escrows before first migration to prevent old key
-            # format crashes (e.g. qnfs keys without insertion-order suffix).
-            # Uses .trim() which bypasses key parsing. See #863.
-            if not escrows_cleared:
-                self._trimAllEscrows()
-                escrows_cleared = True
+        .kramFRCS is named subDB instance of CatCesrIoSetSuber for KRAM partially
+            signed multi-key first seen replay couple attachments.
+            subkey 'frcs.'
+            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
+            Value is (Number, Dater) tuple. Sourced from parser kwa key 'frcs'.
+            Multiple values per key stored as ordered set (duplicates ignored).
+            Entries persist until removed by the KRAM pruner.
 
-            print(f"Migrating database v{self.version} --> v{version}")
-            for migration in migrations:
-                modName = f"keri.db.migrations.{migration}"
-                if self.migs.get(keys=(migration,)) is not None:
-                    continue
+        .kramTDCS is named subDB instance of CatCesrIoSetSuber for KRAM partially
+            signed multi-key typed digest seal couple attachments.
+            subkey 'tdcs.'
+            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
+            Value is (Verser, Diger) tuple. Sourced from parser kwa key 'tdcs'.
+            Multiple values per key stored as ordered set (duplicates ignored).
+            Entries persist until removed by the KRAM pruner.
 
-                mod = importlib.import_module(modName)
-                try:
-                    print(f"running migration {modName}")
-                    mod.migrate(self)
-                except Exception as e:
-                    print(f"\nAbandoning migration {migration} at version {version} with error: {e}")
-                    return
+        .kramPTDS is named subDB instance of IoSetSuber for KRAM partially signed
+            multi-key pathed stream attachments.
+            subkey 'ptds.'
+            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
+            Value is raw bytes of pathed CESR stream. Sourced from parser kwa
+            key 'ptds'.
+            Multiple values per key stored as ordered set (duplicates ignored).
+            Entries persist until removed by the KRAM pruner.
 
-                self.migs.pin(keys=(migration,), val=coring.Dater())
+        .kramBSQS is named subDB instance of CatCesrIoSetSuber for KRAM partially
+            signed multi-key blind state quadruple attachments.
+            subkey 'bsqs.'
+            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
+            Value is (Diger, Noncer, Noncer, Labeler) tuple. Sourced from
+            parser kwa key 'bsqs'.
+            Multiple values per key stored as ordered set (duplicates ignored).
+            Entries persist until removed by the KRAM pruner.
 
-            # update database version after successful migration
-            self.version = version
+        .kramBSSS is named subDB instance of CatCesrIoSetSuber for KRAM partially
+            signed multi-key bound state sextuple attachments.
+            subkey 'bsss.'
+            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
+            Value is (Diger, Noncer, Noncer, Labeler, Number, Noncer) tuple.
+            Sourced from parser kwa key 'bsss'.
+            Multiple values per key stored as ordered set (duplicates ignored).
+            Entries persist until removed by the KRAM pruner.
 
-        self.version = __version__
+        .kramTMQS is named subDB instance of CatCesrIoSetSuber for KRAM partially
+            signed multi-key type media quadruple attachments.
+            subkey 'tmqs.'
+            DB is keyed by (AID, MID): sender identifier prefix plus message SAID
+            Value is (Diger, Noncer, Labeler, Texter) tuple. Sourced from
+            parser kwa key 'tmqs'.
+            Multiple values per key stored as ordered set (duplicates ignored).
+            Entries persist until removed by the KRAM pruner.
 
-    def _trimAllEscrows(self):
-        """Trim all escrow databases via low-level .trim().
+    Properties:
+        kevers (statedict): read through cache of kevers of states for KELs in db
 
-        Safe for old key formats that would crash higher-level iterators
-        (e.g., qnfs keys without insertion-order suffix from pre-1.2.0).
-        Called at the beginning of migration per spec call guidance.
-        See: https://github.com/WebOfTrust/keripy/issues/863
-        """
-        escrows = [
-            self.ures, self.vres, self.pses, self.pwes, self.ooes,
-            self.qnfs, self.uwes, self.misfits, self.delegables,
-            self.pdes, self.udes, self.rpes, self.ldes, self.epsd,
-            self.eoobi, self.dpub, self.gpwe, self.gdee, self.dpwe,
-            self.gpse, self.epse, self.dune,
-        ]
-        total = 0
-        for escrow in escrows:
-            count = escrow.cnt()
-            if count > 0:
-                escrow.trim()
-                total += count
-        if total > 0:
-            print(f"Cleared {total} escrow entries before migration")
+    """
 
-    def clearEscrows(self):
+    def __init__(self, headDirPath=None, reopen=False, **kwa):
         """
-        Clear all escrows
+        Setup named sub databases.
+
+        Parameters:
+            name is str directory path name differentiator for main database
+                When system employs more than one keri database, name allows
+                differentiating each instance by name
+            temp is boolean, assign to .temp
+                True then open in temporary directory, clear on close
+                Otherwise then open persistent directory, do not clear on close
+            headDirPath is optional str head directory pathname for main database
+                If not provided use default .HeadDirpath
+            mode is int numeric os dir permissions for database directory
+            reopen (bool): True means database will be reopened by this init
         """
-        for escrow in [self.ures, self.vres, self.pses, self.pwes, self.ooes,
-                       self.qnfs, self.uwes,
-                       self.qnfs, self.misfits, self.delegables, self.pdes,
-                       self.udes, self.rpes, self.ldes, self.epsd, self.eoobi,
-                       self.dpub, self.gpwe, self.gdee, self.dpwe, self.gpse,
-                       self.epse, self.dune]:
-            count = escrow.cntAll()
-            escrow.trim()
-            logger.info(f"KEL: Cleared {count} escrows from ({escrow}")
 
-    @property
-    def current(self):
-        """ Current property determines if we are at the current database migration state.
+        if (mapSize := os.getenv(KERIBaserMapSizeKey)) is not None:
+            try:
+                self.MapSize = int(mapSize)
+            except ValueError:
+                logger.error("KERI_BASER_MAP_SIZE must be an integer value >1!")
+                raise
 
-         If the database version matches the library version return True
-         If the current database version is behind the current library version, check for migrations
-            - If there are migrations to run, return False
-            - If there are no migrations to run, reset database version to library version and return True
-         If the current database version is ahead of the current library version, raise exception
+        BaserBase.__init__(self)
+        LMDBer.__init__(self, headDirPath=headDirPath, reopen=reopen, **kwa)
 
-         """
-        if self.version == __version__:
-            return True
 
-        ver = semver.VersionInfo.parse(__version__)
-        ver_no_prerelease = semver.Version(ver.major, ver.minor, ver.patch)
-        # Strip prerelease from DB version to avoid lexicographic comparison bugs (#820)
-        if self.version is not None and semver.compare(_strip_prerelease(self.version), str(ver_no_prerelease)) == 1:
-            raise ConfigurationError(
-                f"Database version={self.version} is ahead of library version={__version__}")
+    def reopen(self, **kwa):
+        """
+        Open sub databases
 
-        last = MIGRATIONS[-1]
-        # If we aren't at latest version, but there are no outstanding migrations,
-        # reset version to latest (rightmost (-1) migration is latest)
-        if self.migs.get(keys=(last[1][-1],)) is not None:
-            return True
+        Notes:
 
-        # We have migrations to run
-        return False
+        dupsort=True for sub DB means allow unique (key,pair) duplicates at a key.
+        Duplicate means that there is more than one value at a key but not
+        redundant copies of a (key,value) pair. In other words the pair (key,value)
+        must be unique both key and value in combination.
+        Attempting to put the same (key,value) pair a second time does
+        not add another copy.
 
-    def complete(self, name=None):
-        """ Returns list of tuples of migrations completed with date of completion
+        Duplicates are inserted in lexicographic order by value, insertion order.
 
-        Parameters:
-            name(str): optional name of migration to check completeness
+        """
+        from . import koming, subing
+        from ..core import coring, indexing
 
-        Returns:
-            list: tuples of migration,date of completed migration names and the date of completion
+        super(Baser, self).reopen(**kwa)
 
-        """
-        migrations = []
-        if not name:
-            for version, migs in MIGRATIONS:
-                # Print entries only for migrations that have been run
-                # Strip prerelease from DB version to avoid lexicographic comparison bugs (#820)
-                if self.version is not None and semver.compare(version, _strip_prerelease(self.version)) <= 0:
-                    for mig in migs:
-                        dater = self.migs.get(keys=(mig,))
-                        migrations.append((mig, dater))
-        else:
-            for version, migs in MIGRATIONS:  # check all migrations for each version
-                if name not in migs or not self.migs.get(keys=(name,)):
-                    raise ValueError(f"No migration named {name}")
-            migrations.append((name, self.migs.get(keys=(name,))))
+        # Create by opening first time named sub DBs within main DB instance
+        # Names end with "." as sub DB name must include a non Base64 character
+        # to avoid namespace collisions with Base64 identifier prefixes.
 
-        return migrations
+        self.evts = subing.SerderSuber(db=self, subkey='evts.')
+        self.fels = subing.OnSuber(db=self, subkey='fels.')
+        self.kels = subing.OnIoDupSuber(db=self, subkey='kels.')
+        self.dtss = subing.CesrSuber(db=self, subkey='dtss.', klas=coring.Dater)
+        self.aess = subing.CatCesrSuber(db=self, subkey='aess.',
+                                        klas=(coring.Number, coring.Diger))
+        self.sigs = subing.CesrIoSetSuber(db=self, subkey='sigs.',
+                                        klas=(indexing.Siger))
+        self.wigs = subing.CesrIoSetSuber(db=self, subkey='wigs.', klas=indexing.Siger)
+        self.rcts = subing.CatCesrIoSetSuber(db=self, subkey="rcts.",
+                                             klas=(coring.Prefixer, coring.Cigar))
+        self.ures = subing.CatCesrIoSetSuber(db=self, subkey='ures.',
+                                             klas=(coring.Diger, coring.Prefixer, coring.Cigar))
+        self.vrcs = subing.CatCesrIoSetSuber(db=self, subkey='vrcs.',
+                             klas=(coring.Prefixer, coring.Number, coring.Diger, indexing.Siger))
+        self.vres = subing.CatCesrIoSetSuber(db=self, subkey='vres.',
+                             klas=(coring.Diger, coring.Prefixer, coring.Number, coring.Diger, indexing.Siger))
+        self.pses = subing.OnIoDupSuber(db=self, subkey='pses.')
+        self.pwes = subing.OnIoDupSuber(db=self, subkey='pwes.')
+        self.pdes = subing.OnIoDupSuber(db=self, subkey='pdes.')
+        self.udes = subing.CatCesrSuber(db=self, subkey='udes.', klas=(coring.Number, coring.Diger))
+        self.uwes = subing.B64OnIoSetSuber(db=self, subkey='uwes.')
+        self.ooes = subing.OnIoDupSuber(db=self, subkey='ooes.')
+        self.dels = subing.OnIoDupSuber(db=self, subkey='dels.')
+        self.ldes = subing.OnIoDupSuber(db=self, subkey='ldes.')
+        self.qnfs = subing.IoSetSuber(db=self, subkey="qnfs.", dupsort=True)
 
-    def clean(self):
-        """
-        Clean database by creating re-verified cleaned cloned copy
-        and then replacing original with cleaned cloned copy
+        # events as ordered by first seen ordinals
+        self.fons = subing.CesrSuber(db=self, subkey='fons.', klas=coring.Number)
 
-        Database usage should be offline during cleaning as it will be cloned in
-        readonly mode
+        self.migs = subing.CesrSuber(db=self, subkey="migs.", klas=coring.Dater)
+        self.vers = subing.Suber(db=self, subkey="vers.")
 
-        """
-        from ..core import parsing
+        # event source local (protected) or non-local (remote not protected)
+        self.esrs = koming.Komer(db=self,
+                                   klas=EventSourceRecord,
+                                   subkey='esrs.')
 
-        # create copy to clone into
-        with openDB(name=self.name,
-                    temp=False,
-                    headDirPath=self.headDirPath,
-                    perm=self.perm,
-                    clean=True) as copy:  # copy is Baser instance
+        # misfit escrows whose processing may change the .esrs event source record
+        self.misfits = subing.OnIoSetSuber(db=self, subkey='mfes.')
 
-            with reopenDB(db=self, reuse=True, readonly=True):  # reopen as readonly
-                if not os.path.exists(self.path):
-                    raise ValueError("Error while cleaning, no orig at {}."
-                                     "".format(self.path))
-                from ..core.eventing import Kevery
-                kvy = Kevery(db=copy)  # promiscuous mode
+        # delegable events escrows. events with local delegator that need approval
+        self.delegables = subing.IoSetSuber(db=self, subkey='dees.')
 
-                # Revise in future to NOT parse msgs but to extract the processed
-                # objects so can pass directly to kvy.processEvent()
-                # need new method cloneObjAllPreIter()
-                # process event doesn't capture exceptions so we can more easily
-                # detect in the cloning that some events did not make it through
-                psr = parsing.Parser(kvy=kvy, version=Vrsn_1_0)
-                for msg in self.cloneAllPreIter():  # clone into copy
-                    psr.parseOne(ims=msg)
+        # Kever state made of KeyStateRecord key states
+        # TODO: clean
+        self.states = koming.Komer(db=self,
+                                   klas=KeyStateRecord,
+                                   subkey='stts.')
+
+        self.wits = subing.CesrIoSetSuber(db=self, subkey="wits.", klas=coring.Prefixer)
+
+        # habitat application state keyed by habitat name, includes prefix
+        self.habs = koming.Komer(db=self,
+                                 subkey='habs.',
+                                 klas=HabitatRecord, )
+        # habitat name database mapping (domain,name) as key to Prefixer
+        self.names = subing.Suber(db=self, subkey='names.', sep="^")
 
-                # This is the list of non-set based databases that are not created as part of event processing.
-                # for now we are just copying them from self to copy without worrying about being able to
-                # reprocess them.  We need a more secure method in the future
-                unsecured = ["hbys", "schema", "states", "rpys", "eans", "tops", "cgms", "exns", "erpy",
-                             "kdts", "ksns", "knas", "oobis", "roobi", "woobi", "moobi", "mfa", "rmfa",
-                             "cfld", "cons", "ccigs", "cdel", "migs",
-                             "ifld", "sids", "icigs"]
+        # SAD support datetime stamps and signatures indexed and not-indexed
+        # all sad  sdts (sad datetime serializations) maps said to date-time
+        self.sdts = subing.CesrSuber(db=self, subkey='sdts.', klas=coring.Dater)
 
-                for name in unsecured:
-                    srcdb = getattr(self, name)
-                    cpydb = getattr(copy, name)
-                    for keys, val in srcdb.getTopItemIter():
-                        cpydb.put(keys=keys, val=val)
+        # all sad ssgs (sad indexed signature serializations) maps SAD quadkeys
+        # given by quadruple (diger.qb64, prefixer.qb64, seqner.q64, diger.qb64)
+        #  of reply and trans signer's key state est evt to val Siger for each
+        # signature.
+        self.ssgs = subing.CesrIoSetSuber(db=self, subkey='ssgs.', klas=indexing.Siger)
 
-                # This is the list of set based databases that are not created as part of event processing.
-                # for now we are just copying them from self to copy without worrying about being able to
-                # reprocess them.  We need a more secure method in the future
-                sets = ["esigs", "ecigs", "epath", "chas", "reps", "wkas", "meids", "maids"]
-                for name in sets:
-                    srcdb = getattr(self, name)
-                    cpydb = getattr(copy, name)
-                    for keys, val in srcdb.getTopItemIter():
-                        cpydb.add(keys=keys, val=val)
+        # all sad scgs  (sad non-indexed signature serializations) maps SAD SAID
+        # to couple (Verfer, Cigar) of nontrans signer of signature in Cigar
+        # nontrans qb64 of Prefixer is same as Verfer
+        self.scgs = subing.CatCesrIoSetSuber(db=self, subkey='scgs.',
+                                             klas=(coring.Verfer, coring.Cigar))
 
-                # Copy imgs (blinded media for remote identifiers)
-                for keys, val in self.imgs.getTopItemIter():
-                    copy.imgs.pin(keys=keys, val=val)
+        # all reply messages. Maps reply said to serialization. Replys are
+        # versioned sads ( with version string) so use Serder to deserialize and
+        # use  .sdts, .ssgs, and .scgs for datetimes and signatures
+        # TODO: clean
+        self.rpys = subing.SerderSuber(db=self, subkey='rpys.')
 
-                # Copy iimgs (blinded media for local identifiers)
-                for keys, val in self.iimgs.getTopItemIter():
-                    copy.iimgs.pin(keys=keys, val=val)
+        # all reply escrows indices of partially signed reply messages. Maps
+        # route in reply to single (Diger,)  of escrowed reply.
+        # Routes such as /end/role  /loc/schema
+        self.rpes = subing.CesrIoSetSuber(db=self, subkey='rpes.',
+                                          klas=coring.Diger)
 
-                # clone .habs  habitat name prefix Komer subdb
-                # copy.habs = koming.Komer(db=copy, schema=HabitatRecord, subkey='habs.')  # copy
-                for keys, val in self.habs.getTopItemIter():
-                    if val.hid in copy.kevers:  # only copy habs that verified
-                        copy.habs.put(keys=keys, val=val)
-                        ns = "" if val.domain is None else val.domain
-                        copy.names.put(keys=(ns, val.name), val=val.hid)
-                        copy.prefixes.add(val.hid)
-                        if val.mid:  # a group hab
-                            copy.groups.add(val.hid)
+        # auth AuthN/AuthZ by controller at cid of endpoint provider at eid
+        # maps key=cid.role.eid to val=diger of end reply
+        self.eans = subing.CesrSuber(db=self, subkey='eans.', klas=coring.Diger)
 
-                # clone .ends and .locs databases
-                for (cid, role, eid), val in self.ends.getTopItemIter():
-                    exists = False  # only copy if entries in both .ends and .locs
-                    for scheme in ("https", "http", "tcp"):  # all supported schemes
-                        lval = self.locs.get(keys=(eid, scheme))
-                        if lval:
-                            exists = True  # loc with matching cid and rol
-                            copy.locs.put(keys=(eid, scheme), val=lval)
-                    if exists:  # only copy end if has at least one matching loc
-                        copy.ends.put(keys=(cid, role, eid), val=val)
+        # auth AuthN/AuthZ by endpoint provider at eid of location at scheme url
+        # maps key=eid.scheme to val=diger of loc reply
+        self.lans = subing.CesrSuber(db=self, subkey='lans.', klas=coring.Diger)
 
-                # replace own kevers with copy kevers by clear and copy
-                # future do this by loading kever from .stts  key state subdb
-                self.kevers.clear()
-                for pre, kever in copy.kevers.items():
-                    self.kevers[pre] = kever
+        # service endpoint identifier (eid) auths keyed by controller cid.role.eid
+        # data extracted from reply /end/role/add or /end/role/cut
+        self.ends = koming.Komer(db=self, subkey='ends.',
+                                 klas=EndpointRecord, )
 
-                # replace prefixes with cloned copy prefixes
+        # service endpoint locations keyed by eid.scheme  (endpoint identifier)
+        # data extracted from reply loc
+        self.locs = koming.Komer(db=self,
+                                 subkey='locs.',
+                                 klas=LocationRecord, )
+        # observed oids by watcher by cid.aid.oid  (endpoint identifier)
+        # data extracted from reply loc
+        self.obvs = koming.Komer(db=self,
+                                 subkey='obvs.',
+                                 klas=ObservedRecord, )
 
-                # clear and clone .prefixes
-                self.prefixes.clear()
-                self.prefixes.update(copy.prefixes)
+        # index of last retrieved message from witness mailbox
+        # TODO: clean
+        self.tops = koming.Komer(db=self,
+                                 subkey='witm.',
+                                 klas=TopicsRecord, )
 
-                # clear and clone .gids
-                self.groups.clear()
-                self.groups.update(copy.groups)
+        # group partial signature escrow
+        self.gpse = subing.CatCesrIoSetSuber(db=self, subkey='gpse.',
+                                             klas=(coring.Number, coring.Diger))
 
-        # remove own db directory replace with clean clone copy
-        if os.path.exists(self.path):
-            shutil.rmtree(self.path)
+        # group delegate escrow
+        self.gdee = subing.CatCesrIoSetSuber(db=self, subkey='gdee.',
+                                             klas=(coring.Number, coring.Diger))
 
-        dst = shutil.move(copy.path, self.path)  # move copy back to orig
+        # group partial witness escrow
+        self.gpwe = subing.CatCesrIoSetSuber(db=self, subkey='gdwe.',
+                                             klas=(coring.Number, coring.Diger))
 
-        if os.path.exists(os.path.join(os.path.sep, "usr", "local", "var", "keri", "clean")):
-            shutil.rmtree(os.path.join(os.path.sep, "usr", "local", "var", "keri", "clean"))
+        # completed group multisig
+        # TODO: clean
+        self.cgms = subing.CesrSuber(db=self, subkey='cgms.',
+                                     klas=coring.Diger)
 
-        if not dst:  # move failed leave new in place so can manually fix
-            raise ValueError("Error cloning, unable to move {} to {}."
-                             "".format(copy.path, self.path))
+        # exchange message partial signature escrow
+        self.epse = subing.SerderSuber(db=self, subkey="epse.")
 
-        with reopenDB(db=self, reuse=True):  # make sure can reopen
-            if not isinstance(self.env, lmdb.Environment):
-                raise ValueError("Error cloning, unable to reopen."
-                                 "".format(self.path))
+        # exchange message PS escrow date time of message
+        self.epsd = subing.CesrSuber(db=self, subkey="epsd.",
+                                     klas=coring.Dater)
 
-        # clone success so remove if still there
-        if os.path.exists(copy.path):
-            shutil.rmtree(copy.path)
+        # exchange messages
+        # TODO: clean
+        self.exns = subing.SerderSuber(db=self, subkey="exns.")
 
-    def clonePreIter(self, pre, fn=0):
-        """
-        Returns iterator of first seen event messages with attachments for the
-        identifier prefix pre starting at first seen order number, fn.
-        Essentially a replay in first seen order with attachments
+        # Forward pointer to a provided reply message
+        # TODO: clean
+        self.erpy = subing.CesrSuber(db=self, subkey="erpy.", klas=coring.Saider)
 
-        Parameters:
-            pre is bytes of itdentifier prefix
-            fn is int fn to resume replay. Earliset is fn=0
+        # exchange message signatures
+        # TODO: clean
+        self.esigs = subing.CesrIoSetSuber(db=self, subkey='esigs.', klas=indexing.Siger)
 
-        Returns:
-           msgs (Iterator): over all items with pre starting at fn
-        """
-        if hasattr(pre, 'encode'):
-            pre = pre.encode("utf-8")
+        # exchange message signatures
+        # TODO: clean
+        self.ecigs = subing.CatCesrIoSetSuber(db=self, subkey='ecigs.',
+                                              klas=(coring.Verfer, coring.Cigar))
 
-        for keys, fn, dig in self.fels.getAllItemIter(keys=pre, on=fn):
-            try:
-                msg = self.cloneEvtMsg(pre=pre, fn=fn, dig=dig)
-            except Exception:
-                continue  # skip this event
-            yield msg
+        # exchange pathed attachments
+        # TODO: clean
+        self.epath = subing.IoSetSuber(db=self, subkey="epath.")
 
+        self.essrs = subing.CesrIoSetSuber(db=self, subkey="essrs.", klas=coring.Texter)
 
-    def cloneAllPreIter(self):
-        """
-        Returns iterator of first seen event messages with attachments for all
-        identifier prefixes starting at key. If key == b'' then start at first
-        key in databse. Use key to resume replay.
-        Essentially a replay in first seen order with attachments of entire
-        set of FELs.
+        # accepted signed 12-word challenge response exn messages keyed by prefix of signer
+        # TODO: clean
+        self.chas = subing.CesrIoSetSuber(db=self, subkey='chas.', klas=coring.Diger)
 
-        Returns:
-           msgs (Iterator): over all items in db
+        # successful signed 12-word challenge response exn messages keyed by prefix of signer
+        # TODO: clean
+        self.reps = subing.CesrIoSetSuber(db=self, subkey='reps.', klas=coring.Diger)
 
-        """
-        for keys, fn, dig in self.fels.getAllItemIter(keys=b'', on=0):
-            pre = keys[0].encode() if isinstance(keys[0], str) else keys[0]
-            try:
-                msg = self.cloneEvtMsg(pre=pre, fn=fn, dig=dig)
-            except Exception:
-                continue  # skip this event
-            yield msg
+        # authorized well known OOBIs
+        # TODO: clean
+        self.wkas = koming.IoSetKomer(db=self, subkey='wkas.', klas=WellKnownAuthN)
 
+        # KSN support datetime stamps and signatures indexed and not-indexed
+        # all ksn  kdts (key state datetime serializations) maps said to date-time
+        # TODO: clean
+        self.kdts = subing.CesrSuber(db=self, subkey='kdts.', klas=coring.Dater)
 
-    def cloneEvtMsg(self, pre, fn, dig):
-        """
-        Clones Event as Serialized CESR Message with Body and attached Foot
+        # all key state messages. Maps key state said to serialization. ksns are
+        # KeyStateRecords so use ._asdict or ._asjson as appropriate
+        # use  .kdts, .ksgs, and .kcgs for datetimes and signatures
+        # TODO: clean
+        self.ksns = koming.Komer(db=self,
+                                klas=KeyStateRecord,
+                                subkey='ksns.')
 
-        Parameters:
-            pre (bytes): identifier prefix of event
-            fn (int): first seen number (ordinal) of event
-            dig (bytes): digest of event
+        # key state SAID database for successfully saved key state notices
+        # maps key=(prefix, aid) to val=said of key state
+        # TODO: clean
+        self.knas = subing.CesrSuber(db=self, subkey='knas.', klas=coring.Diger)
 
-        Returns:
-            bytearray: message body with attachments
-        """
-        from ..core import coring
-        from ..core.counting import Counter, Codens
+        # Watcher watched SAID database for successfully saved watched AIDs for a watcher
+        # maps key=(cid, aid, oid) to val=said of rpy message
+        # TODO: clean
+        self.wwas = subing.CesrSuber(db=self, subkey='wwas.', klas=coring.Diger)
 
-        msg = bytearray()  # message
-        atc = bytearray()  # attachments
-        dgkey = dgKey(pre, dig)  # get message
-        if not (serder := self.evts.get(keys=(pre, dig))):
-            raise MissingEntryError("Missing event for dig={}.".format(dig))
-        msg.extend(serder.raw)
+        # config loaded oobis to be processed asynchronously, keyed by oobi URL
+        # TODO: clean
+        self.oobis = koming.Komer(db=self,
+                                  subkey='oobis.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
 
-        # add indexed signatures to attachments
-        if not (sigers := self.sigs.get(keys=dgkey)):
-            raise MissingEntryError("Missing sigs for dig={}.".format(dig))
-        atc.extend(Counter(code=Codens.ControllerIdxSigs,
-                           count=len(sigers), version=Vrsn_1_0).qb64b)
-        for siger in sigers:
-            atc.extend(siger.qb64b)
+        # escrow OOBIs that failed to load, retriable, keyed by oobi URL
+        self.eoobi = koming.Komer(db=self,
+                                  subkey='eoobi.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
 
-        # add indexed witness signatures to attachments
-        if wigers := self.wigs.get(keys=dgkey):
-            atc.extend(Counter(code=Codens.WitnessIdxSigs,
-                               count=len(wigers), version=Vrsn_1_0).qb64b)
-            for wiger in wigers:
-                atc.extend(wiger.qb64b)
+        # OOBIs with outstanding client requests.
+        self.coobi = koming.Komer(db=self,
+                                  subkey='coobi.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
 
-        # add authorizer (delegator/issuer) source seal event couple to attachments
-        if (duple := self.aess.get(keys=(pre, dig))) is not None:
-            number, diger = duple
-            atc.extend(Counter(code=Codens.SealSourceCouples,
-                               count=1, version=Vrsn_1_0).qb64b)
-            atc.extend(number.qb64b + diger.qb64b)
+        # Resolved OOBIs (those that have been processed successfully) for this database.
+        # TODO: clean
+        self.roobi = koming.Komer(db=self,
+                                  subkey='roobi.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
 
-        # add trans endorsement quadruples to attachments not controller
-        # may have been originally key event attachments or receipted endorsements
-        if quads := self.vrcs.get(keys=dgkey):
-            atc.extend(Counter(code=Codens.TransReceiptQuadruples,
-                               count=len(quads), version=Vrsn_1_0).qb64b)
-            for pre, snu, diger, siger in quads:    # adapt to CESR
-                atc.extend(pre.qb64b)
-                atc.extend(snu.qb64b)
-                atc.extend(diger.qb64b)
-                atc.extend(siger.qb64b)
+        # Well known OOBIs that are to be used for mfa against a resolved OOBI.
+        # TODO: clean
+        self.woobi = koming.Komer(db=self,
+                                  subkey='woobi.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
 
-        # add nontrans endorsement couples to attachments not witnesses
-        # may have been originally key event attachments or receipted endorsements
-        if coups := self.rcts.get(keys=dgkey):
-            atc.extend(Counter(code=Codens.NonTransReceiptCouples,
-                               count=len(coups), version=Vrsn_1_0).qb64b)
-            for prefixer, cigar in coups:
-                atc.extend(prefixer.qb64b)
-                atc.extend(cigar.qb64b)
+        # Well known OOBIs that are to be used for mfa against a resolved OOBI.
+        # TODO: clean
+        self.moobi = koming.Komer(db=self,
+                                  subkey='moobi.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
 
-        # add first seen replay couple to attachments
-        if not (dater := self.dtss.get(keys=dgkey)):
-            raise MissingEntryError("Missing datetime for dig={}.".format(dig))
-        atc.extend(Counter(code=Codens.FirstSeenReplayCouples,
-                           count=1, version=Vrsn_1_0).qb64b)
-        atc.extend(coring.Number(num=fn, code=coring.NumDex.Huge).qb64b)  # may not need to be Huge
-        atc.extend(dater.qb64b)
+        # Multifactor well known OOBI auth records to process.  Keys by controller URL
+        # TODO: clean
+        self.mfa = koming.Komer(db=self,
+                                subkey='mfa.',
+                                klas=OobiRecord,
+                                sep=">")  # Use separator not allowed in URLs so no splitting occurs.
 
-        # prepend pipelining counter to attachments
-        if len(atc) % 4:
-            raise ValueError("Invalid attachments size={}, nonintegral"
-                             " quadlets.".format(len(atc)))
-        pcnt = Counter(code=Codens.AttachmentGroup,
-                       count=(len(atc) // 4), version=Vrsn_1_0).qb64b
-        msg.extend(pcnt)
-        msg.extend(atc)
-        return msg
+        # Resolved multifactor well known OOBI auth records.  Keys by controller URL
+        # TODO: clean
+        self.rmfa = koming.Komer(db=self,
+                                 subkey='rmfa.',
+                                 klas=OobiRecord,
+                                 sep=">")  # Use separator not allowed in URLs so no splitting occurs.
 
-    def cloneDelegation(self, kever):
-        """
-        Recursively clone delegation chain from AID of Kever if one exits.
+        # JSON schema SADs keys by the SAID
+        # TODO: clean
+        self.schema = subing.SchemerSuber(db=self,
+                                          subkey='schema.')
 
-        Parameters:
-            kever (Kever): Kever from which to clone the delegator's AID.
+        # Field values for contact information for remote identifiers.  Keyed by prefix/field
+        # TODO: clean
+        self.cfld = subing.Suber(db=self,
+                                 subkey="cfld.")
 
-        """
-        if kever.delegated and kever.delpre in self.kevers:
-            dkever = self.kevers[kever.delpre]
-            yield from self.cloneDelegation(dkever)
+        # Global settings for the Habery environment
+        self.hbys = subing.Suber(db=self, subkey='hbys.')
 
-            for dmsg in self.clonePreIter(pre=kever.delpre, fn=0):
-                yield dmsg
+        # Signed contact data, keys by prefix
+        # TODO: clean
+        self.cons = subing.Suber(db=self,
+                                 subkey="cons.")
 
-    def fetchAllSealingEventByEventSeal(self, pre, seal, sn=0):
-        """
-        Search through a KEL for the event that contains a specific anchored
-        SealEvent type of provided seal but in dict form and is also fully
-        witnessed. Searchs from sn forward (default = 0).Searches all events in
-        KEL of pre including disputed and/or superseded events.
-        Returns the Serder of the first event with the anchored SealEvent seal,
-            None if not found
+        # Transferable signatures on contact data
+        # TODO: clean
+        self.ccigs = subing.CesrSuber(db=self, subkey='ccigs.', klas=coring.Cigar)
 
+        # Blinded media for contact information for remote identifiers.
+        # CatCesrSuber with TypeMedia format: (Noncer=SAID, Noncer=UUID, Labeler=MIME, Texter=data)
+        self.imgs = subing.CatCesrSuber(db=self, subkey='imgs.',
+                                         klas=(coring.Noncer, coring.Noncer,
+                                               coring.Labeler, coring.Texter))
 
-        Parameters:
-            pre (bytes|str): identifier of the KEL to search
-            seal (dict): dict form of Seal of any type SealEvent to find in anchored
-                seals list of each event
-            sn (int): beginning sn to search
+        # Field values for identifier information for local identifiers. Keyed by prefix/field
+        # TODO: clean
+        self.ifld = subing.Suber(db=self,
+                                 subkey="ifld.")
 
-        """
-        from ..core.structing import SealEvent
+        # Signed identifier data, keys by prefix
+        # TODO: clean
+        self.sids = subing.Suber(db=self,
+                                  subkey="sids.")
 
-        if tuple(seal) != SealEvent._fields:  # wrong type of seal
-            return None
+        # Transferable signatures on identifier data
+        # TODO: clean
+        self.icigs = subing.CesrSuber(db=self, subkey='icigs.', klas=coring.Cigar)
 
-        seal = SealEvent(**seal)  #convert to namedtuple
+        # Blinded media for identifier information for local identifiers.
+        # CatCesrSuber with TypeMedia format: (Noncer=SAID, Noncer=UUID, Labeler=MIME, Texter=data)
+        self.iimgs = subing.CatCesrSuber(db=self, subkey='iimgs.',
+                                          klas=(coring.Noncer, coring.Noncer,
+                                                coring.Labeler, coring.Texter))
 
-        for srdr in self.getEvtPreIter(pre=pre, sn=sn):  # includes disputed & superseded
-            for eseal in srdr.seals or []:  # or [] for seals 'a' field missing
-                if tuple(eseal) == SealEvent._fields:
-                    eseal = SealEvent(**eseal)  # convert to namedtuple
-                    if seal == eseal and self.fullyWitnessed(srdr):
-                        return srdr
-        return None
+        # Delegation escrow dbs #
+        # delegated partial witness escrow
+        self.dpwe = subing.SerderSuber(db=self, subkey='dpwe.')
 
-    # use alias here until can change everywhere for  backwards compatibility
-    findAnchoringSealEvent = fetchAllSealingEventByEventSeal  # alias
+        # delegated unanchored escrow
+        self.dune = subing.SerderSuber(db=self, subkey='dune.')
 
-    def fetchLastSealingEventByEventSeal(self, pre, seal, sn=0):
-        """
-        Search through a KEL for the last event at any sn but that contains a
-        specific anchored event seal of namedtuple SealEvent type that matches
-        the provided seal in dict form and is also fully witnessed.
-        Searchs from provided sn forward (default = 0).
-        Searches only last events in KEL of pre so does not include disputed
-        and/or superseded events.
+        # delegate publication escrow for sending delegator info to my witnesses
+        self.dpub = subing.SerderSuber(db=self, subkey='dpub.')
 
-        Returns:
-            srdr (Serder): instance of the first event with the matching
-                           anchoring SealEvent seal,
-                        None if not found
+        # completed group delegated AIDs
+        # TODO: clean
+        self.cdel = subing.CesrOnSuber(db=self, subkey='cdel.',
+                                     klas=coring.Diger)
 
-        Parameters:
-            pre (bytes|str): identifier of the KEL to search
-            seal (dict): dict form of Seal of any type SealEvent to find in anchored
-                seals list of each event
-            sn (int): beginning sn to search
+        # multisig sig embed payload SAID mapped to containing exn messages across group multisig participants
+        # TODO: clean
+        self.meids = subing.CesrIoSetSuber(db=self, subkey="meids.", klas=coring.Diger)
 
-        """
-        from ..core.structing import SealEvent
+        # multisig sig embed payload SAID mapped to group multisig participants AIDs
+        # TODO: clean
+        self.maids = subing.CesrIoSetSuber(db=self, subkey="maids.", klas=coring.Prefixer)
 
-        if tuple(seal) != SealEvent._fields:  # wrong type of seal
-            return None
+        # KRAM cache type — key: expression string, value: drift and lag params
+        self.kramCTYP = koming.Komer(db=self, subkey='ctyp.',
+                                 klas=CacheTypeRecord)
 
-        seal = SealEvent(**seal)  #convert to namedtuple
+        # KRAM message cache — key: (AID, MID), value: msg datetime, drift, lags
+        self.kramMSGC = koming.Komer(db=self, subkey='msgc.',
+                                 klas=MsgCacheRecord)
 
-        for srdr in self.getEvtLastPreIter(pre=pre, sn=sn):  # no disputed or superseded
-            for eseal in srdr.seals or []:  # or [] for seals 'a' field missing
-                if tuple(eseal) == SealEvent._fields:
-                    eseal = SealEvent(**eseal)  # convert to namedtuple
-                    if seal == eseal and self.fullyWitnessed(srdr):
-                        return srdr
-        return None
+        # KRAM transactioned message cache — key: (AID, XID, MID), value: datetimes, drift, lags
+        self.kramTMSC = koming.Komer(db=self, subkey='tmsc.',
+                                 klas=TxnMsgCacheRecord)
 
+        # KRAM partially signed multi-key message key (AID.MID) mapped to associated message (SerderKERI)
+        self.kramPMKM = subing.SerderSuber(db=self, subkey='pmkm.')
 
+        # KRAM partially signed multi-key signature key (AID.MID) mapped to associated signatures
+        self.kramPMKS = subing.CesrIoSetSuber(db=self, subkey='pmks.', klas=indexing.Siger)
 
-    def fetchLastSealingEventBySeal(self, pre, seal, sn=0):
-        """Only searches last event at any sn therefore does not search
-        any disputed or superseded events.
-        Search through last event at each sn in KEL for the event that contains
-        an anchored Seal with same Seal type as provided seal but in dict form.
-        Searchs from sn forward (default = 0).
-        Returns the Serder of the first found event with the anchored Seal seal,
-            None if not found
+        # KRAM partially signed multi-key sender key state key (AID.MID) mapped to SN and event SAID
+        self.kramPMSK = subing.CatCesrSuber(db=self, subkey='pmsk.', klas=(coring.Number, coring.Diger))
 
-        Parameters:
-            pre (bytes|str): identifier of the KEL to search
-            seal (dict): dict form of Seal of any type to find in anchored
-                seals list of each event
-            sn (int): beginning sn to search
+        # KRAM partially signed multi-key non-authenticator attachments
 
-        """
-        # create generic Seal namedtuple class using keys from provided seal dict
-        Seal = namedtuple('Seal', list(seal))  # matching type
+        # trqs: trans receipt quadruples (prefixer, number, diger, siger)
+        self.kramTRQS = subing.CatCesrIoSetSuber(db=self, subkey='trqs.',
+                                                  klas=(coring.Prefixer, coring.Number,
+                                                        coring.Diger, indexing.Siger))
 
-        for srdr in self.getEvtLastPreIter(pre=pre, sn=sn):  # only last evt at sn
-            for eseal in srdr.seals or []:  # or [] for seals 'a' field missing
-                if tuple(eseal) == Seal._fields:  # same type of seal
-                    eseal = Seal(**eseal)  #convert to namedtuple
-                    if seal == eseal and self.fullyWitnessed(srdr):
-                        return srdr
-        return None
+        # tsgs: trans last sig groups (prefixer, number, diger, siger) — stored per-siger
+        self.kramTSGS = subing.CatCesrIoSetSuber(db=self, subkey='tsgs.',
+                                                  klas=(coring.Prefixer, coring.Number,
+                                                        coring.Diger, indexing.Siger))
 
-    def signingMembers(self, pre: str):
-        """ Find signing members of a multisig group aid.
+        # sscs: first seen seal couples (number, diger) issuing or delegating
+        self.kramSSCS = subing.CatCesrIoSetSuber(db=self, subkey='sscs.',
+                                                  klas=(coring.Number, coring.Diger))
+
+        # ssts: source seal triples (prefixer, number, diger) issued or delegated
+        self.kramSSTS = subing.CatCesrIoSetSuber(db=self, subkey='ssts.',
+                                                  klas=(coring.Prefixer, coring.Number,
+                                                        coring.Diger))
 
-        Using the pubs index to find members of a signing group
+        # frcs: first seen replay couples (number, dater)
+        self.kramFRCS = subing.CatCesrIoSetSuber(db=self, subkey='frcs.',
+                                                  klas=(coring.Number, coring.Dater))
 
-        Parameters:
-            pre (str): qb64 identifier prefix to find members
+        # tdcs: typed digest seal couples (verser, diger)
+        self.kramTDCS = subing.CatCesrIoSetSuber(db=self, subkey='tdcs.',
+                                                  klas=(coring.Verser, coring.Diger))
 
-        Returns:
-            list: qb64 identifier prefixes of signing members for provided aid
+        # ptds: pathed streams (raw bytes)
+        self.kramPTDS = subing.IoSetSuber(db=self, subkey='ptds.')
 
-        """
-        if (habord := self.habs.get(keys=(pre,))) is None:
-            return None
+        # bsqs: blind state quadruples (diger, noncer, noncer, labeler)
+        self.kramBSQS = subing.CatCesrIoSetSuber(db=self, subkey='bsqs.',
+                                                  klas=(coring.Diger, coring.Noncer,
+                                                        coring.Noncer, coring.Labeler))
 
-        return habord.smids
+        # bsss: bound state sextuples (diger, noncer, noncer, labeler, number, noncer)
+        self.kramBSSS = subing.CatCesrIoSetSuber(db=self, subkey='bsss.',
+                                                  klas=(coring.Diger, coring.Noncer,
+                                                        coring.Noncer, coring.Labeler,
+                                                        coring.Number, coring.Noncer))
 
-    def rotationMembers(self, pre: str):
-        """ Find rotation members of a multisig group aid.
+        # tmqs: type media quadruples (diger, noncer, labeler, texter)
+        self.kramTMQS = subing.CatCesrIoSetSuber(db=self, subkey='tmqs.',
+                                                  klas=(coring.Diger, coring.Noncer,
+                                                        coring.Labeler, coring.Texter))
 
-        Using the digs index to lookup member pres of a group aid
+        self.reload()
 
-        Parameters:
-            pre (str): qb64 identifier prefix to find members
+        return self.env
 
-        Returns:
-            list: qb64 identifier prefixes of rotation members for provided aid
+    def reload(self):
         """
-        if (habord := self.habs.get(keys=(pre,))) is None:
-            return None
+        Reload stored prefixes and Kevers from .habs
 
-        return habord.rmids
+        """
+        # Check migrations to see if this database is up to date.  Error otherwise
+        if not self.current:
+            raise DatabaseError(f"Database migrations must be run. DB version {self.version}; current {__version__}")
 
-    def fullyWitnessed(self, serder):
-        """ Verify the witness threshold on the event
+        removes = []
+        for keys, data in self.habs.getTopItemIter():
+            if (ksr := self.states.get(keys=data.hid)) is not None:
+                try:
+                    from ..core.eventing import Kever
+                    kever = Kever(state=ksr,
+                                           db=self,
+                                           local=True)
+                except MissingEntryError as ex:  # no kel event for keystate
+                    removes.append(keys)  # remove from .habs
+                    continue
+                self.kevers[kever.prefixer.qb64] = kever
+                self.prefixes.add(kever.prefixer.qb64)
+                if data.mid:  # group hab
+                    self.groups.add(data.hid)
 
-        Parameters:
-            serder (Serder): event serder to validate witness threshold
+            elif data.mid is None:  # in .habs but no corresponding key state and not a group so remove
+                removes.append(keys)  # no key state or KEL event for .hab record
 
-        Returns:
+        for keys in removes:  # remove bare .habs records
+            self.habs.rem(keys=keys)
 
+    def clean(self):
         """
-        # Verify fully receipted, because this witness may have persisted before all receipts
-        # have been gathered if this ius a witness for serder.pre
-        # get unique verified wigers and windices lists from wigers list
-        wigers = self.wigs.get(keys=(serder.preb, serder.saidb))
-        kever = self.kevers[serder.pre]
-        toad = kever.toader.num
+        Clean database by creating re-verified cleaned cloned copy
+        and then replacing original with cleaned cloned copy
 
-        return not len(wigers) < toad
+        Database usage should be offline during cleaning as it will be cloned in
+        readonly mode
 
-    def resolveVerifiers(self, pre=None, sn=0, dig=None):
         """
-        Returns the Tholder and Verfers for the provided identifier prefix.
-        Default pre is own .pre
+        from ..core import parsing
 
-        Parameters:
-            pre(str) is qb64 str of bytes of identifier prefix.
-            sn(int) is the sequence number of the est event
-            dig(str) is qb64 str of digest of est event
+        # create copy to clone into
+        with openDB(name=self.name,
+                    temp=False,
+                    headDirPath=self.headDirPath,
+                    perm=self.perm,
+                    clean=True) as copy:  # copy is Baser instance
 
-        """
-        from ..core import coring
+            with reopenDB(db=self, reuse=True, readonly=True):  # reopen as readonly
+                if not os.path.exists(self.path):
+                    raise ValueError("Error while cleaning, no orig at {}."
+                                     "".format(self.path))
+                from ..core.eventing import Kevery
+                kvy = Kevery(db=copy)  # promiscuous mode
 
-        prefixer = coring.Prefixer(qb64=pre)
-        if prefixer.transferable:
-            # receipted event and receipter in database so get receipter est evt
-            # retrieve dig of last event at sn of est evt of receipter.
-            sdig = self.kels.getLast(keys=prefixer.qb64b, on=sn)
-            if sdig is None:
-                # receipter's est event not yet in receipters's KEL
-                raise ValidationError("key event sn {} for pre {} is not yet in KEL"
-                                             "".format(sn, pre))
-            sdig = sdig.encode("utf-8")
-            # retrieve last event itself of receipter est evt from sdig
-            sserder = self.evts.get(keys=(prefixer.qb64b, bytes(sdig)))
-            # assumes db ensures that sserder must not be none because sdig was in KE
-            if dig is not None and not sserder.compare(said=dig):  # endorser's dig not match event
-                raise ValidationError("Bad proof sig group at sn = {}"
-                                             " for ksn = {}."
-                                             "".format(sn, sserder.sad))
+                # Revise in future to NOT parse msgs but to extract the processed
+                # objects so can pass directly to kvy.processEvent()
+                # need new method cloneObjAllPreIter()
+                # process event doesn't capture exceptions so we can more easily
+                # detect in the cloning that some events did not make it through
+                psr = parsing.Parser(kvy=kvy, version=Vrsn_1_0)
+                for msg in self.cloneAllPreIter():  # clone into copy
+                    psr.parseOne(ims=msg)
 
-            verfers = sserder.verfers
-            tholder = sserder.tholder
+                # This is the list of non-set based databases that are not created as part of event processing.
+                # for now we are just copying them from self to copy without worrying about being able to
+                # reprocess them.  We need a more secure method in the future
+                unsecured = ["hbys", "schema", "states", "rpys", "eans", "tops", "cgms", "exns", "erpy",
+                             "kdts", "ksns", "knas", "oobis", "roobi", "woobi", "moobi", "mfa", "rmfa",
+                             "cfld", "cons", "ccigs", "cdel", "migs",
+                             "ifld", "sids", "icigs"]
 
-        else:
-            verfers = [coring.Verfer(qb64=pre)]
-            tholder = coring.Tholder(sith="1")
+                for name in unsecured:
+                    srcdb = getattr(self, name)
+                    cpydb = getattr(copy, name)
+                    for keys, val in srcdb.getTopItemIter():
+                        cpydb.put(keys=keys, val=val)
 
-        return tholder, verfers
+                # This is the list of set based databases that are not created as part of event processing.
+                # for now we are just copying them from self to copy without worrying about being able to
+                # reprocess them.  We need a more secure method in the future
+                sets = ["esigs", "ecigs", "epath", "chas", "reps", "wkas", "meids", "maids"]
+                for name in sets:
+                    srcdb = getattr(self, name)
+                    cpydb = getattr(copy, name)
+                    for keys, val in srcdb.getTopItemIter():
+                        cpydb.add(keys=keys, val=val)
 
-    def getEvtPreIter(self, pre, sn=0):
-        """
-        Returns iterator of event messages without attachments
-        in sn order from the KEL of identifier prefix pre.
-        Essentially a replay of all event messages without attachments
-        for each sn from the KEL of pre including superseded duplicates
+                # Copy imgs (blinded media for remote identifiers)
+                for keys, val in self.imgs.getTopItemIter():
+                    copy.imgs.pin(keys=keys, val=val)
 
-        Parameters:
-            pre (bytes|str): identifier prefix
-            sn (int): sequence number (default 0) to begin interation
-        """
-        if hasattr(pre, 'encode'):
-            pre = pre.encode("utf-8")
+                # Copy iimgs (blinded media for local identifiers)
+                for keys, val in self.iimgs.getTopItemIter():
+                    copy.iimgs.pin(keys=keys, val=val)
 
-        for dig in self.kels.getAllIter(keys=pre, on=sn):
-            try:
-                if not (serder := self.evts.get(keys=(pre, dig))):
-                    raise MissingEntryError("Missing event for dig={}.".format(dig))
+                # clone .habs  habitat name prefix Komer subdb
+                # copy.habs = koming.Komer(db=copy, schema=HabitatRecord, subkey='habs.')  # copy
+                for keys, val in self.habs.getTopItemIter():
+                    if val.hid in copy.kevers:  # only copy habs that verified
+                        copy.habs.put(keys=keys, val=val)
+                        ns = "" if val.domain is None else val.domain
+                        copy.names.put(keys=(ns, val.name), val=val.hid)
+                        copy.prefixes.add(val.hid)
+                        if val.mid:  # a group hab
+                            copy.groups.add(val.hid)
 
-            except Exception:
-                continue  # skip this event
+                # clone .ends and .locs databases
+                for (cid, role, eid), val in self.ends.getTopItemIter():
+                    exists = False  # only copy if entries in both .ends and .locs
+                    for scheme in ("https", "http", "tcp"):  # all supported schemes
+                        lval = self.locs.get(keys=(eid, scheme))
+                        if lval:
+                            exists = True  # loc with matching cid and role
+                            copy.locs.put(keys=(eid, scheme), val=lval)
+                    if exists:  # only copy end if has at least one matching loc
+                        copy.ends.put(keys=(cid, role, eid), val=val)
 
-            yield serder  # event as Serder
+                # replace own kevers with copy kevers by clear and copy
+                # future do this by loading kever from .stts  key state subdb
+                self.kevers.clear()
+                for pre, kever in copy.kevers.items():
+                    self.kevers[pre] = kever
 
+                # replace prefixes with cloned copy prefixes
 
-    def getEvtLastPreIter(self, pre, sn=0):
-        """
-        Returns iterator of event messages without attachments
-        in sn order from the KEL of identifier prefix pre.
-        Essentially a replay of all event messages without attachments
-        for each sn from the KEL of pre including superseded duplicates
+                # clear and clone .prefixes
+                self.prefixes.clear()
+                self.prefixes.update(copy.prefixes)
 
-        Parameters:
-            pre (bytes|str): identifier prefix
-            sn (int): sequence number (default 0) to begin interation
-        """
-        if hasattr(pre, 'encode'):
-            pre = pre.encode("utf-8")
+                # clear and clone .gids
+                self.groups.clear()
+                self.groups.update(copy.groups)
 
-        for dig in self.kels.getLastIter(keys=pre, on=sn):
-            try:
+        # remove own db directory replace with clean clone copy
+        if os.path.exists(self.path):
+            shutil.rmtree(self.path)
 
-                if not (serder := self.evts.get(keys=(pre, dig) )):
-                    raise MissingEntryError("Missing event for dig={}.".format(dig))
+        dst = shutil.move(copy.path, self.path)  # move copy back to orig
 
-            except Exception:
-                continue  # skip this event
+        if os.path.exists(os.path.join(os.path.sep, "usr", "local", "var", "keri", "clean")):
+            shutil.rmtree(os.path.join(os.path.sep, "usr", "local", "var", "keri", "clean"))
 
-            yield serder  # event as Serder
+        if not dst:  # move failed leave new in place so can manually fix
+            raise ValueError("Error cloning, unable to move {} to {}."
+                             "".format(copy.path, self.path))
+
+        with reopenDB(db=self, reuse=True):  # make sure can reopen
+            if not isinstance(self.env, lmdb.Environment):
+                raise ValueError("Error cloning, unable to reopen {}."
+                                 "".format(self.path))
+
+        # clone success so remove if still there
+        if os.path.exists(copy.path):
+            shutil.rmtree(copy.path)
 
 
 class BaserDoer(doing.Doer):
diff --git a/src/keri/db/webbasing.py b/src/keri/db/webbasing.py
new file mode 100644
index 000000000..55e95588d
--- /dev/null
+++ b/src/keri/db/webbasing.py
@@ -0,0 +1,838 @@
+# -*- encoding: utf-8 -*-
+"""
+keri.db.webbasing module
+
+Browser-safe plain-value DBer backed by PyScript storage.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import semver
+import importlib
+from collections import namedtuple
+
+from hio.base import doing
+from hio.help import ogler
+
+from ordered_set import OrderedSet as oset
+
+from keri import __version__
+
+from ..recording import (KeyStateRecord, EventSourceRecord,
+                         HabitatRecord, OobiRecord, EndpointRecord,
+                         LocationRecord, ObservedRecord,
+                         CacheTypeRecord, TxnMsgCacheRecord,
+                         MsgCacheRecord, WellKnownAuthN,
+                         TopicsRecord)
+
+from ..kering import (MissingEntryError, ValidationError,
+                      ConfigurationError, DatabaseError, Vrsn_1_0)
+
+from .webdbing import WebDBer
+
+try:
+    from .basing import BaserBase
+except ImportError: 
+    BaserBase = object  # fallback base so the class statement below still works
+
+logger = ogler.getLogger()
+
+
+# --- Duplicated from dbing.py / basing.py to avoid lmdb import ---
+
+def _strip_prerelease(version_str):
+    """Strip prerelease and build metadata from a semver string.
+
+    See: https://github.com/WebOfTrust/keripy/issues/820
+    """
+    ver = semver.VersionInfo.parse(version_str)
+    return str(semver.Version(ver.major, ver.minor, ver.patch))
+
+
+class WebBaser(WebDBer, BaserBase):
+    def __init__(self, name="main", reopen=False, temp=False, **kwa):
+        """
+        Setup named sub databases.
+
+        Parameters:
+            name is str directory path name differentiator for main database
+                When system employs more than one keri database, name allows
+                differentiating each instance by name
+            temp is boolean, assign to .temp
+                True then open in temporary directory, clear on close
+                Otherwise then open persistent directory, do not clear on close
+            headDirPath is optional str head directory pathname for main database
+                If not provided use default .HeadDirpath
+            mode is int numeric os dir permissions for database directory
+            reopen (bool): True means database will be reopened by this init
+
+
+        """
+        SubDbNames = ["aess.", "bsss.", "bsqs.", "ccigs.", "cdel.", "cfld.", "chas.",
+            "cgms.", "coobi.", "cons.", "ctyp.", "dees.", "dels.", "dpwe.", "dpub.",
+            "dtss.", "dune.", "eans.", "ecigs.", "ends.", "eoobi.", "epath.", "epse.",
+            "epsd.", "erpy.", "esigs.", "esrs.", "essrs.", "exns.", "evts.", "fels.", "fons.",
+            "frcs.", "gdee.", "gdwe.", "gpse.", "habs.", "hbys.", "iimgs.", "icigs.",
+            "ifld.", "imgs.", "kels.", "kdts.", "knas.", "ksns.", "lans.", "ldes.",
+            "locs.", "maids.", "meids.", "mfes.", "mfa.", "migs.", "moobi.", "msgc.",
+            "names.", "obvs.", "oobis.", "ooes.", "pdes.", "pmkm.", "pmks.", "pmsk.",
+            "pses.", "ptds.", "pwes.", "qnfs.", "rcts.", "reps.", "rpes.", "rmfa.",
+            "roobi.", "rpys.", "scgs.", "schema.", "sdts.", "sids.", "sigs.", "sscs.",
+            "ssgs.", "ssts.", "stts.", "tdcs.", "tmsc.", "tmqs.", "trqs.", "tsgs.", "udes.",
+            "ures.", "uwes.", "vrcs.", "vres.", "vers.", "wigs.", "wits.", "wkas.",
+            "witm.", "woobi.", "wwas."
+        ]
+        self.SubDbNames = SubDbNames
+
+        self.name = name
+        self._version = None
+        self.opened = False
+
+        self.temp = temp
+
+        BaserBase.__init__(self)
+
+    async def reopen(self, clear=False, storageOpener=None):
+        """Open or re-open the WebBaser backing store.
+
+        Creates a WebDBer instance using the baser's name and declared
+        SubDbNames, loads or initialises each SubDb's underlying store,
+        binds all SubDbs to this WebBaser via ``_bindSubDbs()``, then
+        rebuilds in-memory state (kevers, escrows) via ``reload()``.
+
+        This method must be awaited because browser storage operations are
+        asynchronous.  After calling ``reopen()`` the WebBaser is fully
+        operational and ready for reads, writes, and flushes.  Calling
+        ``reopen()`` on an already-open baser replaces the existing WebDBer
+        instance and resets all SubDb bindings.
+
+        Parameters:
+            clear (bool): When True, all existing persisted data for this
+                baser (across all SubDbs) is cleared before loading.
+            storageOpener (callable | None): Optional async factory that
+                returns a storage handle for a given namespace.  Overrides
+                the default PyScript opener.  Used to inject
+                FakeStorageBackend in CPython tests.
+        """
+        if storageOpener is not None:
+            self._storageOpener = storageOpener
+        opener = getattr(self, "_storageOpener", None)
+
+        try:
+            self.db = await WebDBer.open(
+                name=self.name,
+                stores=self.SubDbNames,
+                clear=clear,
+                storageOpener=opener,
+            )
+        except RuntimeError as e:
+            if opener is None:
+                raise RuntimeError(
+                    "No storage opener available. "
+                    "Provide storageOpener=FakeStorageBackend.open in CPython, "
+                    "or run under PyScript for IndexedDB."
+                ) from e
+            raise
+
+        self.env = self.db.env
+        self._bindSubDbs()
+        self.reload()
+        self.opened = True
+
+
+    def close(self, *, clear: bool = False):
+        """Synchronous close. Safe to call from hio Doer.exit() and Habery.close().
+
+        Drops all in-memory state and schedules a best-effort fire-and-forget
+        flush to the browser's backing storage.  The flush is scheduled as an
+        ``asyncio`` task via ``loop.create_task()`` so it does NOT block the
+        caller.
+
+        In a browser / Pyodide environment the event loop persists for the
+        lifetime of the page, so the scheduled flush task will always complete.
+
+        In CPython tests where ``asyncio.run()`` terminates the loop when
+        the test coroutine returns, the task may be cancelled before it runs.
+        Use `aclose` instead when the caller can ``await`` and needs a
+        guaranteed flush.
+
+        When ``clear=True`` (or ``self.temp is True``), each SubDb's in-memory
+        items are emptied and marked dirty before the flush is scheduled, so
+        the cleared state is what gets persisted.
+
+        If the baser is not open the method returns immediately.
+
+        Note:
+            After close, all Suber/Komer attributes (e.g. ``self.oobis``)
+            are deleted.  Any attempt to access them will raise
+            ``AttributeError``, making accidental post-close usage fail
+            loudly instead of silently writing to an orphaned in-memory
+            SubDb.  The attributes are rebound on ``reopen()``.
+
+        Parameters:
+            clear (bool): When True, the backing storage for this WebBaser
+                is cleared.  When False (default), stored state is preserved
+                for future ``reopen()`` calls.
+        """
+        if not self.opened or self.db is None:
+            return
+
+        if clear or self.temp:
+            for subdb in self.db._stores.values():
+                subdb.items.clear()
+                subdb.dirty = True
+
+        # Capture reference before clearing self.db
+        db = self.db
+        self.db = None
+        self.env = None
+        self.opened = False
+
+        # Remove all Suber/Komer attributes so post-close writes raise
+        # AttributeError instead of silently going to an orphaned SubDb.
+        for name in getattr(self, '_subdb_names', ()):
+            try:
+                delattr(self, name)
+            except AttributeError:
+                pass
+
+        # Schedule async flush as fire-and-forget task.
+        try:
+            loop = asyncio.get_running_loop()
+            loop.create_task(db.flush())
+        except RuntimeError:
+            pass  # no running event loop — skip async flush
+
+
+    async def aclose(self, *, clear: bool = False):
+        """Async close with guaranteed flush — use when the caller can ``await``.
+
+        Flushes all pending in-memory writes to backing browser storage and
+        waits for the flush to complete before clearing internal references.
+        This is the preferred close path in any ``async`` context (tests,
+        wallet ``AsyncRecurDoer.recur_async()`` shutdown, etc.) because the
+        caller can be certain that all data has been persisted when the method
+        returns.
+
+        When ``clear=True`` (or ``self.temp is True``), each SubDb's in-memory
+        items are emptied and marked dirty before flushing, so the cleared
+        state is what gets persisted.
+
+        For sync callers (hio Doer.exit(), Habery.close(), openHby() context
+        manager) use :meth:`close` instead — it schedules the flush as a
+        fire-and-forget task that completes on the next event-loop tick.
+
+        If the baser is not open the method returns immediately.
+
+        Note:
+            After close, all Suber/Komer attributes (e.g. ``self.oobis``)
+            are deleted.  Any attempt to access them will raise
+            ``AttributeError``, making accidental post-close usage fail
+            loudly instead of silently writing to an orphaned in-memory
+            SubDb.  The attributes are rebound on ``reopen()``.
+
+        Parameters:
+            clear (bool): When True the backing storage for this WebBaser
+                is cleared.  When False (default) stored state is preserved
+                for future ``reopen()`` calls.
+        """
+        if not self.opened or self.db is None:
+            return
+
+        if clear or self.temp:
+            for subdb in self.db._stores.values():
+                subdb.items.clear()
+                subdb.dirty = True
+
+        await self.db.flush()
+        self.db = None
+        self.env = None
+        self.opened = False
+
+        # Remove all Suber/Komer attributes so post-close writes raise
+        # AttributeError instead of silently going to an orphaned SubDb.
+        for name in getattr(self, '_subdb_names', ()):
+            try:
+                delattr(self, name)
+            except AttributeError:
+                pass
+
+
+    def _bindSubDbs(self):
+        """
+        Bind all WebBaser sub‑databases (Subers and Komers) to this instance.
+
+        This method initializes the full set of logical sub‑databases that make up
+        the WebBaser storage schema. Each sub‑database is created with the correct
+        Suber/Komer type, serialization format, and key prefix (`subkey`).
+
+        WebBaser uses WebDBer as the underlying backend, which provides a
+        lexicographically‑sorted key/value store. Because WebDBer does not support
+        LMDB dupsort semantics, the choice of Suber class (IoSetSuber, OnIoSetSuber,
+        CatCesrIoSetSuber, etc.) determines how uniqueness, ordering, and grouping
+        are emulated in the browser environment.
+
+        This method must be called exactly once during initialization or reopen().
+        After binding, each attribute (e.g. `self.kels`, `self.sigs`, `self.states`)
+        provides the full API for interacting with that logical sub‑database.
+
+        No I/O occurs here; this method only constructs the Suber/Komer wrappers.
+        Actual persistence happens through WebDBer during flush(), reopen(), and
+        close().
+        """
+
+        from . import koming, subing
+        from ..core import coring, indexing
+
+        _before = set(self.__dict__)
+        self.evts = subing.SerderSuber(db=self, subkey='evts.')
+        self.fels = subing.OnSuber(db=self, subkey='fels.')
+        self.kels = subing.OnIoSetSuber(db=self, subkey='kels.')
+        self.dtss = subing.CesrSuber(db=self, subkey='dtss.', klas=coring.Dater)
+        self.aess = subing.CatCesrSuber(db=self, subkey='aess.',
+                                        klas=(coring.Number, coring.Diger))
+        self.sigs = subing.CesrIoSetSuber(db=self, subkey='sigs.',
+                                        klas=(indexing.Siger))
+        self.wigs = subing.CesrIoSetSuber(db=self, subkey='wigs.', klas=indexing.Siger)
+        self.rcts = subing.CatCesrIoSetSuber(db=self, subkey="rcts.",
+                                             klas=(coring.Prefixer, coring.Cigar))
+        self.ures = subing.CatCesrIoSetSuber(db=self, subkey='ures.',
+                                             klas=(coring.Diger, coring.Prefixer, coring.Cigar))
+        self.vrcs = subing.CatCesrIoSetSuber(db=self, subkey='vrcs.',
+                             klas=(coring.Prefixer, coring.Number, coring.Diger, indexing.Siger))
+        self.vres = subing.CatCesrIoSetSuber(db=self, subkey='vres.',
+                             klas=(coring.Diger, coring.Prefixer, coring.Number, coring.Diger, indexing.Siger))
+        self.pses = subing.OnIoSetSuber(db=self, subkey='pses.')
+        self.pwes = subing.OnIoSetSuber(db=self, subkey='pwes.')
+        self.pdes = subing.OnIoSetSuber(db=self, subkey='pdes.')
+        self.udes = subing.CatCesrSuber(db=self, subkey='udes.', klas=(coring.Number, coring.Diger))
+        self.uwes = subing.B64OnIoSetSuber(db=self, subkey='uwes.')
+        self.ooes = subing.OnIoSetSuber(db=self, subkey='ooes.')
+        self.dels = subing.OnIoSetSuber(db=self, subkey='dels.')
+        self.ldes = subing.OnIoSetSuber(db=self, subkey='ldes.')
+        self.qnfs = subing.IoSetSuber(db=self, subkey="qnfs.")
+
+        # events as ordered by first seen ordinals
+        self.fons = subing.CesrSuber(db=self, subkey='fons.', klas=coring.Number)
+
+        self.migs = subing.CesrSuber(db=self, subkey="migs.", klas=coring.Dater)
+        self.vers = subing.Suber(db=self, subkey="vers.")
+
+        # event source local (protected) or non-local (remote not protected)
+        self.esrs = koming.Komer(db=self,
+                                   klas=EventSourceRecord,
+                                   subkey='esrs.')
+
+        # misfit escrows whose processing may change the .esrs event source record
+        self.misfits = subing.IoSetSuber(db=self, subkey='mfes.')
+
+        # delegable events escrows. events with local delegator that need approval
+        self.delegables = subing.IoSetSuber(db=self, subkey='dees.')
+
+        # Kever state made of KeyStateRecord key states
+        self.states = koming.Komer(db=self,
+                                   klas=KeyStateRecord,
+                                   subkey='stts.')
+
+        self.wits = subing.CesrIoSetSuber(db=self, subkey="wits.", klas=coring.Prefixer)
+
+        # habitat application state keyed by habitat name, includes prefix
+        self.habs = koming.Komer(db=self,
+                                 subkey='habs.',
+                                 klas=HabitatRecord, )
+        # habitat name database mapping (domain,name) as key to Prefixer
+        self.names = subing.Suber(db=self, subkey='names.', sep="^")
+
+        # SAD support datetime stamps and signatures indexed and not-indexed
+        # all sad  sdts (sad datetime serializations) maps said to date-time
+        self.sdts = subing.CesrSuber(db=self, subkey='sdts.', klas=coring.Dater)
+
+        # all sad ssgs (sad indexed signature serializations) maps SAD quadkeys
+        # given by quadruple (diger.qb64, prefixer.qb64, seqner.q64, diger.qb64)
+        #  of reply and trans signer's key state est evt to val Siger for each
+        # signature.
+        self.ssgs = subing.CesrIoSetSuber(db=self, subkey='ssgs.', klas=indexing.Siger)
+
+        # all sad scgs  (sad non-indexed signature serializations) maps SAD SAID
+        # to couple (Verfer, Cigar) of nontrans signer of signature in Cigar
+        # nontrans qb64 of Prefixer is same as Verfer
+        self.scgs = subing.CatCesrIoSetSuber(db=self, subkey='scgs.',
+                                             klas=(coring.Verfer, coring.Cigar))
+
+        # all reply messages. Maps reply said to serialization. Replys are
+        # versioned sads ( with version string) so use Serder to deserialize and
+        # use  .sdts, .ssgs, and .scgs for datetimes and signatures
+        # TODO: clean
+        self.rpys = subing.SerderSuber(db=self, subkey='rpys.')
+
+        # all reply escrows indices of partially signed reply messages. Maps
+        # route in reply to single (Diger,)  of escrowed reply.
+        # Routes such as /end/role  /loc/schema
+        self.rpes = subing.CesrIoSetSuber(db=self, subkey='rpes.',
+                                          klas=coring.Diger)
+
+        # auth AuthN/AuthZ by controller at cid of endpoint provider at eid
+        # maps key=cid.role.eid to val=diger of end reply
+        self.eans = subing.CesrSuber(db=self, subkey='eans.', klas=coring.Diger)
+
+        # auth AuthN/AuthZ by endpoint provider at eid of location at scheme url
+        # maps key=cid.role.eid to val=diger of end reply
+        self.lans = subing.CesrSuber(db=self, subkey='lans.', klas=coring.Diger)
+
+        # service endpoint identifier (eid) auths keyed by controller cid.role.eid
+        # data extracted from reply /end/role/add or /end/role/cut
+        self.ends = koming.Komer(db=self, subkey='ends.',
+                                 klas=EndpointRecord, )
+
+        # service endpoint locations keyed by eid.scheme  (endpoint identifier)
+        # data extracted from reply loc
+        self.locs = koming.Komer(db=self,
+                                 subkey='locs.',
+                                 klas=LocationRecord, )
+        # observed oids by watcher by cid.aid.oid  (endpoint identifier)
+        # data extracted from reply loc
+        self.obvs = koming.Komer(db=self,
+                                 subkey='obvs.',
+                                 klas=ObservedRecord, )
+
+        # index of last retrieved message from witness mailbox
+        self.tops = koming.Komer(db=self,
+                                 subkey='witm.',
+                                 klas=TopicsRecord, )
+
+        # group partial signature escrow
+        self.gpse = subing.CatCesrIoSetSuber(db=self, subkey='gpse.',
+                                             klas=(coring.Number, coring.Diger))
+
+        # group delegate escrow
+        self.gdee = subing.CatCesrIoSetSuber(db=self, subkey='gdee.',
+                                             klas=(coring.Number, coring.Diger))
+
+        # group partial witness escrow
+        self.gpwe = subing.CatCesrIoSetSuber(db=self, subkey='gdwe.',
+                                             klas=(coring.Number, coring.Diger))
+
+        # completed group multisig
+        self.cgms = subing.CesrSuber(db=self, subkey='cgms.',
+                                     klas=coring.Diger)
+
+        # exchange message partial signature escrow
+        self.epse = subing.SerderSuber(db=self, subkey="epse.")
+
+        # exchange message PS escrow date time of message
+        self.epsd = subing.CesrSuber(db=self, subkey="epsd.",
+                                     klas=coring.Dater)
+
+        # exchange messages
+        self.exns = subing.SerderSuber(db=self, subkey="exns.")
+
+        # Forward pointer to a provided reply message
+        self.erpy = subing.CesrSuber(db=self, subkey="erpy.", klas=coring.Saider)
+
+        # exchange message signatures
+        self.esigs = subing.CesrIoSetSuber(db=self, subkey='esigs.', klas=indexing.Siger)
+
+        # exchange message signatures
+        self.ecigs = subing.CatCesrIoSetSuber(db=self, subkey='ecigs.',
+                                              klas=(coring.Verfer, coring.Cigar))
+
+        # exchange pathed attachments
+        self.epath = subing.IoSetSuber(db=self, subkey="epath.")
+
+        self.essrs = subing.CesrIoSetSuber(db=self, subkey="essrs.", klas=coring.Texter)
+
+        # accepted signed 12-word challenge response exn messages keys by prefix of signer
+        self.chas = subing.CesrIoSetSuber(db=self, subkey='chas.', klas=coring.Diger)
+
+        # successful signed 12-word challenge response exn messages keys by prefix of signer
+        self.reps = subing.CesrIoSetSuber(db=self, subkey='reps.', klas=coring.Diger)
+
+        # authorized well known OOBIs
+        self.wkas = koming.IoSetKomer(db=self, subkey='wkas.', klas=WellKnownAuthN)
+
+        # KSN support datetime stamps and signatures indexed and not-indexed
+        # all ksn  kdts (key state datetime serializations) maps said to date-time
+        self.kdts = subing.CesrSuber(db=self, subkey='kdts.', klas=coring.Dater)
+
+        # all key state messages. Maps key state said to serialization. ksns are
+        # KeyStateRecords so use ._asdict or ._asjson as appropriate
+        # use  .kdts, .ksgs, and .kcgs for datetimes and signatures
+        self.ksns = koming.Komer(db=self,
+                                klas=KeyStateRecord,
+                                subkey='ksns.')
+
+        # key state SAID database for successfully saved key state notices
+        # maps key=(prefix, aid) to val=said of key state
+        self.knas = subing.CesrSuber(db=self, subkey='knas.', klas=coring.Diger)
+
+        # Watcher watched SAID database for successfully saved watched AIDs for a watcher
+        # maps key=(cid, aid, oid) to val=said of rpy message
+        self.wwas = subing.CesrSuber(db=self, subkey='wwas.', klas=coring.Diger)
+
+        # config loaded oobis to be processed asynchronously, keyed by oobi URL
+        self.oobis = koming.Komer(db=self,
+                                  subkey='oobis.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
+
+        # escrow OOBIs that failed to load, retriable, keyed by oobi URL
+        self.eoobi = koming.Komer(db=self,
+                                  subkey='eoobi.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
+
+        # OOBIs with outstanding client requests.
+        self.coobi = koming.Komer(db=self,
+                                  subkey='coobi.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
+
+        # Resolved OOBIs (those that have been processed successfully for this database).
+        self.roobi = koming.Komer(db=self,
+                                  subkey='roobi.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
+
+        # Well known OOBIs that are to be used for mfa against a resolved OOBI.
+        self.woobi = koming.Komer(db=self,
+                                  subkey='woobi.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
+
+        # Well known OOBIs that are to be used for mfa against a resolved OOBI.
+        self.moobi = koming.Komer(db=self,
+                                  subkey='moobi.',
+                                  klas=OobiRecord,
+                                  sep=">")  # Use separator not allowed in URLs so no splitting occurs.
+
+        # Multifactor well known OOBI auth records to process.  Keys by controller URL
+        self.mfa = koming.Komer(db=self,
+                                subkey='mfa.',
+                                klas=OobiRecord,
+                                sep=">")  # Use separator not allowed in URLs so no splitting occurs.
+
+        # Resolved multifactor well known OOBI auth records.  Keys by controller URL
+        self.rmfa = koming.Komer(db=self,
+                                 subkey='rmfa.',
+                                 klas=OobiRecord,
+                                 sep=">")  # Use separator not allowed in URLs so no splitting occurs.
+
+        # JSON schema SADs keys by the SAID
+        self.schema = subing.SchemerSuber(db=self,
+                                          subkey='schema.')
+
+        # Field values for contact information for remote identifiers.  Keyed by prefix/field
+        self.cfld = subing.Suber(db=self,
+                                 subkey="cfld.")
+
+        # Global settings for the Habery environment
+        self.hbys = subing.Suber(db=self, subkey='hbys.')
+
+        # Signed contact data, keys by prefix
+        self.cons = subing.Suber(db=self,
+                                 subkey="cons.")
+
+        # Transferable signatures on contact data
+        self.ccigs = subing.CesrSuber(db=self, subkey='ccigs.', klas=coring.Cigar)
+
+        # Blinded media for contact information for remote identifiers.
+        # CatCesrSuber with TypeMedia format: (Noncer=SAID, Noncer=UUID, Labeler=MIME, Texter=data)
+        self.imgs = subing.CatCesrSuber(db=self, subkey='imgs.',
+                                         klas=(coring.Noncer, coring.Noncer,
+                                               coring.Labeler, coring.Texter))
+
+        # Field values for identifier information for local identifiers. Keyed by prefix/field
+        self.ifld = subing.Suber(db=self,
+                                 subkey="ifld.")
+
+        # Signed identifier data, keys by prefix
+        self.sids = subing.Suber(db=self,
+                                  subkey="sids.")
+
+        # Transferable signatures on identifier data
+        self.icigs = subing.CesrSuber(db=self, subkey='icigs.', klas=coring.Cigar)
+
+        # Blinded media for identifier information for local identifiers.
+        # CatCesrSuber with TypeMedia format: (Noncer=SAID, Noncer=UUID, Labeler=MIME, Texter=data)
+        self.iimgs = subing.CatCesrSuber(db=self, subkey='iimgs.',
+                                          klas=(coring.Noncer, coring.Noncer,
+                                                coring.Labeler, coring.Texter))
+
+        # Delegation escrow dbs #
+        # delegated partial witness escrow
+        self.dpwe = subing.SerderSuber(db=self, subkey='dpwe.')
+
+        # delegated unanchored escrow
+        self.dune = subing.SerderSuber(db=self, subkey='dune.')
+
+        # delegate publication escrow for sending delegator info to my witnesses
+        self.dpub = subing.SerderSuber(db=self, subkey='dpub.')
+
+        # completed group delegated AIDs
+        self.cdel = subing.CesrOnSuber(db=self, subkey='cdel.',
+                                     klas=coring.Diger)
+
+        # multisig sig embed payload SAID mapped to containing exn messages across group multisig participants
+        self.meids = subing.CesrIoSetSuber(db=self, subkey="meids.", klas=coring.Diger)
+
+        # multisig sig embed payload SAID mapped to group multisig participants AIDs
+        self.maids = subing.CesrIoSetSuber(db=self, subkey="maids.", klas=coring.Prefixer)
+
+        # KRAM cache type — key: expression string, value: drift and lag params
+        self.kramCTYP = koming.Komer(db=self, subkey='ctyp.',
+                                 klas=CacheTypeRecord)
+
+        # KRAM message cache — key: (AID, MID), value: msg datetime, drift, lags
+        self.kramMSGC = koming.Komer(db=self, subkey='msgc.',
+                                 klas=MsgCacheRecord)
+
+        # KRAM transactioned message cache — key: (AID, XID, MID), value: datetimes, drift, lags
+        self.kramTMSC = koming.Komer(db=self, subkey='tmsc.',
+                                 klas=TxnMsgCacheRecord)
+
+        # KRAM partially signed multi-key message key (AID.MID) mapped to associated message (SerderKERI)
+        self.kramPMKM = subing.SerderSuber(db=self, subkey='pmkm.')
+
+        # KRAM partially signed multi-key signature key (AID.MID) mapped to associated signatures
+        self.kramPMKS = subing.CesrIoSetSuber(db=self, subkey='pmks.', klas=indexing.Siger)
+
+        # KRAM partially signed multi-key sender key state key (AID.MID) mapped to SN and event SAID
+        self.kramPMSK = subing.CatCesrSuber(db=self, subkey='pmsk.', klas=(coring.Number, coring.Diger))
+
+        # KRAM partially signed multi-key non-authenticator attachments
+
+        # trqs: trans receipt quadruples (prefixer, number, diger, siger)
+        self.kramTRQS = subing.CatCesrIoSetSuber(db=self, subkey='trqs.',
+                                                  klas=(coring.Prefixer, coring.Number,
+                                                        coring.Diger, indexing.Siger))
+
+        # tsgs: trans last sig groups (prefixer, number, diger, siger) — stored per-siger
+        self.kramTSGS = subing.CatCesrIoSetSuber(db=self, subkey='tsgs.',
+                                                  klas=(coring.Prefixer, coring.Number,
+                                                        coring.Diger, indexing.Siger))
+
+        # sscs: first seen seal couples (number, diger) issuing or delegating
+        self.kramSSCS = subing.CatCesrIoSetSuber(db=self, subkey='sscs.',
+                                                  klas=(coring.Number, coring.Diger))
+
+        # ssts: source seal triples (prefixer, number, diger) issued or delegated
+        self.kramSSTS = subing.CatCesrIoSetSuber(db=self, subkey='ssts.',
+                                                  klas=(coring.Prefixer, coring.Number,
+                                                        coring.Diger))
+
+        # frcs: first seen replay couples (number, dater)
+        self.kramFRCS = subing.CatCesrIoSetSuber(db=self, subkey='frcs.',
+                                                  klas=(coring.Number, coring.Dater))
+
+        # tdcs: typed digest seal couples (verser, diger)
+        self.kramTDCS = subing.CatCesrIoSetSuber(db=self, subkey='tdcs.',
+                                                  klas=(coring.Verser, coring.Diger))
+
+        # ptds: pathed streams (raw bytes)
+        self.kramPTDS = subing.IoSetSuber(db=self, subkey='ptds.')
+
+        # bsqs: blind state quadruples (diger, noncer, noncer, labeler)
+        self.kramBSQS = subing.CatCesrIoSetSuber(db=self, subkey='bsqs.',
+                                                  klas=(coring.Diger, coring.Noncer,
+                                                        coring.Noncer, coring.Labeler))
+
+        # bsss: bound state sextuples (diger, noncer, noncer, labeler, number, noncer)
+        self.kramBSSS = subing.CatCesrIoSetSuber(db=self, subkey='bsss.',
+                                                  klas=(coring.Diger, coring.Noncer,
+                                                        coring.Noncer, coring.Labeler,
+                                                        coring.Number, coring.Noncer))
+
+        # tmqs: type media quadruples (diger, noncer, labeler, texter)
+        self.kramTMQS = subing.CatCesrIoSetSuber(db=self, subkey='tmqs.',
+                                                  klas=(coring.Diger, coring.Noncer,
+                                                        coring.Labeler, coring.Texter))
+
+        # Every attribute added above is a Suber or Komer.  Record their
+        # names so close()/aclose() can null them out to prevent silent
+        # writes to orphaned SubDb objects.
+        self._subdb_names = set(self.__dict__) - _before
+
+
+    def reload(self):
+        """Rebuild in-memory Kever state from persisted habitat and key state records.
+
+        WebBaser stores KERI state across multiple SubDbs but maintains an
+        in-memory cache of active Kevers, prefixes, and group identifiers
+        for efficient event processing.  This method reconstructs that
+        cache after a ``reopen()``.
+
+        Clears all in-memory prefix, group, and kever caches, then
+        iterates habitat records in ``habs.`` via ``getTopItemIter()``.
+        For each habitat with a corresponding KeyStateRecord in ``stts.``,
+        a Kever is constructed.  On success the Kever is cached in
+        ``_kevers`` and the prefix is added to ``self.prefixes``.  Group
+        habitats (where ``hab.mid`` is set) are added to ``self.groups``.
+
+        Habitats that have no key state and are not groups, or whose Kever
+        construction raises ``MissingEntryError``, are collected as orphans
+        and removed from ``habs.`` after iteration (matching Baser
+        behaviour).
+
+        This method performs no I/O — it operates entirely on
+        already-loaded SubDbs and their in-memory views.  It is
+        automatically invoked during ``reopen()``.
+
+        Raises:
+            DatabaseError: when the persisted DB version is behind the
+                library version and migrations have not been run.
+        """
+        # Version/migration check — skip if version infrastructure isn't
+        # initialised yet (fresh database with no _stores on self).
+        try:
+            if not self.current:
+                raise DatabaseError(
+                    f"Database migrations must be run. "
+                    f"DB version {self.version}; current {__version__}")
+        except AttributeError:
+            pass  # fresh WebBaser before first migrate — treat as current
+
+        self.prefixes.clear()
+        self.groups.clear()
+        self._kevers.clear()
+
+        # Deferred function-local import avoids the circular dependency
+        # between db and core.eventing; hoisted out of the loop so the
+        # module lookup happens once instead of once per habitat record.
+        from ..core.eventing import Kever
+
+        removes = []
+        for keys, data in self.habs.getTopItemIter():
+            if (ksr := self.states.get(keys=data.hid)) is not None:
+                try:
+                    kever = Kever(state=ksr, db=self, local=True)
+                except MissingEntryError:
+                    # orphaned habitat: key state references missing events
+                    removes.append(keys)
+                    continue
+                self._kevers[kever.prefixer.qb64] = kever
+                self.prefixes.add(kever.prefixer.qb64)
+                if data.mid:
+                    self.groups.add(data.hid)
+            elif data.mid is None:
+                # no key state and not a group habitat — orphan
+                removes.append(keys)
+
+        # Prune orphans after iteration so habs. is not mutated while
+        # being traversed.
+        for keys in removes:
+            self.habs.rem(keys=keys)
+    
+
+    async def clean(self):
+        """Clean database by replaying events into a fresh clone and swapping data.
+
+        Builds a fresh WebBaser named ``<name>_clean``, replays every
+        first-seen event from this database into it (so only events that
+        verify survive), copies over the non-KEL SubDbs, then swaps the
+        clone's store contents back into this instance and flushes.
+        Finally closes and clears the clone.
+
+        NOTE(review): assumes ``self.cloneAllPreIter()`` yields complete
+        CESR event streams parseable by ``parsing.Parser`` — confirm
+        against the WebBaser event-clone implementation.
+        """
+        # Deferred imports avoid circular dependency with core modules.
+        from ..core import parsing
+        from ..core.eventing import Kevery
+
+        # 1. Create a fresh empty WebBaser clone
+        copy = WebBaser(name=f"{self.name}_clean")
+        await copy.reopen(clear=True,
+                          storageOpener=getattr(self, "_storageOpener", None))
+
+        # 2. Replay all events into the clean DB.  Kevery validates each
+        # event against key state, so stale/invalid entries drop out here.
+        kvy = Kevery(db=copy)
+        psr = parsing.Parser(kvy=kvy, version=Vrsn_1_0)
+        for msg in self.cloneAllPreIter():
+            psr.parseOne(ims=msg)
+
+        # 3. Copy non-event subdbs (single-value put semantics)
+        unsecured = [
+            "hbys", "schema", "states", "rpys", "eans", "tops", "cgms", "exns",
+            "erpy", "kdts", "ksns", "knas", "oobis", "roobi", "woobi", "moobi",
+            "mfa", "rmfa", "cfld", "cons", "ccigs", "cdel", "migs",
+            "ifld", "sids", "icigs"
+        ]
+        for name in unsecured:
+            srcdb = getattr(self, name, None)
+            cpydb = getattr(copy, name, None)
+            if srcdb is None or cpydb is None:
+                continue  # subdb absent on either side — skip silently
+            for keys, val in srcdb.getTopItemIter():
+                cpydb.put(keys=keys, val=val)
+
+        # 4. Copy set-based subdbs (insertion-ordered add semantics)
+        sets = ["esigs", "ecigs", "epath", "chas", "reps", "wkas", "meids", "maids"]
+        for name in sets:
+            srcdb = getattr(self, name, None)
+            cpydb = getattr(copy, name, None)
+            if srcdb is None or cpydb is None:
+                continue
+            for keys, val in srcdb.getTopItemIter():
+                cpydb.add(keys=keys, val=val)
+
+        # 5. Copy imgs and iimgs (pin overwrites any existing value)
+        for keys, val in self.imgs.getTopItemIter():
+            copy.imgs.pin(keys=keys, val=val)
+        for keys, val in self.iimgs.getTopItemIter():
+            copy.iimgs.pin(keys=keys, val=val)
+
+        # 6. Clone verified habs, names, prefixes, groups.  Only habitats
+        # whose prefix produced a Kever during replay are kept.
+        for keys, val in self.habs.getTopItemIter():
+            if val.hid in copy.kevers:
+                copy.habs.put(keys=keys, val=val)
+                ns = "" if val.domain is None else val.domain
+                copy.names.put(keys=(ns, val.name), val=val.hid)
+                copy.prefixes.add(val.hid)
+                if val.mid:
+                    copy.groups.add(val.hid)
+
+        # 7. Clone ends and locs — keep an endpoint role record only when
+        # at least one location record exists for its eid.
+        for (cid, role, eid), val in self.ends.getTopItemIter():
+            exists = False
+            for scheme in ("https", "http", "tcp"):
+                lval = self.locs.get(keys=(eid, scheme))
+                if lval:
+                    exists = True
+                    copy.locs.put(keys=(eid, scheme), val=lval)
+            if exists:
+                copy.ends.put(keys=(cid, role, eid), val=val)
+
+        # 8. Replace in-memory state with cloned data
+        self.kevers.clear()
+        for pre, kever in copy.kevers.items():
+            self.kevers[pre] = kever
+        self.prefixes.clear()
+        self.prefixes.update(copy.prefixes)
+        self.groups.clear()
+        self.groups.update(copy.groups)
+
+        # 9. Swap subdb data from clone into self via WebDBer API; mark
+        # stores dirty so flush() persists the swapped contents.
+        for name in self.SubDbNames:
+            src_store = copy.db._stores.get(name)
+            dst_store = self.db._stores.get(name)
+            if src_store and dst_store:
+                dst_store.items.clear()
+                dst_store.items.update(src_store.items)
+                dst_store.dirty = True
+        await self.db.flush()
+        await copy.aclose(clear=True)
+
+
+class WebBaserDoer(doing.Doer):
+    """Doer for WebBaser lifecycle management within the hio scheduler.
+
+    Manages the close-on-exit side of the WebBaser lifecycle.  Because hio's
+    Doer.enter() and Doer.exit() are synchronous, and WebBaser.reopen()
+    is async, the baser must already be opened before the Doist starts.
+
+    On exit, calls the synchronous :meth:`WebBaser.close` which schedules a
+    fire-and-forget flush to IndexedDB.  In a browser/Pyodide environment
+    the event loop persists, so the flush will complete.  For guaranteed
+    flush semantics, call ``await baser.aclose()`` from an async context
+    before the Doer exits (e.g. in an ``AsyncRecurDoer.recur_async()``
+    finally block).
+
+    Typical usage::
+
+        baser = WebBaser(name="wallet", temp=False)
+        await baser.reopen(storageOpener=backend.open)
+        doer = WebBaserDoer(baser=baser)
+        doist.doers = [doer, ...]
+        await doist.ado()
+    """
+
+    def __init__(self, baser, **kwa):
+        """Initialize doer.
+
+        Parameters:
+            baser (WebBaser): already-opened baser whose close-on-exit
+                this Doer manages.  Remaining keyword args pass through
+                to doing.Doer.
+        """
+        super().__init__(**kwa)
+        self.baser = baser
+
+    def enter(self, *, temp=None):
+        """Lifecycle entry: verify the baser was opened beforehand.
+
+        Raises RuntimeError otherwise, since reopen() is async and
+        cannot be awaited from this synchronous hook.
+        """
+        if not self.baser.opened:
+            raise RuntimeError(
+                "WebBaser must be opened before WebBaserDoer.enter()")
+
+    def exit(self):
+        """Lifecycle exit: close the baser, clearing data when temp."""
+        if self.baser.opened:
+            self.baser.close(clear=self.baser.temp)
diff --git a/src/keri/db/webdbing.py b/src/keri/db/webdbing.py
index 114a732e2..1a5358f0f 100644
--- a/src/keri/db/webdbing.py
+++ b/src/keri/db/webdbing.py
@@ -10,15 +10,15 @@
 import json
 from collections.abc import Awaitable, Callable, Iterable, Iterator
 from dataclasses import dataclass, field
-from typing import Any
+from typing import Any, Union
+from ordered_set import OrderedSet as oset
+from sortedcontainers import SortedDict
 
 try:
     from pyscript import storage
 except ImportError:  # pragma: no cover
     storage = None
 
-from ordered_set import OrderedSet as oset
-from sortedcontainers import SortedDict
 
 # The following are necessary to define in this file 
 # to prevent non wasm compatible imports (importing from dbing)
@@ -864,7 +864,7 @@ def getTopItemIter(self, db: SubDb, top: bytes = b"") -> Iterator[tuple[bytes, b
                 yield key, val
             return
 
-        for key in db.items.irange(minimum=prefix):
+        for key in list(db.items.irange(minimum=prefix)):
             if not key.startswith(prefix):
                 break
             yield key, db.items[key]
@@ -1123,11 +1123,9 @@ def getIoSetItemIter(self, db, key, *, ion=0, sep=b'.'):
         if not key:
             return iter(())
 
-        # Get the prefix 
+        # Snapshot keys via list() to allow safe delete-during-iteration
         iokey = suffix(key, ion, sep=sep)
-
-        # Iterate through items from the starting key
-        for iokey in db.items.irange(minimum=iokey):
+        for iokey in list(db.items.irange(minimum=iokey)):
             ckey, cion = unsuffix(iokey, sep=sep)
             # Stop when we leave this IoSet
             if ckey != key:
diff --git a/tests/db/test_webbasing.py b/tests/db/test_webbasing.py
new file mode 100644
index 000000000..899e99c5b
--- /dev/null
+++ b/tests/db/test_webbasing.py
@@ -0,0 +1,4028 @@
+# -*- encoding: utf-8 -*-
+"""
+tests.db.test_webbasing module
+
+"""
+
+import asyncio
+import json
+
+import pytest
+
+from keri.db.webbasing import WebBaser, WebBaserDoer, _strip_prerelease
+
+try:
+    from keri.db import subing, koming, dgKey, snKey, statedict
+except ImportError:
+    subing = None
+    koming = None
+
+try:
+    from keri.core import (serdering, coring, signing, Noncer, Labeler, Parser,
+                        indexing, Number, Diger, Seqner, Saider, Texter, StateEstEvent,
+                        SerderKERI, Salter, rotate, MtrDex, incept, interact,
+                        Kever, Prefixer, Siger, Dater, Serder, Signer, NumDex, Kevery)
+    from keri import versify, Kinds, Ilks
+    from keri.recording import (EventSourceRecord, HabitatRecord, KeyStateRecord,
+                            OobiRecord, RawRecord, StateEERecord)
+except ImportError:
+    # Pyodide fallback
+    from keri.core import serdering
+
+from keri.kering import Vrsn_1_0
+from keri.core import state as eventState
+from keri.app import openHby
+from keri.help import datify, dictify
+                            
+needskeri = pytest.mark.skipif(subing is None, reason="requires full keri (lmdb)")
+
+
+class FakeStorageHandle:
+    """Async storage handle with local writes and explicit sync commit.
+
+    Mimics a PyScript storage handle: mutations land in an in-memory
+    buffer (``_local``) and only reach the backend's ``persisted`` dict
+    when ``sync()`` is awaited, matching IndexedDB commit semantics.
+    """
+
+    def __init__(self, backend, namespace):
+        # Seed the local buffer from whatever was last persisted for this
+        # namespace, so a reopened handle sees prior committed state.
+        self.backend = backend
+        self.namespace = namespace
+        self._local = dict(self.backend.persisted.get(namespace, {}))
+
+    def get(self, key, default=None):
+        """Return the value for key from the local buffer, else default."""
+        return self._local.get(key, default)
+
+    def __getitem__(self, key):
+        # Raises KeyError for missing keys, like a plain dict.
+        return self._local[key]
+
+    def __setitem__(self, key, value):
+        # Write is local only; not visible in backend.persisted until sync().
+        self._local[key] = value
+
+    def clear(self):
+        """Remove all keys from the local storage buffer."""
+        self._local.clear()
+
+    async def sync(self):
+        """Commit a snapshot of the local buffer to the backend store."""
+        self.backend.persisted[self.namespace] = dict(self._local)
+
+
+class FakeStorageBackend:
+    """Minimal async opener that mimics PyScript storage commit semantics.
+
+    ``persisted`` maps namespace -> dict of committed key/value pairs;
+    committed data survives handle close/reopen, emulating IndexedDB.
+    """
+
+    def __init__(self):
+        # namespace -> committed key/value dict
+        self.persisted = {}
+
+    async def open(self, namespace):
+        """Return a fresh FakeStorageHandle bound to namespace."""
+        return FakeStorageHandle(self, namespace)
+
+
+@needskeri
+def test_webdb_baser():
+    """Test WebBaser class."""
+    async def _go():
+        backend = FakeStorageBackend()
+        baser = WebBaser()
+
+        await baser.reopen(storageOpener=backend.open)
+
+        assert baser.opened
+        assert baser.name == "main"
+
+        assert isinstance(baser.evts, subing.SerderSuber)
+        assert isinstance(baser.sigs, subing.CesrIoSetSuber)
+        assert isinstance(baser.dtss, subing.CesrSuber)
+        assert isinstance(baser.rcts, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.ures, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.kels, subing.OnIoSetSuber)
+        assert isinstance(baser.ooes, subing.OnIoSetSuber)
+        assert isinstance(baser.pses, subing.OnIoSetSuber)
+        assert isinstance(baser.dels, subing.OnIoSetSuber)
+        assert isinstance(baser.ldes, subing.OnIoSetSuber)
+        assert isinstance(baser.ures, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.esrs, koming.Komer)
+        assert isinstance(baser.states, koming.Komer)
+        assert isinstance(baser.habs, koming.Komer)
+        assert isinstance(baser.names, subing.Suber)
+        assert isinstance(baser.imgs, subing.CatCesrSuber)
+        assert isinstance(baser.iimgs, subing.CatCesrSuber)
+
+        await baser.aclose(clear=True)
+        assert not baser.opened
+
+        # test not opened on init
+        baser = WebBaser(reopen=False)
+        assert isinstance(baser, WebBaser)
+        assert baser.name == "main"
+        assert baser.opened == False
+
+        await baser.reopen(storageOpener=backend.open)
+        assert baser.opened
+
+        assert isinstance(baser.evts, subing.SerderSuber)
+        assert isinstance(baser.sigs, subing.CesrIoSetSuber)
+        assert isinstance(baser.dtss, subing.CesrSuber)
+        assert isinstance(baser.rcts, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.ures, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.kels, subing.OnIoSetSuber)
+        assert isinstance(baser.ooes, subing.OnIoSetSuber)
+        assert isinstance(baser.pses, subing.OnIoSetSuber)
+        assert isinstance(baser.dels, subing.OnIoSetSuber)
+        assert isinstance(baser.ldes, subing.OnIoSetSuber)
+        assert isinstance(baser.ures, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.esrs, koming.Komer)
+        assert isinstance(baser.states, koming.Komer)
+        assert isinstance(baser.habs, koming.Komer)
+        assert isinstance(baser.names, subing.Suber)
+        assert isinstance(baser.imgs, subing.CatCesrSuber)
+        assert isinstance(baser.iimgs, subing.CatCesrSuber)
+
+        await baser.aclose(clear=True)
+        assert not baser.opened
+
+        backend = FakeStorageBackend()
+        baser = WebBaser(name="test")
+
+        # Open WebBaser using the fake async storage backend
+        await baser.reopen(storageOpener=backend.open)
+
+        # Basic identity checks
+        assert baser.opened is True
+        assert baser.env is not None
+
+        # Complete SubDBs instance checks
+        # SerderSuber
+        assert isinstance(baser.evts, subing.SerderSuber)
+        assert isinstance(baser.rpys, subing.SerderSuber)
+        assert isinstance(baser.epse, subing.SerderSuber)
+        assert isinstance(baser.exns, subing.SerderSuber)
+        assert isinstance(baser.dpwe, subing.SerderSuber)
+        assert isinstance(baser.dune, subing.SerderSuber)
+        assert isinstance(baser.dpub, subing.SerderSuber)
+        assert isinstance(baser.kramPMKM, subing.SerderSuber)
+
+        # CesrIoSetSuber
+        assert isinstance(baser.sigs, subing.CesrIoSetSuber)
+        assert isinstance(baser.wigs, subing.CesrIoSetSuber)
+        assert isinstance(baser.wits, subing.CesrIoSetSuber)
+        assert isinstance(baser.ssgs, subing.CesrIoSetSuber)
+        assert isinstance(baser.rpes, subing.CesrIoSetSuber)
+        assert isinstance(baser.esigs, subing.CesrIoSetSuber)
+        assert isinstance(baser.essrs, subing.CesrIoSetSuber)
+        assert isinstance(baser.chas, subing.CesrIoSetSuber)
+        assert isinstance(baser.reps, subing.CesrIoSetSuber)
+        assert isinstance(baser.meids, subing.CesrIoSetSuber)
+        assert isinstance(baser.maids, subing.CesrIoSetSuber)
+        assert isinstance(baser.kramPMKS, subing.CesrIoSetSuber)
+        
+        # CesrSuber
+        assert isinstance(baser.dtss, subing.CesrSuber)
+        assert isinstance(baser.fons, subing.CesrSuber)
+        assert isinstance(baser.migs, subing.CesrSuber)
+        assert isinstance(baser.sdts, subing.CesrSuber)
+        assert isinstance(baser.eans, subing.CesrSuber)
+        assert isinstance(baser.lans, subing.CesrSuber)
+        assert isinstance(baser.cgms, subing.CesrSuber)
+        assert isinstance(baser.epsd, subing.CesrSuber)
+        assert isinstance(baser.erpy, subing.CesrSuber)
+        assert isinstance(baser.kdts, subing.CesrSuber)
+        assert isinstance(baser.knas, subing.CesrSuber)
+        assert isinstance(baser.wwas, subing.CesrSuber)
+        assert isinstance(baser.ccigs, subing.CesrSuber)
+        assert isinstance(baser.icigs, subing.CesrSuber)
+        
+        # CatCesrIoSetSuber
+        assert isinstance(baser.rcts, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.ures, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.vrcs, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.vres, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.scgs, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.gpse, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.gdee, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.gpwe, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.ecigs, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.kramTRQS, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.kramTSGS, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.kramSSCS, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.kramSSTS, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.kramFRCS, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.kramTDCS, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.kramBSQS, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.kramBSSS, subing.CatCesrIoSetSuber)
+        assert isinstance(baser.kramTMQS, subing.CatCesrIoSetSuber)
+
+        # OnIoSetSuber
+        assert isinstance(baser.ooes, subing.OnIoSetSuber)
+        assert isinstance(baser.pses, subing.OnIoSetSuber)
+        assert isinstance(baser.dels, subing.OnIoSetSuber)
+        assert isinstance(baser.ldes, subing.OnIoSetSuber)
+        assert isinstance(baser.kels, subing.OnIoSetSuber)
+        assert isinstance(baser.pwes, subing.OnIoSetSuber)
+        assert isinstance(baser.pdes, subing.OnIoSetSuber)
+
+        # IoSetSuber
+        assert isinstance(baser.qnfs, subing.IoSetSuber)
+        assert isinstance(baser.misfits, subing.IoSetSuber)
+        assert isinstance(baser.delegables, subing.IoSetSuber)
+        assert isinstance(baser.epath, subing.IoSetSuber)
+        assert isinstance(baser.kramPTDS, subing.IoSetSuber)
+
+        # Komers
+        assert isinstance(baser.esrs, koming.Komer)
+        assert isinstance(baser.states, koming.Komer)
+        assert isinstance(baser.habs, koming.Komer)
+        assert isinstance(baser.ends, koming.Komer)
+        assert isinstance(baser.locs, koming.Komer)
+        assert isinstance(baser.obvs, koming.Komer)
+        assert isinstance(baser.tops, koming.Komer)
+        assert isinstance(baser.ksns, koming.Komer)
+        assert isinstance(baser.oobis, koming.Komer)
+        assert isinstance(baser.eoobi, koming.Komer)
+        assert isinstance(baser.coobi, koming.Komer)
+        assert isinstance(baser.roobi, koming.Komer)
+        assert isinstance(baser.woobi, koming.Komer)
+        assert isinstance(baser.moobi, koming.Komer)
+        assert isinstance(baser.mfa, koming.Komer)
+        assert isinstance(baser.rmfa, koming.Komer)
+        assert isinstance(baser.kramCTYP, koming.Komer)
+        assert isinstance(baser.kramMSGC, koming.Komer)
+        assert isinstance(baser.kramTMSC, koming.Komer)
+
+        # ---- Begin functional tests ----
+
+        preb = 'DAzwEHHzq7K0gzQPYGGwTmuupUhPx5_yZ-Wk1x4ejhcc'.encode("utf-8")
+        digb = 'EGAPkzNZMtX-QiVgbRbyAIZGoXvbGv9IPb0foWTZvI_4'.encode("utf-8")
+        sn = 3
+
+        ked = dict(v=versify(kind=Kinds.json, size=0), t="rot", d=digb.decode("utf-8"),
+               i=preb.decode("utf-8"), s="{:x}".format(sn), p=preb.decode("utf-8"),
+               kt="0", k=[], nt="0", n=[], bt="0", br=[], ba=[], a=[])
+        skedb = json.dumps(ked, separators=(",", ":"), ensure_ascii=False).encode("utf-8")
+
+        while True:
+            ked["v"] = versify(kind=Kinds.json, size=len(skedb))
+            next_skedb = json.dumps(ked, separators=(",", ":"), ensure_ascii=False).encode("utf-8")
+            if len(next_skedb) == len(skedb):
+                skedb = next_skedb
+                break
+            skedb = next_skedb
+
+        key = dgKey(preb, digb)
+        assert key == f"{preb.decode()}.{digb.decode()}".encode()
+
+        # Build minimal Serder
+        sked = serdering.SerderKERI(raw=skedb, verify=False)
+
+        # Basic tests for all SerderSuber instances
+        serderSubers = [    
+            baser.evts,
+            baser.rpys,
+            baser.epse,
+            baser.exns,
+            baser.dpwe,
+            baser.dune,
+            baser.dpub,
+            baser.kramPMKM,
+        ]
+
+        for sub in serderSubers:
+            assert isinstance(sub, subing.SerderSuber)
+
+            # empty db
+            assert sub.get(keys=(preb, digb)) is None
+            assert sub.rem(keys=(preb, digb)) is False
+
+            # insert
+            assert sub.put(keys=(preb, digb), val=sked) is True
+            assert sub.get(keys=(preb, digb)).raw == skedb
+
+            # put should not overwrite
+            assert sub.put(keys=(preb, digb), val=sked) is False
+
+            # pin should overwrite
+            assert sub.pin(keys=(preb, digb), val=sked) is True
+            assert sub.get(keys=(preb, digb)).raw == skedb
+
+            # remove
+            assert sub.rem(keys=(preb, digb)) is True
+            assert sub.get(keys=(preb, digb)) is None
+
+            # second key
+            pre2 = b"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
+            dig2 = b"CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
+
+            assert sub.put(keys=(pre2, dig2), val=sked) is True
+            assert sub.get(keys=(pre2, dig2)).raw == skedb
+
+            # idempotent pin
+            assert sub.pin(keys=(pre2, dig2), val=sked) is True
+            assert sub.pin(keys=(pre2, dig2), val=sked) is True
+
+            # equal-content Serder should not overwrite
+            skedClone = serdering.SerderKERI(raw=skedb, verify=False)
+            assert sub.put(keys=(pre2, dig2), val=skedClone) is False
+            assert sub.get(keys=(pre2, dig2)).raw == skedb
+
+            # persistence
+            await baser.reopen(storageOpener=backend.open)
+            assert sub.get(keys=(preb, digb)) is None
+            assert sub.get(keys=(pre2, dig2)) is not None
+
+            # delete second key
+            assert sub.rem(keys=(pre2, dig2)) is True
+            assert sub.get(keys=(pre2, dig2)) is None
+
+
+        # Basic tests for OnIoSetSuber instances
+        onIoSubers = [
+            baser.ooes,
+            baser.pses,
+            baser.dels,
+            baser.ldes,
+            baser.kels,
+            baser.pwes,
+            baser.pdes,
+        ]
+
+        for sub in onIoSubers:
+            assert isinstance(sub, subing.OnIoSetSuber)
+            
+            # Basic insertion behavior
+            pre = 'A'
+            sn = 0
+            key = snKey(pre, sn)
+            vals = [b"z", b"m", b"x", b"a"]
+            deserialized_vals = [sub._des(val) for val in vals]
+
+            # Empty db
+            assert sub.get(keys=key) == []
+            assert sub.cntAll(key) == 0
+            assert sub.rem(key) is False
+
+            # initial insertion
+            assert sub.put(keys=key, vals=vals) is True
+            assert sub.get(keys=key) == deserialized_vals
+
+            # duplicate insertion
+            assert sub.put(keys=key, vals=[b'd', b'k']) is True
+            assert sub.put(keys=key, vals=[b'd']) is False
+            assert sub.put(keys=key, vals=[b'k']) is False
+            assert sub.put(keys=key, vals=[b'k', b'd', b'k']) is False
+            assert sub.add(keys=key, val=b'd') is False
+            assert sub.add(keys=key, val=b'k') is False
+            assert sub.get(keys=key) == deserialized_vals + ['d', 'k']
+
+            # mixed insertion
+            assert sub.put(keys=key, vals=[b'k', b'c']) is True
+            assert sub.get(keys=key) == deserialized_vals + ['d', 'k', 'c']
+
+            # deletion + reinsertion
+            assert sub.rem(keys=key, val=b'd') is True
+            assert sub.get(keys=key) == deserialized_vals + ['k', 'c']
+            assert sub.add(keys=key, val=b'd') is True
+            assert sub.get(keys=key) == deserialized_vals + ['k', 'c', 'd']
+
+            # empty insertion
+            assert sub.put(keys=key, vals=[]) is False
+            assert sub.get(keys=key) == deserialized_vals + ['k', 'c', 'd']
+
+            # empty val allowed
+            assert sub.add(keys=key, val=b'') is True
+            assert sub.get(keys=key) == deserialized_vals + ['k', 'c', 'd', '']
+
+            # cleanup
+            assert sub.rem(key) is True
+            assert sub.get(keys=key) == []
+
+            # Key type normalization
+            assert sub.put(keys='B', vals=[b'1', b'2']) is True
+            assert sub.add(keys='B', val=b'3') is True
+            assert sub.put(['B'], vals=b'4') is True
+            assert sub.add(keys=['B'], val=b'5') is True
+            assert sub.put(("B"), vals=b'6') is True
+            assert sub.add(keys=("B"), val=b'7') is True
+            assert sub.put(memoryview(b'B'), vals=b'8') is True
+            assert sub.add(keys=memoryview(b'B'), val=b'9') is True
+
+            assert sub.get(keys=b'B') == ['1','2','3','4','5','6','7','8','9']
+
+            assert sub.rem(b'B') is True
+            assert sub.get(keys=b'B') == []
+
+            # Edge case: duplicate vals
+            assert sub.put(key, vals=[b'a', 'a']) is True
+            assert sub.get(keys=key) == ['a']   # Only added once
+            assert sub.rem(key) is True
+
+            # Retrieval behavior
+            assert sub.put(keys=pre, on=sn, vals=vals) is True
+            assert sub.get(keys=pre, on=sn) == deserialized_vals
+            assert list(sub.getAllIter(pre, on=sn)) == deserialized_vals
+            assert sub.getLast(keys=pre, on=sn) == deserialized_vals[-1]
+            assert sub.cntAll(pre, on=sn) == 4
+
+            # empty retrieval
+            assert sub.get(keys=b'X') == []
+            assert list(sub.getAllIter(b'X')) == []
+            assert sub.getLast(keys=b'X') is None
+            assert sub.cntAll(b'X') == 0
+            assert list(sub.getAllItemIter(keys=b'X')) == []
+
+            # getAllItemIter ordering
+            items = list(sub.getAllItemIter())
+            assert items == [(('A',), 0, 'z'),
+                            (('A',), 0, 'm'),
+                            (('A',), 0, 'x'),
+                            (('A',), 0, 'a')]
+
+            # Key type normalization (again)
+            assert sub.get(keys=b'A') == deserialized_vals
+            assert sub.get(keys='A') == deserialized_vals
+            assert sub.get(keys=['A']) == deserialized_vals
+            assert sub.get(keys=('A',)) == deserialized_vals
+            assert sub.get(keys=memoryview(b'A')) == deserialized_vals
+
+            # Deletion behavior
+            assert sub.getLast(keys=pre, on=sn) == 'a'
+            assert sub.rem(keys=pre, on=sn, val=b'a') is True
+            assert sub.get(keys=pre, on=sn) == ['z','m','x']
+            assert sub.getLast(keys=pre, on=sn) == 'x'
+            assert sub.cntAll(pre, on=sn) == 3
+
+            assert sub.rem(pre, on=sn) is True
+
+            # Pinning behavior
+            assert sub.get(keys=key) == []
+            assert sub.put(keys=key, vals=vals) is True
+            assert sub.get(keys=key) == deserialized_vals
+
+            assert sub.pin(keys=key, vals=[b'a', b'b', b'c']) is True
+            assert sub.get(keys=key) == ['a','b','c']
+
+            assert sub.pin(keys=key, vals=[b'x', b'y']) is True
+            assert sub.get(keys=key) == ['x','y']
+
+            assert sub.pin(keys=key, vals=[]) is False
+            assert sub.get(keys=key) == ['x','y']
+
+            assert sub.rem(key) is True
+
+            assert sub.put(keys=key, vals=[b'1', b'2']) is True
+            assert sub.pin(keys=key, vals=[b'Q']) is True
+            assert sub.get(keys=key) == ['Q']
+
+            assert sub.pin(keys=key, vals=[b'A','A',memoryview(b'A')]) is True
+            assert sub.get(keys=key) == ['A']
+
+            assert sub.rem(key) is True
+
+            # Multi-sn ordering tests
+            pre = b"A"
+            aKey = snKey(pre, 1)
+            bKey = snKey(pre, 2)
+            cKey = snKey(pre, 4)
+            dKey = snKey(pre, 7)
+
+            assert sub.put(keys=pre, on=1, vals=[b"z", b"m", b"x"])
+            assert sub.put(keys=pre, on=2, vals=[b"o", b"r", b"z"])
+            assert sub.put(keys=pre, on=4, vals=[b"h", b"n"])
+            assert sub.put(keys=pre, on=7, vals=[b"k", b"b"])
+
+            assert list(sub.getTopItemIter(keys=aKey)) == [(('A',),1,'z'),
+                                                        (('A',),1,'m'),
+                                                        (('A',),1,'x')]
+
+            # cleanup each sn
+            assert sub.remAll() is True
+            assert list(sub.getFullItemIter()) == []
+
+            # tokey / tokeys round-trip
+            t = sub._tokey(aKey)
+            assert sub._tokeys(t) == ("A", "00000000000000000000000000000001")
+
+
+        # Basic tests for IoSetSuber instances
+        ioSetSubers = [
+            baser.qnfs,
+            baser.misfits,
+            baser.delegables,
+            baser.epath,
+            baser.kramPTDS,
+        ]
+
+        for sub in ioSetSubers:
+            assert isinstance(sub, subing.IoSetSuber)
+
+            # Basic insertion behavior
+            key = b"A"
+            vals = [b"z", b"m", b"x", b"a"]
+
+            assert sub.get(keys=key) == []
+            assert sub.cnt(keys=key) == 0
+            assert sub.rem(keys=key) is False
+
+            # initial insertion
+            assert sub.put(keys=key, vals=vals) is True
+            assert sorted(sub.get(keys=key)) == sorted([v.decode() for v in vals])
+
+            # duplicate insertion
+            assert sub.put(keys=key, vals=[b'd', b'k']) is True
+            assert sub.put(keys=key, vals=[b'd']) is False
+            assert sub.put(keys=key, vals=[b'k']) is False
+            assert sub.add(keys=key, val=b'd') is False
+            assert sub.add(keys=key, val=b'k') is False
+
+            # mixed insertion
+            assert sub.put(keys=key, vals=[b'k', b'c']) is True
+            assert 'c' in sub.get(keys=key)
+
+            # deletion + reinsertion
+            assert sub.rem(keys=key, val=b'd') is True
+            assert 'd' not in sub.get(keys=key)
+            assert sub.add(keys=key, val=b'd') is True
+            assert 'd' in sub.get(keys=key)
+
+            # empty insertion
+            assert sub.put(keys=key, vals=[]) is False
+
+            # empty val allowed
+            assert sub.add(keys=key, val=b'') is True
+            assert '' in sub.get(keys=key)
+
+            # cleanup
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) == []
+
+            # Key type normalization
+            assert sub.put(keys='B', vals=[b'1', b'2']) is True
+            assert sub.add(keys='B', val=b'3') is True
+            assert sub.put(['B'], vals=b'4') is True
+            assert sub.add(keys=['B'], val=b'5') is True
+            assert sub.put(("B"), vals=b'6') is True
+            assert sub.add(keys=("B"), val=b'7') is True
+            assert sub.put(memoryview(b'B'), vals=b'8') is True
+            assert sub.add(keys=memoryview(b'B'), val=b'9') is True
+
+            assert sorted(sub.get(keys=b'B')) == ['1','2','3','4','5','6','7','8','9']
+
+            assert sub.rem(b'B') is True
+            assert sub.get(keys=b'B') == []
+
+            # Duplicate vals collapse
+            assert sub.put(key, vals=[b'a', b'a']) is True
+            assert sub.get(keys=key) == ['a']
+            assert sub.rem(key) is True
+
+            # Deletion behavior
+            assert sub.put(key, vals=vals) is True
+            assert sub.rem(key, val=b'm') is True
+            assert 'm' not in sub.get(keys=key)
+
+            assert sub.rem(key, val=b'y') is False
+
+            assert sub.rem(key) is True
+            assert sub.get(keys=key) == []
+            assert sub.cnt(key) == 0
+
+            assert sub.rem(b'X') is False
+
+            # key isolation
+            assert sub.put(b'A', vals=[b'1']) is True
+            assert sub.put(b'B', vals=[b'2']) is True
+            assert sub.rem(b'A') is True
+            assert sub.get(keys=b'B') == ['2']
+
+            # cleanup all
+            for fullKey, val in list(sub.getFullItemIter()):
+                userKey = fullKey[0]
+                assert sub.rem(keys=userKey) is True
+            assert list(sub.getFullItemIter()) == []
+
+            # Persistence across reopen
+            assert sub.put(keys=b'C', vals=[b'1', b'2']) is True
+            await baser.reopen(storageOpener=backend.open)
+            assert sub.get(keys=b'C') == ['1', '2']
+
+        # -------- CesrIoSetSuber Subdbs tests ---------
+
+        # Tests for CesrIoSetSuber where klas=Siger
+        sigerCesrIoSetSuber = [
+            "sigs",
+            "ssgs",
+            "wigs",
+            "esigs",
+            "kramPMKS"
+        ]
+
+        # Create valid test signatures
+        signer0 = signing.Signer(transferable=False, seed=b'0123456789abcdef0123456789abcdef')
+        signer1 = signing.Signer(transferable=False, seed=b'fedcba9876543210fedcba9876543210')
+
+        test_data = b"test witness signatures"
+        cigar0 = signer0.sign(ser=test_data)
+        cigar1 = signer1.sign(ser=test_data)
+
+        siger0 = indexing.Siger(raw=cigar0.raw, code=indexing.IdrDex.Ed25519_Sig, index=0)
+        siger1 = indexing.Siger(raw=cigar1.raw, code=indexing.IdrDex.Ed25519_Sig, index=1)
+
+        for name in sigerCesrIoSetSuber:
+            sub = getattr(baser, name)
+
+            # Setup
+            key = dgKey(preb, digb)
+            assert key == f'{preb.decode("utf-8")}.{digb.decode("utf-8")}'.encode("utf-8")
+
+            # Empty db
+            assert sub.get(keys=key) == []
+            assert sub.cnt(keys=key) == 0
+            assert sub.rem(keys=key) == False
+
+            # Basic insertion
+            assert sub.put(keys=key, vals=[siger0]) == True
+            assert [s.qb64b for s in sub.get(keys=key)] == [siger0.qb64b]
+            assert sub.cnt(keys=key) == 1
+
+            # idempotent put
+            assert sub.put(keys=key, vals=[siger0]) == False
+            assert [s.qb64b for s in sub.get(keys=key)] == [siger0.qb64b]
+            
+            # Add second signature
+            assert sub.add(keys=key, val=siger1) == True
+            assert [s.qb64b for s in sub.get(keys=key)] == [siger0.qb64b, siger1.qb64b]
+            assert [val.qb64b for val in sub.getIter(keys=key)] == [siger0.qb64b, siger1.qb64b]
+            
+            # Deletion
+            assert sub.rem(keys=key) == True
+            assert sub.get(keys=key) == []
+
+            # put with multiple vals and delete individually
+            assert sub.put(keys=key, vals=[siger0, siger1]) == True
+            for val in [siger0, siger1]:
+                assert sub.rem(keys=key, val=val) == True
+            assert sub.get(keys=key) == []
+
+            # put with multiple vals and delete via iteration
+            assert sub.put(keys=key, vals=[siger0, siger1]) == True
+            for val in sub.getIter(keys=key):
+                assert sub.rem(keys=key, val=val) == True
+            assert sub.get(keys=key) == []
+
+            # Put with multiple vals and check ordering 
+            assert sub.put(keys=key, vals=[siger0]) == True
+            assert [s.qb64b for s in sub.get(keys=key)] == [siger0.qb64b]
+            assert sub.put(keys=key, vals=[siger1]) == True
+            assert [s.qb64b for s in sub.get(keys=key)] == [siger0.qb64b, siger1.qb64b]
+            
+            # Delete
+            assert sub.rem(keys=key) == True
+            
+            # Check insertion order
+            assert sub.put(keys=key, vals=[siger1, siger0]) == True
+            assert [s.qb64b for s in sub.get(keys=key)] == [siger1.qb64b, siger0.qb64b]
+            assert sub.rem(keys=key) == True
+            assert sub.get(keys=key) == []
+            assert sub.put(keys=key, vals=[siger0, siger1]) == True
+
+            # more sigs tests
+
+            # Reset to empty 
+            assert sub.rem(keys=key) == True
+            assert sub.get(keys=key) == []
+
+            # Mixed insertion behavior
+            assert sub.put(keys=key, vals=[siger0]) is True
+            assert sub.add(keys=key, val=siger0) is False  # duplicate
+            assert sub.add(keys=key, val=siger1) is True
+            assert sub.put(keys=key, vals=[siger1]) is False  # duplicate
+            assert sub.cnt(keys=key) == 2
+
+            # Key normalization
+            altKeyStr = key.decode()
+            altKeyMv = memoryview(key)
+
+            assert sub.get(keys=altKeyStr) != sub.get(keys=key)
+            assert sub.get(keys=altKeyMv) != sub.get(keys=key)
+
+            # Type safety
+            with pytest.raises(AttributeError):
+                sub.add(keys=key, val=b"not a siger")
+
+            with pytest.raises(AttributeError):
+                sub.put(keys=key, vals=[b"nope"])
+
+            # Reset to empty
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) == []
+
+            # Non-Persistence across reopen
+            assert sub.put(keys=key, vals=[siger0, siger1]) is True
+            await baser.reopen(storageOpener=backend.open)
+            sub = getattr(baser, name)
+            assert sub.get(keys=key) == []
+
+            # getFullItemIter consistency
+            assert sub.put(keys=key, vals=[siger0, siger1]) is True
+            items = list(sub.getFullItemIter(keys=key))
+            assert len(items) == 2
+            assert [v.qb64b for k, v in items] == [siger0.qb64b, siger1.qb64b]
+
+            # Cleanup
+            for k, v in list(sub.getFullItemIter()):
+                # The key is stored as a (pre, dig) tuple
+                preStr = k[0]   # prefix of the user
+                digStr = k[1]   # digest of the event
+                k = f"{preStr}.{digStr}".encode("utf-8")
+                assert sub.rem(keys=k, val=v) is True
+
+            assert sub.get(keys=key) == []
+
+
+        # Tests for CesrIoSetSuber where klas=Prefixer
+        prefixerCesrIoSetSubers = [
+            "wits",
+            "maids",            
+        ]
+
+        # Create witness prefixes
+        witA = coring.Prefixer(qb64b=b'BADA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY_qrIZIicQg')
+        witB = coring.Prefixer(qb64b=b'BADAyl33W9ja_wLX85UrzRnL4KNzlsIKIA7CrD04nVX1w')
+
+        for name in prefixerCesrIoSetSubers:
+            sub = getattr(baser, name)
+
+            # Empty DB behavior
+            assert sub.get(keys=key) == []
+            assert sub.cnt(keys=key) == 0
+            assert sub.rem(keys=key) is False
+
+            # Basic insertion
+            assert sub.put(keys=key, vals=[witA]) is True
+            assert [w.qb64b for w in sub.get(keys=key)] == [witA.qb64b]
+            assert sub.cnt(keys=key) == 1
+
+            # idempotent put
+            assert sub.put(keys=key, vals=[witA]) is False
+            assert [w.qb64b for w in sub.get(keys=key)] == [witA.qb64b]
+
+            # add second witness
+            assert sub.add(keys=key, val=witB) is True
+            assert [w.qb64b for w in sub.get(keys=key)] == [witA.qb64b, witB.qb64b]
+
+            # iteration
+            assert [w.qb64b for w in sub.getIter(keys=key)] == [witA.qb64b, witB.qb64b]
+
+            # Deletion
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) == []
+
+            # delete individual values
+            assert sub.put(keys=key, vals=[witA, witB]) is True
+            for w in [witA, witB]:
+                assert sub.rem(keys=key, val=w) is True
+            assert sub.get(keys=key) == []
+
+            # delete via iteration
+            assert sub.put(keys=key, vals=[witA, witB]) is True
+            for w in sub.getIter(keys=key):
+                assert sub.rem(keys=key, val=w) is True
+            assert sub.get(keys=key) == []
+
+            # Ordering guarantees
+            assert sub.put(keys=key, vals=[witA]) is True
+            assert sub.put(keys=key, vals=[witB]) is True
+            assert [w.qb64b for w in sub.get(keys=key)] == [witA.qb64b, witB.qb64b]
+
+            assert sub.rem(keys=key) is True
+
+            # reversed insertion order
+            assert sub.put(keys=key, vals=[witB, witA]) is True
+            assert [w.qb64b for w in sub.get(keys=key)] == [witB.qb64b, witA.qb64b]
+
+            assert sub.rem(keys=key) is True
+
+            # Mixed insertion behavior
+            assert sub.put(keys=key, vals=[witA]) is True
+            assert sub.add(keys=key, val=witA) is False  # duplicate
+            assert sub.add(keys=key, val=witB) is True
+            assert sub.put(keys=key, vals=[witB]) is False  # duplicate
+            assert sub.cnt(keys=key) == 2
+
+            # Key normalization
+            alt_key_str = key.decode()
+            alt_key_mv = memoryview(key)
+
+            assert sub.get(keys=alt_key_str) != sub.get(keys=key)
+            assert sub.get(keys=alt_key_mv) != sub.get(keys=key)
+
+            # Type safety
+            with pytest.raises(AttributeError):
+                sub.add(keys=key, val=b"not a prefixer")
+
+            with pytest.raises(AttributeError):
+                sub.put(keys=key, vals=[b"nope"])
+
+            # Reset to empty
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) == []
+
+            # getFullItemIter consistency
+            assert sub.put(keys=key, vals=[witA, witB]) is True
+            items = list(sub.getFullItemIter(keys=key))
+
+            assert len(items) == 2
+            assert all(isinstance(internal_key, tuple) for internal_key, _ in items)
+            assert all(isinstance(w, coring.Prefixer) for _, w in items)
+            assert [w.qb64b for _, w in items] == [witA.qb64b, witB.qb64b]
+
+            # Cleanup using _tokey (correct)
+            for k, w in list(sub.getFullItemIter()):
+                preStr = k[0]   # prefix of the user
+                digStr = k[1]   # digest of the event
+                k = f"{preStr}.{digStr}".encode("utf-8")
+                assert sub.rem(keys=k, val=w) is True
+
+            assert sub.get(keys=key) == []
+
+            # Persistence across reopen
+            assert sub.put(keys=key, vals=[witA, witB]) is True
+            await baser.reopen(storageOpener=backend.open)
+            sub = getattr(baser, name)
+
+            # WebBaser clears CesrIoSetSuber on reopen
+            assert sub.get(keys=key) == []
+            assert sub.cnt(keys=key) == 0
+
+
+        # Tests for CesrIoSetSuber where klas=Diger
+        digerCesrIoSetSubers = [
+            "rpes",
+            "chas",
+            "reps",
+            "meids", 
+        ]
+
+        # Setup
+        # Create two valid Diger values
+        raw0 = b"abcdef0123456789abcdef0123456789"
+        raw1 = b"0123456789abcdef0123456789abcdef"
+
+        diger0 = coring.Diger(raw=raw0, code=coring.MtrDex.Blake3_256)
+        diger1 = coring.Diger(raw=raw1, code=coring.MtrDex.Blake3_256)
+        
+        for name in digerCesrIoSetSubers:
+            sub = getattr(baser, name)
+
+            # Empty DB behavior
+            assert sub.get(keys=key) == []
+            assert sub.cnt(keys=key) == 0
+            assert sub.rem(keys=key) is False
+
+            # Basic insertion
+            assert sub.put(keys=key, vals=[diger0]) is True
+            assert [d.qb64b for d in sub.get(keys=key)] == [diger0.qb64b]
+            assert sub.cnt(keys=key) == 1
+
+            # idempotent put
+            assert sub.put(keys=key, vals=[diger0]) is False
+            assert [d.qb64b for d in sub.get(keys=key)] == [diger0.qb64b]
+
+            # add second diger
+            assert sub.add(keys=key, val=diger1) is True
+            assert [d.qb64b for d in sub.get(keys=key)] == [diger0.qb64b, diger1.qb64b]
+
+            # iteration
+            assert [d.qb64b for d in sub.getIter(keys=key)] == [diger0.qb64b, diger1.qb64b]
+
+            # Deletion
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) == []
+
+            # delete individually
+            assert sub.put(keys=key, vals=[diger0, diger1]) is True
+            for d in [diger0, diger1]:
+                assert sub.rem(keys=key, val=d) is True
+            assert sub.get(keys=key) == []
+
+            # delete via iteration
+            assert sub.put(keys=key, vals=[diger0, diger1]) is True
+            for d in sub.getIter(keys=key):
+                assert sub.rem(keys=key, val=d) is True
+            assert sub.get(keys=key) == []
+
+            # Ordering guarantees
+            assert sub.put(keys=key, vals=[diger0]) is True
+            assert sub.put(keys=key, vals=[diger1]) is True
+            assert [d.qb64b for d in sub.get(keys=key)] == [diger0.qb64b, diger1.qb64b]
+
+            assert sub.rem(keys=key) is True
+
+            # reversed insertion order
+            assert sub.put(keys=key, vals=[diger1, diger0]) is True
+            assert [d.qb64b for d in sub.get(keys=key)] == [diger1.qb64b, diger0.qb64b]
+
+            assert sub.rem(keys=key) is True
+
+            # Mixed insertion behavior
+            assert sub.put(keys=key, vals=[diger0]) is True
+            assert sub.add(keys=key, val=diger0) is False  # duplicate
+            assert sub.add(keys=key, val=diger1) is True
+            assert sub.put(keys=key, vals=[diger1]) is False  # duplicate
+            assert sub.cnt(keys=key) == 2
+
+            # Key normalization
+            alt_key_str = key.decode()
+            alt_key_mv = memoryview(key)
+
+            assert sub.get(keys=alt_key_str) != sub.get(keys=key)
+            assert sub.get(keys=alt_key_mv) != sub.get(keys=key)
+
+            # Type safety
+            with pytest.raises(AttributeError):
+                sub.add(keys=key, val=b"not a diger")
+
+            with pytest.raises(AttributeError):
+                sub.put(keys=key, vals=[b"nope"])
+
+            # Reset to empty
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) == []
+            
+            # getFullItemIter consistency
+            assert sub.put(keys=key, vals=[diger0, diger1]) is True
+            items = list(sub.getFullItemIter(keys=key))
+
+            assert len(items) == 2
+            assert all(isinstance(internal_key, tuple) for internal_key, _ in items)
+            assert all(isinstance(d, coring.Diger) for _, d in items)
+            assert [d.qb64b for _, d in items] == [diger0.qb64b, diger1.qb64b]
+
+            # Cleanup (correct composite key reconstruction)
+            for internal_key, d in list(sub.getFullItemIter()):
+                preStr = internal_key[0]
+                digStr = internal_key[1]
+                composite_key = f"{preStr}.{digStr}".encode()
+                assert sub.rem(keys=composite_key, val=d) is True
+
+            assert sub.get(keys=key) == []
+
+            # Persistence across reopen
+            assert sub.put(keys=key, vals=[diger0, diger1]) is True
+            await baser.reopen(storageOpener=backend.open)
+            sub = getattr(baser, name)
+
+            # WebBaser clears CesrIoSetSuber on reopen
+            assert sub.get(keys=key) == []
+            assert sub.cnt(keys=key) == 0
+
+
+        # Test .essrs (CesrIoSetSuber of Texter)
+
+        # Setup
+        pre = b'BAKY1sKmgyjAiUDdUBPNPyrSz_ad_Qf9yzhDNZlEKiMc'
+        dig = b'EA73b7reENuBahMJsMTLbeyyNPsfTRzKRWtJ3ytmInvw'
+
+        key = dgKey(pre, dig)
+        assert key == f"{pre.decode()}.{dig.decode()}".encode()
+
+        # Create two valid Texter values
+        texter0 = coring.Texter(text="hello world")
+        texter1 = coring.Texter(text="another value")
+
+        # Empty DB behavior
+        assert baser.essrs.get(keys=key) == []
+        assert baser.essrs.cnt(keys=key) == 0
+        assert baser.essrs.rem(keys=key) is False
+
+        # Basic insertion
+        assert baser.essrs.put(keys=key, vals=[texter0]) is True
+        assert [t.qb64b for t in baser.essrs.get(keys=key)] == [texter0.qb64b]
+        assert baser.essrs.cnt(keys=key) == 1
+
+        # idempotent put
+        assert baser.essrs.put(keys=key, vals=[texter0]) is False
+        assert [t.qb64b for t in baser.essrs.get(keys=key)] == [texter0.qb64b]
+
+        # add second texter
+        assert baser.essrs.add(keys=key, val=texter1) is True
+        assert [t.qb64b for t in baser.essrs.get(keys=key)] == [texter0.qb64b, texter1.qb64b]
+
+        # iteration
+        assert [t.qb64b for t in baser.essrs.getIter(keys=key)] == [texter0.qb64b, texter1.qb64b]
+
+        # Deletion
+        assert baser.essrs.rem(keys=key) is True
+        assert baser.essrs.get(keys=key) == []
+
+        # delete individually
+        assert baser.essrs.put(keys=key, vals=[texter0, texter1]) is True
+        for t in [texter0, texter1]:
+            assert baser.essrs.rem(keys=key, val=t) is True
+        assert baser.essrs.get(keys=key) == []
+
+        # delete via iteration
+        assert baser.essrs.put(keys=key, vals=[texter0, texter1]) is True
+        for t in baser.essrs.getIter(keys=key):
+            assert baser.essrs.rem(keys=key, val=t) is True
+        assert baser.essrs.get(keys=key) == []
+
+        # Ordering guarantees
+        assert baser.essrs.put(keys=key, vals=[texter0]) is True
+        assert baser.essrs.put(keys=key, vals=[texter1]) is True
+        assert [t.qb64b for t in baser.essrs.get(keys=key)] == [texter0.qb64b, texter1.qb64b]
+
+        assert baser.essrs.rem(keys=key) is True
+
+        # reversed insertion order
+        assert baser.essrs.put(keys=key, vals=[texter1, texter0]) is True
+        assert [t.qb64b for t in baser.essrs.get(keys=key)] == [texter1.qb64b, texter0.qb64b]
+
+        assert baser.essrs.rem(keys=key) is True
+
+        # Mixed insertion behavior
+        assert baser.essrs.put(keys=key, vals=[texter0]) is True
+        assert baser.essrs.add(keys=key, val=texter0) is False  # duplicate
+        assert baser.essrs.add(keys=key, val=texter1) is True
+        assert baser.essrs.put(keys=key, vals=[texter1]) is False  # duplicate
+        assert baser.essrs.cnt(keys=key) == 2
+
+        # Key normalization
+        alt_key_str = key.decode()
+        alt_key_mv = memoryview(key)
+
+        assert [t.qb64b for t in baser.essrs.get(keys=alt_key_str)] == [t.qb64b for t in baser.essrs.get(keys=key)]
+        assert [t.qb64b for t in baser.essrs.get(keys=alt_key_mv)] == [t.qb64b for t in baser.essrs.get(keys=key)]
+
+        # Type safety
+        with pytest.raises(AttributeError):
+            baser.essrs.add(keys=key, val=b"not a texter")
+
+        with pytest.raises(AttributeError):
+            baser.essrs.put(keys=key, vals=[b"nope"])
+
+        # Reset to empty
+        assert baser.essrs.rem(keys=key) is True
+        assert baser.essrs.get(keys=key) == []
+
+        # getFullItemIter consistency
+        assert baser.essrs.put(keys=key, vals=[texter0, texter1]) is True
+        items = list(baser.essrs.getFullItemIter(keys=key))
+
+        assert len(items) == 2
+        assert all(isinstance(internal_key, tuple) for internal_key, _ in items)
+        assert all(isinstance(t, coring.Texter) for _, t in items)
+        assert [t.qb64b for _, t in items] == [texter0.qb64b, texter1.qb64b]
+
+        # Cleanup (correct composite key reconstruction)
+        for internal_key, t in list(baser.essrs.getFullItemIter()):
+            pre_str = internal_key[0]
+            dig_str = internal_key[1]
+            composite_key = f"{pre_str}.{dig_str}".encode()
+            assert baser.essrs.rem(keys=composite_key, val=t) is True
+
+        assert baser.essrs.get(keys=key) == []
+
+        # Persistence across reopen 
+        assert baser.essrs.put(keys=key, vals=[texter0, texter1]) is True
+        await baser.reopen(storageOpener=backend.open)
+
+        assert baser.essrs.get(keys=key) == []
+        assert baser.essrs.cnt(keys=key) == 0
+
+
+        # -------- CesrSuber Subdbs tests ---------
+        
+        # Tests for CesrSuber where klas=Dater
+        daterCesrSubers = [
+            "dtss",
+            "migs",
+            "sdts",
+            "epsd", 
+            "kdts", 
+        ]
+
+        # Two Dater values
+        dater0 = coring.Dater(dts="2020-08-22T17:50:09.988921+00:00")
+        dater1 = coring.Dater(dts="2020-08-22T17:50:10.000000+00:00")
+
+        for name in daterCesrSubers:
+            sub = getattr(baser, name)
+
+            # Empty DB behavior
+            assert sub.get(keys=key) == None
+            assert sub.cnt() == 0
+            assert sub.rem(keys=key) == False
+
+            # Basic insertion
+            assert sub.put(keys=key, val=dater0) == True
+            assert sub.get(keys=key).dts == dater0.dts
+
+            # idempotent put
+            assert sub.put(keys=key, val=dater0) == False
+            assert sub.get(keys=key).dts == dater0.dts
+
+            # pin overwrites
+            assert sub.pin(keys=key, val=dater1) == True
+            assert sub.get(keys=key).dts == dater1.dts
+
+            # Deletion
+            assert sub.rem(keys=key) == True
+            assert sub.get(keys=key) == None
+
+            # delete individually (CesrSuber stores only one value)
+            assert sub.put(keys=key, val=dater0) is True
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) is None
+
+            # delete via iteration (using getFullItemIter)
+            assert sub.put(keys=key, val=dater0) is True
+            for k, d in sub.getFullItemIter(keys=key):
+                assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) is None
+
+            # Ordering guarantees (CesrSuber stores only one value, so overwrite)
+            assert sub.put(keys=key, val=dater0) is True
+            assert sub.pin(keys=key, val=dater1) is True
+            assert sub.get(keys=key).dts == dater1.dts
+
+            assert sub.rem(keys=key) is True
+
+            # reversed insertion order (still only one value)
+            assert sub.put(keys=key, val=dater1) is True
+            assert sub.pin(keys=key, val=dater0) is True
+            assert sub.get(keys=key).dts == dater0.dts
+
+            assert sub.rem(keys=key) is True
+
+            # Mixed insertion behavior
+            assert sub.put(keys=key, val=dater0) is True
+            assert sub.put(keys=key, val=dater0) is False  # idempotent
+            assert sub.pin(keys=key, val=dater1) is True   # overwrite
+            assert sub.get(keys=key).dts == dater1.dts
+
+            # Key normalization
+            alt_key_str = key.decode()
+            alt_key_mv = memoryview(key)
+
+            assert sub.get(keys=alt_key_str).dts == sub.get(keys=key).dts
+            assert sub.get(keys=alt_key_mv).dts == sub.get(keys=key).dts
+
+            # Type safety
+            with pytest.raises(AttributeError):
+                sub.put(keys=key, val=b"not a dater")
+
+            with pytest.raises(AttributeError):
+                sub.pin(keys=key, val=b"nope")
+
+            # Reset to empty
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) is None
+
+            # getFullItemIter consistency
+            assert sub.put(keys=key, val=dater0) is True
+            items = list(sub.getFullItemIter(keys=key))
+
+            assert len(items) == 1
+            internal_key, d = items[0]
+            assert isinstance(internal_key, tuple)
+            assert isinstance(d, coring.Dater)
+            assert d.qb64b == dater0.qb64b
+
+            # Cleanup (correct composite key reconstruction)
+            for internal_key, d in list(sub.getFullItemIter()):
+                preStr = internal_key[0]
+                digStr = internal_key[1]
+                composite_key = f"{preStr}.{digStr}".encode()
+                assert sub.rem(keys=composite_key) is True
+
+            assert sub.get(keys=key) is None
+
+            # Persistence across reopen
+            assert sub.put(keys=key, val=dater0) is True
+            await baser.reopen(storageOpener=backend.open)
+            sub = getattr(baser, name)
+
+            # WebBaser clears CesrSuber on reopen
+            assert sub.get(keys=key) is None
+
+
+        # Test for CesrSuber where klas=Diger
+        digerCesrSubers = [
+            "eans",
+            "lans",
+            "cgms",
+            "knas",
+            "wwas",
+        ]
+
+        # Two Diger values
+        raw0 = b"abcdef0123456789abcdef0123456789"
+        raw1 = b"0123456789abcdef0123456789abcdef"
+
+        diger0 = coring.Diger(raw=raw0, code=coring.MtrDex.Blake3_256)
+        diger1 = coring.Diger(raw=raw1, code=coring.MtrDex.Blake3_256)
+
+        for name in digerCesrSubers:
+            sub = getattr(baser, name)
+            # Empty DB behavior
+            assert sub.get(keys=key) is None
+            assert sub.cnt() == 0
+            assert sub.rem(keys=key) is False
+
+            # Basic insertion
+            assert sub.put(keys=key, val=diger0) is True
+            assert sub.get(keys=key).qb64b == diger0.qb64b
+
+            # idempotent put
+            assert sub.put(keys=key, val=diger0) is False
+            assert sub.get(keys=key).qb64b == diger0.qb64b
+
+            # pin overwrites
+            assert sub.pin(keys=key, val=diger1) is True
+            assert sub.get(keys=key).qb64b == diger1.qb64b
+
+            # Deletion
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) is None
+
+            # delete individually (CesrSuber stores only one value)
+            assert sub.put(keys=key, val=diger0) is True
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) is None
+
+            # delete via iteration (using getFullItemIter)
+            assert sub.put(keys=key, val=diger0) is True
+            for k, d in sub.getFullItemIter(keys=key):
+                assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) is None
+
+            # Ordering guarantees (CesrSuber stores only one value, so overwrite)
+            assert sub.put(keys=key, val=diger0) is True
+            assert sub.pin(keys=key, val=diger1) is True
+            assert sub.get(keys=key).qb64b == diger1.qb64b
+
+            assert sub.rem(keys=key) is True
+
+            # reversed insertion order (still only one value)
+            assert sub.put(keys=key, val=diger1) is True
+            assert sub.pin(keys=key, val=diger0) is True
+            assert sub.get(keys=key).qb64b == diger0.qb64b
+
+            assert sub.rem(keys=key) is True
+
+            # Mixed insertion behavior
+            assert sub.put(keys=key, val=diger0) is True
+            assert sub.put(keys=key, val=diger0) is False  # idempotent
+            assert sub.pin(keys=key, val=diger1) is True   # overwrite
+            assert sub.get(keys=key).qb64b == diger1.qb64b
+
+            # Key normalization
+            alt_key_str = key.decode()
+            alt_key_mv = memoryview(key)
+
+            assert sub.get(keys=alt_key_str).qb64b == sub.get(keys=key).qb64b
+            assert sub.get(keys=alt_key_mv).qb64b == sub.get(keys=key).qb64b
+
+            # Type safety
+            with pytest.raises(AttributeError):
+                sub.put(keys=key, val=b"not a diger")
+
+            with pytest.raises(AttributeError):
+                sub.pin(keys=key, val=b"nope")
+
+            # Reset to empty
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) is None
+
+            # getFullItemIter consistency
+            assert sub.put(keys=key, val=diger0) is True
+            items = list(sub.getFullItemIter(keys=key))
+
+            assert len(items) == 1
+            internal_key, d = items[0]
+            assert isinstance(internal_key, tuple)
+            assert isinstance(d, coring.Diger)
+            assert d.qb64b == diger0.qb64b
+
+            # Cleanup (correct composite key reconstruction)
+            for internal_key, d in list(sub.getFullItemIter()):
+                preStr = internal_key[0]
+                digStr = internal_key[1]
+                composite_key = f"{preStr}.{digStr}".encode()
+                assert sub.rem(keys=composite_key) is True
+
+            assert sub.get(keys=key) is None
+
+            # Persistence across reopen
+            assert sub.put(keys=key, val=diger0) is True
+            await baser.reopen(storageOpener=backend.open)
+            sub = getattr(baser, name)
+
+            # WebBaser clears CesrSuber on reopen
+            assert sub.get(keys=key) is None
+
+        # Tests for .erpy subdb 
+
+        # Using the diger values from above
+        sdig0 = diger0.qb64b
+        sdig1 = diger1.qb64b
+
+        saider0 = Saider(qb64=sdig0)
+        saider1 = Saider(qb64=sdig1)
+
+        # Empty DB behavior
+        assert baser.erpy.get(keys=key) is None
+        assert baser.erpy.cnt() == 0
+        assert baser.erpy.rem(keys=key) is False
+
+        # Basic insertion
+        assert baser.erpy.put(keys=key, val=saider0) is True
+        assert baser.erpy.get(keys=key).qb64b == saider0.qb64b
+
+        # idempotent put
+        assert baser.erpy.put(keys=key, val=saider0) is False
+        assert baser.erpy.get(keys=key).qb64b == saider0.qb64b
+
+        # pin overwrites
+        assert baser.erpy.pin(keys=key, val=saider1) is True
+        assert baser.erpy.get(keys=key).qb64b == saider1.qb64b
+
+        # Deletion
+        assert baser.erpy.rem(keys=key) is True
+        assert baser.erpy.get(keys=key) is None
+
+        # delete individually (CesrSuber stores only one value)
+        assert baser.erpy.put(keys=key, val=saider0) is True
+        assert baser.erpy.rem(keys=key) is True
+        assert baser.erpy.get(keys=key) is None
+
+        # delete via iteration (using getFullItemIter)
+        assert baser.erpy.put(keys=key, val=saider0) is True
+        for k, d in baser.erpy.getFullItemIter(keys=key):
+            assert baser.erpy.rem(keys=key) is True
+        assert baser.erpy.get(keys=key) is None
+
+        # Ordering guarantees (CesrSuber stores only one value)
+        assert baser.erpy.put(keys=key, val=saider0) is True
+        assert baser.erpy.pin(keys=key, val=saider1) is True
+        assert baser.erpy.get(keys=key).qb64b == saider1.qb64b
+
+        assert baser.erpy.rem(keys=key) is True
+
+        # reversed insertion order (still only one value)
+        assert baser.erpy.put(keys=key, val=saider1) is True
+        assert baser.erpy.pin(keys=key, val=saider0) is True
+        assert baser.erpy.get(keys=key).qb64b == saider0.qb64b
+
+        assert baser.erpy.rem(keys=key) is True
+
+        # Mixed insertion behavior
+        assert baser.erpy.put(keys=key, val=saider0) is True
+        assert baser.erpy.put(keys=key, val=saider0) is False  # idempotent
+        assert baser.erpy.pin(keys=key, val=saider1) is True   # overwrite
+        assert baser.erpy.get(keys=key).qb64b == saider1.qb64b
+
+        # Key normalization
+        alt_key_str = key.decode()
+        alt_key_mv = memoryview(key)
+
+        assert baser.erpy.get(keys=alt_key_str).qb64b == baser.erpy.get(keys=key).qb64b
+        assert baser.erpy.get(keys=alt_key_mv).qb64b == baser.erpy.get(keys=key).qb64b
+
+        # Type safety
+        with pytest.raises(AttributeError):
+            baser.erpy.put(keys=key, val=b"not a saider")
+
+        with pytest.raises(AttributeError):
+            baser.erpy.pin(keys=key, val=b"nope")
+
+        # Reset to empty
+        assert baser.erpy.rem(keys=key) is True
+        assert baser.erpy.get(keys=key) is None
+
+        # getFullItemIter consistency
+        assert baser.erpy.put(keys=key, val=saider0) is True
+        items = list(baser.erpy.getFullItemIter(keys=key))
+
+        assert len(items) == 1
+        internal_key, d = items[0]
+        assert isinstance(internal_key, tuple)
+        assert isinstance(d, coring.Saider)
+        assert d.qb64b == saider0.qb64b
+
+        # Cleanup (correct composite key reconstruction)
+        for internal_key, d in list(baser.erpy.getFullItemIter()):
+            preStr = internal_key[0]
+            digStr = internal_key[1]
+            composite_key = f"{preStr}.{digStr}".encode()
+            assert baser.erpy.rem(keys=composite_key) is True
+
+        assert baser.erpy.get(keys=key) is None
+
+        # Persistence across reopen
+        assert baser.erpy.put(keys=key, val=saider0) is True
+        await baser.reopen(storageOpener=backend.open)
+        baser.erpy = getattr(baser, "erpy")
+
+        # WebBaser clears CesrSuber on reopen
+        assert baser.erpy.get(keys=key) is None
+
+
+        # Test for CesrSuber where klas=Cigar
+        cigarCesrSubers = [
+            "ccigs",
+            "icigs",
+        ]
+
+        # Use the cigar values from Tests for CesrIoSetSuber where klas=Siger
+        
+        for name in cigarCesrSubers:
+
+            sub = getattr(baser, name)
+
+            # Empty DB behavior
+            assert sub.get(keys=key) is None
+            assert sub.cnt() == 0
+            assert sub.rem(keys=key) is False
+
+            # Basic insertion
+            assert sub.put(keys=key, val=cigar0) is True
+            assert sub.get(keys=key).qb64b == cigar0.qb64b
+
+            # idempotent put
+            assert sub.put(keys=key, val=cigar0) is False
+            assert sub.get(keys=key).qb64b == cigar0.qb64b
+
+            # pin overwrites
+            assert sub.pin(keys=key, val=cigar1) is True
+            assert sub.get(keys=key).qb64b == cigar1.qb64b
+
+            # Deletion
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) is None
+
+            # delete individually (CesrSuber stores only one value)
+            assert sub.put(keys=key, val=cigar0) is True
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) is None
+
+            # delete via iteration (using getFullItemIter)
+            assert sub.put(keys=key, val=cigar0) is True
+            for k, d in sub.getFullItemIter(keys=key):
+                assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) is None
+
+            # Ordering guarantees (CesrSuber stores only one value)
+            assert sub.put(keys=key, val=cigar0) is True
+            assert sub.pin(keys=key, val=cigar1) is True
+            assert sub.get(keys=key).qb64b == cigar1.qb64b
+
+            assert sub.rem(keys=key) is True
+
+            # reversed insertion order (still only one value)
+            assert sub.put(keys=key, val=cigar1) is True
+            assert sub.pin(keys=key, val=cigar0) is True
+            assert sub.get(keys=key).qb64b == cigar0.qb64b
+
+            assert sub.rem(keys=key) is True
+
+            # Mixed insertion behavior
+            assert sub.put(keys=key, val=cigar0) is True
+            assert sub.put(keys=key, val=cigar0) is False  # idempotent
+            assert sub.pin(keys=key, val=cigar1) is True   # overwrite
+            assert sub.get(keys=key).qb64b == cigar1.qb64b
+
+            # Key normalization
+            alt_key_str = key.decode()
+            alt_key_mv = memoryview(key)
+
+            assert sub.get(keys=alt_key_str).qb64b == sub.get(keys=key).qb64b
+            assert sub.get(keys=alt_key_mv).qb64b == sub.get(keys=key).qb64b
+
+            # Type safety
+            with pytest.raises(AttributeError):
+                sub.put(keys=key, val=b"not a cigar")
+
+            with pytest.raises(AttributeError):
+                sub.pin(keys=key, val=b"nope")
+
+            # Reset to empty
+            assert sub.rem(keys=key) is True
+            assert sub.get(keys=key) is None
+
+            # getFullItemIter consistency
+            assert sub.put(keys=key, val=cigar0) is True
+            items = list(sub.getFullItemIter(keys=key))
+
+            assert len(items) == 1
+            internal_key, d = items[0]
+            assert isinstance(internal_key, tuple)
+            assert isinstance(d, coring.Cigar)
+            assert d.qb64b == cigar0.qb64b
+
+            # Cleanup (correct composite key reconstruction)
+            for internal_key, d in list(sub.getFullItemIter()):
+                preStr = internal_key[0]
+                digStr = internal_key[1]
+                composite_key = f"{preStr}.{digStr}".encode()
+                assert sub.rem(keys=composite_key) is True
+
+            assert sub.get(keys=key) is None
+
+            # Persistence across reopen
+            assert sub.put(keys=key, val=cigar0) is True
+            await baser.reopen(storageOpener=backend.open)
+            sub = getattr(baser, name)
+
+            # WebBaser clears CesrSuber on reopen
+            assert sub.get(keys=key) is None
+
+
+        # ---- EventSourceRecord tests ----
+
+        record = EventSourceRecord()
+
+        assert baser.esrs.get(key) is None
+        assert baser.esrs.put(key, record) is True
+
+        actual = baser.esrs.get(key)
+        assert actual == record
+
+        # modify record, ensure put does not overwrite
+        record.local = False
+        assert baser.esrs.put(key, record) is False
+
+        actual = baser.esrs.get(key)
+        assert actual.local != record.local
+        assert actual != record
+
+        # pin overwrites
+        assert baser.esrs.pin(key, record) is True
+        actual = baser.esrs.get(key)
+        assert actual.local == record.local
+        assert actual == record
+
+        # Multiple independent keys do not collide
+        key2 = b"other"
+        rec2 = EventSourceRecord(local=True)
+        assert baser.esrs.put(key2, rec2) is True
+        assert baser.esrs.get(key2) == rec2
+        assert baser.esrs.get(key) != rec2
+
+        # pin() is idempotent
+        assert baser.esrs.pin(key2, rec2) is True
+        assert baser.esrs.pin(key2, rec2) is True  # no change, but still True
+        assert baser.esrs.get(key2) == rec2
+
+        # put() with a new instance but same content does NOT overwrite
+        rec2Clone = EventSourceRecord(local=True)
+        assert baser.esrs.put(key2, rec2Clone) is False
+        assert baser.esrs.get(key2) == rec2  # original remains
+
+        # delete() removes record
+        assert baser.esrs.rem(key2) is True
+        assert baser.esrs.get(key2) is None
+
+        # delete() on missing key returns False
+        assert baser.esrs.rem(key2) is False
+
+        # reopen
+        await baser.reopen(storageOpener=backend.open)
+        restored = baser.esrs.get(key)
+        assert restored == None
+
+
+        # test first seen event log .fels sub db
+        preA = b'BAKY1sKmgyjAiUDdUBPNPyrSz_ad_Qf9yzhDNZlEKiMc'
+        preB = b'EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYyB-6n4WDi7w'
+        preC = b'EIDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY_qrIZIicQg'
+
+        digA = b'EA73b7reENuBahMJsMTLbeyyNPsfTRzKRWtJ3ytmInvw'
+        digU = b'EB73b7reENuBahMJsMTLbeyyNPsfTRzKRWtJ3ytmInvw'
+        digV = b'EC4vCeJswIBJlO3RqE-wsE72Vt3wAceJ_LzqKvbDtBSY'
+        digW = b'EDAyl33W9ja_wLX85UrzRnL4KNzlsIKIA7CrD04nVX1w'
+        digX = b'EEnwxEm5Bg5s5aTLsgQCNpubIYzwlvMwZIzdOM0Z3u7o'
+        digY = b'EFrq74_Q11S2vHx1gpK_46Ik5Q7Yy9K1zZ5BavqGDKnk'
+
+        digC = b'EG5RimdY_OWoreR-Z-Q5G81-I4tjASJCaP_MqkBbtM2w'
+
+        # Empty db
+        assert baser.fels.get(keys=preA, on=0) is None
+        assert baser.fels.rem(keys=preA, on=0) == False
+        assert baser.fels.put(keys=preA, on=0, val=digA) == True
+        assert baser.fels.get(keys=preA, on=0) == digA.decode("utf-8")
+        assert baser.fels.put(keys=preA, on=0, val=digA) == False
+        assert baser.fels.pin(keys=preA, on=0, val=digA) == True
+        assert baser.fels.get(keys=preA, on=0) == digA.decode("utf-8")
+        assert baser.fels.rem(keys=preA, on=0) == True
+        assert baser.fels.get(keys=preA, on=0) is None
+
+        # test appendOn
+        # empty database
+        assert baser.fels.get(keys=preB, on=0) is None
+        on = baser.fels.append(keys=preB, val=digU)
+        assert on == 0
+        assert baser.fels.get(keys=preB, on=0) == digU.decode("utf-8")
+        assert baser.fels.rem(keys=preB, on=0) == True
+        assert baser.fels.get(keys=preB, on=0) is None
+
+        # earlier pre in database only
+        assert baser.fels.put(keys=preA, on=0, val=digA) == True
+        on = baser.fels.append(keys=preB, val=digU)
+        assert on == 0
+        assert baser.fels.get(keys=preB, on=0) == digU.decode("utf-8")
+        assert baser.fels.rem(keys=preB, on=0) == True
+        assert baser.fels.get(keys=preB, on=0) is None
+
+        # earlier and later pre in baser but not same pre
+        assert baser.fels.get(keys=preA, on=0) == digA.decode("utf-8")
+        assert baser.fels.put(keys=preC, on=0, val=digC) == True
+        on = baser.fels.append(keys=preB, val=digU)
+        assert on == 0
+        assert baser.fels.get(keys=preB, on=0) == digU.decode("utf-8")
+        assert baser.fels.rem(keys=preB, on=0) == True
+        assert baser.fels.get(keys=preB, on=0) is None
+
+        # later pre only
+        assert baser.fels.rem(keys=preA, on=0) == True
+        assert baser.fels.get(keys=preA, on=0) is None
+        assert baser.fels.get(keys=preC, on=0) == digC.decode("utf-8")
+        on = baser.fels.append(keys=preB, val=digU)
+        assert on == 0
+        assert baser.fels.get(keys=preB, on=0) == digU.decode("utf-8")
+
+        # earlier pre and later pre and earlier entry for same pre
+        assert baser.fels.put(keys=preA, on=0, val=digA) == True
+        on = baser.fels.append(keys=preB, val=digV)
+        assert on == 1
+        assert baser.fels.get(keys=preB, on=1) == digV.decode("utf-8")
+
+        # earlier entry for same pre but only same pre
+        assert baser.fels.rem(keys=preA, on=0) == True
+        assert baser.fels.get(keys=preA, on=0) is None
+        assert baser.fels.rem(keys=preC, on=0) == True
+        assert baser.fels.get(keys=preC, on=0) is None
+        # another value for preB
+        on = baser.fels.append(keys=preB, val=digW)
+        assert on == 2
+        assert baser.fels.get(keys=preB, on=2) == digW.decode("utf-8")
+        # yet another value for preB
+        on = baser.fels.append(keys=preB, val=digX)
+        assert on == 3
+        assert baser.fels.get(keys=preB, on=3) == digX.decode("utf-8")
+        # yet another value for preB
+        on = baser.fels.append(keys=preB, val=digY)
+        assert on == 4
+        assert baser.fels.get(keys=preB, on=4) == digY.decode("utf-8")
+
+        # replay preB events in database
+        _pre = lambda k: k[0].encode() if isinstance(k[0], str) else k[0]
+        items = [(_pre(keys), on, val) for keys, on, val in baser.fels.getAllItemIter(keys=preB)]
+        assert items == [(preB, 0, digU.decode("utf-8")), (preB, 1, digV.decode("utf-8")), (preB, 2, digW.decode("utf-8")), (preB, 3, digX.decode("utf-8")), (preB, 4, digY.decode("utf-8"))]
+
+        # resume replay preB events at on = 3
+        items = [(_pre(keys), on, val) for keys, on, val in baser.fels.getAllItemIter(keys=preB, on=3)]
+        assert items == [(preB, 3, digX.decode("utf-8")), (preB, 4, digY.decode("utf-8"))]
+
+        # resume replay preB events at on = 5
+        items = [(_pre(keys), on, val) for keys, on, val in baser.fels.getAllItemIter(keys=preB, on=5)]
+        assert items == []
+
+        # replay all events in database with pre events before and after
+        assert baser.fels.put(keys=preA, on=0, val=digA) == True
+        assert baser.fels.put(keys=preC, on=0, val=digC) == True
+
+        # replay all pres in first-seen order (keys=b'', on=0)
+        items = [(_pre(keys), on, val) for keys, on, val in baser.fels.getAllItemIter(keys=b'', on=0)]
+        assert items == [
+            (preA, 0, digA.decode("utf-8")),
+            (preB, 0, digU.decode("utf-8")),
+            (preB, 1, digV.decode("utf-8")),
+            (preB, 2, digW.decode("utf-8")),
+            (preB, 3, digX.decode("utf-8")),
+            (preB, 4, digY.decode("utf-8")),
+            (preC, 0, digC.decode("utf-8")),
+        ]
+
+        # Test .dtss datetime stamps
+        key = dgKey(preb, digb)
+        assert key == f'{preb.decode("utf-8")}.{digb.decode("utf-8")}'.encode("utf-8")
+
+        # test .dtss sub db methods - now returns Dater objects
+        dater1 = coring.Dater(dts='2020-08-22T17:50:09.988921+00:00')
+        dater2 = coring.Dater(dts='2020-08-22T17:50:10.000000+00:00')
+        # empty dtss
+        assert baser.dtss.get(keys=key) is None
+
+        # rem on empty returns False
+        assert baser.dtss.rem(keys=key) == False
+
+        # Insert dater1 and check retrieval
+        assert baser.dtss.put(keys=key, val=dater1) == True
+        result = baser.dtss.get(keys=key)
+        assert isinstance(result, coring.Dater)
+        assert result.dts == dater1.dts
+
+        # Try to insert dater2 with put()
+        assert baser.dtss.put(keys=key, val=dater2) == False  # idempotent
+        result = baser.dtss.get(keys=key)
+        assert result.dts == dater1.dts  # still original
+
+        # Pin overwrites dater1 with dater2
+        assert baser.dtss.pin(keys=key, val=dater2) == True  # overwrites
+        result = baser.dtss.get(keys=key)
+        assert result.dts == dater2.dts
+
+        # Remove key and check empty again
+        assert baser.dtss.rem(keys=key) == True
+        assert baser.dtss.get(keys=key) is None
+
+        # New set of keys and daters
+        key2 = dgKey(b"X", b"Y")
+        d3 = coring.Dater(dts="2021-01-01T00:00:00.000000+00:00")
+        d4 = coring.Dater(dts="2021-01-01T00:00:01.000000+00:00")
+
+        # Insert d3 at key2 and check retrieval
+        assert baser.dtss.put(keys=key2, val=d3) is True
+        assert baser.dtss.get(keys=key2).dts == d3.dts
+
+        # pin() is idempotent and stable
+        assert baser.dtss.pin(keys=key2, val=d3) is True
+        assert baser.dtss.pin(keys=key2, val=d3) is True
+        assert baser.dtss.get(keys=key2).dts == d3.dts
+
+        # put() with equal-content Dater does NOT overwrite
+        d3Clone = coring.Dater(dts=d3.dts)
+        assert baser.dtss.put(keys=key2, val=d3Clone) is False
+        assert baser.dtss.get(keys=key2).dts == d3.dts
+
+        # Persistence across reopen (tests serialization/deserialization)
+        await baser.reopen(storageOpener=backend.open)
+        # After reopen, dtss should still be empty for key but not key2
+        assert baser.dtss.get(keys=key) is None
+        assert baser.dtss.get(keys=key2) is None
+        
+        # Check second key
+        assert baser.dtss.rem(keys=key2) is False
+        assert baser.dtss.get(keys=key2) is None
+
+        
+        # Test .aess authorizing event source seal couples
+        # test .aess sub db methods
+        ssnu1 = b'0AAAAAAAAAAAAAAAAAAAAAAB'
+        sdig1 = b'EALkveIFUPvt38xhtgYYJRCCpAGO7WjjHVR37Pawv67E'
+        ssnu2 = b'0AAAAAAAAAAAAAAAAAAAAAAC'
+        sdig2 = b'EBYYJRCCpAGO7WjjsLhtHVR37Pawv67kveIFUPvt38x0'
+        number1 = coring.Number(qb64b=ssnu1)
+        diger1 = coring.Diger(qb64b=sdig1)
+        number2 = coring.Number(qb64b=ssnu2)
+        diger2 = coring.Diger(qb64b=sdig2)
+        val1 = (number1, diger1)
+        val2 = (number2, diger2)
+
+        # Empty db
+        assert baser.aess.get(keys=(preb, digb)) == None
+        assert baser.aess.rem(keys=(preb, digb)) == False
+
+        # Insert val1 and check retrieval
+        assert baser.aess.put(keys=(preb, digb), val=val1) == True
+        result = baser.aess.get(keys=(preb, digb))
+        assert result is not None
+        rnumber1, rdiger1 = result
+        assert rnumber1.qb64b == number1.qb64b
+        assert rdiger1.qb64b == diger1.qb64b
+
+        # Insert val2 with put()
+        assert baser.aess.put(keys=(preb, digb), val=val2) == False # idempotent, does not overwrite
+        result = baser.aess.get(keys=(preb, digb))
+        assert result is not None
+        rnumber1, rdiger1 = result
+        # Same result as before
+        assert rnumber1.qb64b == number1.qb64b
+        assert rdiger1.qb64b == diger1.qb64b
+
+        # Insert val2 with pin()
+        assert baser.aess.pin(keys=(preb, digb), val=val2) == True  # overwrites
+        result = baser.aess.get(keys=(preb, digb))
+        assert result is not None
+        rnumber2, rdiger2 = result
+        assert rnumber2.qb64b == number2.qb64b
+        assert rdiger2.qb64b == diger2.qb64b
+        
+        # Remove key and check empty again
+        assert baser.aess.rem(keys=(preb, digb)) == True
+        assert baser.aess.get(keys=(preb, digb)) == None
+
+        pre2 = b"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
+        dig2 = b"CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
+
+        val3 = (number1, diger1)
+        assert baser.aess.put(keys=(pre2, dig2), val=val3) is True
+        rnum3, rdig3 = baser.aess.get(keys=(pre2, dig2))
+        assert rnum3.qb64b == number1.qb64b
+        assert rdig3.qb64b == diger1.qb64b
+
+        # pin() is idempotent
+        assert baser.aess.pin(keys=(pre2, dig2), val=val3) is True
+        assert baser.aess.pin(keys=(pre2, dig2), val=val3) is True
+
+        # put() with equal-content tuple does NOT overwrite
+        val3_clone = (coring.Number(qb64b=ssnu1), coring.Diger(qb64b=sdig1))
+        assert baser.aess.put(keys=(pre2, dig2), val=val3_clone) is False
+        rnum3b, rdig3b = baser.aess.get(keys=(pre2, dig2))
+        assert rnum3b.qb64b == number1.qb64b
+        assert rdig3b.qb64b == diger1.qb64b
+
+        # delete second key
+        assert baser.aess.rem(keys=(pre2, dig2)) is True
+        assert baser.aess.get(keys=(pre2, dig2)) is None
+
+        # Persistence across reopen (tests serialization/deserialization)
+        await baser.reopen(storageOpener=backend.open)
+        assert baser.aess.get(keys=(preb, digb)) is None
+        assert baser.aess.get(keys=(pre2, dig2)) is None
+
+        # Tuple key ordering correctness (pre, dig)
+        pre3 = preb
+        dig3 = b"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"
+        val4 = (number2, diger2)
+        assert baser.aess.put(keys=(pre3, dig3), val=val4) is True
+        rnum4, rdig4 = baser.aess.get(keys=(pre3, dig3))
+        assert rnum4.qb64b == number2.qb64b
+        assert rdig4.qb64b == diger2.qb64b
+
+        # test .wigs sub db methods (witness indexed sigs)
+        key = dgKey(preb, digb)
+        assert key == f'{preb.decode("utf-8")}.{digb.decode("utf-8")}'.encode("utf-8")
+
+        # Create valid test signatures
+        signer0 = signing.Signer(transferable=False, seed=b'0123456789abcdef0123456789abcdef')
+        signer1 = signing.Signer(transferable=False, seed=b'fedcba9876543210fedcba9876543210')
+
+        test_data = b"test witness signatures"
+        cigar0 = signer0.sign(ser=test_data)
+        cigar1 = signer1.sign(ser=test_data)
+
+        siger0 = indexing.Siger(raw=cigar0.raw, code=indexing.IdrDex.Ed25519_Sig, index=0)
+        siger1 = indexing.Siger(raw=cigar1.raw, code=indexing.IdrDex.Ed25519_Sig, index=1)
+
+        # Use siger objects for testing
+        wig0 = siger0
+        wig1 = siger1
+
+        # Test empty state
+        assert baser.wigs.get(keys=key) == []
+        assert baser.wigs.cnt(keys=key) == 0
+        assert baser.wigs.rem(keys=key) == False
+
+        # Test pin with multiple values
+        assert baser.wigs.pin(keys=key, vals=[wig1, wig0]) == True
+        result = baser.wigs.get(keys=key)
+        assert len(result) == 2
+        # Just verify both are present (don't test exact order)
+        result_bytes = set(w.qb64b for w in result)
+        assert result_bytes == {wig0.qb64b, wig1.qb64b}
+        assert baser.wigs.cnt(keys=key) == 2
+
+        # Test pin overwrites
+        assert baser.wigs.pin(keys=key, vals=[wig0]) == True
+        result = baser.wigs.get(keys=key)
+        assert len(result) == 1
+        assert result[0].qb64b == wig0.qb64b
+
+        # Reset to both
+        assert baser.wigs.pin(keys=key, vals=[wig1, wig0]) == True
+        assert baser.wigs.cnt(keys=key) == 2
+
+        # Test add, duplicate should return False
+        assert baser.wigs.add(keys=key, val=wig0) == False  
+        assert baser.wigs.add(keys=key, val=wig1) == False  
+        assert baser.wigs.cnt(keys=key) == 2
+
+        # Test getIter, returns just values
+        result_list = list(baser.wigs.getIter(keys=key))
+        assert len(result_list) == 2
+        assert set(w.qb64b for w in result_list) == {wig0.qb64b, wig1.qb64b}
+
+        # Test remove all
+        assert baser.wigs.rem(keys=key) == True
+        assert baser.wigs.get(keys=key) == []
+        assert baser.wigs.cnt(keys=key) == 0
+
+        # Test individual removal by value
+        vals = [wig0, wig1]
+        assert baser.wigs.pin(keys=key, vals=vals) == True
+        for val in vals:
+            assert baser.wigs.rem(keys=key, val=val) == True
+        assert baser.wigs.get(keys=key) == []
+
+        # Test removal while iterating
+        assert baser.wigs.pin(keys=key, vals=vals) == True
+        for val in baser.wigs.getIter(keys=key):
+            assert baser.wigs.rem(keys=key, val=val) == True
+        assert baser.wigs.get(keys=key) == []
+
+        # Test sequence with individual pins
+        assert baser.wigs.pin(keys=key, vals=[wig0]) == True
+        result = baser.wigs.get(keys=key)
+        assert len(result) == 1
+        assert result[0].qb64b == wig0.qb64b
+
+        assert baser.wigs.pin(keys=key, vals=[wig1]) == True
+        result = baser.wigs.get(keys=key)
+        assert len(result) == 1
+        assert result[0].qb64b == wig1.qb64b
+
+        assert baser.wigs.pin(keys=key, vals=[wig1, wig0]) == True
+        result = baser.wigs.get(keys=key)
+        assert len(result) == 2
+        assert set(w.qb64b for w in result) == {wig0.qb64b, wig1.qb64b}
+
+        assert baser.wigs.rem(keys=key) == True
+        assert baser.wigs.get(keys=key) == []
+
+        # test .rcts
+        wit0b = 'BBuupUhPx5_yZ-Wk1x4ejhccWzwEHHzq7K0gzQPYGGwT'.encode("utf-8")
+        wit1b = 'BAhccWzwEHHzq7K0gzmuupUhPx5_yZ-Wk1x4eQPYGGwT'.encode("utf-8")
+
+        # Create test prefixes and cigars
+        wit0 = coring.Prefixer(qb64=wit0b.decode('utf-8'))  # Convert from qb64 string
+        wit1 = coring.Prefixer(qb64=wit1b.decode('utf-8'))
+
+        wsig0b = '0BATimrykocna6Z_pQBl2gt59I_F6BsSwFbIOG1TDQz1KAV2z5IRqcFe4gPs9l3wsFKi1NsSZvBe8yQJmiu5AzJ9'.encode("utf-8")
+        wsig1b = '0BBIRqcFe4gPs9l3wsFKi1NsSZvBe8yQJmiu5Az_pQBl2gt59I_F6BsSwFbIOG1TDQz1KAV2zJ91Timrykocna6Z'.encode("utf-8")
+
+        # Create cigars (non-indexed signatures)
+        cigar0 = coring.Cigar(qb64=wsig0b.decode('utf-8'))
+        cigar1 = coring.Cigar(qb64=wsig1b.decode('utf-8'))
+
+        # Test with CESR tuples (insertion order)
+        assert baser.rcts.put(key, vals=[(wit0, cigar0), (wit1, cigar1)]) == True
+        result = baser.rcts.get(key)
+        assert len(result) == 2
+        # Check insertion order: wit0 inserted first, wit1 second
+        assert result[0][0].qb64 == wit0.qb64
+        assert result[0][1].qb64 == cigar0.qb64
+        assert result[1][0].qb64 == wit1.qb64
+        assert result[1][1].qb64 == cigar1.qb64
+
+        # Test duplicate (should not add)
+        assert baser.rcts.put(key, vals=[(wit0, cigar0)]) == False
+        result = baser.rcts.get(key)
+        assert len(result) == 2
+        assert result[0][0].qb64 == wit0.qb64
+        assert result[0][1].qb64 == cigar0.qb64
+        assert result[1][0].qb64 == wit1.qb64
+        assert result[1][1].qb64 == cigar1.qb64
+
+        # Test adding new item
+        wit2 = coring.Prefixer(qb64='BNewTestPrefix000000000000000000000000000000')
+        cigar2 = coring.Cigar(qb64='BNewTestSignature00000000000000000000000000000000000000000000000000000000000000000000000')
+        assert baser.rcts.add(key, (wit2, cigar2)) == True
+        result = baser.rcts.get(key)
+        assert len(result) == 3
+        # Insertion order: wit0, wit1, wit2
+        assert result[0][0].qb64 == wit0.qb64
+        assert result[0][1].qb64 == cigar0.qb64
+        assert result[1][0].qb64 == wit1.qb64
+        assert result[1][1].qb64 == cigar1.qb64
+        assert result[2][0].qb64 == wit2.qb64
+        assert result[2][1].qb64 == cigar2.qb64
+
+        # Test duplicate add returns False
+        assert baser.rcts.add(key, (wit0, cigar0)) == False
+
+        # Test getIter maintains insertion order
+        iter_result = [val for val in baser.rcts.getIter(key)]
+        assert len(iter_result) == 3
+        assert iter_result[0][0].qb64 == wit0.qb64
+        assert iter_result[0][1].qb64 == cigar0.qb64
+        assert iter_result[1][0].qb64 == wit1.qb64
+        assert iter_result[1][1].qb64 == cigar1.qb64
+        assert iter_result[2][0].qb64 == wit2.qb64
+        assert iter_result[2][1].qb64 == cigar2.qb64
+
+        # Test removal
+        assert baser.rcts.rem(key) == True
+        assert baser.rcts.get(key) == []
+
+        # Test insertion order preserved when inserting in different order
+        vals = [(wit1, cigar1), (wit0, cigar0)]
+        assert baser.rcts.put(key, vals) == True
+        result = baser.rcts.get(key)
+        assert len(result) == 2
+        # Should maintain insertion order: wit1 first, wit0 second
+        assert result[0][0].qb64 == wit1.qb64
+        assert result[0][1].qb64 == cigar1.qb64
+        assert result[1][0].qb64 == wit0.qb64
+        assert result[1][1].qb64 == cigar0.qb64
+
+        # Test individual removal
+        assert baser.rcts.rem(key, (wit1, cigar1)) == True
+        result = baser.rcts.get(key)
+        assert len(result) == 1
+        assert result[0][0].qb64 == wit0.qb64
+        assert result[0][1].qb64 == cigar0.qb64
+
+        assert baser.rcts.rem(key) == True
+        assert baser.rcts.get(key) == []
+
+        # Unverified Receipt Escrows
+        # test .ures insertion order methods.
+
+        # Setup CESR test values
+        diger0 = coring.Diger(ser=b"event0")
+        diger1 = coring.Diger(ser=b"event1")
+        diger2 = coring.Diger(ser=b"event2")
+        diger3 = coring.Diger(ser=b"event3")
+        diger4 = coring.Diger(ser=b"event4")
+
+        pre0 = coring.Prefixer(qb64="BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
+
+        signer0 = signing.Signer(transferable=False, seed=b'0123456789abcdef0123456789abcdef')
+        signer1 = signing.Signer(transferable=False, seed=b'abcdef0123456789abcdef0123456789')
+        signer2 = signing.Signer(transferable=False, seed=b'fedcba9876543210fedcba9876543210')
+        signer3 = signing.Signer(transferable=False, seed=b'0011223344556677889900112233445566')
+        signer4 = signing.Signer(transferable=False, seed=b'ffeeddccbbaa99887766554433221100')
+
+        test_data = b"test witness signatures"
+        cigar0 = signer0.sign(ser=test_data)
+        cigar1 = signer1.sign(ser=test_data)
+        cigar2 = signer2.sign(ser=test_data)
+        cigar3 = signer3.sign(ser=test_data)
+        cigar4 = signer4.sign(ser=test_data)
+
+        pre1 = coring.Prefixer(qb64=signer0.verfer.qb64)
+        pre2 = coring.Prefixer(qb64=signer1.verfer.qb64)
+        pre3 = coring.Prefixer(qb64=signer2.verfer.qb64)
+        pre4 = coring.Prefixer(qb64=signer3.verfer.qb64)
+
+        key = ("BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", coring.Seqner(sn=0).qb64)
+
+        cesrVal = (diger0, pre0, cigar0)
+        cesrVals = [cesrVal]
+
+        assert baser.ures.get(key) == []
+        assert baser.ures.getLast(keys=key) is None
+        assert baser.ures.cnt(key) == 0
+        assert baser.ures.rem(key) == False
+
+        assert baser.ures.put(keys=key, vals=cesrVals) == True
+        stored = baser.ures.get(key)
+        assert len(stored) == 1
+        diger_s, pre_s, cigar_s = stored[0]
+        assert diger_s.qb64 == diger0.qb64
+        assert pre_s.qb64 == pre0.qb64
+        assert cigar_s.qb64b == cigar0.qb64b
+
+        result = baser.ures.getLast(keys=key)
+        assert result is not None
+        diger_l, pre_l, cigar_l = result
+        assert diger_l.qb64 == diger0.qb64
+        assert pre_l.qb64 == pre0.qb64
+        assert cigar_l.qb64b == cigar0.qb64b
+
+        assert baser.ures.put(keys=key, vals=[(diger0, pre0, cigar0)]) == False
+        result = baser.ures.get(key)
+        assert len(result) == 1
+        d, p, c = result[0]
+        assert d.qb64 == diger0.qb64
+        assert p.qb64 == pre0.qb64
+        assert c.qb64b == cigar0.qb64b
+
+        assert baser.ures.add(key, (diger0, pre0, cigar0)) == False   
+        assert baser.ures.add(key, (diger1, pre1, cigar1)) == True
+
+        result = baser.ures.get(key)
+        assert len(result) == 2
+        d0, p0, c0 = result[0]
+        assert d0.qb64 == diger0.qb64
+        assert p0.qb64 == pre0.qb64
+        assert c0.qb64b == cigar0.qb64b
+        d1, p1, c1 = result[1]
+        assert d1.qb64 == diger1.qb64
+        assert p1.qb64 == pre1.qb64
+        assert c1.qb64b == cigar1.qb64b
+
+        result_iter = [val for val in baser.ures.getIter(key)]
+        assert len(result_iter) == 2
+        d0, p0, c0 = result_iter[0]
+        assert d0.qb64 == diger0.qb64
+        assert p0.qb64 == pre0.qb64
+        assert c0.qb64b == cigar0.qb64b
+        d1, p1, c1 = result_iter[1]
+        assert d1.qb64 == diger1.qb64
+        assert p1.qb64 == pre1.qb64
+        assert c1.qb64b == cigar1.qb64b
+
+        assert baser.ures.rem(key) == True
+        assert baser.ures.get(key) == []
+
+        # Setup multi-key tests for getTopItemIter
+        aKey = ("BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", coring.Seqner(sn=1).qb64)
+        aVals = [(diger0, pre0, cigar0), (diger1, pre1, cigar1), (diger2, pre2, cigar2)]
+        bKey = ("BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", coring.Seqner(sn=2).qb64)
+        bVals = [(diger1, pre1, cigar1), (diger2, pre2, cigar2), (diger3, pre3, cigar3)]
+        cKey = ("BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", coring.Seqner(sn=4).qb64)
+        cVals = [(diger2, pre2, cigar2), (diger3, pre3, cigar3)]
+        dKey = ("BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", coring.Seqner(sn=7).qb64)
+        dVals = [(diger3, pre3, cigar3), (diger4, pre4, cigar4)]
+
+        assert baser.ures.put(keys=aKey, vals=aVals)
+        assert baser.ures.put(keys=bKey, vals=bVals)
+        assert baser.ures.put(keys=cKey, vals=cVals)
+        assert baser.ures.put(keys=dKey, vals=dVals)
+
+        # Test getTopItemIter with no key
+        items = [(keys, val) for keys, val in baser.ures.getTopItemIter()]
+        assert items  # not empty
+        ikey = items[0][0]
+        assert ikey == aKey
+        # Verify total count
+        assert len(items) == len(aVals) + len(bVals) + len(cVals) + len(dVals)
+
+        # aVals — iterate at aKey only
+        items = [(keys, val) for keys, val in baser.ures.getTopItemIter(keys=aKey)]
+        assert items  # not empty
+        ikey = items[0][0]
+        assert ikey == aKey
+        assert len(items) == len(aVals)  # only aKey items
+
+        # bVals — iterate at bKey, remove each
+        items = [(keys, val) for keys, val in baser.ures.getTopItemIter(keys=bKey)]
+        assert items  # not empty
+        ikey = items[0][0]
+        assert ikey == bKey
+        assert len(items) == len(bVals)  # only bKey items
+        for ikeys, val in baser.ures.getTopItemIter(keys=bKey):
+            assert baser.ures.rem(bKey, val) == True
+
+        # cVals — iterate at cKey, remove each
+        items = [(keys, val) for keys, val in baser.ures.getTopItemIter(keys=cKey)]
+        assert items  # not empty
+        ikey = items[0][0]
+        assert ikey == cKey
+        assert len(items) == len(cVals)  # only cKey items
+        for ikeys, val in baser.ures.getTopItemIter(keys=cKey):
+            assert baser.ures.rem(cKey, val) == True
+
+        # dVals — iterate at dKey, remove each
+        items = [(keys, val) for keys, val in baser.ures.getTopItemIter(keys=dKey)]
+        assert items  # not empty
+        ikey = items[0][0]
+        assert ikey == dKey
+        assert len(items) == len(dVals)
+        for ikeys, val in baser.ures.getTopItemIter(keys=dKey):
+            assert baser.ures.rem(dKey, val) == True
+
+        # aVals should still be intact, others removed
+        result_a = baser.ures.get(aKey)
+        assert len(result_a) == len(aVals)
+        for i, (d_expected, p_expected, c_expected) in enumerate(aVals):
+            d, p, c = result_a[i]
+            assert d.qb64 == d_expected.qb64
+            assert p.qb64 == p_expected.qb64
+            assert c.qb64b == c_expected.qb64b
+
+        assert baser.ures.get(bKey) == []
+        assert baser.ures.get(cKey) == []
+        assert baser.ures.get(dKey) == []
+
+
+        # Validator (transferable) Receipts
+        # test .vrcs sub db methods dgkey
+        key = dgKey(preb, digb)
+        assert key == f'{preb.decode("utf-8")}.{digb.decode("utf-8")}'.encode("utf-8")
+
+        p1 = coring.Prefixer(qb64="BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")  # fake prefix
+        n1 = Number(num=1)
+        e1 = coring.Diger(ser=b"est1")    # digest of est event
+        s1 = Siger(raw=b"\x00" * 64)  # 64‑byte fake signature
+
+        cesrVal = (p1, n1, e1, s1)
+        cesrVal = [cesrVal]
+
+        assert baser.vrcs.get(key) == []
+        assert baser.vrcs.cnt(key) == 0
+        assert baser.vrcs.rem(key) == False
+
+        assert baser.vrcs.put(key, cesrVal) is True
+
+        stored = baser.vrcs.get(key)
+        assert len(stored) == 1
+        sp1, sn1, se1, ss1 = stored[0]
+
+        assert sp1.qb64 == p1.qb64
+        assert sn1.num == n1.num
+        assert se1.qb64 == e1.qb64
+        assert ss1.raw == s1.raw
+
+        assert baser.vrcs.rem(key) == True
+
+        # dup vals preserve insertion order (verified by the assertions below)
+        # Build several distinct typed CESR quadruples
+        pA = coring.Prefixer(qb64="BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
+        pB = coring.Prefixer(qb64="BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB")
+        pC = coring.Prefixer(qb64="BCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC")
+        pD = coring.Prefixer(qb64="BDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD")
+
+        nA = Number(num=1)
+        nB = Number(num=2)
+        nC = Number(num=3)
+        nD = Number(num=4)
+
+        eA = coring.Diger(ser=b"estA")
+        eB = coring.Diger(ser=b"estB")
+        eC = coring.Diger(ser=b"estC")
+        eD = coring.Diger(ser=b"estD")
+
+        sA = Siger(raw=b"\x00" * 64)
+        sB = Siger(raw=b"\x01" * 64)
+        sC = Siger(raw=b"\x02" * 64)
+        sD = Siger(raw=b"\x03" * 64)
+
+        quadA = (pA, nA, eA, sA)
+        quadB = (pB, nB, eB, sB)
+        quadC = (pC, nC, eC, sC)
+        quadD = (pD, nD, eD, sD)
+
+        vals = [quadD, quadB, quadC, quadA]   # intentionally out of order
+
+        # Initially empty
+        assert baser.vrcs.get(key) == []
+        assert baser.vrcs.cnt(key) == 0
+
+        # Insert multiple typed tuples
+        assert baser.vrcs.put(key, vals) is True
+
+        # Insertion order is preserved
+        stored = baser.vrcs.get(key)
+        assert len(stored) == len(vals)
+        for (sp, sn, se, ss), (ep, en, ee, es) in zip(stored, vals):
+            assert sp.qb64 == ep.qb64
+            assert sn.num == en.num
+            assert se.qb64 == ee.qb64
+            assert ss.raw == es.raw
+
+        assert baser.vrcs.cnt(key) == 4
+
+        assert baser.vrcs.put(key, [quadA]) == False
+        assert baser.vrcs.put(key, [quadB]) == False   # quadB already present → no change
+        assert baser.vrcs.put(key, [quadD]) == False   # quadD already present → no change
+        assert baser.vrcs.put(key, [quadC]) == False   # quadC already present → no change
+
+        # Iteration returns the same tuples in insertion order
+        itered = list(baser.vrcs.getIter(key))
+        for (sp, sn, se, ss), (ep, en, ee, es) in zip(itered, vals):
+            assert sp.qb64 == ep.qb64
+            assert sn.num == en.num
+            assert se.qb64 == ee.qb64
+            assert ss.raw == es.raw
+
+        # Remove individual tuples
+        for quad in vals:
+            assert baser.vrcs.rem(key, quad) == True
+
+        assert baser.vrcs.get(key) == []
+        assert baser.vrcs.cnt(key) == 0
+
+        # Unverified Validator (transferable) Receipt Escrows
+        # test .vres insertion order dup methods.  dup vals are insertion order
+        key = b'A'
+        vals = [b"z", b"m", b"x", b"a"]
+
+        d1 = coring.Diger(ser=b"event1")  # digest of event
+        p1 = coring.Prefixer(qb64="BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")  # fake prefix
+        n1 = Number(num=1)
+        e1 = coring.Diger(ser=b"est1")    # digest of est event
+        s1 = Siger(raw=b"\x00" * 64)  # 64‑byte fake signature
+
+        cesrVal = (d1, p1, n1, e1, s1)
+        cesrVal = [cesrVal]
+
+        assert baser.vres.get(key) == []
+        assert baser.vres.getLast(keys=key) == None
+        assert baser.vres.cnt(key) == 0
+        assert baser.vres.rem(key) == False
+
+        assert baser.vres.put(keys=key, vals=cesrVal) is True
+
+        stored = baser.vres.get(key)
+        assert len(stored) == 1
+        sd1, sp1, sn1, se1, ss1 = stored[0]
+
+        assert sd1.qb64 == d1.qb64
+        assert sp1.qb64 == p1.qb64
+        assert sn1.num == n1.num
+        assert se1.qb64 == e1.qb64
+        assert ss1.raw == s1.raw
+
+        # test .kels insertion order methods.
+        vals = [b"z", b"m", b"x", b"a"]
+        deserializedVals = ["z", "m", "x", "a"]
+
+        assert baser.kels.get(keys=key) == []
+        assert baser.kels.getLast(keys=key)== None
+        assert baser.kels.cntAll(keys=key) == 0
+        assert baser.kels.rem(key) == False
+        assert baser.kels.put(keys=key, vals=vals) == True
+        assert baser.kels.get(keys=key) == deserializedVals  # preserved insertion order
+        assert baser.kels.cntAll(keys=key) == len(vals) == 4
+        assert baser.kels.getLast(keys=key) == deserializedVals[-1]
+        assert baser.kels.put(keys=key, vals=[b'a']) == False   
+        assert baser.kels.get(keys=key) == deserializedVals  #  no change
+        assert baser.kels.add(keys=key, val=b'a') == False   
+        assert baser.kels.add(keys=key, val=b'b') == True
+        assert baser.kels.get(keys=key) == deserializedVals + ['b']
+        assert baser.kels.rem(key) == True
+        assert baser.kels.get(keys=key) == []
+
+        # Partially Signed Escrow Events
+        # test .pses insertion order methods. 
+        pre = b'A'
+        sn = 0
+        key = snKey(pre, sn)
+        vals = [b"z", b"m", b"x", b"a"]
+        deserialized_vals = [baser.pses._des(val) for val in vals] # deserialize for assertion
+
+        # core insertion
+        assert baser.pses.get(keys=key) == []
+        assert baser.pses.getLast(keys=pre, on=sn) == None
+        assert baser.pses.cntAll(keys=key) == 0
+        assert baser.pses.rem(keys=key) == False
+
+        # initial insertion
+        assert baser.pses.put(keys=key, vals=vals) == True
+        assert baser.pses.get(keys=key) == deserialized_vals    #sanity check
+
+        # duplication insertion behavior
+        assert baser.pses.put(keys=key, vals=[b'd', b'k']) == True
+        assert baser.pses.put(keys=key, vals=[b'd']) == False  
+        assert baser.pses.put(keys=key, vals=[b'k']) == False  
+        assert baser.pses.put(keys=key, vals=[b'k',b'd',b'k']) == False
+        assert baser.pses.add(keys=key, val=b'd') == False  
+        assert baser.pses.add(keys=key, val=b'k') == False
+        assert baser.pses.get(keys=key) == deserialized_vals + ['d', 'k']
+
+        # mixed insertion behavior
+        assert baser.pses.put(keys=key, vals=[b'k', b'c']) == True  # True because 'c' is new
+        assert baser.pses.get(keys=key) == deserialized_vals + ['d', 'k', 'c']
+
+        # insertion after deletion
+        assert baser.pses.rem(keys=key, val=b'd') == True   # remove a specific val
+        assert baser.pses.get(keys=key) == deserialized_vals + ['k', 'c']   # d removed
+        assert baser.pses.add(keys=key, val=b'd') == True   # add d back
+        assert baser.pses.get(keys=key) == deserialized_vals + ['k', 'c', 'd']   # d added back
+
+        # empty insertion
+        assert baser.pses.put(keys=key, vals=[]) == False # no vals to add
+        assert baser.pses.get(keys=key) == deserialized_vals + ['k', 'c', 'd'] # no change
+
+        assert baser.pses.add(keys=key, val=b'') == True  # empty val is allowed
+        assert baser.pses.get(key) == deserialized_vals + ['k', 'c', 'd',''] # empty val added
+
+        # clean up
+        assert baser.pses.rem(keys=key) == True
+        assert baser.pses.get(keys=key) == []
+
+        # different key types insertion
+        assert baser.pses.put(keys='B', vals=[b'1', b'2']) == True   # key as str
+        assert baser.pses.add(keys='B', val=b'3') == True
+        assert baser.pses.put(keys=['B'], vals=b'4') == True  # key as list
+        assert baser.pses.add(keys=['B'], val=b'5') == True
+        assert baser.pses.put(keys=("B"), vals=b'6') == True # key as tuple
+        assert baser.pses.add(keys=("B"), val=b'7') == True
+        assert baser.pses.put(keys=memoryview(b'B'), vals=b'8') == True  # key as memoryview
+        assert baser.pses.add(keys=memoryview(b'B'), val=b'9') == True
+        assert baser.pses.get(keys=b'B') == ['1', '2', '3', '4', '5', '6', '7', '8', '9']
+
+        # clean up
+        assert baser.pses.rem(keys=b'B') == True
+        assert baser.pses.get(keys=b'B') == []
+
+        # edge case: add different types of vals
+        assert baser.pses.put(keys=key, vals=[b'a','a']) == True
+        assert baser.pses.get(keys=key) == ['a'] # only 1 value added
+
+        assert baser.pses.rem(keys=key) == True
+        assert baser.pses.get(keys=key) == []
+
+
+        # test .pses retrieval behavior methods
+        # insertion order preserved
+        assert baser.pses.put(keys=pre, on=sn, vals=vals) == True
+        assert baser.pses.get(keys=pre, on=sn) == deserialized_vals
+        assert list(baser.pses.getIter(keys=pre, on=sn)) == deserialized_vals
+        assert baser.pses.getLast(keys=pre, on=sn) == deserialized_vals[-1]
+        assert baser.pses.cntAll(keys=pre, on=sn) == len(vals) == 4
+
+        # retrieval on empty list
+        assert baser.pses.get(keys=b'X') == []
+        assert list(baser.pses.getIter(b'X')) == []
+        assert baser.pses.getLast(keys=b'X') == None
+        assert baser.pses.cntAll(keys=b'X') == 0
+        items = baser.pses.getTopItemIter(keys=b'X')
+        assert list(items) == []
+
+        # getAllItemIter/getTopItemIter retrieval of (keys, on, val) triples in lexicographic key order
+        items = list(baser.pses.getAllItemIter())
+        assert items == [(('A',), 0, 'z'), (('A',), 0, 'm'), (('A',), 0, 'x'), (('A',), 0, 'a')]  # Insertion order preserved for vals
+        items = list(baser.pses.getTopItemIter(keys=key))
+        assert items == [(('A',), 0, 'z'), (('A',), 0, 'm'), (('A',), 0, 'x'), (('A',), 0, 'a')]
+        keysB = (b'B', b'C')
+        assert baser.pses.put(keys=keysB, vals=[b'1', b'2', b'3']) == True
+        items = list(baser.pses.getTopItemIter(keys=keysB))
+        assert items == [(('B', 'C'), 0, '1'), (('B', 'C'), 0, '2'), (('B', 'C'), 0, '3')]
+        items = list(baser.pses.getTopItemIter(keys=keysB[0]))  # top key first element
+        assert items == [(('B', 'C'), 0, '1'), (('B', 'C'), 0, '2'), (('B', 'C'), 0, '3')]
+
+
+        # retrieval with different key types, A is the key used above where key = b'A'
+        assert baser.pses.get(keys=b'A') == deserialized_vals  # key as bytes
+        assert baser.pses.get(keys='A') == deserialized_vals  # key as str
+        assert baser.pses.get(keys=['A']) == deserialized_vals  # key as list
+        assert baser.pses.get(keys=('A',)) == deserialized_vals  # key as tuple
+        assert baser.pses.get(keys=memoryview(b'A')) == deserialized_vals  # key as memoryview
+
+        # retrieval after deletion of specific val
+        assert baser.pses.getLast(keys=pre, on=sn) == 'a'              # vals = [b"z", b"m", b"x", b"a"]
+        assert baser.pses.rem(keys=pre, on=sn, val=b'a') == True           # vals = [b"z", b"m", b"x"]
+        assert baser.pses.get(keys=pre, on=sn) == ['z', 'm', 'x']
+        assert baser.pses.getLast(keys=pre, on=sn) == 'x'
+        assert baser.pses.cntAll(keys=pre, on=sn) == 3
+
+        # clean up
+        assert baser.pses.rem(keys=pre, on=sn) == True
+
+
+        # test .pses pinning behavior method
+        # start clean
+        assert baser.pses.get(keys=key) == []
+        assert baser.pses.put(keys=key, vals=vals) == True
+        assert baser.pses.get(keys=key) == deserialized_vals
+        assert baser.pses.pin(keys=key, vals=[b'a', b'b', b'c']) == True
+        assert baser.pses.get(keys=key) == ['a', 'b', 'c']  # exact overwrite
+
+        # pin with a different list
+        assert baser.pses.pin(keys=key, vals=[b'x', b'y']) == True
+        assert baser.pses.get(keys=key) == ['x', 'y']  # previous values removed
+
+        # pin with empty list (valid use case)
+        assert baser.pses.pin(keys=key, vals=[]) == False  # nothing to pin
+        assert baser.pses.get(keys=key) == ['x', 'y']  # previous values are still here
+        assert baser.pses.rem(keys=key) == True
+
+        # pin after normal insertion
+        assert baser.pses.put(keys=key, vals=[b'1', b'2']) == True
+        assert baser.pses.get(keys=key) == ['1', '2']
+        assert baser.pses.pin(keys=key, vals=[b'Q']) == True
+        assert baser.pses.get(keys=key) == ['Q']  # overwritten
+
+        # edge case: pin with mixed types
+        assert baser.pses.pin(keys=key, vals=[b'A', 'A', memoryview(b'A')]) == True
+        assert baser.pses.get(keys=key) == ['A'] # only one value gets added
+
+        # cleanup
+        assert baser.pses.rem(keys=key) == True
+        assert baser.pses.get(keys=key) == []
+
+
+        # test .pses deletion methods
+        # delete specific val
+        assert baser.pses.put(keys=key, vals=vals) == True
+        assert baser.pses.rem(keys=key, val=b'm') == True
+        assert baser.pses.get(keys=key) == ['z', 'x', 'a']
+
+        # delete non existing val
+        assert baser.pses.rem(keys=key, val=b'y') == False
+        assert baser.pses.get(keys=key) == ['z', 'x', 'a']
+
+        # delete all vals
+        assert baser.pses.rem(keys=key) == True
+        assert baser.pses.get(keys=key) == []
+        assert baser.pses.cntAll(keys=key) == 0 # all vals deleted
+
+        # delete non existing key
+        assert baser.pses.rem(keys=b'X') == False
+
+        # insert other keys to ensure only specified key is deleted
+        assert baser.pses.put(keys=b'A', vals=[b'1']) == True
+        assert baser.pses.put(keys=b'B', vals=[b'2']) == True
+        assert baser.pses.rem(keys=b'A') == True
+        assert baser.pses.get(keys=b'B') == ['2']
+
+        # clean up all entries
+        for k, sn, v in list(baser.pses.getAllItemIter()):
+            assert baser.pses.rem(keys=k, on=sn, val=v) == True
+
+        # Setup Tests for getPsesNext and getPsesNextIter
+        pre = b"A"
+        aSn = 1
+        aKey = snKey(pre=pre, sn=aSn)
+        aVals = [b"z", b"m", b"x"]
+        bSn = 2
+        bKey = snKey(pre=pre, sn=bSn)
+        bVals = [b"o", b"r", b"z"]
+        cSn = 4
+        cKey = snKey(pre=pre, sn=cSn)
+        cVals = [b"h", b"n"]
+        dSn = 7
+        dKey = snKey(pre=pre, sn=dSn)
+        dVals = [b"k", b"b"]
+
+        assert baser.pses.put(keys=pre, on=1, vals=aVals)
+        assert baser.pses.put(keys=pre, on=2, vals=bVals)
+        assert baser.pses.put(keys=pre, on=4, vals=cVals)
+        assert baser.pses.put(keys=pre, on=7, vals=dVals)
+
+        # Test getPseItemsNextIter(key=b"")
+        # vals are in bytes, assertion is done after deserializing
+
+        # aVals
+        items = [item for item in baser.pses.getTopItemIter()]
+        assert items == \
+        [
+            (('A',), 1, 'z'),
+            (('A',), 1, 'm'),
+            (('A',), 1, 'x'),
+            (('A',), 2, 'o'),
+            (('A',), 2, 'r'),
+            (('A',), 2, 'z'),
+            (('A',), 4, 'h'),
+            (('A',), 4, 'n'),
+            (('A',), 7, 'k'),
+            (('A',), 7, 'b')
+        ]
+
+        # aVals
+        items = [item for item in baser.pses.getTopItemIter(keys=aKey)]
+        assert items == [(('A',), 1, 'z'), (('A',), 1, 'm'), (('A',), 1, 'x')]
+
+        # bVals
+        items = [item for item in baser.pses.getTopItemIter(keys=bKey)]
+        assert items  == [(('A',), 2, 'o'), (('A',), 2, 'r'), (('A',), 2, 'z')]
+        for keys, on, val in items:
+            assert baser.pses.rem(keys=keys, on=on, val=val) == True
+
+        # cVals
+        items = [item for item in baser.pses.getTopItemIter(keys=cKey)]
+        assert items == [(('A',), 4, 'h'), (('A',), 4, 'n')]
+        for keys, on, val in items:
+            assert baser.pses.rem(keys=keys, on=on, val=val) == True
+
+        # dVals
+        items = [item for item in baser.pses.getTopItemIter(keys=dKey)]
+        assert items == [(('A',), 7, 'k'), (('A',), 7, 'b')]
+        for keys, on, val in items:
+            assert baser.pses.rem(keys=keys, on=on, val=val) == True
+
+        # clean up all entries
+        for k, sn, v in list(baser.pses.getAllItemIter()):
+            baser.pses.rem(keys=k)
+
+        # test _tokey and _tokeys
+        t = baser.ooes._tokey(aKey)
+        assert baser.ooes._tokeys(t) == ("A", "00000000000000000000000000000001")
+
+
+        # Test .udes partial delegated escrow seal source couples
+        key = dgKey(preb, digb)
+        assert key == f'{preb.decode("utf-8")}.{digb.decode("utf-8")}'.encode("utf-8")
+
+        ssnu1 = b'0AAAAAAAAAAAAAAAAAAAAAAB'
+        sdig1 = b'EALkveIFUPvt38xhtgYYJRCCpAGO7WjjHVR37Pawv67E'
+        ssnu2 = b'0AAAAAAAAAAAAAAAAAAAAAAC'
+        sdig2 = b'EBYYJRCCpAGO7WjjsLhtHVR37Pawv67kveIFUPvt38x0'
+        val1 = ssnu1 + sdig1
+        num1 = coring.Number(qb64b=ssnu1)
+        val2 = ssnu2 + sdig2
+        num2 = coring.Number(qb64b=ssnu2)
+        diger1 = coring.Diger(qb64b=sdig1)
+        diger2 = coring.Diger(qb64b=sdig2)
+
+        # Empty db
+        assert baser.udes.get(keys=key) == None
+        assert baser.udes.rem(keys=key) == False
+
+        # Insert first values and check retrieval
+        assert baser.udes.put(keys=key, val=(num1, diger1)) == True
+        num, diger = baser.udes.get(keys=key)
+        assert num.qb64b + diger.qb64b == val1
+
+        # Attempt insertion with put
+        assert baser.udes.put(keys=key, val=(num2, diger2)) == False  
+        num, diger = baser.udes.get(keys=key)
+        assert num.qb64b + diger.qb64b == val1
+
+        # Insert with pin
+        assert baser.udes.pin(keys=key, val=(num2, diger2)) == True # overwrite
+        num, diger = baser.udes.get(keys=key)
+        assert num.qb64b + diger.qb64b == val2
+        assert baser.udes.rem(keys=key) == True
+        assert baser.udes.get(keys=key) == None
+
+
+        # Partially Witnessed Escrow Events
+        # test .pwes insertion order methods.
+        key = b'A'
+        vals = [b"z", b"m", b"x", b"a"]
+        deserializedVals = ["z", "m", "x", "a"]
+
+        assert baser.pwes.get(key) == []
+        assert baser.pwes.cntAll(key) == 0
+        assert baser.pwes.rem(key) == False
+        assert baser.pwes.put(keys=key, vals=vals) == True
+        assert baser.pwes.get(key) == deserializedVals  # preserved insertion order
+        assert baser.pwes.cntAll(key) == len(vals) == 4
+        assert list(baser.pwes.getLastIter(key))[0] == deserializedVals[-1]
+        assert baser.pwes.put(key, vals=[b'a']) == False   
+        assert baser.pwes.get(key) == deserializedVals  #  no change
+        assert baser.pwes.add(keys=key, val=b"a") == False   
+        assert baser.pwes.add(keys=key, val=b"b") == True
+        assert baser.pwes.get(key) == deserializedVals + ['b']
+        assert [val for val in baser.pwes.getIter(key)] == deserializedVals + ['b']
+        assert baser.pwes.rem(key) == True
+        assert baser.pwes.get(key) == []
+
+        # Setup Tests for getPwesNext and getPwesNextIter
+        pre = b"A"
+        aSn = 1
+        aKey = snKey(pre=pre, sn=aSn)
+        aVals = [b"z", b"m", b"x"]
+        bSn = 2
+        bKey = snKey(pre=pre, sn=bSn)
+        bVals = [b"o", b"r", b"z"]
+        cSn = 4
+        cKey = snKey(pre=pre, sn=cSn)
+        cVals = [b"h", b"n"]
+        dSn = 7
+        dKey = snKey(pre=pre, sn=dSn)
+        dVals = [b"k", b"b"]
+
+        assert baser.pwes.put(keys=pre, on=aSn, vals=aVals)
+        assert baser.pwes.put(keys=pre, on=bSn, vals=bVals)
+        assert baser.pwes.put(keys=pre, on=cSn, vals=cVals)
+        assert baser.pwes.put(keys=pre, on=dSn, vals=dVals)
+
+
+        # Test getOnItemIterAll()
+        # aVals
+        items = [item for item in baser.pwes.getAllItemIter()]
+        assert items  # not empty
+        ikey = snKey(items[0][0][0], items[0][1])
+        assert  ikey == aKey
+        vals = [baser.pwes._ser(val) for  key, sn, val in items]
+        assert vals ==  aVals + bVals + cVals + dVals
+
+        items = [item for item in baser.pwes.getTopItemIter()]
+        assert items == \
+        [
+            (('A',), 1, 'z'),
+            (('A',), 1, 'm'),
+            (('A',), 1, 'x'),
+            (('A',), 2, 'o'),
+            (('A',), 2, 'r'),
+            (('A',), 2, 'z'),
+            (('A',), 4, 'h'),
+            (('A',), 4, 'n'),
+            (('A',), 7, 'k'),
+            (('A',), 7, 'b')
+        ]
+
+        # aVals
+        items = [item for item in baser.pwes.getTopItemIter(keys=aKey)]
+        assert items == [(('A',), 1, 'z'), (('A',), 1, 'm'), (('A',), 1, 'x')]
+
+        # bVals
+        items = [item for item in baser.pwes.getTopItemIter(keys=bKey)]
+        assert items  == [(('A',), 2, 'o'), (('A',), 2, 'r'), (('A',), 2, 'z')]
+        for keys, on, val in items:
+            assert baser.pwes.rem(keys=keys, on=on, val=val) == True
+
+        # cVals
+        items = [item for item in baser.pwes.getTopItemIter(keys=cKey)]
+        assert items == [(('A',), 4, 'h'), (('A',), 4, 'n')]
+        for keys, on, val in items:
+            assert baser.pwes.rem(keys=keys, on=on, val=val) == True
+
+        # dVals
+        items = [item for item in baser.pwes.getTopItemIter(keys=dKey)]
+        assert items == [(('A',), 7, 'k'), (('A',), 7, 'b')]
+        for keys, on, val in items:
+            assert baser.pwes.rem(keys=keys, on=on, val=val) == True
+
+        
+        # Unverified Witness Receipt Escrows
+        # test .uwes insertion order methods.
+        key = b'A'
+        vals = [('z',), ('m',), ('x',), ('a',)]
+
+        assert baser.uwes.get(key) == []  # default on = 0
+        assert baser.uwes.getLast(key) == None
+        assert baser.uwes.cnt(key) == 0
+        assert baser.uwes.rem(key) == False
+        assert baser.uwes.put(key, on=0, vals=vals) == True
+        assert baser.uwes.get(key, 0) == vals # preserved insertion order
+        assert baser.uwes.cnt(key, 0) == len(vals) == 4
+        assert baser.uwes.getLast(key, 0) == vals[-1]
+        assert baser.uwes.put(key, 0, vals=[b'a']) == False   
+        assert baser.uwes.get(key, 0) == vals  #  no change
+        assert baser.uwes.add(key, 0, b'a') == False   
+        assert baser.uwes.add(key, 0, b'b') == True
+        assert baser.uwes.get(key, 0) == [('z',), ('m',), ('x',), ('a',), ('b',)]
+        assert [val for key, on, val in baser.uwes.getTopItemIter(key)] == \
+        [('z',), ('m',), ('x',), ('a',), ('b',)]
+        assert baser.uwes.rem(key, 0) == True
+        assert baser.uwes.get(key, 0) == []
+
+        # Setup Tests
+        keys = ("A", )
+        assert baser.uwes.put(keys=keys, on=1, vals=aVals)
+        assert baser.uwes.put(keys=keys, on=2, vals=bVals)
+        assert baser.uwes.put(keys=keys, on=4, vals=cVals)
+        assert baser.uwes.put(keys=keys, on=7, vals=dVals)
+
+        items = [item for item in baser.uwes.getTopItemIter()]
+        assert items == \
+        [
+            (('A',), 1, ('z',)),
+            (('A',), 1, ('m',)),
+            (('A',), 1, ('x',)),
+            (('A',), 2, ('o',)),
+            (('A',), 2, ('r',)),
+            (('A',), 2, ('z',)),
+            (('A',), 4, ('h',)),
+            (('A',), 4, ('n',)),
+            (('A',), 7, ('k',)),
+            (('A',), 7, ('b',))
+        ]
+
+
+        # Ooes tests
+        # test .ooes insertion behavior methods.
+        pre = 'A'
+        sn = 0
+        key = snKey(pre, sn)
+        vals = [b"z", b"m", b"x", b"a"]
+        deserialized_vals = [baser.ooes._des(val) for val in vals] # deserialize for assertion
+
+        # core insertion
+        assert baser.ooes.get(keys=key) == []
+        assert baser.ooes.cntAll(key) == 0
+        assert baser.ooes.rem(key) == False
+
+        # initial insertion
+        assert baser.ooes.put(keys=key, vals=vals) == True
+        assert baser.ooes.get(key) == deserialized_vals    #sanity check
+
+        # duplication insertion behavior
+        assert baser.ooes.put(keys=key,vals=[b'd', b'k']) == True
+        assert baser.ooes.put(keys=key,vals=[b'd']) == False  
+        assert baser.ooes.put(keys=key,vals=[b'k']) == False  
+        assert baser.ooes.put(keys=key,vals=[b'k',b'd',b'k']) == False
+        assert baser.ooes.add(keys=key, val=b'd') == False  
+        assert baser.ooes.add(keys=key, val=b'k') == False
+        assert baser.ooes.get(keys=key) == deserialized_vals + ['d', 'k']
+
+        # mixed insertion behavior
+        assert baser.ooes.put(keys=key,vals=[b'k', b'c']) == True  # True because 'c' is new
+        assert baser.ooes.get(keys=key) == deserialized_vals + ['d', 'k', 'c']
+
+        # insertion after deletion
+        assert baser.ooes.rem(keys=key, val=b'd') == True   # remove a specific val
+        assert baser.ooes.get(keys=key) == deserialized_vals + ['k', 'c']   # d removed
+        assert baser.ooes.add(keys=key,val=b'd') == True   # add d back
+        assert baser.ooes.get(keys=key) == deserialized_vals + ['k', 'c', 'd']   # d added back
+
+        # empty insertion
+        assert baser.ooes.put(keys=key, vals=[]) == False # no vals to add
+        assert baser.ooes.get(keys=key) == deserialized_vals + ['k', 'c', 'd'] # no change
+
+        assert baser.ooes.add(keys=key, val=b'') == True  # empty val is allowed
+        assert baser.ooes.get(keys=key) == deserialized_vals + ['k', 'c', 'd',''] # empty val added
+
+        # clean up
+        assert baser.ooes.rem(key) == True
+        assert baser.ooes.get(keys=key) == []
+
+        # different key types insertion
+        assert baser.ooes.put(keys='B', vals=[b'1', b'2']) == True   # key as str
+        assert baser.ooes.add(keys='B', val=b'3') == True
+        assert baser.ooes.put(['B'], vals=b'4') == True  # key as list
+        assert baser.ooes.add(keys=['B'], val=b'5') == True
+        assert baser.ooes.put(("B"), vals=b'6') == True # key as tuple
+        assert baser.ooes.add(keys=("B"), val=b'7') == True
+        assert baser.ooes.put(memoryview(b'B'),vals= b'8') == True  # key as memoryview
+        assert baser.ooes.add(keys=memoryview(b'B'), val=b'9') == True
+        assert baser.ooes.get(keys=b'B') == ['1', '2', '3', '4', '5', '6', '7', '8', '9']
+
+        # clean up
+        assert baser.ooes.rem(b'B') == True
+        assert baser.ooes.get(keys=b'B') == []
+
+        # edge case: add different types of vals
+        assert baser.ooes.put(key,vals=[b'a','a']) == True
+        assert baser.ooes.get(keys=key) == ['a'] # only 1 value added
+
+        assert baser.ooes.rem(key) == True
+        assert baser.ooes.get(keys=key) == []
+
+
+        # test .ooes retrieval behavior methods
+        # insertion order preserved
+        assert baser.ooes.put(keys=pre,on=sn, vals=vals) == True
+        assert baser.ooes.get(keys=pre,on=sn) == deserialized_vals
+        assert list(baser.ooes.getAllIter(pre,on=sn)) == deserialized_vals
+        assert baser.ooes.getLast(keys=pre, on=sn) == deserialized_vals[-1]
+        assert baser.ooes.cntAll(pre,on=sn) == len(vals) == 4
+
+        # retrieval on empty list
+        assert baser.ooes.get(keys=b'X') == []
+        assert list(baser.ooes.getAllIter(b'X')) == []
+        assert baser.ooes.getLast(keys=b'X') == None
+        assert baser.ooes.cntAll(b'X') == 0
+        items = baser.ooes.getAllItemIter(keys=b'X')
+        assert list(items) == []
+
+        # getTopItemIter retrieval of (key, val) pairs in lexicographic key order
+        items = list(baser.ooes.getAllItemIter())
+        assert items == [(('A',), 0, 'z'), (('A',), 0, 'm'), (('A',), 0, 'x'), (('A',), 0, 'a')]  # Insertion order preserved for vals
+        assert baser.ooes.put(keys=[b'B', b'C'], vals=[b'1', b'2', b'3']) == True
+        items = list(baser.ooes.getAllItemIter(keys=key))
+        assert all(k[0] == 'A' for k, sn, v in items)
+
+        # retrieval with different key types, A is the key used above where key = b'A'
+        assert baser.ooes.get(keys=b'A') == deserialized_vals  # key as bytes
+        assert baser.ooes.get(keys='A') == deserialized_vals  # key as str
+        assert baser.ooes.get(keys=['A']) == deserialized_vals  # key as list
+        assert baser.ooes.get(keys=('A',)) == deserialized_vals  # key as tuple
+        assert baser.ooes.get(keys=memoryview(b'A')) == deserialized_vals  # key as memoryview
+
+        # retrieval after deletion of a specific val
+        assert baser.ooes.getLast(keys=pre, on=sn) == 'a'              # vals = [b"z", b"m", b"x", b"a"]
+        assert baser.ooes.rem(keys=pre,on=sn, val=b'a') == True           # vals = [b"z", b"m", b"x"]
+        assert baser.ooes.get(keys=pre,on=sn,) == ['z', 'm', 'x']
+        assert baser.ooes.getLast(keys=pre, on=sn) == 'x'
+        assert baser.ooes.cntAll(pre,on=sn) == 3
+
+        # clean up
+        assert baser.ooes.rem(pre,on=sn) == True
+
+
+        # test .ooes pinning behavior method
+        # start clean
+        assert baser.ooes.get(keys=key) == []
+        assert baser.ooes.put(keys=key, vals=vals) == True
+        assert baser.ooes.get(keys=key) == deserialized_vals
+        assert baser.ooes.pin(keys=key, vals=[b'a', b'b', b'c']) == True
+        assert baser.ooes.get(keys=key) == ['a', 'b', 'c']  # exact overwrite
+
+        # pin with a different list
+        assert baser.ooes.pin(keys=key, vals=[b'x', b'y']) == True
+        assert baser.ooes.get(keys=key) == ['x', 'y']  # previous values removed
+
+        # pin with empty list (valid use case)
+        assert baser.ooes.pin(keys=key, vals=[]) == False  # nothing to pin
+        assert baser.ooes.get(keys=key) == ['x', 'y']  # previous values are still here
+        assert baser.ooes.rem(key) == True
+
+        # pin after normal insertion
+        assert baser.ooes.put(keys=key, vals=[b'1', b'2']) == True
+        assert baser.ooes.get(keys=key) == ['1', '2']
+        assert baser.ooes.pin(keys=key, vals=[b'Q']) == True
+        assert baser.ooes.get(keys=key) == ['Q']  # overwritten
+
+        # edge case: pin with mixed types
+        assert baser.ooes.pin(keys=key, vals=[b'A', 'A', memoryview(b'A')]) == True
+        assert baser.ooes.get(keys=key) == ['A'] # Only 1 value added
+
+        # cleanup
+        assert baser.ooes.rem(key) == True
+        assert baser.ooes.get(keys=key) == []
+
+
+        # test .ooes deletion methods
+        # delete specific val
+        assert baser.ooes.put(key, vals=vals) == True
+        assert baser.ooes.rem(key, val=b'm') == True
+        assert baser.ooes.get(keys=key) == ['z', 'x', 'a']
+
+        # delete non existing val
+        assert baser.ooes.rem(key, val=b'y') == False
+        assert baser.ooes.get(keys=key) == ['z', 'x', 'a']
+
+        # delete all vals
+        assert baser.ooes.rem(key) == True
+        assert baser.ooes.get(keys=key) == []
+        assert baser.ooes.cntAll(key) == 0 # all vals deleted
+
+        # delete non existing key
+        assert baser.ooes.rem(b'X') == False
+
+        # insert other keys to ensure only specified key is deleted
+        assert baser.ooes.put(b'A', vals=[b'1']) == True
+        assert baser.ooes.put(b'B', vals=[b'2']) == True
+        assert baser.ooes.rem(b'A') == True
+        assert baser.ooes.get(keys=b'B') == ['2']
+
+        # clean up all entries
+        for k, sn, v in list(baser.ooes.getAllItemIter()):
+            assert baser.ooes.rem(keys=k, on=sn, val=v) == True
+
+
+        # Setup Tests for getOoeItemsNext and getOoeItemsNextIter
+        # vals are in bytes, assertion is done after deserializing
+        pre = b"A"
+        aSn = 1
+        aKey = snKey(pre=pre, sn=aSn)
+        aVals = [b"z", b"m", b"x"]
+        bSn = 2
+        bKey = snKey(pre=pre, sn=bSn)
+        bVals = [b"o", b"r", b"z"]
+        cSn = 4
+        cKey = snKey(pre=pre, sn=cSn)
+        cVals = [b"h", b"n"]
+        dSn = 7
+        dKey = snKey(pre=pre, sn=dSn)
+        dVals = [b"k", b"b"]
+
+        assert baser.ooes.put(keys=pre, on=1, vals=aVals)
+        assert baser.ooes.put(keys=pre, on=2, vals=bVals)
+        assert baser.ooes.put(keys=pre, on=4, vals=cVals)
+        assert baser.ooes.put(keys=pre, on=7, vals=dVals)
+
+
+
+        # aVals
+        items = [item for item in baser.ooes.getTopItemIter(keys=aKey)]
+        assert items == [(('A',), 1, 'z'), (('A',), 1, 'm'), (('A',), 1, 'x')]
+
+        # bVals
+        items = [item for item in baser.ooes.getTopItemIter(keys=bKey)]
+        assert items  == [(('A',), 2, 'o'), (('A',), 2, 'r'), (('A',), 2, 'z')]
+        for keys, on, val in items:
+            assert baser.ooes.rem(keys=keys, on=on, val=val) == True
+
+        # cVals
+        items = [item for item in baser.ooes.getTopItemIter(keys=cKey)]
+        assert items == [(('A',), 4, 'h'), (('A',), 4, 'n')]
+        for keys, on, val in items:
+            assert baser.ooes.rem(keys=keys, on=on, val=val) == True
+
+        # dVals
+        items = [item for item in baser.ooes.getTopItemIter(keys=dKey)]
+        assert items == [(('A',), 7, 'k'), (('A',), 7, 'b')]
+        for keys, on, val in items:
+            assert baser.ooes.rem(keys=keys, on=on, val=val) == True
+
+        # clean up all entries
+        for k, sn, v in list(baser.pses.getAllItemIter()):
+            baser.ooes.rem(keys=k)
+
+        # test _tokey and _tokeys
+        t = baser.ooes._tokey(aKey)
+        assert baser.ooes._tokeys(t) == ("A", "00000000000000000000000000000001")
+
+
+        # test .dels insertion order methods.
+        keys = b'A'
+        on = 0
+        vals = ["z", "m", "x", "a"]
+
+        assert baser.dels.get(keys=keys, on=on) == []
+        result = baser.dels.get(keys=keys, on=on)
+        assert (result[-1] if result else None) == None
+        assert len(baser.dels.get(keys=keys, on=on)) == 0
+        assert baser.dels.rem(keys=keys, on=on) == False
+        for val in vals:
+            baser.dels.add(keys=keys, on=on, val=val)
+        assert baser.dels.get(keys=keys, on=on) == vals  # preserved insertion order
+        assert len(baser.dels.get(keys=keys, on=on)) == len(vals) == 4
+        result = baser.dels.get(keys=keys, on=on)
+        assert result[-1] == vals[-1]
+        assert baser.dels.add(keys=keys, on=on, val='a') == False   
+        assert baser.dels.get(keys=keys, on=on) == vals  #  no change
+        assert baser.dels.add(keys=keys, on=on, val='a') == False   
+        assert baser.dels.add(keys=keys, on=on, val='b') == True
+        assert baser.dels.get(keys=keys, on=on) == ["z", "m", "x", "a", "b"]
+        assert baser.dels.rem(keys=keys, on=on) == True
+        assert baser.dels.get(keys=keys, on=on) == []
+
+
+        # test .ldes insertion order methods.
+        key = b'A'
+        vals = [b"z", b"m", b"x", b"a"]
+
+        assert baser.ldes.get(keys=key) == []
+        assert baser.ldes.getLast(keys=key) == None
+        assert baser.ldes.cnt(keys=key) == 0
+        assert baser.ldes.rem(keys=key) == False
+        # put is not fully compatible with putLdes because putLdes takes a list of vals
+        # while OnIoSetSuber.put takes an iterable of vals.
+        assert baser.ldes.put(keys=key, on=0, vals=vals) == True
+        # OnIoSetSuber decodes bytes to utf-8 strings
+        assert baser.ldes.get(keys=key) == [v.decode("utf-8") for v in vals]
+        assert baser.ldes.cnt(keys=key) == len(vals) == 4
+        assert baser.ldes.getLast(keys=key) == vals[-1].decode("utf-8")
+        assert baser.ldes.put(keys=key, on=0, vals=[b'a']) == False   
+        assert baser.ldes.get(keys=key) == [v.decode("utf-8") for v in vals] #  no change
+        assert baser.ldes.rem(keys=key) == True
+        assert baser.ldes.get(keys=key) == []
+
+        # Setup Tests for getOnItemIter with proper OnIoSetSuber API
+        # Use addOn with explicit ordinal instead of snKey
+        aVals = [b"z", b"m", b"x"]
+        bVals = [b"o", b"r", b"z"]
+        cVals = [b"h", b"n"]
+        dVals = [b"k", b"b"]
+
+        for val in aVals:
+            assert baser.ldes.add(keys=b'A', on=1, val=val) == True
+        for val in bVals:
+            assert baser.ldes.add(keys=b'A', on=2, val=val) == True
+        for val in cVals:
+            assert baser.ldes.add(keys=b'A', on=4, val=val) == True
+        for val in dVals:
+            assert baser.ldes.add(keys=b'A', on=7, val=val) == True
+
+        # Test getOnItemIterAll - iterate all items for prefix b'A'
+        items = [item for item in baser.ldes.getAllItemIter(keys=b'A')]
+        assert items  # not empty
+        # item is (keys, on, val)
+        vals = [val for pre, sn, val in items]
+        allVals = aVals + bVals + cVals + dVals
+        assert vals == [v.decode("utf-8") for v in allVals]
+
+        # Iterate starting from specific ordinal (sn=1)
+        items = [item for item in baser.ldes.getAllItemIter(keys=b'A', on=1)]
+        assert items
+        pre, sn, val = items[0]
+        assert sn == 1
+        assert val == aVals[0].decode("utf-8")
+
+        # Verify vals at sn=1
+        vals = [val for p, s, val in items if s == 1]
+        assert vals == [v.decode("utf-8") for v in aVals]
+
+        # bVals at sn=2
+        items = [item for item in baser.ldes.getAllItemIter(keys=b'A', on=2)]
+        vals = [val for p, s, val in items if s == 2]
+        assert vals == [v.decode("utf-8") for v in bVals]
+        # Remove bVals using remOn
+        for p, s, val in items:
+            if s == 2:
+                assert baser.ldes.rem(keys=b'A', on=s, val=val) == True
+
+        # cVals at sn=4
+        items = [item for item in baser.ldes.getAllItemIter(keys=b'A', on=4)]
+        vals = [val for p, s, val in items if s == 4]
+        assert vals == [v.decode("utf-8") for v in cVals]
+        for p, s, val in items:
+            if s == 4:
+                assert baser.ldes.rem(keys=b'A', on=s, val=val) == True
+
+        # dVals at sn=7
+        items = [item for item in baser.ldes.getAllItemIter(keys=b'A', on=7)]
+        vals = [val for p, s, val in items if s == 7]
+        assert vals == [v.decode("utf-8") for v in dVals]
+        for p, s, val in items:
+            if s == 7:
+                assert baser.ldes.rem(keys=b'A', on=s, val=val) == True
+
+
+        # Test for gpse
+        key = b'a'
+        sdig1 = b'EALkveIFUPvt38xhtgYYJRCCpAGO7WjjHVR37Pawv67E'
+        number = Number(num=0)
+        diger = Diger(qb64=sdig1)
+
+        assert baser.gpse.get(key) == []   # gpse is empty
+        assert baser.gpse.add(keys=key, val=(number, diger)) == True   # add new entry with val as a tuple of number and diger
+
+        val = baser.gpse.get(key)  # returns Cesr tuple of (number, diger)
+        num, dig = val[0]
+        assert isinstance(num, Number)
+        assert isinstance(dig, Diger)
+        assert num.num == number.num
+        assert dig.qb64 == diger.qb64
+
+        assert baser.gpse.rem(key) == True
+        assert baser.gpse.get(key) == []   # gpse is empty again
+
+
+        # Saider and Seqner instead of Diger and Number
+        seqner = Seqner(num=0)
+        saider = Saider(qb64=sdig1)
+        assert baser.gpse.add(keys=key, val=(seqner, saider)) == True # val is not using Number and Diger type
+        val = baser.gpse.get(key)                                     # but it still gets validated
+        assert val is not None
+        seq, dig = val[0]   # returns Cesr tuple of (number, diger)
+
+        assert isinstance(seq, Number) # Seqner gets converted to Number on read
+        assert isinstance(dig, Diger)   # Saider gets converted to Diger on read
+        assert seq.num == seqner.sn
+        assert dig.qb64 == saider.qb64
+
+        # test .imgs  CatCesrSuber with TypeMedia (Noncer, Noncer, Labeler, Texter)
+        said_nonce = Noncer()  # random SAID nonce
+        uuid_nonce = Noncer()  # random UUID blinding nonce
+        mime_label = Labeler(label="image_png")  # MIME type label
+        img_data = Texter(text="iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk")
+
+        img_key = "BIFKYlgMQk78iSSYjE5CWVeLj9UKgBfdfQRos5PK38Yp"
+        assert baser.imgs.get(keys=img_key) is None  # empty
+        assert baser.imgs.put(keys=img_key, val=(said_nonce, uuid_nonce, mime_label, img_data)) == True
+        result = baser.imgs.get(keys=img_key)
+        assert result is not None
+        rsaid, ruuid, rmime, rdata = result
+        assert isinstance(rsaid, Noncer)
+        assert isinstance(ruuid, Noncer)
+        assert isinstance(rmime, Labeler)
+        assert isinstance(rdata, Texter)
+        assert rsaid.qb64 == said_nonce.qb64
+        assert ruuid.qb64 == uuid_nonce.qb64
+        assert rdata.text == img_data.text
+
+        # overwrite with pin
+        new_data = Texter(text="newdata")
+        assert baser.imgs.pin(keys=img_key, val=(said_nonce, uuid_nonce, mime_label, new_data)) == True
+        result = baser.imgs.get(keys=img_key)
+        _, _, _, rdata2 = result
+        assert rdata2.text == "newdata"
+
+        assert baser.imgs.rem(keys=img_key) == True
+        assert baser.imgs.get(keys=img_key) is None
+
+        # test .iimgs  same format for local identifiers
+        assert baser.iimgs.put(keys=img_key, val=(said_nonce, uuid_nonce, mime_label, img_data)) == True
+        result = baser.iimgs.get(keys=img_key)
+        assert result is not None
+        rsaid, ruuid, rmime, rdata = result
+        assert isinstance(rsaid, Noncer)
+        assert isinstance(rdata, Texter)
+        assert baser.iimgs.rem(keys=img_key) == True
+        assert baser.iimgs.get(keys=img_key) is None
+
+
+    asyncio.run(_go())
+
+
+@needskeri
+def test_fetchkeldel():
+    """
+    Test fetching full KEL and full DEL from Baser
+    """
+    async def _go():
+        backend = FakeStorageBackend()
+        baser = WebBaser()
+
+        await baser.reopen(storageOpener=backend.open)
+
+        assert baser.opened
+        assert baser.name == "main"
+
+
+        preb = 'BWzwEHHzq7K0gzQPYGGwTmuupUhPx5_yZ-Wk1x4ejhcc'.encode("utf-8")
+        digb = 'EGAPkzNZMtX-QiVgbRbyAIZGoXvbGv9IPb0foWTZvI_4'.encode("utf-8")
+        sn = 3
+        vs = versify(kind=Kinds.json, size=20)
+        assert vs == 'KERI10JSON000014_'
+
+        ked = dict(vs=vs, pre=preb.decode("utf-8"),
+                sn="{:x}".format(sn),
+                ilk="rot",
+                dig=digb.decode("utf-8"))
+        skedb = json.dumps(ked, separators=(",", ":"), ensure_ascii=False).encode("utf-8")
+        assert skedb == (b'{"vs":"KERI10JSON000014_","pre":"BWzwEHHzq7K0gzQPYGGwTmuupUhPx5_yZ-Wk1x4ejhc'
+                        b'c","sn":"3","ilk":"rot","dig":"EGAPkzNZMtX-QiVgbRbyAIZGoXvbGv9IPb0foWTZvI_4"'
+                        b'}')
+
+        # test kels getAllIter
+        sn = 0
+        vals0 = [skedb]
+        assert baser.kels.add(keys=preb, on=sn, val=vals0[0]) == True
+
+        vals1 = [b"mary", b"peter", b"john", b"paul"]
+        sn += 1
+        for val in vals1:
+            assert baser.kels.add(keys=preb, on=sn, val=val) == True
+
+        vals2 = [b"dog", b"cat", b"bird"]
+        sn += 1
+        for val in vals2:
+            assert baser.kels.add(keys=preb, on=sn, val=val) == True
+
+        vals = list(baser.kels.getAllIter(keys=preb))
+        allvals = [v.decode("utf-8") for v in (vals0 + vals1 + vals2)]
+        assert vals == allvals
+
+        # test kels getLastIter
+        preb = 'B4ejhccWzwEHHzq7K0gzQPYGGwTmuupUhPx5_yZ-Wk1x'.encode("utf-8")
+        sn = 0
+        
+        vals0 = [skedb]
+        assert baser.kels.add(keys=preb, on=sn, val=vals0[0]) == True
+
+        vals1 = [b"mary", b"peter", b"john", b"paul"]
+        sn += 1
+        for val in vals1:
+            assert baser.kels.add(keys=preb, on=sn, val=val) == True
+
+        vals2 = [b"dog", b"cat", b"bird"]
+        sn += 1
+        for val in vals2:
+            assert baser.kels.add(keys=preb, on=sn, val=val) == True
+        vals = list(baser.kels.getLastIter(keys=preb))
+        # Kels being an IoSetSuber, getLastIter calls getIoSetLastItemIterAll,
+        # which iterates over the last-added ioset entry at every effective key,
+        # starting at the first key greater than or equal to the given key, so the
+        # values from the previous tests are yielded here too.
+
+        # Because lexicographically BWzwEHH > B4ejhcc,
+        # when getLastIter iterates, we get B4ejhcc's values first, then BWzwEHH's
+        lastvals = ['{"vs":"KERI10JSON000014_","pre":"BWzwEHHzq7K0gzQPYGGwTmuupUhPx5_yZ-Wk1x4ejhcc","sn":"3","ilk":"rot","dig":"EGAPkzNZMtX-QiVgbRbyAIZGoXvbGv9IPb0foWTZvI_4"}', 'paul', 'bird', 
+                    '{"vs":"KERI10JSON000014_","pre":"BWzwEHHzq7K0gzQPYGGwTmuupUhPx5_yZ-Wk1x4ejhcc","sn":"3","ilk":"rot","dig":"EGAPkzNZMtX-QiVgbRbyAIZGoXvbGv9IPb0foWTZvI_4"}', 'paul', 'bird']
+
+        assert vals == lastvals
+
+
+        # test getDelItemIter
+        preb = 'BTmuupUhPx5_yZ-Wk1x4ejhccWzwEHHzq7K0gzQPYGGw'.encode("utf-8")
+        sn = 1  # do not start at zero
+        key = snKey(preb, sn)
+        assert key == (b'BTmuupUhPx5_yZ-Wk1x4ejhccWzwEHHzq7K0gzQPYGGw.'
+                        b'00000000000000000000000000000001')
+        vals0 = [skedb]
+        assert baser.dels.add(keys=preb, on=sn, val=vals0[0]) == True
+
+        vals1 = [b"mary", b"peter", b"john", b"paul"]
+        sn += 1
+        for val in vals1:
+            assert baser.dels.add(keys=preb, on=sn, val=val) == True
+
+        vals2 = [b"dog", b"cat", b"bird"]
+        sn += 3  # skip make gap in SN
+        for val in vals2:
+            assert baser.dels.add(keys=preb, on=sn, val=val) == True
+
+        allvals = vals0 + vals1 + vals2
+        vals = [(val.encode("utf-8") if isinstance(val, str) else bytes(val))
+            for keys, on, val in baser.dels.getAllItemIter(keys=preb)]
+        assert vals == allvals
+
+    asyncio.run(_go())
+
+
+@needskeri
+def test_usebaser():
+    """
+    Test using Baser
+
+    Drives a WebBaser through a minimal key-event lifecycle: a 2-of-3
+    multisig inception, a rotation to the pre-committed next key set,
+    and an interaction event, verifying Kever accepts each update.
+    """
+
+    async def _go():
+        backend = FakeStorageBackend()
+        baser = WebBaser()
+
+        await baser.reopen(storageOpener=backend.open)
+
+        assert baser.opened
+
+        raw = b'g\x15\x89\x1a@\xa4\xa47\x07\xb9Q\xb8\x18\xcdJW'
+        salter = Salter(raw=raw)
+
+        #  create coe's signers
+        signers = salter.signers(count=8, path='db', temp=True)
+
+        # Event 0  Inception Transferable (nxt digest not empty) 2 0f 3 multisig
+        keys = [signers[0].verfer.qb64, signers[1].verfer.qb64, signers[2].verfer.qb64]
+        count = len(keys)
+        nxtkeys = [signers[3].verfer.qb64b, signers[4].verfer.qb64b, signers[5].verfer.qb64b]
+        sith = "2"
+        code = MtrDex.Blake3_256  # Blake3 digest of incepting data
+        serder = incept(keys=keys,
+                        code=code,
+                        isith=sith,
+                        ndigs=[Diger(ser=key).qb64 for key in nxtkeys])
+
+
+        # sign serialization
+        sigers = [signers[i].sign(serder.raw, index=i) for i in range(count)]
+        # create key event verifier state
+        kever = Kever(serder=serder, sigers=sigers, db=baser)
+
+        # Event 1 Rotation Transferable
+        keys = [signers[3].verfer.qb64, signers[4].verfer.qb64, signers[5].verfer.qb64]
+        nxtkeys = [signers[5].verfer.qb64b, signers[6].verfer.qb64b, signers[7].verfer.qb64b]
+        serder = rotate(pre=kever.prefixer.qb64,
+                        keys=keys,
+                        isith=sith,
+                        dig=kever.serder.said,
+                        ndigs=[Diger(ser=key).qb64 for key in nxtkeys],
+                        sn=1)
+
+        # sign serialization
+        # rotation is signed by the newly-current keys, so indices are offset by count
+        sigers = [signers[i].sign(serder.raw, index=i-count) for i in range(count, count+count)]
+        # update key event verifier state
+        kever.update(serder=serder, sigers=sigers)
+
+
+        # Event 2 Interaction
+        serder = interact(pre=kever.prefixer.qb64,
+                          dig=kever.serder.said,
+                          sn=2)
+
+        # sign serialization  (keys don't change for signing)
+        sigers = [signers[i].sign(serder.raw, index=i-count) for i in range(count, count+count)]
+        # update key event verifier state
+        kever.update(serder=serder, sigers=sigers)
+
+    asyncio.run(_go())
+
+
+def test_clear_escrows():
+    """Test clearEscrows() empties every escrow subdatabase.
+
+    Seeds each escrow subdb with at least one entry, calls
+    db.clearEscrows(), then verifies that every escrow reports zero
+    entries via cntAll().
+    """
+    async def _go():
+        backend = FakeStorageBackend()
+        db = WebBaser()
+
+        await db.reopen(storageOpener=backend.open)
+
+        assert db.opened
+
+        key = b'A'
+        vals = [b"z", b"m", b"x", b"a"]
+        d1 = Diger(ser=b"event1")                     # event digest
+        p1 = Prefixer(qb64="BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
+        n1 = Number(num=1)
+        e1 = Diger(ser=b"est1")                       # est event digest
+        s1 = Siger(raw=b"\x00" * 64)                    # fake sig
+        res_vals = [(d1, p1, n1, e1, s1)]
+
+        db.ures.put(keys=key, vals=res_vals)
+        db.vres.put(keys=key, vals=res_vals)
+        db.pses.put(keys=key, vals=vals)
+        for v in vals:
+            db.pwes.add(keys=key, on=0, val=v)
+        for v in vals:
+            db.ooes.add(keys=key, on=0, val=v)
+
+        db.ldes.put(keys=key, on=0, vals=vals)
+
+        pre = b'k'
+        sn = 0
+        snh = b"%032x" % sn
+        saidb = b'saidb'
+
+        db.uwes.add(keys=pre, on=sn, val=saidb)
+        assert db.uwes.cnt(keys=pre, on=sn) == 1
+
+        db.qnfs.add(keys=(pre, saidb), val=b"z")
+        assert db.qnfs.cnt(keys=(pre, saidb)) == 1
+
+        db.misfits.add(keys=(pre, snh), val=saidb)
+        assert db.misfits.cnt(keys=(pre, snh)) == 1
+
+        db.delegables.add(snKey(pre, 0), saidb)
+        assert db.delegables.cnt(keys=snKey(pre, 0)) == 1
+
+        db.pdes.add(keys=pre, on=0, val=saidb)
+        assert db.pdes.cnt(keys=pre, on=0) == 1
+
+        udesKey = ('DAzwEHHzq7K0gzQPYGGwTmuupUhPx5_yZ-Wk1x4ejhcc'.encode("utf-8"),
+                    'EGAPkzNZMtX-QiVgbRbyAIZGoXvbGv9IPb0foWTZvI_4'.encode("utf-8"))
+        db.udes.put(keys=udesKey, val=(Number(qb64b=b'0AAAAAAAAAAAAAAAAAAAAAAB'),
+                                   Diger(qb64b=b'EALkveIFUPvt38xhtgYYJRCCpAGO7WjjHVR37Pawv67E')))
+        assert db.udes.get(keys=udesKey) is not None
+
+        diger = Diger(qb64b='EGAPkzNZMtX-QiVgbRbyAIZGoXvbGv9IPb0foWTZvI_4')
+        db.rpes.put(keys=('route',), vals=[diger])
+        assert db.rpes.cnt(keys=('route',)) == 1
+
+        db.epsd.put(keys=('DAzwEHHzq7K0gzQPYGGwTmuupUhPx5_yZ-Wk1x4ejhcc',), val=Dater())
+        assert db.epsd.get(keys=('DAzwEHHzq7K0gzQPYGGwTmuupUhPx5_yZ-Wk1x4ejhcc',)) is not None
+
+        db.eoobi.pin(keys=('url',), val=OobiRecord())
+        assert db.eoobi.cnt() == 1
+
+        serder = Serder(raw=b'{"v":"KERI10JSON0000cb_","t":"ixn","d":"EG8WAmM29ZBdoXbnb87yiPxQw4Y7gcQjqZS74vBAKsRm","i":"DApYGFaqnrALTyejaJaGAVhNpSCtqyerPqWVK9ZBNZk0","s":"4","p":"EAskHI462CuIMS_gNkcl_QewzrRSKH2p9zHQIO132Z30","a":[]}')
+        db.dpub.put(keys=(pre, 'said'), val=serder)
+        assert db.dpub.get(keys=(pre, 'said')) is not None
+
+        db.gpwe.add(keys=(pre,), val=(Seqner(qb64b=b'0AAAAAAAAAAAAAAAAAAAAAAB'), diger))
+        assert db.gpwe.cnt(keys=(pre,)) == 1
+
+        db.gdee.add(keys=(pre,), val=(Seqner(qb64b=b'0AAAAAAAAAAAAAAAAAAAAAAB'), diger))
+        assert db.gdee.cnt(keys=(pre,)) == 1
+
+        db.dpwe.pin(keys=(pre, 'said'), val=serder)
+        assert db.dpwe.get(keys=(pre, 'said')) is not None
+
+        db.gpse.add(keys=('qb64',), val=(Number(qb64b=b'0AAAAAAAAAAAAAAAAAAAAAAB'), diger))
+        assert db.gpse.cnt(keys=('qb64',)) == 1
+
+        db.epse.put(keys=('dig',), val=serder)
+        assert db.epse.get(keys=('dig',)) is not None
+
+        db.dune.pin(keys=(pre, 'said'), val=serder)
+        assert db.dune.get(keys=(pre, 'said')) is not None
+
+        db.clearEscrows()
+
+        # each escrow listed exactly once (qnfs previously appeared twice)
+        for escrow in [db.ures, db.vres, db.pses, db.pwes, db.ooes,
+                       db.qnfs, db.uwes, db.misfits, db.delegables,
+                       db.pdes, db.udes, db.rpes, db.ldes, db.epsd,
+                       db.eoobi, db.dpub, db.gpwe, db.gdee, db.dpwe,
+                       db.gpse, db.epse, db.dune]:
+            assert escrow.cntAll() == 0
+
+    asyncio.run(_go())
+
+
+@needskeri
+def test_trim_all_escrows_during_migration():
+    """Regression test for issue #863: old qnfs key format crashes migration.
+
+    When upgrading from keripy <1.2.0, qnfs entries lack the insertion-order
+    suffix (e.g. 'PRE.SAID' instead of 'PRE.SAID.00000000'). The high-level
+    iterators in clearEscrows() call unsuffix() which does int(SAID, 16) and
+    crashes with ValueError.
+
+    _trimAllEscrows() uses low-level .trim() which bypasses key parsing,
+    safely clearing all escrow databases regardless of key format.
+    """
+    async def _go():
+
+        backend = FakeStorageBackend()
+        db = WebBaser()
+
+        await db.reopen(storageOpener=backend.open)
+
+        # Populate escrow databases with test data
+        pre = b'k'
+        saidb = b'saidb'
+        vals = [b"z", b"m", b"x"]
+
+        db.qnfs.add(keys=(pre, saidb), val=b"z")
+        assert db.qnfs.cnt(keys=(pre, saidb)) == 1
+
+        db.pses.put(keys=pre, vals=vals)
+        assert db.pses.cnt(keys=pre) == 3
+
+        ooes_key = (snKey(pre, 0),)
+        db.ooes.put(keys=ooes_key, vals=vals)
+        assert db.ooes.cntAll() > 0
+
+        db.misfits.add(keys=(pre, b'snh'), val=saidb)
+        assert db.misfits.cnt(keys=(pre, b'snh')) == 1
+
+        # _trimAllEscrows clears everything via .trim()
+        db._trimAllEscrows()
+
+        # seeded escrows are empty, and so is every escrow never written to
+        assert db.qnfs.cntAll() == 0
+        assert db.pses.cntAll() == 0
+        assert db.ooes.cntAll() == 0
+        assert db.misfits.cntAll() == 0
+        assert db.ures.cntAll() == 0
+        assert db.vres.cntAll() == 0
+        assert db.pwes.cntAll() == 0
+        assert db.uwes.cntAll() == 0
+        assert db.delegables.cntAll() == 0
+        assert db.pdes.cntAll() == 0
+        assert db.udes.cntAll() == 0
+        assert db.rpes.cntAll() == 0
+        assert db.ldes.cntAll() == 0
+        assert db.epsd.cntAll() == 0
+        assert db.eoobi.cnt() == 0
+        assert db.dpub.cntAll() == 0
+        assert db.gpwe.cntAll() == 0
+        assert db.gdee.cntAll() == 0
+        assert db.dpwe.cntAll() == 0
+        assert db.gpse.cntAll() == 0
+        assert db.epse.cntAll() == 0
+        assert db.dune.cntAll() == 0
+
+    asyncio.run(_go())
+
+
+def test_db_keyspace_end_to_end_migration():
+    """
+    End-to-end test for DB keyspace migration from Seqner.qb64 to Number with Huge code.
+
+    Asserts:
+    - Correct DB writes using Number (Huge)
+    - Correct DB reads using Number (Huge)
+    - Backward compatibility with old Seqner.qb64 keys
+    - Round-trip correctness for Number (Huge)
+    - Lexicographic ordering == numeric ordering (for NEW keys)
+    - Mixed encodings do not break iteration
+    """
+
+    async def _go():
+        backend = FakeStorageBackend()
+        db = WebBaser()
+
+        await db.reopen(storageOpener=backend.open)
+
+        assert db.opened
+
+        sns = [0, 1, 2, 10, 100, 999999, 2**40, 2**80]
+
+        # Build a valid Cigar + Prefixer once, reuse in all values
+        signer = Signer()                     # ephemeral keypair
+        cigar = signer.sign(b"test")          # Cigar
+        pre = cigar.verfer.qb64               # non-transferable prefix
+
+        # old encoding (Seqner.qb64) – backward compatibility
+        for sn in sns:
+            old_key = Seqner(sn=sn).qb64
+            dig = Diger(raw=b"\x00" * 32)     # valid 32-byte raw
+            val = (dig, Prefixer(qb64=pre), cigar)
+            db.ures.add(keys=("OLD", old_key), val=val)
+
+        # new encoding (Number with Huge code)
+        for sn in sns:
+            new_key = Number(num=sn, code=NumDex.Huge).qb64
+            dig = Diger(raw=b"\x01" * 32)     # distinguishable but valid
+            val = (dig, Prefixer(qb64=pre), cigar)
+            db.ures.add(keys=("NEW", new_key), val=val)
+
+        # round-trip correctness for Number with Huge code
+        for sn in sns:
+            enc = Number(num=sn, code=NumDex.Huge).qb64
+            parsed = Number(qb64=enc)
+            assert parsed.num == sn
+
+        # read back old and new keys (existence + type)
+        for sn in sns:
+            old_key = Seqner(sn=sn).qb64
+            new_key = Number(num=sn, code=NumDex.Huge).qb64
+
+            old_vals = db.ures.get(keys=("OLD", old_key))
+            new_vals = db.ures.get(keys=("NEW", new_key))
+
+            assert len(old_vals) == 1
+            assert len(new_vals) == 1
+
+            odig, opre, ocig = old_vals[0]
+            ndig, npre, ncig = new_vals[0]
+
+            # symmetric type checks for both encodings (ocig was previously unchecked)
+            assert isinstance(odig, Diger)
+            assert isinstance(opre, Prefixer)
+            assert isinstance(ocig, type(cigar))
+            assert isinstance(ndig, Diger)
+            assert isinstance(npre, Prefixer)
+            assert isinstance(ncig, type(cigar))
+
+        # lexicographic ordering must match numeric ordering for NEW keys
+        ordered_sns = []
+        for (pre_key, key), vals in db.ures.getTopItemIter():
+            if pre_key == "NEW":
+                n = Number(qb64=key)
+                ordered_sns.append(n.num)
+
+        assert ordered_sns == sns
+
+    asyncio.run(_go())
+
+
+def test_statedict():
+    """
+    Test custom statedict subclass of dict
+
+    Exercises statedict both as a plain in-memory dict (db is None) and
+    with a WebBaser attached, where missing keys fall back to a
+    read-through cache of Kevers rebuilt from persisted key state.
+    """
+
+    async def _go():
+
+        backend = FakeStorageBackend()
+        db = WebBaser()
+
+        await db.reopen(storageOpener=backend.open)
+
+        assert db.opened
+
+        dbd = statedict(a=1, b=2, c=3)  # init in memory so never accesses db
+        assert dbd.db == None
+        assert 'a' in dbd
+        assert 'b' in dbd
+        assert 'c' in dbd
+        assert [(k, v) for k, v in dbd.items()] == [('a', 1), ('b', 2), ('c', 3)]
+        assert list(dbd.keys()) == ['a', 'b', 'c']
+        assert list(dbd.values()) == [1, 2, 3]
+
+        assert dbd.get('a') == 1
+        assert dbd['a'] == 1
+
+        dbd.clear()
+        assert not dbd
+        dbd.db = db
+        assert dbd.db == db
+        assert not dbd
+
+        dbd['a'] = 1
+        dbd['b'] = 2
+        dbd['c'] = 3
+        assert dbd
+        assert dbd.get('a') == 1
+        assert dbd['a'] == 1
+
+        assert [(k, v) for k, v in dbd.items()] == [('a', 1), ('b', 2), ('c', 3)]
+
+        assert 'd' not in dbd
+        assert dbd.get('d') is None
+
+        with pytest.raises(KeyError):
+            dbd['d']  # lookup misses memory and db; no need to bind the result
+
+        dbd.clear()
+        pre = 'DApYGFaqnrALTyejaJaGAVhNpSCtqyerPqWVK9ZBNZk0'
+
+        assert pre not in dbd
+        dig = 'EAskHI462CuIMS_gNkcl_QewzrRSKH2p9zHQIO132Z30'
+        serder = interact(pre=pre, dig=dig, sn=4)
+
+        eevt = StateEstEvent(s='3', d=dig, br=[], ba=[])
+
+        state = eventState(pre=pre,
+                           sn=4,
+                           pig=dig,
+                           dig=serder.said,
+                           fn=4,
+                           eilk=Ilks.ixn,
+                           keys=[pre],
+                           eevt=eevt,
+                           )
+
+        db.evts.put(keys=(pre, serder.said), val=serder)
+        assert db.evts.get(keys=(pre, serder.said)) is not None
+
+        db.states.pin(keys=pre, val=state)  # put state in database
+        dbstate = db.states.get(keys=pre)
+        assert dbstate is not None
+        assert dbstate == state
+
+        kever = Kever(state=state, db=db)
+        assert kever.state() == state
+
+        dkever = dbd[pre]  # read through cache works here
+        dstate = dkever.state()
+        assert  dstate == state
+
+        del dbd[pre]  # not in dbd memory
+        assert pre in dbd  #  read through cache works
+        dkever = dbd[pre]
+        dstate = dkever.state()
+        assert  dstate == state
+
+        db.states.rem(keys=pre)
+        assert pre in dbd  # still in memory
+        del dbd[pre]
+        assert pre not in dbd  # not in memory or db so read through cache misses
+
+    asyncio.run(_go())
+
+
+@needskeri
+def test_close_clear_persistence():
+    """Test close() and aclose() — both clear/preserve paths, temp flag, and
+    post-close inoperability."""
+    async def _go():
+        backend = FakeStorageBackend()
+        baser = WebBaser()
+
+        # --- aclose(clear=False) preserves data ---
+        await baser.reopen(storageOpener=backend.open)
+        baser.oobis.put(keys=("test_cid",), val=OobiRecord(cid="test_cid"))
+        await baser.aclose(clear=False)
+        assert not baser.opened
+        assert baser.db is None  # backing store handle is dropped on close
+
+        await baser.reopen(storageOpener=backend.open)
+        assert baser.oobis.get(keys=("test_cid",)) is not None
+
+        # --- aclose(clear=True) wipes data ---
+        await baser.aclose(clear=True)
+        await baser.reopen(storageOpener=backend.open)
+        assert baser.oobis.get(keys=("test_cid",)) is None
+
+        # --- self.temp=True triggers implicit clear without explicit clear arg ---
+        baser.oobis.put(keys=("tmp",), val=OobiRecord(cid="tmp"))
+        baser.temp = True
+        await baser.aclose()  # clear not passed, but temp=True should clear
+        baser.temp = False
+        await baser.reopen(storageOpener=backend.open)
+        assert baser.oobis.get(keys=("tmp",)) is None
+
+        # --- sync close() preserves data via fire-and-forget flush ---
+        baser.oobis.put(keys=("sync",), val=OobiRecord(cid="sync"))
+        baser.close(clear=False)
+        assert not baser.opened
+        assert baser.db is None
+        await asyncio.sleep(0)  # let fire-and-forget flush task run
+
+        await baser.reopen(storageOpener=backend.open)
+        assert baser.oobis.get(keys=("sync",)) is not None
+
+        # --- sync close(clear=True) wipes data ---
+        baser.close(clear=True)
+        await asyncio.sleep(0)
+
+        await baser.reopen(storageOpener=backend.open)
+        assert baser.oobis.get(keys=("sync",)) is None
+
+        # --- post-close: SubDb attributes are deleted, any access raises ---
+        await baser.aclose()
+        assert baser.db is None
+        assert not baser.opened
+        assert not hasattr(baser, 'oobis')
+        with pytest.raises(AttributeError):
+            baser.oobis.put(keys=("ghost",), val=OobiRecord(cid="ghost"))
+
+        # reopen restores the attributes
+        await baser.reopen(storageOpener=backend.open)
+        assert hasattr(baser, 'oobis')
+
+        # --- double-close is a no-op for both variants ---
+        baser.close()   # sync on already-closed — should not raise
+        await baser.aclose()  # async on already-closed — should not raise
+
+    asyncio.run(_go())
+
+
+@needskeri
+def test_sync_close_no_event_loop():
+    """Test that sync close() works outside a running event loop (no-flush path).
+
+    When there is no running asyncio loop, close() should still drop state
+    without raising — it just can't schedule the flush task.
+    """
+    # Set up baser inside asyncio.run so reopen can await
+    backend = FakeStorageBackend()
+    baser = WebBaser()
+    asyncio.run(baser.reopen(storageOpener=backend.open))
+    assert baser.opened
+
+    baser.oobis.put(keys=("nf",), val=OobiRecord(cid="nf"))
+
+    # Now we're outside asyncio.run — no running event loop
+    baser.close(clear=False)  # should not raise despite no loop
+    assert not baser.opened
+    assert baser.db is None  # in-memory handle dropped even without a flush
+
+    # Data was NOT flushed (no loop to run the task), but the in-memory
+    # state was dropped.  Reopen to confirm flush didn't happen — data
+    # may or may not be there depending on whether a prior flush persisted it.
+    # The key assertion is that close() itself didn't raise.
+
+
+@needskeri
+def test_reload_orphan_cleanup():
+    """Test that reload() removes orphan habs, keeps valid/group habs, and
+    handles MissingEntryError (state exists but event missing)."""
+    async def _go():
+        backend = FakeStorageBackend()
+        baser = WebBaser()
+
+        await baser.reopen(storageOpener=backend.open)
+
+        # --- Build a valid hab with key state ---
+        pre = 'DApYGFaqnrALTyejaJaGAVhNpSCtqyerPqWVK9ZBNZk0'
+        dig = 'EAskHI462CuIMS_gNkcl_QewzrRSKH2p9zHQIO132Z30'
+        serder = interact(pre=pre, dig=dig, sn=4)
+        eevt = StateEstEvent(s='3', d=dig, br=[], ba=[])
+        state = eventState(pre=pre, sn=4, pig=dig, dig=serder.said,
+                           fn=4, eilk=Ilks.ixn, keys=[pre], eevt=eevt)
+
+        # event + state + hab record must all exist for reload to keep it
+        baser.evts.put(keys=(pre, serder.said), val=serder)
+        baser.states.pin(keys=pre, val=state)
+        baser.habs.put(keys=pre, val=HabitatRecord(hid=pre, name="valid"))
+
+        # --- Orphan hab: no key state, mid=None ---
+        orphan_pre = 'DBMbr7Z-pd4KJwzxuptSmCYqxrBnE2xKVO-MnjYkeUrt'
+        baser.habs.put(keys=orphan_pre,
+                       val=HabitatRecord(hid=orphan_pre, name="orphan", mid=None))
+
+        # --- Group hab stub: no key state, but mid is set ---
+        group_pre = 'DCMbr7Z-pd4KJwzxuptSmCYqxrBnE2xKVO-MnjYkeUrt'
+        group_mid = 'DDMbr7Z-pd4KJwzxuptSmCYqxrBnE2xKVO-MnjYkeUrt'
+        baser.habs.put(keys=group_pre,
+                       val=HabitatRecord(hid=group_pre, name="group", mid=group_mid))
+
+        # --- Corrupt hab: state exists but event missing from evts ---
+        # Kever(state=ksr, db=baser) will raise MissingEntryError when it
+        # looks up db.evts.get(keys=(pre, state.d)) and gets None.
+        corrupt_pre = 'DEMbr7Z-pd4KJwzxuptSmCYqxrBnE2xKVO-MnjYkeUrt'
+        corrupt_dig = 'EFskHI462CuIMS_gNkcl_QewzrRSKH2p9zHQIO132Z30'
+        corrupt_serder = interact(pre=corrupt_pre, dig=corrupt_dig, sn=1)
+        corrupt_eevt = StateEstEvent(s='0', d=corrupt_dig, br=[], ba=[])
+        corrupt_state = eventState(pre=corrupt_pre, sn=1, pig=corrupt_dig,
+                                   dig=corrupt_serder.said, fn=1,
+                                   eilk=Ilks.ixn, keys=[corrupt_pre],
+                                   eevt=corrupt_eevt)
+        baser.states.pin(keys=corrupt_pre, val=corrupt_state)
+        # Deliberately do NOT put the event into baser.evts
+        baser.habs.put(keys=corrupt_pre,
+                       val=HabitatRecord(hid=corrupt_pre, name="corrupt"))
+
+        # reload should clean up orphans and corrupt habs
+        baser.reload()
+
+        # Valid hab should be in kevers and prefixes
+        assert pre in baser.prefixes
+        assert pre in baser.kevers
+
+        # Orphan should be removed
+        assert baser.habs.get(keys=orphan_pre) is None
+
+        # Corrupt hab should be removed (MissingEntryError path)
+        assert baser.habs.get(keys=corrupt_pre) is None
+
+        # Group hab should remain (mid is set, so not an orphan)
+        assert baser.habs.get(keys=group_pre) is not None
+
+    asyncio.run(_go())
+
+
+@needskeri
+def test_clean_subdb_swap():
+    """Test that clean() copies unsecured and sets-type subdbs, wipes others."""
+    async def _go():
+        backend = FakeStorageBackend()
+        baser = WebBaser()
+
+        await baser.reopen(storageOpener=backend.open)
+
+        # Write to an "unsecured" SubDb that clean() copies via .put()
+        baser.oobis.put(keys=("test_cid",), val=OobiRecord(cid="test_cid"))
+        assert baser.oobis.get(keys=("test_cid",)) is not None
+
+        # Write to a "sets" SubDb that clean() copies via .add()
+        # chas is CesrIoSetSuber(klas=Diger) in the sets list at webbasing.py:924
+        test_diger = Diger(ser=b"test-challenge-response")
+        baser.chas.add(keys=("challenge_pre",), val=test_diger)
+        assert baser.chas.get(keys=("challenge_pre",))
+
+        # Write to a SubDb NOT in the unsecured/sets lists
+        baser.names.put(keys=("", "myname"), val="somepre")
+        assert baser.names.get(keys=("", "myname")) is not None
+
+        # clean() rebuilds the store, carrying over only copied subdbs
+        await baser.clean()
+
+        # oobis data should survive (unsecured copy via .put())
+        assert baser.oobis.get(keys=("test_cid",)) is not None
+
+        # chas data should survive (sets copy via .add())
+        chas_vals = baser.chas.get(keys=("challenge_pre",))
+        assert chas_vals
+        assert test_diger.qb64 in [v.qb64 for v in chas_vals]
+
+        # names data should be gone (not copied to clone)
+        assert baser.names.get(keys=("", "myname")) is None
+
+        assert baser.opened
+
+    asyncio.run(_go())
+
+
+@needskeri
+def test_web_baser_doer():
+    """Test WebBaserDoer lifecycle: enter guard, exit closes, round-trip, and
+    exit on already-closed baser."""
+    async def _go():
+        backend = FakeStorageBackend()
+        baser = WebBaser()
+
+        # enter() on un-opened baser should raise
+        doer = WebBaserDoer(baser=baser)
+        with pytest.raises(RuntimeError, match="must be opened"):
+            doer.enter()
+
+        # Open baser, enter() should succeed
+        await baser.reopen(storageOpener=backend.open)
+        assert baser.opened
+        doer.enter()  # no error
+
+        # exit() calls sync close() — baser is closed immediately
+        doer.exit()
+        assert not baser.opened
+        await asyncio.sleep(0)  # let fire-and-forget flush run
+
+        # exit() on already-closed baser should not raise
+        doer.exit()  # close() is a no-op when not opened
+
+        # --- Full round-trip: enter -> exit -> reopen -> enter -> exit ---
+        await baser.reopen(storageOpener=backend.open)
+        doer.enter()
+        assert baser.opened
+        doer.exit()
+        assert not baser.opened
+        await asyncio.sleep(0)
+
+        # --- temp=True causes exit to clear data ---
+        await baser.reopen(storageOpener=backend.open)
+        baser.temp = True
+        baser.oobis.put(keys=("x",), val=OobiRecord(cid="x"))
+
+        doer2 = WebBaserDoer(baser=baser)
+        doer2.enter()
+        doer2.exit()
+        assert not baser.opened
+        await asyncio.sleep(0)
+
+        # Reopen and verify data was cleared (temp=True -> clear=True)
+        baser.temp = False
+        await baser.reopen(storageOpener=backend.open)
+        assert baser.oobis.get(keys=("x",)) is None
+
+    asyncio.run(_go())
+
+
+def test_strip_prerelease_webbasing():
+    """Test the locally-duplicated _strip_prerelease in webbasing.py."""
+    import semver
+
+    # The bug _strip_prerelease works around: semver compares alphanumeric
+    # prerelease identifiers lexicographically, so dev4 sorts above dev10.
+    assert semver.compare("1.2.0-dev4", "1.2.0-dev10") == 1
+
+    # _strip_prerelease drops prerelease and build metadata, table-driven
+    expectations = [
+        ("1.2.0-dev4", "1.2.0"),
+        ("1.2.0-dev10", "1.2.0"),
+        ("1.2.0", "1.2.0"),
+        ("0.6.8", "0.6.8"),
+        ("1.2.0-rc1", "1.2.0"),
+        ("2.0.0-dev5+build42", "2.0.0"),
+    ]
+    for raw_version, stripped in expectations:
+        assert _strip_prerelease(raw_version) == stripped
+
+    # Once stripped, migration version comparisons behave correctly:
+    # same release cycle compares equal (skip migration) ...
+    assert semver.compare("1.2.0", _strip_prerelease("1.2.0-dev4")) == 0
+    # ... while a genuinely older db version compares lower (run migration)
+    assert semver.compare("1.2.0", _strip_prerelease("1.0.0")) == 1
+
+
+@needskeri
+def test_trim_all_escrows_web():
+    """Test _trimAllEscrows clears all escrow subdbs via trim().
+
+    trim() with empty keys uses startswith(b"") which matches every key,
+    so all entries are removed regardless of key format.
+    """
+    async def _go():
+        backend = FakeStorageBackend()
+        baser = WebBaser()
+
+        await baser.reopen(storageOpener=backend.open)
+
+        # Inject old-format key directly into qnfs SubDb's SortedDict.
+        # Old format: PRE.SAID (no .00000000 insertion-order suffix)
+        old_key = (b'EBMbr7Z-pd4KJwzxuptSmCYqxrBnE2xKVO-MnjYkeUrt.'
+                   b'EBMbr7Z-pd4KJwzxuptSmCYqxrBnE2xKVO-MnjYkeUrt')
+        baser.qnfs.sdb.items[old_key] = b'EALkveIFUPvt38xhtgYYJRCCpAGO7WjjHVR37Pawv67E'
+        baser.qnfs.sdb.dirty = True  # mark subdb modified so the write is visible
+
+        assert baser.qnfs.cntAll() > 0
+
+        # _trimAllEscrows uses .trim() which bypasses key parsing
+        baser._trimAllEscrows()
+
+        assert baser.qnfs.cntAll() == 0
+
+    asyncio.run(_go())
+
+
+@needskeri
+def test_webbaser_clone_all_pre_iter():
+    """
+    Test cloneAllPreIter yields first-seen event messages for all identifier
+    prefixes in the database, in first-seen (fn) order per prefix.
+    """
+    async def _go():
+        backend = FakeStorageBackend()
+        baser = WebBaser()
+
+        await baser.reopen(storageOpener=backend.open)
+
+        kwa = dict(db=baser)
+
+        with openHby(name="test", base="test", **kwa) as hby:
+            hab1 = hby.makeHab(name="alice", isith="1", icount=1)
+            hab2 = hby.makeHab(name="bob", isith="1", icount=1)
+            # Single shared db now has fels (and evts, sigs) for both identifiers
+            msgs = list(hby.db.cloneAllPreIter())
+            assert len(msgs) >= 2
+            pres = set()
+            for msg in msgs:
+                serder = SerderKERI(raw=bytes(msg))
+                pres.add(serder.pre)
+            assert hab1.pre in pres
+            assert hab2.pre in pres
+
+            hab1.rotate()
+            hab2.rotate()
+
+            msgs = list(hby.db.cloneAllPreIter())
+            assert len(msgs) >= 4  # two icps + two rots
+
+            # per-prefix sequence numbers must come back in ascending order
+            sn_by_pre = {}
+            for msg in msgs:
+                ser = SerderKERI(raw=bytes(msg))
+                sn = ser.sn
+                sn_by_pre.setdefault(ser.pre, []).append(sn)
+
+            for pre, sns in sn_by_pre.items():
+                assert sns == sorted(sns)
+
+    asyncio.run(_go())
diff --git a/tests/db/test_webdbing.py b/tests/db/test_webdbing.py
index 951c8ee63..f10ae5d10 100644
--- a/tests/db/test_webdbing.py
+++ b/tests/db/test_webdbing.py
@@ -6,7 +6,6 @@
 
 import asyncio
 from dataclasses import asdict, dataclass
-from typing import Any
 
 import pytest
 
@@ -26,7 +25,7 @@
     )
     from keri.db import webdbing as webdbing_module
 except ImportError:
-    from webdbing import (  # standalone import for Pyodide
+    from keri.db.webdbing import (  # standalone import for Pyodide
         WebDBer,
         _META_KEY,
         _META_STORE,
@@ -39,14 +38,23 @@
         onKey,
         splitOnKey,
     )
-    import webdbing as webdbing_module
+    import keri.db.webdbing as webdbing_module
 
 try:
-    from keri.db import subing, koming
+    from keri.db import subing, koming, dgKey, snKey
 except ImportError:
     subing = None
     koming = None
 
+try:
+    from keri.core import serdering, coring, signing, indexing
+    from keri import versify, Kinds
+    from keri.recording import EventSourceRecord
+    from keri import core
+except ImportError:
+    # Pyodide fallback
+    from keri.core import serdering
+
 needskeri = pytest.mark.skipif(subing is None, reason="requires full keri (lmdb)")
 
 
@@ -67,6 +75,10 @@ def __getitem__(self, key):
     def __setitem__(self, key, value):
         self._local[key] = value
 
+    def clear(self):
+        """Remove all keys from the local storage buffer."""
+        self._local.clear()
+
     async def sync(self):
         self.backend.persisted[self.namespace] = dict(self._local)