diff --git a/.cursor/rules/navigation.mdc b/.cursor/rules/navigation.mdc index 27416690c..5040bb804 100644 --- a/.cursor/rules/navigation.mdc +++ b/.cursor/rules/navigation.mdc @@ -4,6 +4,13 @@ globs: "" alwaysApply: true --- +# Always + +- Sacrifice grammar for the sake of concision +- List any unresolved questions at the end, if any + +## Navigation + - The Rust CLI lives in `codegenerator/cli`. - Entry point: `codegenerator/cli/src/lib.rs`. - Command dispatcher: `codegenerator/cli/src/commands.rs`. diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index b5879b79b..8516e0134 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -37,6 +37,20 @@ jobs: # Maps tcp port 5432 on service container to the host port of 5433 which envio will use. - 5433:5432 + hasura: + image: hasura/graphql-engine:v2.43.0 + env: + HASURA_GRAPHQL_DATABASE_URL: postgres://postgres:testing@postgres:5432/envio-dev + HASURA_GRAPHQL_ENABLE_CONSOLE: "true" + HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup, http-log, webhook-log, websocket-log, query-log + HASURA_GRAPHQL_NO_OF_RETRIES: 10 + HASURA_GRAPHQL_ADMIN_SECRET: testing + HASURA_GRAPHQL_STRINGIFY_NUMERIC_TYPES: "true" + HASURA_GRAPHQL_UNAUTHORIZED_ROLE: public + PORT: 8080 + ports: + - 8080:8080 + steps: - uses: actions/checkout@v4 - uses: dorny/paths-filter@v3 @@ -122,6 +136,15 @@ jobs: pnpm hardhat compile --verbose pnpm res:build + - name: Wait for Hasura + if: steps.changes.outputs.testChanges == 'true' + shell: bash + run: | + for i in {1..60}; do + if curl -sSf http://localhost:8080/healthz >/dev/null; then + echo "Hasura is up"; exit 0; fi; sleep 1; done + echo "Hasura did not become ready in time"; exit 1 + - name: test_codegen test if: steps.changes.outputs.testChanges == 'true' working-directory: scenarios/test_codegen diff --git a/codegenerator/cli/npm/envio/src/Batch.res b/codegenerator/cli/npm/envio/src/Batch.res index dc040fe3e..85752f6ea 100644 --- a/codegenerator/cli/npm/envio/src/Batch.res +++ b/codegenerator/cli/npm/envio/src/Batch.res @@ -1,17 +1,34 @@ -type progressedChain = { - chainId: int, +open Belt + +@@warning("-44") +open Utils.UnsafeIntOperators + +type chainAfterBatch = { batchSize: int, progressBlockNumber: int, - isProgressAtHead: bool, + totalEventsProcessed: int, + fetchState: FetchState.t, + isProgressAtHeadWhenBatchCreated: bool, +} + +type chainBeforeBatch = { + fetchState: FetchState.t, + reorgDetection: ReorgDetection.t, + progressBlockNumber: int, + sourceBlockNumber: int, totalEventsProcessed: int, } type t = { + totalBatchSize: int, items: array, - progressedChains: array, - updatedFetchStates: ChainMap.t, - dcsToStoreByChainId: dict>, - creationTimeMs: int, + progressedChainsById: dict, + // Unnest-like checkpoint fields: + checkpointIds: array, + checkpointChainIds: array, + checkpointBlockNumbers: array, + checkpointBlockHashes: array>, + checkpointEventsProcessed: array, } /** @@ -22,7 +39,7 @@ let getOrderedNextChain = (fetchStates: ChainMap.t, ~batchSizePerC let earliestChainTimestamp = ref(0) let chainKeys = fetchStates->ChainMap.keys for idx in 0 to chainKeys->Array.length - 1 { - let chain = chainKeys->Array.get(idx) + let chain = chainKeys->Array.getUnsafe(idx) let fetchState = fetchStates->ChainMap.get(chain) if fetchState->FetchState.isActivelyIndexing { let timestamp = fetchState->FetchState.getTimestampAt( @@ -75,37 +92,250 @@ let hasMultichainReadyItem = ( } } +let getProgressedChainsById = { + let getChainAfterBatchIfProgressed = ( + 
~chainBeforeBatch: chainBeforeBatch, + ~progressBlockNumberAfterBatch, + ~fetchStateAfterBatch, + ~batchSize, + ) => { + // The check is sufficient, since we guarantee to include a full block in a batch + // Also, this might be true even if batchSize is 0, + // eg when indexing at the head and chain doesn't have items in a block + if chainBeforeBatch.progressBlockNumber < progressBlockNumberAfterBatch { + Some( + ( + { + batchSize, + progressBlockNumber: progressBlockNumberAfterBatch, + totalEventsProcessed: chainBeforeBatch.totalEventsProcessed + batchSize, + fetchState: fetchStateAfterBatch, + isProgressAtHeadWhenBatchCreated: progressBlockNumberAfterBatch >= + chainBeforeBatch.sourceBlockNumber, + }: chainAfterBatch + ), + ) + } else { + None + } + } + + ( + ~chainsBeforeBatch: ChainMap.t, + ~batchSizePerChain: dict, + ~progressBlockNumberPerChain: dict, + ) => { + let progressedChainsById = Js.Dict.empty() + + // Needed to: + // - Recalculate the computed queue sizes + // - Accumulate registered dynamic contracts to store in the db + // - Trigger onBlock pointer update + chainsBeforeBatch + ->ChainMap.values + ->Array.forEachU(chainBeforeBatch => { + let fetchState = chainBeforeBatch.fetchState + + let progressBlockNumberAfterBatch = switch progressBlockNumberPerChain->Utils.Dict.dangerouslyGetNonOption( + fetchState.chainId->Int.toString, + ) { + | Some(progressBlockNumber) => progressBlockNumber + | None => chainBeforeBatch.progressBlockNumber + } + + switch switch batchSizePerChain->Utils.Dict.dangerouslyGetNonOption( + fetchState.chainId->Int.toString, + ) { + | Some(batchSize) => + let leftItems = fetchState.buffer->Js.Array2.sliceFrom(batchSize) + getChainAfterBatchIfProgressed( + ~chainBeforeBatch, + ~batchSize, + ~fetchStateAfterBatch=fetchState->FetchState.updateInternal(~mutItems=leftItems), + ~progressBlockNumberAfterBatch, + ) + // Skip not affected chains + | None => + getChainAfterBatchIfProgressed( + ~chainBeforeBatch, + ~batchSize=0, + ~fetchStateAfterBatch=chainBeforeBatch.fetchState, + ~progressBlockNumberAfterBatch, + ) + } { + | Some(progressedChain) => + progressedChainsById->Utils.Dict.setByInt( + chainBeforeBatch.fetchState.chainId, + progressedChain, + ) + | None => () + } + }) + + progressedChainsById + } +} + +@inline +let addReorgCheckpoints = ( + ~prevCheckpointId, + ~reorgDetection: ReorgDetection.t, + ~fromBlockExclusive, + ~toBlockExclusive, + ~chainId, + ~mutCheckpointIds, + ~mutCheckpointChainIds, + ~mutCheckpointBlockNumbers, + ~mutCheckpointBlockHashes, + ~mutCheckpointEventsProcessed, +) => { + if ( + reorgDetection.shouldRollbackOnReorg && !(reorgDetection.dataByBlockNumber->Utils.Dict.isEmpty) + ) { + let prevCheckpointId = ref(prevCheckpointId) + for blockNumber in fromBlockExclusive + 1 to toBlockExclusive - 1 { + switch reorgDetection->ReorgDetection.getHashByBlockNumber(~blockNumber) { + | Js.Null.Value(hash) => + let checkpointId = prevCheckpointId.contents + 1 + prevCheckpointId := checkpointId + + mutCheckpointIds->Js.Array2.push(checkpointId)->ignore + mutCheckpointChainIds->Js.Array2.push(chainId)->ignore + mutCheckpointBlockNumbers->Js.Array2.push(blockNumber)->ignore + mutCheckpointBlockHashes->Js.Array2.push(Js.Null.Value(hash))->ignore + mutCheckpointEventsProcessed->Js.Array2.push(0)->ignore + | Js.Null.Null => () + } + } + prevCheckpointId.contents + } else { + prevCheckpointId + } +} + let prepareOrderedBatch = ( + ~checkpointIdBeforeBatch, + ~chainsBeforeBatch: ChainMap.t, ~batchSizeTarget, - ~fetchStates: ChainMap.t, - 
~mutBatchSizePerChain: dict, ) => { - let batchSize = ref(0) + let totalBatchSize = ref(0) let isFinished = ref(false) + let prevCheckpointId = ref(checkpointIdBeforeBatch) + let mutBatchSizePerChain = Js.Dict.empty() + let mutProgressBlockNumberPerChain = Js.Dict.empty() + + let fetchStates = chainsBeforeBatch->ChainMap.map(chainBeforeBatch => chainBeforeBatch.fetchState) + let items = [] + let checkpointIds = [] + let checkpointChainIds = [] + let checkpointBlockNumbers = [] + let checkpointBlockHashes = [] + let checkpointEventsProcessed = [] - while batchSize.contents < batchSizeTarget && !isFinished.contents { + while totalBatchSize.contents < batchSizeTarget && !isFinished.contents { switch fetchStates->getOrderedNextChain(~batchSizePerChain=mutBatchSizePerChain) { | Some(fetchState) => { + let chainBeforeBatch = + chainsBeforeBatch->ChainMap.get(ChainMap.Chain.makeUnsafe(~chainId=fetchState.chainId)) let itemsCountBefore = switch mutBatchSizePerChain->Utils.Dict.dangerouslyGetByIntNonOption( fetchState.chainId, ) { | Some(batchSize) => batchSize | None => 0 } - let newItemsCount = - fetchState->FetchState.getReadyItemsCount(~targetSize=1, ~fromItem=itemsCountBefore) + + let prevBlockNumber = switch mutProgressBlockNumberPerChain->Utils.Dict.dangerouslyGetByIntNonOption( + fetchState.chainId, + ) { + | Some(progressBlockNumber) => progressBlockNumber + | None => chainBeforeBatch.progressBlockNumber + } + + let newItemsCount = fetchState->FetchState.getReadyItemsCount( + // We should get items only for a single block + // Since for the ordered mode next block could be after another chain's block + ~targetSize=1, + ~fromItem=itemsCountBefore, + ) if newItemsCount > 0 { - for idx in itemsCountBefore to itemsCountBefore + newItemsCount - 1 { - items->Js.Array2.push(fetchState.buffer->Belt.Array.getUnsafe(idx))->ignore + let item0 = fetchState.buffer->Array.getUnsafe(itemsCountBefore) + let blockNumber = item0->Internal.getItemBlockNumber + + prevCheckpointId := + addReorgCheckpoints( + ~chainId=fetchState.chainId, + ~reorgDetection=chainBeforeBatch.reorgDetection, + ~prevCheckpointId=prevCheckpointId.contents, + ~fromBlockExclusive=prevBlockNumber, + ~toBlockExclusive=blockNumber, + ~mutCheckpointIds=checkpointIds, + ~mutCheckpointChainIds=checkpointChainIds, + ~mutCheckpointBlockNumbers=checkpointBlockNumbers, + ~mutCheckpointBlockHashes=checkpointBlockHashes, + ~mutCheckpointEventsProcessed=checkpointEventsProcessed, + ) + + let checkpointId = prevCheckpointId.contents + 1 + + items + ->Js.Array2.push(item0) + ->ignore + for idx in 1 to newItemsCount - 1 { + items + ->Js.Array2.push(fetchState.buffer->Belt.Array.getUnsafe(itemsCountBefore + idx)) + ->ignore } - batchSize := batchSize.contents + newItemsCount + + checkpointIds + ->Js.Array2.push(checkpointId) + ->ignore + checkpointChainIds + ->Js.Array2.push(fetchState.chainId) + ->ignore + checkpointBlockNumbers + ->Js.Array2.push(blockNumber) + ->ignore + checkpointBlockHashes + ->Js.Array2.push( + chainBeforeBatch.reorgDetection->ReorgDetection.getHashByBlockNumber(~blockNumber), + ) + ->ignore + checkpointEventsProcessed + ->Js.Array2.push(newItemsCount) + ->ignore + + prevCheckpointId := checkpointId + totalBatchSize := totalBatchSize.contents + newItemsCount mutBatchSizePerChain->Utils.Dict.setByInt( fetchState.chainId, itemsCountBefore + newItemsCount, ) + mutProgressBlockNumberPerChain->Utils.Dict.setByInt(fetchState.chainId, blockNumber) } else { + let blockNumberAfterBatch = fetchState->FetchState.bufferBlockNumber + + 
prevCheckpointId := + addReorgCheckpoints( + ~chainId=fetchState.chainId, + ~reorgDetection=chainBeforeBatch.reorgDetection, + ~prevCheckpointId=prevCheckpointId.contents, + ~fromBlockExclusive=prevBlockNumber, + ~toBlockExclusive=blockNumberAfterBatch + 1, // Make it inclusive + ~mutCheckpointIds=checkpointIds, + ~mutCheckpointChainIds=checkpointChainIds, + ~mutCheckpointBlockNumbers=checkpointBlockNumbers, + ~mutCheckpointBlockHashes=checkpointBlockHashes, + ~mutCheckpointEventsProcessed=checkpointEventsProcessed, + ) + + // Since the chain was chosen as next + // the fact that it doesn't have new items means that it reached the buffer block number + mutProgressBlockNumberPerChain->Utils.Dict.setByInt( + fetchState.chainId, + blockNumberAfterBatch, + ) isFinished := true } } @@ -114,45 +344,187 @@ let prepareOrderedBatch = ( } } - items + { + totalBatchSize: totalBatchSize.contents, + items, + progressedChainsById: getProgressedChainsById( + ~chainsBeforeBatch, + ~batchSizePerChain=mutBatchSizePerChain, + ~progressBlockNumberPerChain=mutProgressBlockNumberPerChain, + ), + checkpointIds, + checkpointChainIds, + checkpointBlockNumbers, + checkpointBlockHashes, + checkpointEventsProcessed, + } } let prepareUnorderedBatch = ( + ~checkpointIdBeforeBatch, + ~chainsBeforeBatch: ChainMap.t, ~batchSizeTarget, - ~fetchStates: ChainMap.t, - ~mutBatchSizePerChain: dict, ) => { let preparedFetchStates = - fetchStates + chainsBeforeBatch ->ChainMap.values - ->FetchState.filterAndSortForUnorderedBatch(~batchSizeTarget) + ->Js.Array2.map(chainBeforeBatch => chainBeforeBatch.fetchState) + ->FetchState.sortForUnorderedBatch(~batchSizeTarget) let chainIdx = ref(0) let preparedNumber = preparedFetchStates->Array.length - let batchSize = ref(0) + let totalBatchSize = ref(0) + + let prevCheckpointId = ref(checkpointIdBeforeBatch) + let mutBatchSizePerChain = Js.Dict.empty() + let mutProgressBlockNumberPerChain = Js.Dict.empty() let items = [] + let checkpointIds = [] + let checkpointChainIds = [] + let checkpointBlockNumbers = [] + let checkpointBlockHashes = [] + let checkpointEventsProcessed = [] // Accumulate items for all actively indexing chains // the way to group as many items from a single chain as possible // This way the loaders optimisations will hit more often - while batchSize.contents < batchSizeTarget && chainIdx.contents < preparedNumber { + while totalBatchSize.contents < batchSizeTarget && chainIdx.contents < preparedNumber { let fetchState = preparedFetchStates->Js.Array2.unsafe_get(chainIdx.contents) let chainBatchSize = fetchState->FetchState.getReadyItemsCount( - ~targetSize=batchSizeTarget - batchSize.contents, + ~targetSize=batchSizeTarget - totalBatchSize.contents, ~fromItem=0, ) + let chainBeforeBatch = + chainsBeforeBatch->ChainMap.get(ChainMap.Chain.makeUnsafe(~chainId=fetchState.chainId)) + + let prevBlockNumber = ref(chainBeforeBatch.progressBlockNumber) if chainBatchSize > 0 { for idx in 0 to chainBatchSize - 1 { - items->Js.Array2.push(fetchState.buffer->Belt.Array.getUnsafe(idx))->ignore + let item = fetchState.buffer->Belt.Array.getUnsafe(idx) + let blockNumber = item->Internal.getItemBlockNumber + + // Every new block we should create a new checkpoint + if blockNumber !== prevBlockNumber.contents { + prevCheckpointId := + addReorgCheckpoints( + ~chainId=fetchState.chainId, + ~reorgDetection=chainBeforeBatch.reorgDetection, + ~prevCheckpointId=prevCheckpointId.contents, + ~fromBlockExclusive=prevBlockNumber.contents, + ~toBlockExclusive=blockNumber, + 
~mutCheckpointIds=checkpointIds, + ~mutCheckpointChainIds=checkpointChainIds, + ~mutCheckpointBlockNumbers=checkpointBlockNumbers, + ~mutCheckpointBlockHashes=checkpointBlockHashes, + ~mutCheckpointEventsProcessed=checkpointEventsProcessed, + ) + + let checkpointId = prevCheckpointId.contents + 1 + + checkpointIds->Js.Array2.push(checkpointId)->ignore + checkpointChainIds->Js.Array2.push(fetchState.chainId)->ignore + checkpointBlockNumbers->Js.Array2.push(blockNumber)->ignore + checkpointBlockHashes + ->Js.Array2.push( + chainBeforeBatch.reorgDetection->ReorgDetection.getHashByBlockNumber(~blockNumber), + ) + ->ignore + checkpointEventsProcessed->Js.Array2.push(1)->ignore + + prevBlockNumber := blockNumber + prevCheckpointId := checkpointId + } else { + let lastIndex = checkpointEventsProcessed->Array.length - 1 + checkpointEventsProcessed + ->Belt.Array.setUnsafe( + lastIndex, + checkpointEventsProcessed->Array.getUnsafe(lastIndex) + 1, + ) + ->ignore + } + + items->Js.Array2.push(item)->ignore } - batchSize := batchSize.contents + chainBatchSize + + totalBatchSize := totalBatchSize.contents + chainBatchSize mutBatchSizePerChain->Utils.Dict.setByInt(fetchState.chainId, chainBatchSize) } + let progressBlockNumberAfterBatch = + fetchState->FetchState.getUnorderedMultichainProgressBlockNumberAt(~index=chainBatchSize) + + prevCheckpointId := + addReorgCheckpoints( + ~chainId=fetchState.chainId, + ~reorgDetection=chainBeforeBatch.reorgDetection, + ~prevCheckpointId=prevCheckpointId.contents, + ~fromBlockExclusive=prevBlockNumber.contents, + ~toBlockExclusive=progressBlockNumberAfterBatch + 1, // Make it inclusive + ~mutCheckpointIds=checkpointIds, + ~mutCheckpointChainIds=checkpointChainIds, + ~mutCheckpointBlockNumbers=checkpointBlockNumbers, + ~mutCheckpointBlockHashes=checkpointBlockHashes, + ~mutCheckpointEventsProcessed=checkpointEventsProcessed, + ) + + mutProgressBlockNumberPerChain->Utils.Dict.setByInt( + fetchState.chainId, + progressBlockNumberAfterBatch, + ) + chainIdx := chainIdx.contents + 1 } - items + { + totalBatchSize: totalBatchSize.contents, + items, + progressedChainsById: getProgressedChainsById( + ~chainsBeforeBatch, + ~batchSizePerChain=mutBatchSizePerChain, + ~progressBlockNumberPerChain=mutProgressBlockNumberPerChain, + ), + checkpointIds, + checkpointChainIds, + checkpointBlockNumbers, + checkpointBlockHashes, + checkpointEventsProcessed, + } +} + +let make = ( + ~checkpointIdBeforeBatch, + ~chainsBeforeBatch: ChainMap.t, + ~multichain: InternalConfig.multichain, + ~batchSizeTarget, +) => { + if ( + switch multichain { + | Unordered => true + | Ordered => chainsBeforeBatch->ChainMap.size === 1 + } + ) { + prepareUnorderedBatch(~checkpointIdBeforeBatch, ~chainsBeforeBatch, ~batchSizeTarget) + } else { + prepareOrderedBatch(~checkpointIdBeforeBatch, ~chainsBeforeBatch, ~batchSizeTarget) + } +} + +let findFirstEventBlockNumber = (batch: t, ~chainId) => { + let idx = ref(0) + let result = ref(None) + let checkpointsLength = batch.checkpointIds->Array.length + while idx.contents < checkpointsLength && result.contents === None { + let checkpointChainId = batch.checkpointChainIds->Array.getUnsafe(idx.contents) + if ( + checkpointChainId === chainId && + batch.checkpointEventsProcessed->Array.getUnsafe(idx.contents) > 0 + ) { + result := Some(batch.checkpointBlockNumbers->Array.getUnsafe(idx.contents)) + } else { + idx := idx.contents + 1 + } + } + result.contents } diff --git a/codegenerator/cli/npm/envio/src/Envio.res b/codegenerator/cli/npm/envio/src/Envio.res index 
98b974bfa..d74f090f7 100644 --- a/codegenerator/cli/npm/envio/src/Envio.res +++ b/codegenerator/cli/npm/envio/src/Envio.res @@ -59,7 +59,6 @@ let experimental_createEffect = ( options: effectOptions<'input, 'output>, handler: effectArgs<'input> => promise<'output>, ) => { - Prometheus.EffectCallsCount.set(~callsCount=0, ~effectName=options.name) let outputSchema = S.schema(_ => options.output)->(Utils.magic: S.t> => S.t) { @@ -86,7 +85,7 @@ let experimental_createEffect = ( }) Some({ table: Internal.makeCacheTable(~effectName=options.name), - rowsSchema: S.array(itemSchema), + outputSchema, itemSchema, }) | None diff --git a/codegenerator/cli/npm/envio/src/EventRegister.res b/codegenerator/cli/npm/envio/src/EventRegister.res index 02fb4894d..78407c98b 100644 --- a/codegenerator/cli/npm/envio/src/EventRegister.res +++ b/codegenerator/cli/npm/envio/src/EventRegister.res @@ -1,4 +1,7 @@ -type registrations = {onBlockByChainId: dict>} +type registrations = { + onBlockByChainId: dict>, + mutable hasEvents: bool, +} type activeRegistration = { ecosystem: InternalConfig.ecosystem, @@ -40,6 +43,7 @@ let startRegistration = (~ecosystem, ~multichain, ~preloadHandlers) => { preloadHandlers, registrations: { onBlockByChainId: Js.Dict.empty(), + hasEvents: false, }, finished: false, } @@ -205,7 +209,8 @@ let setEventOptions = (t: t, ~eventOptions, ~logger=Logging.getLogger()) => { } let setHandler = (t: t, handler, ~eventOptions, ~logger=Logging.getLogger()) => { - withRegistration(_ => { + withRegistration(registration => { + registration.registrations.hasEvents = true switch t.handler { | None => t.handler = @@ -225,7 +230,8 @@ let setHandler = (t: t, handler, ~eventOptions, ~logger=Logging.getLogger()) => } let setContractRegister = (t: t, contractRegister, ~eventOptions, ~logger=Logging.getLogger()) => { - withRegistration(_ => { + withRegistration(registration => { + registration.registrations.hasEvents = true switch t.contractRegister { | None => t.contractRegister = Some( diff --git a/codegenerator/cli/npm/envio/src/EventRegister.resi b/codegenerator/cli/npm/envio/src/EventRegister.resi index 877c74bd8..330fa057e 100644 --- a/codegenerator/cli/npm/envio/src/EventRegister.resi +++ b/codegenerator/cli/npm/envio/src/EventRegister.resi @@ -1,4 +1,7 @@ -type registrations = {onBlockByChainId: dict>} +type registrations = { + onBlockByChainId: dict>, + mutable hasEvents: bool, +} let startRegistration: ( ~ecosystem: InternalConfig.ecosystem, diff --git a/codegenerator/cli/npm/envio/src/FetchState.res b/codegenerator/cli/npm/envio/src/FetchState.res index 5427904ab..1b2d6ce52 100644 --- a/codegenerator/cli/npm/envio/src/FetchState.res +++ b/codegenerator/cli/npm/envio/src/FetchState.res @@ -1,24 +1,5 @@ open Belt -type dcData = { - registeringEventBlockTimestamp: int, - registeringEventLogIndex: int, - registeringEventContractName: string, - registeringEventName: string, - registeringEventSrcAddress: Address.t, -} - -@unboxed -type contractRegister = - | Config - | DC(dcData) -type indexingContract = { - address: Address.t, - contractName: string, - startBlock: int, - register: contractRegister, -} - type contractConfig = {filterByAddresses: bool} type blockNumberAndTimestamp = { @@ -56,12 +37,9 @@ type t = { maxAddrInPartition: int, normalSelection: selection, // By address - indexingContracts: dict, + indexingContracts: dict, // By contract name contractConfigs: dict, - // Registered dynamic contracts that need to be stored in the db - // Should read them at the same time when getting items for the 
batch - dcsToStore: array, // Not used for logic - only metadata chainId: int, // The block number of the latest block fetched @@ -92,7 +70,7 @@ let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition) let allowedAddressesNumber = ref(maxAddrInPartition) - target.addressesByContractName->Utils.Dict.forEachWithKey((contractName, addresses) => { + target.addressesByContractName->Utils.Dict.forEachWithKey((addresses, contractName) => { allowedAddressesNumber := allowedAddressesNumber.contents - addresses->Array.length mergedAddresses->Js.Dict.set(contractName, addresses) }) @@ -100,7 +78,7 @@ let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition) // Start with putting all addresses to the merging dict // And if they exceed the limit, start removing from the merging dict // and putting into the rest dict - p.addressesByContractName->Utils.Dict.forEachWithKey((contractName, addresses) => { + p.addressesByContractName->Utils.Dict.forEachWithKey((addresses, contractName) => { allowedAddressesNumber := allowedAddressesNumber.contents - addresses->Array.length switch mergedAddresses->Utils.Dict.dangerouslyGetNonOption(contractName) { | Some(targetAddresses) => @@ -112,7 +90,7 @@ let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition) let rest = if allowedAddressesNumber.contents < 0 { let restAddresses = Js.Dict.empty() - mergedAddresses->Utils.Dict.forEachWithKey((contractName, addresses) => { + mergedAddresses->Utils.Dict.forEachWithKey((addresses, contractName) => { if allowedAddressesNumber.contents === 0 { () } else if addresses->Array.length <= -allowedAddressesNumber.contents { @@ -204,7 +182,6 @@ let updateInternal = ( ~partitions=fetchState.partitions, ~nextPartitionIndex=fetchState.nextPartitionIndex, ~indexingContracts=fetchState.indexingContracts, - ~dcsToStore=fetchState.dcsToStore, ~mutItems=?, ~blockLag=fetchState.blockLag, ): t => { @@ -304,7 +281,6 @@ let updateInternal = ( latestOnBlockBlockNumber, latestFullyFetchedBlock, indexingContracts, - dcsToStore, blockLag, buffer: switch mutItemsRef.contents { // Theoretically it could be faster to asume that @@ -333,7 +309,11 @@ let updateInternal = ( let numAddresses = fetchState => fetchState.indexingContracts->Js.Dict.keys->Array.length -let warnDifferentContractType = (fetchState, ~existingContract, ~dc: indexingContract) => { +let warnDifferentContractType = ( + fetchState, + ~existingContract: Internal.indexingContract, + ~dc: Internal.indexingContract, +) => { let logger = Logging.createChild( ~params={ "chainId": fetchState.chainId, @@ -347,9 +327,9 @@ let warnDifferentContractType = (fetchState, ~existingContract, ~dc: indexingCon let registerDynamicContracts = ( fetchState: t, - // These are raw dynamic contracts received from contractRegister call. + // These are raw items which might have dynamic contracts received from contractRegister call. // Might contain duplicates which we should filter out - dynamicContracts: array, + items: array, ) => { if fetchState.normalSelection.eventConfigs->Utils.Array.isEmpty { // Can the normalSelection be empty? 
@@ -361,79 +341,83 @@ let registerDynamicContracts = ( } let indexingContracts = fetchState.indexingContracts - let registeringContracts = Js.Dict.empty() + let registeringContracts: dict = Js.Dict.empty() let addressesByContractName = Js.Dict.empty() let earliestRegisteringEventBlockNumber = ref(%raw(`Infinity`)) let hasDCWithFilterByAddresses = ref(false) - for idx in 0 to dynamicContracts->Array.length - 1 { - let dc = dynamicContracts->Js.Array2.unsafe_get(idx) - switch fetchState.contractConfigs->Utils.Dict.dangerouslyGetNonOption(dc.contractName) { - | Some({filterByAddresses}) => - // Prevent registering already indexing contracts - switch indexingContracts->Utils.Dict.dangerouslyGetNonOption(dc.address->Address.toString) { - | Some(existingContract) => - // FIXME: Instead of filtering out duplicates, - // we should check the block number first. - // If new registration with earlier block number - // we should register it for the missing block range - if existingContract.contractName != dc.contractName { - fetchState->warnDifferentContractType(~existingContract, ~dc) - } else if existingContract.startBlock > dc.startBlock { - let logger = Logging.createChild( - ~params={ - "chainId": fetchState.chainId, - "contractAddress": dc.address->Address.toString, - "existingBlockNumber": existingContract.startBlock, - "newBlockNumber": dc.startBlock, - }, - ) - logger->Logging.childWarn(`Skipping contract registration: Contract address is already registered at a later block number. Currently registration of the same contract address is not supported by Envio. Reach out to us if it's a problem for you.`) - } - () - | None => - let shouldUpdate = switch registeringContracts->Utils.Dict.dangerouslyGetNonOption( - dc.address->Address.toString, - ) { - | Some(registeringContract) if registeringContract.contractName != dc.contractName => - fetchState->warnDifferentContractType(~existingContract=registeringContract, ~dc) - false - | Some(registeringContract) => - switch (registeringContract.register, dc.register) { - | ( - DC({registeringEventLogIndex}), - DC({registeringEventLogIndex: newRegisteringEventLogIndex}), - ) => - // Update DC registration if the new one from the batch has an earlier registration log - registeringContract.startBlock > dc.startBlock || - (registeringContract.startBlock === dc.startBlock && - registeringEventLogIndex > newRegisteringEventLogIndex) - | (Config, _) | (_, Config) => - Js.Exn.raiseError( - "Unexpected case: Config registration should be handled in a different function", + for itemIdx in 0 to items->Array.length - 1 { + let item = items->Js.Array2.unsafe_get(itemIdx) + switch item->Internal.getItemDcs { + | None => () + | Some(dcs) => + for idx in 0 to dcs->Array.length - 1 { + let dc = dcs->Js.Array2.unsafe_get(idx) + + switch fetchState.contractConfigs->Utils.Dict.dangerouslyGetNonOption(dc.contractName) { + | Some({filterByAddresses}) => + // Prevent registering already indexing contracts + switch indexingContracts->Utils.Dict.dangerouslyGetNonOption( + dc.address->Address.toString, + ) { + | Some(existingContract) => + // FIXME: Instead of filtering out duplicates, + // we should check the block number first. 
+ // If new registration with earlier block number + // we should register it for the missing block range + if existingContract.contractName != dc.contractName { + fetchState->warnDifferentContractType(~existingContract, ~dc) + } else if existingContract.startBlock > dc.startBlock { + let logger = Logging.createChild( + ~params={ + "chainId": fetchState.chainId, + "contractAddress": dc.address->Address.toString, + "existingBlockNumber": existingContract.startBlock, + "newBlockNumber": dc.startBlock, + }, + ) + logger->Logging.childWarn(`Skipping contract registration: Contract address is already registered at a later block number. Currently registration of the same contract address is not supported by Envio. Reach out to us if it's a problem for you.`) + } + // Remove the DC from item to prevent it from saving to the db + let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx) + | None => + let shouldUpdate = switch registeringContracts->Utils.Dict.dangerouslyGetNonOption( + dc.address->Address.toString, + ) { + | Some(registeringContract) if registeringContract.contractName != dc.contractName => + fetchState->warnDifferentContractType(~existingContract=registeringContract, ~dc) + false + | Some(_) => // Since the DC is registered by an earlier item in the query + // FIXME: This unsafely relies on the asc order of the items + // which is 99% true, but there were cases when the source ordering was wrong + false + | None => + hasDCWithFilterByAddresses := hasDCWithFilterByAddresses.contents || filterByAddresses + addressesByContractName->Utils.Dict.push(dc.contractName, dc.address) + true + } + if shouldUpdate { + earliestRegisteringEventBlockNumber := + Pervasives.min(earliestRegisteringEventBlockNumber.contents, dc.startBlock) + registeringContracts->Js.Dict.set(dc.address->Address.toString, dc) + } else { + // Remove the DC from item to prevent it from saving to the db + let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx) + } + } + | None => { + let logger = Logging.createChild( + ~params={ + "chainId": fetchState.chainId, + "contractAddress": dc.address->Address.toString, + "contractName": dc.contractName, + }, ) + logger->Logging.childWarn(`Skipping contract registration: Contract doesn't have any events to fetch.`) + let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx) } - | None => - hasDCWithFilterByAddresses := hasDCWithFilterByAddresses.contents || filterByAddresses - addressesByContractName->Utils.Dict.push(dc.contractName, dc.address) - true - } - if shouldUpdate { - earliestRegisteringEventBlockNumber := - Pervasives.min(earliestRegisteringEventBlockNumber.contents, dc.startBlock) - registeringContracts->Js.Dict.set(dc.address->Address.toString, dc) } } - | None => { - let logger = Logging.createChild( - ~params={ - "chainId": fetchState.chainId, - "contractAddress": dc.address->Address.toString, - "contractName": dc.contractName, - }, - ) - logger->Logging.childWarn(`Skipping contract registration: Contract doesn't have any events to fetch.`) - } } } @@ -568,10 +552,6 @@ let registerDynamicContracts = ( fetchState->updateInternal( ~partitions=fetchState.partitions->Js.Array2.concat(newPartitions), - ~dcsToStore=switch fetchState.dcsToStore { - | [] => dcsToStore - | existingDcs => Array.concat(existingDcs, dcsToStore) - }, ~indexingContracts=// We don't need registeringContracts anymore, // so we can safely mixin indexingContracts in it // The original indexingContracts won't be mutated @@ -598,7 +578,7 @@ type query = { selection: selection, 
addressesByContractName: dict>, target: queryTarget, - indexingContracts: dict, + indexingContracts: dict, } exception UnexpectedPartitionNotFound({partitionId: string}) @@ -975,7 +955,7 @@ let make = ( ~startBlock, ~endBlock, ~eventConfigs: array, - ~contracts: array, + ~contracts: array, ~maxAddrInPartition, ~chainId, ~targetBufferSize, @@ -1104,7 +1084,6 @@ let make = ( latestOnBlockBlockNumber: progressBlockNumber, normalSelection, indexingContracts, - dcsToStore: [], blockLag, onBlockConfigs, targetBufferSize, @@ -1114,31 +1093,14 @@ let make = ( let bufferSize = ({buffer}: t) => buffer->Array.length -let pruneQueueFromFirstChangeEvent = ( - buffer: array, - ~firstChangeEvent: blockNumberAndLogIndex, -) => { - buffer->Array.keep(item => - switch item { - | Event({blockNumber, logIndex}) - | Block({blockNumber, logIndex}) => (blockNumber, logIndex) - } < - (firstChangeEvent.blockNumber, firstChangeEvent.logIndex) - ) -} - /** Rolls back partitions to the given valid block */ -let rollbackPartition = ( - p: partition, - ~firstChangeEvent: blockNumberAndLogIndex, - ~addressesToRemove, -) => { - let shouldRollbackFetched = p.latestFetchedBlock.blockNumber >= firstChangeEvent.blockNumber +let rollbackPartition = (p: partition, ~targetBlockNumber, ~addressesToRemove) => { + let shouldRollbackFetched = p.latestFetchedBlock.blockNumber > targetBlockNumber let latestFetchedBlock = shouldRollbackFetched ? { - blockNumber: firstChangeEvent.blockNumber - 1, + blockNumber: targetBlockNumber, blockTimestamp: 0, } : p.latestFetchedBlock @@ -1153,7 +1115,7 @@ let rollbackPartition = ( }) | {addressesByContractName} => let rollbackedAddressesByContractName = Js.Dict.empty() - addressesByContractName->Utils.Dict.forEachWithKey((contractName, addresses) => { + addressesByContractName->Utils.Dict.forEachWithKey((addresses, contractName) => { let keptAddresses = addresses->Array.keep(address => !(addressesToRemove->Utils.Set.has(address))) if keptAddresses->Array.length > 0 { @@ -1177,7 +1139,7 @@ let rollbackPartition = ( } } -let rollback = (fetchState: t, ~firstChangeEvent) => { +let rollback = (fetchState: t, ~targetBlockNumber) => { let addressesToRemove = Utils.Set.make() let indexingContracts = Js.Dict.empty() @@ -1185,40 +1147,34 @@ let rollback = (fetchState: t, ~firstChangeEvent) => { ->Js.Dict.keys ->Array.forEach(address => { let indexingContract = fetchState.indexingContracts->Js.Dict.unsafeGet(address) - if ( - switch indexingContract { - | {register: Config} => true - | {register: DC(dc)} => - indexingContract.startBlock < firstChangeEvent.blockNumber || - (indexingContract.startBlock === firstChangeEvent.blockNumber && - dc.registeringEventLogIndex < firstChangeEvent.logIndex) + switch indexingContract.registrationBlock { + | Some(registrationBlock) if registrationBlock > targetBlockNumber => { + //If the registration block is later than the first change event, + //Do not keep it and add to the removed addresses + let _ = addressesToRemove->Utils.Set.add(address->Address.unsafeFromString) } - ) { - indexingContracts->Js.Dict.set(address, indexingContract) - } else { - //If the registration block is later than the first change event, - //Do not keep it and add to the removed addresses - let _ = addressesToRemove->Utils.Set.add(address->Address.unsafeFromString) + | _ => indexingContracts->Js.Dict.set(address, indexingContract) } }) let partitions = fetchState.partitions->Array.keepMap(p => - p->rollbackPartition(~firstChangeEvent, ~addressesToRemove) + 
p->rollbackPartition(~targetBlockNumber, ~addressesToRemove) ) { ...fetchState, - latestOnBlockBlockNumber: firstChangeEvent.blockNumber - 1, // TODO: This is not tested + latestOnBlockBlockNumber: targetBlockNumber, // TODO: This is not tested. I assume there might be a possible issue of it skipping some blocks }->updateInternal( ~partitions, ~indexingContracts, - ~mutItems=fetchState.buffer->pruneQueueFromFirstChangeEvent(~firstChangeEvent), - ~dcsToStore=switch fetchState.dcsToStore { - | [] as empty => empty - | dcsToStore => - dcsToStore->Js.Array2.filter(dc => !(addressesToRemove->Utils.Set.has(dc.address))) - }, + ~mutItems=fetchState.buffer->Array.keep(item => + switch item { + | Event({blockNumber}) + | Block({blockNumber}) => blockNumber + } <= + targetBlockNumber + ), ) } @@ -1252,7 +1208,7 @@ let isReadyToEnterReorgThreshold = ( buffer->Utils.Array.isEmpty } -let filterAndSortForUnorderedBatch = { +let sortForUnorderedBatch = { let hasFullBatch = ({buffer} as fetchState: t, ~batchSizeTarget) => { switch buffer->Belt.Array.get(batchSizeTarget - 1) { | Some(item) => item->Internal.getItemBlockNumber <= fetchState->bufferBlockNumber @@ -1262,20 +1218,24 @@ let filterAndSortForUnorderedBatch = { (fetchStates: array, ~batchSizeTarget: int) => { fetchStates - ->Array.keepU(hasReadyItem) + ->Array.copy ->Js.Array2.sortInPlaceWith((a: t, b: t) => { switch (a->hasFullBatch(~batchSizeTarget), b->hasFullBatch(~batchSizeTarget)) { | (true, true) | (false, false) => - // Use unsafe since we filtered out all queues without batch items - switch (a.buffer->Belt.Array.getUnsafe(0), b.buffer->Belt.Array.getUnsafe(0)) { - | (Event({timestamp: aTimestamp}), Event({timestamp: bTimestamp})) => + switch (a.buffer->Belt.Array.get(0), b.buffer->Belt.Array.get(0)) { + | (Some(Event({timestamp: aTimestamp})), Some(Event({timestamp: bTimestamp}))) => aTimestamp - bTimestamp - | (Block(_), _) - | (_, Block(_)) => + | (Some(Block(_)), _) + | (_, Some(Block(_))) => // Currently block items don't have a timestamp, // so we sort chains with them in a random order Js.Math.random_int(-1, 1) + // We don't care about the order of chains with no items + // Just keep them to increase the progress block number when relevant + | (Some(_), None) => -1 + | (None, Some(_)) => 1 + | (None, None) => 0 } | (true, false) => -1 | (false, true) => 1 @@ -1284,9 +1244,10 @@ let filterAndSortForUnorderedBatch = { } } -let getProgressBlockNumber = ({buffer} as fetchState: t) => { +// Ordered multichain mode can't skip blocks, even if there are no items. 
+let getUnorderedMultichainProgressBlockNumberAt = ({buffer} as fetchState: t, ~index) => { let bufferBlockNumber = fetchState->bufferBlockNumber - switch buffer->Belt.Array.get(0) { + switch buffer->Belt.Array.get(index) { | Some(item) if bufferBlockNumber >= item->Internal.getItemBlockNumber => item->Internal.getItemBlockNumber - 1 | _ => bufferBlockNumber diff --git a/codegenerator/cli/npm/envio/src/Internal.res b/codegenerator/cli/npm/envio/src/Internal.res index 102cab660..ee98172b2 100644 --- a/codegenerator/cli/npm/envio/src/Internal.res +++ b/codegenerator/cli/npm/envio/src/Internal.res @@ -145,6 +145,18 @@ type evmContractConfig = { events: array, } +type indexingContract = { + address: Address.t, + contractName: string, + startBlock: int, + // Needed for rollback + // If not set, assume the contract comes from config + // and shouldn't be rolled back + registrationBlock: option, +} + +type dcs = array + // Duplicate the type from item // to make item properly unboxed type eventItem = private { @@ -200,6 +212,11 @@ external getItemBlockNumber: item => int = "blockNumber" @get external getItemLogIndex: item => int = "logIndex" +@get +external getItemDcs: item => option = "dcs" +@set +external setItemDcs: (item, dcs) => unit = "dcs" + @genType type eventOptions<'eventFilters> = { wildcard?: bool, @@ -234,6 +251,7 @@ let fuelTransferParamsSchema = S.schema(s => { type entity = private {id: string} type genericEntityConfig<'entity> = { name: string, + index: int, schema: S.t<'entity>, rowsSchema: S.t>, table: Table.table, @@ -272,7 +290,7 @@ type effectArgs = { type effectCacheItem = {id: string, output: effectOutput} type effectCacheMeta = { itemSchema: S.t, - rowsSchema: S.t>, + outputSchema: S.t, table: Table.table, } type effect = { @@ -284,16 +302,51 @@ type effect = { mutable callsCount: int, } let cacheTablePrefix = "envio_effect_" +let cacheOutputSchema = S.json(~validate=false)->(Utils.magic: S.t => S.t) +let effectCacheItemRowsSchema = S.array( + S.schema(s => {id: s.matches(S.string), output: s.matches(cacheOutputSchema)}), +) let makeCacheTable = (~effectName) => { Table.mkTable( cacheTablePrefix ++ effectName, ~fields=[ Table.mkField("id", Text, ~fieldSchema=S.string, ~isPrimaryKey=true), - Table.mkField("output", JsonB, ~fieldSchema=S.json(~validate=false), ~isNullable=true), + Table.mkField("output", JsonB, ~fieldSchema=cacheOutputSchema, ~isNullable=true), ], - ~compositeIndices=[], ) } @genType.import(("./Types.ts", "Invalid")) type noEventFilters + +type reorgCheckpoint = { + @as("id") + checkpointId: int, + @as("chain_id") + chainId: int, + @as("block_number") + blockNumber: int, + @as("block_hash") + blockHash: string, +} + +type entityValueAtStartOfBatch<'entityType> = + | NotSet // The entity isn't in the DB yet + | AlreadySet('entityType) + +type updatedValue<'entityType> = { + latest: EntityHistory.entityUpdate<'entityType>, + history: array>, + // In the event of a rollback, some entity updates may have been + // been affected by a rollback diff. If there was no rollback diff + // this will always be false. 
+ // If there was a rollback diff, this will be false in the case of a + // new entity update (where entity affected is not present in the diff) b + // but true if the update is related to an entity that is + // currently present in the diff + containsRollbackDiffChange: bool, +} + +type inMemoryStoreRowEntity<'entityType> = + | Updated(updatedValue<'entityType>) + | InitialReadFromDb(entityValueAtStartOfBatch<'entityType>) // This means there is no change from the db. diff --git a/codegenerator/cli/npm/envio/src/InternalConfig.res b/codegenerator/cli/npm/envio/src/InternalConfig.res index 61f3d2ec6..447b999c1 100644 --- a/codegenerator/cli/npm/envio/src/InternalConfig.res +++ b/codegenerator/cli/npm/envio/src/InternalConfig.res @@ -30,7 +30,7 @@ type chain = { id: int, startBlock: int, endBlock?: int, - confirmedBlockThreshold: int, + maxReorgDepth: int, contracts: array, sources: array, } diff --git a/codegenerator/cli/npm/envio/src/Persistence.res b/codegenerator/cli/npm/envio/src/Persistence.res index 8986dd232..f3449f3ff 100644 --- a/codegenerator/cli/npm/envio/src/Persistence.res +++ b/codegenerator/cli/npm/envio/src/Persistence.res @@ -13,10 +13,25 @@ type effectCacheRecord = { mutable count: int, } +type initialChainState = { + id: int, + startBlock: int, + endBlock: option, + maxReorgDepth: int, + progressBlockNumber: int, + numEventsProcessed: int, + firstEventBlockNumber: option, + timestampCaughtUpToHeadOrEndblock: option, + dynamicContracts: array, +} + type initialState = { cleanRun: bool, cache: dict, - chains: array, + chains: array, + checkpointId: int, + // Needed to keep reorg detection logic between restarts + reorgCheckpoints: array, } type operator = [#">" | #"="] @@ -141,13 +156,13 @@ let init = { Logging.info(`Found existing indexer storage. Resuming indexing state...`) let initialState = await persistence.storage.resumeInitialState() persistence.storageStatus = Ready(initialState) - let checkpoints = Js.Dict.empty() + let progress = Js.Dict.empty() initialState.chains->Js.Array2.forEach(c => { - checkpoints->Utils.Dict.setByInt(c.id, c.progressBlockNumber) + progress->Utils.Dict.setByInt(c.id, c.progressBlockNumber) }) Logging.info({ "msg": `Successfully resumed indexing state! 
Continuing from the last checkpoint.`, - "checkpoints": checkpoints, + "progress": progress, }) } resolveRef.contents() @@ -177,7 +192,12 @@ let getInitializedState = persistence => { } } -let setEffectCacheOrThrow = async (persistence, ~effect: Internal.effect, ~items) => { +let setEffectCacheOrThrow = async ( + persistence, + ~effect: Internal.effect, + ~items, + ~invalidationsCount, +) => { switch persistence.storageStatus { | Unknown | Initializing(_) => @@ -195,7 +215,8 @@ let setEffectCacheOrThrow = async (persistence, ~effect: Internal.effect, ~items } let initialize = effectCacheRecord.count === 0 await storage.setEffectCacheOrThrow(~effect, ~items, ~initialize) - effectCacheRecord.count = effectCacheRecord.count + items->Js.Array2.length + effectCacheRecord.count = + effectCacheRecord.count + items->Js.Array2.length - invalidationsCount Prometheus.EffectCacheCount.set(~count=effectCacheRecord.count, ~effectName) } } diff --git a/codegenerator/cli/npm/envio/src/PgStorage.res b/codegenerator/cli/npm/envio/src/PgStorage.res index 12037b739..00cc530ed 100644 --- a/codegenerator/cli/npm/envio/src/PgStorage.res +++ b/codegenerator/cli/npm/envio/src/PgStorage.res @@ -22,7 +22,7 @@ let makeCreateTableIndicesQuery = (table: Table.table, ~pgSchema) => { compositeIndices->Array.map(createCompositeIndex)->Js.Array2.joinWith("\n") } -let makeCreateTableQuery = (table: Table.table, ~pgSchema) => { +let makeCreateTableQuery = (table: Table.table, ~pgSchema, ~isNumericArrayAsText) => { open Belt let fieldsMapped = table @@ -34,6 +34,8 @@ let makeCreateTableQuery = (table: Table.table, ~pgSchema) => { { `"${fieldName}" ${switch fieldType { | Custom(name) if !(name->Js.String2.startsWith("NUMERIC(")) => `"${pgSchema}".${name}` + // Workaround for Hasura bug https://github.com/enviodev/hyperindex/issues/788 + | Numeric if isArray && isNumericArrayAsText => (Table.Text :> string) | _ => (fieldType :> string) }}${isArray ? 
"[]" : ""}${switch defaultValue { | Some(defaultValue) => ` DEFAULT ${defaultValue}` @@ -57,6 +59,7 @@ let makeCreateTableQuery = (table: Table.table, ~pgSchema) => { let makeInitializeTransaction = ( ~pgSchema, ~pgUser, + ~isHasuraEnabled, ~chainConfigs=[], ~entities=[], ~enums=[], @@ -65,7 +68,7 @@ let makeInitializeTransaction = ( let generalTables = [ InternalTable.Chains.table, InternalTable.PersistedState.table, - InternalTable.EndOfBlockRangeScannedData.table, + InternalTable.Checkpoints.table, InternalTable.RawEvents.table, ] @@ -105,7 +108,10 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`, // Batch all table creation first (optimal for PostgreSQL) allTables->Js.Array2.forEach((table: Table.table) => { - query := query.contents ++ "\n" ++ makeCreateTableQuery(table, ~pgSchema) + query := + query.contents ++ + "\n" ++ + makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=isHasuraEnabled) }) // Then batch all indices (better performance when tables exist) @@ -116,13 +122,8 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`, } }) - let functionsQuery = ref("") - // Add derived indices entities->Js.Array2.forEach((entity: Internal.entityConfig) => { - functionsQuery := - functionsQuery.contents ++ "\n" ++ entity.entityHistory.makeInsertFnQuery(~pgSchema) - entity.table ->Table.getDerivedFromFields ->Js.Array2.forEach(derivedFromField => { @@ -149,10 +150,8 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`, | None => () } - // Add cache row count function - functionsQuery := - functionsQuery.contents ++ - "\n" ++ + [ + query.contents, `CREATE OR REPLACE FUNCTION ${getCacheRowCountFnName}(table_name text) RETURNS integer AS $$ DECLARE @@ -161,11 +160,8 @@ BEGIN EXECUTE format('SELECT COUNT(*) FROM "${pgSchema}".%I', table_name) INTO result; RETURN result; END; -$$ LANGUAGE plpgsql;` - - [query.contents]->Js.Array2.concat( - functionsQuery.contents !== "" ? [functionsQuery.contents] : [], - ) +$$ LANGUAGE plpgsql;`, + ] } let makeLoadByIdQuery = (~pgSchema, ~tableName) => { @@ -270,6 +266,11 @@ let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<' // FIXME what about Fuel params? let isRawEvents = table.tableName === InternalTable.RawEvents.table.tableName + // Currently history update table uses S.object with transformation for schema, + // which is being lossed during conversion to dbSchema. + // So use simple insert values for now. + let isHistoryUpdate = table.tableName->Js.String2.startsWith(EntityHistory.historyTablePrefix) + // Should experiment how much it'll affect performance // Although, it should be fine not to perform the validation check, // since the values are validated by type system. @@ -277,7 +278,7 @@ let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<' // db write fails to show a better user error. 
let typeValidation = false - if isRawEvents || !hasArrayField { + if (isRawEvents || !hasArrayField) && !isHistoryUpdate { { "query": makeInsertUnnestSetQuery(~pgSchema, ~table, ~itemSchema, ~isRawEvents), "convertOrThrow": S.compile( @@ -325,7 +326,7 @@ let chunkArray = (arr: array<'a>, ~chunkSize) => { let removeInvalidUtf8InPlace = entities => entities->Js.Array2.forEach(item => { let dict = item->(Utils.magic: 'a => dict) - dict->Utils.Dict.forEachWithKey((key, value) => { + dict->Utils.Dict.forEachWithKey((value, key) => { if value->Js.typeof === "string" { let value = value->(Utils.magic: unknown => string) // We mutate here, since we don't care @@ -334,7 +335,7 @@ let removeInvalidUtf8InPlace = entities => // This is unsafe, but we rely that it'll use // the mutated reference on retry. // TODO: Test it properly after we start using - // in-memory PGLite for indexer test framework. + // real pg for indexer test framework. dict->Js.Dict.set( key, value @@ -414,44 +415,6 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema } } -let setEntityHistoryOrThrow = ( - sql, - ~entityHistory: EntityHistory.t<'entity>, - ~rows: array>, - ~shouldCopyCurrentEntity=?, - ~shouldRemoveInvalidUtf8=false, -) => { - rows->Belt.Array.map(historyRow => { - let row = historyRow->S.reverseConvertToJsonOrThrow(entityHistory.schema) - if shouldRemoveInvalidUtf8 { - [row]->removeInvalidUtf8InPlace - } - entityHistory.insertFn( - sql, - row, - ~shouldCopyCurrentEntity=switch shouldCopyCurrentEntity { - | Some(v) => v - | None => { - let containsRollbackDiffChange = - historyRow.containsRollbackDiffChange->Belt.Option.getWithDefault(false) - !containsRollbackDiffChange - } - }, - )->Promise.catch(exn => { - let reason = exn->Utils.prettifyExn - let detail = %raw(`reason?.detail || ""`) - raise( - Persistence.StorageError({ - message: `Failed to insert history item into table "${entityHistory.table.tableName}".${detail !== "" - ? 
` Details: ${detail}` - : ""}`, - reason, - }), - ) - }) - }) -} - type schemaTableName = { @as("table_name") tableName: string, @@ -550,6 +513,7 @@ let make = ( ~pgUser, ~pgDatabase, ~pgPassword, + ~isHasuraEnabled, ~onInitialize=?, ~onNewTables=?, ): Persistence.storage => { @@ -595,7 +559,7 @@ let make = ( let table = Internal.makeCacheTable(~effectName) sql - ->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema)) + ->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false)) ->Promise.then(() => { let inputFile = NodeJs.Path.join(cacheDirPath, entry)->NodeJs.Path.toString @@ -688,6 +652,7 @@ let make = ( ~enums, ~chainConfigs, ~isEmptyPgSchema=schemaTableNames->Utils.Array.isEmpty, + ~isHasuraEnabled, ) // Execute all queries within a single transaction for integrity let _ = await sql->Postgres.beginSql(sql => { @@ -707,7 +672,19 @@ let make = ( { cleanRun: true, cache, - chains: chainConfigs->Js.Array2.map(InternalTable.Chains.initialFromConfig), + reorgCheckpoints: [], + chains: chainConfigs->Js.Array2.map((chainConfig): Persistence.initialChainState => { + id: chainConfig.id, + startBlock: chainConfig.startBlock, + endBlock: chainConfig.endBlock, + maxReorgDepth: chainConfig.maxReorgDepth, + progressBlockNumber: -1, + numEventsProcessed: 0, + firstEventBlockNumber: None, + timestampCaughtUpToHeadOrEndblock: None, + dynamicContracts: [], + }), + checkpointId: InternalTable.Checkpoints.initialCheckpointId, } } @@ -821,7 +798,10 @@ let make = ( } if initialize { - let _ = await sql->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema)) + let _ = + await sql->Postgres.unsafe( + makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false), + ) // Integration with other tools like Hasura switch onNewTables { | Some(onNewTables) => await onNewTables(~tableNames=[table.tableName]) @@ -895,19 +875,38 @@ let make = ( } let resumeInitialState = async (): Persistence.initialState => { - let (cache, chains) = await Promise.all2(( + let (cache, chains, checkpointIdResult, reorgCheckpoints) = await Promise.all4(( restoreEffectCache(~withUpload=false), + InternalTable.Chains.getInitialState( + sql, + ~pgSchema, + )->Promise.thenResolve(rawInitialStates => { + rawInitialStates->Belt.Array.map((rawInitialState): Persistence.initialChainState => { + id: rawInitialState.id, + startBlock: rawInitialState.startBlock, + endBlock: rawInitialState.endBlock->Js.Null.toOption, + maxReorgDepth: rawInitialState.maxReorgDepth, + firstEventBlockNumber: rawInitialState.firstEventBlockNumber->Js.Null.toOption, + timestampCaughtUpToHeadOrEndblock: rawInitialState.timestampCaughtUpToHeadOrEndblock->Js.Null.toOption, + numEventsProcessed: rawInitialState.numEventsProcessed, + progressBlockNumber: rawInitialState.progressBlockNumber, + dynamicContracts: rawInitialState.dynamicContracts, + }) + }), sql - ->Postgres.unsafe( - makeLoadAllQuery(~pgSchema, ~tableName=InternalTable.Chains.table.tableName), - ) - ->(Utils.magic: promise> => promise>), + ->Postgres.unsafe(InternalTable.Checkpoints.makeCommitedCheckpointIdQuery(~pgSchema)) + ->(Utils.magic: promise> => promise>), + sql + ->Postgres.unsafe(InternalTable.Checkpoints.makeGetReorgCheckpointsQuery(~pgSchema)) + ->(Utils.magic: promise> => promise>), )) { cleanRun: false, + reorgCheckpoints, cache, chains, + checkpointId: (checkpointIdResult->Belt.Array.getUnsafe(0))["id"], } } diff --git a/codegenerator/cli/npm/envio/src/Prometheus.res b/codegenerator/cli/npm/envio/src/Prometheus.res index dfaac0a71..1941d26bf 100644 --- 
a/codegenerator/cli/npm/envio/src/Prometheus.res +++ b/codegenerator/cli/npm/envio/src/Prometheus.res @@ -477,9 +477,15 @@ module RollbackSuccess = { "help": "Number of successful rollbacks on reorg", }) - let increment = (~timeMillis: Hrtime.milliseconds) => { + let eventsCounter = PromClient.Counter.makeCounter({ + "name": "envio_rollback_events_count", + "help": "Number of events rollbacked on reorg", + }) + + let increment = (~timeMillis: Hrtime.milliseconds, ~rollbackedProcessedEvents) => { timeCounter->PromClient.Counter.incMany(timeMillis->Hrtime.intFromMillis) counter->PromClient.Counter.inc + eventsCounter->PromClient.Counter.incMany(rollbackedProcessedEvents) } } @@ -519,30 +525,6 @@ module RollbackTargetBlockNumber = { } } -module ProcessingBlockNumber = { - let gauge = SafeGauge.makeOrThrow( - ~name="envio_processing_block_number", - ~help="The latest item block number included in the currently processing batch for the chain.", - ~labelSchema=chainIdLabelsSchema, - ) - - let set = (~blockNumber, ~chainId) => { - gauge->SafeGauge.handleInt(~labels=chainId, ~value=blockNumber) - } -} - -module ProcessingBatchSize = { - let gauge = SafeGauge.makeOrThrow( - ~name="envio_processing_batch_size", - ~help="The number of items included in the currently processing batch for the chain.", - ~labelSchema=chainIdLabelsSchema, - ) - - let set = (~batchSize, ~chainId) => { - gauge->SafeGauge.handleInt(~labels=chainId, ~value=batchSize) - } -} - module ProcessingMaxBatchSize = { let gauge = PromClient.Gauge.makeGauge({ "name": "envio_processing_max_batch_size", @@ -587,6 +569,17 @@ module ProgressEventsCount = { } } +module ProgressBatchCount = { + let counter = PromClient.Counter.makeCounter({ + "name": "envio_progress_batches_count", + "help": "The number of batches processed and reflected in the database.", + }) + + let increment = () => { + counter->PromClient.Counter.inc + } +} + let effectLabelsSchema = S.object(s => { s.field("effect", S.string) }) @@ -615,6 +608,18 @@ module EffectCacheCount = { } } +module EffectCacheInvalidationsCount = { + let counter = SafeCounter.makeOrThrow( + ~name="envio_effect_cache_invalidations_count", + ~help="The number of effect cache invalidations.", + ~labelSchema=effectLabelsSchema, + ) + + let increment = (~effectName) => { + counter->SafeCounter.increment(~labels=effectName) + } +} + module StorageLoad = { let operationLabelsSchema = S.object(s => s.field("operation", S.string)) diff --git a/codegenerator/cli/npm/envio/src/ReorgDetection.res b/codegenerator/cli/npm/envio/src/ReorgDetection.res index 58dec963d..1ce384188 100644 --- a/codegenerator/cli/npm/envio/src/ReorgDetection.res +++ b/codegenerator/cli/npm/envio/src/ReorgDetection.res @@ -40,188 +40,168 @@ type reorgResult = NoReorg | ReorgDetected(reorgDetected) type validBlockError = NotFound | AlreadyReorgedHashes type validBlockResult = result -module LastBlockScannedHashes: { - type t - /**Instantiat t with existing data*/ - let makeWithData: ( - array, - ~confirmedBlockThreshold: int, - ~detectedReorgBlock: blockData=?, - ) => t - - /**Instantiat empty t with no block data*/ - let empty: (~confirmedBlockThreshold: int) => t - - /** Registers a new reorg guard, prunes unneeded data, and returns the updated state. 
- * Resets internal state if shouldRollbackOnReorg is false (detect-only mode) - */ - let registerReorgGuard: ( - t, - ~reorgGuard: reorgGuard, - ~currentBlockHeight: int, - ~shouldRollbackOnReorg: bool, - ) => (t, reorgResult) - - /** - Returns the latest block data which matches block number and hashes in the provided array - If it doesn't exist in the reorg threshold it returns None or the latest scanned block outside of the reorg threshold - */ - let getLatestValidScannedBlock: ( - t, - ~blockNumbersAndHashes: array, - ~currentBlockHeight: int, - ~skipReorgDuplicationCheck: bool=?, - ) => validBlockResult - - let getThresholdBlockNumbers: (t, ~currentBlockHeight: int) => array - - let rollbackToValidBlockNumber: (t, ~blockNumber: int) => t -} = { - type t = { - // Number of blocks behind head, we want to keep track - // as a threshold for reorgs. If for eg. this is 200, - // it means we are accounting for reorgs up to 200 blocks - // behind the head - confirmedBlockThreshold: int, - // A hash map of recent blockdata by block number to make comparison checks - // for reorgs. - dataByBlockNumber: dict, - // The latest block which detected a reorg - // and should never be valid. - // We keep track of this to avoid responses - // with the stale data from other data-source instances. - detectedReorgBlock: option, - } +type t = { + // Whether to rollback on reorg + // Even if it's disabled, we still track reorgs checkpoints in memory + // and log when we detect an unhandled reorg + shouldRollbackOnReorg: bool, + // Number of blocks behind head, we want to keep track + // as a threshold for reorgs. If for eg. this is 200, + // it means we are accounting for reorgs up to 200 blocks + // behind the head + maxReorgDepth: int, + // A hash map of recent blockdata by block number to make comparison checks + // for reorgs. + dataByBlockNumber: dict, + // The latest block which detected a reorg + // and should never be valid. + // We keep track of this to avoid responses + // with the stale data from other data-source instances. + detectedReorgBlock: option, +} - let makeWithData = (blocks, ~confirmedBlockThreshold, ~detectedReorgBlock=?) 
=> { - let dataByBlockNumber = Js.Dict.empty() +let make = ( + ~chainReorgCheckpoints: array, + ~maxReorgDepth, + ~shouldRollbackOnReorg, + ~detectedReorgBlock=?, +) => { + let dataByBlockNumber = Js.Dict.empty() + + chainReorgCheckpoints->Belt.Array.forEach(block => { + dataByBlockNumber->Utils.Dict.setByInt( + block.blockNumber, + { + blockHash: block.blockHash, + blockNumber: block.blockNumber, + }, + ) + }) - blocks->Belt.Array.forEach(block => { - dataByBlockNumber->Js.Dict.set(block.blockNumber->Js.Int.toString, block) - }) + { + shouldRollbackOnReorg, + maxReorgDepth, + dataByBlockNumber, + detectedReorgBlock, + } +} - { - confirmedBlockThreshold, - dataByBlockNumber, - detectedReorgBlock, +let getDataByBlockNumberCopyInThreshold = ( + {dataByBlockNumber, maxReorgDepth}: t, + ~currentBlockHeight, +) => { + // Js engine automatically orders numeric object keys + let ascBlockNumberKeys = dataByBlockNumber->Js.Dict.keys + let thresholdBlockNumber = currentBlockHeight - maxReorgDepth + + let copy = Js.Dict.empty() + + for idx in 0 to ascBlockNumberKeys->Array.length - 1 { + let blockNumberKey = ascBlockNumberKeys->Js.Array2.unsafe_get(idx) + let scannedBlock = dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey) + let isInReorgThreshold = scannedBlock.blockNumber >= thresholdBlockNumber + if isInReorgThreshold { + copy->Js.Dict.set(blockNumberKey, scannedBlock) } } - //Instantiates empty LastBlockHashes - let empty = (~confirmedBlockThreshold) => { - confirmedBlockThreshold, - dataByBlockNumber: Js.Dict.empty(), - detectedReorgBlock: None, - } - - let getDataByBlockNumberCopyInThreshold = ( - {dataByBlockNumber, confirmedBlockThreshold}: t, - ~currentBlockHeight, - ) => { - // Js engine automatically orders numeric object keys - let ascBlockNumberKeys = dataByBlockNumber->Js.Dict.keys - let thresholdBlockNumber = currentBlockHeight - confirmedBlockThreshold - let copy = Js.Dict.empty() + copy +} - for idx in 0 to ascBlockNumberKeys->Array.length - 1 { - let blockNumberKey = ascBlockNumberKeys->Js.Array2.unsafe_get(idx) - let scannedBlock = dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey) - let isInReorgThreshold = scannedBlock.blockNumber >= thresholdBlockNumber - if isInReorgThreshold { - copy->Js.Dict.set(blockNumberKey, scannedBlock) +/** Registers a new reorg guard, prunes unneeded data, and returns the updated state. 
+ * Resets internal state if shouldRollbackOnReorg is false (detect-only mode) + */ +let registerReorgGuard = ( + {maxReorgDepth, shouldRollbackOnReorg} as self: t, + ~reorgGuard: reorgGuard, + ~currentBlockHeight, +) => { + let dataByBlockNumberCopyInThreshold = + self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight) + + let {rangeLastBlock, prevRangeLastBlock} = reorgGuard + + let maybeReorgDetected = switch dataByBlockNumberCopyInThreshold->Utils.Dict.dangerouslyGetNonOption( + rangeLastBlock.blockNumber->Int.toString, + ) { + | Some(scannedBlock) if scannedBlock.blockHash !== rangeLastBlock.blockHash => + Some({ + receivedBlock: rangeLastBlock, + scannedBlock, + }) + | _ => + switch prevRangeLastBlock { + //If parentHash is None, then it's the genesis block (no reorg) + //Need to check that parentHash matches because of the dynamic contracts + | None => None + | Some(prevRangeLastBlock) => + switch dataByBlockNumberCopyInThreshold->Utils.Dict.dangerouslyGetNonOption( + prevRangeLastBlock.blockNumber->Int.toString, + ) { + | Some(scannedBlock) if scannedBlock.blockHash !== prevRangeLastBlock.blockHash => + Some({ + receivedBlock: prevRangeLastBlock, + scannedBlock, + }) + | _ => None } } - - copy } - let registerReorgGuard = ( - {confirmedBlockThreshold} as self: t, - ~reorgGuard: reorgGuard, - ~currentBlockHeight, - ~shouldRollbackOnReorg, - ) => { - let dataByBlockNumberCopyInThreshold = - self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight) - - let {rangeLastBlock, prevRangeLastBlock} = reorgGuard - - let maybeReorgDetected = switch dataByBlockNumberCopyInThreshold->Utils.Dict.dangerouslyGetNonOption( - rangeLastBlock.blockNumber->Int.toString, - ) { - | Some(scannedBlock) if scannedBlock.blockHash !== rangeLastBlock.blockHash => - Some({ - receivedBlock: rangeLastBlock, - scannedBlock, - }) - | _ => + switch maybeReorgDetected { + | Some(reorgDetected) => ( + shouldRollbackOnReorg + ? { + ...self, + detectedReorgBlock: Some(reorgDetected.scannedBlock), + } + : make(~chainReorgCheckpoints=[], ~maxReorgDepth, ~shouldRollbackOnReorg), + ReorgDetected(reorgDetected), + ) + | None => { + dataByBlockNumberCopyInThreshold->Js.Dict.set( + rangeLastBlock.blockNumber->Int.toString, + rangeLastBlock, + ) switch prevRangeLastBlock { - //If parentHash is None, then it's the genesis block (no reorg) - //Need to check that parentHash matches because of the dynamic contracts - | None => None + | None => () | Some(prevRangeLastBlock) => - switch dataByBlockNumberCopyInThreshold->Utils.Dict.dangerouslyGetNonOption( + dataByBlockNumberCopyInThreshold->Js.Dict.set( prevRangeLastBlock.blockNumber->Int.toString, - ) { - | Some(scannedBlock) if scannedBlock.blockHash !== prevRangeLastBlock.blockHash => - Some({ - receivedBlock: prevRangeLastBlock, - scannedBlock, - }) - | _ => None - } + prevRangeLastBlock, + ) } - } - switch maybeReorgDetected { - | Some(reorgDetected) => ( - shouldRollbackOnReorg - ? 
{ - ...self, - detectedReorgBlock: Some(reorgDetected.scannedBlock), - } - : empty(~confirmedBlockThreshold), - ReorgDetected(reorgDetected), + ( + { + maxReorgDepth, + dataByBlockNumber: dataByBlockNumberCopyInThreshold, + detectedReorgBlock: None, + shouldRollbackOnReorg, + }, + NoReorg, ) - | None => { - dataByBlockNumberCopyInThreshold->Js.Dict.set( - rangeLastBlock.blockNumber->Int.toString, - rangeLastBlock, - ) - switch prevRangeLastBlock { - | None => () - | Some(prevRangeLastBlock) => - dataByBlockNumberCopyInThreshold->Js.Dict.set( - prevRangeLastBlock.blockNumber->Int.toString, - prevRangeLastBlock, - ) - } - - ( - { - confirmedBlockThreshold, - dataByBlockNumber: dataByBlockNumberCopyInThreshold, - detectedReorgBlock: None, - }, - NoReorg, - ) - } } } +} - let getLatestValidScannedBlock = ( - self: t, - ~blockNumbersAndHashes: array, - ~currentBlockHeight, - ~skipReorgDuplicationCheck=false, - ) => { - let verifiedDataByBlockNumber = Js.Dict.empty() - for idx in 0 to blockNumbersAndHashes->Array.length - 1 { - let blockData = blockNumbersAndHashes->Array.getUnsafe(idx) - verifiedDataByBlockNumber->Js.Dict.set(blockData.blockNumber->Int.toString, blockData) - } +/** +Returns the latest block data which matches block number and hashes in the provided array +If it doesn't exist in the reorg threshold it returns None or the latest scanned block outside of the reorg threshold +*/ +let getLatestValidScannedBlock = ( + self: t, + ~blockNumbersAndHashes: array, + ~currentBlockHeight, + ~skipReorgDuplicationCheck=false, +) => { + let verifiedDataByBlockNumber = Js.Dict.empty() + for idx in 0 to blockNumbersAndHashes->Array.length - 1 { + let blockData = blockNumbersAndHashes->Array.getUnsafe(idx) + verifiedDataByBlockNumber->Js.Dict.set(blockData.blockNumber->Int.toString, blockData) + } - /* + /* Let's say we indexed block X with hash A. The next query we got the block X with hash B. We assume that the hash A is reorged since we received it earlier than B. @@ -234,98 +214,105 @@ module LastBlockScannedHashes: { we can skip the reorg duplication check if we're sure that the block hashes query is not coming from a different instance. (let's say we tried several times) */ - let isAlreadyReorgedResponse = skipReorgDuplicationCheck - ? false - : switch self.detectedReorgBlock { - | Some(detectedReorgBlock) => - switch verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption( - detectedReorgBlock.blockNumber->Int.toString, - ) { - | Some(verifiedBlockData) => verifiedBlockData.blockHash === detectedReorgBlock.blockHash - | None => false - } + let isAlreadyReorgedResponse = skipReorgDuplicationCheck + ? 
false + : switch self.detectedReorgBlock { + | Some(detectedReorgBlock) => + switch verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption( + detectedReorgBlock.blockNumber->Int.toString, + ) { + | Some(verifiedBlockData) => verifiedBlockData.blockHash === detectedReorgBlock.blockHash | None => false } - - if isAlreadyReorgedResponse { - Error(AlreadyReorgedHashes) - } else { - let dataByBlockNumber = self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight) - // Js engine automatically orders numeric object keys - let ascBlockNumberKeys = dataByBlockNumber->Js.Dict.keys - - let getPrevScannedBlock = idx => - switch ascBlockNumberKeys - ->Belt.Array.get(idx - 1) - ->Option.flatMap(key => { - // We should already validate that the block number is verified at the point - verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(key) - }) { - | Some(data) => Ok(data) - | None => Error(NotFound) - } - - let rec loop = idx => { - switch ascBlockNumberKeys->Belt.Array.get(idx) { - | Some(blockNumberKey) => - let scannedBlock = dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey) - switch verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(blockNumberKey) { - | None => - Js.Exn.raiseError( - `Unexpected case. Couldn't find verified hash for block number ${blockNumberKey}`, - ) - | Some(verifiedBlockData) if verifiedBlockData.blockHash === scannedBlock.blockHash => - loop(idx + 1) - | Some(_) => getPrevScannedBlock(idx) - } - | None => getPrevScannedBlock(idx) - } + | None => false } - loop(0) - } - } - /** - Return a BlockNumbersAndHashes.t rolled back to where blockData is less - than the provided blockNumber - */ - let rollbackToValidBlockNumber = ( - {dataByBlockNumber, confirmedBlockThreshold}: t, - ~blockNumber: int, - ) => { + if isAlreadyReorgedResponse { + Error(AlreadyReorgedHashes) + } else { + let dataByBlockNumber = self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight) // Js engine automatically orders numeric object keys let ascBlockNumberKeys = dataByBlockNumber->Js.Dict.keys - let newDataByBlockNumber = Js.Dict.empty() + let getPrevScannedBlock = idx => + switch ascBlockNumberKeys + ->Belt.Array.get(idx - 1) + ->Option.flatMap(key => { + // We should already validate that the block number is verified at the point + verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(key) + }) { + | Some(data) => Ok(data) + | None => Error(NotFound) + } let rec loop = idx => { switch ascBlockNumberKeys->Belt.Array.get(idx) { - | Some(blockNumberKey) => { - let scannedBlock = dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey) - let shouldKeep = scannedBlock.blockNumber <= blockNumber - if shouldKeep { - newDataByBlockNumber->Js.Dict.set(blockNumberKey, scannedBlock) - loop(idx + 1) - } else { - () - } + | Some(blockNumberKey) => + let scannedBlock = dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey) + switch verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(blockNumberKey) { + | None => + Js.Exn.raiseError( + `Unexpected case. 
Couldn't find verified hash for block number ${blockNumberKey}`, + ) + | Some(verifiedBlockData) if verifiedBlockData.blockHash === scannedBlock.blockHash => + loop(idx + 1) + | Some(_) => getPrevScannedBlock(idx) } - | None => () + | None => getPrevScannedBlock(idx) } } loop(0) + } +} - { - confirmedBlockThreshold, - dataByBlockNumber: newDataByBlockNumber, - detectedReorgBlock: None, +/** + Return a BlockNumbersAndHashes.t rolled back to where blockData is less + than the provided blockNumber + */ +let rollbackToValidBlockNumber = ( + {dataByBlockNumber, maxReorgDepth, shouldRollbackOnReorg}: t, + ~blockNumber: int, +) => { + // Js engine automatically orders numeric object keys + let ascBlockNumberKeys = dataByBlockNumber->Js.Dict.keys + + let newDataByBlockNumber = Js.Dict.empty() + + let rec loop = idx => { + switch ascBlockNumberKeys->Belt.Array.get(idx) { + | Some(blockNumberKey) => { + let scannedBlock = dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey) + let shouldKeep = scannedBlock.blockNumber <= blockNumber + if shouldKeep { + newDataByBlockNumber->Js.Dict.set(blockNumberKey, scannedBlock) + loop(idx + 1) + } else { + () + } + } + | None => () } } + loop(0) + + { + maxReorgDepth, + dataByBlockNumber: newDataByBlockNumber, + detectedReorgBlock: None, + shouldRollbackOnReorg, + } +} - let getThresholdBlockNumbers = (self: t, ~currentBlockHeight) => { - let dataByBlockNumberCopyInThreshold = - self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight) +let getThresholdBlockNumbers = (self: t, ~currentBlockHeight) => { + let dataByBlockNumberCopyInThreshold = + self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight) + + dataByBlockNumberCopyInThreshold->Js.Dict.values->Js.Array2.map(v => v.blockNumber) +} - dataByBlockNumberCopyInThreshold->Js.Dict.values->Js.Array2.map(v => v.blockNumber) +let getHashByBlockNumber = (reorgDetection: t, ~blockNumber) => { + switch reorgDetection.dataByBlockNumber->Utils.Dict.dangerouslyGetByIntNonOption(blockNumber) { + | Some(v) => Js.Null.Value(v.blockHash) + | None => Js.Null.Null } } diff --git a/codegenerator/cli/npm/envio/src/SafeCheckpointTracking.res b/codegenerator/cli/npm/envio/src/SafeCheckpointTracking.res new file mode 100644 index 000000000..39ce85cd3 --- /dev/null +++ b/codegenerator/cli/npm/envio/src/SafeCheckpointTracking.res @@ -0,0 +1,131 @@ +// We need this module to effectively track safe checkpoint id +// this is very cheap to do in memory, while requires a lot of work on a db +// especially when save_full_history is enabled. +// The safe checkpoint id can be used to optimize checkpoints traverse logic and +// make pruning operation super cheap. 
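The header comment above describes the idea behind the new SafeCheckpointTracking module: keep checkpoint ids and their block numbers in memory so the "safe" checkpoint (everything at least `maxReorgDepth` blocks behind the source) can be found cheaply, instead of computing it in the database. A minimal sketch of that lookup, in TypeScript for illustration only; the field names mirror the ReScript below, but the helper itself is not part of the indexer's API.

```typescript
// Illustrative sketch: find the newest tracked checkpoint whose block number is
// at or below (sourceBlock - maxReorgDepth). That checkpoint and everything
// older is assumed reorg-safe; 0 means "no checkpoint is safe yet".
type Tracking = {
  checkpointIds: number[];          // ascending
  checkpointBlockNumbers: number[]; // same length, ascending
  maxReorgDepth: number;
};

function safeCheckpointId(t: Tracking, sourceBlockNumber: number): number {
  const safeBlock = sourceBlockNumber - t.maxReorgDepth;
  let result = 0;
  for (let i = 0; i < t.checkpointIds.length; i++) {
    // A checkpoint exactly at the safe block still counts as safe,
    // matching the strict `>` comparison used in the ReScript version.
    if (t.checkpointBlockNumbers[i] > safeBlock) break;
    result = t.checkpointIds[i];
  }
  return result;
}

// Example: checkpoints at blocks [100, 150, 190] with ids [3, 7, 12],
// maxReorgDepth = 50, source block = 200 -> safe block = 150 -> returns 7.
```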
+type t = { + checkpointIds: array, + checkpointBlockNumbers: array, + maxReorgDepth: int, +} + +let make = ( + ~maxReorgDepth, + ~shouldRollbackOnReorg, + ~chainReorgCheckpoints: array, +) => { + if maxReorgDepth > 0 && shouldRollbackOnReorg { + let checkpointIds = Belt.Array.makeUninitializedUnsafe(chainReorgCheckpoints->Array.length) + let checkpointBlockNumbers = Belt.Array.makeUninitializedUnsafe( + chainReorgCheckpoints->Array.length, + ) + chainReorgCheckpoints->Js.Array2.forEachi((checkpoint, idx) => { + checkpointIds->Belt.Array.setUnsafe(idx, checkpoint.checkpointId) + checkpointBlockNumbers->Belt.Array.setUnsafe(idx, checkpoint.blockNumber) + }) + Some({ + checkpointIds, + checkpointBlockNumbers, + maxReorgDepth, + }) + } else { + None + } +} + +let getSafeCheckpointId = (safeCheckpointTracking: t, ~sourceBlockNumber: int) => { + let safeBlockNumber = sourceBlockNumber - safeCheckpointTracking.maxReorgDepth + + switch safeCheckpointTracking.checkpointIds { + | [] => 0 + | _ + if safeCheckpointTracking.checkpointBlockNumbers->Belt.Array.getUnsafe(0) > safeBlockNumber => 0 + | [checkpointId] => checkpointId + | _ => { + let trackingCheckpointsCount = safeCheckpointTracking.checkpointIds->Array.length + let result = ref(None) + let idx = ref(1) + + while idx.contents < trackingCheckpointsCount && result.contents === None { + if ( + safeCheckpointTracking.checkpointBlockNumbers->Belt.Array.getUnsafe(idx.contents) > + safeBlockNumber + ) { + result := + Some(safeCheckpointTracking.checkpointIds->Belt.Array.getUnsafe(idx.contents - 1)) + } + idx := idx.contents + 1 + } + + switch result.contents { + | Some(checkpointId) => checkpointId + | None => + safeCheckpointTracking.checkpointIds->Belt.Array.getUnsafe(trackingCheckpointsCount - 1) + } + } + } +} + +let updateOnNewBatch = ( + safeCheckpointTracking: t, + ~sourceBlockNumber: int, + ~chainId: int, + ~batchCheckpointIds: array, + ~batchCheckpointBlockNumbers: array, + ~batchCheckpointChainIds: array, +) => { + let safeCheckpointId = getSafeCheckpointId(safeCheckpointTracking, ~sourceBlockNumber) + + let mutCheckpointIds = [] + let mutCheckpointBlockNumbers = [] + + // Copy + Clean up old checkpoints + for idx in 0 to safeCheckpointTracking.checkpointIds->Array.length - 1 { + let checkpointId = safeCheckpointTracking.checkpointIds->Belt.Array.getUnsafe(idx) + if checkpointId >= safeCheckpointId { + mutCheckpointIds->Js.Array2.push(checkpointId)->ignore + mutCheckpointBlockNumbers + ->Js.Array2.push(safeCheckpointTracking.checkpointBlockNumbers->Belt.Array.getUnsafe(idx)) + ->ignore + } + } + + // Append new checkpoints + for idx in 0 to batchCheckpointIds->Array.length - 1 { + if batchCheckpointChainIds->Belt.Array.getUnsafe(idx) === chainId { + mutCheckpointIds->Js.Array2.push(batchCheckpointIds->Belt.Array.getUnsafe(idx))->ignore + mutCheckpointBlockNumbers + ->Js.Array2.push(batchCheckpointBlockNumbers->Belt.Array.getUnsafe(idx)) + ->ignore + } + } + + { + checkpointIds: mutCheckpointIds, + checkpointBlockNumbers: mutCheckpointBlockNumbers, + maxReorgDepth: safeCheckpointTracking.maxReorgDepth, + } +} + +let rollback = (safeCheckpointTracking: t, ~targetBlockNumber: int) => { + let mutCheckpointIds = [] + let mutCheckpointBlockNumbers = [] + + for idx in 0 to safeCheckpointTracking.checkpointIds->Array.length - 1 { + let blockNumber = safeCheckpointTracking.checkpointBlockNumbers->Belt.Array.getUnsafe(idx) + if blockNumber <= targetBlockNumber { + mutCheckpointIds + 
->Js.Array2.push(safeCheckpointTracking.checkpointIds->Belt.Array.getUnsafe(idx)) + ->ignore + mutCheckpointBlockNumbers + ->Js.Array2.push(safeCheckpointTracking.checkpointBlockNumbers->Belt.Array.getUnsafe(idx)) + ->ignore + } + } + + { + checkpointIds: mutCheckpointIds, + checkpointBlockNumbers: mutCheckpointBlockNumbers, + maxReorgDepth: safeCheckpointTracking.maxReorgDepth, + } +} diff --git a/codegenerator/cli/npm/envio/src/Utils.res b/codegenerator/cli/npm/envio/src/Utils.res index 97372e341..e4c7144e5 100644 --- a/codegenerator/cli/npm/envio/src/Utils.res +++ b/codegenerator/cli/npm/envio/src/Utils.res @@ -97,30 +97,66 @@ module Dict = { @val external mergeInPlace: (dict<'a>, dict<'a>) => dict<'a> = "Object.assign" - let map = (dict, fn) => { - let newDict = Js.Dict.empty() - let keys = dict->Js.Dict.keys - for idx in 0 to keys->Js.Array2.length - 1 { - let key = keys->Js.Array2.unsafe_get(idx) - newDict->Js.Dict.set(key, fn(dict->Js.Dict.unsafeGet(key))) + // Use %raw to support for..in which is a ~10% faster than .forEach + let mapValues: (dict<'a>, 'a => 'b) => dict<'b> = %raw(`(dict, f) => { + var target = {}, i; + for (i in dict) { + target[i] = f(dict[i]); } - newDict - } + return target; + }`) - let forEach = (dict, fn) => { - let keys = dict->Js.Dict.keys - for idx in 0 to keys->Js.Array2.length - 1 { - fn(dict->Js.Dict.unsafeGet(keys->Js.Array2.unsafe_get(idx))) + // Use %raw to support for..in which is a ~10% faster than .forEach + let filterMapValues: (dict<'a>, 'a => option<'b>) => dict<'b> = %raw(`(dict, f) => { + var target = {}, i, v; + for (i in dict) { + v = f(dict[i]); + if (v !== undefined) { + target[i] = v; + } } - } + return target; + }`) - let forEachWithKey = (dict, fn) => { - let keys = dict->Js.Dict.keys - for idx in 0 to keys->Js.Array2.length - 1 { - let key = keys->Js.Array2.unsafe_get(idx) - fn(key, dict->Js.Dict.unsafeGet(key)) + // Use %raw to support for..in which is a ~10% faster than .forEach + let mapValuesToArray: (dict<'a>, 'a => 'b) => array<'b> = %raw(`(dict, f) => { + var target = [], i; + for (i in dict) { + target.push(f(dict[i])); } - } + return target; + }`) + + // Use %raw to support for..in which is a ~10% faster than .forEach + let forEach: (dict<'a>, 'a => unit) => unit = %raw(`(dict, f) => { + for (var i in dict) { + f(dict[i]); + } + }`) + + // Use %raw to support for..in which is a ~10% faster than .forEach + let forEachWithKey: (dict<'a>, ('a, string) => unit) => unit = %raw(`(dict, f) => { + for (var i in dict) { + f(dict[i], i); + } + }`) + + // Use %raw to support for..in which is a ~10% faster than Object.keys + let size: dict<'a> => int = %raw(`(dict) => { + var size = 0, i; + for (i in dict) { + size++; + } + return size; + }`) + + // Use %raw to support for..in which is a 2x faster than Object.keys + let isEmpty: dict<'a> => bool = %raw(`(dict) => { + for (var _ in dict) { + return false + } + return true + }`) let deleteInPlace: (dict<'a>, string) => unit = %raw(`(dict, key) => { delete dict[key]; @@ -135,8 +171,6 @@ module Dict = { let shallowCopy: dict<'a> => dict<'a> = %raw(`(dict) => ({...dict})`) - let size = dict => dict->Js.Dict.keys->Js.Array2.length - @set_index external setByInt: (dict<'a>, int, 'a) => unit = "" @@ -155,6 +189,15 @@ module Math = { } } +// This is a microoptimization to avoid int32 safeguards +module UnsafeIntOperators = { + external \"*": (int, int) => int = "%mulfloat" + + external \"+": (int, int) => int = "%addfloat" + + external \"-": (int, int) => int = "%subfloat" +} + module Array = { 
@send external forEachAsync: (array<'a>, 'a => promise) => unit = "forEach" diff --git a/codegenerator/cli/npm/envio/src/db/EntityHistory.res b/codegenerator/cli/npm/envio/src/db/EntityHistory.res index 87308f5f1..707f75d04 100644 --- a/codegenerator/cli/npm/envio/src/db/EntityHistory.res +++ b/codegenerator/cli/npm/envio/src/db/EntityHistory.res @@ -3,183 +3,59 @@ open Table module RowAction = { type t = SET | DELETE let variants = [SET, DELETE] - let name = "ENTITY_HISTORY_ROW_ACTION" + let name = "ENVIO_HISTORY_CHANGE" let schema = S.enum(variants) } -type historyFieldsGeneral<'a> = { - chain_id: 'a, - block_timestamp: 'a, - block_number: 'a, - log_index: 'a, -} - -type historyFields = historyFieldsGeneral +type entityUpdateAction<'entityType> = + | Set('entityType) + | Delete -type entityIdOnly = {id: string} -let entityIdOnlySchema = S.schema(s => {id: s.matches(S.string)}) -type entityData<'entity> = Delete(entityIdOnly) | Set('entity) - -type historyRow<'entity> = { - current: historyFields, - previous: option, - entityData: entityData<'entity>, - // In the event of a rollback, some entity updates may have been - // been affected by a rollback diff. If there was no rollback diff - // this will always be false. - // If there was a rollback diff, this will be false in the case of a - // new entity update (where entity affected is not present in the diff) b - // but true if the update is related to an entity that is - // currently present in the diff - // Optional since it's discarded during parsing/serialization - containsRollbackDiffChange?: bool, +type entityUpdate<'entityType> = { + entityId: string, + entityUpdateAction: entityUpdateAction<'entityType>, + checkpointId: int, } -type previousHistoryFields = historyFieldsGeneral> - -//For flattening the optional previous fields into their own individual nullable fields -let previousHistoryFieldsSchema = S.object(s => { - chain_id: s.field("previous_entity_history_chain_id", S.null(S.int)), - block_timestamp: s.field("previous_entity_history_block_timestamp", S.null(S.int)), - block_number: s.field("previous_entity_history_block_number", S.null(S.int)), - log_index: s.field("previous_entity_history_log_index", S.null(S.int)), -}) - -let currentHistoryFieldsSchema = S.object(s => { - chain_id: s.field("entity_history_chain_id", S.int), - block_timestamp: s.field("entity_history_block_timestamp", S.int), - block_number: s.field("entity_history_block_number", S.int), - log_index: s.field("entity_history_log_index", S.int), -}) - -let makeHistoryRowSchema: S.t<'entity> => S.t> = entitySchema => { - //Maps a schema object for the given entity with all fields nullable except for the id field - //Keeps any original nullable fields - let nullableEntitySchema: S.t> = S.schema(s => - switch entitySchema->S.classify { - | Object({items}) => - let nulldict = Js.Dict.empty() - items->Belt.Array.forEach(({location, schema}) => { - let nullableFieldSchema = switch (location, schema->S.classify) { - | ("id", _) - | (_, Null(_)) => schema //TODO double check this works for array types - | _ => S.null(schema)->S.toUnknown - } - - nulldict->Js.Dict.set(location, s.matches(nullableFieldSchema)) - }) - nulldict - | _ => - Js.Exn.raiseError( - "Failed creating nullableEntitySchema. 
Expected an object schema for entity", - ) - } - ) - - let previousWithNullFields = { - chain_id: None, - block_timestamp: None, - block_number: None, - log_index: None, - } +// Prefix with envio_ to avoid colleasions +let changeFieldName = "envio_change" +let checkpointIdFieldName = "checkpoint_id" +let makeSetUpdateSchema: S.t<'entity> => S.t> = entitySchema => { S.object(s => { + s.tag(changeFieldName, RowAction.SET) { - "current": s.flatten(currentHistoryFieldsSchema), - "previous": s.flatten(previousHistoryFieldsSchema), - "entityData": s.flatten(nullableEntitySchema), - "action": s.field("action", RowAction.schema), + checkpointId: s.field(checkpointIdFieldName, S.int), + entityId: s.field("id", S.string), + entityUpdateAction: Set(s.flatten(entitySchema)), } - })->S.transform(s => { - parser: v => { - current: v["current"], - previous: switch v["previous"] { - | { - chain_id: Some(chain_id), - block_timestamp: Some(block_timestamp), - block_number: Some(block_number), - log_index: Some(log_index), - } => - Some({ - chain_id, - block_timestamp, - block_number, - log_index, - }) - | {chain_id: None, block_timestamp: None, block_number: None, log_index: None} => None - | _ => s.fail("Unexpected mix of null and non-null values in previous history fields") - }, - entityData: switch v["action"] { - | SET => v["entityData"]->(Utils.magic: Js.Dict.t => 'entity)->Set - | DELETE => - let {id} = v["entityData"]->(Utils.magic: Js.Dict.t => entityIdOnly) - Delete({id: id}) - }, - }, - serializer: v => { - let (entityData, action) = switch v.entityData { - | Set(entityData) => (entityData->(Utils.magic: 'entity => Js.Dict.t), RowAction.SET) - | Delete(entityIdOnly) => ( - entityIdOnly->(Utils.magic: entityIdOnly => Js.Dict.t), - DELETE, - ) - } - - { - "current": v.current, - "entityData": entityData, - "action": action, - "previous": switch v.previous { - | Some(historyFields) => - historyFields->(Utils.magic: historyFields => previousHistoryFields) //Cast to previousHistoryFields (with "Some" field values) - | None => previousWithNullFields - }, - } - }, }) } type t<'entity> = { table: table, - makeInsertFnQuery: (~pgSchema: string) => string, - schema: S.t>, + setUpdateSchema: S.t>, // Used for parsing - schemaRows: S.t>>, - insertFn: (Postgres.sql, Js.Json.t, ~shouldCopyCurrentEntity: bool) => promise, + setUpdateSchemaRows: S.t>>, + makeInsertDeleteUpdatesQuery: (~pgSchema: string) => string, + makeGetRollbackRemovedIdsQuery: (~pgSchema: string) => string, + makeGetRollbackRestoredEntitiesQuery: (~pgSchema: string) => string, } -type entityInternal - -external castInternal: t<'entity> => t = "%identity" -external eval: string => 'a = "eval" - -let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => { - let entity_history_block_timestamp = "entity_history_block_timestamp" - let entity_history_chain_id = "entity_history_chain_id" - let entity_history_block_number = "entity_history_block_number" - let entity_history_log_index = "entity_history_log_index" - - //NB: Ordered by hirarchy of event ordering - let currentChangeFieldNames = [ - entity_history_block_timestamp, - entity_history_chain_id, - entity_history_block_number, - entity_history_log_index, - ] - - let currentHistoryFields = - currentChangeFieldNames->Belt.Array.map(fieldName => - mkField(fieldName, Integer, ~fieldSchema=S.never, ~isPrimaryKey=true) - ) - - let previousChangeFieldNames = - currentChangeFieldNames->Belt.Array.map(fieldName => "previous_" ++ fieldName) - - let previousHistoryFields = - 
previousChangeFieldNames->Belt.Array.map(fieldName => - mkField(fieldName, Integer, ~fieldSchema=S.never, ~isNullable=true) - ) +let maxPgTableNameLength = 63 +let historyTablePrefix = "envio_history_" +let historyTableName = (~entityName, ~entityIndex) => { + let fullName = historyTablePrefix ++ entityName + if fullName->String.length > maxPgTableNameLength { + let entityIndexStr = entityIndex->Belt.Int.toString + fullName->Js.String.slice(~from=0, ~to_=maxPgTableNameLength - entityIndexStr->String.length) ++ + entityIndexStr + } else { + fullName + } +} +let fromTable = (table: table, ~schema: S.t<'entity>, ~entityIndex): t<'entity> => { let id = "id" let dataFields = table.fields->Belt.Array.keepMap(field => @@ -202,118 +78,95 @@ let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => { } ) - let actionFieldName = "action" - - let actionField = mkField(actionFieldName, Custom(RowAction.name), ~fieldSchema=S.never) + let actionField = mkField(changeFieldName, Custom(RowAction.name), ~fieldSchema=S.never) - let serialField = mkField("serial", Serial, ~fieldSchema=S.never, ~isNullable=true, ~isIndex=true) - - let dataFieldNames = dataFields->Belt.Array.map(field => field->getFieldName) + let checkpointIdField = mkField( + checkpointIdFieldName, + Integer, + ~fieldSchema=S.int, + ~isPrimaryKey=true, + ) - let originTableName = table.tableName - let historyTableName = originTableName ++ "_history" + let entityTableName = table.tableName + let historyTableName = historyTableName(~entityName=entityTableName, ~entityIndex) //ignore composite indices let table = mkTable( historyTableName, - ~fields=Belt.Array.concatMany([ - currentHistoryFields, - previousHistoryFields, - dataFields, - [actionField, serialField], - ]), + ~fields=dataFields->Belt.Array.concat([checkpointIdField, actionField]), ) - let insertFnName = `"insert_${table.tableName}"` - - let allFieldNamesDoubleQuoted = - Belt.Array.concatMany([ - currentChangeFieldNames, - previousChangeFieldNames, - dataFieldNames, - [actionFieldName], - ])->Belt.Array.map(fieldName => `"${fieldName}"`) - - let makeInsertFnQuery = (~pgSchema) => { - let historyRowArg = "history_row" - let historyTablePath = `"${pgSchema}"."${historyTableName}"` - let originTablePath = `"${pgSchema}"."${originTableName}"` - - let previousHistoryFieldsAreNullStr = - previousChangeFieldNames - ->Belt.Array.map(fieldName => `${historyRowArg}.${fieldName} IS NULL`) - ->Js.Array2.joinWith(" OR ") - - let currentChangeFieldNamesCommaSeparated = currentChangeFieldNames->Js.Array2.joinWith(", ") - - let dataFieldNamesDoubleQuoted = dataFieldNames->Belt.Array.map(fieldName => `"${fieldName}"`) - let dataFieldNamesCommaSeparated = dataFieldNamesDoubleQuoted->Js.Array2.joinWith(", ") - - `CREATE OR REPLACE FUNCTION ${insertFnName}(${historyRowArg} ${historyTablePath}, should_copy_current_entity BOOLEAN) -RETURNS void AS $$ -DECLARE - v_previous_record RECORD; - v_origin_record RECORD; -BEGIN - -- Check if previous values are not provided - IF ${previousHistoryFieldsAreNullStr} THEN - -- Find the most recent record for the same id - SELECT ${currentChangeFieldNamesCommaSeparated} INTO v_previous_record - FROM ${historyTablePath} - WHERE ${id} = ${historyRowArg}.${id} - ORDER BY ${currentChangeFieldNames - ->Belt.Array.map(fieldName => fieldName ++ " DESC") - ->Js.Array2.joinWith(", ")} - LIMIT 1; - - -- If a previous record exists, use its values - IF FOUND THEN - ${Belt.Array.zip(currentChangeFieldNames, previousChangeFieldNames) - 
->Belt.Array.map(((currentFieldName, previousFieldName)) => { - `${historyRowArg}.${previousFieldName} := v_previous_record.${currentFieldName};` - }) - ->Js.Array2.joinWith(" ")} - ElSIF should_copy_current_entity THEN - -- Check if a value for the id exists in the origin table and if so, insert a history row for it. - SELECT ${dataFieldNamesCommaSeparated} FROM ${originTablePath} WHERE id = ${historyRowArg}.${id} INTO v_origin_record; - IF FOUND THEN - INSERT INTO ${historyTablePath} (${currentChangeFieldNamesCommaSeparated}, ${dataFieldNamesCommaSeparated}, "${actionFieldName}") - -- SET the current change data fields to 0 since we don't know what they were - -- and it doesn't matter provided they are less than any new values - VALUES (${currentChangeFieldNames - ->Belt.Array.map(_ => "0") - ->Js.Array2.joinWith(", ")}, ${dataFieldNames - ->Belt.Array.map(fieldName => `v_origin_record."${fieldName}"`) - ->Js.Array2.joinWith(", ")}, 'SET'); - - ${previousChangeFieldNames - ->Belt.Array.map(previousFieldName => { - `${historyRowArg}.${previousFieldName} := 0;` - }) - ->Js.Array2.joinWith(" ")} - END IF; - END IF; - END IF; - - INSERT INTO ${historyTablePath} (${allFieldNamesDoubleQuoted->Js.Array2.joinWith(", ")}) - VALUES (${allFieldNamesDoubleQuoted - ->Belt.Array.map(fieldName => `${historyRowArg}.${fieldName}`) - ->Js.Array2.joinWith(", ")}); -END; -$$ LANGUAGE plpgsql;` + let setUpdateSchema = makeSetUpdateSchema(schema) + + let makeInsertDeleteUpdatesQuery = { + // Get all field names for the INSERT statement + let allFieldNames = table.fields->Belt.Array.map(field => field->getFieldName) + let allFieldNamesStr = + allFieldNames->Belt.Array.map(name => `"${name}"`)->Js.Array2.joinWith(", ") + + // Build the SELECT part: id from unnest, checkpoint_id from unnest, 'DELETE' for action, NULL for all other fields + let selectParts = allFieldNames->Belt.Array.map(fieldName => { + switch fieldName { + | "id" => "u.id" + | field if field == checkpointIdFieldName => "u.checkpoint_id" + | field if field == changeFieldName => "'DELETE'" + | _ => "NULL" + } + }) + let selectPartsStr = selectParts->Js.Array2.joinWith(", ") + (~pgSchema) => { + `INSERT INTO "${pgSchema}"."${historyTableName}" (${allFieldNamesStr}) +SELECT ${selectPartsStr} +FROM UNNEST($1::text[], $2::int[]) AS u(id, checkpoint_id)` + } } - let insertFnString = `(sql, rowArgs, shouldCopyCurrentEntity) => - sql\`select ${insertFnName}(ROW(${allFieldNamesDoubleQuoted - ->Belt.Array.map(fieldNameDoubleQuoted => `\${rowArgs[${fieldNameDoubleQuoted}]\}`) - ->Js.Array2.joinWith(", ")}, NULL), --NULL argument for SERIAL field - \${shouldCopyCurrentEntity});\`` - - let insertFn: (Postgres.sql, Js.Json.t, ~shouldCopyCurrentEntity: bool) => promise = - insertFnString->eval + // Get data field names for rollback queries (exclude changeFieldName and checkpointIdFieldName) + let dataFieldNames = + table.fields + ->Belt.Array.map(field => field->getFieldName) + ->Belt.Array.keep(fieldName => + fieldName != changeFieldName && fieldName != checkpointIdFieldName + ) + let dataFieldsCommaSeparated = + dataFieldNames->Belt.Array.map(name => `"${name}"`)->Js.Array2.joinWith(", ") + + // Returns entity IDs that were created after the rollback target and have no history before it. + // These entities should be deleted during rollback. 
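Before the two rollback queries defined below, here is a rough in-memory model of how they divide the work: ids whose history exists only after the rollback target are deleted from the entity table (the removed-ids query), while ids that also have earlier history are restored to their latest state at or before the target (the restored-entities query). This is an illustrative sketch only; the real SQL runs set-based over all ids and also has to account for DELETE history rows.

```typescript
// Rough model of the rollback decision for a single entity id.
type HistoryRow = { checkpointId: number; state: Record<string, unknown> };

type RollbackAction =
  | { kind: "unchanged" }                                   // no rows after the target
  | { kind: "delete" }                                      // removed-ids query
  | { kind: "restore"; state: Record<string, unknown> };    // restored-entities query

function rollbackAction(history: HistoryRow[], targetCheckpointId: number): RollbackAction {
  // `history` holds every row for one entity id, ascending by checkpointId.
  const after = history.filter((r) => r.checkpointId > targetCheckpointId);
  if (after.length === 0) return { kind: "unchanged" };
  const atOrBefore = history.filter((r) => r.checkpointId <= targetCheckpointId);
  if (atOrBefore.length === 0) return { kind: "delete" };
  return { kind: "restore", state: atOrBefore[atOrBefore.length - 1].state };
}
```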
+ let makeGetRollbackRemovedIdsQuery = (~pgSchema) => { + `SELECT DISTINCT id +FROM "${pgSchema}"."${historyTableName}" +WHERE "${checkpointIdFieldName}" > $1 + AND NOT EXISTS ( + SELECT 1 + FROM "${pgSchema}"."${historyTableName}" h + WHERE h.id = "${historyTableName}".id + AND h."${checkpointIdFieldName}" <= $1 + )` + } - let schema = makeHistoryRowSchema(schema) + // Returns the most recent entity state for IDs that need to be restored during rollback. + // For each ID modified after the rollback target, retrieves its latest state at or before the target. + let makeGetRollbackRestoredEntitiesQuery = (~pgSchema) => { + `SELECT DISTINCT ON (id) ${dataFieldsCommaSeparated} +FROM "${pgSchema}"."${historyTableName}" +WHERE "${checkpointIdFieldName}" <= $1 + AND EXISTS ( + SELECT 1 + FROM "${pgSchema}"."${historyTableName}" h + WHERE h.id = "${historyTableName}".id + AND h."${checkpointIdFieldName}" > $1 + ) +ORDER BY id, "${checkpointIdFieldName}" DESC` + } - {table, makeInsertFnQuery, schema, schemaRows: S.array(schema), insertFn} + { + table, + setUpdateSchema, + setUpdateSchemaRows: S.array(setUpdateSchema), + makeInsertDeleteUpdatesQuery, + makeGetRollbackRemovedIdsQuery, + makeGetRollbackRestoredEntitiesQuery, + } } type safeReorgBlocks = { @@ -323,60 +176,105 @@ type safeReorgBlocks = { // We want to keep only the minimum history needed to survive chain reorgs and delete everything older. // Each chain gives us a "safe block": we assume reorgs will never happen at that block. +// The latest checkpoint belonging to safe blocks of all chains is the safe checkpoint id. // // What we keep per entity id: -// - The latest history row at or before the safe block (the "anchor"). This is the last state that could -// ever be relevant during a rollback. // - If there are history rows in reorg threshold (after the safe block), we keep the anchor and delete all older rows. // - If there are no history rows in reorg threshold (after the safe block), even the anchor is redundant, so we delete it too. +// Anchor is the latest history row at or before the safe checkpoint id. +// This is the last state that could ever be relevant during a rollback. // // Why this is safe: -// - Rollbacks will not cross the safe block, so rows older than the anchor can never be referenced again. -// - If nothing changed in reorg threshold (after the safe block), the current state for that id can be reconstructed from the +// - Rollbacks will not cross the safe checkpoint id, so rows older than the anchor can never be referenced again. +// - If nothing changed in reorg threshold (after the safe checkpoint), the current state for that id can be reconstructed from the // origin table; we do not need a pre-safe anchor for it. -// -// Performance notes: -// - Multi-chain batching: inputs are expanded with unnest, letting one prepared statement prune many chains and -// enabling the planner to use indexes per chain_id efficiently. -// - Minimal row touches: we only compute keep_serial per id and delete strictly older rows; this reduces write -// amplification and vacuum pressure compared to broad time-based purges. -// - Contention-awareness: the DELETE joins on ids first, narrowing target rows early to limit locking and buffer churn. 
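The comment above defines the pruning rule per entity id: keep the anchor (latest history row at or before the safe checkpoint) only when the id also has rows inside the reorg threshold; otherwise drop even the anchor, since the current state can be rebuilt from the entity table. A small worked example of that rule, with made-up checkpoint numbers, as a hedged TypeScript sketch rather than the production query:

```typescript
// Worked example of the pruning rule, safeCheckpointId = 10:
//   id "a": history at checkpoints [3, 7, 12]
//     anchor = 7 (latest <= 10), rows exist after 10 (12) -> keep 7 and 12, delete 3
//   id "b": history at checkpoints [2, 6]
//     anchor = 6, nothing after 10 -> delete 2 and 6
function rowsToPrune(checkpoints: number[], safeCheckpointId: number): number[] {
  const anchor = Math.max(...checkpoints.filter((c) => c <= safeCheckpointId), -Infinity);
  const hasRowsInThreshold = checkpoints.some((c) => c > safeCheckpointId);
  return checkpoints.filter(
    // delete everything older than the anchor, and the anchor itself
    // when the id has no rows inside the reorg threshold
    (c) => c < anchor || (c === anchor && !hasRowsInThreshold)
  );
}
```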
-let makePruneStaleEntityHistoryQuery = (~entityName, ~pgSchema) => { - let historyTableName = entityName ++ "_history" - let historyTableRef = `"${pgSchema}"."${historyTableName}"` +let makePruneStaleEntityHistoryQuery = (~entityName, ~entityIndex, ~pgSchema) => { + let historyTableRef = `"${pgSchema}"."${historyTableName(~entityName, ~entityIndex)}"` - `WITH safe AS ( - SELECT s.chain_id, s.block_number - FROM unnest($1::int[], $2::bigint[]) AS s(chain_id, block_number) -), -max_before_safe AS ( - SELECT t.id, MAX(t.serial) AS keep_serial - FROM ${historyTableRef} t - JOIN safe s - ON s.chain_id = t.entity_history_chain_id - AND t.entity_history_block_number <= s.block_number + `WITH anchors AS ( + SELECT t.id, MAX(t.${checkpointIdFieldName}) AS keep_checkpoint_id + FROM ${historyTableRef} t WHERE t.${checkpointIdFieldName} <= $1 GROUP BY t.id -), -post_safe AS ( - SELECT DISTINCT t.id - FROM ${historyTableRef} t - JOIN safe s - ON s.chain_id = t.entity_history_chain_id - AND t.entity_history_block_number > s.block_number ) DELETE FROM ${historyTableRef} d -USING max_before_safe m -LEFT JOIN post_safe p ON p.id = m.id -WHERE d.id = m.id +USING anchors a +WHERE d.id = a.id AND ( - d.serial < m.keep_serial - OR (p.id IS NULL AND d.serial = m.keep_serial) + d.${checkpointIdFieldName} < a.keep_checkpoint_id + OR ( + d.${checkpointIdFieldName} = a.keep_checkpoint_id AND + NOT EXISTS ( + SELECT 1 FROM ${historyTableRef} ps + WHERE ps.id = d.id AND ps.${checkpointIdFieldName} > $1 + ) + ) );` } -let pruneStaleEntityHistory = (sql, ~entityName, ~pgSchema, ~safeReorgBlocks): promise => { +let pruneStaleEntityHistory = ( + sql, + ~entityName, + ~entityIndex, + ~pgSchema, + ~safeCheckpointId, +): promise => { sql->Postgres.preparedUnsafe( - makePruneStaleEntityHistoryQuery(~entityName, ~pgSchema), - (safeReorgBlocks.chainIds, safeReorgBlocks.blockNumbers)->Utils.magic, + makePruneStaleEntityHistoryQuery(~entityName, ~entityIndex, ~pgSchema), + [safeCheckpointId]->Utils.magic, + ) +} + +// If an entity doesn't have a history before the update +// we create it automatically with checkpoint_id 0 +let makeBackfillHistoryQuery = (~pgSchema, ~entityName, ~entityIndex) => { + let historyTableRef = `"${pgSchema}"."${historyTableName(~entityName, ~entityIndex)}"` + `WITH target_ids AS ( + SELECT UNNEST($1::${(Text: Table.fieldType :> string)}[]) AS id +), +missing_history AS ( + SELECT e.* + FROM "${pgSchema}"."${entityName}" e + JOIN target_ids t ON e.id = t.id + LEFT JOIN ${historyTableRef} h ON h.id = e.id + WHERE h.id IS NULL +) +INSERT INTO ${historyTableRef} +SELECT *, 0 AS ${checkpointIdFieldName}, '${(RowAction.SET :> string)}' as ${changeFieldName} +FROM missing_history;` +} + +let backfillHistory = (sql, ~pgSchema, ~entityName, ~entityIndex, ~ids: array) => { + sql + ->Postgres.preparedUnsafe( + makeBackfillHistoryQuery(~entityName, ~entityIndex, ~pgSchema), + [ids]->Obj.magic, + ) + ->Promise.ignoreValue +} + +let insertDeleteUpdates = ( + sql, + ~pgSchema, + ~entityHistory, + ~batchDeleteEntityIds, + ~batchDeleteCheckpointIds, +) => { + sql + ->Postgres.preparedUnsafe( + entityHistory.makeInsertDeleteUpdatesQuery(~pgSchema), + (batchDeleteEntityIds, batchDeleteCheckpointIds)->Obj.magic, + ) + ->Promise.ignoreValue +} + +let rollback = (sql, ~pgSchema, ~entityName, ~entityIndex, ~rollbackTargetCheckpointId: int) => { + sql + ->Postgres.preparedUnsafe( + `DELETE FROM "${pgSchema}"."${historyTableName( + ~entityName, + ~entityIndex, + )}" WHERE "${checkpointIdFieldName}" > $1;`, + 
[rollbackTargetCheckpointId]->Utils.magic, ) + ->Promise.ignoreValue } diff --git a/codegenerator/cli/npm/envio/src/db/InternalTable.gen.ts b/codegenerator/cli/npm/envio/src/db/InternalTable.gen.ts index 69ffd044e..f59727739 100644 --- a/codegenerator/cli/npm/envio/src/db/InternalTable.gen.ts +++ b/codegenerator/cli/npm/envio/src/db/InternalTable.gen.ts @@ -7,6 +7,19 @@ import type {Json_t as Js_Json_t} from '../../src/Js.shim'; import type {t as Address_t} from '../../src/Address.gen'; +export type DynamicContractRegistry_t = { + readonly id: string; + readonly chain_id: number; + readonly registering_event_block_number: number; + readonly registering_event_log_index: number; + readonly registering_event_block_timestamp: number; + readonly registering_event_contract_name: string; + readonly registering_event_name: string; + readonly registering_event_src_address: Address_t; + readonly contract_address: Address_t; + readonly contract_name: string +}; + export type RawEvents_t = { readonly chain_id: number; readonly event_id: bigint; @@ -21,16 +34,3 @@ export type RawEvents_t = { readonly transaction_fields: Js_Json_t; readonly params: Js_Json_t }; - -export type DynamicContractRegistry_t = { - readonly id: string; - readonly chain_id: number; - readonly registering_event_block_number: number; - readonly registering_event_log_index: number; - readonly registering_event_block_timestamp: number; - readonly registering_event_contract_name: string; - readonly registering_event_name: string; - readonly registering_event_src_address: Address_t; - readonly contract_address: Address_t; - readonly contract_name: string -}; diff --git a/codegenerator/cli/npm/envio/src/db/InternalTable.res b/codegenerator/cli/npm/envio/src/db/InternalTable.res index 1dc236fdf..105b1a5cd 100644 --- a/codegenerator/cli/npm/envio/src/db/InternalTable.res +++ b/codegenerator/cli/npm/envio/src/db/InternalTable.res @@ -5,6 +5,74 @@ let isPrimaryKey = true let isNullable = true let isIndex = true +module DynamicContractRegistry = { + let name = "dynamic_contract_registry" + let index = -1 + + let makeId = (~chainId, ~contractAddress) => { + chainId->Belt.Int.toString ++ "-" ++ contractAddress->Address.toString + } + + // @genType Used for Test DB + @genType + type t = { + id: string, + @as("chain_id") chainId: int, + @as("registering_event_block_number") registeringEventBlockNumber: int, + @as("registering_event_log_index") registeringEventLogIndex: int, + @as("registering_event_block_timestamp") registeringEventBlockTimestamp: int, + @as("registering_event_contract_name") registeringEventContractName: string, + @as("registering_event_name") registeringEventName: string, + @as("registering_event_src_address") registeringEventSrcAddress: Address.t, + @as("contract_address") contractAddress: Address.t, + @as("contract_name") contractName: string, + } + + let schema = S.schema(s => { + id: s.matches(S.string), + chainId: s.matches(S.int), + registeringEventBlockNumber: s.matches(S.int), + registeringEventLogIndex: s.matches(S.int), + registeringEventContractName: s.matches(S.string), + registeringEventName: s.matches(S.string), + registeringEventSrcAddress: s.matches(Address.schema), + registeringEventBlockTimestamp: s.matches(S.int), + contractAddress: s.matches(Address.schema), + contractName: s.matches(S.string), + }) + + let rowsSchema = S.array(schema) + + let table = mkTable( + name, + ~fields=[ + mkField("id", Text, ~isPrimaryKey, ~fieldSchema=S.string), + mkField("chain_id", Integer, ~fieldSchema=S.int), + 
mkField("registering_event_block_number", Integer, ~fieldSchema=S.int), + mkField("registering_event_log_index", Integer, ~fieldSchema=S.int), + mkField("registering_event_block_timestamp", Integer, ~fieldSchema=S.int), + mkField("registering_event_contract_name", Text, ~fieldSchema=S.string), + mkField("registering_event_name", Text, ~fieldSchema=S.string), + mkField("registering_event_src_address", Text, ~fieldSchema=Address.schema), + mkField("contract_address", Text, ~fieldSchema=Address.schema), + mkField("contract_name", Text, ~fieldSchema=S.string), + ], + ) + + let entityHistory = table->EntityHistory.fromTable(~schema, ~entityIndex=index) + + external castToInternal: t => Internal.entity = "%identity" + + let config = { + name, + index, + schema, + rowsSchema, + table, + entityHistory, + }->Internal.fromGenericEntityConfig +} + module Chains = { type progressFields = [ | #progress_block @@ -16,6 +84,7 @@ module Chains = { | #id | #start_block | #end_block + | #max_reorg_depth | #source_block | #first_event_block | #buffer_block @@ -28,6 +97,7 @@ module Chains = { #id, #start_block, #end_block, + #max_reorg_depth, #source_block, #first_event_block, #buffer_block, @@ -52,6 +122,7 @@ module Chains = { @as("id") id: int, @as("start_block") startBlock: int, @as("end_block") endBlock: Js.null, + @as("max_reorg_depth") maxReorgDepth: int, @as("progress_block") progressBlockNumber: int, @as("events_processed") numEventsProcessed: int, ...metaFields, @@ -64,6 +135,7 @@ module Chains = { // Values populated from config mkField((#start_block: field :> string), Integer, ~fieldSchema=S.int), mkField((#end_block: field :> string), Integer, ~fieldSchema=S.null(S.int), ~isNullable), + mkField((#max_reorg_depth: field :> string), Integer, ~fieldSchema=S.int), // Block number of the latest block that was fetched from the source mkField((#buffer_block: field :> string), Integer, ~fieldSchema=S.int), // Block number of the currently active source @@ -98,6 +170,7 @@ module Chains = { id: chainConfig.id, startBlock: chainConfig.startBlock, endBlock: chainConfig.endBlock->Js.Null.fromOption, + maxReorgDepth: chainConfig.maxReorgDepth, blockHeight: 0, firstEventBlockNumber: Js.Null.empty, latestFetchedBlockNumber: -1, @@ -160,7 +233,51 @@ VALUES ${valuesRows->Js.Array2.joinWith(",\n ")};`, `UPDATE "${pgSchema}"."${table.tableName}" SET ${setClauses->Js.Array2.joinWith(",\n ")} -WHERE "id" = $1;` +WHERE "${(#id: field :> string)}" = $1;` + } + + type rawInitialState = { + id: int, + startBlock: int, + endBlock: Js.Null.t, + maxReorgDepth: int, + firstEventBlockNumber: Js.Null.t, + timestampCaughtUpToHeadOrEndblock: Js.Null.t, + numEventsProcessed: int, + progressBlockNumber: int, + dynamicContracts: array, + } + + // FIXME: Using registering_event_block_number for startBlock + // seems incorrect, since there might be a custom start block + // for the contract. 
+ // TODO: Write a repro test where it might break something and fix + let makeGetInitialStateQuery = (~pgSchema) => { + `SELECT "${(#id: field :> string)}" as "id", +"${(#start_block: field :> string)}" as "startBlock", +"${(#end_block: field :> string)}" as "endBlock", +"${(#max_reorg_depth: field :> string)}" as "maxReorgDepth", +"${(#first_event_block: field :> string)}" as "firstEventBlockNumber", +"${(#ready_at: field :> string)}" as "timestampCaughtUpToHeadOrEndblock", +"${(#events_processed: field :> string)}" as "numEventsProcessed", +"${(#progress_block: field :> string)}" as "progressBlockNumber", +( + SELECT COALESCE(json_agg(json_build_object( + 'address', "contract_address", + 'contractName', "contract_name", + 'startBlock', "registering_event_block_number", + 'registrationBlock', "registering_event_block_number" + )), '[]'::json) + FROM "${pgSchema}"."${DynamicContractRegistry.table.tableName}" + WHERE "chain_id" = chains."${(#id: field :> string)}" +) as "dynamicContracts" +FROM "${pgSchema}"."${table.tableName}" as chains;` + } + + let getInitialState = (sql, ~pgSchema) => { + sql + ->Postgres.unsafe(makeGetInitialStateQuery(~pgSchema)) + ->(Utils.magic: promise> => promise>) } let progressFields: array = [#progress_block, #events_processed] @@ -182,7 +299,7 @@ WHERE "id" = $1;` let promises = [] - chainsData->Utils.Dict.forEachWithKey((chainId, data) => { + chainsData->Utils.Dict.forEachWithKey((data, chainId) => { let params = [] // Push id first (for WHERE clause) @@ -201,7 +318,13 @@ WHERE "id" = $1;` Promise.all(promises) } - let setProgressedChains = (sql, ~pgSchema, ~progressedChains: array) => { + type progressedChain = { + chainId: int, + progressBlockNumber: int, + totalEventsProcessed: int, + } + + let setProgressedChains = (sql, ~pgSchema, ~progressedChains: array) => { let query = makeProgressFieldsUpdateQuery(~pgSchema) let promises = [] @@ -254,21 +377,175 @@ module PersistedState = { ) } -module EndOfBlockRangeScannedData = { +module Checkpoints = { + type field = [ + | #id + | #chain_id + | #block_number + | #block_hash + | #events_processed + ] + type t = { - chain_id: int, - block_number: int, - block_hash: string, + id: int, + @as("chain_id") + chainId: int, + @as("block_number") + blockNumber: int, + @as("block_hash") + blockHash: Js.null, + @as("events_processed") + eventsProcessed: int, } + let initialCheckpointId = 0 + let table = mkTable( - "end_of_block_range_scanned_data", + "envio_checkpoints", ~fields=[ - mkField("chain_id", Integer, ~fieldSchema=S.int, ~isPrimaryKey), - mkField("block_number", Integer, ~fieldSchema=S.int, ~isPrimaryKey), - mkField("block_hash", Text, ~fieldSchema=S.string), + mkField((#id: field :> string), Integer, ~fieldSchema=S.int, ~isPrimaryKey), + mkField((#chain_id: field :> string), Integer, ~fieldSchema=S.int), + mkField((#block_number: field :> string), Integer, ~fieldSchema=S.int), + mkField((#block_hash: field :> string), Text, ~fieldSchema=S.null(S.string), ~isNullable), + mkField((#events_processed: field :> string), Integer, ~fieldSchema=S.int), ], ) + + let makeGetReorgCheckpointsQuery = (~pgSchema): string => { + // Use CTE to pre-filter chains and compute safe_block once per chain + // This is faster because: + // 1. Chains table is small, so filtering it first is cheap + // 2. safe_block is computed once per chain, not per checkpoint + // 3. 
Query planner can materialize the small CTE result before joining + `WITH reorg_chains AS ( + SELECT + "${(#id: Chains.field :> string)}" as id, + "${(#source_block: Chains.field :> string)}" - "${(#max_reorg_depth: Chains.field :> string)}" AS safe_block + FROM "${pgSchema}"."${Chains.table.tableName}" + WHERE "${(#max_reorg_depth: Chains.field :> string)}" > 0 + AND "${(#progress_block: Chains.field :> string)}" > "${(#source_block: Chains.field :> string)}" - "${(#max_reorg_depth: Chains.field :> string)}" +) +SELECT + cp."${(#id: field :> string)}", + cp."${(#chain_id: field :> string)}", + cp."${(#block_number: field :> string)}", + cp."${(#block_hash: field :> string)}" +FROM "${pgSchema}"."${table.tableName}" cp +INNER JOIN reorg_chains rc + ON cp."${(#chain_id: field :> string)}" = rc.id +WHERE cp."${(#block_hash: field :> string)}" IS NOT NULL + AND cp."${(#block_number: field :> string)}" >= rc.safe_block;` // Include safe_block checkpoint to use it for safe checkpoint tracking + } + + let makeCommitedCheckpointIdQuery = (~pgSchema) => { + `SELECT COALESCE(MAX(${(#id: field :> string)}), ${initialCheckpointId->Belt.Int.toString}) AS id FROM "${pgSchema}"."${table.tableName}";` + } + + let makeInsertCheckpointQuery = (~pgSchema) => { + `INSERT INTO "${pgSchema}"."${table.tableName}" ("${(#id: field :> string)}", "${(#chain_id: field :> string)}", "${(#block_number: field :> string)}", "${(#block_hash: field :> string)}", "${(#events_processed: field :> string)}") +SELECT * FROM unnest($1::${(Integer :> string)}[],$2::${(Integer :> string)}[],$3::${(Integer :> string)}[],$4::${(Text :> string)}[],$5::${(Integer :> string)}[]);` + } + + let insert = ( + sql, + ~pgSchema, + ~checkpointIds, + ~checkpointChainIds, + ~checkpointBlockNumbers, + ~checkpointBlockHashes, + ~checkpointEventsProcessed, + ) => { + let query = makeInsertCheckpointQuery(~pgSchema) + + sql + ->Postgres.preparedUnsafe( + query, + ( + checkpointIds, + checkpointChainIds, + checkpointBlockNumbers, + checkpointBlockHashes, + checkpointEventsProcessed, + )->( + Utils.magic: ( + (array, array, array, array>, array) + ) => unknown + ), + ) + ->Promise.ignoreValue + } + + let rollback = (sql, ~pgSchema, ~rollbackTargetCheckpointId: int) => { + sql + ->Postgres.preparedUnsafe( + `DELETE FROM "${pgSchema}"."${table.tableName}" WHERE "${(#id: field :> string)}" > $1;`, + [rollbackTargetCheckpointId]->Utils.magic, + ) + ->Promise.ignoreValue + } + + let makePruneStaleCheckpointsQuery = (~pgSchema) => { + `DELETE FROM "${pgSchema}"."${table.tableName}" WHERE "${(#id: field :> string)}" < $1;` + } + + let pruneStaleCheckpoints = (sql, ~pgSchema, ~safeCheckpointId: int) => { + sql + ->Postgres.preparedUnsafe( + makePruneStaleCheckpointsQuery(~pgSchema), + [safeCheckpointId]->Obj.magic, + ) + ->Promise.ignoreValue + } + + let makeGetRollbackTargetCheckpointQuery = (~pgSchema) => { + `SELECT "${(#id: field :> string)}" FROM "${pgSchema}"."${table.tableName}" +WHERE + "${(#chain_id: field :> string)}" = $1 AND + "${(#block_number: field :> string)}" <= $2 +ORDER BY "${(#id: field :> string)}" DESC +LIMIT 1;` + } + + let getRollbackTargetCheckpoint = ( + sql, + ~pgSchema, + ~reorgChainId: int, + ~lastKnownValidBlockNumber: int, + ) => { + sql + ->Postgres.preparedUnsafe( + makeGetRollbackTargetCheckpointQuery(~pgSchema), + (reorgChainId, lastKnownValidBlockNumber)->Obj.magic, + ) + ->(Utils.magic: promise => promise>) + } + + let makeGetRollbackProgressDiffQuery = (~pgSchema) => { + `SELECT + "${(#chain_id: field :> string)}", + 
SUM("${(#events_processed: field :> string)}") as events_processed_diff, + MIN("${(#block_number: field :> string)}") - 1 as new_progress_block_number +FROM "${pgSchema}"."${table.tableName}" +WHERE "${(#id: field :> string)}" > $1 +GROUP BY "${(#chain_id: field :> string)}";` + } + + let getRollbackProgressDiff = (sql, ~pgSchema, ~rollbackTargetCheckpointId: int) => { + sql + ->Postgres.preparedUnsafe( + makeGetRollbackProgressDiffQuery(~pgSchema), + [rollbackTargetCheckpointId]->Obj.magic, + ) + ->( + Utils.magic: promise => promise< + array<{ + "chain_id": int, + "events_processed_diff": string, + "new_progress_block_number": int, + }>, + > + ) + } } module RawEvents = { @@ -331,101 +608,35 @@ module Views = { let makeMetaViewQuery = (~pgSchema) => { `CREATE VIEW "${pgSchema}"."${metaViewName}" AS - SELECT - "${(#id: Chains.field :> string)}" AS "chainId", - "${(#start_block: Chains.field :> string)}" AS "startBlock", - "${(#end_block: Chains.field :> string)}" AS "endBlock", - "${(#progress_block: Chains.field :> string)}" AS "progressBlock", - "${(#buffer_block: Chains.field :> string)}" AS "bufferBlock", - "${(#first_event_block: Chains.field :> string)}" AS "firstEventBlock", - "${(#events_processed: Chains.field :> string)}" AS "eventsProcessed", - "${(#source_block: Chains.field :> string)}" AS "sourceBlock", - "${(#ready_at: Chains.field :> string)}" AS "readyAt", - ("${(#ready_at: Chains.field :> string)}" IS NOT NULL) AS "isReady" - FROM "${pgSchema}"."${Chains.table.tableName}" - ORDER BY "${(#id: Chains.field :> string)}";` +SELECT + "${(#id: Chains.field :> string)}" AS "chainId", + "${(#start_block: Chains.field :> string)}" AS "startBlock", + "${(#end_block: Chains.field :> string)}" AS "endBlock", + "${(#progress_block: Chains.field :> string)}" AS "progressBlock", + "${(#buffer_block: Chains.field :> string)}" AS "bufferBlock", + "${(#first_event_block: Chains.field :> string)}" AS "firstEventBlock", + "${(#events_processed: Chains.field :> string)}" AS "eventsProcessed", + "${(#source_block: Chains.field :> string)}" AS "sourceBlock", + "${(#ready_at: Chains.field :> string)}" AS "readyAt", + ("${(#ready_at: Chains.field :> string)}" IS NOT NULL) AS "isReady" +FROM "${pgSchema}"."${Chains.table.tableName}" +ORDER BY "${(#id: Chains.field :> string)}";` } let makeChainMetadataViewQuery = (~pgSchema) => { `CREATE VIEW "${pgSchema}"."${chainMetadataViewName}" AS - SELECT - "${(#source_block: Chains.field :> string)}" AS "block_height", - "${(#id: Chains.field :> string)}" AS "chain_id", - "${(#end_block: Chains.field :> string)}" AS "end_block", - "${(#first_event_block: Chains.field :> string)}" AS "first_event_block_number", - "${(#_is_hyper_sync: Chains.field :> string)}" AS "is_hyper_sync", - "${(#buffer_block: Chains.field :> string)}" AS "latest_fetched_block_number", - "${(#progress_block: Chains.field :> string)}" AS "latest_processed_block", - "${(#_num_batches_fetched: Chains.field :> string)}" AS "num_batches_fetched", - "${(#events_processed: Chains.field :> string)}" AS "num_events_processed", - "${(#start_block: Chains.field :> string)}" AS "start_block", - "${(#ready_at: Chains.field :> string)}" AS "timestamp_caught_up_to_head_or_endblock" - FROM "${pgSchema}"."${Chains.table.tableName}";` - } -} - -module DynamicContractRegistry = { - let name = "dynamic_contract_registry" - - let makeId = (~chainId, ~contractAddress) => { - chainId->Belt.Int.toString ++ "-" ++ contractAddress->Address.toString - } - - // @genType Used for Test DB - @genType - type t = { - 
id: string, - @as("chain_id") chainId: int, - @as("registering_event_block_number") registeringEventBlockNumber: int, - @as("registering_event_log_index") registeringEventLogIndex: int, - @as("registering_event_block_timestamp") registeringEventBlockTimestamp: int, - @as("registering_event_contract_name") registeringEventContractName: string, - @as("registering_event_name") registeringEventName: string, - @as("registering_event_src_address") registeringEventSrcAddress: Address.t, - @as("contract_address") contractAddress: Address.t, - @as("contract_name") contractName: string, +SELECT + "${(#source_block: Chains.field :> string)}" AS "block_height", + "${(#id: Chains.field :> string)}" AS "chain_id", + "${(#end_block: Chains.field :> string)}" AS "end_block", + "${(#first_event_block: Chains.field :> string)}" AS "first_event_block_number", + "${(#_is_hyper_sync: Chains.field :> string)}" AS "is_hyper_sync", + "${(#buffer_block: Chains.field :> string)}" AS "latest_fetched_block_number", + "${(#progress_block: Chains.field :> string)}" AS "latest_processed_block", + "${(#_num_batches_fetched: Chains.field :> string)}" AS "num_batches_fetched", + "${(#events_processed: Chains.field :> string)}" AS "num_events_processed", + "${(#start_block: Chains.field :> string)}" AS "start_block", + "${(#ready_at: Chains.field :> string)}" AS "timestamp_caught_up_to_head_or_endblock" +FROM "${pgSchema}"."${Chains.table.tableName}";` } - - let schema = S.schema(s => { - id: s.matches(S.string), - chainId: s.matches(S.int), - registeringEventBlockNumber: s.matches(S.int), - registeringEventLogIndex: s.matches(S.int), - registeringEventContractName: s.matches(S.string), - registeringEventName: s.matches(S.string), - registeringEventSrcAddress: s.matches(Address.schema), - registeringEventBlockTimestamp: s.matches(S.int), - contractAddress: s.matches(Address.schema), - contractName: s.matches(S.string), - }) - - let rowsSchema = S.array(schema) - - let table = mkTable( - name, - ~fields=[ - mkField("id", Text, ~isPrimaryKey, ~fieldSchema=S.string), - mkField("chain_id", Integer, ~fieldSchema=S.int), - mkField("registering_event_block_number", Integer, ~fieldSchema=S.int), - mkField("registering_event_log_index", Integer, ~fieldSchema=S.int), - mkField("registering_event_block_timestamp", Integer, ~fieldSchema=S.int), - mkField("registering_event_contract_name", Text, ~fieldSchema=S.string), - mkField("registering_event_name", Text, ~fieldSchema=S.string), - mkField("registering_event_src_address", Text, ~fieldSchema=Address.schema), - mkField("contract_address", Text, ~fieldSchema=Address.schema), - mkField("contract_name", Text, ~fieldSchema=S.string), - ], - ) - - let entityHistory = table->EntityHistory.fromTable(~schema) - - external castToInternal: t => Internal.entity = "%identity" - - let config = { - name, - schema, - rowsSchema, - table, - entityHistory, - }->Internal.fromGenericEntityConfig } diff --git a/codegenerator/cli/npm/envio/src/db/Table.res b/codegenerator/cli/npm/envio/src/db/Table.res index 714b0495b..95f483c20 100644 --- a/codegenerator/cli/npm/envio/src/db/Table.res +++ b/codegenerator/cli/npm/envio/src/db/Table.res @@ -5,6 +5,7 @@ type derived @unboxed type fieldType = | @as("INTEGER") Integer + | @as("BIGINT") BigInt | @as("BOOLEAN") Boolean | @as("NUMERIC") Numeric | @as("DOUBLE PRECISION") DoublePrecision diff --git a/codegenerator/cli/npm/envio/src/sources/EventRouter.res b/codegenerator/cli/npm/envio/src/sources/EventRouter.res index 22fef72ac..3a62fc178 100644 --- 
a/codegenerator/cli/npm/envio/src/sources/EventRouter.res +++ b/codegenerator/cli/npm/envio/src/sources/EventRouter.res @@ -34,7 +34,7 @@ module Group = { group: t<'a>, ~contractAddress, ~blockNumber, - ~indexingContracts: dict, + ~indexingContracts: dict, ) => switch group { | {wildcard, byContractName} => diff --git a/codegenerator/cli/npm/envio/src/sources/Source.res b/codegenerator/cli/npm/envio/src/sources/Source.res index e7f491a64..5cbdb30b2 100644 --- a/codegenerator/cli/npm/envio/src/sources/Source.res +++ b/codegenerator/cli/npm/envio/src/sources/Source.res @@ -49,7 +49,7 @@ type t = { ~fromBlock: int, ~toBlock: option, ~addressesByContractName: dict>, - ~indexingContracts: dict, + ~indexingContracts: dict, ~currentBlockHeight: int, ~partitionId: string, ~selection: FetchState.selection, diff --git a/codegenerator/cli/src/config_parsing/chain_helpers.rs b/codegenerator/cli/src/config_parsing/chain_helpers.rs index b9b6f88eb..4cccace43 100644 --- a/codegenerator/cli/src/config_parsing/chain_helpers.rs +++ b/codegenerator/cli/src/config_parsing/chain_helpers.rs @@ -76,7 +76,6 @@ pub enum Network { #[subenum(HypersyncNetwork, NetworkWithExplorer)] Berachain = 80094, - #[subenum(HypersyncNetwork)] BerachainBartio = 80084, #[subenum(HypersyncNetwork, NetworkWithExplorer)] @@ -117,6 +116,15 @@ pub enum Network { #[subenum(HypersyncNetwork)] ChainwebTestnet21 = 5921, + #[subenum(HypersyncNetwork)] + ChainwebTestnet22 = 5922, + + #[subenum(HypersyncNetwork)] + ChainwebTestnet23 = 5923, + + #[subenum(HypersyncNetwork)] + ChainwebTestnet24 = 5924, + #[subenum(HypersyncNetwork)] Chiliz = 88888, @@ -137,6 +145,9 @@ pub enum Network { #[subenum(HypersyncNetwork)] Cyber = 7560, + #[subenum(HypersyncNetwork)] + Damon = 341, + Darwinia = 46, #[subenum( @@ -172,10 +183,7 @@ pub enum Network { #[subenum(GraphNetwork)] Fuse = 122, - #[subenum( - HypersyncNetwork(serde(rename = "galadriel-devnet (Stone)")), - NetworkWithExplorer - )] + #[subenum(NetworkWithExplorer)] GaladrielDevnet = 696969, #[subenum(HypersyncNetwork, NetworkWithExplorer, GraphNetwork)] @@ -293,6 +301,9 @@ pub enum Network { PharosDevnet = 50002, + #[subenum(HypersyncNetwork)] + Plasma = 9745, + #[subenum(HypersyncNetwork)] Plume = 98866, @@ -330,6 +341,9 @@ pub enum Network { #[subenum(GraphNetwork, NetworkWithExplorer)] ScrollSepolia = 534351, + #[subenum(HypersyncNetwork)] + SentientTestnet = 1184075182, + #[subenum(HypersyncNetwork, NetworkWithExplorer, GraphNetwork)] Sepolia = 11155111, @@ -366,7 +380,7 @@ pub enum Network { #[subenum(HypersyncNetwork, NetworkWithExplorer)] Unichain = 130, - #[subenum(HypersyncNetwork, NetworkWithExplorer)] + #[subenum(NetworkWithExplorer)] UnichainSepolia = 1301, #[subenum(HypersyncNetwork, NetworkWithExplorer)] @@ -456,10 +470,14 @@ impl Network { | Network::CeloBaklava | Network::ChainwebTestnet20 | Network::ChainwebTestnet21 + | Network::ChainwebTestnet22 + | Network::ChainwebTestnet23 + | Network::ChainwebTestnet24 | Network::Chiliz | Network::Clover | Network::Crab | Network::Cyber + | Network::Damon | Network::Darwinia | Network::Evmos | Network::EthereumMainnet @@ -498,12 +516,14 @@ impl Network { | Network::Polygon | Network::PolygonZkevm | Network::PolygonZkevmTestnet + | Network::Plasma | Network::Plume | Network::Rinkeby | Network::Rsk | Network::Scroll | Network::ScrollSepolia | Network::Sepolia + | Network::SentientTestnet | Network::ShimmerEvm | Network::Sophon | Network::SophonTestnet @@ -595,17 +615,18 @@ impl HypersyncNetwork { Xdc | Polygon | ArbitrumOne | MegaethTestnet => 
Silver, Linea | Berachain | Blast | Amoy | ZksyncEra | ArbitrumNova | Avalanche | Bsc - | Taraxa => Bronze, + | Taraxa | Plasma => Bronze, Curtis | PolygonZkevm | Lukso | Abstract | Zora | Unichain | Aurora | Zeta | Manta | Kroma | Flare | Mantle | ShimmerEvm | Boba | Ink | Metall2 | SophonTestnet - | GaladrielDevnet | CitreaTestnet | BscTestnet | UnichainSepolia | Zircuit | Celo - | Opbnb | GnosisChiado | LuksoTestnet | BlastSepolia | Holesky | BerachainBartio - | OptimismSepolia | Fuji | ArbitrumSepolia | Fraxtal | Soneium | BaseSepolia - | MevCommit | Merlin | Mode | MoonbaseAlpha | XdcTestnet | Morph | Harmony - | Saakuru | Cyber | Superseed | Sonic | Worldchain | Sophon | Fantom | Sepolia - | Rsk | Chiliz | Lisk | Hyperliquid | Swell | Moonbeam | ChainwebTestnet20 - | ChainwebTestnet21 | Plume | Scroll | AuroraTurbo | Tangle => Stone, + | CitreaTestnet | BscTestnet | Zircuit | Celo | Opbnb | GnosisChiado | LuksoTestnet + | BlastSepolia | Holesky | OptimismSepolia | Fuji | ArbitrumSepolia | Fraxtal + | Soneium | BaseSepolia | MevCommit | Merlin | Mode | MoonbaseAlpha | XdcTestnet + | Morph | Harmony | Saakuru | Cyber | Superseed | Sonic | Worldchain | Sophon + | Fantom | Sepolia | Rsk | Chiliz | Lisk | Hyperliquid | Swell | Moonbeam + | ChainwebTestnet20 | ChainwebTestnet21 | ChainwebTestnet22 | ChainwebTestnet23 + | ChainwebTestnet24 | Plume | Scroll | AuroraTurbo | Tangle | Damon + | SentientTestnet => Stone, } } diff --git a/codegenerator/cli/src/config_parsing/entity_parsing.rs b/codegenerator/cli/src/config_parsing/entity_parsing.rs index d1cccc5ce..e1327f20a 100644 --- a/codegenerator/cli/src/config_parsing/entity_parsing.rs +++ b/codegenerator/cli/src/config_parsing/entity_parsing.rs @@ -373,6 +373,13 @@ impl Entity { } } + if name.len() > 63 { + return Err(anyhow!( + "Entity name '{}' is too long. 
It must be less than 64 characters.", + name + )); + } + Ok(Self { name: name.to_string(), fields, diff --git a/codegenerator/cli/templates/dynamic/codegen/src/RegisterHandlers.res.hbs b/codegenerator/cli/templates/dynamic/codegen/src/RegisterHandlers.res.hbs index fc60b33aa..5ff0d00ce 100644 --- a/codegenerator/cli/templates/dynamic/codegen/src/RegisterHandlers.res.hbs +++ b/codegenerator/cli/templates/dynamic/codegen/src/RegisterHandlers.res.hbs @@ -22,69 +22,69 @@ let registerContractHandlers = ( } } -%%private( - let makeGeneratedConfig = () => { - let chains = [ - {{#each chain_configs as | chain_config |}} - { - let contracts = [ - {{#each chain_config.codegen_contracts as | contract |}} - { - InternalConfig.name: "{{contract.name.capitalized}}", - abi: Types.{{contract.name.capitalized}}.abi, - addresses: [ - {{#each contract.addresses as | address |}} - {{#if ../../../is_evm_ecosystem}} - {{#if ../../../lowercase_addresses}} - "{{address}}"->Address.Evm.fromStringLowercaseOrThrow - {{else}} - "{{address}}"->Address.Evm.fromStringOrThrow - {{/if}} - {{else}} - "{{address}}"->Address.unsafeFromString - {{/if}}, - {{/each}} - ], - events: [ - {{#each contract.events as | event |}} - (Types.{{contract.name.capitalized}}.{{event.name}}.register() :> Internal.eventConfig), - {{/each}} - ], - startBlock: {{#if contract.start_block}}Some({{contract.start_block}}){{else}}None{{/if}}, - }, - {{/each}} - ] - let chain = ChainMap.Chain.makeUnsafe(~chainId={{chain_config.network_config.id}}) +let makeGeneratedConfig = () => { + let chains = [ + {{#each chain_configs as | chain_config |}} + { + let contracts = [ + {{#each chain_config.codegen_contracts as | contract |}} { - InternalConfig.confirmedBlockThreshold: {{chain_config.network_config.confirmed_block_threshold}}, - startBlock: {{chain_config.network_config.start_block}}, - {{#if chain_config.network_config.end_block}} - endBlock: {{chain_config.network_config.end_block}}, - {{/if}} - id: {{chain_config.network_config.id}}, - contracts, - sources: {{chain_config.sources_code}} - } - }, - {{/each}} - ] + InternalConfig.name: "{{contract.name.capitalized}}", + abi: Types.{{contract.name.capitalized}}.abi, + addresses: [ + {{#each contract.addresses as | address |}} + {{#if ../../../is_evm_ecosystem}} + {{#if ../../../lowercase_addresses}} + "{{address}}"->Address.Evm.fromStringLowercaseOrThrow + {{else}} + "{{address}}"->Address.Evm.fromStringOrThrow + {{/if}} + {{else}} + "{{address}}"->Address.unsafeFromString + {{/if}}, + {{/each}} + ], + events: [ + {{#each contract.events as | event |}} + (Types.{{contract.name.capitalized}}.{{event.name}}.register() :> Internal.eventConfig), + {{/each}} + ], + startBlock: {{#if contract.start_block}}Some({{contract.start_block}}){{else}}None{{/if}}, + }, + {{/each}} + ] + let chain = ChainMap.Chain.makeUnsafe(~chainId={{chain_config.network_config.id}}) + { + InternalConfig.maxReorgDepth: {{chain_config.network_config.confirmed_block_threshold}}, + startBlock: {{chain_config.network_config.start_block}}, + {{#if chain_config.network_config.end_block}} + endBlock: {{chain_config.network_config.end_block}}, + {{/if}} + id: {{chain_config.network_config.id}}, + contracts, + sources: {{chain_config.sources_code}} + } + }, + {{/each}} + ] - Config.make( - ~shouldRollbackOnReorg={{should_rollback_on_reorg}}, - ~shouldSaveFullHistory={{should_save_full_history}}, - ~isUnorderedMultichainMode={{is_unordered_multichain_mode}}, - ~chains, - ~enableRawEvents={{enable_raw_events}}, - ~batchSize=?Env.batchSize, - 
~preloadHandlers={{preload_handlers}}, - ~lowercaseAddresses={{lowercase_addresses}}, - ~shouldUseHypersyncClientDecoder={{should_use_hypersync_client_decoder}}, - {{#if chain_config.is_fuel}} - ~ecosystem=Fuel, - {{/if}} - ) - } + Config.make( + ~shouldRollbackOnReorg={{should_rollback_on_reorg}}, + ~shouldSaveFullHistory={{should_save_full_history}}, + ~isUnorderedMultichainMode={{is_unordered_multichain_mode}}, + ~chains, + ~enableRawEvents={{enable_raw_events}}, + ~batchSize=?Env.batchSize, + ~preloadHandlers={{preload_handlers}}, + ~lowercaseAddresses={{lowercase_addresses}}, + ~shouldUseHypersyncClientDecoder={{should_use_hypersync_client_decoder}}, + {{#if chain_config.is_fuel}} + ~ecosystem=Fuel, + {{/if}} + ) +} +%%private( let config: ref> = ref(None) ) diff --git a/codegenerator/cli/templates/dynamic/codegen/src/TestHelpers_MockDb.res.hbs b/codegenerator/cli/templates/dynamic/codegen/src/TestHelpers_MockDb.res.hbs index cc1073625..4bad74b34 100644 --- a/codegenerator/cli/templates/dynamic/codegen/src/TestHelpers_MockDb.res.hbs +++ b/codegenerator/cli/templates/dynamic/codegen/src/TestHelpers_MockDb.res.hbs @@ -118,10 +118,11 @@ let makeStoreOperatorEntity = ( let table = cloned->getStore table->set( - Delete->Types.mkEntityUpdate( - ~entityId, - ~eventIdentifier={chainId: -1, blockNumber: -1, blockTimestamp: 0, logIndex: -1}, - ), + { + entityId, + entityUpdateAction: Delete, + checkpointId: 0, + }, ~shouldSaveHistory=false, ) @@ -134,10 +135,11 @@ let makeStoreOperatorEntity = ( let entityId = entity->getKey table->set( - Set(entity)->Types.mkEntityUpdate( - ~entityId, - ~eventIdentifier={chainId: -1, blockNumber: -1, blockTimestamp: 0, logIndex: -1}, - ), + { + entityId, + entityUpdateAction: Set(entity), + checkpointId: 0, + }, ~shouldSaveHistory=false, ) @@ -315,127 +317,216 @@ let rec makeWithInMemoryStore: InMemoryStore.t => t = (inMemoryStore: InMemorySt and makeProcessEvents = (mockDb: t, ~chainId=?) => async ( events: array>, ) => { - let itemsWithContractRegister = [] + if events->Utils.Array.isEmpty { + mockDb + } else { + let itemsWithContractRegister = [] + + let registrations = EventRegister.finishRegistration() + + let config = if ( + registrations.hasEvents || !(registrations.onBlockByChainId->Utils.Dict.isEmpty) + ) { + { + ...RegisterHandlers.makeGeneratedConfig(), + registrations: Some(registrations), + } + } else { + RegisterHandlers.registerAllHandlers() + } - let config = { - ...config, - registrations: Some(EventRegister.finishRegistration()), - } + let processingChainId = ref(chainId) + let latestFetchedBlockNumber = ref(0) + let newItems = events->Array.map(event => { + let event = event->Internal.fromGenericEvent + let eventConfig = switch mockEventRegisters->Utils.WeakMap.get(event) { + | Some(register) => register() + | None => + Js.Exn.raiseError( + "Events must be created using the mock API (e.g. createMockEvent) to be processed by mockDb.processEvents", + ) + } + let chainId = switch chainId { + | Some(chainId) => chainId + | None => event.chainId + } - let processingChainId = ref(chainId) - let items = events->Array.map(event => { - let event = event->Internal.fromGenericEvent - let eventConfig = switch mockEventRegisters->Utils.WeakMap.get(event) { - | Some(register) => register() - | None => - Js.Exn.raiseError( - "Events must be created using the mock API (e.g. 
createMockEvent) to be processed by mockDb.processEvents", + switch processingChainId.contents { + | Some(prevItemChainId) => + if prevItemChainId !== chainId { + Js.Exn.raiseError( + `Processing events on multiple chains is not supported yet. Got chainId ${event.chainId->Belt.Int.toString} but expected ${chainId->Belt.Int.toString}`, + ) + } + | None => processingChainId.contents = Some(chainId) + } + + let chain = config->Config.getChain(~chainId) + let item = Internal.Event({ + eventConfig, + event, + chain, + logIndex: event.logIndex, + timestamp: event.block->Types.Block.getTimestamp, + blockNumber: event.block->Types.Block.getNumber, + }) + latestFetchedBlockNumber.contents = Pervasives.max( + latestFetchedBlockNumber.contents, + event.block->Types.Block.getNumber, ) - } - let chainId = switch chainId { + if eventConfig.contractRegister->Option.isSome { + itemsWithContractRegister->Js.Array2.push(item)->ignore + } + item + }) + + let processingChainId = switch processingChainId.contents { | Some(chainId) => chainId - | None => event.chainId + | None => + Js.Exn.raiseError("No events provided to processEvents. Please provide at least one event.") } + let processingChain = config->Config.getChain(~chainId=processingChainId) - switch processingChainId.contents { - | Some(chainId) => - if chainId != event.chainId { - Js.Exn.raiseError( - `Processing events on multiple chains is not supported yet. Got chainId ${event.chainId->Belt.Int.toString} but expected ${chainId->Belt.Int.toString}`, - ) - } - | None => processingChainId.contents = Some(chainId) + let chainFetcher = ChainFetcher.makeFromConfig( + config.chainMap->ChainMap.get(processingChain), + ~config, + ~targetBufferSize=5000, + ) + + //Deep copy the data in mockDb, mutate the clone and return the clone + //So no side effects occur here and state can be compared between process + //steps + let mockDbClone = mockDb->cloneMockDb + + //Construct a new instance of an in memory store to run for the given event + let inMemoryStore = InMemoryStore.make() + let loadManager = LoadManager.make() + let persistence = { + ...config.persistence, + storage: makeMockStorage(mockDb), + storageStatus: Ready({ + cleanRun: false, + cache: Js.Dict.empty(), + chains: [], + reorgCheckpoints: [], + checkpointId: 0, + }), + } + let config = { + ...config, + persistence, } - let chain = config->Config.getChain(~chainId) - let item = Internal.Event({ - eventConfig, - event, - chain, - logIndex: event.logIndex, - timestamp: event.block->Types.Block.getTimestamp, - blockNumber: event.block->Types.Block.getNumber, - }) - if eventConfig.contractRegister->Option.isSome { - itemsWithContractRegister->Js.Array2.push(item)->ignore + let newItemsWithDcs = if itemsWithContractRegister->Utils.Array.notEmpty { + await ChainFetcher.runContractRegistersOrThrow( + ~itemsWithContractRegister, + ~chain=processingChain, + ~config, + ) + } else { + itemsWithContractRegister } - item - }) - let processingChainId = switch processingChainId.contents { - | Some(chainId) => chainId - | None => - Js.Exn.raiseError("No events provided to processEvents. 
Please provide at least one event.") - } + let updatedFetchState = ref(chainFetcher.fetchState) - //Deep copy the data in mockDb, mutate the clone and return the clone - //So no side effects occur here and state can be compared between process - //steps - let mockDbClone = mockDb->cloneMockDb - - //Construct a new instance of an in memory store to run for the given event - let inMemoryStore = InMemoryStore.make() - let loadManager = LoadManager.make() - let persistence = { - ...config.persistence, - storage: makeMockStorage(mockDb), - storageStatus: Ready({cleanRun: false, cache: Js.Dict.empty(), chains: []}), - } - let config = { - ...config, - persistence, - } + switch newItemsWithDcs { + | [] => () + | _ => + updatedFetchState := + updatedFetchState.contents->FetchState.registerDynamicContracts(newItemsWithDcs) + } - //No need to check contract is registered or return anything. - //The only purpose is to test the registerContract function and to - //add the entity to the in memory store for asserting registrations - if itemsWithContractRegister->Utils.Array.notEmpty { - let dcs = await ChainFetcher.runContractRegistersOrThrow( - ~itemsWithContractRegister, - ~chain=ChainMap.Chain.makeUnsafe(~chainId=processingChainId), - ~config, + updatedFetchState := + updatedFetchState.contents + ->FetchState.handleQueryResult( + ~latestFetchedBlock={ + blockNumber: latestFetchedBlockNumber.contents, + blockTimestamp: 0, + }, + ~query={ + partitionId: (updatedFetchState.contents.partitions->Array.getUnsafe(0)).id, + fromBlock: 0, + selection: {eventConfigs: [], dependsOnAddresses: false}, + addressesByContractName: Js.Dict.empty(), + target: FetchState.Head, + indexingContracts: Js.Dict.empty(), + }, + ~newItems, + ) + ->Result.getExn + + // Handle query for the rest partitions without items + // to catch up the latest fully fetched block + for idx in 1 to updatedFetchState.contents.partitions->Array.length - 1 { + let partition = updatedFetchState.contents.partitions->Array.getUnsafe(idx) + updatedFetchState := + updatedFetchState.contents + ->FetchState.handleQueryResult( + ~latestFetchedBlock={ + blockNumber: latestFetchedBlockNumber.contents, + blockTimestamp: 0, + }, + ~query={ + partitionId: partition.id, + fromBlock: 0, + selection: {eventConfigs: [], dependsOnAddresses: false}, + addressesByContractName: Js.Dict.empty(), + target: FetchState.Head, + indexingContracts: Js.Dict.empty(), + }, + ~newItems=[], + ) + ->Result.getExn + } + + let batch = Batch.prepareUnorderedBatch( + ~checkpointIdBeforeBatch=0, + ~chainsBeforeBatch=ChainMap.fromArrayUnsafe([ + ( + processingChain, + ( + { + fetchState: updatedFetchState.contents, + reorgDetection: chainFetcher.reorgDetection, + progressBlockNumber: chainFetcher.committedProgressBlockNumber, + sourceBlockNumber: chainFetcher.currentBlockHeight, + totalEventsProcessed: chainFetcher.numEventsProcessed, + }: Batch.chainBeforeBatch + ), + ), + ]), + ~batchSizeTarget=newItems->Array.length, ) - // TODO: Reuse FetchState logic to clean up duplicate dcs - if dcs->Utils.Array.notEmpty { - inMemoryStore->InMemoryStore.setDcsToStore( - Js.Dict.fromArray([(processingChainId->Belt.Int.toString, dcs)]), + inMemoryStore->InMemoryStore.setBatchDcs(~batch, ~shouldSaveHistory=false) + + // Create a mock chains state where the processing chain is ready (simulating "Live" mode) + let chains = Js.Dict.empty() + chains->Js.Dict.set(processingChainId->Int.toString, {Internal.isReady: true}) + + try { + await batch->EventProcessing.preloadBatchOrThrow(~loadManager, 
~persistence, ~inMemoryStore, ~chains) + await batch->EventProcessing.runBatchHandlersOrThrow( + ~inMemoryStore, + ~loadManager, + ~config, ~shouldSaveHistory=false, + ~shouldBenchmark=false, + ~chains, ) + } catch { + | EventProcessing.ProcessingError({message, exn, item}) => + exn + ->ErrorHandling.make(~msg=message, ~logger=item->Logging.getItemLogger) + ->ErrorHandling.logAndRaise } - } - // Create a mock chains state where the processing chain is ready (simulating "Live" mode) - let chains = Js.Dict.empty() - chains->Js.Dict.set(processingChainId->Int.toString, {Internal.isReady: true}) - - try { - await items->EventProcessing.preloadBatchOrThrow( - ~loadManager, - ~persistence, - ~inMemoryStore, - ~chains, - ) - await items->EventProcessing.runBatchHandlersOrThrow( - ~inMemoryStore, - ~loadManager, - ~config, - ~shouldSaveHistory=false, - ~shouldBenchmark=false, - ~chains, - ) - } catch { - | EventProcessing.ProcessingError({message, exn, item}) => - exn - ->ErrorHandling.make(~msg=message, ~logger=item->Logging.getItemLogger) - ->ErrorHandling.logAndRaise + //In mem store can still contatin raw events and dynamic contracts for the + //testing framework in cases where either contract register or loaderHandler + //is None + mockDbClone->writeFromMemoryStore(~inMemoryStore) + mockDbClone } - - //In mem store can still contatin raw events and dynamic contracts for the - //testing framework in cases where either contract register or loaderHandler - //is None - mockDbClone->writeFromMemoryStore(~inMemoryStore) - mockDbClone } and makeMockStorage = (mockDb: t): Persistence.storage => { { diff --git a/codegenerator/cli/templates/dynamic/codegen/src/Types.res.hbs b/codegenerator/cli/templates/dynamic/codegen/src/Types.res.hbs index af3c81d46..95a96ca39 100644 --- a/codegenerator/cli/templates/dynamic/codegen/src/Types.res.hbs +++ b/codegenerator/cli/templates/dynamic/codegen/src/Types.res.hbs @@ -76,51 +76,6 @@ type handlerContext = { type {{entity.name.uncapitalized}} = Entities.{{entity.name.capitalized}}.t {{/each}} -type eventIdentifier = { - chainId: int, - blockTimestamp: int, - blockNumber: int, - logIndex: int, -} - -type entityUpdateAction<'entityType> = - | Set('entityType) - | Delete - -type entityUpdate<'entityType> = { - eventIdentifier: eventIdentifier, - entityId: id, - entityUpdateAction: entityUpdateAction<'entityType>, -} - -let mkEntityUpdate = (~eventIdentifier, ~entityId, entityUpdateAction) => { - entityId, - eventIdentifier, - entityUpdateAction, -} - -type entityValueAtStartOfBatch<'entityType> = - | NotSet // The entity isn't in the DB yet - | AlreadySet('entityType) - -type updatedValue<'entityType> = { - latest: entityUpdate<'entityType>, - history: array>, - // In the event of a rollback, some entity updates may have been - // been affected by a rollback diff. If there was no rollback diff - // this will always be false. - // If there was a rollback diff, this will be false in the case of a - // new entity update (where entity affected is not present in the diff) b - // but true if the update is related to an entity that is - // currently present in the diff - containsRollbackDiffChange: bool, -} - -@genType -type inMemoryStoreRowEntity<'entityType> = - | Updated(updatedValue<'entityType>) - | InitialReadFromDb(entityValueAtStartOfBatch<'entityType>) // This means there is no change from the db. 
- //************* //**CONTRACTS** //************* diff --git a/codegenerator/cli/templates/dynamic/codegen/src/db/Entities.res.hbs b/codegenerator/cli/templates/dynamic/codegen/src/db/Entities.res.hbs index 19867a2af..5a1a39d3a 100644 --- a/codegenerator/cli/templates/dynamic/codegen/src/db/Entities.res.hbs +++ b/codegenerator/cli/templates/dynamic/codegen/src/db/Entities.res.hbs @@ -5,6 +5,7 @@ type id = string type internalEntity = Internal.entity module type Entity = { type t + let index: int let name: string let schema: S.t let rowsSchema: S.t> @@ -43,6 +44,7 @@ type whereOperations<'entity, 'fieldType> = { module {{entity.name.capitalized}} = { let name = ({{entity.name.capitalized}} :> string) + let index = {{@index}} @genType type t = { {{#each entity.params as | param |}} @@ -103,7 +105,7 @@ module {{entity.name.capitalized}} = { {{/if}} ) - let entityHistory = table->EntityHistory.fromTable(~schema) + let entityHistory = table->EntityHistory.fromTable(~schema, ~entityIndex=index) external castToInternal: t => Internal.entity = "%identity" } diff --git a/codegenerator/cli/templates/dynamic/contract_import_templates/javascript/src/EventHandlers.js.hbs b/codegenerator/cli/templates/dynamic/contract_import_templates/javascript/src/EventHandlers.js.hbs index 5be5dc8bb..731a8f57b 100644 --- a/codegenerator/cli/templates/dynamic/contract_import_templates/javascript/src/EventHandlers.js.hbs +++ b/codegenerator/cli/templates/dynamic/contract_import_templates/javascript/src/EventHandlers.js.hbs @@ -13,7 +13,7 @@ const { const entity = { id: {{event.entity_id_from_event_code}}, {{#each event.params as |param|}} - {{param.entity_key.uncapitalized}}: event.params.{{param.event_key.uncapitalized}}{{#if param.tuple_param_accessor_indexes}} + {{param.entity_key.uncapitalized}}: event.params.{{param.event_key.original}}{{#if param.tuple_param_accessor_indexes}} {{#each param.tuple_param_accessor_indexes as |index|}} [{{index}}] {{/each}} diff --git a/codegenerator/cli/templates/dynamic/contract_import_templates/typescript/src/EventHandlers.ts.hbs b/codegenerator/cli/templates/dynamic/contract_import_templates/typescript/src/EventHandlers.ts.hbs index fd4694b4d..37971ccaf 100644 --- a/codegenerator/cli/templates/dynamic/contract_import_templates/typescript/src/EventHandlers.ts.hbs +++ b/codegenerator/cli/templates/dynamic/contract_import_templates/typescript/src/EventHandlers.ts.hbs @@ -17,7 +17,7 @@ import { const entity: {{contract.name.capitalized}}_{{event.name}} = { id: {{event.entity_id_from_event_code}}, {{#each event.params as |param|}} - {{param.entity_key.uncapitalized}}: event.params.{{param.event_key.uncapitalized}}{{#if + {{param.entity_key.uncapitalized}}: event.params.{{param.event_key.original}}{{#if param.tuple_param_accessor_indexes }} {{#each param.tuple_param_accessor_indexes as |index|}} diff --git a/codegenerator/cli/templates/static/codegen/src/Config.res b/codegenerator/cli/templates/static/codegen/src/Config.res index 8f32f3922..41f480396 100644 --- a/codegenerator/cli/templates/static/codegen/src/Config.res +++ b/codegenerator/cli/templates/static/codegen/src/Config.res @@ -37,19 +37,17 @@ let getSyncConfig = ( } let storagePgSchema = Env.Db.publicSchema -let codegenPersistence = Persistence.make( - ~userEntities=Entities.userEntities, - ~allEnums=Enums.allEnums, - ~storage=PgStorage.make( - ~sql=Db.sql, - ~pgSchema=storagePgSchema, +let makeStorage = (~sql=Db.sql, ~pgSchema=storagePgSchema, ~isHasuraEnabled=Env.Hasura.enabled) => { + PgStorage.make( + ~sql, + ~pgSchema, 
~pgHost=Env.Db.host, ~pgUser=Env.Db.user, ~pgPort=Env.Db.port, ~pgDatabase=Env.Db.database, ~pgPassword=Env.Db.password, ~onInitialize=?{ - if Env.Hasura.enabled { + if isHasuraEnabled { Some( () => { Hasura.trackDatabase( @@ -76,7 +74,7 @@ let codegenPersistence = Persistence.make( } }, ~onNewTables=?{ - if Env.Hasura.enabled { + if isHasuraEnabled { Some( (~tableNames) => { Hasura.trackTables( @@ -99,7 +97,14 @@ let codegenPersistence = Persistence.make( None } }, - ), + ~isHasuraEnabled, + ) +} + +let codegenPersistence = Persistence.make( + ~userEntities=Entities.userEntities, + ~allEnums=Enums.allEnums, + ~storage=makeStorage(), ) type t = { @@ -133,10 +138,7 @@ let make = ( ~shouldUseHypersyncClientDecoder=true, ) => { // Validate that lowercase addresses is not used with viem decoder - if ( - lowercaseAddresses && - !shouldUseHypersyncClientDecoder - ) { + if lowercaseAddresses && !shouldUseHypersyncClientDecoder { Js.Exn.raiseError( "lowercase addresses is not supported when event_decoder is 'viem'. Please set event_decoder to 'hypersync-client' or change address_format to 'checksum'.", ) diff --git a/codegenerator/cli/templates/static/codegen/src/EventProcessing.res b/codegenerator/cli/templates/static/codegen/src/EventProcessing.res index 68f83b67b..8e2be70e6 100644 --- a/codegenerator/cli/templates/static/codegen/src/EventProcessing.res +++ b/codegenerator/cli/templates/static/codegen/src/EventProcessing.res @@ -15,9 +15,12 @@ let computeChainsState = (chainFetchers: ChainMap.t): Internal.c let chainId = chain->ChainMap.Chain.toChainId->Int.toString let isReady = chainFetcher.timestampCaughtUpToHeadOrEndblock !== None - chains->Js.Dict.set(chainId, { - Internal.isReady: isReady, - }) + chains->Js.Dict.set( + chainId, + { + Internal.isReady: isReady, + }, + ) }) chains @@ -98,6 +101,7 @@ exception ProcessingError({message: string, exn: exn, item: Internal.item}) let runEventHandlerOrThrow = async ( item: Internal.item, + ~checkpointId, ~handler, ~inMemoryStore, ~loadManager, @@ -118,6 +122,7 @@ let runEventHandlerOrThrow = async ( event: eventItem.event, context: UserContext.getHandlerContext({ item, + checkpointId, inMemoryStore, loadManager, persistence, @@ -151,6 +156,7 @@ let runEventHandlerOrThrow = async ( let runHandlerOrThrow = async ( item: Internal.item, + ~checkpointId, ~inMemoryStore, ~loadManager, ~config: Config.t, @@ -174,6 +180,7 @@ let runHandlerOrThrow = async ( loadManager, persistence: config.persistence, shouldSaveHistory, + checkpointId, isPreload: false, chains, }), @@ -184,7 +191,7 @@ let runHandlerOrThrow = async ( | exn => raise( ProcessingError({ - message: "Unexpected error in the event handler. Please handle the error to keep the indexer running smoothly.", + message: "Unexpected error in the block handler. Please handle the error to keep the indexer running smoothly.", item, exn, }), @@ -195,6 +202,7 @@ let runHandlerOrThrow = async ( | Some(handler) => await item->runEventHandlerOrThrow( ~handler, + ~checkpointId, ~inMemoryStore, ~loadManager, ~persistence=config.persistence, @@ -213,7 +221,7 @@ let runHandlerOrThrow = async ( } let preloadBatchOrThrow = async ( - eventBatch: array, + batch: Batch.t, ~loadManager, ~persistence, ~inMemoryStore, @@ -223,15 +231,24 @@ let preloadBatchOrThrow = async ( // whether it's an error or a return type. // We'll rerun the loader again right before the handler run, // to avoid having a stale data returned from the loader. 
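+  // The preload pass below iterates the batch checkpoint by checkpoint, so every item is
+  // preloaded with the same checkpointId that its handler run will receive later.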
- let _ = await Promise.all( - eventBatch->Array.keepMap(item => { + + let promises = [] + let itemIdx = ref(0) + + for checkpointIdx in 0 to batch.checkpointIds->Array.length - 1 { + let checkpointId = batch.checkpointIds->Js.Array2.unsafe_get(checkpointIdx) + let checkpointEventsProcessed = + batch.checkpointEventsProcessed->Js.Array2.unsafe_get(checkpointIdx) + + for idx in 0 to checkpointEventsProcessed - 1 { + let item = batch.items->Js.Array2.unsafe_get(itemIdx.contents + idx) switch item { | Event({eventConfig: {handler}, event}) => switch handler { - | None => None + | None => () | Some(handler) => try { - Some( + promises->Array.push( handler({ event, context: UserContext.getHandlerContext({ @@ -239,6 +256,7 @@ let preloadBatchOrThrow = async ( inMemoryStore, loadManager, persistence, + checkpointId, isPreload: true, shouldSaveHistory: false, chains, @@ -249,12 +267,12 @@ let preloadBatchOrThrow = async ( // it won't create a rejected promise ) } catch { - | _ => None + | _ => () } } | Block({onBlockConfig: {handler, chainId}, blockNumber}) => try { - Some( + promises->Array.push( handler({ block: { number: blockNumber, @@ -265,6 +283,7 @@ let preloadBatchOrThrow = async ( inMemoryStore, loadManager, persistence, + checkpointId, isPreload: true, shouldSaveHistory: false, chains, @@ -272,15 +291,19 @@ let preloadBatchOrThrow = async ( })->Promise.silentCatch, ) } catch { - | _ => None + | _ => () } } - }), - ) + } + + itemIdx := itemIdx.contents + checkpointEventsProcessed + } + + let _ = await Promise.all(promises) } let runBatchHandlersOrThrow = async ( - eventBatch: array, + batch: Batch.t, ~inMemoryStore, ~loadManager, ~config, @@ -288,17 +311,28 @@ let runBatchHandlersOrThrow = async ( ~shouldBenchmark, ~chains: Internal.chains, ) => { - for i in 0 to eventBatch->Array.length - 1 { - let item = eventBatch->Js.Array2.unsafe_get(i) - await runHandlerOrThrow( - item, - ~inMemoryStore, - ~loadManager, - ~config, - ~shouldSaveHistory, - ~shouldBenchmark, - ~chains, - ) + let itemIdx = ref(0) + + for checkpointIdx in 0 to batch.checkpointIds->Array.length - 1 { + let checkpointId = batch.checkpointIds->Js.Array2.unsafe_get(checkpointIdx) + let checkpointEventsProcessed = + batch.checkpointEventsProcessed->Js.Array2.unsafe_get(checkpointIdx) + + for idx in 0 to checkpointEventsProcessed - 1 { + let item = batch.items->Js.Array2.unsafe_get(itemIdx.contents + idx) + + await runHandlerOrThrow( + item, + ~checkpointId, + ~inMemoryStore, + ~loadManager, + ~config, + ~shouldSaveHistory, + ~shouldBenchmark, + ~chains, + ) + } + itemIdx := itemIdx.contents + checkpointEventsProcessed } } @@ -328,60 +362,60 @@ type logPartitionInfo = { } let processEventBatch = async ( - ~items: array, - ~progressedChains: array, + ~batch: Batch.t, ~inMemoryStore: InMemoryStore.t, ~isInReorgThreshold, ~loadManager, ~config: Config.t, ~chainFetchers: ChainMap.t, ) => { + let totalBatchSize = batch.totalBatchSize // Compute chains state for this batch let chains: Internal.chains = chainFetchers->computeChainsState - let batchSize = items->Array.length - let byChain = Js.Dict.empty() - progressedChains->Js.Array2.forEach(data => { - if data.batchSize > 0 { - byChain->Utils.Dict.setByInt( - data.chainId, - { - "batchSize": data.batchSize, - "toBlockNumber": data.progressBlockNumber, - }, - ) - } + + let logger = Logging.getLogger() + logger->Logging.childTrace({ + "msg": "Started processing batch", + "totalBatchSize": totalBatchSize, + "chains": batch.progressedChainsById->Utils.Dict.mapValues(chainAfterBatch => 
{ + { + "batchSize": chainAfterBatch.batchSize, + "progress": chainAfterBatch.progressBlockNumber, + } + }), }) - let logger = Logging.createChildFrom( - ~logger=Logging.getLogger(), - ~params={ - "totalBatchSize": batchSize, - "byChain": byChain, - }, - ) - logger->Logging.childTrace("Started processing batch") try { let timeRef = Hrtime.makeTimer() - await items->preloadBatchOrThrow(~loadManager, ~persistence=config.persistence, ~inMemoryStore, ~chains) + if batch.items->Utils.Array.notEmpty { + await batch->preloadBatchOrThrow( + ~loadManager, + ~persistence=config.persistence, + ~inMemoryStore, + ~chains, + ) + } let elapsedTimeAfterLoaders = timeRef->Hrtime.timeSince->Hrtime.toMillis->Hrtime.intFromMillis - await items->runBatchHandlersOrThrow( - ~inMemoryStore, - ~loadManager, - ~config, - ~shouldSaveHistory=config->Config.shouldSaveHistory(~isInReorgThreshold), - ~shouldBenchmark=Env.Benchmark.shouldSaveData, - ~chains, - ) + if batch.items->Utils.Array.notEmpty { + await batch->runBatchHandlersOrThrow( + ~inMemoryStore, + ~loadManager, + ~config, + ~shouldSaveHistory=config->Config.shouldSaveHistory(~isInReorgThreshold), + ~shouldBenchmark=Env.Benchmark.shouldSaveData, + ~chains, + ) + } let elapsedTimeAfterProcessing = timeRef->Hrtime.timeSince->Hrtime.toMillis->Hrtime.intFromMillis let rec executeBatch = async (~escapeTables=?) => { switch await Db.sql->IO.executeBatch( - ~progressedChains, + ~batch, ~inMemoryStore, ~isInReorgThreshold, ~config, @@ -414,7 +448,7 @@ let processEventBatch = async ( ) if Env.Benchmark.shouldSaveData { Benchmark.addEventProcessing( - ~batchSize, + ~batchSize=totalBatchSize, ~loadDuration=loaderDuration, ~handlerDuration, ~dbWriteDuration, diff --git a/codegenerator/cli/templates/static/codegen/src/IO.res b/codegenerator/cli/templates/static/codegen/src/IO.res index 521926e88..bee5b3040 100644 --- a/codegenerator/cli/templates/static/codegen/src/IO.res +++ b/codegenerator/cli/templates/static/codegen/src/IO.res @@ -12,50 +12,16 @@ let executeSet = ( } } -let getEntityHistoryItems = (entityUpdates, ~containsRollbackDiffChange) => { - let (_, entityHistoryItems) = entityUpdates->Belt.Array.reduce((None, []), ( - prev: (option, array>), - entityUpdate: Types.entityUpdate<'a>, - ) => { - let (optPreviousEventIdentifier, entityHistoryItems) = prev - - let {eventIdentifier, entityUpdateAction, entityId} = entityUpdate - let entityHistoryItems = { - let historyItem: EntityHistory.historyRow<_> = { - current: { - chain_id: eventIdentifier.chainId, - block_timestamp: eventIdentifier.blockTimestamp, - block_number: eventIdentifier.blockNumber, - log_index: eventIdentifier.logIndex, - }, - previous: optPreviousEventIdentifier->Belt.Option.map(prev => { - EntityHistory.chain_id: prev.chainId, - block_timestamp: prev.blockTimestamp, - block_number: prev.blockNumber, - log_index: prev.logIndex, - }), - entityData: switch entityUpdateAction { - | Set(entity) => Set(entity) - | Delete => Delete({id: entityId}) - }, - containsRollbackDiffChange, - } - entityHistoryItems->Belt.Array.concat([historyItem]) - } - (Some(eventIdentifier), entityHistoryItems) - }) - - entityHistoryItems -} - let executeBatch = async ( sql, - ~progressedChains: array, + ~batch: Batch.t, ~inMemoryStore: InMemoryStore.t, ~isInReorgThreshold, ~config, ~escapeTables=?, ) => { + let shouldSaveHistory = config->Config.shouldSaveHistory(~isInReorgThreshold) + let specificError = ref(None) let setRawEvents = executeSet( @@ -74,7 +40,6 @@ let executeBatch = async ( let setEntities = 
Entities.allEntities->Belt.Array.map(entityConfig => { let entitiesToSet = [] let idsToDelete = [] - let entityHistoryItemsToSet = [] let rows = inMemoryStore @@ -90,155 +55,182 @@ let executeBatch = async ( } }) - if config->Config.shouldSaveHistory(~isInReorgThreshold) { - rows->Js.Array2.forEach(row => { - switch row { - | Updated({history, containsRollbackDiffChange}) => - let entityHistoryItems = history->getEntityHistoryItems(~containsRollbackDiffChange) - entityHistoryItemsToSet->Js.Array2.pushMany(entityHistoryItems)->ignore - | _ => () - } - }) - - // Keep history items in the order of the events. Without sorting, - // they will only be in order per row, but not across the whole entity - // table. - - switch config.multichain { - | Ordered => - let _ = entityHistoryItemsToSet->Js.Array2.sortInPlaceWith((a, b) => { - EventUtils.isEarlier( - ( - a.current.block_timestamp, - a.current.chain_id, - a.current.block_number, - a.current.log_index, - ), - ( - b.current.block_timestamp, - b.current.chain_id, - b.current.block_number, - b.current.log_index, - ), - ) - ? -1 - : 1 - }) - | Unordered => - let _ = entityHistoryItemsToSet->Js.Array2.sortInPlaceWith((a, b) => { - EventUtils.isEarlierUnordered( - (a.current.chain_id, a.current.block_number, a.current.log_index), - (b.current.chain_id, b.current.block_number, b.current.log_index), - ) - ? -1 - : 1 - }) - } - } - let shouldRemoveInvalidUtf8 = switch escapeTables { | Some(tables) if tables->Utils.Set.has(entityConfig.table) => true | _ => false } - sql => { - let promises = [] - if entityHistoryItemsToSet->Utils.Array.notEmpty { - promises - ->Js.Array2.pushMany( - sql->PgStorage.setEntityHistoryOrThrow( - ~entityHistory=entityConfig.entityHistory, - ~rows=entityHistoryItemsToSet, - ~shouldRemoveInvalidUtf8, - ), - ) - ->ignore - } - if entitiesToSet->Utils.Array.notEmpty { - if shouldRemoveInvalidUtf8 { - entitiesToSet->PgStorage.removeInvalidUtf8InPlace + async sql => { + try { + let promises = [] + + if shouldSaveHistory { + let backfillHistoryIds = Utils.Set.make() + let batchSetUpdates = [] + // Use unnest approach + let batchDeleteCheckpointIds = [] + let batchDeleteEntityIds = [] + + rows->Js.Array2.forEach(row => { + switch row { + | Updated({history, containsRollbackDiffChange}) => + history->Js.Array2.forEach( + (entityUpdate: EntityHistory.entityUpdate<'a>) => { + if !containsRollbackDiffChange { + // For every update we want to make sure that there's an existing history item + // with the current entity state. So we backfill history with checkpoint id 0, + // before writing updates. Don't do this if the update has a rollback diff change. 
+ backfillHistoryIds->Utils.Set.add(entityUpdate.entityId)->ignore + } + switch entityUpdate.entityUpdateAction { + | Delete => { + batchDeleteEntityIds->Array.push(entityUpdate.entityId)->ignore + batchDeleteCheckpointIds->Array.push(entityUpdate.checkpointId)->ignore + } + | Set(_) => batchSetUpdates->Js.Array2.push(entityUpdate)->ignore + } + }, + ) + | _ => () + } + }) + + if backfillHistoryIds->Utils.Set.size !== 0 { + // This must run before updating entity or entity history tables + await EntityHistory.backfillHistory( + sql, + ~pgSchema=Db.publicSchema, + ~entityName=entityConfig.name, + ~entityIndex=entityConfig.index, + ~ids=backfillHistoryIds->Utils.Set.toArray, + ) + } + + if batchDeleteCheckpointIds->Utils.Array.notEmpty { + promises->Array.push( + sql->EntityHistory.insertDeleteUpdates( + ~pgSchema=Db.publicSchema, + ~entityHistory=entityConfig.entityHistory, + ~batchDeleteEntityIds, + ~batchDeleteCheckpointIds, + ), + ) + } + + if batchSetUpdates->Utils.Array.notEmpty { + if shouldRemoveInvalidUtf8 { + let entities = batchSetUpdates->Js.Array2.map(batchSetUpdate => { + switch batchSetUpdate.entityUpdateAction { + | Set(entity) => entity + | _ => Js.Exn.raiseError("Expected Set action") + } + }) + entities->PgStorage.removeInvalidUtf8InPlace + } + + promises + ->Js.Array2.push( + sql->PgStorage.setOrThrow( + ~items=batchSetUpdates, + ~itemSchema=entityConfig.entityHistory.setUpdateSchema, + ~table=entityConfig.entityHistory.table, + ~pgSchema=Db.publicSchema, + ), + ) + ->ignore + } } - promises->Array.push( - sql->PgStorage.setOrThrow( - ~items=entitiesToSet, - ~table=entityConfig.table, - ~itemSchema=entityConfig.schema, - ~pgSchema=Config.storagePgSchema, - ), - ) - } - if idsToDelete->Utils.Array.notEmpty { - promises->Array.push(sql->DbFunctionsEntities.batchDelete(~entityConfig)(idsToDelete)) - } - // This should have await, to properly propagate errors to the caller. - promises - ->Promise.all + + if entitiesToSet->Utils.Array.notEmpty { + if shouldRemoveInvalidUtf8 { + entitiesToSet->PgStorage.removeInvalidUtf8InPlace + } + promises->Array.push( + sql->PgStorage.setOrThrow( + ~items=entitiesToSet, + ~table=entityConfig.table, + ~itemSchema=entityConfig.schema, + ~pgSchema=Config.storagePgSchema, + ), + ) + } + if idsToDelete->Utils.Array.notEmpty { + promises->Array.push(sql->DbFunctionsEntities.batchDelete(~entityConfig)(idsToDelete)) + } + + let _ = await promises->Promise.all + } catch { // There's a race condition that sql->Postgres.beginSql // might throw PG error, earlier, than the handled error // from setOrThrow will be passed through. // This is needed for the utf8 encoding fix. - ->Promise.catch(exn => { - /* Note: Entity History doesn't return StorageError yet, and directly throws JsError */ - let normalizedExn = switch exn { - | JsError(_) => exn - | Persistence.StorageError({reason: exn}) => exn - | _ => exn - }->Js.Exn.anyToExnInternal - - switch normalizedExn { - | JsError(error) => - // Workaround for https://github.com/enviodev/hyperindex/issues/446 - // We do escaping only when we actually got an error writing for the first time. - // This is not perfect, but an optimization to avoid escaping for every single item. 
- - switch error->S.parseOrThrow(PgStorage.pgErrorMessageSchema) { - | `current transaction is aborted, commands ignored until end of transaction block` => () - | `invalid byte sequence for encoding "UTF8": 0x00` => - // Since the transaction is aborted at this point, - // we can't simply retry the function with escaped items, - // so propagate the error, to restart the whole batch write. - // Also, pass the failing table, to escape only its items. - // TODO: Ideally all this should be done in the file, - // so it'll be easier to work on PG specific logic. - specificError.contents = Some(PgStorage.PgEncodingError({table: entityConfig.table})) - | _ => specificError.contents = Some(exn->Utils.prettifyExn) - | exception _ => () + | exn => { + /* Note: Entity History doesn't return StorageError yet, and directly throws JsError */ + let normalizedExn = switch exn { + | JsError(_) => exn + | Persistence.StorageError({reason: exn}) => exn + | _ => exn + }->Js.Exn.anyToExnInternal + + switch normalizedExn { + | JsError(error) => + // Workaround for https://github.com/enviodev/hyperindex/issues/446 + // We do escaping only when we actually got an error writing for the first time. + // This is not perfect, but an optimization to avoid escaping for every single item. + + switch error->S.parseOrThrow(PgStorage.pgErrorMessageSchema) { + | `current transaction is aborted, commands ignored until end of transaction block` => () + | `invalid byte sequence for encoding "UTF8": 0x00` => + // Since the transaction is aborted at this point, + // we can't simply retry the function with escaped items, + // so propagate the error, to restart the whole batch write. + // Also, pass the failing table, to escape only its items. + // TODO: Ideally all this should be done in the file, + // so it'll be easier to work on PG specific logic. + specificError.contents = Some(PgStorage.PgEncodingError({table: entityConfig.table})) + | _ => specificError.contents = Some(exn->Utils.prettifyExn) + | exception _ => () + } + | S.Raised(_) => raise(normalizedExn) // But rethrow this one, since it's not a PG error + | _ => () } - | _ => () - } - // Improtant: Don't rethrow here, since it'll result in - // an unhandled rejected promise error. - // That's fine not to throw, since sql->Postgres.beginSql - // will fail anyways. - Promise.resolve([]) - }) - ->(Utils.magic: promise> => promise) + // Improtant: Don't rethrow here, since it'll result in + // an unhandled rejected promise error. + // That's fine not to throw, since sql->Postgres.beginSql + // will fail anyways. 
+ } + } } }) //In the event of a rollback, rollback all meta tables based on the given //valid event identifier, where all rows created after this eventIdentifier should //be deleted - let rollbackTables = switch inMemoryStore.rollBackEventIdentifier { - | Some(eventIdentifier) => + let rollbackTables = switch inMemoryStore { + | {rollbackTargetCheckpointId: Some(rollbackTargetCheckpointId)} => Some( - sql => - Promise.all2(( - sql->DbFunctions.EntityHistory.deleteAllEntityHistoryAfterEventIdentifier( - ~isUnorderedMultichainMode=switch config.multichain { - | Unordered => true - | Ordered => false - }, - ~eventIdentifier, - ), - sql->DbFunctions.EndOfBlockRangeScannedData.rollbackEndOfBlockRangeScannedDataForChain( - ~chainId=eventIdentifier.chainId, - ~knownBlockNumber=eventIdentifier.blockNumber, + sql => { + let promises = Entities.allEntities->Js.Array2.map(entityConfig => { + sql->EntityHistory.rollback( + ~pgSchema=Db.publicSchema, + ~entityName=entityConfig.name, + ~entityIndex=entityConfig.index, + ~rollbackTargetCheckpointId, + ) + }) + promises + ->Js.Array2.push( + sql->InternalTable.Checkpoints.rollback( + ~pgSchema=Db.publicSchema, + ~rollbackTargetCheckpointId, ), - )), + ) + ->ignore + Promise.all(promises) + }, ) - | None => None + | _ => None } try { @@ -251,17 +243,35 @@ let executeBatch = async ( | None => () } - await Belt.Array.concatMany([ - [ - sql => - sql->InternalTable.Chains.setProgressedChains( - ~pgSchema=Db.publicSchema, - ~progressedChains, - ), - setRawEvents, - ], - setEntities, - ]) + let setOperations = [ + sql => + sql->InternalTable.Chains.setProgressedChains( + ~pgSchema=Db.publicSchema, + ~progressedChains=batch.progressedChainsById->Utils.Dict.mapValuesToArray(( + chainAfterBatch + ): InternalTable.Chains.progressedChain => { + chainId: chainAfterBatch.fetchState.chainId, + progressBlockNumber: chainAfterBatch.progressBlockNumber, + totalEventsProcessed: chainAfterBatch.totalEventsProcessed, + }), + ), + setRawEvents, + ]->Belt.Array.concat(setEntities) + + if shouldSaveHistory { + setOperations->Array.push(sql => + sql->InternalTable.Checkpoints.insert( + ~pgSchema=Db.publicSchema, + ~checkpointIds=batch.checkpointIds, + ~checkpointChainIds=batch.checkpointChainIds, + ~checkpointBlockNumbers=batch.checkpointBlockNumbers, + ~checkpointBlockHashes=batch.checkpointBlockHashes, + ~checkpointEventsProcessed=batch.checkpointEventsProcessed, + ) + ) + } + + await setOperations ->Belt.Array.map(dbFunc => sql->dbFunc) ->Promise.all }), @@ -271,7 +281,7 @@ let executeBatch = async ( ->Js.Dict.keys ->Belt.Array.keepMapU(effectName => { let inMemTable = inMemoryStore.effects->Js.Dict.unsafeGet(effectName) - let {idsToStore, dict, effect} = inMemTable + let {idsToStore, dict, effect, invalidationsCount} = inMemTable switch idsToStore { | [] => None | ids => { @@ -287,7 +297,13 @@ let executeBatch = async ( ), ) }) - Some(config.persistence->Persistence.setEffectCacheOrThrow(~effect, ~items)) + Some( + config.persistence->Persistence.setEffectCacheOrThrow( + ~effect, + ~items, + ~invalidationsCount, + ), + ) } } }) @@ -310,83 +326,71 @@ let executeBatch = async ( } } -module RollBack = { - exception DecodeError(S.error) - let rollBack = async ( - ~chainId, - ~blockTimestamp, - ~blockNumber, - ~logIndex, - ~isUnorderedMultichainMode, - ) => { - let rollBackEventIdentifier: Types.eventIdentifier = { - chainId, - blockTimestamp, - blockNumber, - logIndex, - } +let prepareRollbackDiff = async (~rollbackTargetCheckpointId) => { + let inMemStore = 
InMemoryStore.make(~rollbackTargetCheckpointId) - let inMemStore = InMemoryStore.make(~rollBackEventIdentifier) + let deletedEntities = Js.Dict.empty() + let setEntities = Js.Dict.empty() - let deletedEntities = Js.Dict.empty() - let setEntities = Js.Dict.empty() + let _ = + await Entities.allEntities + ->Belt.Array.map(async entityConfig => { + let entityTable = inMemStore->InMemoryStore.getInMemTable(~entityConfig) - let fullDiff: dict>> = Js.Dict.empty() + let (removedIdsResult, restoredEntitiesResult) = await Promise.all2(( + // Get IDs of entities that should be deleted (created after rollback target with no prior history) + Db.sql + ->Postgres.preparedUnsafe( + entityConfig.entityHistory.makeGetRollbackRemovedIdsQuery(~pgSchema=Db.publicSchema), + [rollbackTargetCheckpointId]->Utils.magic, + ) + ->(Utils.magic: promise => promise>), + // Get entities that should be restored to their state at or before rollback target + Db.sql + ->Postgres.preparedUnsafe( + entityConfig.entityHistory.makeGetRollbackRestoredEntitiesQuery( + ~pgSchema=Db.publicSchema, + ), + [rollbackTargetCheckpointId]->Utils.magic, + ) + ->(Utils.magic: promise => promise>), + )) - let _ = - await Entities.allEntities - ->Belt.Array.map(async entityConfig => { - let diff = await Db.sql->DbFunctions.EntityHistory.getRollbackDiff( - isUnorderedMultichainMode - ? UnorderedMultichain({ - reorgChainId: chainId, - safeBlockNumber: blockNumber, - }) - : OrderedMultichain({ - safeBlockTimestamp: blockTimestamp, - reorgChainId: chainId, - safeBlockNumber: blockNumber, - }), - ~entityConfig, + // Process removed IDs + removedIdsResult->Js.Array2.forEach(data => { + deletedEntities->Utils.Dict.push(entityConfig.name, data["id"]) + entityTable->InMemoryTable.Entity.set( + { + entityId: data["id"], + checkpointId: 0, + entityUpdateAction: Delete, + }, + ~shouldSaveHistory=false, + ~containsRollbackDiffChange=true, ) - if diff->Utils.Array.notEmpty { - fullDiff->Js.Dict.set(entityConfig.name, diff) - } + }) - let entityTable = inMemStore->InMemoryStore.getInMemTable(~entityConfig) + let restoredEntities = restoredEntitiesResult->S.parseOrThrow(entityConfig.rowsSchema) - diff->Belt.Array.forEach(historyRow => { - let eventIdentifier: Types.eventIdentifier = { - chainId: historyRow.current.chain_id, - blockNumber: historyRow.current.block_number, - logIndex: historyRow.current.log_index, - blockTimestamp: historyRow.current.block_timestamp, - } - switch historyRow.entityData { - | Set(entity: Entities.internalEntity) => - setEntities->Utils.Dict.push(entityConfig.name, entity.id) - entityTable->InMemoryTable.Entity.set( - Set(entity)->Types.mkEntityUpdate(~eventIdentifier, ~entityId=entity.id), - ~shouldSaveHistory=false, - ~containsRollbackDiffChange=true, - ) - | Delete({id}) => - deletedEntities->Utils.Dict.push(entityConfig.name, id) - entityTable->InMemoryTable.Entity.set( - Delete->Types.mkEntityUpdate(~eventIdentifier, ~entityId=id), - ~shouldSaveHistory=false, - ~containsRollbackDiffChange=true, - ) - } - }) + // Process restored entities + restoredEntities->Belt.Array.forEach((entity: Entities.internalEntity) => { + setEntities->Utils.Dict.push(entityConfig.name, entity.id) + entityTable->InMemoryTable.Entity.set( + { + entityId: entity.id, + checkpointId: 0, + entityUpdateAction: Set(entity), + }, + ~shouldSaveHistory=false, + ~containsRollbackDiffChange=true, + ) }) - ->Promise.all + }) + ->Promise.all - { - "inMemStore": inMemStore, - "deletedEntities": deletedEntities, - "setEntities": setEntities, - "fullDiff": 
fullDiff, - } + { + "inMemStore": inMemStore, + "deletedEntities": deletedEntities, + "setEntities": setEntities, } } diff --git a/codegenerator/cli/templates/static/codegen/src/InMemoryStore.res b/codegenerator/cli/templates/static/codegen/src/InMemoryStore.res index c29f63025..6add93749 100644 --- a/codegenerator/cli/templates/static/codegen/src/InMemoryStore.res +++ b/codegenerator/cli/templates/static/codegen/src/InMemoryStore.res @@ -44,6 +44,7 @@ module EntityTables = { type effectCacheInMemTable = { idsToStore: array, + mutable invalidationsCount: int, dict: dict, effect: Internal.effect, } @@ -52,17 +53,17 @@ type t = { rawEvents: InMemoryTable.t, entities: dict>, effects: dict, - rollBackEventIdentifier: option, + rollbackTargetCheckpointId: option, } let make = ( ~entities: array=Entities.allEntities, - ~rollBackEventIdentifier=?, + ~rollbackTargetCheckpointId=?, ): t => { rawEvents: InMemoryTable.make(~hash=hashRawEventsKey), entities: EntityTables.make(entities), effects: Js.Dict.empty(), - rollBackEventIdentifier, + rollbackTargetCheckpointId, } let clone = (self: t) => { @@ -70,10 +71,11 @@ let clone = (self: t) => { entities: self.entities->EntityTables.clone, effects: Js.Dict.map(table => { idsToStore: table.idsToStore->Array.copy, + invalidationsCount: table.invalidationsCount, dict: table.dict->Utils.Dict.shallowCopy, effect: table.effect, }, self.effects), - rollBackEventIdentifier: self.rollBackEventIdentifier->Lodash.cloneDeep, + rollbackTargetCheckpointId: self.rollbackTargetCheckpointId, } let getEffectInMemTable = (inMemoryStore: t, ~effect: Internal.effect) => { @@ -84,6 +86,7 @@ let getEffectInMemTable = (inMemoryStore: t, ~effect: Internal.effect) => { let table = { idsToStore: [], dict: Js.Dict.empty(), + invalidationsCount: 0, effect, } inMemoryStore.effects->Js.Dict.set(key, table) @@ -98,50 +101,56 @@ let getInMemTable = ( inMemoryStore.entities->EntityTables.get(~entityName=entityConfig.name) } -let isRollingBack = (inMemoryStore: t) => inMemoryStore.rollBackEventIdentifier->Belt.Option.isSome +let isRollingBack = (inMemoryStore: t) => inMemoryStore.rollbackTargetCheckpointId !== None -let setDcsToStore = ( - inMemoryStore: t, - dcsToStoreByChainId: dict>, - ~shouldSaveHistory, -) => { +let setBatchDcs = (inMemoryStore: t, ~batch: Batch.t, ~shouldSaveHistory) => { let inMemTable = inMemoryStore->getInMemTable( ~entityConfig=module(InternalTable.DynamicContractRegistry)->Entities.entityModToInternal, ) - dcsToStoreByChainId->Utils.Dict.forEachWithKey((chainId, dcs) => { - let chainId = chainId->Belt.Int.fromString->Belt.Option.getExn - dcs->Belt.Array.forEach(dc => { - let dcData = switch dc.register { - | Config => Js.Exn.raiseError("Config contract should not be in dcsToStore") - | DC(data) => data - } - let entity: InternalTable.DynamicContractRegistry.t = { - id: InternalTable.DynamicContractRegistry.makeId(~chainId, ~contractAddress=dc.address), - chainId, - contractAddress: dc.address, - contractName: dc.contractName, - registeringEventBlockNumber: dc.startBlock, - registeringEventBlockTimestamp: dcData.registeringEventBlockTimestamp, - registeringEventLogIndex: dcData.registeringEventLogIndex, - registeringEventContractName: dcData.registeringEventContractName, - registeringEventName: dcData.registeringEventName, - registeringEventSrcAddress: dcData.registeringEventSrcAddress, - } - let eventIdentifier: Types.eventIdentifier = { - chainId, - blockTimestamp: dcData.registeringEventBlockTimestamp, - blockNumber: dc.startBlock, - logIndex: 
dcData.registeringEventLogIndex, + let itemIdx = ref(0) + + for checkpoint in 0 to batch.checkpointIds->Array.length - 1 { + let checkpointId = batch.checkpointIds->Js.Array2.unsafe_get(checkpoint) + let chainId = batch.checkpointChainIds->Js.Array2.unsafe_get(checkpoint) + let checkpointEventsProcessed = + batch.checkpointEventsProcessed->Js.Array2.unsafe_get(checkpoint) + + for idx in 0 to checkpointEventsProcessed - 1 { + let item = batch.items->Js.Array2.unsafe_get(itemIdx.contents + idx) + switch item->Internal.getItemDcs { + | None => () + | Some(dcs) => + // Currently only events support contract registration, so we can cast to event item + let eventItem = item->Internal.castUnsafeEventItem + for dcIdx in 0 to dcs->Array.length - 1 { + let dc = dcs->Js.Array2.unsafe_get(dcIdx) + let entity: InternalTable.DynamicContractRegistry.t = { + id: InternalTable.DynamicContractRegistry.makeId(~chainId, ~contractAddress=dc.address), + chainId, + contractAddress: dc.address, + contractName: dc.contractName, + registeringEventBlockNumber: eventItem.blockNumber, + registeringEventLogIndex: eventItem.logIndex, + registeringEventBlockTimestamp: eventItem.timestamp, + registeringEventContractName: eventItem.eventConfig.contractName, + registeringEventName: eventItem.eventConfig.name, + registeringEventSrcAddress: eventItem.event.srcAddress, + } + + inMemTable->InMemoryTable.Entity.set( + { + entityId: entity.id, + checkpointId, + entityUpdateAction: Set(entity->InternalTable.DynamicContractRegistry.castToInternal), + }, + ~shouldSaveHistory, + ) + } } - inMemTable->InMemoryTable.Entity.set( - Set(entity->InternalTable.DynamicContractRegistry.castToInternal)->Types.mkEntityUpdate( - ~eventIdentifier, - ~entityId=entity.id, - ), - ~shouldSaveHistory, - ) - }) - }) + } + + itemIdx := itemIdx.contents + checkpointEventsProcessed + } } diff --git a/codegenerator/cli/templates/static/codegen/src/InMemoryTable.res b/codegenerator/cli/templates/static/codegen/src/InMemoryTable.res index d3b4e7886..1d27e5ce4 100644 --- a/codegenerator/cli/templates/static/codegen/src/InMemoryTable.res +++ b/codegenerator/cli/templates/static/codegen/src/InMemoryTable.res @@ -39,7 +39,7 @@ module Entity = { type indexFieldNameToIndices = t type entityWithIndices<'entity> = { - entityRow: Types.inMemoryStoreRowEntity<'entity>, + entityRow: Internal.inMemoryStoreRowEntity<'entity>, entityIndices: Utils.Set.t, } type t<'entity> = { @@ -136,7 +136,7 @@ module Entity = { //or if allowOverWriteEntity is true (used for mockDb in test helpers) if shouldWriteEntity { let entityIndices = Utils.Set.make() - let initialStoreRow: Types.inMemoryStoreRowEntity<'entity> = switch entity { + let initialStoreRow: Internal.inMemoryStoreRowEntity<'entity> = switch entity { | Some(entity) => //update table indices in the case where there //is an already set entity @@ -155,16 +155,15 @@ module Entity = { let setRow = set let set = ( inMemTable: t<'entity>, - entityUpdate: Types.entityUpdate<'entity>, + entityUpdate: EntityHistory.entityUpdate<'entity>, ~shouldSaveHistory, ~containsRollbackDiffChange=false, ) => { //New entity row with only the latest update @inline - let newEntityRow = () => Types.Updated({ + let newEntityRow = () => Internal.Updated({ latest: entityUpdate, history: shouldSaveHistory ? 
[entityUpdate] : [], - // For new entities, apply "containsRollbackDiffChange" from param containsRollbackDiffChange, }) @@ -176,27 +175,22 @@ module Entity = { } | Some({entityRow: Updated(previous_values), entityIndices}) // This prevents two db actions in the same event on the same entity from being recorded to the history table. - if shouldSaveHistory && - previous_values.latest.eventIdentifier == entityUpdate.eventIdentifier => - let entityRow = Types.Updated({ + if shouldSaveHistory && previous_values.latest.checkpointId === entityUpdate.checkpointId => + let entityRow = Internal.Updated({ latest: entityUpdate, history: previous_values.history->Utils.Array.setIndexImmutable( previous_values.history->Array.length - 1, entityUpdate, ), - // For updated entities, apply "containsRollbackDiffChange" from previous values - // (so that the first change if from a rollback diff applies throughout the batch) containsRollbackDiffChange: previous_values.containsRollbackDiffChange, }) {entityRow, entityIndices} | Some({entityRow: Updated(previous_values), entityIndices}) => - let entityRow = Types.Updated({ + let entityRow = Internal.Updated({ latest: entityUpdate, history: shouldSaveHistory ? [...previous_values.history, entityUpdate] : previous_values.history, - // For updated entities, apply "containsRollbackDiffChange" from previous values - // (so that the first change if from a rollback diff applies throughout the batch) containsRollbackDiffChange: previous_values.containsRollbackDiffChange, }) {entityRow, entityIndices} @@ -211,7 +205,7 @@ module Entity = { let rowToEntity = row => switch row.entityRow { - | Types.Updated({latest: {entityUpdateAction: Set(entity)}}) => Some(entity) + | Internal.Updated({latest: {entityUpdateAction: Set(entity)}}) => Some(entity) | Updated({latest: {entityUpdateAction: Delete}}) => None | InitialReadFromDb(AlreadySet(entity)) => Some(entity) | InitialReadFromDb(NotSet) => None diff --git a/codegenerator/cli/templates/static/codegen/src/LoadLayer.res b/codegenerator/cli/templates/static/codegen/src/LoadLayer.res index f40c38258..8d585387b 100644 --- a/codegenerator/cli/templates/static/codegen/src/LoadLayer.res +++ b/codegenerator/cli/templates/static/codegen/src/LoadLayer.res @@ -79,29 +79,49 @@ let loadEffect = ( let idsFromCache = Utils.Set.make() switch effect.cache { - | Some({table, rowsSchema}) + | Some({table, outputSchema}) if switch persistence.storageStatus { | Ready({cache}) => cache->Utils.Dict.has(effect.name) | _ => false } => { + let timerRef = Prometheus.StorageLoad.startOperation(~operation=key) + let dbEntities = try { await (persistence->Persistence.getInitializedStorageOrThrow).loadByIdsOrThrow( ~table, - ~rowsSchema, + ~rowsSchema=Internal.effectCacheItemRowsSchema, ~ids=idsToLoad, ) } catch { | Persistence.StorageError({message, reason}) => - reason->ErrorHandling.mkLogAndRaise( - ~logger=item->Logging.getItemLogger, - ~msg=message, - ) + reason->ErrorHandling.mkLogAndRaise(~logger=item->Logging.getItemLogger, ~msg=message) } - dbEntities->Js.Array2.forEach(entity => { - idsFromCache->Utils.Set.add(entity.id)->ignore - inMemTable.dict->Js.Dict.set(entity.id, entity.output) + dbEntities->Js.Array2.forEach(dbEntity => { + try { + let output = dbEntity.output->S.parseOrThrow(outputSchema) + idsFromCache->Utils.Set.add(dbEntity.id)->ignore + inMemTable.dict->Js.Dict.set(dbEntity.id, output) + } catch { + | S.Raised(error) => + inMemTable.invalidationsCount = inMemTable.invalidationsCount + 1 + 
Prometheus.EffectCacheInvalidationsCount.increment(~effectName=effect.name) + item + ->Logging.getItemLogger + ->Logging.childTrace({ + "msg": "Invalidated effect cache", + "input": dbEntity.id, + "effect": effect.name, + "err": error->S.Error.message, + }) + } }) + + timerRef->Prometheus.StorageLoad.endOperation( + ~operation=key, + ~whereSize=idsToLoad->Array.length, + ~size=dbEntities->Array.length, + ) } | _ => () } diff --git a/codegenerator/cli/templates/static/codegen/src/UserContext.res b/codegenerator/cli/templates/static/codegen/src/UserContext.res index 11cd02f4c..0dcff349c 100644 --- a/codegenerator/cli/templates/static/codegen/src/UserContext.res +++ b/codegenerator/cli/templates/static/codegen/src/UserContext.res @@ -1,24 +1,8 @@ let codegenHelpMessage = `Rerun 'pnpm dev' to update generated code after schema.graphql changes.` -let makeEventIdentifier = (item: Internal.item): Types.eventIdentifier => { - switch item { - | Internal.Event({chain, blockNumber, logIndex, timestamp}) => { - chainId: chain->ChainMap.Chain.toChainId, - blockTimestamp: timestamp, - blockNumber, - logIndex, - } - | Internal.Block({onBlockConfig: {chainId}, blockNumber, logIndex}) => { - chainId, - blockTimestamp: 0, - blockNumber, - logIndex, - } - } -} - type contextParams = { item: Internal.item, + checkpointId: int, inMemoryStore: InMemoryStore.t, loadManager: LoadManager.t, persistence: Persistence.t, @@ -134,10 +118,11 @@ let entityTraps: Utils.Proxy.traps = { params.inMemoryStore ->InMemoryStore.getInMemTable(~entityConfig=params.entityConfig) ->InMemoryTable.Entity.set( - Set(entity)->Types.mkEntityUpdate( - ~eventIdentifier=params.item->makeEventIdentifier, - ~entityId=entity.id, - ), + { + entityId: entity.id, + checkpointId: params.checkpointId, + entityUpdateAction: Set(entity), + }, ~shouldSaveHistory=params.shouldSaveHistory, ) } @@ -210,10 +195,11 @@ let entityTraps: Utils.Proxy.traps = { params.inMemoryStore ->InMemoryStore.getInMemTable(~entityConfig=params.entityConfig) ->InMemoryTable.Entity.set( - Delete->Types.mkEntityUpdate( - ~eventIdentifier=params.item->makeEventIdentifier, - ~entityId, - ), + { + entityId, + checkpointId: params.checkpointId, + entityUpdateAction: Delete, + }, ~shouldSaveHistory=params.shouldSaveHistory, ) } @@ -248,6 +234,7 @@ let handlerTraps: Utils.Proxy.traps = { loadManager: params.loadManager, persistence: params.persistence, shouldSaveHistory: params.shouldSaveHistory, + checkpointId: params.checkpointId, chains: params.chains, entityConfig, } diff --git a/codegenerator/cli/templates/static/codegen/src/db/DbFunctions.res b/codegenerator/cli/templates/static/codegen/src/db/DbFunctions.res index 9773f94fa..f4d6ab967 100644 --- a/codegenerator/cli/templates/static/codegen/src/db/DbFunctions.res +++ b/codegenerator/cli/templates/static/codegen/src/db/DbFunctions.res @@ -1,6 +1,3 @@ -type chainId = int -type eventId = string - module General = { type existsRes = {exists: bool} @@ -13,323 +10,7 @@ module General = { } } -module EndOfBlockRangeScannedData = { - type endOfBlockRangeScannedData = { - @as("chain_id") chainId: int, - @as("block_number") blockNumber: int, - @as("block_hash") blockHash: string, - } - - @module("./DbFunctionsImplementation.js") - external batchSet: (Postgres.sql, array) => promise = - "batchSetEndOfBlockRangeScannedData" - - let setEndOfBlockRangeScannedData = (sql, endOfBlockRangeScannedData) => - batchSet(sql, [endOfBlockRangeScannedData]) - - @module("./DbFunctionsImplementation.js") - external 
readEndOfBlockRangeScannedDataForChain: ( - Postgres.sql, - ~chainId: int, - ) => promise> = "readEndOfBlockRangeScannedDataForChain" - - @module("./DbFunctionsImplementation.js") - external deleteStaleEndOfBlockRangeScannedDataForChain: ( - Postgres.sql, - ~chainId: int, - //minimum blockNumber that should be kept in db - ~blockNumberThreshold: int, - ) => promise = "deleteStaleEndOfBlockRangeScannedDataForChain" - - @module("./DbFunctionsImplementation.js") - external rollbackEndOfBlockRangeScannedDataForChain: ( - Postgres.sql, - ~chainId: int, - //The known block number we are rollbacking to - ~knownBlockNumber: int, - ) => promise = "rollbackEndOfBlockRangeScannedDataForChain" -} - -module DynamicContractRegistry = { - @module("./DbFunctionsImplementation.js") - external readAllDynamicContractsRaw: (Postgres.sql, ~chainId: chainId) => promise = - "readAllDynamicContracts" - - let readAllDynamicContracts = async (sql: Postgres.sql, ~chainId: chainId) => { - let json = await sql->readAllDynamicContractsRaw(~chainId) - json->S.parseJsonOrThrow(InternalTable.DynamicContractRegistry.rowsSchema) - } -} - module EntityHistory = { - type dynamicSqlQuery - module UnorderedMultichain = { - @module("./DbFunctionsImplementation.js") - external getFirstChangeSerial: ( - Postgres.sql, - ~reorgChainId: int, - ~safeBlockNumber: int, - ~entityName: string, - ) => dynamicSqlQuery = "getFirstChangeSerial_UnorderedMultichain" - } - - module OrderedMultichain = { - @module("./DbFunctionsImplementation.js") - external getFirstChangeSerial: ( - Postgres.sql, - ~safeBlockTimestamp: int, - ~reorgChainId: int, - ~safeBlockNumber: int, - ~entityName: string, - ) => dynamicSqlQuery = "getFirstChangeSerial_OrderedMultichain" - } - - @module("./DbFunctionsImplementation.js") - external getFirstChangeEntityHistoryPerChain: ( - Postgres.sql, - ~entityName: string, - ~getFirstChangeSerial: Postgres.sql => dynamicSqlQuery, - ) => promise = "getFirstChangeEntityHistoryPerChain" - - @module("./DbFunctionsImplementation.js") - external getRollbackDiffInternal: ( - Postgres.sql, - ~entityName: string, - ~getFirstChangeSerial: Postgres.sql => dynamicSqlQuery, - ) => //Returns an array of entity history rows - promise = "getRollbackDiff" - - @module("./DbFunctionsImplementation.js") - external deleteRolledBackEntityHistory: ( - Postgres.sql, - ~entityName: string, - ~getFirstChangeSerial: Postgres.sql => dynamicSqlQuery, - ) => promise = "deleteRolledBackEntityHistory" - - let rollbacksGroup = "Rollbacks" - - module Args = { - type t = - | OrderedMultichain({safeBlockTimestamp: int, reorgChainId: int, safeBlockNumber: int}) - | UnorderedMultichain({reorgChainId: int, safeBlockNumber: int}) - - /** - Uses two different methods for determining the first change event after rollback block - - This is needed since unordered multichain mode only cares about any changes that - occurred after the first change on the reorg chain. To prevent skipping or double processing events - on the other chains. If for instance there are no entity changes based on the reorg chain, the other - chains do not need to be rolled back, and if the reorg chain has new included events, it does not matter - that if those events are processed out of order from other chains since this is "unordered_multichain_mode" - - Ordered multichain mode needs to ensure that all chains rollback to any event that occurred after the reorg chain - block number. Regardless of whether the reorg chain incurred any changes or not to entities. 
- */ - let makeGetFirstChangeSerial = (self: t, ~entityName) => - switch self { - | OrderedMultichain({safeBlockTimestamp, reorgChainId, safeBlockNumber}) => - sql => - OrderedMultichain.getFirstChangeSerial( - sql, - ~safeBlockTimestamp, - ~reorgChainId, - ~safeBlockNumber, - ~entityName, - ) - | UnorderedMultichain({reorgChainId, safeBlockNumber}) => - sql => - UnorderedMultichain.getFirstChangeSerial( - sql, - ~reorgChainId, - ~safeBlockNumber, - ~entityName, - ) - } - - let getLogger = (self: t, ~entityName) => { - switch self { - | OrderedMultichain({safeBlockTimestamp, reorgChainId, safeBlockNumber}) => - Logging.createChild( - ~params={ - "type": "OrderedMultichain", - "safeBlockTimestamp": safeBlockTimestamp, - "reorgChainId": reorgChainId, - "safeBlockNumber": safeBlockNumber, - "entityName": entityName, - }, - ) - | UnorderedMultichain({reorgChainId, safeBlockNumber}) => - Logging.createChild( - ~params={ - "type": "UnorderedMultichain", - "reorgChainId": reorgChainId, - "safeBlockNumber": safeBlockNumber, - "entityName": entityName, - }, - ) - } - } - } - - let deleteAllEntityHistoryAfterEventIdentifier = async ( - sql, - ~isUnorderedMultichainMode, - ~eventIdentifier: Types.eventIdentifier, - ~allEntities=Entities.allEntities, - ): unit => { - let startTime = Hrtime.makeTimer() - - let {chainId, blockNumber, blockTimestamp} = eventIdentifier - let args: Args.t = isUnorderedMultichainMode - ? UnorderedMultichain({reorgChainId: chainId, safeBlockNumber: blockNumber}) - : OrderedMultichain({ - reorgChainId: chainId, - safeBlockNumber: blockNumber, - safeBlockTimestamp: blockTimestamp, - }) - - let _ = - await allEntities - ->Belt.Array.map(async entityConfig => { - try await deleteRolledBackEntityHistory( - sql, - ~entityName=entityConfig.name, - ~getFirstChangeSerial=args->Args.makeGetFirstChangeSerial(~entityName=entityConfig.name), - ) catch { - | exn => - exn->ErrorHandling.mkLogAndRaise( - ~msg=`Failed to delete rolled back entity history`, - ~logger=args->Args.getLogger(~entityName=entityConfig.name), - ) - } - }) - ->Promise.all - - if Env.Benchmark.shouldSaveData { - let elapsedTimeMillis = Hrtime.timeSince(startTime)->Hrtime.toMillis->Hrtime.floatFromMillis - - Benchmark.addSummaryData( - ~group=rollbacksGroup, - ~label=`Delete Rolled Back History Time (ms)`, - ~value=elapsedTimeMillis, - ) - } - } - - let getRollbackDiff = async (sql, args: Args.t, ~entityConfig: Internal.entityConfig) => { - let startTime = Hrtime.makeTimer() - - let diffRes = switch await getRollbackDiffInternal( - sql, - ~getFirstChangeSerial=args->Args.makeGetFirstChangeSerial(~entityName=entityConfig.name), - ~entityName=entityConfig.name, - ) { - | exception exn => - exn->ErrorHandling.mkLogAndRaise( - ~msg="Failed to get rollback diff from entity history", - ~logger=args->Args.getLogger(~entityName=entityConfig.name), - ) - | res => res - } - - if Env.Benchmark.shouldSaveData { - let elapsedTimeMillis = Hrtime.timeSince(startTime)->Hrtime.toMillis->Hrtime.floatFromMillis - - Benchmark.addSummaryData( - ~group=rollbacksGroup, - ~label=`Diff Creation Time (ms)`, - ~value=elapsedTimeMillis, - ) - } - - switch diffRes->S.parseOrThrow(entityConfig.entityHistory.schemaRows) { - | exception exn => - exn->ErrorHandling.mkLogAndRaise( - ~msg="Failed to parse rollback diff from entity history", - ~logger=args->Args.getLogger(~entityName=entityConfig.name), - ) - | diffRows => diffRows - } - } - - module FirstChangeEventPerChain = { - type t = Js.Dict.t - let getKey = chainId => chainId->Belt.Int.toString - 
let make = () => Js.Dict.empty() - let get = (self: t, ~chainId) => self->Utils.Dict.dangerouslyGetNonOption(getKey(chainId)) - - let setIfEarlier = (self: t, ~chainId, ~event: FetchState.blockNumberAndLogIndex) => { - let chainKey = chainId->Belt.Int.toString - switch self->Utils.Dict.dangerouslyGetNonOption(chainKey) { - | Some(existingEvent) => - if ( - (event.blockNumber, event.logIndex) < (existingEvent.blockNumber, existingEvent.logIndex) - ) { - self->Js.Dict.set(chainKey, event) - } - | None => self->Js.Dict.set(chainKey, event) - } - } - } - - let getFirstChangeEventPerChain = async ( - sql, - args: Args.t, - ~allEntities=Entities.allEntities, - ) => { - let startTime = Hrtime.makeTimer() - let firstChangeEventPerChain = FirstChangeEventPerChain.make() - - let _ = - await allEntities - ->Belt.Array.map(async entityConfig => { - let res = try await getFirstChangeEntityHistoryPerChain( - sql, - ~entityName=entityConfig.name, - ~getFirstChangeSerial=args->Args.makeGetFirstChangeSerial(~entityName=entityConfig.name), - ) catch { - | exn => - exn->ErrorHandling.mkLogAndRaise( - ~msg=`Failed to get first change entity history per chain for entity`, - ~logger=args->Args.getLogger(~entityName=entityConfig.name), - ) - } - - let chainHistoryRows = try res->S.parseOrThrow( - entityConfig.entityHistory.schemaRows, - ) catch { - | exn => - exn->ErrorHandling.mkLogAndRaise( - ~msg=`Failed to parse entity history rows from db on getFirstChangeEntityHistoryPerChain`, - ~logger=args->Args.getLogger(~entityName=entityConfig.name), - ) - } - - chainHistoryRows->Belt.Array.forEach(chainHistoryRow => { - firstChangeEventPerChain->FirstChangeEventPerChain.setIfEarlier( - ~chainId=chainHistoryRow.current.chain_id, - ~event={ - blockNumber: chainHistoryRow.current.block_number, - logIndex: chainHistoryRow.current.log_index, - }, - ) - }) - }) - ->Promise.all - - if Env.Benchmark.shouldSaveData { - let elapsedTimeMillis = Hrtime.timeSince(startTime)->Hrtime.toMillis->Hrtime.floatFromMillis - - Benchmark.addSummaryData( - ~group=rollbacksGroup, - ~label=`Get First Change Event Per Chain Time (ms)`, - ~value=elapsedTimeMillis, - ) - } - - firstChangeEventPerChain - } - let hasRows = async sql => { let all = await Entities.allEntities diff --git a/codegenerator/cli/templates/static/codegen/src/db/DbFunctionsImplementation.js b/codegenerator/cli/templates/static/codegen/src/db/DbFunctionsImplementation.js index 42878e2b8..d36a66914 100644 --- a/codegenerator/cli/templates/static/codegen/src/db/DbFunctionsImplementation.js +++ b/codegenerator/cli/templates/static/codegen/src/db/DbFunctionsImplementation.js @@ -1,21 +1,6 @@ const TableModule = require("envio/src/db/Table.res.js"); -const Utils = require("envio/src/Utils.res.js"); const { publicSchema } = require("./Db.res.js"); -// db operations for raw_events: -const MAX_ITEMS_PER_QUERY = 500; - -const chunkBatchQuery = (queryToExecute) => async (sql, entityDataArray) => { - const responses = []; - // Split entityDataArray into chunks of MAX_ITEMS_PER_QUERY - for (let i = 0; i < entityDataArray.length; i += MAX_ITEMS_PER_QUERY) { - const chunk = entityDataArray.slice(i, i + MAX_ITEMS_PER_QUERY); - const pendingRes = queryToExecute(sql, chunk); - responses.push(pendingRes); - } - return Promise.all(responses); -}; - module.exports.batchDeleteItemsInTable = (table, sql, pkArray) => { const primaryKeyFieldNames = TableModule.getPrimaryKeyFieldNames(table); @@ -30,221 +15,3 @@ module.exports.batchDeleteItemsInTable = (table, sql, pkArray) => { //May be best to 
make pkArray an array of objects with fieldName -> value } }; - -const batchSetEndOfBlockRangeScannedDataCore = (sql, rowDataArray) => { - return sql` - INSERT INTO ${sql(publicSchema)}."end_of_block_range_scanned_data" - ${sql(rowDataArray, "chain_id", "block_number", "block_hash")} - ON CONFLICT(chain_id, block_number) DO UPDATE - SET - "chain_id" = EXCLUDED."chain_id", - "block_number" = EXCLUDED."block_number", - "block_hash" = EXCLUDED."block_hash";`; -}; - -module.exports.batchSetEndOfBlockRangeScannedData = chunkBatchQuery( - batchSetEndOfBlockRangeScannedDataCore -); - -module.exports.readEndOfBlockRangeScannedDataForChain = (sql, chainId) => { - return sql` - SELECT * FROM ${sql(publicSchema)}."end_of_block_range_scanned_data" - WHERE - chain_id = ${chainId} - ORDER BY block_number ASC;`; -}; - -module.exports.deleteStaleEndOfBlockRangeScannedDataForChain = ( - sql, - chainId, - blockNumberThreshold -) => { - return sql` - DELETE - FROM ${sql(publicSchema)}."end_of_block_range_scanned_data" - WHERE chain_id = ${chainId} - AND block_number < ${blockNumberThreshold};`; -}; - -module.exports.rollbackEndOfBlockRangeScannedDataForChain = ( - sql, - chainId, - knownBlockNumber -) => { - return sql` - DELETE - FROM ${sql(publicSchema)}."end_of_block_range_scanned_data" - WHERE chain_id = ${chainId} - AND block_number > ${knownBlockNumber};`; -}; - -module.exports.readAllDynamicContracts = (sql, chainId) => sql` - SELECT * - FROM ${sql(publicSchema)}."dynamic_contract_registry" - WHERE chain_id = ${chainId};`; - -const makeHistoryTableName = (entityName) => entityName + "_history"; - -/** - Find the "first change" serial originating from the reorg chain above the safe block number - (Using serial to account for unordered multi chain reorgs, where an earier event on another chain could be rolled back) - - If for instance there are no entity changes based on the reorg chain, the other - chains do not need to be rolled back, and if the reorg chain has new included events, it does not matter - that if those events are processed out of order from other chains since this is "unordered_multichain_mode" -*/ -module.exports.getFirstChangeSerial_UnorderedMultichain = ( - sql, - reorgChainId, - safeBlockNumber, - entityName -) => - sql` - SELECT - MIN(serial) AS first_change_serial - FROM - ${sql(publicSchema)}.${sql(makeHistoryTableName(entityName))} - WHERE - entity_history_chain_id = ${reorgChainId} - AND entity_history_block_number > ${safeBlockNumber} - `; - -/** - Find the "first change" serial originating from any chain above the provided safe block - - Ordered multichain mode needs to ensure that all chains rollback to any event that occurred after the reorg chain - block number. Regardless of whether the reorg chain incurred any changes or not to entities. There could be no changes - on the orphaned blocks, but new changes on the reorged blocks where other chains need to be processed in order after this - fact. 
-*/ -module.exports.getFirstChangeSerial_OrderedMultichain = ( - sql, - safeBlockTimestamp, - reorgChainId, - safeBlockNumber, - entityName -) => - sql` - SELECT - MIN(serial) AS first_change_serial - FROM - ${sql(publicSchema)}.${sql(makeHistoryTableName(entityName))} - WHERE - entity_history_block_timestamp > ${safeBlockTimestamp} - OR - (entity_history_block_timestamp = ${safeBlockTimestamp} AND entity_history_chain_id > ${reorgChainId}) - OR - (entity_history_block_timestamp = ${safeBlockTimestamp} AND entity_history_chain_id = ${reorgChainId} AND entity_history_block_number > ${safeBlockNumber}) - `; - -module.exports.getFirstChangeEntityHistoryPerChain = ( - sql, - entityName, - getFirstChangeSerial -) => sql` - WITH - first_change AS ( - -- Step 1: Find the "first change" serial originating from the reorg chain above the safe block number - -- (Using serial to account for unordered multi chain reorgs, where an earier event on another chain could be rolled back) - ${getFirstChangeSerial(sql)} - ) - -- Step 2: Distinct on entity_history_chain_id, get the entity_history_block_number of the row with the - -- lowest serial >= the first change serial - SELECT DISTINCT - ON (entity_history_chain_id) * - FROM - ${sql(publicSchema)}.${sql(makeHistoryTableName(entityName))} - WHERE - serial >= ( - SELECT - first_change_serial - FROM - first_change - ) - ORDER BY - entity_history_chain_id, - serial - ASC; -- Select the row with the lowest serial per id -`; - -module.exports.deleteRolledBackEntityHistory = ( - sql, - entityName, - getFirstChangeSerial -) => sql` - WITH - first_change AS ( - -- Step 1: Find the "first change" serial originating from the reorg chain above the safe block number - -- (Using serial to account for unordered multi chain reorgs, where an earier event on another chain could be rolled back) - ${getFirstChangeSerial(sql)} - ) - -- Step 2: Delete all rows that have a serial >= the first change serial - DELETE FROM - ${sql(publicSchema)}.${sql(makeHistoryTableName(entityName))} - WHERE - serial >= ( - SELECT - first_change_serial - FROM - first_change - ) - -- Filter out rows with a chain_id of 0 since they are the copied history rows - -- check timestamp as well in case a future chain is added with id of 0 - AND NOT ( - entity_history_chain_id = 0 AND - entity_history_block_timestamp = 0 - ); - `; - -module.exports.getRollbackDiff = (sql, entityName, getFirstChangeSerial) => sql` - WITH - first_change AS ( - -- Step 1: Find the "first change" serial originating from the reorg chain above the safe block number - -- (Using serial to account for unordered multi chain reorgs, where an earier event on another chain could be rolled back) - ${getFirstChangeSerial(sql)} - ), - rollback_ids AS ( - -- Step 2: Get all unique entity ids of rows that require rollbacks where the row's serial is above the first change serial - SELECT DISTINCT - ON (id) after.* - FROM - ${sql(publicSchema)}.${sql(makeHistoryTableName(entityName))} after - WHERE - after.serial >= ( - SELECT - first_change_serial - FROM - first_change - ) - -- Filter out rows with a chain_id of 0 since they are the copied history rows - -- check timestamp as well in case a future chain is added with id of 0 - AND NOT ( - after.entity_history_chain_id = 0 AND - after.entity_history_block_timestamp = 0 - ) - ORDER BY - after.id, - after.serial ASC -- Select the row with the lowest serial per id - ) - -- Step 3: For each relevant id, join to the row on the "previous_entity_history" fields - SELECT - -- Select all before 
fields, overriding the needed values with defaults - before.*, - -- In the case where no previous row exists, coalesce the needed values since this new entity - -- will need to be deleted - COALESCE(before.id, after.id) AS id, - COALESCE(before.action, 'DELETE') AS action, - -- Deleting at 0 values will work fine for future rollbacks - COALESCE(before.entity_history_block_number, 0) AS entity_history_block_number, - COALESCE(before.entity_history_block_timestamp, 0) AS entity_history_block_timestamp, - COALESCE(before.entity_history_chain_id, 0) AS entity_history_chain_id, - COALESCE(before.entity_history_log_index, 0) AS entity_history_log_index - FROM - -- Use a RIGHT JOIN, to ensure that nulls get returned if there is no "before" row - ${sql(publicSchema)}.${sql(makeHistoryTableName(entityName))} before - RIGHT JOIN rollback_ids after ON before.id = after.id - AND before.entity_history_block_timestamp = after.previous_entity_history_block_timestamp - AND before.entity_history_chain_id = after.previous_entity_history_chain_id - AND before.entity_history_block_number = after.previous_entity_history_block_number - AND before.entity_history_log_index = after.previous_entity_history_log_index; -`; diff --git a/codegenerator/cli/templates/static/codegen/src/eventFetching/ChainFetcher.res b/codegenerator/cli/templates/static/codegen/src/eventFetching/ChainFetcher.res index 58ded8d68..3cf590642 100644 --- a/codegenerator/cli/templates/static/codegen/src/eventFetching/ChainFetcher.res +++ b/codegenerator/cli/templates/static/codegen/src/eventFetching/ChainFetcher.res @@ -20,17 +20,14 @@ type t = { firstEventBlockNumber: option, numEventsProcessed: int, numBatchesFetched: int, - lastBlockScannedHashes: ReorgDetection.LastBlockScannedHashes.t, - //An optional list of filters to apply on event queries - //Used for reorgs and restarts - processingFilters: option>, + reorgDetection: ReorgDetection.t, + safeCheckpointTracking: option, } //CONSTRUCTION let make = ( ~chainConfig: InternalConfig.chain, - ~lastBlockScannedHashes, - ~dynamicContracts: array, + ~dynamicContracts: array, ~startBlock, ~endBlock, ~firstEventBlockNumber, @@ -42,6 +39,8 @@ let make = ( ~numEventsProcessed, ~numBatchesFetched, ~isInReorgThreshold, + ~reorgCheckpoints: array, + ~maxReorgDepth, ): t => { // We don't need the router itself, but only validation logic, // since now event router is created for selection of events @@ -99,31 +98,18 @@ let make = ( contract.addresses->Array.forEach(address => { contracts->Array.push({ - FetchState.address, + Internal.address, contractName: contract.name, startBlock: switch contract.startBlock { | Some(startBlock) => startBlock | None => chainConfig.startBlock }, - register: Config, + registrationBlock: None, }) }) }) - dynamicContracts->Array.forEach(dc => - contracts->Array.push({ - FetchState.address: dc.contractAddress, - contractName: dc.contractName, - startBlock: dc.registeringEventBlockNumber, - register: DC({ - registeringEventLogIndex: dc.registeringEventLogIndex, - registeringEventBlockTimestamp: dc.registeringEventBlockTimestamp, - registeringEventContractName: dc.registeringEventContractName, - registeringEventName: dc.registeringEventName, - registeringEventSrcAddress: dc.registeringEventSrcAddress, - }), - }) - ) + dynamicContracts->Array.forEach(dc => contracts->Array.push(dc)) if notRegisteredEvents->Utils.Array.notEmpty { logger->Logging.childInfo( @@ -180,14 +166,20 @@ let make = ( ~targetBufferSize, ~chainId=chainConfig.id, ~blockLag=Pervasives.max( - 
!(config->Config.shouldRollbackOnReorg) || isInReorgThreshold - ? 0 - : chainConfig.confirmedBlockThreshold, + !(config->Config.shouldRollbackOnReorg) || isInReorgThreshold ? 0 : chainConfig.maxReorgDepth, Env.indexingBlockLag->Option.getWithDefault(0), ), ~onBlockConfigs?, ) + let chainReorgCheckpoints = reorgCheckpoints->Array.keepMapU(reorgCheckpoint => { + if reorgCheckpoint.chainId === chainConfig.id { + Some(reorgCheckpoint) + } else { + None + } + }) + { logger, chainConfig, @@ -195,7 +187,16 @@ let make = ( ~sources=chainConfig.sources, ~maxPartitionConcurrency=Env.maxPartitionConcurrency, ), - lastBlockScannedHashes, + reorgDetection: ReorgDetection.make( + ~chainReorgCheckpoints, + ~maxReorgDepth, + ~shouldRollbackOnReorg=config->Config.shouldRollbackOnReorg, + ), + safeCheckpointTracking: SafeCheckpointTracking.make( + ~maxReorgDepth, + ~shouldRollbackOnReorg=config->Config.shouldRollbackOnReorg, + ~chainReorgCheckpoints, + ), currentBlockHeight: 0, isProgressAtHead: false, fetchState, @@ -204,24 +205,21 @@ let make = ( timestampCaughtUpToHeadOrEndblock, numEventsProcessed, numBatchesFetched, - processingFilters: None, } } let makeFromConfig = (chainConfig: InternalConfig.chain, ~config, ~targetBufferSize) => { let logger = Logging.createChild(~params={"chainId": chainConfig.id}) - let lastBlockScannedHashes = ReorgDetection.LastBlockScannedHashes.empty( - ~confirmedBlockThreshold=chainConfig.confirmedBlockThreshold, - ) make( ~chainConfig, ~config, ~startBlock=chainConfig.startBlock, ~endBlock=chainConfig.endBlock, - ~lastBlockScannedHashes, + ~reorgCheckpoints=[], + ~maxReorgDepth=chainConfig.maxReorgDepth, ~firstEventBlockNumber=None, - ~progressBlockNumber=chainConfig.startBlock - 1, + ~progressBlockNumber=-1, ~timestampCaughtUpToHeadOrEndblock=None, ~numEventsProcessed=0, ~numBatchesFetched=0, @@ -237,35 +235,15 @@ let makeFromConfig = (chainConfig: InternalConfig.chain, ~config, ~targetBufferS */ let makeFromDbState = async ( chainConfig: InternalConfig.chain, - ~resumedChainState: InternalTable.Chains.t, + ~resumedChainState: Persistence.initialChainState, + ~reorgCheckpoints, ~isInReorgThreshold, ~config, ~targetBufferSize, - ~sql=Db.sql, ) => { let chainId = chainConfig.id let logger = Logging.createChild(~params={"chainId": chainId}) - // Since we deleted all contracts after the restart point, - // we can simply query all dcs we have in db - let dbRecoveredDynamicContracts = - await sql->DbFunctions.DynamicContractRegistry.readAllDynamicContracts(~chainId) - - let endOfBlockRangeScannedData = - await sql->DbFunctions.EndOfBlockRangeScannedData.readEndOfBlockRangeScannedDataForChain( - ~chainId, - ) - - let lastBlockScannedHashes = - endOfBlockRangeScannedData - ->Array.map(({blockNumber, blockHash}) => { - ReorgDetection.blockNumber, - blockHash, - }) - ->ReorgDetection.LastBlockScannedHashes.makeWithData( - ~confirmedBlockThreshold=chainConfig.confirmedBlockThreshold, - ) - Prometheus.ProgressEventsCount.set(~processedCount=resumedChainState.numEventsProcessed, ~chainId) let progressBlockNumber = @@ -275,17 +253,18 @@ let makeFromDbState = async ( : resumedChainState.startBlock - 1 make( - ~dynamicContracts=dbRecoveredDynamicContracts, + ~dynamicContracts=resumedChainState.dynamicContracts, ~chainConfig, ~startBlock=resumedChainState.startBlock, - ~endBlock=resumedChainState.endBlock->Js.Null.toOption, + ~endBlock=resumedChainState.endBlock, ~config, - ~lastBlockScannedHashes, - ~firstEventBlockNumber=resumedChainState.firstEventBlockNumber->Js.Null.toOption, + 
~reorgCheckpoints, + ~maxReorgDepth=resumedChainState.maxReorgDepth, + ~firstEventBlockNumber=resumedChainState.firstEventBlockNumber, ~progressBlockNumber, ~timestampCaughtUpToHeadOrEndblock=Env.updateSyncTimeOnRestart ? None - : resumedChainState.timestampCaughtUpToHeadOrEndblock->Js.Null.toOption, + : resumedChainState.timestampCaughtUpToHeadOrEndblock, ~numEventsProcessed=resumedChainState.numEventsProcessed, ~numBatchesFetched=0, ~logger, @@ -294,34 +273,6 @@ let makeFromDbState = async ( ) } -/** -Adds an event filter that will be passed to worker on query -isValid is a function that determines when the filter -should be cleaned up -*/ -let addProcessingFilter = (self: t, ~filter, ~isValid) => { - let processingFilter: processingFilter = {filter, isValid} - { - ...self, - processingFilters: switch self.processingFilters { - | Some(processingFilters) => Some(processingFilters->Array.concat([processingFilter])) - | None => Some([processingFilter]) - }, - } -} - -//Run the clean up condition "isNoLongerValid" against fetchState on each eventFilter and remove -//any that meet the cleanup condition -let cleanUpProcessingFilters = ( - processingFilters: array, - ~fetchState: FetchState.t, -) => { - switch processingFilters->Array.keep(processingFilter => processingFilter.isValid(~fetchState)) { - | [] => None - | filters => Some(filters) - } -} - /** * Helper function to get the configured start block for a contract from config */ @@ -341,7 +292,7 @@ let runContractRegistersOrThrow = async ( ~chain: ChainMap.Chain.t, ~config: Config.t, ) => { - let dynamicContracts = [] + let itemsWithDcs = [] let isDone = ref(false) let onRegister = (~item: Internal.item, ~contractAddress, ~contractName) => { @@ -352,7 +303,7 @@ let runContractRegistersOrThrow = async ( `Skipping contract registration: The context.add${(contractName: Enums.ContractType.t :> string)} was called after the contract register resolved. Use await or return a promise from the contract register handler to avoid this error.`, ) } else { - let {timestamp, blockNumber, logIndex, eventConfig, event} = eventItem + let {blockNumber} = eventItem // Use contract-specific start block if configured, otherwise fall back to registration block let contractStartBlock = switch getContractStartBlock( @@ -364,20 +315,20 @@ let runContractRegistersOrThrow = async ( | None => blockNumber } - let dc: FetchState.indexingContract = { + let dc: Internal.indexingContract = { address: contractAddress, contractName: (contractName: Enums.ContractType.t :> string), startBlock: contractStartBlock, - register: DC({ - registeringEventBlockTimestamp: timestamp, - registeringEventLogIndex: logIndex, - registeringEventName: eventConfig.name, - registeringEventContractName: eventConfig.contractName, - registeringEventSrcAddress: event.srcAddress, - }), + registrationBlock: Some(blockNumber), } - dynamicContracts->Array.push(dc) + switch item->Internal.getItemDcs { + | None => { + item->Internal.setItemDcs([dc]) + itemsWithDcs->Array.push(item) + } + | Some(dcs) => dcs->Array.push(dc) + } } } @@ -420,42 +371,26 @@ let runContractRegistersOrThrow = async ( } isDone.contents = true - dynamicContracts -} - -@inline -let applyProcessingFilters = (~item: Internal.item, ~processingFilters) => { - processingFilters->Js.Array2.every(processingFilter => processingFilter.filter(item)) + itemsWithDcs } -/** -Updates of fetchState and cleans up event filters. Should be used whenever updating fetchState -to ensure processingFilters are always valid. 
-Returns Error if the node with given id cannot be found (unexpected) -*/ let handleQueryResult = ( chainFetcher: t, ~query: FetchState.query, ~newItems, - ~dynamicContracts, + ~newItemsWithDcs, ~latestFetchedBlock, ) => { - let fs = switch dynamicContracts { + let fs = switch newItemsWithDcs { | [] => chainFetcher.fetchState - | _ => chainFetcher.fetchState->FetchState.registerDynamicContracts(dynamicContracts) + | _ => chainFetcher.fetchState->FetchState.registerDynamicContracts(newItemsWithDcs) } fs ->FetchState.handleQueryResult(~query, ~latestFetchedBlock, ~newItems) - ->Result.map(fetchState => { - { - ...chainFetcher, - fetchState, - processingFilters: switch chainFetcher.processingFilters { - | Some(processingFilters) => processingFilters->cleanUpProcessingFilters(~fetchState) - | None => None - }, - } + ->Result.map(fs => { + ...chainFetcher, + fetchState: fs, }) } @@ -475,7 +410,7 @@ let hasNoMoreEventsToProcess = (self: t) => { } let getHighestBlockBelowThreshold = (cf: t): int => { - let highestBlockBelowThreshold = cf.currentBlockHeight - cf.chainConfig.confirmedBlockThreshold + let highestBlockBelowThreshold = cf.currentBlockHeight - cf.chainConfig.maxReorgDepth highestBlockBelowThreshold < 0 ? 0 : highestBlockBelowThreshold } @@ -489,7 +424,7 @@ let getLastKnownValidBlock = async ( ~getBlockHashes=(chainFetcher.sourceManager->SourceManager.getActiveSource).getBlockHashes, ) => { let scannedBlockNumbers = - chainFetcher.lastBlockScannedHashes->ReorgDetection.LastBlockScannedHashes.getThresholdBlockNumbers( + chainFetcher.reorgDetection->ReorgDetection.getThresholdBlockNumbers( ~currentBlockHeight=chainFetcher.currentBlockHeight, ) @@ -524,7 +459,7 @@ let getLastKnownValidBlock = async ( while blockRef.contents->Option.isNone { let blockNumbersAndHashes = await getBlockHashes(scannedBlockNumbers) - switch chainFetcher.lastBlockScannedHashes->ReorgDetection.LastBlockScannedHashes.getLatestValidScannedBlock( + switch chainFetcher.reorgDetection->ReorgDetection.getLatestValidScannedBlock( ~blockNumbersAndHashes, ~currentBlockHeight=chainFetcher.currentBlockHeight, ~skipReorgDuplicationCheck=retryCount.contents > 2, diff --git a/codegenerator/cli/templates/static/codegen/src/eventFetching/ChainManager.res b/codegenerator/cli/templates/static/codegen/src/eventFetching/ChainManager.res index eedbc7042..184e246c4 100644 --- a/codegenerator/cli/templates/static/codegen/src/eventFetching/ChainManager.res +++ b/codegenerator/cli/templates/static/codegen/src/eventFetching/ChainManager.res @@ -1,6 +1,7 @@ open Belt type t = { + commitedCheckpointId: int, chainFetchers: ChainMap.t, multichain: InternalConfig.multichain, isInReorgThreshold: bool, @@ -23,6 +24,7 @@ let makeFromConfig = (~config: Config.t): t => { let chainFetchers = config.chainMap->ChainMap.map(ChainFetcher.makeFromConfig(_, ~config, ~targetBufferSize)) { + commitedCheckpointId: 0, chainFetchers, multichain: config.multichain, isInReorgThreshold: false, @@ -53,10 +55,13 @@ let makeFromDbState = async (~initialState: Persistence.initialState, ~config: C Prometheus.ProcessingMaxBatchSize.set(~maxBatchSize=config.batchSize) Prometheus.IndexingTargetBufferSize.set(~targetBufferSize) Prometheus.ReorgThreshold.set(~isInReorgThreshold) + initialState.cache->Utils.Dict.forEach(({effectName, count}) => { + Prometheus.EffectCacheCount.set(~count, ~effectName) + }) let chainFetchersArr = await initialState.chains - ->Array.map(async (resumedChainState: InternalTable.Chains.t) => { + ->Array.map(async (resumedChainState: 
Persistence.initialChainState) => { let chain = Config.getChain(config, ~chainId=resumedChainState.id) let chainConfig = config.chainMap->ChainMap.get(chain) @@ -64,6 +69,7 @@ let makeFromDbState = async (~initialState: Persistence.initialState, ~config: C chain, await chainConfig->ChainFetcher.makeFromDbState( ~resumedChainState, + ~reorgCheckpoints=initialState.reorgCheckpoints, ~isInReorgThreshold, ~targetBufferSize, ~config, @@ -75,6 +81,7 @@ let makeFromDbState = async (~initialState: Persistence.initialState, ~config: C let chainFetchers = ChainMap.fromArrayUnsafe(chainFetchersArr) { + commitedCheckpointId: initialState.checkpointId, multichain: config.multichain, chainFetchers, isInReorgThreshold, @@ -95,113 +102,28 @@ let setChainFetcher = (chainManager: t, chainFetcher: ChainFetcher.t) => { } } -let getFetchStates = (chainManager: t): ChainMap.t => { - chainManager.chainFetchers->ChainMap.map(cf => { - cf.fetchState - }) -} - let nextItemIsNone = (chainManager: t): bool => { - !Batch.hasMultichainReadyItem(chainManager->getFetchStates, ~multichain=chainManager.multichain) + !Batch.hasMultichainReadyItem( + chainManager.chainFetchers->ChainMap.map(cf => { + cf.fetchState + }), + ~multichain=chainManager.multichain, + ) } let createBatch = (chainManager: t, ~batchSizeTarget: int): Batch.t => { - let refTime = Hrtime.makeTimer() - let fetchStates = chainManager->getFetchStates - - let mutBatchSizePerChain = Js.Dict.empty() - let items = if ( - switch chainManager.multichain { - | Unordered => true - | Ordered => fetchStates->ChainMap.size === 1 - } - ) { - Batch.prepareUnorderedBatch(~batchSizeTarget, ~fetchStates, ~mutBatchSizePerChain) - } else { - Batch.prepareOrderedBatch(~batchSizeTarget, ~fetchStates, ~mutBatchSizePerChain) - } - let batchSizePerChain = mutBatchSizePerChain - - let dcsToStoreByChainId = Js.Dict.empty() - // Needed to: - // - Recalculate the computed queue sizes - // - Accumulate registered dynamic contracts to store in the db - // - Trigger onBlock pointer update - let updatedFetchStates = fetchStates->ChainMap.map(fetchState => { - switch batchSizePerChain->Utils.Dict.dangerouslyGetNonOption(fetchState.chainId->Int.toString) { - | Some(batchSize) => - let leftItems = fetchState.buffer->Js.Array2.sliceFrom(batchSize) - switch fetchState.dcsToStore { - | [] => fetchState->FetchState.updateInternal(~mutItems=leftItems) - | dcs => { - let leftDcsToStore = [] - let batchDcs = [] - let updatedFetchState = - fetchState->FetchState.updateInternal(~mutItems=leftItems, ~dcsToStore=leftDcsToStore) - let nextProgressBlockNumber = updatedFetchState->FetchState.getProgressBlockNumber - - dcs->Array.forEach(dc => { - // Important: This should be a registering block number. - // This works for now since dc.startBlock is a registering block number. 
- if dc.startBlock <= nextProgressBlockNumber { - batchDcs->Array.push(dc) - } else { - // Mutate the array we passed to the updateInternal beforehand - leftDcsToStore->Array.push(dc) - } - }) - - dcsToStoreByChainId->Js.Dict.set(fetchState.chainId->Int.toString, batchDcs) - updatedFetchState - } - } - // Skip not affected chains - | None => fetchState - } - }) - - let progressedChains = [] - chainManager.chainFetchers - ->ChainMap.entries - ->Array.forEach(((chain, chainFetcher)) => { - let updatedFetchState = updatedFetchStates->ChainMap.get(chain) - let nextProgressBlockNumber = updatedFetchState->FetchState.getProgressBlockNumber - let maybeItemsCountInBatch = - batchSizePerChain->Utils.Dict.dangerouslyGetNonOption( - chain->ChainMap.Chain.toChainId->Int.toString, - ) - if ( - chainFetcher.committedProgressBlockNumber < nextProgressBlockNumber || - // It should never be 0 - maybeItemsCountInBatch->Option.isSome - ) { - let chainBatchSize = maybeItemsCountInBatch->Option.getWithDefault(0) - progressedChains - ->Js.Array2.push( - ( - { - chainId: chain->ChainMap.Chain.toChainId, - batchSize: chainBatchSize, - progressBlockNumber: nextProgressBlockNumber, - totalEventsProcessed: chainFetcher.numEventsProcessed + chainBatchSize, - // Snapshot the value at the moment of batch creation - // so we don't have a case where we can't catch up the head because of the - // defference between processing and new blocks - isProgressAtHead: nextProgressBlockNumber >= chainFetcher.currentBlockHeight, - }: Batch.progressedChain - ), - ) - ->ignore - } - }) - - { - items, - progressedChains, - updatedFetchStates, - dcsToStoreByChainId, - creationTimeMs: refTime->Hrtime.timeSince->Hrtime.toMillis->Hrtime.intFromMillis, - } + Batch.make( + ~checkpointIdBeforeBatch=chainManager.commitedCheckpointId, + ~chainsBeforeBatch=chainManager.chainFetchers->ChainMap.map((cf): Batch.chainBeforeBatch => { + fetchState: cf.fetchState, + progressBlockNumber: cf.committedProgressBlockNumber, + totalEventsProcessed: cf.numEventsProcessed, + sourceBlockNumber: cf.currentBlockHeight, + reorgDetection: cf.reorgDetection, + }), + ~multichain=chainManager.multichain, + ~batchSizeTarget, + ) } let isProgressAtHead = chainManager => @@ -214,17 +136,31 @@ let isActivelyIndexing = chainManager => ->ChainMap.values ->Js.Array2.every(ChainFetcher.isActivelyIndexing) -let getSafeReorgBlocks = (chainManager: t): EntityHistory.safeReorgBlocks => { - let chainIds = [] - let blockNumbers = [] - chainManager.chainFetchers - ->ChainMap.values - ->Array.forEach(cf => { - chainIds->Js.Array2.push(cf.chainConfig.id)->ignore - blockNumbers->Js.Array2.push(cf->ChainFetcher.getHighestBlockBelowThreshold)->ignore - }) - { - chainIds, - blockNumbers, +let getSafeCheckpointId = (chainManager: t) => { + let chainFetchers = chainManager.chainFetchers->ChainMap.values + + let infinity = (%raw(`Infinity`): int) + let result = ref(infinity) + + for idx in 0 to chainFetchers->Array.length - 1 { + let chainFetcher = chainFetchers->Array.getUnsafe(idx) + switch chainFetcher.safeCheckpointTracking { + | None => () // Skip chains with maxReorgDepth = 0 + | Some(safeCheckpointTracking) => { + let safeCheckpointId = + safeCheckpointTracking->SafeCheckpointTracking.getSafeCheckpointId( + ~sourceBlockNumber=chainFetcher.currentBlockHeight, + ) + if safeCheckpointId < result.contents { + result := safeCheckpointId + } + } + } + } + + if result.contents === infinity || result.contents === 0 { + None // No safe checkpoint found + } else { + Some(result.contents) } } 
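Aside on the new ChainManager.getSafeCheckpointId above: it is a guarded minimum over the chain fetchers — chains without safe-checkpoint tracking are skipped, and a result of 0 or Infinity means there is no commit-safe checkpoint yet. A minimal TypeScript sketch of that reduction, using hypothetical ChainFetcherLike/SafeCheckpointTrackingLike shapes in place of the ReScript types:

// Sketch only: mirrors the guarded-minimum logic of ChainManager.getSafeCheckpointId.
// The type names and the getSafeCheckpointId helper below are illustrative stand-ins,
// not part of this diff.
interface SafeCheckpointTrackingLike {
  getSafeCheckpointId(sourceBlockNumber: number): number;
}

interface ChainFetcherLike {
  currentBlockHeight: number;
  // Absent when the chain has maxReorgDepth = 0, i.e. it never constrains the result.
  safeCheckpointTracking?: SafeCheckpointTrackingLike;
}

function getSafeCheckpointId(chainFetchers: ChainFetcherLike[]): number | undefined {
  let result = Infinity;
  for (const cf of chainFetchers) {
    // Chains that do not track safe checkpoints are ignored.
    if (!cf.safeCheckpointTracking) continue;
    const safeId = cf.safeCheckpointTracking.getSafeCheckpointId(cf.currentBlockHeight);
    if (safeId < result) result = safeId;
  }
  // Infinity: no tracked chain reported a checkpoint; 0: nothing is safe to commit yet.
  return result === Infinity || result === 0 ? undefined : result;
}

Under this shape, a single tracked chain whose safe checkpoint is still 0 keeps the overall result undefined, matching the conservative None returned by the ReScript version.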
diff --git a/codegenerator/cli/templates/static/codegen/src/globalState/GlobalState.res b/codegenerator/cli/templates/static/codegen/src/globalState/GlobalState.res index 15c78087c..14300fcf5 100644 --- a/codegenerator/cli/templates/static/codegen/src/globalState/GlobalState.res +++ b/codegenerator/cli/templates/static/codegen/src/globalState/GlobalState.res @@ -1,15 +1,23 @@ open Belt type chain = ChainMap.Chain.t -type rollbackState = NoRollback | RollingBack(chain) | RollbackInMemStore(InMemoryStore.t) +type rollbackState = + | NoRollback + | ReorgDetected({chain: chain}) + | FindingReorgDepth + | FoundReorgDepth({ + chain: chain, + lastKnownValidBlockNumber: int, + lastKnownValidBlockTimestamp: int, + }) + | RollbackReady({diffInMemoryStore: InMemoryStore.t}) module WriteThrottlers = { type t = { chainMetaData: Throttler.t, - pruneStaleEndBlockData: ChainMap.t, pruneStaleEntityHistory: Throttler.t, } - let make = (~config: Config.t): t => { + let make = (): t => { let chainMetaData = { let intervalMillis = Env.ThrottleWrites.chainMetadataIntervalMillis let logger = Logging.createChild( @@ -21,18 +29,6 @@ module WriteThrottlers = { Throttler.make(~intervalMillis, ~logger) } - let pruneStaleEndBlockData = config.chainMap->ChainMap.map(cfg => { - let intervalMillis = Env.ThrottleWrites.pruneStaleDataIntervalMillis - let logger = Logging.createChild( - ~params={ - "context": "Throttler for pruning stale endblock data", - "intervalMillis": intervalMillis, - "chain": cfg.id, - }, - ) - Throttler.make(~intervalMillis, ~logger) - }) - let pruneStaleEntityHistory = { let intervalMillis = Env.ThrottleWrites.pruneStaleDataIntervalMillis let logger = Logging.createChild( @@ -43,7 +39,7 @@ module WriteThrottlers = { ) Throttler.make(~intervalMillis, ~logger) } - {chainMetaData, pruneStaleEndBlockData, pruneStaleEntityHistory} + {chainMetaData, pruneStaleEntityHistory} } } @@ -70,7 +66,7 @@ let make = (~config: Config.t, ~chainManager: ChainManager.t, ~shouldUseTui=fals chainManager, indexerStartTime: Js.Date.make(), rollbackState: NoRollback, - writeThrottlers: WriteThrottlers.make(~config), + writeThrottlers: WriteThrottlers.make(), loadManager: LoadManager.make(), shouldUseTui, id: 0, @@ -79,16 +75,20 @@ let make = (~config: Config.t, ~chainManager: ChainManager.t, ~shouldUseTui=fals let getId = self => self.id let incrementId = self => {...self, id: self.id + 1} -let setRollingBack = (self, chain) => {...self, rollbackState: RollingBack(chain)} let setChainManager = (self, chainManager) => { ...self, chainManager, } -let isRollingBack = state => +let isPreparingRollback = state => switch state.rollbackState { - | RollingBack(_) => true - | _ => false + | NoRollback + | // We already updated fetch states here + // so we treat it as not rolling back + RollbackReady(_) => false + | FindingReorgDepth + | ReorgDetected(_) + | FoundReorgDepth(_) => true } type partitionQueryResponse = { @@ -110,38 +110,35 @@ type action = // So after it's finished we dispatch the submit action to get the latest fetch state. 
| SubmitPartitionQueryResponse({ newItems: array, - dynamicContracts: array, + newItemsWithDcs: array, currentBlockHeight: int, latestFetchedBlock: FetchState.blockNumberAndTimestamp, query: FetchState.query, chain: chain, }) | FinishWaitingForNewBlock({chain: chain, currentBlockHeight: int}) - | EventBatchProcessed({ - progressedChains: array, - items: array, - }) + | EventBatchProcessed({batch: Batch.t}) | StartProcessingBatch + | StartFindingReorgDepth + | FindReorgDepth({ + chain: chain, + lastKnownValidBlockNumber: int, + lastKnownValidBlockTimestamp: int, + }) | EnterReorgThreshold | UpdateQueues({ - updatedFetchStates: ChainMap.t, + progressedChainsById: dict, // Needed to prevent overwriting the blockLag // set by EnterReorgThreshold shouldEnterReorgThreshold: bool, }) | SuccessExit | ErrorExit(ErrorHandling.t) - | SetRollbackState(InMemoryStore.t, ChainManager.t) - | ResetRollbackState + | SetRollbackState({diffInMemoryStore: InMemoryStore.t, rollbackedChainManager: ChainManager.t}) type queryChain = CheckAllChains | Chain(chain) type task = | NextQuery(queryChain) - | UpdateEndOfBlockRangeScannedData({ - chain: chain, - blockNumberThreshold: int, - nextEndOfBlockRangeScannedData: DbFunctions.EndOfBlockRangeScannedData.endOfBlockRangeScannedData, - }) | ProcessPartitionQueryResponse(partitionQueryResponse) | ProcessEventBatch | UpdateChainMetaDataAndCheckForExit(shouldExit) @@ -195,11 +192,9 @@ let updateChainMetadataTable = (cm: ChainManager.t, ~throttler: Throttler.t) => Takes in a chain manager and sets all chains timestamp caught up to head when valid state lines up and returns an updated chain manager */ -let updateProgressedChains = ( - chainManager: ChainManager.t, - ~progressedChains: array, - ~items: array, -) => { +let updateProgressedChains = (chainManager: ChainManager.t, ~batch: Batch.t) => { + Prometheus.ProgressBatchCount.increment() + let nextQueueItemIsNone = chainManager->ChainManager.nextItemIsNone let allChainsAtHead = chainManager->ChainManager.isProgressAtHead @@ -207,22 +202,22 @@ let updateProgressedChains = ( let chainFetchers = chainManager.chainFetchers->ChainMap.map(cf => { let chain = ChainMap.Chain.makeUnsafe(~chainId=cf.chainConfig.id) - let maybeProgressData = - progressedChains->Js.Array2.find(progressedChain => - progressedChain.chainId === chain->ChainMap.Chain.toChainId + let maybeChainAfterBatch = + batch.progressedChainsById->Utils.Dict.dangerouslyGetByIntNonOption( + chain->ChainMap.Chain.toChainId, ) - let cf = switch maybeProgressData { - | Some(progressData) => { - if cf.committedProgressBlockNumber !== progressData.progressBlockNumber { + let cf = switch maybeChainAfterBatch { + | Some(chainAfterBatch) => { + if cf.committedProgressBlockNumber !== chainAfterBatch.progressBlockNumber { Prometheus.ProgressBlockNumber.set( - ~blockNumber=progressData.progressBlockNumber, + ~blockNumber=chainAfterBatch.progressBlockNumber, ~chainId=chain->ChainMap.Chain.toChainId, ) } - if cf.numEventsProcessed !== progressData.totalEventsProcessed { + if cf.numEventsProcessed !== chainAfterBatch.totalEventsProcessed { Prometheus.ProgressEventsCount.set( - ~processedCount=progressData.totalEventsProcessed, + ~processedCount=chainAfterBatch.totalEventsProcessed, ~chainId=chain->ChainMap.Chain.toChainId, ) } @@ -232,21 +227,24 @@ let updateProgressedChains = ( // we need to calculate it once, by using the first item in a batch firstEventBlockNumber: switch cf.firstEventBlockNumber { | Some(_) => cf.firstEventBlockNumber - | None => - switch 
items->Js.Array2.find(item => - switch item { - | Internal.Event({chain: eventChain}) => eventChain === chain - | Internal.Block({onBlockConfig: {chainId}}) => - chainId === chain->ChainMap.Chain.toChainId - } - ) { - | Some(item) => Some(item->Internal.getItemBlockNumber) - | None => None - } + | None => batch->Batch.findFirstEventBlockNumber(~chainId=chain->ChainMap.Chain.toChainId) + }, + committedProgressBlockNumber: chainAfterBatch.progressBlockNumber, + numEventsProcessed: chainAfterBatch.totalEventsProcessed, + isProgressAtHead: cf.isProgressAtHead || chainAfterBatch.isProgressAtHeadWhenBatchCreated, + safeCheckpointTracking: switch cf.safeCheckpointTracking { + | Some(safeCheckpointTracking) => + Some( + safeCheckpointTracking->SafeCheckpointTracking.updateOnNewBatch( + ~sourceBlockNumber=cf.currentBlockHeight, + ~chainId=chain->ChainMap.Chain.toChainId, + ~batchCheckpointIds=batch.checkpointIds, + ~batchCheckpointBlockNumbers=batch.checkpointBlockNumbers, + ~batchCheckpointChainIds=batch.checkpointChainIds, + ), + ) + | None => None }, - isProgressAtHead: cf.isProgressAtHead || progressData.isProgressAtHead, - committedProgressBlockNumber: progressData.progressBlockNumber, - numEventsProcessed: progressData.totalEventsProcessed, } } | None => cf @@ -324,6 +322,10 @@ let updateProgressedChains = ( { ...chainManager, + commitedCheckpointId: switch batch.checkpointIds->Utils.Array.last { + | Some(checkpointId) => checkpointId + | None => chainManager.commitedCheckpointId + }, chainFetchers, } } @@ -341,7 +343,6 @@ let validatePartitionQueryResponse = ( reorgGuard, fromBlockQueried, } = response - let {rangeLastBlock} = reorgGuard if currentBlockHeight > chainFetcher.currentBlockHeight { Prometheus.SourceHeight.set( @@ -372,16 +373,12 @@ let validatePartitionQueryResponse = ( ) } - let (updatedLastBlockScannedHashes, reorgResult) = - chainFetcher.lastBlockScannedHashes->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( - ~reorgGuard, - ~currentBlockHeight, - ~shouldRollbackOnReorg=state.config->Config.shouldRollbackOnReorg, - ) + let (updatedReorgDetection, reorgResult: ReorgDetection.reorgResult) = + chainFetcher.reorgDetection->ReorgDetection.registerReorgGuard(~reorgGuard, ~currentBlockHeight) let updatedChainFetcher = { ...chainFetcher, - lastBlockScannedHashes: updatedLastBlockScannedHashes, + reorgDetection: updatedReorgDetection, } let nextState = { @@ -392,7 +389,7 @@ let validatePartitionQueryResponse = ( }, } - let isRollback = switch reorgResult { + let isRollbackOnReorg = switch reorgResult { | ReorgDetected(reorgDetected) => { chainFetcher.logger->Logging.childInfo( reorgDetected->ReorgDetection.reorgDetectedToLogParams( @@ -409,40 +406,23 @@ let validatePartitionQueryResponse = ( | NoReorg => false } - if isRollback { - (nextState->incrementId->setRollingBack(chain), [Rollback]) - } else { - let updateEndOfBlockRangeScannedDataArr = - //Only update endOfBlockRangeScannedData if rollbacks are enabled - state.config->Config.shouldRollbackOnReorg - ? 
[ - UpdateEndOfBlockRangeScannedData({ - chain, - blockNumberThreshold: rangeLastBlock.blockNumber - - updatedChainFetcher.chainConfig.confirmedBlockThreshold, - nextEndOfBlockRangeScannedData: { - chainId: chain->ChainMap.Chain.toChainId, - blockNumber: rangeLastBlock.blockNumber, - blockHash: rangeLastBlock.blockHash, - }, - }), - ] - : [] - + if isRollbackOnReorg { ( - nextState, - Array.concat( - updateEndOfBlockRangeScannedDataArr, - [ProcessPartitionQueryResponse(partitionQueryResponse)], - ), + { + ...nextState->incrementId, + rollbackState: ReorgDetected({chain: chain}), + }, + [Rollback], ) + } else { + (nextState, [ProcessPartitionQueryResponse(partitionQueryResponse)]) } } let submitPartitionQueryResponse = ( state, ~newItems, - ~dynamicContracts, + ~newItemsWithDcs, ~currentBlockHeight, ~latestFetchedBlock, ~query, @@ -452,7 +432,7 @@ let submitPartitionQueryResponse = ( let updatedChainFetcher = chainFetcher - ->ChainFetcher.handleQueryResult(~query, ~latestFetchedBlock, ~newItems, ~dynamicContracts) + ->ChainFetcher.handleQueryResult(~query, ~latestFetchedBlock, ~newItems, ~newItemsWithDcs) ->Utils.unwrapResultExn ->updateChainFetcherCurrentBlockHeight(~currentBlockHeight) @@ -461,10 +441,7 @@ let submitPartitionQueryResponse = ( numBatchesFetched: updatedChainFetcher.numBatchesFetched + 1, } - let wasFetchingAtHead = chainFetcher.isProgressAtHead - let isCurrentlyFetchingAtHead = updatedChainFetcher.isProgressAtHead - - if !wasFetchingAtHead && isCurrentlyFetchingAtHead { + if !chainFetcher.isProgressAtHead && updatedChainFetcher.isProgressAtHead { updatedChainFetcher.logger->Logging.childInfo("All events have been fetched") } @@ -487,7 +464,6 @@ let processPartitionQueryResponse = async ( {chain, response, query}: partitionQueryResponse, ~dispatchAction, ) => { - let chainFetcher = state.chainManager.chainFetchers->ChainMap.get(chain) let { parsedQueueItems, latestFetchedBlockNumber, @@ -501,26 +477,17 @@ let processPartitionQueryResponse = async ( for idx in 0 to parsedQueueItems->Array.length - 1 { let item = parsedQueueItems->Array.getUnsafe(idx) let eventItem = item->Internal.castUnsafeEventItem - if ( - switch chainFetcher.processingFilters { - | None => true - | Some(processingFilters) => ChainFetcher.applyProcessingFilters(~item, ~processingFilters) - } - ) { - if eventItem.eventConfig.contractRegister !== None { - itemsWithContractRegister->Array.push(item) - } - - // TODO: Don't really need to keep it in the queue - // when there's no handler (besides raw_events, processed counter, and dcsToStore consuming) - newItems->Array.push(item) + if eventItem.eventConfig.contractRegister !== None { + itemsWithContractRegister->Array.push(item) } + + // TODO: Don't really need to keep it in the queue + // when there's no handler (besides raw_events, processed counter, and dcsToStore consuming) + newItems->Array.push(item) } - let dynamicContracts = switch itemsWithContractRegister { - | [] as empty => - // A small optimisation to not recreate an empty array - empty->(Utils.magic: array => array) + let newItemsWithDcs = switch itemsWithContractRegister { + | [] as empty => empty | _ => await ChainFetcher.runContractRegistersOrThrow( ~itemsWithContractRegister, @@ -532,7 +499,7 @@ let processPartitionQueryResponse = async ( dispatchAction( SubmitPartitionQueryResponse({ newItems, - dynamicContracts, + newItemsWithDcs, currentBlockHeight, latestFetchedBlock: { blockNumber: latestFetchedBlockNumber, @@ -557,11 +524,34 @@ let updateChainFetcher = (chainFetcherUpdate, ~state, 
~chain) => { ) } +let onEnterReorgThreshold = (~state: t) => { + Logging.info("Reorg threshold reached") + Prometheus.ReorgThreshold.set(~isInReorgThreshold=true) + + let chainFetchers = state.chainManager.chainFetchers->ChainMap.map(chainFetcher => { + { + ...chainFetcher, + fetchState: chainFetcher.fetchState->FetchState.updateInternal( + ~blockLag=Env.indexingBlockLag->Option.getWithDefault(0), + ), + } + }) + + { + ...state, + chainManager: { + ...state.chainManager, + chainFetchers, + isInReorgThreshold: true, + }, + } +} + let actionReducer = (state: t, action: action) => { switch action { | FinishWaitingForNewBlock({chain, currentBlockHeight}) => { - let isInReorgThreshold = state.chainManager.isInReorgThreshold - let isBelowReorgThreshold = !isInReorgThreshold && state.config->Config.shouldRollbackOnReorg + let isBelowReorgThreshold = + !state.chainManager.isInReorgThreshold && state.config->Config.shouldRollbackOnReorg let shouldEnterReorgThreshold = isBelowReorgThreshold && state.chainManager.chainFetchers @@ -570,37 +560,27 @@ let actionReducer = (state: t, action: action) => { chainFetcher.fetchState->FetchState.isReadyToEnterReorgThreshold(~currentBlockHeight) }) - ( - { - ...state, - chainManager: { - ...state.chainManager, - isInReorgThreshold: isInReorgThreshold || shouldEnterReorgThreshold, - chainFetchers: state.chainManager.chainFetchers->ChainMap.update( - chain, - chainFetcher => { - if shouldEnterReorgThreshold { - { - ...chainFetcher, - fetchState: chainFetcher.fetchState->FetchState.updateInternal( - ~blockLag=Env.indexingBlockLag->Option.getWithDefault(0), - ), - } - } else { - chainFetcher - }->updateChainFetcherCurrentBlockHeight(~currentBlockHeight) - }, - ), - }, + let state = { + ...state, + chainManager: { + ...state.chainManager, + chainFetchers: state.chainManager.chainFetchers->ChainMap.update(chain, chainFetcher => { + chainFetcher->updateChainFetcherCurrentBlockHeight(~currentBlockHeight) + }), }, - [NextQuery(Chain(chain))], - ) + } + + if shouldEnterReorgThreshold { + (onEnterReorgThreshold(~state), [NextQuery(CheckAllChains)]) + } else { + (state, [NextQuery(Chain(chain))]) + } } | ValidatePartitionQueryResponse(partitionQueryResponse) => state->validatePartitionQueryResponse(partitionQueryResponse) | SubmitPartitionQueryResponse({ newItems, - dynamicContracts, + newItemsWithDcs, currentBlockHeight, latestFetchedBlock, query, @@ -608,13 +588,13 @@ let actionReducer = (state: t, action: action) => { }) => state->submitPartitionQueryResponse( ~newItems, - ~dynamicContracts, + ~newItemsWithDcs, ~currentBlockHeight, ~latestFetchedBlock, ~query, ~chain, ) - | EventBatchProcessed({progressedChains, items}) => + | EventBatchProcessed({batch}) => let maybePruneEntityHistory = state.config->Config.shouldPruneHistory( ~isInReorgThreshold=state.chainManager.isInReorgThreshold, @@ -624,7 +604,10 @@ let actionReducer = (state: t, action: action) => { let state = { ...state, - chainManager: state.chainManager->updateProgressedChains(~progressedChains, ~items), + // Can safely reset rollback state, since overwrite is not possible. + // If rollback is pending, the EventBatchProcessed will be handled by the invalid action reducer instead. 
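        // (Concretely: detecting a reorg bumps the state id via incrementId, so an
        // in-flight EventBatchProcessed dispatched with the old id falls through to
        // invalidatedActionReducer, which keeps the pending rollback state and
        // re-dispatches [Rollback] once the batch has been committed.)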
+ rollbackState: NoRollback, + chainManager: state.chainManager->updateProgressedChains(~batch), currentlyProcessingBatch: false, processedBatches: state.processedBatches + 1, } @@ -654,33 +637,27 @@ let actionReducer = (state: t, action: action) => { ) | StartProcessingBatch => ({...state, currentlyProcessingBatch: true}, []) - | EnterReorgThreshold => - Logging.info("Reorg threshold reached") - Prometheus.ReorgThreshold.set(~isInReorgThreshold=true) - - let chainFetchers = state.chainManager.chainFetchers->ChainMap.map(chainFetcher => { - { - ...chainFetcher, - fetchState: chainFetcher.fetchState->FetchState.updateInternal( - ~blockLag=Env.indexingBlockLag->Option.getWithDefault(0), - ), - } - }) - - ( + | StartFindingReorgDepth => ({...state, rollbackState: FindingReorgDepth}, []) + | FindReorgDepth({chain, lastKnownValidBlockNumber, lastKnownValidBlockTimestamp}) => ( { ...state, - chainManager: { - ...state.chainManager, - chainFetchers, - isInReorgThreshold: true, - }, + rollbackState: FoundReorgDepth({ + chain, + lastKnownValidBlockNumber, + lastKnownValidBlockTimestamp, + }), }, - [NextQuery(CheckAllChains)], + [Rollback], ) - | UpdateQueues({updatedFetchStates, shouldEnterReorgThreshold}) => + | EnterReorgThreshold => (onEnterReorgThreshold(~state), [NextQuery(CheckAllChains)]) + | UpdateQueues({progressedChainsById, shouldEnterReorgThreshold}) => let chainFetchers = state.chainManager.chainFetchers->ChainMap.mapWithKey((chain, cf) => { - let fs = ChainMap.get(updatedFetchStates, chain) + let fs = switch progressedChainsById->Utils.Dict.dangerouslyGetByIntNonOption( + chain->ChainMap.Chain.toChainId, + ) { + | Some(chainAfterBatch) => chainAfterBatch.fetchState + | None => cf.fetchState + } { ...cf, fetchState: shouldEnterReorgThreshold @@ -701,11 +678,16 @@ let actionReducer = (state: t, action: action) => { }, [NextQuery(CheckAllChains)], ) - | SetRollbackState(inMemoryStore, chainManager) => ( - {...state, rollbackState: RollbackInMemStore(inMemoryStore), chainManager}, + | SetRollbackState({diffInMemoryStore, rollbackedChainManager}) => ( + { + ...state, + rollbackState: RollbackReady({ + diffInMemoryStore: diffInMemoryStore, + }), + chainManager: rollbackedChainManager, + }, [NextQuery(CheckAllChains), ProcessEventBatch], ) - | ResetRollbackState => ({...state, rollbackState: NoRollback}, []) | SuccessExit => { Logging.info("Exiting with success") NodeJs.process->NodeJs.exitWithCode(Success) @@ -719,14 +701,19 @@ let actionReducer = (state: t, action: action) => { } let invalidatedActionReducer = (state: t, action: action) => - switch (state, action) { - | ({rollbackState: RollingBack(_)}, EventBatchProcessed(_)) => + switch action { + | EventBatchProcessed({batch}) if state->isPreparingRollback => Logging.info("Finished processing batch before rollback, actioning rollback") ( - {...state, currentlyProcessingBatch: false, processedBatches: state.processedBatches + 1}, + { + ...state, + chainManager: state.chainManager->updateProgressedChains(~batch), + currentlyProcessingBatch: false, + processedBatches: state.processedBatches + 1, + }, [Rollback], ) - | (_, ErrorExit(_)) => actionReducer(state, action) + | ErrorExit(_) => actionReducer(state, action) | _ => Logging.info({ "msg": "Invalidated action discarded", @@ -744,7 +731,7 @@ let checkAndFetchForChain = ( ~dispatchAction, ) => async chain => { let chainFetcher = state.chainManager.chainFetchers->ChainMap.get(chain) - if !isRollingBack(state) { + if !isPreparingRollback(state) { let {currentBlockHeight, fetchState} = 
chainFetcher await chainFetcher.sourceManager->SourceManager.fetchNext( @@ -781,51 +768,16 @@ let injectedTaskReducer = ( switch task { | ProcessPartitionQueryResponse(partitionQueryResponse) => state->processPartitionQueryResponse(partitionQueryResponse, ~dispatchAction)->Promise.done - | UpdateEndOfBlockRangeScannedData({ - chain, - blockNumberThreshold, - nextEndOfBlockRangeScannedData, - }) => - let timeRef = Hrtime.makeTimer() - await Db.sql->DbFunctions.EndOfBlockRangeScannedData.setEndOfBlockRangeScannedData( - nextEndOfBlockRangeScannedData, - ) - - if Env.Benchmark.shouldSaveData { - let elapsedTimeMillis = Hrtime.timeSince(timeRef)->Hrtime.toMillis->Hrtime.intFromMillis - Benchmark.addSummaryData( - ~group="Other", - ~label=`Chain ${chain->ChainMap.Chain.toString} UpdateEndOfBlockRangeScannedData (ms)`, - ~value=elapsedTimeMillis->Belt.Int.toFloat, - ) - } - - //These prune functions can be scheduled and throttled if a more recent prune function gets called - //before the current one is executed - let runPrune = async () => { - let timeRef = Hrtime.makeTimer() - await Db.sql->DbFunctions.EndOfBlockRangeScannedData.deleteStaleEndOfBlockRangeScannedDataForChain( - ~chainId=chain->ChainMap.Chain.toChainId, - ~blockNumberThreshold, - ) - - if Env.Benchmark.shouldSaveData { - let elapsedTimeMillis = Hrtime.timeSince(timeRef)->Hrtime.toMillis->Hrtime.intFromMillis - Benchmark.addSummaryData( - ~group="Other", - ~label=`Chain ${chain->ChainMap.Chain.toString} PruneStaleData (ms)`, - ~value=elapsedTimeMillis->Belt.Int.toFloat, - ) - } - } - - let throttler = state.writeThrottlers.pruneStaleEndBlockData->ChainMap.get(chain) - throttler->Throttler.schedule(runPrune) | PruneStaleEntityHistory => let runPrune = async () => { - let safeReorgBlocks = state.chainManager->ChainManager.getSafeReorgBlocks + switch state.chainManager->ChainManager.getSafeCheckpointId { + | None => () + | Some(safeCheckpointId) => + await Db.sql->InternalTable.Checkpoints.pruneStaleCheckpoints( + ~pgSchema=Env.Db.publicSchema, + ~safeCheckpointId, + ) - if safeReorgBlocks.chainIds->Utils.Array.notEmpty { for idx in 0 to Entities.allEntities->Array.length - 1 { if idx !== 0 { // Add some delay between entities @@ -838,8 +790,9 @@ let injectedTaskReducer = ( let () = await Db.sql->EntityHistory.pruneStaleEntityHistory( ~entityName=entityConfig.name, + ~entityIndex=entityConfig.index, ~pgSchema=Env.Db.publicSchema, - ~safeReorgBlocks, + ~safeCheckpointId, ) } catch { | exn => @@ -848,12 +801,7 @@ let injectedTaskReducer = ( ~logger=Logging.createChild( ~params={ "entityName": entityConfig.name, - "safeBlockNumbers": safeReorgBlocks.chainIds - ->Js.Array2.mapi((chainId, idx) => ( - chainId->Belt.Int.toString, - safeReorgBlocks.blockNumbers->Js.Array2.unsafe_get(idx), - )) - ->Js.Dict.fromArray, + "safeCheckpointId": safeCheckpointId, }, ), ) @@ -896,103 +844,66 @@ let injectedTaskReducer = ( ->Promise.all } | ProcessEventBatch => - if !state.currentlyProcessingBatch && !isRollingBack(state) { + if !state.currentlyProcessingBatch && !isPreparingRollback(state) { let batch = state.chainManager->ChainManager.createBatch(~batchSizeTarget=state.config.batchSize) - let updatedFetchStates = batch.updatedFetchStates + let progressedChainsById = batch.progressedChainsById + let totalBatchSize = batch.totalBatchSize let isInReorgThreshold = state.chainManager.isInReorgThreshold + let shouldSaveHistory = state.config->Config.shouldSaveHistory(~isInReorgThreshold) + let isBelowReorgThreshold = !state.chainManager.isInReorgThreshold 
&& state.config->Config.shouldRollbackOnReorg let shouldEnterReorgThreshold = isBelowReorgThreshold && - updatedFetchStates - ->ChainMap.keys - ->Array.every(chain => { - updatedFetchStates - ->ChainMap.get(chain) - ->FetchState.isReadyToEnterReorgThreshold( - ~currentBlockHeight=( - state.chainManager.chainFetchers->ChainMap.get(chain) - ).currentBlockHeight, + state.chainManager.chainFetchers + ->ChainMap.values + ->Array.every(chainFetcher => { + let fetchState = switch progressedChainsById->Utils.Dict.dangerouslyGetByIntNonOption( + chainFetcher.fetchState.chainId, + ) { + | Some(chainAfterBatch) => chainAfterBatch.fetchState + | None => chainFetcher.fetchState + } + fetchState->FetchState.isReadyToEnterReorgThreshold( + ~currentBlockHeight=chainFetcher.currentBlockHeight, ) }) + if shouldEnterReorgThreshold { dispatchAction(EnterReorgThreshold) } - switch batch { - | {progressedChains: []} => () - | {items: [], progressedChains} => - dispatchAction(StartProcessingBatch) - // For this case there shouldn't be any FetchState changes - // so we don't dispatch UpdateQueues - only update the progress for chains without events - await Db.sql->InternalTable.Chains.setProgressedChains( - ~pgSchema=Db.publicSchema, - ~progressedChains, - ) - // FIXME: When state.rollbackState is RollbackInMemStore - // If we increase progress in this case (no items) - // and then indexer restarts - there's a high chance of missing - // the rollback. This should be tested and fixed. - dispatchAction(EventBatchProcessed({progressedChains, items: batch.items})) - | {items, progressedChains, updatedFetchStates, dcsToStoreByChainId} => + if progressedChainsById->Utils.Dict.isEmpty { + () + } else { if Env.Benchmark.shouldSaveData { let group = "Other" - Benchmark.addSummaryData( - ~group, - ~label=`Batch Creation Time (ms)`, - ~value=batch.creationTimeMs->Belt.Int.toFloat, - ) Benchmark.addSummaryData( ~group, ~label=`Batch Size`, - ~value=items->Array.length->Belt.Int.toFloat, + ~value=totalBatchSize->Belt.Int.toFloat, ) } dispatchAction(StartProcessingBatch) - dispatchAction(UpdateQueues({updatedFetchStates, shouldEnterReorgThreshold})) + dispatchAction(UpdateQueues({progressedChainsById, shouldEnterReorgThreshold})) //In the case of a rollback, use the provided in memory store //With rolled back values let rollbackInMemStore = switch state.rollbackState { - | RollbackInMemStore(inMemoryStore) => Some(inMemoryStore) - | NoRollback - | RollingBack( - _, - ) /* This is an impossible case due to the surrounding if statement check */ => - None + | RollbackReady({diffInMemoryStore}) => Some(diffInMemoryStore) + | _ => None } let inMemoryStore = rollbackInMemStore->Option.getWithDefault(InMemoryStore.make()) - if dcsToStoreByChainId->Utils.Dict.size > 0 { - let shouldSaveHistory = state.config->Config.shouldSaveHistory(~isInReorgThreshold) - inMemoryStore->InMemoryStore.setDcsToStore(dcsToStoreByChainId, ~shouldSaveHistory) - } - - state.chainManager.chainFetchers - ->ChainMap.keys - ->Array.forEach(chain => { - let chainId = chain->ChainMap.Chain.toChainId - switch progressedChains->Js.Array2.find(progressedChain => - progressedChain.chainId === chainId - ) { - | Some(progressData) => - Prometheus.ProcessingBatchSize.set(~batchSize=progressData.batchSize, ~chainId) - Prometheus.ProcessingBlockNumber.set( - ~blockNumber=progressData.progressBlockNumber, - ~chainId, - ) - | None => Prometheus.ProcessingBatchSize.set(~batchSize=0, ~chainId) - } - }) + inMemoryStore->InMemoryStore.setBatchDcs(~batch, ~shouldSaveHistory) 
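        // Rollback state lifecycle, as pieced together from the reducers in this file:
        //   NoRollback -> ReorgDetected -> FindingReorgDepth -> FoundReorgDepth -> RollbackReady.
        // When RollbackReady, the diff in-memory store is reused above so the rolled-back
        // values are persisted together with this batch's writes, and the subsequent
        // EventBatchProcessed action resets the state back to NoRollback.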
switch await EventProcessing.processEventBatch( - ~items, - ~progressedChains, + ~batch, ~inMemoryStore, ~isInReorgThreshold, ~loadManager=state.loadManager, @@ -1006,13 +917,8 @@ let injectedTaskReducer = ( exn->ErrorHandling.make(~msg="A top level unexpected error occurred during processing") dispatchAction(ErrorExit(errHandler)) | res => - if rollbackInMemStore->Option.isSome { - //if the batch was executed with a rollback inMemoryStore - //reset the rollback state once the batch has been processed - dispatchAction(ResetRollbackState) - } switch res { - | Ok() => dispatchAction(EventBatchProcessed({progressedChains, items})) + | Ok() => dispatchAction(EventBatchProcessed({batch: batch})) | Error(errHandler) => dispatchAction(ErrorExit(errHandler)) } } @@ -1021,17 +927,39 @@ let injectedTaskReducer = ( | Rollback => //If it isn't processing a batch currently continue with rollback otherwise wait for current batch to finish processing switch state { - | {currentlyProcessingBatch: false, rollbackState: RollingBack(reorgChain)} => + | {rollbackState: NoRollback | RollbackReady(_)} => + Js.Exn.raiseError("Internal error: Rollback initiated with invalid state") + | {rollbackState: ReorgDetected({chain})} => { + let chainFetcher = state.chainManager.chainFetchers->ChainMap.get(chain) + + dispatchAction(StartFindingReorgDepth) + let { + blockNumber: lastKnownValidBlockNumber, + blockTimestamp: lastKnownValidBlockTimestamp, + }: ReorgDetection.blockDataWithTimestamp = + await chainFetcher->getLastKnownValidBlock + + dispatchAction( + FindReorgDepth({chain, lastKnownValidBlockNumber, lastKnownValidBlockTimestamp}), + ) + } + // We can come to this case when event batch finished processing + // while we are still finding the reorg depth + // Do nothing here, just wait for reorg depth to be found + | {rollbackState: FindingReorgDepth} => () + | {rollbackState: FoundReorgDepth(_), currentlyProcessingBatch: true} => + Logging.info("Waiting for batch to finish processing before executing rollback") + | { + rollbackState: FoundReorgDepth({ + chain: reorgChain, + lastKnownValidBlockNumber, + lastKnownValidBlockTimestamp, + }), + } => let startTime = Hrtime.makeTimer() let chainFetcher = state.chainManager.chainFetchers->ChainMap.get(reorgChain) - let { - blockNumber: lastKnownValidBlockNumber, - blockTimestamp: lastKnownValidBlockTimestamp, - }: ReorgDetection.blockDataWithTimestamp = - await chainFetcher->getLastKnownValidBlock - let logger = Logging.createChildFrom( ~logger=chainFetcher.logger, ~params={ @@ -1049,104 +977,136 @@ let injectedTaskReducer = ( let reorgChainId = reorgChain->ChainMap.Chain.toChainId - //Get the first change event that occurred on each chain after the last known valid block - //Uses a different method depending on if the reorg chain is ordered or unordered - let firstChangeEventIdentifierPerChain = - await Db.sql->DbFunctions.EntityHistory.getFirstChangeEventPerChain( - switch state.config.multichain { - | Unordered => - UnorderedMultichain({ - reorgChainId, - safeBlockNumber: lastKnownValidBlockNumber, - }) - | Ordered => - OrderedMultichain({ - safeBlockTimestamp: lastKnownValidBlockTimestamp, - reorgChainId, - safeBlockNumber: lastKnownValidBlockNumber, - }) - }, - ) + let rollbackTargetCheckpointId = { + switch await Db.sql->InternalTable.Checkpoints.getRollbackTargetCheckpoint( + ~pgSchema=Env.Db.publicSchema, + ~reorgChainId, + ~lastKnownValidBlockNumber, + ) { + | [checkpoint] => checkpoint["id"] + | _ => 0 + } + } - 
firstChangeEventIdentifierPerChain->DbFunctions.EntityHistory.FirstChangeEventPerChain.setIfEarlier( - ~chainId=reorgChainId, - ~event={ - blockNumber: lastKnownValidBlockNumber + 1, - logIndex: 0, - }, - ) + let eventsProcessedDiffPerChain = Js.Dict.empty() + let newProgressBlockNumberPerChain = Js.Dict.empty() + let rollbackedProcessedEvents = ref(0) + + { + let rollbackProgressDiff = + await Db.sql->InternalTable.Checkpoints.getRollbackProgressDiff( + ~pgSchema=Env.Db.publicSchema, + ~rollbackTargetCheckpointId, + ) + for idx in 0 to rollbackProgressDiff->Js.Array2.length - 1 { + let diff = rollbackProgressDiff->Js.Array2.unsafe_get(idx) + eventsProcessedDiffPerChain->Utils.Dict.setByInt( + diff["chain_id"], + switch diff["events_processed_diff"]->Int.fromString { + | Some(eventsProcessedDiff) => { + rollbackedProcessedEvents := + rollbackedProcessedEvents.contents + eventsProcessedDiff + eventsProcessedDiff + } + | None => + Js.Exn.raiseError( + `Unexpedted case: Invalid events processed diff ${diff["events_processed_diff"]}`, + ) + }, + ) + newProgressBlockNumberPerChain->Utils.Dict.setByInt( + diff["chain_id"], + if rollbackTargetCheckpointId === 0 && diff["chain_id"] === reorgChainId { + Pervasives.min(diff["new_progress_block_number"], lastKnownValidBlockNumber) + } else { + diff["new_progress_block_number"] + }, + ) + } + } let chainFetchers = state.chainManager.chainFetchers->ChainMap.mapWithKey((chain, cf) => { - switch firstChangeEventIdentifierPerChain->DbFunctions.EntityHistory.FirstChangeEventPerChain.get( - ~chainId=chain->ChainMap.Chain.toChainId, + switch newProgressBlockNumberPerChain->Utils.Dict.dangerouslyGetByIntNonOption( + chain->ChainMap.Chain.toChainId, ) { - | Some(firstChangeEvent) => - let fetchState = cf.fetchState->FetchState.rollback(~firstChangeEvent) + | Some(newProgressBlockNumber) => + let fetchState = + cf.fetchState->FetchState.rollback(~targetBlockNumber=newProgressBlockNumber) + let newTotalEventsProcessed = + cf.numEventsProcessed - + eventsProcessedDiffPerChain + ->Utils.Dict.dangerouslyGetByIntNonOption(chain->ChainMap.Chain.toChainId) + ->Option.getUnsafe + + if cf.committedProgressBlockNumber !== newProgressBlockNumber { + Prometheus.ProgressBlockNumber.set( + ~blockNumber=newProgressBlockNumber, + ~chainId=chain->ChainMap.Chain.toChainId, + ) + } + if cf.numEventsProcessed !== newTotalEventsProcessed { + Prometheus.ProgressEventsCount.set( + ~processedCount=newTotalEventsProcessed, + ~chainId=chain->ChainMap.Chain.toChainId, + ) + } - let rolledBackCf = { + { ...cf, - lastBlockScannedHashes: chain == reorgChain - ? cf.lastBlockScannedHashes->ReorgDetection.LastBlockScannedHashes.rollbackToValidBlockNumber( + reorgDetection: chain == reorgChain + ? 
cf.reorgDetection->ReorgDetection.rollbackToValidBlockNumber( ~blockNumber=lastKnownValidBlockNumber, ) - : cf.lastBlockScannedHashes, + : cf.reorgDetection, + safeCheckpointTracking: switch cf.safeCheckpointTracking { + | Some(safeCheckpointTracking) => + Some( + safeCheckpointTracking->SafeCheckpointTracking.rollback( + ~targetBlockNumber=newProgressBlockNumber, + ), + ) + | None => None + }, fetchState, + committedProgressBlockNumber: newProgressBlockNumber, + numEventsProcessed: newTotalEventsProcessed, } - //On other chains, filter out evennts based on the first change present on the chain after the reorg - rolledBackCf->ChainFetcher.addProcessingFilter( - ~filter=item => { - switch item { - | Internal.Event({blockNumber, logIndex}) - | Internal.Block({blockNumber, logIndex}) => - //Filter out events that occur passed the block where the query starts but - //are lower than the timestamp where we rolled back to - (blockNumber, logIndex) >= (firstChangeEvent.blockNumber, firstChangeEvent.logIndex) - } - }, - ~isValid=(~fetchState) => { - //Remove the event filter once the fetchState has fetched passed the - //blockNumber of the valid first change event - fetchState->FetchState.bufferBlockNumber <= firstChangeEvent.blockNumber - }, - ) + | None => //If no change was produced on the given chain after the reorged chain, no need to rollback anything cf } }) - //Construct a rolledback in Memory store - let rollbackResult = await IO.RollBack.rollBack( - ~chainId=reorgChain->ChainMap.Chain.toChainId, - ~blockTimestamp=lastKnownValidBlockTimestamp, - ~blockNumber=lastKnownValidBlockNumber, - ~logIndex=0, - ~isUnorderedMultichainMode=switch state.config.multichain { - | Unordered => true - | Ordered => false - }, - ) + // Construct in Memory store with rollback diff + let diff = await IO.prepareRollbackDiff(~rollbackTargetCheckpointId) let chainManager = { ...state.chainManager, + commitedCheckpointId: rollbackTargetCheckpointId, chainFetchers, } logger->Logging.childTrace({ "msg": "Finished rollback on reorg", "entityChanges": { - "deleted": rollbackResult["deletedEntities"], - "upserted": rollbackResult["setEntities"], + "deleted": diff["deletedEntities"], + "upserted": diff["setEntities"], }, + "rollbackedEvents": rollbackedProcessedEvents.contents, + "beforeCheckpointId": state.chainManager.commitedCheckpointId, + "targetCheckpointId": rollbackTargetCheckpointId, }) - logger->Logging.childTrace({ - "msg": "Initial diff of rollback entity history", - "diff": rollbackResult["fullDiff"], - }) - Prometheus.RollbackSuccess.increment(~timeMillis=Hrtime.timeSince(startTime)->Hrtime.toMillis) - - dispatchAction(SetRollbackState(rollbackResult["inMemStore"], chainManager)) + Prometheus.RollbackSuccess.increment( + ~timeMillis=Hrtime.timeSince(startTime)->Hrtime.toMillis, + ~rollbackedProcessedEvents=rollbackedProcessedEvents.contents, + ) - | _ => Logging.info("Waiting for batch to finish processing before executing rollback") //wait for batch to finish processing + dispatchAction( + SetRollbackState({ + diffInMemoryStore: diff["inMemStore"], + rollbackedChainManager: chainManager, + }), + ) } } } diff --git a/codegenerator/cli/templates/static/codegen/src/globalState/GlobalStateManager.res b/codegenerator/cli/templates/static/codegen/src/globalState/GlobalStateManager.res index d50879ad9..33d585a96 100644 --- a/codegenerator/cli/templates/static/codegen/src/globalState/GlobalStateManager.res +++ b/codegenerator/cli/templates/static/codegen/src/globalState/GlobalStateManager.res @@ -10,6 +10,11 @@ 
module type State = { let getId: t => int } +let handleFatalError = e => { + e->ErrorHandling.make(~msg="Indexer has failed with an unxpected error")->ErrorHandling.log + NodeJs.process->NodeJs.exitWithCode(Failure) +} + module MakeManager = (S: State) => { type t = {mutable state: S.t, stateUpdatedHook: option unit>} @@ -32,9 +37,7 @@ module MakeManager = (S: State) => { self.state = nextState nextTasks->Array.forEach(task => dispatchTask(self, task)) } catch { - | e => - e->ErrorHandling.make(~msg="Indexer has failed with an unxpected error")->ErrorHandling.log - NodeJs.process->NodeJs.exitWithCode(Failure) + | e => e->handleFatalError } } and dispatchTask = (self, task: S.task) => { @@ -43,9 +46,18 @@ module MakeManager = (S: State) => { if stateId !== self.state->S.getId { Logging.info("Invalidated task discarded") } else { - S.taskReducer(self.state, task, ~dispatchAction=action => - dispatchAction(~stateId, self, action) - )->ignore + try { + S.taskReducer(self.state, task, ~dispatchAction=action => + dispatchAction(~stateId, self, action) + ) + ->Promise.catch(e => { + e->handleFatalError + Promise.resolve() + }) + ->ignore + } catch { + | e => e->handleFatalError + } } }, 0)->ignore } diff --git a/scenarios/erc20_multichain_factory/test/MockChainData.res b/scenarios/erc20_multichain_factory/test/MockChainData.res index 0c7fddf83..8c7e16288 100644 --- a/scenarios/erc20_multichain_factory/test/MockChainData.res +++ b/scenarios/erc20_multichain_factory/test/MockChainData.res @@ -1,9 +1 @@ -module Indexer = { - module ErrorHandling = ErrorHandling - module Types = Types - module Config = Config - module Source = Source - module FetchState = FetchState -} - -include Helpers.ChainMocking.Make(Indexer) +include Helpers.ChainMocking.Make() diff --git a/scenarios/erc20_multichain_factory/test/RollbackDynamicContract_test.res b/scenarios/erc20_multichain_factory/test/RollbackDynamicContract_test.res index 2b952b628..dea2b8962 100644 --- a/scenarios/erc20_multichain_factory/test/RollbackDynamicContract_test.res +++ b/scenarios/erc20_multichain_factory/test/RollbackDynamicContract_test.res @@ -100,26 +100,6 @@ ensure that this doesn't trigger a reorg | _ => Js.Exn.raiseError("Unexpected chain") } ) - - let getUpdateEndofBlockRangeScannedData = ( - mcdMap, - ~chain, - ~blockNumber, - ~blockNumberThreshold, - ) => { - let {blockNumber, blockHash} = - mcdMap->ChainMap.get(chain)->MockChainData.getBlock(~blockNumber)->Option.getUnsafe - - GlobalState.UpdateEndOfBlockRangeScannedData({ - blockNumberThreshold, - chain, - nextEndOfBlockRangeScannedData: { - blockNumber, - blockHash, - chainId: chain->ChainMap.Chain.toChainId, - }, - }) - } } module Sql = RollbackMultichain_test.Sql diff --git a/scenarios/erc20_multichain_factory/test/RollbackMultichain_test.res b/scenarios/erc20_multichain_factory/test/RollbackMultichain_test.res index 7aa98f4c3..f28360a2c 100644 --- a/scenarios/erc20_multichain_factory/test/RollbackMultichain_test.res +++ b/scenarios/erc20_multichain_factory/test/RollbackMultichain_test.res @@ -166,26 +166,6 @@ module Mock = { | _ => Js.Exn.raiseError("Unexpected chain") } ) - - let getUpdateEndofBlockRangeScannedData = ( - mcdMap, - ~chain, - ~blockNumber, - ~blockNumberThreshold, - ) => { - let {blockNumber, blockHash} = - mcdMap->ChainMap.get(chain)->MockChainData.getBlock(~blockNumber)->Option.getUnsafe - - GlobalState.UpdateEndOfBlockRangeScannedData({ - blockNumberThreshold, - chain, - nextEndOfBlockRangeScannedData: { - blockNumber, - blockHash, - chainId: 
chain->ChainMap.Chain.toChainId, - }, - }) - } } module Sql = { diff --git a/scenarios/erc20_multichain_factory/test/TestDeleteEntity.res b/scenarios/erc20_multichain_factory/test/TestDeleteEntity.res index 1a3600e5c..f6fe493e7 100644 --- a/scenarios/erc20_multichain_factory/test/TestDeleteEntity.res +++ b/scenarios/erc20_multichain_factory/test/TestDeleteEntity.res @@ -88,26 +88,6 @@ module Mock = { | _ => Js.Exn.raiseError("Unexpected chain") } ) - - let getUpdateEndofBlockRangeScannedData = ( - mcdMap, - ~chain, - ~blockNumber, - ~blockNumberThreshold, - ) => { - let {blockNumber, blockHash} = - mcdMap->ChainMap.get(chain)->MockChainData.getBlock(~blockNumber)->Option.getUnsafe - - GlobalState.UpdateEndOfBlockRangeScannedData({ - blockNumberThreshold, - chain, - nextEndOfBlockRangeScannedData: { - blockNumber, - blockHash, - chainId: chain->ChainMap.Chain.toChainId, - }, - }) - } } module Sql = RollbackMultichain_test.Sql diff --git a/scenarios/erc20_multichain_factory/test/TestWhereQuery.res b/scenarios/erc20_multichain_factory/test/TestWhereQuery.res index 823db7b66..957a5332f 100644 --- a/scenarios/erc20_multichain_factory/test/TestWhereQuery.res +++ b/scenarios/erc20_multichain_factory/test/TestWhereQuery.res @@ -82,26 +82,6 @@ module Mock = { | _ => Js.Exn.raiseError("Unexpected chain") } ) - - let getUpdateEndofBlockRangeScannedData = ( - mcdMap, - ~chain, - ~blockNumber, - ~blockNumberThreshold, - ) => { - let {blockNumber, blockHash} = - mcdMap->ChainMap.get(chain)->MockChainData.getBlock(~blockNumber)->Option.getUnsafe - - GlobalState.UpdateEndOfBlockRangeScannedData({ - blockNumberThreshold, - chain, - nextEndOfBlockRangeScannedData: { - blockNumber, - blockHash, - chainId: chain->ChainMap.Chain.toChainId, - }, - }) - } } module Sql = RollbackMultichain_test.Sql diff --git a/scenarios/fuel_test/pnpm-lock.yaml b/scenarios/fuel_test/pnpm-lock.yaml index e39b10002..d34c22a28 100644 --- a/scenarios/fuel_test/pnpm-lock.yaml +++ b/scenarios/fuel_test/pnpm-lock.yaml @@ -2013,6 +2013,8 @@ snapshots: envio@file:../../codegenerator/cli/npm/envio(typescript@5.2.2): dependencies: + '@elastic/ecs-pino-format': 1.4.0 + '@envio-dev/hyperfuel-client': 1.2.2 '@envio-dev/hypersync-client': 0.6.6 bignumber.js: 9.1.2 pino: 8.16.1 diff --git a/scenarios/helpers/src/ChainMocking.res b/scenarios/helpers/src/ChainMocking.res index 63c7e7a71..699334fe0 100644 --- a/scenarios/helpers/src/ChainMocking.res +++ b/scenarios/helpers/src/ChainMocking.res @@ -34,8 +34,7 @@ module Crypto = { input->hashKeccak256(~toString=v => anyToString(v) ++ previousHash) } -module Make = (Indexer: Indexer.S) => { - open Indexer +module Make = () => { type log = { item: Internal.item, srcAddress: Address.t, diff --git a/scenarios/helpers/src/Indexer.res b/scenarios/helpers/src/Indexer.res deleted file mode 100644 index f59269862..000000000 --- a/scenarios/helpers/src/Indexer.res +++ /dev/null @@ -1,75 +0,0 @@ -module type S = { - module ErrorHandling: { - type t - } - - module FetchState: { - type indexingContract - - type selection = { - eventConfigs: array, - dependsOnAddresses: bool, - } - - type queryTarget = - | Head - | EndBlock({toBlock: int}) - | Merge({ - // The partition we are going to merge into - // It shouldn't be fetching during the query - intoPartitionId: string, - toBlock: int, - }) - - type query = { - partitionId: string, - fromBlock: int, - selection: selection, - addressesByContractName: dict>, - target: queryTarget, - indexingContracts: dict, - } - } - - module Source: { - type blockRangeFetchStats = 
{ - @as("total time elapsed (ms)") totalTimeElapsed: int, - @as("parsing time (ms)") parsingTimeElapsed?: int, - @as("page fetch time (ms)") pageFetchTime?: int, - } - type blockRangeFetchResponse = { - currentBlockHeight: int, - reorgGuard: ReorgDetection.reorgGuard, - parsedQueueItems: array, - fromBlockQueried: int, - latestFetchedBlockNumber: int, - latestFetchedBlockTimestamp: int, - stats: blockRangeFetchStats, - } - type sourceFor = Sync | Fallback - type t = { - name: string, - sourceFor: sourceFor, - chain: ChainMap.Chain.t, - poweredByHyperSync: bool, - /* Frequency (in ms) used when polling for new events on this network. */ - pollingInterval: int, - getBlockHashes: ( - ~blockNumbers: array, - ~logger: Pino.t, - ) => promise, exn>>, - getHeightOrThrow: unit => promise, - getItemsOrThrow: ( - ~fromBlock: int, - ~toBlock: option, - ~addressesByContractName: dict>, - ~indexingContracts: dict, - ~currentBlockHeight: int, - ~partitionId: string, - ~selection: FetchState.selection, - ~retry: int, - ~logger: Pino.t, - ) => promise, - } - } -} diff --git a/scenarios/test_codegen/pnpm-lock.yaml b/scenarios/test_codegen/pnpm-lock.yaml index 109c67edf..485adabe9 100644 --- a/scenarios/test_codegen/pnpm-lock.yaml +++ b/scenarios/test_codegen/pnpm-lock.yaml @@ -35,6 +35,10 @@ importers: viem: specifier: 2.21.0 version: 2.21.0(typescript@5.5.4) + optionalDependencies: + generated: + specifier: ./generated + version: link:generated devDependencies: '@glennsl/rescript-jest': specifier: ^0.9.2 @@ -90,10 +94,6 @@ importers: ts-node: specifier: ^10.9.1 version: 10.9.2(@types/node@18.19.47)(typescript@5.5.4) - optionalDependencies: - generated: - specifier: ./generated - version: link:generated ../helpers: dependencies: @@ -143,7 +143,7 @@ importers: specifier: 16.4.5 version: 16.4.5 envio: - specifier: file:/Users/enguerrand/dev/nim/hyperindex/codegenerator/target/debug/envio/../../../cli/npm/envio + specifier: file:/Users/dzakh/code/envio/hyperindex/codegenerator/target/debug/envio/../../../cli/npm/envio version: file:../../codegenerator/cli/npm/envio(typescript@5.5.4) ethers: specifier: 6.8.0 diff --git a/scenarios/test_codegen/schema.graphql b/scenarios/test_codegen/schema.graphql index 514a811d7..6f129a911 100644 --- a/scenarios/test_codegen/schema.graphql +++ b/scenarios/test_codegen/schema.graphql @@ -125,6 +125,14 @@ type EntityWithAllTypes { optEnumField: AccountType } +type EntityWith63LenghtName______________________________________one { + id: ID! +} + +type EntityWith63LenghtName______________________________________two { + id: ID! 
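  # Presumably padded to 63 characters to exercise Postgres's 63-byte
  # identifier limit for the generated table and index names.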
+} + # We are using insert unnest for the case # So test it as well type EntityWithAllNonArrayTypes { diff --git a/scenarios/test_codegen/test/ChainFetcher_test.res b/scenarios/test_codegen/test/ChainFetcher_test.res deleted file mode 100644 index 77f743af8..000000000 --- a/scenarios/test_codegen/test/ChainFetcher_test.res +++ /dev/null @@ -1,75 +0,0 @@ -open RescriptMocha -open Belt - -describe("Test Processing Filters", () => { - // Assert.deepEqual doesn't work, because of deeply nested rescript-schema objects - // Assert.equal doesn't work because the array is always recreated on filter - // So I added the helper - let assertEqualItems = (items1, items2) => { - Assert.equal( - items1->Array.length, - items2->Array.length, - ~message="Length of the items doesn't match", - ) - items1->Array.forEachWithIndex((i, item1) => { - let item2 = items2->Js.Array2.unsafe_get(i) - Assert.equal(item1, item2) - }) - } - - it("Keeps items when there are not filters", () => { - let items = MockEvents.eventBatchItems - assertEqualItems( - items, - items->Js.Array2.filter( - item => ChainFetcher.applyProcessingFilters(~item, ~processingFilters=[]), - ), - ) - }) - - it("Keeps items when all filters return true", () => { - let items = MockEvents.eventBatchItems - assertEqualItems( - items, - items->Js.Array2.filter( - item => - ChainFetcher.applyProcessingFilters( - ~item, - ~processingFilters=[ - { - filter: _ => true, - isValid: (~fetchState as _) => true, - }, - { - filter: _ => true, - isValid: (~fetchState as _) => true, - }, - ], - ), - ), - ) - }) - - it("Removes all items when there is one filter returning false", () => { - let items = MockEvents.eventBatchItems - assertEqualItems( - [], - items->Js.Array2.filter( - item => - ChainFetcher.applyProcessingFilters( - ~item, - ~processingFilters=[ - { - filter: _ => false, - isValid: (~fetchState as _) => true, - }, - { - filter: _ => true, - isValid: (~fetchState as _) => true, - }, - ], - ), - ), - ) - }) -}) diff --git a/scenarios/test_codegen/test/ChainManager_test.res b/scenarios/test_codegen/test/ChainManager_test.res index 6ec3530ea..7a420dec7 100644 --- a/scenarios/test_codegen/test/ChainManager_test.res +++ b/scenarios/test_codegen/test/ChainManager_test.res @@ -31,7 +31,7 @@ let populateChainQueuesWithRandomEvents = (~runTime=1000, ~maxBlockTime=15, ()) ~contracts=[], ~startBlock=0, ~targetBufferSize=5000, - ~chainId=0, + ~chainId=1, ) let fetchState = ref(fetcherStateInit) @@ -108,12 +108,14 @@ let populateChainQueuesWithRandomEvents = (~runTime=1000, ~maxBlockTime=15, ()) ), chainConfig, // This is quite a hack - but it works! 
- lastBlockScannedHashes: ReorgDetection.LastBlockScannedHashes.empty( - ~confirmedBlockThreshold=200, + reorgDetection: ReorgDetection.make( + ~chainReorgCheckpoints=[], + ~maxReorgDepth=200, + ~shouldRollbackOnReorg=false, ), + safeCheckpointTracking: None, isProgressAtHead: false, currentBlockHeight: 0, - processingFilters: None, } mockChainFetcher @@ -123,6 +125,7 @@ let populateChainQueuesWithRandomEvents = (~runTime=1000, ~maxBlockTime=15, ()) { ChainManager.chainFetchers, multichain: Ordered, + commitedCheckpointId: 0, isInReorgThreshold: false, }, numberOfMockEventsCreated.contents, @@ -154,20 +157,22 @@ describe("ChainManager", () => { let numberOfMockEventsReadFromQueues = ref(0) let allEventsRead = [] let rec testThatCreatedEventsAreOrderedCorrectly = (chainManager, lastEvent) => { - let eventsInBlock = ChainManager.createBatch(chainManager, ~batchSizeTarget=10000) + let {items, totalBatchSize, progressedChainsById} = ChainManager.createBatch( + chainManager, + ~batchSizeTarget=10000, + ) // ensure that the events are ordered correctly - switch eventsInBlock { - | {items: []} => chainManager - | {items, updatedFetchStates} => - items->Belt.Array.forEach( - i => { - let _ = allEventsRead->Js.Array2.push(i) + if totalBatchSize === 0 { + chainManager + } else { + items->Array.forEach( + item => { + allEventsRead->Js.Array2.push(item)->ignore }, ) - let batchSize = items->Array.length numberOfMockEventsReadFromQueues := - numberOfMockEventsReadFromQueues.contents + batchSize + numberOfMockEventsReadFromQueues.contents + totalBatchSize let firstEventInBlock = items[0]->Option.getExn @@ -180,7 +185,12 @@ describe("ChainManager", () => { let nextChainFetchers = chainManager.chainFetchers->ChainMap.mapWithKey( (chain, fetcher) => { - let fetchState = updatedFetchStates->ChainMap.get(chain) + let fetchState = switch progressedChainsById->Utils.Dict.dangerouslyGetByIntNonOption( + chain->ChainMap.Chain.toChainId, + ) { + | Some(chainAfterBatch) => chainAfterBatch.fetchState + | None => fetcher.fetchState + } { ...fetcher, fetchState, diff --git a/scenarios/test_codegen/test/ChainsStateComputation_test.res b/scenarios/test_codegen/test/ChainsStateComputation_test.res deleted file mode 100644 index 0699dc487..000000000 --- a/scenarios/test_codegen/test/ChainsStateComputation_test.res +++ /dev/null @@ -1,419 +0,0 @@ -open RescriptMocha - -// Helper function to check if all chains are ready (synced/caught up to head or endblock) -let allChainsReady = (chains: Internal.chains): bool => { - chains - ->Js.Dict.values - ->Belt.Array.every(chainInfo => chainInfo.isReady) -} - -describe("Chains State Computation", () => { - describe("computeChainsState", () => { - it("should set isReady=true when all chains have reached their end block", () => { - // Create mock chain fetchers that have all reached end block - let chainFetcher1 = { - "committedProgressBlockNumber": 1000, - "fetchState": { - "endBlock": Some(1000), - }, - "timestampCaughtUpToHeadOrEndblock": Some(123456), - }->Utils.magic - - let chainFetcher2 = { - "committedProgressBlockNumber": 2000, - "fetchState": { - "endBlock": Some(2000), - }, - "timestampCaughtUpToHeadOrEndblock": Some(123456), - }->Utils.magic - - let chainFetchers = - ChainMap.fromArrayUnsafe([ - (ChainMap.Chain.makeUnsafe(~chainId=1), chainFetcher1), - (ChainMap.Chain.makeUnsafe(~chainId=2), chainFetcher2), - ]) - - let chains = EventProcessing.computeChainsState(chainFetchers) - - // Verify that both chains are marked as ready - 
Assert.equal(chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), Some(true)) - Assert.equal(chains->Js.Dict.get("2")->Belt.Option.map(c => c.isReady), Some(true)) - Assert.equal(allChainsReady(chains), true) - }) - - it("should set isReady=false when at least one chain has not reached end block", () => { - // Chain 1 has reached end block, but chain 2 has not - let chainFetcher1 = { - "committedProgressBlockNumber": 1000, - "fetchState": { - "endBlock": Some(1000), - }, - "timestampCaughtUpToHeadOrEndblock": Some(123456), - }->Utils.magic - - let chainFetcher2 = { - "committedProgressBlockNumber": 1500, - "fetchState": { - "endBlock": Some(2000), // Not yet reached - }, - "timestampCaughtUpToHeadOrEndblock": None, - }->Utils.magic - - let chainFetchers = - ChainMap.fromArrayUnsafe([ - (ChainMap.Chain.makeUnsafe(~chainId=1), chainFetcher1), - (ChainMap.Chain.makeUnsafe(~chainId=2), chainFetcher2), - ]) - - let chains = EventProcessing.computeChainsState(chainFetchers) - - // Chain 1 should be ready, chain 2 should not be ready - Assert.equal(chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), Some(true)) - Assert.equal(chains->Js.Dict.get("2")->Belt.Option.map(c => c.isReady), Some(false)) - Assert.equal(allChainsReady(chains), false) - }) - - it("should set isReady=false when a chain has no end block (live mode)", () => { - // Chain with no end block set (continuous live indexing) - let chainFetcher1 = { - "committedProgressBlockNumber": 1000, - "fetchState": { - "endBlock": None, // Live mode, no end block - }, - "timestampCaughtUpToHeadOrEndblock": None, - }->Utils.magic - - let chainFetchers = ChainMap.fromArrayUnsafe([(ChainMap.Chain.makeUnsafe(~chainId=1), chainFetcher1)]) - - let chains = EventProcessing.computeChainsState(chainFetchers) - - Assert.equal(chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), Some(false)) - Assert.equal(allChainsReady(chains), false) - }) - - it("should set isReady=false when committedProgressBlockNumber is below endBlock", () => { - let chainFetcher1 = { - "committedProgressBlockNumber": 500, - "fetchState": { - "endBlock": Some(1000), - }, - "timestampCaughtUpToHeadOrEndblock": None, - }->Utils.magic - - let chainFetchers = ChainMap.fromArrayUnsafe([(ChainMap.Chain.makeUnsafe(~chainId=1), chainFetcher1)]) - - let chains = EventProcessing.computeChainsState(chainFetchers) - - Assert.equal(chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), Some(false)) - Assert.equal(allChainsReady(chains), false) - }) - - it("should set isReady=true when committedProgressBlockNumber exceeds endBlock", () => { - // Progress can go beyond end block in some edge cases - let chainFetcher1 = { - "committedProgressBlockNumber": 1500, - "fetchState": { - "endBlock": Some(1000), - }, - "timestampCaughtUpToHeadOrEndblock": Some(123456), - }->Utils.magic - - let chainFetchers = ChainMap.fromArrayUnsafe([(ChainMap.Chain.makeUnsafe(~chainId=1), chainFetcher1)]) - - let chains = EventProcessing.computeChainsState(chainFetchers) - - Assert.equal(chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), Some(true)) - Assert.equal(allChainsReady(chains), true) - }) - - it("should handle empty chainFetchers map (edge case)", () => { - let chainFetchers = ChainMap.fromArrayUnsafe([]) - - let chains = EventProcessing.computeChainsState(chainFetchers) - - // Empty dict means no chains, which technically means "all chains are ready" (vacuous truth) - Assert.equal(chains->Js.Dict.keys->Belt.Array.length, 0) - Assert.equal(allChainsReady(chains), true) - }) - - 
it("should correctly track each chain state in multi-chain scenario when only some reached end", () => { - let chainFetcher1 = { - "committedProgressBlockNumber": 1000, - "fetchState": { - "endBlock": Some(1000), // Reached - }, - "timestampCaughtUpToHeadOrEndblock": Some(123456), - }->Utils.magic - - let chainFetcher2 = { - "committedProgressBlockNumber": 1999, - "fetchState": { - "endBlock": Some(2000), // Not reached (1 block away) - }, - "timestampCaughtUpToHeadOrEndblock": None, - }->Utils.magic - - let chainFetcher3 = { - "committedProgressBlockNumber": 3000, - "fetchState": { - "endBlock": Some(3000), // Reached - }, - "timestampCaughtUpToHeadOrEndblock": Some(123456), - }->Utils.magic - - let chainFetchers = - ChainMap.fromArrayUnsafe([ - (ChainMap.Chain.makeUnsafe(~chainId=1), chainFetcher1), - (ChainMap.Chain.makeUnsafe(~chainId=2), chainFetcher2), - (ChainMap.Chain.makeUnsafe(~chainId=3), chainFetcher3), - ]) - - let chains = EventProcessing.computeChainsState(chainFetchers) - - // Verify individual chain states - Assert.equal(chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), Some(true)) - Assert.equal(chains->Js.Dict.get("2")->Belt.Option.map(c => c.isReady), Some(false)) - Assert.equal(chains->Js.Dict.get("3")->Belt.Option.map(c => c.isReady), Some(true)) - Assert.equal(allChainsReady(chains), false) - }) - - it("should mark all chains as ready only when ALL chains in multi-chain scenario reached end", () => { - let chainFetcher1 = { - "committedProgressBlockNumber": 1000, - "fetchState": { - "endBlock": Some(1000), - }, - "timestampCaughtUpToHeadOrEndblock": Some(123456), - }->Utils.magic - - let chainFetcher2 = { - "committedProgressBlockNumber": 2000, - "fetchState": { - "endBlock": Some(2000), - }, - "timestampCaughtUpToHeadOrEndblock": Some(123456), - }->Utils.magic - - let chainFetcher3 = { - "committedProgressBlockNumber": 3000, - "fetchState": { - "endBlock": Some(3000), - }, - "timestampCaughtUpToHeadOrEndblock": Some(123456), - }->Utils.magic - - let chainFetchers = - ChainMap.fromArrayUnsafe([ - (ChainMap.Chain.makeUnsafe(~chainId=1), chainFetcher1), - (ChainMap.Chain.makeUnsafe(~chainId=2), chainFetcher2), - (ChainMap.Chain.makeUnsafe(~chainId=3), chainFetcher3), - ]) - - let chains = EventProcessing.computeChainsState(chainFetchers) - - // All chains should be ready - Assert.equal(chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), Some(true)) - Assert.equal(chains->Js.Dict.get("2")->Belt.Option.map(c => c.isReady), Some(true)) - Assert.equal(chains->Js.Dict.get("3")->Belt.Option.map(c => c.isReady), Some(true)) - Assert.equal(allChainsReady(chains), true) - }) - }) - - describe("chains state in processEventBatch", () => { - // These tests actually invoke EventProcessing.processEventBatch and verify - // that it correctly passes chains state to handlers based on chainFetchers state - - Async.it("should pass chains with isReady=false when chains have not reached end block", async () => { - EventHandlers.lastEmptyEventChains := None - - // Setup: chain has NOT reached its end block (500 < 1000) - let chainFetcher = { - "committedProgressBlockNumber": 500, - "fetchState": {"endBlock": Some(1000)}, - "timestampCaughtUpToHeadOrEndblock": None, - }->Utils.magic - let chainFetchers = ChainMap.fromArrayUnsafe([(ChainMap.Chain.makeUnsafe(~chainId=54321), chainFetcher)]) - - let config = RegisterHandlers.registerAllHandlers() - let inMemoryStore = InMemoryStore.make() - let loadManager = LoadManager.make() - - // Create an EmptyEvent that will trigger our 
handler - let emptyEventLog: Types.eventLog = { - params: (), - chainId: 54321, - srcAddress: "0xabc0000000000000000000000000000000000000"->Address.Evm.fromStringOrThrow, - logIndex: 1, - transaction: MockEvents.tx1, - block: MockEvents.block1, - } - - let item = Internal.Event({ - timestamp: emptyEventLog.block.timestamp, - chain: ChainMap.Chain.makeUnsafe(~chainId=54321), - blockNumber: emptyEventLog.block.number, - logIndex: emptyEventLog.logIndex, - eventConfig: (Types.Gravatar.EmptyEvent.register() :> Internal.eventConfig), - event: emptyEventLog->Internal.fromGenericEvent, - }) - - // Actually call processEventBatch - the real code path - let _ = (await EventProcessing.processEventBatch( - ~items=[item], - ~progressedChains=[{ - chainId: 54321, - batchSize: 1, - progressBlockNumber: 500, - isProgressAtHead: false, - totalEventsProcessed: 1, - }], - ~inMemoryStore, - ~isInReorgThreshold=false, - ~loadManager, - ~config, - ~chainFetchers, - ))->Belt.Result.getExn - - // Assert on the chains state that processEventBatch passed to the handler - switch EventHandlers.lastEmptyEventChains.contents { - | Some(chains) => { - // Verify chain 54321 exists and is not ready - Assert.equal(chains->Js.Dict.get("54321")->Belt.Option.map(c => c.isReady), Some(false)) - Assert.equal(allChainsReady(chains), false) - } - | None => Assert.fail("Handler was not called - processEventBatch didn't execute handler") - } - }) - - Async.it("should pass chains with isReady=true when all chains have reached end block", async () => { - EventHandlers.lastEmptyEventChains := None - - // Setup: chain HAS reached its end block (1000 >= 1000) - let chainFetcher = { - "committedProgressBlockNumber": 1000, - "fetchState": {"endBlock": Some(1000)}, - "timestampCaughtUpToHeadOrEndblock": Some(123456), - }->Utils.magic - let chainFetchers = ChainMap.fromArrayUnsafe([(ChainMap.Chain.makeUnsafe(~chainId=54321), chainFetcher)]) - - let config = RegisterHandlers.registerAllHandlers() - let inMemoryStore = InMemoryStore.make() - let loadManager = LoadManager.make() - - let emptyEventLog: Types.eventLog = { - params: (), - chainId: 54321, - srcAddress: "0xabc0000000000000000000000000000000000000"->Address.Evm.fromStringOrThrow, - logIndex: 1, - transaction: MockEvents.tx1, - block: MockEvents.block1, - } - - let item = Internal.Event({ - timestamp: emptyEventLog.block.timestamp, - chain: ChainMap.Chain.makeUnsafe(~chainId=54321), - blockNumber: emptyEventLog.block.number, - logIndex: emptyEventLog.logIndex, - eventConfig: (Types.Gravatar.EmptyEvent.register() :> Internal.eventConfig), - event: emptyEventLog->Internal.fromGenericEvent, - }) - - // Actually call processEventBatch - let _ = (await EventProcessing.processEventBatch( - ~items=[item], - ~progressedChains=[{ - chainId: 54321, - batchSize: 1, - progressBlockNumber: 1000, - isProgressAtHead: true, - totalEventsProcessed: 1, - }], - ~inMemoryStore, - ~isInReorgThreshold=false, - ~loadManager, - ~config, - ~chainFetchers, - ))->Belt.Result.getExn - - // Assert on what processEventBatch actually passed to the handler - switch EventHandlers.lastEmptyEventChains.contents { - | Some(chains) => { - // Verify chain 54321 exists and is ready - Assert.equal(chains->Js.Dict.get("54321")->Belt.Option.map(c => c.isReady), Some(true)) - Assert.equal(allChainsReady(chains), true) - } - | None => Assert.fail("Handler was not called") - } - }) - - Async.it("should pass correct per-chain state in multi-chain when not all reached end", async () => { - EventHandlers.lastEmptyEventChains := 
None - - // Setup: chain 1 reached end, chain 2 has not - let chainFetcher1 = { - "committedProgressBlockNumber": 1000, - "fetchState": {"endBlock": Some(1000)}, - "timestampCaughtUpToHeadOrEndblock": Some(123456), - }->Utils.magic - let chainFetcher2 = { - "committedProgressBlockNumber": 1500, - "fetchState": {"endBlock": Some(2000)}, - "timestampCaughtUpToHeadOrEndblock": None, - }->Utils.magic - let chainFetchers = ChainMap.fromArrayUnsafe([ - (ChainMap.Chain.makeUnsafe(~chainId=1), chainFetcher1), - (ChainMap.Chain.makeUnsafe(~chainId=2), chainFetcher2), - ]) - - let config = RegisterHandlers.registerAllHandlers() - let inMemoryStore = InMemoryStore.make() - let loadManager = LoadManager.make() - - let emptyEventLog: Types.eventLog = { - params: (), - chainId: 1, - srcAddress: "0xabc0000000000000000000000000000000000000"->Address.Evm.fromStringOrThrow, - logIndex: 1, - transaction: MockEvents.tx1, - block: MockEvents.block1, - } - - let item = Internal.Event({ - timestamp: emptyEventLog.block.timestamp, - chain: ChainMap.Chain.makeUnsafe(~chainId=1), - blockNumber: emptyEventLog.block.number, - logIndex: emptyEventLog.logIndex, - eventConfig: (Types.Gravatar.EmptyEvent.register() :> Internal.eventConfig), - event: emptyEventLog->Internal.fromGenericEvent, - }) - - // Call the real processEventBatch with multi-chain scenario - let _ = (await EventProcessing.processEventBatch( - ~items=[item], - ~progressedChains=[{ - chainId: 1, - batchSize: 1, - progressBlockNumber: 1000, - isProgressAtHead: false, - totalEventsProcessed: 1, - }], - ~inMemoryStore, - ~isInReorgThreshold=false, - ~loadManager, - ~config, - ~chainFetchers, - ))->Belt.Result.getExn - - // Verify processEventBatch correctly computed chain states - switch EventHandlers.lastEmptyEventChains.contents { - | Some(chains) => { - // Chain 1 should be ready, chain 2 should not be ready - Assert.equal(chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), Some(true)) - Assert.equal(chains->Js.Dict.get("2")->Belt.Option.map(c => c.isReady), Some(false)) - Assert.equal(allChainsReady(chains), false) - } - | None => Assert.fail("Handler was not called") - } - }) - }) -}) diff --git a/scenarios/test_codegen/test/E2EEthNode_test.res b/scenarios/test_codegen/test/E2EEthNode_test.res index 334df9f74..a6a941826 100644 --- a/scenarios/test_codegen/test/E2EEthNode_test.res +++ b/scenarios/test_codegen/test/E2EEthNode_test.res @@ -39,7 +39,7 @@ describe("E2E Integration Test", () => { ) let chain = MockConfig.chain1337 { - confirmedBlockThreshold: 200, + maxReorgDepth: 200, startBlock: 0, id: 1337, contracts, diff --git a/scenarios/test_codegen/test/E2E_test.res b/scenarios/test_codegen/test/E2E_test.res index aa2d792e9..508b91864 100644 --- a/scenarios/test_codegen/test/E2E_test.res +++ b/scenarios/test_codegen/test/E2E_test.res @@ -62,7 +62,7 @@ describe("E2E tests", () => { [{value: "0", labels: Js.Dict.empty()}], ) - await Mock.Helper.initialEnterReorgThreshold(~sourceMock) + await Mock.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock) Assert.deepEqual( await indexerMock.metric("envio_reorg_threshold"), @@ -83,7 +83,7 @@ describe("E2E tests", () => { ) }) - // A regression test for bug introduced in 2.30.0 + // A regression test for a bug introduced in 2.30.0 Async.it("Correct event ordering for ordered multichain indexer", async () => { let sourceMock1337 = Mock.Source.make( [#getHeightOrThrow, #getItemsOrThrow, #getBlockHashes], @@ -110,8 +110,8 @@ describe("E2E tests", () => { // Test inside of reorg threshold, so we can 
check the history order let _ = await Promise.all2(( - Mock.Helper.initialEnterReorgThreshold(~sourceMock=sourceMock1337), - Mock.Helper.initialEnterReorgThreshold(~sourceMock=sourceMock100), + Mock.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock=sourceMock1337), + Mock.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock=sourceMock100), )) let callCount = ref(0) @@ -158,58 +158,322 @@ describe("E2E tests", () => { await indexerMock.getBatchWritePromise() Assert.deepEqual( - await indexerMock.queryHistory(module(Entities.SimpleEntity)), + await Promise.all2(( + indexerMock.queryCheckpoints(), + indexerMock.queryHistory(module(Entities.SimpleEntity)), + )), + ( + [ + { + id: 2, + chainId: 100, + blockNumber: 150, + blockHash: Js.Null.Null, + eventsProcessed: 1, + }, + { + id: 3, + chainId: 1337, + blockNumber: 100, + blockHash: Js.Null.Value("0x100"), + eventsProcessed: 0, + }, + { + id: 4, + chainId: 1337, + blockNumber: 150, + blockHash: Js.Null.Null, + eventsProcessed: 1, + }, + { + id: 5, + chainId: 100, + blockNumber: 151, + blockHash: Js.Null.Null, + eventsProcessed: 1, + }, + { + id: 6, + chainId: 100, + blockNumber: 160, + blockHash: Js.Null.Value("0x160"), + eventsProcessed: 0, + }, + ], + [ + { + checkpointId: 2, + entityId: "1", + entityUpdateAction: Set({ + Entities.SimpleEntity.id: "1", + value: "call-0", + }), + }, + { + checkpointId: 4, + entityId: "1", + entityUpdateAction: Set({ + Entities.SimpleEntity.id: "1", + value: "call-1", + }), + }, + { + checkpointId: 5, + entityId: "1", + entityUpdateAction: Set({ + Entities.SimpleEntity.id: "1", + value: "call-2", + }), + }, + ], + ), + ) + }) + + Async.it("Track effects in prom metrics", async () => { + let sourceMock = Mock.Source.make( + [#getHeightOrThrow, #getItemsOrThrow, #getBlockHashes], + ~chain=#1337, + ) + let indexerMock = await Mock.Indexer.make( + ~chains=[ + { + chain: #1337, + sources: [sourceMock.source], + }, + ], + ) + await Utils.delay(0) + + let testEffectWithCache = Envio.experimental_createEffect( + { + name: "testEffectWithCache", + input: S.string, + output: S.string, + cache: true, + }, + async ({input}) => { + input ++ "-output" + }, + ) + let testEffect = Envio.experimental_createEffect( + { + name: "testEffect", + input: S.string, + output: S.string, + }, + async ({input}) => { + input ++ "-output" + }, + ) + + Assert.deepEqual( + await indexerMock.metric("envio_effect_calls_count"), + [], + ~message="should have no effect calls in the beginning", + ) + Assert.deepEqual( + await indexerMock.metric("envio_effect_cache_count"), + [], + ~message="should have no effect cache in the beginning", + ) + + sourceMock.resolveGetHeightOrThrow(300) + await Utils.delay(0) + await Utils.delay(0) + sourceMock.resolveGetItemsOrThrow( + [ + { + blockNumber: 100, + logIndex: 0, + handler: async ({context}) => { + Assert.deepEqual(await context.effect(testEffect, "test"), "test-output") + Assert.deepEqual(await context.effect(testEffectWithCache, "test"), "test-output") + }, + }, + ], + ~latestFetchedBlockNumber=100, + ) + await indexerMock.getBatchWritePromise() + + Assert.deepEqual( + await indexerMock.metric("envio_effect_calls_count"), + [ + { + value: "1", + labels: Js.Dict.fromArray([("effect", "testEffect")]), + }, + { + value: "1", + labels: Js.Dict.fromArray([("effect", "testEffectWithCache")]), + }, + ], + ~message="should increment effect calls count", + ) + Assert.deepEqual( + await indexerMock.metric("envio_effect_cache_count"), + [ + { + value: "1", + labels: 
Js.Dict.fromArray([("effect", "testEffectWithCache")]), + }, + ], + ~message="should increment effect cache count", + ) + Assert.deepEqual( + await indexerMock.metric("envio_storage_load_count"), + [], + ~message="Shouldn't load anything from storage at this point", + ) + Assert.deepEqual( + await indexerMock.queryEffectCache("testEffectWithCache"), + [{"id": `"test"`, "output": %raw(`"test-output"`)}], + ~message="should have the cache entry in db", + ) + + let indexerMock = await indexerMock.restart() + await Utils.delay(0) + + Assert.deepEqual( + await indexerMock.metric("envio_effect_cache_count"), + [ + { + value: "1", + labels: Js.Dict.fromArray([("effect", "testEffectWithCache")]), + }, + ], + ~message="should resume effect cache count on restart", + ) + + sourceMock.resolveGetHeightOrThrow(300) + await Utils.delay(0) + await Utils.delay(0) + sourceMock.resolveGetItemsOrThrow( + [ + { + blockNumber: 101, + logIndex: 0, + handler: async ({context}) => { + Assert.deepEqual( + await Promise.all2(( + context.effect(testEffectWithCache, "test"), + context.effect(testEffectWithCache, "test-2"), + )), + ("test-output", "test-2-output"), + ) + }, + }, + ], + ~latestFetchedBlockNumber=101, + ) + await indexerMock.getBatchWritePromise() + + Assert.deepEqual( + await Promise.all3(( + indexerMock.metric("envio_storage_load_where_size"), + indexerMock.metric("envio_storage_load_size"), + indexerMock.metric("envio_storage_load_count"), + )), + ( + [ + { + value: "2", + labels: Js.Dict.fromArray([("operation", "testEffectWithCache.effect")]), + }, + ], + [ + { + value: "1", + labels: Js.Dict.fromArray([("operation", "testEffectWithCache.effect")]), + }, + ], + [ + { + value: "1", + labels: Js.Dict.fromArray([("operation", "testEffectWithCache.effect")]), + }, + ], + ), + ~message="Time to load cache from storage now", + ) + Assert.deepEqual( + await Promise.all2(( + indexerMock.metric("envio_effect_calls_count"), + indexerMock.metric("envio_effect_cache_count"), + )), + ( + [ + { + // It resumes in-memory during test, but it'll reset on process restart + // In the real-world it'll be 1 + value: "2", + labels: Js.Dict.fromArray([("effect", "testEffectWithCache")]), + }, + ], + [ + { + value: "2", + labels: Js.Dict.fromArray([("effect", "testEffectWithCache")]), + }, + ], + ), + ~message="Should increment effect calls count and cache count", + ) + + let testEffectWithCacheV2 = Envio.experimental_createEffect( + { + name: "testEffectWithCache", + input: S.string, + output: S.string->S.refine( + s => v => + if !(v->Js.String2.includes("2")) { + s.fail(`Expected to include '2', got ${v}`) + }, + ), + cache: true, + }, + async ({input}) => { + input ++ "-output-v2" + }, + ) + + sourceMock.resolveGetItemsOrThrow( + [ + { + blockNumber: 102, + logIndex: 0, + handler: async ({context}) => { + Assert.deepEqual( + await Promise.all2(( + context.effect(testEffectWithCacheV2, "test"), + context.effect(testEffectWithCacheV2, "test-2"), + )), + ("test-output-v2", "test-2-output"), + ) + }, + }, + ], + ~latestFetchedBlockNumber=102, + ) + await indexerMock.getBatchWritePromise() + + Assert.deepEqual( + await indexerMock.queryEffectCache("testEffectWithCache"), + [ + {"id": `"test-2"`, "output": %raw(`"test-2-output"`)}, + {"id": `"test"`, "output": %raw(`"test-output-v2"`)}, + ], + ~message="Should invalidate loaded cache and store new one", + ) + Assert.deepEqual( + await indexerMock.metric("envio_effect_cache_count"), [ { - current: { - chain_id: 100, - block_timestamp: 150, - block_number: 150, - log_index: 0, 
- }, - previous: undefined, - entityData: Set({ - Entities.SimpleEntity.id: "1", - value: "call-0", - }), - }, - { - current: { - chain_id: 1337, - block_timestamp: 150, - block_number: 150, - log_index: 2, - }, - previous: Some({ - chain_id: 100, - block_timestamp: 150, - block_number: 150, - log_index: 0, - }), - entityData: Set({ - Entities.SimpleEntity.id: "1", - value: "call-1", - }), - }, - { - current: { - chain_id: 100, - block_timestamp: 151, - block_number: 151, - log_index: 0, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 150, - block_number: 150, - log_index: 2, - }), - entityData: Set({ - Entities.SimpleEntity.id: "1", - value: "call-2", - }), + value: "2", + labels: Js.Dict.fromArray([("effect", "testEffectWithCache")]), }, ], + ~message="Shouldn't increment on invalidation", ) }) }) diff --git a/scenarios/test_codegen/test/EventHandler_test.ts b/scenarios/test_codegen/test/EventHandler_test.ts index 0a27812a5..d535cb2aa 100644 --- a/scenarios/test_codegen/test/EventHandler_test.ts +++ b/scenarios/test_codegen/test/EventHandler_test.ts @@ -13,6 +13,12 @@ describe("Use Envio test framework to test event handlers", () => { const event = Gravatar.FactoryEvent.createMockEvent({ contract: dcAddress, testCase: "syncRegistration", + mockEventData: { + chainId: 1337, + block: { + number: 2, + }, + }, }); const updatedMockDb = await mockDbInitial.processEvents([event]); @@ -20,11 +26,11 @@ describe("Use Envio test framework to test event handlers", () => { const registeredDcs = updatedMockDb.dynamicContractRegistry.getAll(); assert.deepEqual(registeredDcs, [ { - id: `1-${dcAddress}`, + id: `1337-${dcAddress}`, contract_name: "SimpleNft", contract_address: dcAddress, - chain_id: 1, - registering_event_block_number: 0, + chain_id: 1337, + registering_event_block_number: 2, registering_event_log_index: 0, registering_event_name: "FactoryEvent", registering_event_src_address: `0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266`, @@ -43,6 +49,9 @@ describe("Use Envio test framework to test event handlers", () => { const event = Gravatar.FactoryEvent.createMockEvent({ contract: dcAddress, testCase: "asyncRegistration", + mockEventData: { + chainId: 1337, + }, }); const updatedMockDb = await mockDbInitial.processEvents([event]); @@ -50,10 +59,10 @@ describe("Use Envio test framework to test event handlers", () => { const registeredDcs = updatedMockDb.dynamicContractRegistry.getAll(); assert.deepEqual(registeredDcs, [ { - id: `1-${dcAddress}`, + id: `1337-${dcAddress}`, contract_name: "SimpleNft", contract_address: dcAddress, - chain_id: 1, + chain_id: 1337, registering_event_block_number: 0, registering_event_log_index: 0, registering_event_name: "FactoryEvent", @@ -304,6 +313,7 @@ describe("Use Envio test framework to test event handlers", () => { testCase: "checksumsAddress", mockEventData: { srcAddress: eventAddress, + chainId: 1337, }, }); @@ -312,10 +322,10 @@ describe("Use Envio test framework to test event handlers", () => { const registeredDcs = updatedMockDb.dynamicContractRegistry.getAll(); assert.deepEqual(registeredDcs, [ { - id: `1-${expectedChecksummedAddress}`, + id: `1337-${expectedChecksummedAddress}`, contract_name: "SimpleNft", contract_address: expectedChecksummedAddress, - chain_id: 1, + chain_id: 1337, registering_event_block_number: 0, registering_event_log_index: 0, registering_event_name: "FactoryEvent", diff --git a/scenarios/test_codegen/test/EventOrigin_test.res b/scenarios/test_codegen/test/EventOrigin_test.res index a462acb33..3dd4e57e8 100644 --- 
a/scenarios/test_codegen/test/EventOrigin_test.res +++ b/scenarios/test_codegen/test/EventOrigin_test.res @@ -2,52 +2,68 @@ open RescriptMocha describe("Chains State", () => { describe("chainInfo type", () => { - it("should have isReady field set to false", () => { - let chainInfo: Internal.chainInfo = {isReady: false} - Assert.equal(chainInfo.isReady, false) - }) - - it("should have isReady field set to true", () => { - let chainInfo: Internal.chainInfo = {isReady: true} - Assert.equal(chainInfo.isReady, true) - }) + it( + "should have isReady field set to false", + () => { + let chainInfo: Internal.chainInfo = {isReady: false} + Assert.equal(chainInfo.isReady, false) + }, + ) + + it( + "should have isReady field set to true", + () => { + let chainInfo: Internal.chainInfo = {isReady: true} + Assert.equal(chainInfo.isReady, true) + }, + ) }) describe("chains dict", () => { - it("should support multiple chains with different states", () => { - let chains: Internal.chains = Js.Dict.empty() - chains->Js.Dict.set("1", {Internal.isReady: false}) - chains->Js.Dict.set("2", {Internal.isReady: true}) - - Assert.equal(chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), Some(false)) - Assert.equal(chains->Js.Dict.get("2")->Belt.Option.map(c => c.isReady), Some(true)) - }) + it( + "should support multiple chains with different states", + () => { + let chains: Internal.chains = Js.Dict.empty() + chains->Js.Dict.set("1", {Internal.isReady: false}) + chains->Js.Dict.set("2", {Internal.isReady: true}) + + Assert.equal(chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), Some(false)) + Assert.equal(chains->Js.Dict.get("2")->Belt.Option.map(c => c.isReady), Some(true)) + }, + ) }) describe("chains in context", () => { - Async.it("should be accessible in handler context", async () => { - // This test verifies that the chains field is accessible - // The actual integration test is in EventHandlers.res with the EmptyEvent handler - let inMemoryStore = InMemoryStore.make() - let loadManager = LoadManager.make() - - let item = MockEvents.newGravatarLog1->MockEvents.newGravatarEventToBatchItem - - let chains = Js.Dict.empty() - chains->Js.Dict.set("1", {Internal.isReady: false}) - - let handlerContext = UserContext.getHandlerContext({ - item, - loadManager, - persistence: Config.codegenPersistence, - inMemoryStore, - shouldSaveHistory: false, - isPreload: false, - chains, - }) - - // Verify we can access chains - Assert.equal(handlerContext.chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), Some(false)) - }) + Async.it( + "should be accessible in handler context", + async () => { + // This test verifies that the chains field is accessible + // The actual integration test is in EventHandlers.res with the EmptyEvent handler + let inMemoryStore = InMemoryStore.make() + let loadManager = LoadManager.make() + + let item = MockEvents.newGravatarLog1->MockEvents.newGravatarEventToBatchItem + + let chains = Js.Dict.empty() + chains->Js.Dict.set("1", {Internal.isReady: false}) + + let handlerContext = UserContext.getHandlerContext({ + item, + loadManager, + persistence: Config.codegenPersistence, + inMemoryStore, + shouldSaveHistory: false, + isPreload: false, + checkpointId: 0, + chains, + }) + + // Verify we can access chains + Assert.equal( + handlerContext.chains->Js.Dict.get("1")->Belt.Option.map(c => c.isReady), + Some(false), + ) + }, + ) }) }) diff --git a/scenarios/test_codegen/test/Integration_ts_helpers.res b/scenarios/test_codegen/test/Integration_ts_helpers.res index 6d41b246b..7263e6bf1 
100644 --- a/scenarios/test_codegen/test/Integration_ts_helpers.res +++ b/scenarios/test_codegen/test/Integration_ts_helpers.res @@ -28,7 +28,7 @@ let getLocalChainConfig = (nftFactoryContractAddress): chainConfig => { }) let chain = MockConfig.chain1337 { - confirmedBlockThreshold: 200, + maxReorgDepth: 200, startBlock: 1, id: 1337, contracts, @@ -68,7 +68,7 @@ let makeChainManager = (cfg: chainConfig): chainManager => { ~config=Config.make( ~isUnorderedMultichainMode=true, ~chains=[cfg], - ~registrations={onBlockByChainId: Js.Dict.empty()}, + ~registrations={onBlockByChainId: Js.Dict.empty(), hasEvents: false}, ), ) } diff --git a/scenarios/test_codegen/test/Mock_test.res b/scenarios/test_codegen/test/Mock_test.res deleted file mode 100644 index 3cdf9b43c..000000000 --- a/scenarios/test_codegen/test/Mock_test.res +++ /dev/null @@ -1,100 +0,0 @@ -open RescriptMocha - -let inMemoryStore = InMemoryStore.make() - -describe("E2E Mock Event Batch", () => { - Async.before(async () => { - DbStub.setGravatarDb(~gravatar=MockEntities.gravatarEntity1) - DbStub.setGravatarDb(~gravatar=MockEntities.gravatarEntity2) - // EventProcessing.processEventBatch(MockEvents.eventBatch) - - let loadManager = LoadManager.make() - - // Create mock chains state (simulating historical indexing) - let chains = Js.Dict.empty() - chains->Js.Dict.set("1", {Internal.isReady: false}) - - try { - await MockEvents.eventBatchItems->EventProcessing.runBatchHandlersOrThrow( - ~inMemoryStore, - ~loadManager, - ~config=RegisterHandlers.getConfig(), - ~shouldSaveHistory=false, - ~shouldBenchmark=false, - ~chains, - ) - } catch { - | EventProcessing.ProcessingError({message, exn, item}) => - exn - ->ErrorHandling.make(~msg=message, ~logger=item->Logging.getItemLogger) - ->ErrorHandling.logAndRaise - } - }) -}) - -// NOTE: skipping this test for now since there seems to be some invalid DB state. Need to investigate again. -// TODO: add a similar kind of test back again. -// describe_skip("E2E Db check", () => { -// Async.before(async () => { -// await DbHelpers.runUpDownMigration() - -// let config = RegisterHandlers.registerAllHandlers() -// let loadLayer = LoadLayer.makeWithDbConnection() - -// let _ = await DbFunctionsEntities.batchSet(~entityMod=module(Entities.Gravatar))( -// Migrations.sql, -// [MockEntities.gravatarEntity1, MockEntities.gravatarEntity2], -// ) - -// let _ = await EventProcessing.processEventBatch( -// ~inMemoryStore, -// ~eventBatch=MockEvents.eventBatchItems, -// ~latestProcessedBlocks=EventProcessing.EventsProcessed.makeEmpty(~config), -// ~loadLayer, -// ~config, -// ~isInReorgThreshold=false, -// ) - -// //// TODO: write code (maybe via dependency injection) to allow us to use the stub rather than the actual database here. 
-// // DbStub.setGravatarDb(~gravatar=MockEntities.gravatarEntity1) -// // DbStub.setGravatarDb(~gravatar=MockEntities.gravatarEntity2) -// // await EventProcessing.processEventBatch(MockEvents.eventBatch, ~context=Context.getContext()) -// }) - -// it("Validate inmemory store state", () => { -// let gravatars = -// inMemoryStore.entities -// ->InMemoryStore.EntityTables.get(module(Entities.Gravatar)) -// ->InMemoryTable.Entity.values - -// Assert.deepEqual( -// gravatars, -// [ -// { -// id: "1001", -// owner_id: "0x1230000000000000000000000000000000000000", -// displayName: "update1", -// imageUrl: "https://gravatar1.com", -// updatesCount: BigInt.fromInt(2), -// size: MEDIUM, -// }, -// { -// id: "1002", -// owner_id: "0x4560000000000000000000000000000000000000", -// displayName: "update2", -// imageUrl: "https://gravatar2.com", -// updatesCount: BigInt.fromInt(2), -// size: MEDIUM, -// }, -// { -// id: "1003", -// owner_id: "0x7890000000000000000000000000000000000000", -// displayName: "update3", -// imageUrl: "https://gravatar3.com", -// updatesCount: BigInt.fromInt(2), -// size: MEDIUM, -// }, -// ], -// ) -// }) -// }) diff --git a/scenarios/test_codegen/test/ReorgDetection_test.res b/scenarios/test_codegen/test/ReorgDetection_test.res index 9dd2ff88c..4d1f34ca5 100644 --- a/scenarios/test_codegen/test/ReorgDetection_test.res +++ b/scenarios/test_codegen/test/ReorgDetection_test.res @@ -1,76 +1,77 @@ open RescriptMocha open Belt -open ReorgDetection describe("Validate reorg detection functions", () => { let scannedHashesFixture = [(1, "0x123"), (50, "0x456"), (300, "0x789"), (500, "0x5432")] - let shouldRollbackOnReorg = true let pipeNoReorg = ((updated, reorgResult)) => { switch reorgResult { - | ReorgDetected(_) => Js.Exn.raiseError("Unexpected reorg detected") + | ReorgDetection.ReorgDetected(_) => Js.Exn.raiseError("Unexpected reorg detected") | NoReorg => updated } } - let mock = (arr, ~confirmedBlockThreshold=200, ~detectedReorgBlock=?) => { - arr - ->Array.map(((blockNumber, blockHash)) => { - blockNumber, - blockHash, - }) - ->LastBlockScannedHashes.makeWithData(~confirmedBlockThreshold, ~detectedReorgBlock?) + let mock = (arr, ~maxReorgDepth=200, ~shouldRollbackOnReorg=true, ~detectedReorgBlock=?) 
=> { + ReorgDetection.make( + ~chainReorgCheckpoints=arr->Array.map((( + blockNumber, + blockHash, + )): Internal.reorgCheckpoint => { + chainId: 0, // It's not used + checkpointId: 0, // It's not used + blockNumber, + blockHash, + }), + ~maxReorgDepth, + ~detectedReorgBlock?, + ~shouldRollbackOnReorg, + ) } it("getThresholdBlockNumbers works as expected", () => { Assert.deepEqual( - mock( - scannedHashesFixture, - ~confirmedBlockThreshold=200, - )->ReorgDetection.LastBlockScannedHashes.getThresholdBlockNumbers(~currentBlockHeight=500), + mock(scannedHashesFixture, ~maxReorgDepth=200)->ReorgDetection.getThresholdBlockNumbers( + ~currentBlockHeight=500, + ), [300, 500], ~message="Both 300 and 500 should be included in the threshold", ) Assert.deepEqual( - mock( - scannedHashesFixture, - ~confirmedBlockThreshold=200, - )->ReorgDetection.LastBlockScannedHashes.getThresholdBlockNumbers(~currentBlockHeight=501), + mock(scannedHashesFixture, ~maxReorgDepth=200)->ReorgDetection.getThresholdBlockNumbers( + ~currentBlockHeight=501, + ), [500], ~message="If chain progresses one more block, 300 is not included in the threshold anymore", ) Assert.deepEqual( - mock( - scannedHashesFixture, - ~confirmedBlockThreshold=200, - )->ReorgDetection.LastBlockScannedHashes.getThresholdBlockNumbers(~currentBlockHeight=499), + mock(scannedHashesFixture, ~maxReorgDepth=200)->ReorgDetection.getThresholdBlockNumbers( + ~currentBlockHeight=499, + ), [300, 500], ~message="We don't prevent blocks higher than currentBlockHeight from being included in the threshold, since the case is not possible", ) Assert.deepEqual( mock( [(300, "0x789"), (50, "0x456"), (500, "0x5432"), (1, "0x123")], - ~confirmedBlockThreshold=200, - )->ReorgDetection.LastBlockScannedHashes.getThresholdBlockNumbers(~currentBlockHeight=500), + ~maxReorgDepth=200, + )->ReorgDetection.getThresholdBlockNumbers(~currentBlockHeight=500), [300, 500], ~message="The order of blocks doesn't matter when we create reorg detection object", ) Assert.deepEqual( - mock( - scannedHashesFixture, - ~confirmedBlockThreshold=199, - )->ReorgDetection.LastBlockScannedHashes.getThresholdBlockNumbers(~currentBlockHeight=500), + mock(scannedHashesFixture, ~maxReorgDepth=199)->ReorgDetection.getThresholdBlockNumbers( + ~currentBlockHeight=500, + ), [500], - ~message="Possible to shrink confirmedBlockThreshold", + ~message="Possible to shrink maxReorgDepth", ) Assert.deepEqual( - mock( - scannedHashesFixture, - ~confirmedBlockThreshold=450, - )->ReorgDetection.LastBlockScannedHashes.getThresholdBlockNumbers(~currentBlockHeight=500), + mock(scannedHashesFixture, ~maxReorgDepth=450)->ReorgDetection.getThresholdBlockNumbers( + ~currentBlockHeight=500, + ), [50, 300, 500], - ~message="Possible to increase confirmedBlockThreshold", + ~message="Possible to increase maxReorgDepth", ) }) @@ -78,8 +79,8 @@ describe("Validate reorg detection functions", () => { let currentBlockHeight = 500 let reorgDetection = - ReorgDetection.LastBlockScannedHashes.empty(~confirmedBlockThreshold=500) - ->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( + mock([], ~maxReorgDepth=500) + ->ReorgDetection.registerReorgGuard( ~reorgGuard={ rangeLastBlock: { blockNumber: 1, @@ -88,10 +89,9 @@ describe("Validate reorg detection functions", () => { prevRangeLastBlock: None, }, ~currentBlockHeight, - ~shouldRollbackOnReorg, ) ->pipeNoReorg - ->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( + ->ReorgDetection.registerReorgGuard( ~reorgGuard={ rangeLastBlock: { blockNumber: 50, @@ -103,10 +103,9 
@@ describe("Validate reorg detection functions", () => { }), }, ~currentBlockHeight, - ~shouldRollbackOnReorg, ) ->pipeNoReorg - ->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( + ->ReorgDetection.registerReorgGuard( ~reorgGuard={ rangeLastBlock: { blockNumber: 300, @@ -118,10 +117,9 @@ describe("Validate reorg detection functions", () => { }), }, ~currentBlockHeight, - ~shouldRollbackOnReorg, ) ->pipeNoReorg - ->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( + ->ReorgDetection.registerReorgGuard( ~reorgGuard={ rangeLastBlock: { blockNumber: 500, @@ -133,13 +131,12 @@ describe("Validate reorg detection functions", () => { }), }, ~currentBlockHeight, - ~shouldRollbackOnReorg, ) ->pipeNoReorg Assert.deepEqual( reorgDetection, - mock(scannedHashesFixture, ~confirmedBlockThreshold=500), + mock(scannedHashesFixture, ~maxReorgDepth=500), ~message="Should have the same data as the mock", ) }) @@ -149,8 +146,8 @@ describe("Validate reorg detection functions", () => { () => { let currentBlockHeight = 500 let reorgDetection = - ReorgDetection.LastBlockScannedHashes.empty(~confirmedBlockThreshold=200) - ->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( + mock([], ~maxReorgDepth=200) + ->ReorgDetection.registerReorgGuard( ~reorgGuard={ rangeLastBlock: { blockNumber: 50, @@ -162,13 +159,12 @@ describe("Validate reorg detection functions", () => { }), }, ~currentBlockHeight, - ~shouldRollbackOnReorg, ) ->pipeNoReorg Assert.deepEqual( reorgDetection, - mock([(1, "0x123"), (50, "0x456")], ~confirmedBlockThreshold=200), + mock([(1, "0x123"), (50, "0x456")], ~maxReorgDepth=200), ~message="Should add two records. One for rangeLastBlock and one for prevRangeLastBlock", ) }, @@ -176,8 +172,8 @@ describe("Validate reorg detection functions", () => { it("Should prune records outside of the reorg threshold on registering new data", () => { let reorgDetection = - mock([(1, "0x1"), (2, "0x2"), (3, "0x3")], ~confirmedBlockThreshold=2) - ->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( + mock([(1, "0x1"), (2, "0x2"), (3, "0x3")], ~maxReorgDepth=2) + ->ReorgDetection.registerReorgGuard( ~reorgGuard={ rangeLastBlock: { blockNumber: 4, @@ -189,21 +185,20 @@ describe("Validate reorg detection functions", () => { }), }, ~currentBlockHeight=4, - ~shouldRollbackOnReorg, ) ->pipeNoReorg Assert.deepEqual( reorgDetection, - mock([(2, "0x2"), (3, "0x3"), (4, "0x4")], ~confirmedBlockThreshold=2), + mock([(2, "0x2"), (3, "0x3"), (4, "0x4")], ~maxReorgDepth=2), ~message="Should prune 1 since it's outside of reorg threshold", // Keeping block n 2 is questionable ) }) it("Shouldn't validate reorg detection if it's outside of the reorg threshold", () => { let reorgDetection = - mock(scannedHashesFixture, ~confirmedBlockThreshold=200) - ->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( + mock(scannedHashesFixture, ~maxReorgDepth=200) + ->ReorgDetection.registerReorgGuard( ~reorgGuard={ rangeLastBlock: { blockNumber: 50, @@ -215,7 +210,6 @@ describe("Validate reorg detection functions", () => { }), }, ~currentBlockHeight=500, - ~shouldRollbackOnReorg, ) ->pipeNoReorg @@ -223,7 +217,7 @@ describe("Validate reorg detection functions", () => { reorgDetection, mock( [(20, "0x20-invalid"), (50, "0x50-invalid"), (300, "0x789"), (500, "0x5432")], - ~confirmedBlockThreshold=200, + ~maxReorgDepth=200, ), ~message="Prunes original blocks at 1 and 50. 
It writes invalid data for block 20 and 50, but they are outside of the reorg thershold, so we don't care", ) @@ -233,7 +227,7 @@ describe("Validate reorg detection functions", () => { "Correctly getLatestValidScannedBlock when returned invalid block from another instance", () => { let reorgGuard = { - rangeLastBlock: { + ReorgDetection.rangeLastBlock: { blockNumber: 10, blockHash: "0x10", }, @@ -242,11 +236,7 @@ describe("Validate reorg detection functions", () => { let hashes = mock([(9, "0x9"), (10, "0x10-invalid")]) let (updatedHashes, reorgResult) = - hashes->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( - ~reorgGuard, - ~currentBlockHeight=10, - ~shouldRollbackOnReorg, - ) + hashes->ReorgDetection.registerReorgGuard(~reorgGuard, ~currentBlockHeight=10) Assert.deepEqual( updatedHashes, @@ -270,12 +260,12 @@ describe("Validate reorg detection functions", () => { }), ) Assert.deepEqual( - updatedHashes->LastBlockScannedHashes.getThresholdBlockNumbers(~currentBlockHeight=10), + updatedHashes->ReorgDetection.getThresholdBlockNumbers(~currentBlockHeight=10), [9, 10], ~message="Returns block numbers in hashes together with the invalid one", ) Assert.deepEqual( - updatedHashes->LastBlockScannedHashes.getLatestValidScannedBlock( + updatedHashes->ReorgDetection.getLatestValidScannedBlock( ~blockNumbersAndHashes=[ { blockNumber: 9, @@ -296,7 +286,7 @@ describe("Validate reorg detection functions", () => { `, ) Assert.deepEqual( - updatedHashes->LastBlockScannedHashes.getLatestValidScannedBlock( + updatedHashes->ReorgDetection.getLatestValidScannedBlock( ~blockNumbersAndHashes=[ { blockNumber: 9, @@ -320,7 +310,7 @@ describe("Validate reorg detection functions", () => { ) Assert.deepEqual( - updatedHashes->LastBlockScannedHashes.rollbackToValidBlockNumber(~blockNumber=9), + updatedHashes->ReorgDetection.rollbackToValidBlockNumber(~blockNumber=9), mock([(9, "0x9")]), ~message=`Should clean up the invalid block during rollback`, ) @@ -329,24 +319,21 @@ describe("Validate reorg detection functions", () => { it("Should detect reorg when rangeLastBlock hash doesn't match the scanned block", () => { let reorgGuard = { - rangeLastBlock: { + ReorgDetection.rangeLastBlock: { blockNumber: 10, blockHash: "0x10", }, prevRangeLastBlock: None, } let scannedBlock = { - blockNumber: 10, + ReorgDetection.blockNumber: 10, blockHash: "0x10-invalid", } - let hashes = mock([(10, "0x10-invalid")]) - Assert.deepEqual( - hashes->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( + mock([(10, "0x10-invalid")], ~shouldRollbackOnReorg=true)->ReorgDetection.registerReorgGuard( ~reorgGuard, ~currentBlockHeight=10, - ~shouldRollbackOnReorg, ), ( mock([(10, "0x10-invalid")], ~detectedReorgBlock=scannedBlock), @@ -358,13 +345,12 @@ describe("Validate reorg detection functions", () => { ) Assert.deepEqual( - hashes->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( + mock([(10, "0x10-invalid")], ~shouldRollbackOnReorg=false)->ReorgDetection.registerReorgGuard( ~reorgGuard, ~currentBlockHeight=10, - ~shouldRollbackOnReorg=false, ), ( - mock([]), + mock([], ~shouldRollbackOnReorg=false), ReorgDetected({ scannedBlock, receivedBlock: reorgGuard.rangeLastBlock, @@ -377,7 +363,7 @@ describe("Validate reorg detection functions", () => { it("Should detect reorg when prevRangeLastBlock hash doesn't match the scanned block", () => { let reorgGuard = { - rangeLastBlock: { + ReorgDetection.rangeLastBlock: { blockNumber: 11, blockHash: "0x11", }, @@ -387,21 +373,17 @@ describe("Validate reorg detection functions", 
() => { }), } - let hashes = mock([(10, "0x10-invalid")], ~confirmedBlockThreshold=2) + let hashes = mock([(10, "0x10-invalid")], ~maxReorgDepth=2) let reorgDetectionResult = - hashes->ReorgDetection.LastBlockScannedHashes.registerReorgGuard( - ~reorgGuard, - ~currentBlockHeight=11, - ~shouldRollbackOnReorg, - ) + hashes->ReorgDetection.registerReorgGuard(~reorgGuard, ~currentBlockHeight=11) Assert.deepEqual( reorgDetectionResult, ( mock( [(10, "0x10-invalid")], - ~confirmedBlockThreshold=2, + ~maxReorgDepth=2, ~detectedReorgBlock={ blockNumber: 10, blockHash: "0x10-invalid", @@ -422,16 +404,16 @@ describe("Validate reorg detection functions", () => { }) it("rollbackToValidBlockNumber works as expected", () => { - let reorgDetection = mock(scannedHashesFixture, ~confirmedBlockThreshold=200) + let reorgDetection = mock(scannedHashesFixture, ~maxReorgDepth=200) Assert.deepEqual( - reorgDetection->LastBlockScannedHashes.rollbackToValidBlockNumber(~blockNumber=500), + reorgDetection->ReorgDetection.rollbackToValidBlockNumber(~blockNumber=500), reorgDetection, ~message="Shouldn't prune anything when the latest block number is the valid one", ) Assert.deepEqual( - reorgDetection->LastBlockScannedHashes.rollbackToValidBlockNumber(~blockNumber=499), - mock([(1, "0x123"), (50, "0x456"), (300, "0x789")], ~confirmedBlockThreshold=200), + reorgDetection->ReorgDetection.rollbackToValidBlockNumber(~blockNumber=499), + mock([(1, "0x123"), (50, "0x456"), (300, "0x789")], ~maxReorgDepth=200), ~message="Shouldn't prune blocks outside of the threshold. Would be nice, but it doesn't matter", ) }) @@ -452,10 +434,7 @@ describe("Validate reorg detection functions", () => { ) Assert.deepEqual( - mock( - scannedHashesFixture, - ~confirmedBlockThreshold=500, - )->LastBlockScannedHashes.getLatestValidScannedBlock( + mock(scannedHashesFixture, ~maxReorgDepth=500)->ReorgDetection.getLatestValidScannedBlock( ~blockNumbersAndHashes, ~currentBlockHeight=500, ), @@ -467,10 +446,7 @@ describe("Validate reorg detection functions", () => { ~message="Should return the latest non-different block if we assume that all blocks are in the threshold", ) Assert.deepEqual( - mock( - scannedHashesFixture, - ~confirmedBlockThreshold=200, - )->LastBlockScannedHashes.getLatestValidScannedBlock( + mock(scannedHashesFixture, ~maxReorgDepth=200)->ReorgDetection.getLatestValidScannedBlock( ~blockNumbersAndHashes, ~currentBlockHeight=500, ), @@ -491,10 +467,7 @@ describe("Validate reorg detection functions", () => { }, ) Assert.deepEqual( - mock( - scannedHashesFixture, - ~confirmedBlockThreshold=500, - )->LastBlockScannedHashes.getLatestValidScannedBlock( + mock(scannedHashesFixture, ~maxReorgDepth=500)->ReorgDetection.getLatestValidScannedBlock( ~blockNumbersAndHashes, ~currentBlockHeight=500, ), @@ -506,10 +479,7 @@ describe("Validate reorg detection functions", () => { ~message="Case when the different block is in between of valid ones", ) Assert.deepEqual( - mock( - scannedHashesFixture, - ~confirmedBlockThreshold=200, - )->LastBlockScannedHashes.getLatestValidScannedBlock( + mock(scannedHashesFixture, ~maxReorgDepth=200)->ReorgDetection.getLatestValidScannedBlock( ~blockNumbersAndHashes, ~currentBlockHeight=500, ), @@ -517,10 +487,7 @@ describe("Validate reorg detection functions", () => { ~message="Returns Error(NotFound) if the different block is the last one in the threshold", ) Assert.deepEqual( - mock( - scannedHashesFixture, - ~confirmedBlockThreshold=200, - )->LastBlockScannedHashes.getLatestValidScannedBlock( + 
mock(scannedHashesFixture, ~maxReorgDepth=200)->ReorgDetection.getLatestValidScannedBlock( ~blockNumbersAndHashes, ~currentBlockHeight=501, ), diff --git a/scenarios/test_codegen/test/SerDe_Test.res b/scenarios/test_codegen/test/SerDe_Test.res deleted file mode 100644 index 003295154..000000000 --- a/scenarios/test_codegen/test/SerDe_Test.res +++ /dev/null @@ -1,242 +0,0 @@ -open RescriptMocha - -@send external padStart: (string, ~padCount: int, ~padChar: string) => string = "padStart" - -let mockDate = (~year=2024, ~month=1, ~day=1) => { - let padInt = i => i->Belt.Int.toString->padStart(~padCount=2, ~padChar="0") - Js.Date.fromString(`${year->padInt}-${month->padInt}-${day->padInt}T00:00:00Z`) -} - -describe("SerDe Test", () => { - Async.before(async () => { - await DbHelpers.runUpDownMigration() - }) - - Async.it("All type entity", async () => { - let storage = Config.codegenPersistence->Persistence.getInitializedStorageOrThrow - - let entity: Entities.EntityWithAllTypes.t = { - id: "1", - string: "string", - optString: Some("optString"), - arrayOfStrings: ["arrayOfStrings1", "arrayOfStrings2"], - int_: 1, - optInt: Some(2), - arrayOfInts: [3, 4], - float_: 1.1, - optFloat: Some(2.2), - arrayOfFloats: [3.3, 4.4], - bool: true, - optBool: Some(false), - //TODO: get array of bools working - // arrayOfBool: [true, false], - bigInt: BigInt.fromInt(1), - optBigInt: Some(BigInt.fromInt(2)), - arrayOfBigInts: [BigInt.fromInt(3), BigInt.fromInt(4)], - bigDecimal: BigDecimal.fromStringUnsafe("1.1"), - bigDecimalWithConfig: BigDecimal.fromStringUnsafe("1.1"), - optBigDecimal: Some(BigDecimal.fromStringUnsafe("2.2")), - arrayOfBigDecimals: [BigDecimal.fromStringUnsafe("3.3"), BigDecimal.fromStringUnsafe("4.4")], - //TODO: get timestamp working - // timestamp: mockDate(~day=1), - // optTimestamp: Some(mockDate(~day=2)), - // arrayOfTimestamps: [Js.Date.fromFloat(3.3), Js.Date.fromFloat(4.4)], - // arrayOfTimestamps: [], - json: %raw(`{"foo": ["bar"]}`), - enumField: ADMIN, - optEnumField: Some(ADMIN), - } - - let entityHistoryItem: EntityHistory.historyRow<_> = { - current: { - chain_id: 1, - block_timestamp: 1, - block_number: 1, - log_index: 1, - }, - previous: None, - entityData: Set(entity), - } - - //Fails if serialziation does not work - let set = (sql, items) => - sql->PgStorage.setOrThrow( - ~items, - ~table=Entities.EntityWithAllTypes.table, - ~itemSchema=Entities.EntityWithAllTypes.schema, - ~pgSchema=Config.storagePgSchema, - ) - - //Fails if parsing does not work - let read = ids => - storage.loadByIdsOrThrow( - ~ids, - ~table=Entities.EntityWithAllTypes.table, - ~rowsSchema=Entities.EntityWithAllTypes.rowsSchema, - ) - - let setHistory = (sql, row) => - Promise.all( - sql->PgStorage.setEntityHistoryOrThrow( - ~entityHistory=Entities.EntityWithAllTypes.entityHistory, - ~rows=[row], - ), - )->Promise.ignoreValue - - try await Db.sql->setHistory(entityHistoryItem) catch { - | exn => - Js.log2("setHistory exn", exn) - Assert.fail("Failed to set entity history in table") - } - - //set the entity - try await Db.sql->set([entity->Entities.EntityWithAllTypes.castToInternal]) catch { - | exn => - Js.log(exn) - Assert.fail("Failed to set entity in table") - } - - switch await read([entity.id]) { - | exception exn => - Js.log(exn) - Assert.fail("Failed to read entity from table") - | [_entity] => Assert.deepEqual(_entity, entity) - | _ => Assert.fail("Should have returned a row on batch read fn") - } - - //The copy function will do it's custom postgres serialization of the entity - // await 
Db.sql->DbFunctions.EntityHistory.copyAllEntitiesToEntityHistory - - let res = await Db.sql->Postgres.unsafe(`SELECT * FROM public."EntityWithAllTypes_history";`) - - switch res { - | [row] => - let parsed = row->S.parseJsonOrThrow(Entities.EntityWithAllTypes.entityHistory.schema) - Assert.deepEqual( - parsed.entityData, - Set(entity), - ~message="Postgres json serialization should be compatable with our schema", - ) - | _ => Assert.fail("Should have returned a row") - } - }) - - it("contains correct query for unnest entity", () => { - let createQuery = - Entities.EntityWithAllNonArrayTypes.table->PgStorage.makeCreateTableQuery(~pgSchema="public") - Assert.equal( - createQuery, - `CREATE TABLE IF NOT EXISTS "public"."EntityWithAllNonArrayTypes"("bigDecimal" NUMERIC NOT NULL, "bigDecimalWithConfig" NUMERIC(10, 8) NOT NULL, "bigInt" NUMERIC NOT NULL, "bool" BOOLEAN NOT NULL, "enumField" "public".AccountType NOT NULL, "float_" DOUBLE PRECISION NOT NULL, "id" TEXT NOT NULL, "int_" INTEGER NOT NULL, "optBigDecimal" NUMERIC, "optBigInt" NUMERIC, "optBool" BOOLEAN, "optEnumField" "public".AccountType, "optFloat" DOUBLE PRECISION, "optInt" INTEGER, "optString" TEXT, "string" TEXT NOT NULL, PRIMARY KEY("id"));`, - ) - let query = PgStorage.makeInsertUnnestSetQuery( - ~table=Entities.EntityWithAllNonArrayTypes.table, - ~itemSchema=Entities.EntityWithAllNonArrayTypes.schema, - ~isRawEvents=false, - ~pgSchema="public", - ) - - Assert.equal( - query, - `INSERT INTO "public"."EntityWithAllNonArrayTypes" ("bigDecimal", "bigDecimalWithConfig", "bigInt", "bool", "enumField", "float_", "id", "int_", "optBigDecimal", "optBigInt", "optBool", "optEnumField", "optFloat", "optInt", "optString", "string") -SELECT * FROM unnest($1::NUMERIC[],$2::NUMERIC(10, 8)[],$3::NUMERIC[],$4::INTEGER[]::BOOLEAN[],$5::TEXT[]::"public".AccountType[],$6::DOUBLE PRECISION[],$7::TEXT[],$8::INTEGER[],$9::NUMERIC[],$10::NUMERIC[],$11::INTEGER[]::BOOLEAN[],$12::TEXT[]::"public".AccountType[],$13::DOUBLE PRECISION[],$14::INTEGER[],$15::TEXT[],$16::TEXT[])ON CONFLICT("id") DO UPDATE SET "bigDecimal" = EXCLUDED."bigDecimal","bigDecimalWithConfig" = EXCLUDED."bigDecimalWithConfig","bigInt" = EXCLUDED."bigInt","bool" = EXCLUDED."bool","enumField" = EXCLUDED."enumField","float_" = EXCLUDED."float_","int_" = EXCLUDED."int_","optBigDecimal" = EXCLUDED."optBigDecimal","optBigInt" = EXCLUDED."optBigInt","optBool" = EXCLUDED."optBool","optEnumField" = EXCLUDED."optEnumField","optFloat" = EXCLUDED."optFloat","optInt" = EXCLUDED."optInt","optString" = EXCLUDED."optString","string" = EXCLUDED."string";`, - ) - }) - - Async.it("All type entity without array types for unnest case", async () => { - let storage = Config.codegenPersistence->Persistence.getInitializedStorageOrThrow - - let entity: Entities.EntityWithAllNonArrayTypes.t = { - id: "1", - string: "string", - optString: Some("optString"), - int_: 1, - optInt: Some(2), - float_: 1.1, - optFloat: Some(2.2), - bool: true, - optBool: Some(false), - bigInt: BigInt.fromInt(1), - optBigInt: Some(BigInt.fromInt(2)), - bigDecimal: BigDecimal.fromStringUnsafe("1.1"), - optBigDecimal: Some(BigDecimal.fromStringUnsafe("2.2")), - bigDecimalWithConfig: BigDecimal.fromStringUnsafe("1.1"), - enumField: ADMIN, - optEnumField: Some(ADMIN), - } - - let entityHistoryItem: EntityHistory.historyRow<_> = { - current: { - chain_id: 1, - block_timestamp: 1, - block_number: 1, - log_index: 1, - }, - previous: None, - entityData: Set(entity), - } - - //Fails if serialziation does not work - let set = (sql, items) => { 
- sql->PgStorage.setOrThrow( - ~items, - ~table=Entities.EntityWithAllNonArrayTypes.table, - ~itemSchema=Entities.EntityWithAllNonArrayTypes.schema, - ~pgSchema="public", - ) - } - - //Fails if parsing does not work - let read = ids => - storage.loadByIdsOrThrow( - ~ids, - ~table=Entities.EntityWithAllNonArrayTypes.table, - ~rowsSchema=Entities.EntityWithAllNonArrayTypes.rowsSchema, - ) - - let setHistory = (sql, row) => - Promise.all( - sql->PgStorage.setEntityHistoryOrThrow( - ~entityHistory=Entities.EntityWithAllNonArrayTypes.entityHistory, - ~rows=[row], - ), - )->Promise.ignoreValue - - try await Db.sql->setHistory(entityHistoryItem) catch { - | exn => - Js.log2("setHistory exn", exn) - Assert.fail("Failed to set entity history in table") - } - - //set the entity - try await Db.sql->set([entity->Entities.EntityWithAllNonArrayTypes.castToInternal]) catch { - | exn => - Js.log(exn) - Assert.fail("Failed to set entity in table") - } - - switch await read([entity.id]) { - | exception exn => - Js.log(exn) - Assert.fail("Failed to read entity from table") - | [_entity] => Assert.deepEqual(_entity, entity) - | _ => Assert.fail("Should have returned a row on batch read fn") - } - - //The copy function will do it's custom postgres serialization of the entity - // await Db.sql->DbFunctions.EntityHistory.copyAllEntitiesToEntityHistory - - let res = - await Db.sql->Postgres.unsafe(`SELECT * FROM public."EntityWithAllNonArrayTypes_history";`) - - switch res { - | [row] => - let parsed = row->S.parseJsonOrThrow(Entities.EntityWithAllNonArrayTypes.entityHistory.schema) - Assert.deepEqual( - parsed.entityData, - Set(entity), - ~message="Postgres json serialization should be compatable with our schema", - ) - | _ => Assert.fail("Should have returned a row") - } - }) -}) diff --git a/scenarios/test_codegen/test/WriteRead_test.res b/scenarios/test_codegen/test/WriteRead_test.res new file mode 100644 index 000000000..54365361e --- /dev/null +++ b/scenarios/test_codegen/test/WriteRead_test.res @@ -0,0 +1,199 @@ +open RescriptMocha + +@send external padStart: (string, ~padCount: int, ~padChar: string) => string = "padStart" + +let mockDate = (~year=2024, ~month=1, ~day=1) => { + let padInt = i => i->Belt.Int.toString->padStart(~padCount=2, ~padChar="0") + Js.Date.fromString(`${year->padInt}-${month->padInt}-${day->padInt}T00:00:00Z`) +} + +describe("Write/read tests", () => { + Async.it("Test writing and reading entities with special cases", async () => { + let sourceMock = Mock.Source.make(~chain=#1337, [#getHeightOrThrow, #getItemsOrThrow]) + let indexerMock = await Mock.Indexer.make( + ~chains=[{chain: #1337, sources: [sourceMock.source]}], + ~saveFullHistory=true, + ~enableHasura=true, + ) + await Utils.delay(0) + + Assert.deepEqual( + sourceMock.getHeightOrThrowCalls->Array.length, + 1, + ~message="should have called getHeightOrThrow to get initial height", + ) + sourceMock.resolveGetHeightOrThrow(300) + await Utils.delay(0) + await Utils.delay(0) + + let entityWithAllTypes: Entities.EntityWithAllTypes.t = { + id: "1", + string: "string", + optString: Some("optString"), + arrayOfStrings: ["arrayOfStrings1", "arrayOfStrings2"], + int_: 1, + optInt: Some(2), + arrayOfInts: [3, 4], + float_: 1.1, + optFloat: Some(2.2), + arrayOfFloats: [3.3, 4.4], + bool: true, + optBool: Some(false), + //TODO: get array of bools working + // arrayOfBool: [true, false], + bigInt: BigInt.fromInt(1), + optBigInt: Some(BigInt.fromInt(2)), + arrayOfBigInts: [BigInt.fromInt(3), BigInt.fromInt(4)], + bigDecimal: 
BigDecimal.fromStringUnsafe("1.1"), + bigDecimalWithConfig: BigDecimal.fromStringUnsafe("1.1"), + optBigDecimal: Some(BigDecimal.fromStringUnsafe("2.2")), + arrayOfBigDecimals: [BigDecimal.fromStringUnsafe("3.3"), BigDecimal.fromStringUnsafe("4.4")], + //TODO: get timestamp working + // timestamp: mockDate(~day=1), + // optTimestamp: Some(mockDate(~day=2)), + // arrayOfTimestamps: [Js.Date.fromFloat(3.3), Js.Date.fromFloat(4.4)], + // arrayOfTimestamps: [], + json: %raw(`{"foo": ["bar"]}`), + enumField: ADMIN, + optEnumField: Some(ADMIN), + } + let entityWithAllNonArrayTypes: Entities.EntityWithAllNonArrayTypes.t = { + id: "1", + string: "string", + optString: Some("optString"), + int_: 1, + optInt: Some(2), + float_: 1.1, + optFloat: Some(2.2), + bool: true, + optBool: Some(false), + bigInt: BigInt.fromInt(1), + optBigInt: Some(BigInt.fromInt(2)), + bigDecimal: BigDecimal.fromStringUnsafe("1.1"), + optBigDecimal: Some(BigDecimal.fromStringUnsafe("2.2")), + bigDecimalWithConfig: BigDecimal.fromStringUnsafe("1.1"), + enumField: ADMIN, + optEnumField: Some(ADMIN), + } + + sourceMock.resolveGetItemsOrThrow([ + { + blockNumber: 50, + logIndex: 1, + handler: async ({context}) => { + context.entityWithAllTypes.set(entityWithAllTypes) + context.entityWithAllNonArrayTypes.set(entityWithAllNonArrayTypes) + + // Test that for entities with max-length names, history is saved correctly and the history table name (envio_history_) is truncated correctly. + context.entityWith63LenghtName______________________________________one.set({ + id: "1", + }) + context.entityWith63LenghtName______________________________________two.set({ + id: "2", + }) + }, + }, + ]) + await indexerMock.getBatchWritePromise() + + Assert.deepEqual( + await indexerMock.query(module(Entities.EntityWithAllTypes)), + [entityWithAllTypes], + ) + Assert.deepEqual( + await indexerMock.queryHistory(module(Entities.EntityWithAllTypes)), + [ + { + checkpointId: 1, + entityId: "1", + entityUpdateAction: Set(entityWithAllTypes), + }, + ], + ) + Assert.deepEqual( + await indexerMock.query(module(Entities.EntityWithAllNonArrayTypes)), + [entityWithAllNonArrayTypes], + ) + Assert.deepEqual( + await indexerMock.queryHistory(module(Entities.EntityWithAllNonArrayTypes)), + [ + { + checkpointId: 1, + entityId: "1", + entityUpdateAction: Set(entityWithAllNonArrayTypes), + }, + ], + ) + + Assert.deepEqual( + await indexerMock.query( + module(Entities.EntityWith63LenghtName______________________________________one), + ), + [ + { + id: "1", + }, + ], + ) + Assert.deepEqual( + await indexerMock.queryHistory( + module(Entities.EntityWith63LenghtName______________________________________one), + ), + [ + { + checkpointId: 1, + entityId: "1", + entityUpdateAction: Set({ + id: "1", + }), + }, + ], + ) + Assert.deepEqual( + await indexerMock.query( + module(Entities.EntityWith63LenghtName______________________________________two), + ), + [ + { + id: "2", + }, + ], + ) + Assert.deepEqual( + await indexerMock.queryHistory( + module(Entities.EntityWith63LenghtName______________________________________two), + ), + [ + { + checkpointId: 1, + entityId: "2", + entityUpdateAction: Set({ + id: "2", + }), + }, + ], + ) + + Assert.deepEqual( + await indexerMock.graphql(`query { + EntityWithAllTypes { + arrayOfBigInts + arrayOfBigDecimals + } +}`), + { + data: { + "EntityWithAllTypes": [ + { + "arrayOfBigInts": ["3", "4"], + "arrayOfBigDecimals": ["3.3", "4.4"], + }, + ], + }, + }, + ~message=`We internally turn NUMERIC[] to TEXT[] when Hasura is enabled, +to work around a bug where the values are returned
as number[] instead of string[], +breaking precision on big values. https://github.com/enviodev/hyperindex/issues/788`, + ) + }) +}) diff --git a/scenarios/test_codegen/test/__mocks__/DbStub.res b/scenarios/test_codegen/test/__mocks__/DbStub.res deleted file mode 100644 index 7d67621c5..000000000 --- a/scenarios/test_codegen/test/__mocks__/DbStub.res +++ /dev/null @@ -1,35 +0,0 @@ -let deleteDictKey = (_dict: dict<'a>, _key: string) => %raw(`delete _dict[_key]`) - -let databaseDict: dict = Js.Dict.empty() - -let getGravatarDb = (~id: string) => { - Js.Dict.get(databaseDict, id) -} - -let setGravatarDb = (~gravatar: Entities.Gravatar.t) => { - Js.Dict.set(databaseDict, gravatar.id, gravatar) -} - -let batchSetGravatar = (batch: array) => { - batch - ->Belt.Array.forEach(entity => { - setGravatarDb(~gravatar=entity) - }) - ->Promise.resolve -} - -let batchDeleteGravatar = (batch: array) => { - batch - ->Belt.Array.forEach(entity => { - deleteDictKey(databaseDict, entity.id) - }) - ->Promise.resolve -} - -let readGravatarEntities = (entityReads: array): promise> => { - entityReads - ->Belt.Array.keepMap(id => { - getGravatarDb(~id)->Belt.Option.map(gravatar => gravatar) - }) - ->Promise.resolve -} diff --git a/scenarios/test_codegen/test/__mocks__/MockConfig.res b/scenarios/test_codegen/test/__mocks__/MockConfig.res index b1316c884..9f583ed8b 100644 --- a/scenarios/test_codegen/test/__mocks__/MockConfig.res +++ b/scenarios/test_codegen/test/__mocks__/MockConfig.res @@ -40,7 +40,7 @@ let evmContracts = contracts->Js.Array2.map((contract): Internal.evmContractConf let mockChainConfig: InternalConfig.chain = { id: 1337, - confirmedBlockThreshold: 200, + maxReorgDepth: 200, startBlock: 1, contracts, sources: [ diff --git a/scenarios/test_codegen/test/helpers/Mock.res b/scenarios/test_codegen/test/helpers/Mock.res index 9aa7c7fc4..5094f6cac 100644 --- a/scenarios/test_codegen/test/helpers/Mock.res +++ b/scenarios/test_codegen/test/helpers/Mock.res @@ -8,15 +8,11 @@ module InMemoryStore = { ) let entity = entity->(Utils.magic: 'a => Entities.internalEntity) inMemTable->InMemoryTable.Entity.set( - Set(entity)->Types.mkEntityUpdate( - ~eventIdentifier={ - chainId: 0, - blockTimestamp: 0, - blockNumber: 0, - logIndex: 0, - }, - ~entityId=entity->Entities.getEntityId, - ), + { + entityId: entity->Entities.getEntityId, + checkpointId: 0, + entityUpdateAction: Set(entity), + }, ~shouldSaveHistory=RegisterHandlers.getConfig()->Config.shouldSaveHistory( ~isInReorgThreshold=false, ), @@ -200,6 +196,8 @@ module Storage = { cleanRun: false, cache: Js.Dict.empty(), chains: [], + reorgCheckpoints: [], + checkpointId: 0, }), } } @@ -210,19 +208,32 @@ module Indexer = { value: string, labels: dict, } - type t = { + type graphqlResponse<'a> = {data?: {..} as 'a} + type rec t = { getBatchWritePromise: unit => promise, getRollbackReadyPromise: unit => promise, query: 'entity. module(Entities.Entity with type t = 'entity) => promise>, queryHistory: 'entity. module(Entities.Entity with type t = 'entity) => promise< - array>, + array>, >, + queryCheckpoints: unit => promise>, + queryEffectCache: string => promise>, metric: string => promise>, + restart: unit => promise, + graphql: 'data.
string => promise>, } type chainConfig = {chain: Types.chain, sources: array, startBlock?: int} - let make = async (~chains: array, ~multichain=InternalConfig.Unordered) => { + let rec make = async ( + ~chains: array, + ~multichain=InternalConfig.Unordered, + ~saveFullHistory=false, + // Reinit storage without Hasura + // makes tests ~1.9 seconds faster + ~enableHasura=false, + ~reset=true, + ) => { DbHelpers.resetPostgresClient() // TODO: Should stop using global client PromClient.defaultRegister->PromClient.resetMetrics @@ -247,21 +258,17 @@ module Indexer = { }) ->ChainMap.fromArrayUnsafe + let graphqlClient = Rest.client(`${Env.Hasura.url}/v1/graphql`) + let graphqlRoute = Rest.route(() => { + method: Post, + path: "", + input: s => s.field("query", S.string), + responses: [s => s.data(S.unknown)], + }) + let sql = Db.sql let pgSchema = Env.Db.publicSchema - // Reinit storage without Hasura - // This made the test 1.9 seconds faster - // TODO: Improve indexer initialization time - // by parallizing hasura (at least for dev) - let storage = PgStorage.make( - ~sql, - ~pgSchema, - ~pgHost=Env.Db.host, - ~pgUser=Env.Db.user, - ~pgPort=Env.Db.port, - ~pgDatabase=Env.Db.database, - ~pgPassword=Env.Db.password, - ) + let storage = Config.makeStorage(~sql, ~pgSchema, ~isHasuraEnabled=enableHasura) let persistence = { ...config.persistence, storageStatus: Persistence.Unknown, @@ -269,6 +276,10 @@ module Indexer = { } let config: Config.t = { ...config, + historyConfig: { + rollbackFlag: RollbackOnReorg, + historyFlag: saveFullHistory ? FullHistory : MinHistory, + }, persistence, enableRawEvents: false, chainMap, @@ -279,7 +290,7 @@ module Indexer = { await config.persistence->Persistence.init( ~chainConfigs=config.chainMap->ChainMap.values, - ~reset=true, + ~reset, ) let chainManager = await ChainManager.makeFromDbState( @@ -302,7 +313,7 @@ module Indexer = { Promise.makeAsync(async (resolve, _reject) => { let before = (gsManager->GlobalStateManager.getState).processedBatches while before >= (gsManager->GlobalStateManager.getState).processedBatches { - await Utils.delay(50) + await Utils.delay(1) } resolve() }) @@ -311,13 +322,14 @@ module Indexer = { Promise.makeAsync(async (resolve, _reject) => { while ( switch (gsManager->GlobalStateManager.getState).rollbackState { - | RollbackInMemStore(_) => false - | RollingBack(_) - | NoRollback => true + | RollbackReady(_) => false + | _ => true } ) { - await Utils.delay(50) + await Utils.delay(1) } + // Skip an extra microtask for indexer to fire actions + await Utils.delay(0) resolve() }) }, @@ -342,14 +354,45 @@ module Indexer = { ), ) ->Promise.thenResolve(items => { - items->S.parseOrThrow(S.array(entityConfig.entityHistory.schema)) + items->S.parseOrThrow( + S.array( + S.union([ + entityConfig.entityHistory.setUpdateSchema, + S.object((s): EntityHistory.entityUpdate<'entity> => { + s.tag(EntityHistory.changeFieldName, EntityHistory.RowAction.DELETE) + { + entityId: s.field("id", S.string), + checkpointId: s.field(EntityHistory.checkpointIdFieldName, S.int), + entityUpdateAction: Delete, + } + }), + ]), + ), + ) }) ->( - Utils.magic: promise>> => promise< - array>, + Utils.magic: promise>> => promise< + array>, > ) }, + queryCheckpoints: () => { + Db.sql + ->Postgres.unsafe( + PgStorage.makeLoadAllQuery( + ~pgSchema, + ~tableName=InternalTable.Checkpoints.table.tableName, + ), + ) + ->(Utils.magic: promise => promise>) + }, + queryEffectCache: (effectName: string) => { + Db.sql + ->Postgres.unsafe( + PgStorage.makeLoadAllQuery(~pgSchema, 
~tableName=Internal.cacheTablePrefix ++ effectName), + ) + ->(Utils.magic: promise => promise>) + }, metric: async name => { switch PromClient.defaultRegister->PromClient.getSingleMetric(name) { | Some(m) => @@ -360,6 +403,25 @@ module Indexer = { | None => [] } }, + restart: () => { + let state = gsManager->GlobalStateManager.getState + gsManager->GlobalStateManager.setState({ + ...gsManager->GlobalStateManager.getState, + id: state.id + 1, + }) + make(~chains, ~enableHasura, ~multichain, ~saveFullHistory, ~reset=false) + }, + graphql: query => { + if !enableHasura { + Js.Exn.raiseError( + "It's require to set ~enableHasura=true during indexer mock creation to access this feature.", + ) + } + + graphqlRoute + ->Rest.fetch(query, ~client=graphqlClient) + ->(Utils.magic: promise => promise>) + }, } } } @@ -389,6 +451,8 @@ module Source = { resolveGetItemsOrThrow: ( array, ~latestFetchedBlockNumber: int=?, + ~latestFetchedBlockHash: string=?, + ~currentBlockHeight: int=?, ~prevRangeLastBlock: ReorgDetection.blockData=?, ) => unit, rejectGetItemsOrThrow: 'exn. 'exn => unit, @@ -426,12 +490,20 @@ module Source = { getHeightOrThrowRejectFns->Array.forEach(reject => reject(exn->Obj.magic)) }, getItemsOrThrowCalls, - resolveGetItemsOrThrow: (items, ~latestFetchedBlockNumber=?, ~prevRangeLastBlock=?) => { + resolveGetItemsOrThrow: ( + items, + ~latestFetchedBlockNumber=?, + ~latestFetchedBlockHash=?, + ~currentBlockHeight=?, + ~prevRangeLastBlock=?, + ) => { getItemsOrThrowResolveFns->Array.forEach(resolve => resolve({ "items": items, "latestFetchedBlockNumber": latestFetchedBlockNumber, + "latestFetchedBlockHash": latestFetchedBlockHash, "prevRangeLastBlock": prevRangeLastBlock, + "currentBlockHeight": currentBlockHeight, }) ) }, @@ -490,11 +562,16 @@ module Source = { ) resolve({ - Source.currentBlockHeight, + Source.currentBlockHeight: data["currentBlockHeight"]->Option.getWithDefault( + currentBlockHeight, + ), reorgGuard: { rangeLastBlock: { blockNumber: latestFetchedBlockNumber, - blockHash: `0x${latestFetchedBlockNumber->Int.toString}`, + blockHash: switch data["latestFetchedBlockHash"] { + | Some(latestFetchedBlockHash) => latestFetchedBlockHash + | None => `0x${latestFetchedBlockNumber->Int.toString}` + }, }, prevRangeLastBlock: switch data["prevRangeLastBlock"] { | Some(prevRangeLastBlock) => Some(prevRangeLastBlock) @@ -590,7 +667,7 @@ module Source = { } module Helper = { - let initialEnterReorgThreshold = async (~sourceMock: Source.t) => { + let initialEnterReorgThreshold = async (~indexerMock: Indexer.t, ~sourceMock: Source.t) => { open RescriptMocha Assert.deepEqual( @@ -610,9 +687,7 @@ module Helper = { ~message="Should request items until reorg threshold", ) sourceMock.resolveGetItemsOrThrow([]) - await Utils.delay(0) - await Utils.delay(0) - await Utils.delay(0) + await indexerMock.getBatchWritePromise() } } diff --git a/scenarios/test_codegen/test/lib_tests/EntityHistory_test.res b/scenarios/test_codegen/test/lib_tests/EntityHistory_test.res index 7384ac50b..4c7ebe68d 100644 --- a/scenarios/test_codegen/test/lib_tests/EntityHistory_test.res +++ b/scenarios/test_codegen/test/lib_tests/EntityHistory_test.res @@ -1,1213 +1,1053 @@ -open RescriptMocha - -//unsafe polymorphic toString binding for any type -@send external toStringUnsafe: 'a => string = "toString" - -let stripUndefinedFieldsInPlace = (val: 'a): 'a => { - let json = val->(Utils.magic: 'a => Js.Json.t) - //Hot fix for rescript equality check that removes optional fields - let rec strip = (json: Js.Json.t) => { - switch 
json { - | Object(obj) => - obj - ->Js.Dict.keys - ->Belt.Array.forEach(key => { - let value = obj->Utils.Dict.dangerouslyGetNonOption(key) - if value === %raw(`undefined`) { - obj->Utils.Dict.deleteInPlace(key) - } else { - strip(value->Belt.Option.getExn) - } - }) - | Array(arr) => arr->Belt.Array.forEach(value => strip(value)) - | _ => () - } - } - - json->strip - json->(Utils.magic: Js.Json.t => 'a) -} - -module TestEntity = { - type t = { - id: string, - fieldA: int, - fieldB: option, - } - - let name = "TestEntity" - let schema = S.schema(s => { - id: s.matches(S.string), - fieldA: s.matches(S.int), - fieldB: s.matches(S.null(S.string)), - }) - - let rowsSchema = S.array(schema) - let table = Table.mkTable( - "TestEntity", - ~fields=[ - Table.mkField("id", Text, ~fieldSchema=S.string, ~isPrimaryKey=true), - Table.mkField("fieldA", Integer, ~fieldSchema=S.int), - Table.mkField("fieldB", Text, ~fieldSchema=S.null(S.string), ~isNullable=true), - ], - ) - - let entityHistory = table->EntityHistory.fromTable(~schema) - - external castToInternal: t => Internal.entity = "%identity" -} - -type testEntityHistory = EntityHistory.historyRow -let testEntityHistorySchema = EntityHistory.makeHistoryRowSchema(TestEntity.schema) - -let batchSetMockEntity = (sql, items) => - PgStorage.setOrThrow( - sql, - ~items, - ~pgSchema="public", - ~table=TestEntity.table, - ~itemSchema=TestEntity.schema, - ) - -let getAllMockEntity = sql => - sql - ->Postgres.unsafe(`SELECT * FROM "public"."${TestEntity.table.tableName}"`) - ->Promise.thenResolve(json => json->S.parseJsonOrThrow(TestEntity.rowsSchema)) - -let getAllMockEntityHistory = sql => - sql->Postgres.unsafe(`SELECT * FROM "public"."${TestEntity.entityHistory.table.tableName}"`) - -describe("Entity history serde", () => { - it("serializes and deserializes correctly", () => { - let history: testEntityHistory = { - current: { - chain_id: 1, - block_number: 2, - block_timestamp: 3, - log_index: 4, - }, - previous: None, - entityData: Set({id: "1", fieldA: 1, fieldB: Some("test")}), - } - - let serializedHistory = history->S.reverseConvertToJsonOrThrow(testEntityHistorySchema) - let expected = %raw(`{ - "entity_history_block_timestamp": 3, - "entity_history_chain_id": 1, - "entity_history_block_number": 2, - "entity_history_log_index": 4, - "previous_entity_history_block_timestamp": null, - "previous_entity_history_chain_id": null, - "previous_entity_history_block_number": null, - "previous_entity_history_log_index": null, - "id": "1", - "fieldA": 1, - "fieldB": "test", - "action": "SET" - }`) - - Assert.deepEqual(serializedHistory, expected) - let deserializedHistory = serializedHistory->S.parseJsonOrThrow(testEntityHistorySchema) - Assert.deepEqual(deserializedHistory->stripUndefinedFieldsInPlace, history) - }) - - it("serializes and deserializes correctly with previous history", () => { - let history: testEntityHistory = { - current: { - chain_id: 1, - block_number: 2, - block_timestamp: 3, - log_index: 4, - }, - previous: Some({ - chain_id: 5, - block_number: 6, - block_timestamp: 7, - log_index: 8, - }), //previous - entityData: Set({id: "1", fieldA: 1, fieldB: Some("test")}), - } - let serializedHistory = history->S.reverseConvertToJsonOrThrow(testEntityHistorySchema) - let expected = %raw(`{ - "entity_history_block_timestamp": 3, - "entity_history_chain_id": 1, - "entity_history_block_number": 2, - "entity_history_log_index": 4, - "previous_entity_history_block_timestamp": 7, - "previous_entity_history_chain_id": 5, - 
"previous_entity_history_block_number": 6, - "previous_entity_history_log_index": 8, - "id": "1", - "fieldA": 1, - "fieldB": "test", - "action": "SET" - }`) - - Assert.deepEqual(serializedHistory, expected) - let deserializedHistory = serializedHistory->S.parseJsonOrThrow(testEntityHistorySchema) - Assert.deepEqual(deserializedHistory, history) - }) - - it("serializes and deserializes correctly with deleted entity", () => { - let history: testEntityHistory = { - current: { - chain_id: 1, - block_number: 2, - block_timestamp: 3, - log_index: 4, - }, - previous: None, - entityData: Delete({id: "1"}), - } - let serializedHistory = history->S.reverseConvertToJsonOrThrow(testEntityHistorySchema) - let expected = %raw(`{ - "entity_history_block_timestamp": 3, - "entity_history_chain_id": 1, - "entity_history_block_number": 2, - "entity_history_log_index": 4, - "previous_entity_history_block_timestamp": null, - "previous_entity_history_chain_id": null, - "previous_entity_history_block_number": null, - "previous_entity_history_log_index": null, - "id": "1", - "fieldA": null, - "fieldB":null, - "action": "DELETE" - }`) - - Assert.deepEqual(serializedHistory, expected) - }) -}) - -describe("Entity History Codegen", () => { - it("Creates a postgres insert function", () => { - let expected = `CREATE OR REPLACE FUNCTION "insert_TestEntity_history"(history_row "public"."TestEntity_history", should_copy_current_entity BOOLEAN) -RETURNS void AS $$ -DECLARE - v_previous_record RECORD; - v_origin_record RECORD; -BEGIN - -- Check if previous values are not provided - IF history_row.previous_entity_history_block_timestamp IS NULL OR history_row.previous_entity_history_chain_id IS NULL OR history_row.previous_entity_history_block_number IS NULL OR history_row.previous_entity_history_log_index IS NULL THEN - -- Find the most recent record for the same id - SELECT entity_history_block_timestamp, entity_history_chain_id, entity_history_block_number, entity_history_log_index INTO v_previous_record - FROM "public"."TestEntity_history" - WHERE id = history_row.id - ORDER BY entity_history_block_timestamp DESC, entity_history_chain_id DESC, entity_history_block_number DESC, entity_history_log_index DESC - LIMIT 1; - - -- If a previous record exists, use its values - IF FOUND THEN - history_row.previous_entity_history_block_timestamp := v_previous_record.entity_history_block_timestamp; history_row.previous_entity_history_chain_id := v_previous_record.entity_history_chain_id; history_row.previous_entity_history_block_number := v_previous_record.entity_history_block_number; history_row.previous_entity_history_log_index := v_previous_record.entity_history_log_index; - ElSIF should_copy_current_entity THEN - -- Check if a value for the id exists in the origin table and if so, insert a history row for it. 
- SELECT "id", "fieldA", "fieldB" FROM "public"."TestEntity" WHERE id = history_row.id INTO v_origin_record; - IF FOUND THEN - INSERT INTO "public"."TestEntity_history" (entity_history_block_timestamp, entity_history_chain_id, entity_history_block_number, entity_history_log_index, "id", "fieldA", "fieldB", "action") - -- SET the current change data fields to 0 since we don't know what they were - -- and it doesn't matter provided they are less than any new values - VALUES (0, 0, 0, 0, v_origin_record."id", v_origin_record."fieldA", v_origin_record."fieldB", 'SET'); - - history_row.previous_entity_history_block_timestamp := 0; history_row.previous_entity_history_chain_id := 0; history_row.previous_entity_history_block_number := 0; history_row.previous_entity_history_log_index := 0; - END IF; - END IF; - END IF; - - INSERT INTO "public"."TestEntity_history" ("entity_history_block_timestamp", "entity_history_chain_id", "entity_history_block_number", "entity_history_log_index", "previous_entity_history_block_timestamp", "previous_entity_history_chain_id", "previous_entity_history_block_number", "previous_entity_history_log_index", "id", "fieldA", "fieldB", "action") - VALUES (history_row."entity_history_block_timestamp", history_row."entity_history_chain_id", history_row."entity_history_block_number", history_row."entity_history_log_index", history_row."previous_entity_history_block_timestamp", history_row."previous_entity_history_chain_id", history_row."previous_entity_history_block_number", history_row."previous_entity_history_log_index", history_row."id", history_row."fieldA", history_row."fieldB", history_row."action"); -END; -$$ LANGUAGE plpgsql;` - - Assert.equal(expected, TestEntity.entityHistory.makeInsertFnQuery(~pgSchema="public")) - }) - it("Creates an entity history table", () => { - let createQuery = - TestEntity.entityHistory.table->PgStorage.makeCreateTableQuery(~pgSchema="public") - Assert.equal( - `CREATE TABLE IF NOT EXISTS "public"."TestEntity_history"("entity_history_block_timestamp" INTEGER NOT NULL, "entity_history_chain_id" INTEGER NOT NULL, "entity_history_block_number" INTEGER NOT NULL, "entity_history_log_index" INTEGER NOT NULL, "previous_entity_history_block_timestamp" INTEGER, "previous_entity_history_chain_id" INTEGER, "previous_entity_history_block_number" INTEGER, "previous_entity_history_log_index" INTEGER, "id" TEXT NOT NULL, "fieldA" INTEGER, "fieldB" TEXT, "action" "public".ENTITY_HISTORY_ROW_ACTION NOT NULL, "serial" SERIAL, PRIMARY KEY("entity_history_block_timestamp", "entity_history_chain_id", "entity_history_block_number", "entity_history_log_index", "id"));`, - createQuery, - ) - }) - - it("Creates a js insert function", () => { - let insertFnString = TestEntity.entityHistory.insertFn->toStringUnsafe - - let expected = `(sql, rowArgs, shouldCopyCurrentEntity) => - sql\`select "insert_TestEntity_history"(ROW(\${rowArgs["entity_history_block_timestamp"]}, \${rowArgs["entity_history_chain_id"]}, \${rowArgs["entity_history_block_number"]}, \${rowArgs["entity_history_log_index"]}, \${rowArgs["previous_entity_history_block_timestamp"]}, \${rowArgs["previous_entity_history_chain_id"]}, \${rowArgs["previous_entity_history_block_number"]}, \${rowArgs["previous_entity_history_log_index"]}, \${rowArgs["id"]}, \${rowArgs["fieldA"]}, \${rowArgs["fieldB"]}, \${rowArgs["action"]}, NULL), --NULL argument for SERIAL field - \${shouldCopyCurrentEntity});\`` - - Assert.equal(insertFnString, expected) - }) - - Async.it("Creating tables and functions works", async () => { 
- let storage = PgStorage.make( - ~sql=Db.sql, - ~pgSchema=Env.Db.publicSchema, - ~pgUser=Env.Db.user, - ~pgDatabase=Env.Db.database, - ~pgPassword=Env.Db.password, - ~pgHost=Env.Db.host, - ~pgPort=Env.Db.port, - ) - try { - let _ = await storage.initialize( - ~chainConfigs=[], - ~entities=[module(TestEntity)->Entities.entityModToInternal], - ~enums=[Persistence.entityHistoryActionEnumConfig->Internal.fromGenericEnumConfig], - ) - } catch { - | exn => - Js.log2("Setup exn", exn) - Assert.fail("Failed setting up tables") - } - - switch await Db.sql->Postgres.unsafe( - TestEntity.entityHistory.makeInsertFnQuery(~pgSchema="public"), - ) { - | exception exn => - Js.log2("createInsertFnQuery exn", exn) - Assert.fail("Failed creating insert function") - | _ => () - } - - let mockEntity: TestEntity.t = {id: "1", fieldA: 1, fieldB: Some("test")} - switch await Db.sql->batchSetMockEntity([mockEntity]) { - | exception exn => - Js.log2("batchSetMockEntity exn", exn) - Assert.fail("Failed to set mock entity in table") - | _ => () - } - let afterInsert = switch await Db.sql->getAllMockEntity { - | exception exn => - Js.log2("getAllMockEntity exn", exn) - Assert.fail("Failed to get mock entity from table")->Utils.magic - | entities => entities - } - - Assert.deepEqual(afterInsert, [mockEntity], ~message="Should have inserted mock entity") - - let chainId = 137 - let blockNumber = 123456 - let blockTimestamp = blockNumber * 15 - let logIndex = 1 - - let entityHistoryItem: testEntityHistory = { - current: { - chain_id: chainId, - block_timestamp: blockTimestamp, - block_number: blockNumber, - log_index: logIndex, - }, - previous: None, - entityData: Set({ - id: "1", - fieldA: 2, - fieldB: Some("test2"), - }), - } - - switch { - await Promise.all( - Db.sql->PgStorage.setEntityHistoryOrThrow( - ~entityHistory=TestEntity.entityHistory, - ~rows=[entityHistoryItem], - ~shouldCopyCurrentEntity=true, - ), - ) - } { - | exception exn => - Js.log2("insertRow exn", exn) - Assert.fail("Failed to insert mock entity history") - | _ => () - } - - let expectedResult = [ - { - "entity_history_block_timestamp": 0, - "entity_history_chain_id": 0, - "entity_history_block_number": 0, - "entity_history_log_index": 0, - "previous_entity_history_block_timestamp": Js.Nullable.Null, - "previous_entity_history_chain_id": Js.Nullable.Null, - "previous_entity_history_block_number": Js.Nullable.Null, - "previous_entity_history_log_index": Js.Nullable.Null, - "id": "1", - "fieldA": 1, - "fieldB": "test", - "action": "SET", - "serial": 1, - }, - { - "entity_history_block_timestamp": blockTimestamp, - "entity_history_chain_id": chainId, - "entity_history_block_number": blockNumber, - "entity_history_log_index": logIndex, - "previous_entity_history_block_timestamp": Js.Nullable.Value(0), - "previous_entity_history_chain_id": Js.Nullable.Value(0), - "previous_entity_history_block_number": Js.Nullable.Value(0), - "previous_entity_history_log_index": Js.Nullable.Value(0), - "id": "1", - "fieldA": 2, - "fieldB": "test2", - "action": "SET", - "serial": 2, - }, - ] - - let currentHistoryItems = await Db.sql->getAllMockEntityHistory - Assert.deepEqual(currentHistoryItems, expectedResult) - - switch await Promise.all( - Db.sql->PgStorage.setEntityHistoryOrThrow( - ~entityHistory=TestEntity.entityHistory, - ~rows=[ - { - entityData: Set({id: "2", fieldA: 1, fieldB: None}), - previous: None, - current: { - chain_id: 1, - block_timestamp: 4, - block_number: 4, - log_index: 6, - }, - }, - ], - ~shouldCopyCurrentEntity=true, - ), - ) { - | exception 
exn => - Js.log2("insertRow exn", exn) - Assert.fail("Failed to insert mock entity history") - | _ => () - } - switch await Promise.all( - Db.sql->PgStorage.setEntityHistoryOrThrow( - ~entityHistory=TestEntity.entityHistory, - ~rows=[ - { - entityData: Set({id: "2", fieldA: 3, fieldB: None}), - previous: None, - current: { - chain_id: 1, - block_timestamp: 4, - block_number: 10, - log_index: 6, - }, - }, - ], - ~shouldCopyCurrentEntity=true, - ), - ) { - | exception exn => - Js.log2("insertRow exn", exn) - Assert.fail("Failed to insert mock entity history") - | _ => () - } - - let _ = await Promise.all( - Db.sql->PgStorage.setEntityHistoryOrThrow( - ~entityHistory=TestEntity.entityHistory, - ~rows=[ - { - entityData: Set({id: "3", fieldA: 4, fieldB: None}), - previous: None, - current: { - chain_id: 137, - block_timestamp: 4, - block_number: 7, - log_index: 6, - }, - }, - ], - ~shouldCopyCurrentEntity=true, - ), - ) - }) - - it("Creates prune stale entity history query", () => { - let query = EntityHistory.makePruneStaleEntityHistoryQuery( - ~entityName="TestEntity", - ~pgSchema="foo", - ) - Assert.equal( - query, - `WITH safe AS ( - SELECT s.chain_id, s.block_number - FROM unnest($1::int[], $2::bigint[]) AS s(chain_id, block_number) -), -max_before_safe AS ( - SELECT t.id, MAX(t.serial) AS keep_serial - FROM "foo"."TestEntity_history" t - JOIN safe s - ON s.chain_id = t.entity_history_chain_id - AND t.entity_history_block_number <= s.block_number - GROUP BY t.id -), -post_safe AS ( - SELECT DISTINCT t.id - FROM "foo"."TestEntity_history" t - JOIN safe s - ON s.chain_id = t.entity_history_chain_id - AND t.entity_history_block_number > s.block_number -) -DELETE FROM "foo"."TestEntity_history" d -USING max_before_safe m -LEFT JOIN post_safe p ON p.id = m.id -WHERE d.id = m.id - AND ( - d.serial < m.keep_serial - OR (p.id IS NULL AND d.serial = m.keep_serial) - );`, - ) - }) -}) - -module Mocks = { - module Entity = { - open TestEntity - let entityId1 = "1" - let mockEntity1 = {id: entityId1, fieldA: 1, fieldB: Some("test")} - let mockEntity2 = {id: entityId1, fieldA: 2, fieldB: Some("test2")} - let mockEntity3 = {id: entityId1, fieldA: 3, fieldB: Some("test3")} - let mockEntity4 = {id: entityId1, fieldA: 4, fieldB: Some("test4")} - - let entityId2 = "2" - let mockEntity5 = {id: entityId2, fieldA: 5, fieldB: None} - let mockEntity6 = {id: entityId2, fieldA: 6, fieldB: None} - - let entityId3 = "3" - let mockEntity7 = {id: entityId3, fieldA: 7, fieldB: None} - let mockEntity8 = {id: entityId3, fieldA: 8, fieldB: None} - } - - module GnosisBug = { - let chain_id = 1 - - let event1: EntityHistory.historyFields = { - chain_id, - block_timestamp: 10 * 5, - block_number: 10, - log_index: 0, - } - - let event2: EntityHistory.historyFields = { - chain_id, - block_timestamp: 10 * 5, - block_number: 10, - log_index: 1, - } - - let historyRow1: testEntityHistory = { - current: event1, - previous: None, - entityData: Set(Entity.mockEntity2), - } - - let historyRow2: testEntityHistory = { - current: event2, - previous: None, - entityData: Set(Entity.mockEntity6), - } - - let historyRows = [historyRow1, historyRow2] - - // For setting a different entity and testing pruning - let event3: EntityHistory.historyFields = { - chain_id, - block_timestamp: 12 * 5, - block_number: 12, - log_index: 0, - } - - let historyRow3: testEntityHistory = { - current: event3, - previous: None, - entityData: Set(Entity.mockEntity3), - } - - let historyRow4: testEntityHistory = { - current: event3, - previous: None, - 
entityData: Set(Entity.mockEntity8), - } - - let historyRowsForPrune = [historyRow3, historyRow4] - } - - module Chain1 = { - let chain_id = 1 - - let event1: EntityHistory.historyFields = { - chain_id, - block_timestamp: 1, - block_number: 1, - log_index: 0, - } - - let event2: EntityHistory.historyFields = { - chain_id, - block_timestamp: 5, - block_number: 2, - log_index: 1, - } - - let event3: EntityHistory.historyFields = { - chain_id, - block_timestamp: 15, - block_number: 4, - log_index: 2, - } - - let historyRow1: testEntityHistory = { - current: event1, - previous: None, - entityData: Set(Entity.mockEntity1), - } - - let historyRow2: testEntityHistory = { - current: event2, - previous: Some(event1), - entityData: Set(Entity.mockEntity2), - } - - let historyRow3: testEntityHistory = { - current: event3, - previous: Some(event2), - entityData: Set(Entity.mockEntity3), - } - - let historyRows = [historyRow1, historyRow2, historyRow3] - - //Shows a case where no event exists on this block - let rollbackEventIdentifier: Types.eventIdentifier = { - blockTimestamp: 10, - chainId: chain_id, - blockNumber: 3, - logIndex: 0, - } - - let orderedMultichainArg = DbFunctions.EntityHistory.Args.OrderedMultichain({ - safeBlockTimestamp: rollbackEventIdentifier.blockTimestamp, - reorgChainId: chain_id, - safeBlockNumber: rollbackEventIdentifier.blockNumber, - }) - - let unorderedMultichainArg = DbFunctions.EntityHistory.Args.UnorderedMultichain({ - reorgChainId: chain_id, - safeBlockNumber: rollbackEventIdentifier.blockNumber, - }) - } - - module Chain2 = { - let chain_id = 2 - - let event1: EntityHistory.historyFields = { - chain_id, - block_timestamp: 3, - block_number: 1, - log_index: 0, - } - - let event2: EntityHistory.historyFields = { - chain_id, - block_timestamp: 8, - block_number: 2, - log_index: 1, - } - - let event3: EntityHistory.historyFields = { - chain_id, - block_timestamp: 13, - block_number: 3, - log_index: 2, - } - - let historyRow1: testEntityHistory = { - current: event1, - previous: None, - entityData: Set(Entity.mockEntity5), - } - - let historyRow2: testEntityHistory = { - current: event2, - previous: Some(event1), - entityData: Delete({id: Entity.entityId2}), - } - let historyRow3: testEntityHistory = { - current: event3, - previous: Some(event2), - entityData: Set(Entity.mockEntity6), - } - - let historyRows = [historyRow1, historyRow2, historyRow3] - } - - let historyRows = Utils.Array.mergeSorted( - (a, b) => a.EntityHistory.current.block_timestamp < b.current.block_timestamp, - Chain1.historyRows, - Chain2.historyRows, - ) -} - -describe("Entity history rollbacks", () => { - Async.beforeEach(async () => { - try { - let _ = DbHelpers.resetPostgresClient() - let storage = PgStorage.make( - ~sql=Db.sql, - ~pgSchema=Env.Db.publicSchema, - ~pgUser=Env.Db.user, - ~pgDatabase=Env.Db.database, - ~pgPassword=Env.Db.password, - ~pgHost=Env.Db.host, - ~pgPort=Env.Db.port, - ) - let _ = await storage.initialize( - ~chainConfigs=[], - ~entities=[module(TestEntity)->Entities.entityModToInternal], - ~enums=[Persistence.entityHistoryActionEnumConfig->Internal.fromGenericEnumConfig], - ) - - let _ = - await Db.sql->Postgres.unsafe( - TestEntity.entityHistory.makeInsertFnQuery(~pgSchema="public"), - ) - - try await Db.sql->PgStorage.setOrThrow( - ~items=[ - Mocks.Entity.mockEntity1->TestEntity.castToInternal, - Mocks.Entity.mockEntity5->TestEntity.castToInternal, - ], - ~table=TestEntity.table, - ~itemSchema=TestEntity.schema, - ~pgSchema=Config.storagePgSchema, - ) catch { - | exn => 
- Js.log2("batchSet mock entity exn", exn) - Assert.fail("Failed to set mock entity in table") - } - - try await Db.sql->Postgres.beginSql( - sql => - sql - ->PgStorage.setEntityHistoryOrThrow( - ~entityHistory=TestEntity.entityHistory, - ~rows=Mocks.GnosisBug.historyRows, - ) - ->Promise.all - ->Promise.ignoreValue, - ) catch { - | exn => - Js.log2("insert mock rows exn", exn) - Assert.fail("Failed to insert mock rows") - } - - let historyItems = { - let items = await Db.sql->getAllMockEntityHistory - items->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) - } - Assert.equal(historyItems->Js.Array2.length, 4, ~message="Should have 4 history items") - Assert.ok( - historyItems->Belt.Array.some(item => item.current.chain_id == 0), - ~message="Should contain 2 copied items", - ) - } catch { - | exn => - Js.log2(" Entity history setup exn", exn) - Assert.fail("Failed setting up tables") - } - }) - - Async.it("Rollback ignores copied entities as an item in reorg threshold", async () => { - let rollbackDiff = await Db.sql->DbFunctions.EntityHistory.getRollbackDiff( - OrderedMultichain({ - reorgChainId: Mocks.GnosisBug.chain_id, - safeBlockNumber: 9, - safeBlockTimestamp: 9 * 5, - }), - ~entityConfig=module(TestEntity)->Entities.entityModToInternal, - ) - - let expectedDiff: array> = [ - { - current: {chain_id: 0, block_timestamp: 0, block_number: 0, log_index: 0}, - previous: %raw(`undefined`), - entityData: Set(Mocks.Entity.mockEntity1->TestEntity.castToInternal), - }, - { - current: {chain_id: 0, block_timestamp: 0, block_number: 0, log_index: 0}, - previous: %raw(`undefined`), - entityData: Set(Mocks.Entity.mockEntity5->TestEntity.castToInternal), - }, - ] - - Assert.deepStrictEqual( - rollbackDiff, - expectedDiff, - ~message="Should rollback to the copied entity", - ) - }) - - Async.it( - "Deleting items after reorg event should not remove the copied history item", - async () => { - await Db.sql->DbFunctions.EntityHistory.deleteAllEntityHistoryAfterEventIdentifier( - ~isUnorderedMultichainMode=false, - ~eventIdentifier={ - chainId: Mocks.GnosisBug.chain_id, - blockTimestamp: 9 * 5, - blockNumber: 9, - logIndex: 0, - }, - ~allEntities=[module(TestEntity)->Entities.entityModToInternal], - ) - - let historyItems = { - let items = await Db.sql->getAllMockEntityHistory - items->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) - } - - Assert.equal(historyItems->Js.Array2.length, 2, ~message="Should have the 2 copied items") - - let allItemsAreZeroChainId = - historyItems->Belt.Array.every(item => item.current.chain_id == 0) - - Assert.ok( - allItemsAreZeroChainId, - ~message="Should have all items in the zero chain id since they are copied", - ) - }, - ) - - Async.it("Prunes history correctly with items in reorg threshold", async () => { - // set the current entity of id 3 - await Db.sql->PgStorage.setOrThrow( - ~items=[Mocks.Entity.mockEntity7->TestEntity.castToInternal], - ~table=TestEntity.table, - ~itemSchema=TestEntity.schema, - ~pgSchema=Config.storagePgSchema, - ) - - // set an updated version of its row to get a copied entity history - try await Db.sql->Postgres.beginSql( - sql => - sql - ->PgStorage.setEntityHistoryOrThrow( - ~entityHistory=TestEntity.entityHistory, - ~rows=Mocks.GnosisBug.historyRowsForPrune, - ) - ->Promise.all - ->Promise.ignoreValue, - ) catch { - | exn => - Js.log2("insert mock rows exn", exn) - Assert.fail("Failed to insert mock rows") - } - - // let historyItemsBefore = { - // let items = await Db.sql->getAllMockEntityHistory - // Js.log2("history 
items before prune", items) - // items->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) - // } - - let () = await Db.sql->EntityHistory.pruneStaleEntityHistory( - ~entityName=(module(TestEntity)->Entities.entityModToInternal).name, - ~pgSchema=Env.Db.publicSchema, - ~safeReorgBlocks={ - chainIds: [Mocks.GnosisBug.chain_id], - blockNumbers: [11], - }, - ) - - let historyItemsAfter = { - let items = await Db.sql->getAllMockEntityHistory - // Js.log2("history items after prune", items) - items->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) - } - - Assert.equal( - historyItemsAfter->Js.Array2.length, - 4, - ~message="Should have 4 history items for entity id 1 and 3 before and after block 11", - ) - }) -}) - -describe("Entity history rollbacks", () => { - Async.beforeEach(async () => { - try { - let _ = DbHelpers.resetPostgresClient() - let storage = PgStorage.make( - ~sql=Db.sql, - ~pgSchema=Env.Db.publicSchema, - ~pgUser=Env.Db.user, - ~pgDatabase=Env.Db.database, - ~pgPassword=Env.Db.password, - ~pgHost=Env.Db.host, - ~pgPort=Env.Db.port, - ) - let _ = await storage.initialize( - ~chainConfigs=[], - ~entities=[module(TestEntity)->Entities.entityModToInternal], - ~enums=[Persistence.entityHistoryActionEnumConfig->Internal.fromGenericEnumConfig], - ) - - let _ = - await Db.sql->Postgres.unsafe( - TestEntity.entityHistory.makeInsertFnQuery(~pgSchema="public"), - ) - - try await Db.sql->Postgres.beginSql( - sql => - sql - ->PgStorage.setEntityHistoryOrThrow( - ~entityHistory=TestEntity.entityHistory, - ~rows=Mocks.historyRows, - ) - ->Promise.all - ->Promise.ignoreValue, - ) catch { - | exn => - Js.log2("insert mock rows exn", exn) - Assert.fail("Failed to insert mock rows") - } - - let historyItems = { - let items = await Db.sql->getAllMockEntityHistory - items->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) - } - Assert.equal(historyItems->Js.Array2.length, 6, ~message="Should have 6 history items") - Assert.ok( - !(historyItems->Belt.Array.some(item => item.current.chain_id == 0)), - ~message="No defaulted/copied values should exist in history", - ) - } catch { - | exn => - Js.log2(" Entity history setup exn", exn) - Assert.fail("Failed setting up tables") - } - }) - - Async.it("Returns expected diff for ordered multichain mode", async () => { - let orderdMultichainRollbackDiff = try await Db.sql->DbFunctions.EntityHistory.getRollbackDiff( - Mocks.Chain1.orderedMultichainArg, - ~entityConfig=module(TestEntity)->Entities.entityModToInternal, - ) catch { - | exn => - Js.log2("getRollbackDiff exn", exn) - Assert.fail("Failed to get rollback diff") - } - - switch orderdMultichainRollbackDiff { - | [ - {current: currentA, entityData: Set(entitySetA)}, - {current: currentB, entityData: Delete({id: entityDeleteB})}, - ] => - Assert.deepEqual( - currentA, - Mocks.Chain1.event2, - ~message="First history item should haved diffed to event2", - ) - Assert.deepEqual( - entitySetA, - Mocks.Entity.mockEntity2->TestEntity.castToInternal, - ~message="First history item should haved diffed to mockEntity2", - ) - Assert.deepEqual( - currentB, - Mocks.Chain2.event2, - ~message="Second history item should haved diffed to event3", - ) - Assert.deepEqual( - entityDeleteB, - Mocks.Entity.entityId2, - ~message="Second history item should haved diffed a delete of entityId2", - ) - | _ => Assert.fail("Should have a set and delete history item in diff") - } - }) - - Async.it("Returns expected diff for unordered multichain mode", async () => { - let unorderedMultichainRollbackDiff = try await 
Db.sql->DbFunctions.EntityHistory.getRollbackDiff( - Mocks.Chain1.unorderedMultichainArg, - ~entityConfig=module(TestEntity)->Entities.entityModToInternal, - ) catch { - | exn => - Js.log2("getRollbackDiff exn", exn) - Assert.fail("Failed to get rollback diff") - } - - switch unorderedMultichainRollbackDiff { - | [{current: currentA, entityData: Set(entitySetA)}] => - Assert.deepEqual( - currentA, - Mocks.Chain1.event2, - ~message="First history item should haved diffed to event2", - ) - Assert.deepEqual( - entitySetA, - Mocks.Entity.mockEntity2->TestEntity.castToInternal, - ~message="First history item should haved diffed to mockEntity2", - ) - | _ => Assert.fail("Should have only chain 1 item in diff") - } - }) - - Async.it("Gets first event change per chain ordered mode", async () => { - let firstChangeEventPerChain = try await Db.sql->DbFunctions.EntityHistory.getFirstChangeEventPerChain( - Mocks.Chain1.orderedMultichainArg, - ~allEntities=[module(TestEntity)->Entities.entityModToInternal], - ) catch { - | exn => - Js.log2("getFirstChangeEventPerChain exn", exn) - Assert.fail("Failed to get rollback diff") - } - - let expected = DbFunctions.EntityHistory.FirstChangeEventPerChain.make() - expected->DbFunctions.EntityHistory.FirstChangeEventPerChain.setIfEarlier( - ~chainId=Mocks.Chain1.chain_id, - ~event={ - blockNumber: Mocks.Chain1.event3.block_number, - logIndex: Mocks.Chain1.event3.log_index, - }, - ) - expected->DbFunctions.EntityHistory.FirstChangeEventPerChain.setIfEarlier( - ~chainId=Mocks.Chain2.chain_id, - ~event={ - blockNumber: Mocks.Chain2.event3.block_number, - logIndex: Mocks.Chain2.event3.log_index, - }, - ) - - Assert.deepEqual( - firstChangeEventPerChain, - expected, - ~message="Should have chain 1 and 2 first change events", - ) - }) - - Async.it("Gets first event change per chain unordered mode", async () => { - let firstChangeEventPerChain = try await Db.sql->DbFunctions.EntityHistory.getFirstChangeEventPerChain( - Mocks.Chain1.unorderedMultichainArg, - ~allEntities=[module(TestEntity)->Entities.entityModToInternal], - ) catch { - | exn => - Js.log2("getFirstChangeEventPerChain exn", exn) - Assert.fail("Failed to get rollback diff") - } - - let expected = DbFunctions.EntityHistory.FirstChangeEventPerChain.make() - expected->DbFunctions.EntityHistory.FirstChangeEventPerChain.setIfEarlier( - ~chainId=Mocks.Chain1.chain_id, - ~event={ - blockNumber: Mocks.Chain1.event3.block_number, - logIndex: Mocks.Chain1.event3.log_index, - }, - ) - - Assert.deepEqual( - firstChangeEventPerChain, - expected, - ~message="Should only have chain 1 first change event", - ) - }) - - Async.it("Deletes current history after rollback ordered", async () => { - let _ = - await Db.sql->DbFunctions.EntityHistory.deleteAllEntityHistoryAfterEventIdentifier( - ~isUnorderedMultichainMode=false, - ~eventIdentifier=Mocks.Chain1.rollbackEventIdentifier, - ~allEntities=[module(TestEntity)->Entities.entityModToInternal], - ) - - let currentHistoryItems = await Db.sql->getAllMockEntityHistory - let parsedHistoryItems = - currentHistoryItems->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) - - let expectedHistoryItems = Mocks.historyRows->Belt.Array.slice(~offset=0, ~len=4) - - Assert.deepEqual( - parsedHistoryItems->stripUndefinedFieldsInPlace, - expectedHistoryItems->stripUndefinedFieldsInPlace, - ~message="Should have deleted last 2 items in history", - ) - }) - - Async.it("Deletes current history after rollback unordered", async () => { - let _ = - await 
Db.sql->DbFunctions.EntityHistory.deleteAllEntityHistoryAfterEventIdentifier( - ~isUnorderedMultichainMode=true, - ~eventIdentifier=Mocks.Chain1.rollbackEventIdentifier, - ~allEntities=[module(TestEntity)->Entities.entityModToInternal], - ) - - let currentHistoryItems = await Db.sql->getAllMockEntityHistory - let parsedHistoryItems = - currentHistoryItems->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) - - let expectedHistoryItems = Mocks.historyRows->Belt.Array.slice(~offset=0, ~len=5) - - Assert.deepEqual( - parsedHistoryItems->stripUndefinedFieldsInPlace, - expectedHistoryItems->stripUndefinedFieldsInPlace, - ~message="Should have deleted just the last item in history", - ) - }) - - Async.it("Prunes history correctly with items in reorg threshold", async () => { - let () = await Db.sql->EntityHistory.pruneStaleEntityHistory( - ~entityName=(module(TestEntity)->Entities.entityModToInternal).name, - ~pgSchema=Env.Db.publicSchema, - ~safeReorgBlocks={ - chainIds: [1, 2], - blockNumbers: [3, 2], - }, - ) - let currentHistoryItems = await Db.sql->getAllMockEntityHistory - - let parsedHistoryItems = - currentHistoryItems->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) - - let expectedHistoryItems = [ - Mocks.Chain1.historyRow2, - Mocks.Chain1.historyRow3, - Mocks.Chain2.historyRow2, - Mocks.Chain2.historyRow3, - ] - - let sort = arr => - arr->Js.Array2.sortInPlaceWith( - (a, b) => a.EntityHistory.current.block_number - b.current.block_number, - ) - - Assert.deepEqual( - parsedHistoryItems->sort->stripUndefinedFieldsInPlace, - expectedHistoryItems->sort->stripUndefinedFieldsInPlace, - ~message="Should have deleted the unneeded first items in history", - ) - }) - - Async.it("Prunes history correctly with items in reorg threshold", async () => { - let () = await Db.sql->EntityHistory.pruneStaleEntityHistory( - ~entityName=(module(TestEntity)->Entities.entityModToInternal).name, - ~pgSchema=Env.Db.publicSchema, - ~safeReorgBlocks={ - chainIds: [1, 2], - blockNumbers: [3, 2], - }, - ) - let currentHistoryItems = await Db.sql->getAllMockEntityHistory - - let parsedHistoryItems = - currentHistoryItems->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) - - let sort = arr => - arr->Js.Array2.sortInPlaceWith( - (a, b) => a.EntityHistory.current.block_number - b.current.block_number, - ) - - Assert.deepEqual( - parsedHistoryItems->sort->stripUndefinedFieldsInPlace, - [ - Mocks.Chain1.historyRow2, - Mocks.Chain2.historyRow2, - Mocks.Chain2.historyRow3, - Mocks.Chain1.historyRow3, - ]->stripUndefinedFieldsInPlace, - ~message="Should have deleted the unneeded first items in history", - ) - }) - - Async.it("Prunes history correctly with no items in reorg threshold", async () => { - let () = await Db.sql->EntityHistory.pruneStaleEntityHistory( - ~entityName=(module(TestEntity)->Entities.entityModToInternal).name, - ~pgSchema=Env.Db.publicSchema, - ~safeReorgBlocks={ - chainIds: [1, 2], - blockNumbers: [4, 3], - }, - ) - let currentHistoryItems = await Db.sql->getAllMockEntityHistory - - Assert.ok( - currentHistoryItems->Array.length == 0, - ~message="Should have deleted all items in history", - ) - }) -}) - -describe_skip("Prune performance test", () => { - Async.it("Print benchmark of prune function", async () => { - let _ = DbHelpers.resetPostgresClient() - let storage = PgStorage.make( - ~sql=Db.sql, - ~pgSchema=Env.Db.publicSchema, - ~pgUser=Env.Db.user, - ~pgDatabase=Env.Db.database, - ~pgPassword=Env.Db.password, - ~pgHost=Env.Db.host, - ~pgPort=Env.Db.port, - ) - let _ = await 
storage.initialize( - ~entities=[module(TestEntity)->Entities.entityModToInternal], - ~chainConfigs=[], - ~enums=[Persistence.entityHistoryActionEnumConfig->Internal.fromGenericEnumConfig], - ) - - let _ = - await Db.sql->Postgres.unsafe(TestEntity.entityHistory.makeInsertFnQuery(~pgSchema="public")) - - let rows: array = [] - for i in 0 to 1000 { - let mockEntity: TestEntity.t = { - id: i->mod(10)->Belt.Int.toString, - fieldA: i, - fieldB: None, - } - - let historyRow: testEntityHistory = { - current: { - chain_id: 1, - block_timestamp: i * 5, - block_number: i, - log_index: 0, - }, - previous: None, - entityData: Set(mockEntity), - } - rows->Js.Array2.push(historyRow)->ignore - } - - try await Db.sql->Postgres.beginSql( - sql => - sql - ->PgStorage.setEntityHistoryOrThrow(~entityHistory=TestEntity.entityHistory, ~rows) - ->Promise.all - ->Promise.ignoreValue, - ) catch { - | exn => - Js.log2("insert mock rows exn", exn) - Assert.fail("Failed to insert mock rows") - } - - let startTime = Hrtime.makeTimer() - - try { - let () = await Db.sql->EntityHistory.pruneStaleEntityHistory( - ~entityName=(module(TestEntity)->Entities.entityModToInternal).name, - ~pgSchema=Env.Db.publicSchema, - ~safeReorgBlocks={ - chainIds: [1], - blockNumbers: [500], - }, - ) - } catch { - | exn => - Js.log2("prune stale entity history exn", exn) - Assert.fail("Failed to prune stale entity history") - } - - let elapsedTime = Hrtime.timeSince(startTime)->Hrtime.toMillis->Hrtime.intFromMillis - Js.log2("Elapsed time", elapsedTime) - }) -}) +// open RescriptMocha + +// //unsafe polymorphic toString binding for any type +// @send external toStringUnsafe: 'a => string = "toString" + +// let stripUndefinedFieldsInPlace = (val: 'a): 'a => { +// let json = val->(Utils.magic: 'a => Js.Json.t) +// //Hot fix for rescript equality check that removes optional fields +// let rec strip = (json: Js.Json.t) => { +// switch json { +// | Object(obj) => +// obj +// ->Js.Dict.keys +// ->Belt.Array.forEach(key => { +// let value = obj->Utils.Dict.dangerouslyGetNonOption(key) +// if value === %raw(`undefined`) { +// obj->Utils.Dict.deleteInPlace(key) +// } else { +// strip(value->Belt.Option.getExn) +// } +// }) +// | Array(arr) => arr->Belt.Array.forEach(value => strip(value)) +// | _ => () +// } +// } + +// json->strip +// json->(Utils.magic: Js.Json.t => 'a) +// } + +// module TestEntity = { +// type t = { +// id: string, +// fieldA: int, +// fieldB: option, +// } + +// let name = "TestEntity" +// let schema = S.schema(s => { +// id: s.matches(S.string), +// fieldA: s.matches(S.int), +// fieldB: s.matches(S.null(S.string)), +// }) + +// let rowsSchema = S.array(schema) +// let table = Table.mkTable( +// "TestEntity", +// ~fields=[ +// Table.mkField("id", Text, ~fieldSchema=S.string, ~isPrimaryKey=true), +// Table.mkField("fieldA", Integer, ~fieldSchema=S.int), +// Table.mkField("fieldB", Text, ~fieldSchema=S.null(S.string), ~isNullable=true), +// ], +// ) + +// let entityHistory = table->EntityHistory.fromTable(~schema) + +// external castToInternal: t => Internal.entity = "%identity" +// } + +// type testEntityHistory = EntityHistory.entityUpdate +// let testEntitySetUpdateSchema = EntityHistory.makeSetUpdateSchema(TestEntity.schema) + +// let batchSetMockEntity = (sql, items) => +// PgStorage.setOrThrow( +// sql, +// ~items, +// ~pgSchema="public", +// ~table=TestEntity.table, +// ~itemSchema=TestEntity.schema, +// ) + +// let getAllMockEntity = sql => +// sql +// ->Postgres.unsafe(`SELECT * FROM 
"public"."${TestEntity.table.tableName}"`) +// ->Promise.thenResolve(json => json->S.parseJsonOrThrow(TestEntity.rowsSchema)) + +// let getAllMockEntityHistory = sql => +// sql->Postgres.unsafe(`SELECT * FROM "public"."${TestEntity.entityHistory.table.tableName}"`) + +// describe("Entity History Codegen", () => { +// it("Creates a postgres insert function", () => { +// let expected = `CREATE OR REPLACE FUNCTION "insert_TestEntity_history"(history_row "public"."TestEntity_history", should_copy_current_entity BOOLEAN) +// RETURNS void AS $$ +// DECLARE +// v_previous_record RECORD; +// v_origin_record RECORD; +// BEGIN +// -- Check if previous values are not provided +// IF history_row.previous_entity_history_block_timestamp IS NULL OR history_row.previous_entity_history_chain_id IS NULL OR history_row.previous_entity_history_block_number IS NULL OR history_row.previous_entity_history_log_index IS NULL THEN +// -- Find the most recent record for the same id +// SELECT entity_history_block_timestamp, entity_history_chain_id, entity_history_block_number, entity_history_log_index INTO v_previous_record +// FROM "public"."TestEntity_history" +// WHERE id = history_row.id +// ORDER BY entity_history_block_timestamp DESC, entity_history_chain_id DESC, entity_history_block_number DESC, entity_history_log_index DESC +// LIMIT 1; + +// -- If a previous record exists, use its values +// IF FOUND THEN +// history_row.previous_entity_history_block_timestamp := v_previous_record.entity_history_block_timestamp; history_row.previous_entity_history_chain_id := v_previous_record.entity_history_chain_id; history_row.previous_entity_history_block_number := v_previous_record.entity_history_block_number; history_row.previous_entity_history_log_index := v_previous_record.entity_history_log_index; +// ElSIF should_copy_current_entity THEN +// -- Check if a value for the id exists in the origin table and if so, insert a history row for it. 
+// SELECT "id", "fieldA", "fieldB" FROM "public"."TestEntity" WHERE id = history_row.id INTO v_origin_record; +// IF FOUND THEN +// INSERT INTO "public"."TestEntity_history" (entity_history_block_timestamp, entity_history_chain_id, entity_history_block_number, entity_history_log_index, "id", "fieldA", "fieldB", "action") +// -- SET the current change data fields to 0 since we don't know what they were +// -- and it doesn't matter provided they are less than any new values +// VALUES (0, 0, 0, 0, v_origin_record."id", v_origin_record."fieldA", v_origin_record."fieldB", 'SET'); + +// history_row.previous_entity_history_block_timestamp := 0; history_row.previous_entity_history_chain_id := 0; history_row.previous_entity_history_block_number := 0; history_row.previous_entity_history_log_index := 0; +// END IF; +// END IF; +// END IF; + +// INSERT INTO "public"."TestEntity_history" ("entity_history_block_timestamp", "entity_history_chain_id", "entity_history_block_number", "entity_history_log_index", "previous_entity_history_block_timestamp", "previous_entity_history_chain_id", "previous_entity_history_block_number", "previous_entity_history_log_index", "id", "fieldA", "fieldB", "action") +// VALUES (history_row."entity_history_block_timestamp", history_row."entity_history_chain_id", history_row."entity_history_block_number", history_row."entity_history_log_index", history_row."previous_entity_history_block_timestamp", history_row."previous_entity_history_chain_id", history_row."previous_entity_history_block_number", history_row."previous_entity_history_log_index", history_row."id", history_row."fieldA", history_row."fieldB", history_row."action"); +// END; +// $$ LANGUAGE plpgsql;` + +// Assert.equal(expected, TestEntity.entityHistory.makeInsertFnQuery(~pgSchema="public")) +// }) +// it("Creates an entity history table", () => { +// let createQuery = +// TestEntity.entityHistory.table->PgStorage.makeCreateTableQuery(~pgSchema="public") +// Assert.equal( +// `CREATE TABLE IF NOT EXISTS "public"."TestEntity_history"("entity_history_block_timestamp" INTEGER NOT NULL, "entity_history_chain_id" INTEGER NOT NULL, "entity_history_block_number" INTEGER NOT NULL, "entity_history_log_index" INTEGER NOT NULL, "previous_entity_history_block_timestamp" INTEGER, "previous_entity_history_chain_id" INTEGER, "previous_entity_history_block_number" INTEGER, "previous_entity_history_log_index" INTEGER, "id" TEXT NOT NULL, "fieldA" INTEGER, "fieldB" TEXT, "action" "public".ENTITY_HISTORY_ROW_ACTION NOT NULL, "serial" SERIAL, PRIMARY KEY("entity_history_block_timestamp", "entity_history_chain_id", "entity_history_block_number", "entity_history_log_index", "id"));`, +// createQuery, +// ) +// }) + +// it("Creates a js insert function", () => { +// let insertFnString = TestEntity.entityHistory.insertFn->toStringUnsafe + +// let expected = `(sql, rowArgs, shouldCopyCurrentEntity) => +// sql\`select "insert_TestEntity_history"(ROW(\${rowArgs["entity_history_block_timestamp"]}, \${rowArgs["entity_history_chain_id"]}, \${rowArgs["entity_history_block_number"]}, \${rowArgs["entity_history_log_index"]}, \${rowArgs["previous_entity_history_block_timestamp"]}, \${rowArgs["previous_entity_history_chain_id"]}, \${rowArgs["previous_entity_history_block_number"]}, \${rowArgs["previous_entity_history_log_index"]}, \${rowArgs["id"]}, \${rowArgs["fieldA"]}, \${rowArgs["fieldB"]}, \${rowArgs["action"]}, NULL), --NULL argument for SERIAL field +// \${shouldCopyCurrentEntity});\`` + +// Assert.equal(insertFnString, expected) +// }) + 
+// Async.it("Creating tables and functions works", async () => { +// let storage = PgStorage.make( +// ~sql=Db.sql, +// ~pgSchema=Env.Db.publicSchema, +// ~pgUser=Env.Db.user, +// ~pgDatabase=Env.Db.database, +// ~pgPassword=Env.Db.password, +// ~pgHost=Env.Db.host, +// ~pgPort=Env.Db.port, +// ) +// try { +// let _ = await storage.initialize( +// ~chainConfigs=[], +// ~entities=[module(TestEntity)->Entities.entityModToInternal], +// ~enums=[Persistence.entityHistoryActionEnumConfig->Internal.fromGenericEnumConfig], +// ) +// } catch { +// | exn => +// Js.log2("Setup exn", exn) +// Assert.fail("Failed setting up tables") +// } + +// switch await Db.sql->Postgres.unsafe( +// TestEntity.entityHistory.makeInsertFnQuery(~pgSchema="public"), +// ) { +// | exception exn => +// Js.log2("createInsertFnQuery exn", exn) +// Assert.fail("Failed creating insert function") +// | _ => () +// } + +// let mockEntity: TestEntity.t = {id: "1", fieldA: 1, fieldB: Some("test")} +// switch await Db.sql->batchSetMockEntity([mockEntity]) { +// | exception exn => +// Js.log2("batchSetMockEntity exn", exn) +// Assert.fail("Failed to set mock entity in table") +// | _ => () +// } +// let afterInsert = switch await Db.sql->getAllMockEntity { +// | exception exn => +// Js.log2("getAllMockEntity exn", exn) +// Assert.fail("Failed to get mock entity from table")->Utils.magic +// | entities => entities +// } + +// Assert.deepEqual(afterInsert, [mockEntity], ~message="Should have inserted mock entity") + +// let chainId = 137 +// let blockNumber = 123456 +// let blockTimestamp = blockNumber * 15 +// let logIndex = 1 + +// let entityHistoryItem: testEntityHistory = { +// current: { +// chain_id: chainId, +// block_timestamp: blockTimestamp, +// block_number: blockNumber, +// log_index: logIndex, +// }, +// previous: None, +// entityData: Set({ +// id: "1", +// fieldA: 2, +// fieldB: Some("test2"), +// }), +// } + +// switch { +// await Promise.all( +// Db.sql->PgStorage.setEntityHistoryOrThrow( +// ~entityHistory=TestEntity.entityHistory, +// ~rows=[entityHistoryItem], +// ~shouldCopyCurrentEntity=true, +// ), +// ) +// } { +// | exception exn => +// Js.log2("insertRow exn", exn) +// Assert.fail("Failed to insert mock entity history") +// | _ => () +// } + +// let expectedResult = [ +// { +// "entity_history_block_timestamp": 0, +// "entity_history_chain_id": 0, +// "entity_history_block_number": 0, +// "entity_history_log_index": 0, +// "previous_entity_history_block_timestamp": Js.Nullable.Null, +// "previous_entity_history_chain_id": Js.Nullable.Null, +// "previous_entity_history_block_number": Js.Nullable.Null, +// "previous_entity_history_log_index": Js.Nullable.Null, +// "id": "1", +// "fieldA": 1, +// "fieldB": "test", +// "action": "SET", +// "serial": 1, +// }, +// { +// "entity_history_block_timestamp": blockTimestamp, +// "entity_history_chain_id": chainId, +// "entity_history_block_number": blockNumber, +// "entity_history_log_index": logIndex, +// "previous_entity_history_block_timestamp": Js.Nullable.Value(0), +// "previous_entity_history_chain_id": Js.Nullable.Value(0), +// "previous_entity_history_block_number": Js.Nullable.Value(0), +// "previous_entity_history_log_index": Js.Nullable.Value(0), +// "id": "1", +// "fieldA": 2, +// "fieldB": "test2", +// "action": "SET", +// "serial": 2, +// }, +// ] + +// let currentHistoryItems = await Db.sql->getAllMockEntityHistory +// Assert.deepEqual(currentHistoryItems, expectedResult) + +// switch await Promise.all( +// Db.sql->PgStorage.setEntityHistoryOrThrow( 
+// ~entityHistory=TestEntity.entityHistory, +// ~rows=[ +// { +// entityData: Set({id: "2", fieldA: 1, fieldB: None}), +// previous: None, +// current: { +// chain_id: 1, +// block_timestamp: 4, +// block_number: 4, +// log_index: 6, +// }, +// }, +// ], +// ~shouldCopyCurrentEntity=true, +// ), +// ) { +// | exception exn => +// Js.log2("insertRow exn", exn) +// Assert.fail("Failed to insert mock entity history") +// | _ => () +// } +// switch await Promise.all( +// Db.sql->PgStorage.setEntityHistoryOrThrow( +// ~entityHistory=TestEntity.entityHistory, +// ~rows=[ +// { +// entityData: Set({id: "2", fieldA: 3, fieldB: None}), +// previous: None, +// current: { +// chain_id: 1, +// block_timestamp: 4, +// block_number: 10, +// log_index: 6, +// }, +// }, +// ], +// ~shouldCopyCurrentEntity=true, +// ), +// ) { +// | exception exn => +// Js.log2("insertRow exn", exn) +// Assert.fail("Failed to insert mock entity history") +// | _ => () +// } + +// let _ = await Promise.all( +// Db.sql->PgStorage.setEntityHistoryOrThrow( +// ~entityHistory=TestEntity.entityHistory, +// ~rows=[ +// { +// entityData: Set({id: "3", fieldA: 4, fieldB: None}), +// previous: None, +// current: { +// chain_id: 137, +// block_timestamp: 4, +// block_number: 7, +// log_index: 6, +// }, +// }, +// ], +// ~shouldCopyCurrentEntity=true, +// ), +// ) +// }) + +// it("Creates prune stale entity history query", () => { +// let query = EntityHistory.makePruneStaleEntityHistoryQuery( +// ~entityName="TestEntity", +// ~pgSchema="foo", +// ) +// Assert.equal( +// query, +// `WITH safe AS ( +// SELECT s.chain_id, s.block_number +// FROM unnest($1::int[], $2::bigint[]) AS s(chain_id, block_number) +// ), +// max_before_safe AS ( +// SELECT t.id, MAX(t.serial) AS keep_serial +// FROM "foo"."TestEntity_history" t +// JOIN safe s +// ON s.chain_id = t.entity_history_chain_id +// AND t.entity_history_block_number <= s.block_number +// GROUP BY t.id +// ), +// post_safe AS ( +// SELECT DISTINCT t.id +// FROM "foo"."TestEntity_history" t +// JOIN safe s +// ON s.chain_id = t.entity_history_chain_id +// AND t.entity_history_block_number > s.block_number +// ) +// DELETE FROM "foo"."TestEntity_history" d +// USING max_before_safe m +// LEFT JOIN post_safe p ON p.id = m.id +// WHERE d.id = m.id +// AND ( +// d.serial < m.keep_serial +// OR (p.id IS NULL AND d.serial = m.keep_serial) +// );`, +// ) +// }) +// }) + +// module Mocks = { +// module Entity = { +// open TestEntity +// let entityId1 = "1" +// let mockEntity1 = {id: entityId1, fieldA: 1, fieldB: Some("test")} +// let mockEntity2 = {id: entityId1, fieldA: 2, fieldB: Some("test2")} +// let mockEntity3 = {id: entityId1, fieldA: 3, fieldB: Some("test3")} +// let mockEntity4 = {id: entityId1, fieldA: 4, fieldB: Some("test4")} + +// let entityId2 = "2" +// let mockEntity5 = {id: entityId2, fieldA: 5, fieldB: None} +// let mockEntity6 = {id: entityId2, fieldA: 6, fieldB: None} + +// let entityId3 = "3" +// let mockEntity7 = {id: entityId3, fieldA: 7, fieldB: None} +// let mockEntity8 = {id: entityId3, fieldA: 8, fieldB: None} +// } + +// module GnosisBug = { +// let chain_id = 1 + +// let event1: EntityHistory.historyFields = { +// chain_id, +// block_timestamp: 10 * 5, +// block_number: 10, +// log_index: 0, +// } + +// let event2: EntityHistory.historyFields = { +// chain_id, +// block_timestamp: 10 * 5, +// block_number: 10, +// log_index: 1, +// } + +// let historyRow1: testEntityHistory = { +// current: event1, +// previous: None, +// entityData: Set(Entity.mockEntity2), +// } + 
+// let historyRow2: testEntityHistory = { +// current: event2, +// previous: None, +// entityData: Set(Entity.mockEntity6), +// } + +// let historyRows = [historyRow1, historyRow2] + +// // For setting a different entity and testing pruning +// let event3: EntityHistory.historyFields = { +// chain_id, +// block_timestamp: 12 * 5, +// block_number: 12, +// log_index: 0, +// } + +// let historyRow3: testEntityHistory = { +// current: event3, +// previous: None, +// entityData: Set(Entity.mockEntity3), +// } + +// let historyRow4: testEntityHistory = { +// current: event3, +// previous: None, +// entityData: Set(Entity.mockEntity8), +// } + +// let historyRowsForPrune = [historyRow3, historyRow4] +// } + +// module Chain1 = { +// let chain_id = 1 + +// let event1: EntityHistory.historyFields = { +// chain_id, +// block_timestamp: 1, +// block_number: 1, +// log_index: 0, +// } + +// let event2: EntityHistory.historyFields = { +// chain_id, +// block_timestamp: 5, +// block_number: 2, +// log_index: 1, +// } + +// let event3: EntityHistory.historyFields = { +// chain_id, +// block_timestamp: 15, +// block_number: 4, +// log_index: 2, +// } + +// let historyRow1: testEntityHistory = { +// current: event1, +// previous: None, +// entityData: Set(Entity.mockEntity1), +// } + +// let historyRow2: testEntityHistory = { +// current: event2, +// previous: Some(event1), +// entityData: Set(Entity.mockEntity2), +// } + +// let historyRow3: testEntityHistory = { +// current: event3, +// previous: Some(event2), +// entityData: Set(Entity.mockEntity3), +// } + +// let historyRows = [historyRow1, historyRow2, historyRow3] + +// //Shows a case where no event exists on this block +// let rollbackEventIdentifier: Types.eventIdentifier = { +// blockTimestamp: 10, +// chainId: chain_id, +// blockNumber: 3, +// logIndex: 0, +// } + +// let orderedMultichainArg = DbFunctions.EntityHistory.Args.OrderedMultichain({ +// safeBlockTimestamp: rollbackEventIdentifier.blockTimestamp, +// reorgChainId: chain_id, +// safeBlockNumber: rollbackEventIdentifier.blockNumber, +// }) + +// let unorderedMultichainArg = DbFunctions.EntityHistory.Args.UnorderedMultichain({ +// reorgChainId: chain_id, +// safeBlockNumber: rollbackEventIdentifier.blockNumber, +// }) +// } + +// module Chain2 = { +// let chain_id = 2 + +// let event1: EntityHistory.historyFields = { +// chain_id, +// block_timestamp: 3, +// block_number: 1, +// log_index: 0, +// } + +// let event2: EntityHistory.historyFields = { +// chain_id, +// block_timestamp: 8, +// block_number: 2, +// log_index: 1, +// } + +// let event3: EntityHistory.historyFields = { +// chain_id, +// block_timestamp: 13, +// block_number: 3, +// log_index: 2, +// } + +// let historyRow1: testEntityHistory = { +// current: event1, +// previous: None, +// entityData: Set(Entity.mockEntity5), +// } + +// let historyRow2: testEntityHistory = { +// current: event2, +// previous: Some(event1), +// entityData: Delete({id: Entity.entityId2}), +// } +// let historyRow3: testEntityHistory = { +// current: event3, +// previous: Some(event2), +// entityData: Set(Entity.mockEntity6), +// } + +// let historyRows = [historyRow1, historyRow2, historyRow3] +// } + +// let historyRows = Utils.Array.mergeSorted( +// (a, b) => a.EntityHistory.current.block_timestamp < b.current.block_timestamp, +// Chain1.historyRows, +// Chain2.historyRows, +// ) +// } + +// describe("Entity history rollbacks", () => { +// Async.beforeEach(async () => { +// try { +// let _ = DbHelpers.resetPostgresClient() +// let storage = 
PgStorage.make( +// ~sql=Db.sql, +// ~pgSchema=Env.Db.publicSchema, +// ~pgUser=Env.Db.user, +// ~pgDatabase=Env.Db.database, +// ~pgPassword=Env.Db.password, +// ~pgHost=Env.Db.host, +// ~pgPort=Env.Db.port, +// ) +// let _ = await storage.initialize( +// ~chainConfigs=[], +// ~entities=[module(TestEntity)->Entities.entityModToInternal], +// ~enums=[Persistence.entityHistoryActionEnumConfig->Internal.fromGenericEnumConfig], +// ) + +// let _ = +// await Db.sql->Postgres.unsafe( +// TestEntity.entityHistory.makeInsertFnQuery(~pgSchema="public"), +// ) + +// try await Db.sql->PgStorage.setOrThrow( +// ~items=[ +// Mocks.Entity.mockEntity1->TestEntity.castToInternal, +// Mocks.Entity.mockEntity5->TestEntity.castToInternal, +// ], +// ~table=TestEntity.table, +// ~itemSchema=TestEntity.schema, +// ~pgSchema=Config.storagePgSchema, +// ) catch { +// | exn => +// Js.log2("batchSet mock entity exn", exn) +// Assert.fail("Failed to set mock entity in table") +// } + +// try await Db.sql->Postgres.beginSql( +// sql => +// sql +// ->PgStorage.setEntityHistoryOrThrow( +// ~entityHistory=TestEntity.entityHistory, +// ~rows=Mocks.GnosisBug.historyRows, +// ) +// ->Promise.all +// ->Promise.ignoreValue, +// ) catch { +// | exn => +// Js.log2("insert mock rows exn", exn) +// Assert.fail("Failed to insert mock rows") +// } + +// let historyItems = { +// let items = await Db.sql->getAllMockEntityHistory +// items->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) +// } +// Assert.equal(historyItems->Js.Array2.length, 4, ~message="Should have 4 history items") +// Assert.ok( +// historyItems->Belt.Array.some(item => item.current.chain_id == 0), +// ~message="Should contain 2 copied items", +// ) +// } catch { +// | exn => +// Js.log2(" Entity history setup exn", exn) +// Assert.fail("Failed setting up tables") +// } +// }) + +// Async.it("Rollback ignores copied entities as an item in reorg threshold", async () => { +// let rollbackDiff = await Db.sql->DbFunctions.EntityHistory.getRollbackDiff( +// OrderedMultichain({ +// reorgChainId: Mocks.GnosisBug.chain_id, +// safeBlockNumber: 9, +// safeBlockTimestamp: 9 * 5, +// }), +// ~entityConfig=module(TestEntity)->Entities.entityModToInternal, +// ) + +// let expectedDiff: array> = [ +// { +// current: {chain_id: 0, block_timestamp: 0, block_number: 0, log_index: 0}, +// previous: %raw(`undefined`), +// entityData: Set(Mocks.Entity.mockEntity1->TestEntity.castToInternal), +// }, +// { +// current: {chain_id: 0, block_timestamp: 0, block_number: 0, log_index: 0}, +// previous: %raw(`undefined`), +// entityData: Set(Mocks.Entity.mockEntity5->TestEntity.castToInternal), +// }, +// ] + +// Assert.deepStrictEqual( +// rollbackDiff, +// expectedDiff, +// ~message="Should rollback to the copied entity", +// ) +// }) + +// Async.it( +// "Deleting items after reorg event should not remove the copied history item", +// async () => { +// await Db.sql->DbFunctions.EntityHistory.deleteAllEntityHistoryAfterEventIdentifier( +// ~isUnorderedMultichainMode=false, +// ~eventIdentifier={ +// chainId: Mocks.GnosisBug.chain_id, +// blockTimestamp: 9 * 5, +// blockNumber: 9, +// logIndex: 0, +// }, +// ~allEntities=[module(TestEntity)->Entities.entityModToInternal], +// ) + +// let historyItems = { +// let items = await Db.sql->getAllMockEntityHistory +// items->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) +// } + +// Assert.equal(historyItems->Js.Array2.length, 2, ~message="Should have the 2 copied items") + +// let allItemsAreZeroChainId = +// 
historyItems->Belt.Array.every(item => item.current.chain_id == 0) + +// Assert.ok( +// allItemsAreZeroChainId, +// ~message="Should have all items in the zero chain id since they are copied", +// ) +// }, +// ) + +// Async.it("Prunes history correctly with items in reorg threshold", async () => { +// // set the current entity of id 3 +// await Db.sql->PgStorage.setOrThrow( +// ~items=[Mocks.Entity.mockEntity7->TestEntity.castToInternal], +// ~table=TestEntity.table, +// ~itemSchema=TestEntity.schema, +// ~pgSchema=Config.storagePgSchema, +// ) + +// // set an updated version of its row to get a copied entity history +// try await Db.sql->Postgres.beginSql( +// sql => +// sql +// ->PgStorage.setEntityHistoryOrThrow( +// ~entityHistory=TestEntity.entityHistory, +// ~rows=Mocks.GnosisBug.historyRowsForPrune, +// ) +// ->Promise.all +// ->Promise.ignoreValue, +// ) catch { +// | exn => +// Js.log2("insert mock rows exn", exn) +// Assert.fail("Failed to insert mock rows") +// } + +// // let historyItemsBefore = { +// // let items = await Db.sql->getAllMockEntityHistory +// // Js.log2("history items before prune", items) +// // items->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) +// // } + +// let () = await Db.sql->EntityHistory.pruneStaleEntityHistory( +// ~entityName=(module(TestEntity)->Entities.entityModToInternal).name, +// ~pgSchema=Env.Db.publicSchema, +// ~safeReorgBlocks={ +// chainIds: [Mocks.GnosisBug.chain_id], +// blockNumbers: [11], +// }, +// ) + +// let historyItemsAfter = { +// let items = await Db.sql->getAllMockEntityHistory +// // Js.log2("history items after prune", items) +// items->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) +// } + +// Assert.equal( +// historyItemsAfter->Js.Array2.length, +// 4, +// ~message="Should have 4 history items for entity id 1 and 3 before and after block 11", +// ) +// }) +// }) + +// describe("Entity history rollbacks", () => { +// Async.beforeEach(async () => { +// try { +// let _ = DbHelpers.resetPostgresClient() +// let storage = PgStorage.make( +// ~sql=Db.sql, +// ~pgSchema=Env.Db.publicSchema, +// ~pgUser=Env.Db.user, +// ~pgDatabase=Env.Db.database, +// ~pgPassword=Env.Db.password, +// ~pgHost=Env.Db.host, +// ~pgPort=Env.Db.port, +// ) +// let _ = await storage.initialize( +// ~chainConfigs=[], +// ~entities=[module(TestEntity)->Entities.entityModToInternal], +// ~enums=[Persistence.entityHistoryActionEnumConfig->Internal.fromGenericEnumConfig], +// ) + +// let _ = +// await Db.sql->Postgres.unsafe( +// TestEntity.entityHistory.makeInsertFnQuery(~pgSchema="public"), +// ) + +// try await Db.sql->Postgres.beginSql( +// sql => +// sql +// ->PgStorage.setEntityHistoryOrThrow( +// ~entityHistory=TestEntity.entityHistory, +// ~rows=Mocks.historyRows, +// ) +// ->Promise.all +// ->Promise.ignoreValue, +// ) catch { +// | exn => +// Js.log2("insert mock rows exn", exn) +// Assert.fail("Failed to insert mock rows") +// } + +// let historyItems = { +// let items = await Db.sql->getAllMockEntityHistory +// items->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) +// } +// Assert.equal(historyItems->Js.Array2.length, 6, ~message="Should have 6 history items") +// Assert.ok( +// !(historyItems->Belt.Array.some(item => item.current.chain_id == 0)), +// ~message="No defaulted/copied values should exist in history", +// ) +// } catch { +// | exn => +// Js.log2(" Entity history setup exn", exn) +// Assert.fail("Failed setting up tables") +// } +// }) + +// Async.it("Returns expected diff for ordered multichain mode", 
async () => {
+// let orderedMultichainRollbackDiff = try await Db.sql->DbFunctions.EntityHistory.getRollbackDiff(
+// Mocks.Chain1.orderedMultichainArg,
+// ~entityConfig=module(TestEntity)->Entities.entityModToInternal,
+// ) catch {
+// | exn =>
+// Js.log2("getRollbackDiff exn", exn)
+// Assert.fail("Failed to get rollback diff")
+// }
+
+// switch orderedMultichainRollbackDiff {
+// | [
+// {current: currentA, entityData: Set(entitySetA)},
+// {current: currentB, entityData: Delete({id: entityDeleteB})},
+// ] =>
+// Assert.deepEqual(
+// currentA,
+// Mocks.Chain1.event2,
+// ~message="First history item should have diffed to event2",
+// )
+// Assert.deepEqual(
+// entitySetA,
+// Mocks.Entity.mockEntity2->TestEntity.castToInternal,
+// ~message="First history item should have diffed to mockEntity2",
+// )
+// Assert.deepEqual(
+// currentB,
+// Mocks.Chain2.event2,
+// ~message="Second history item should have diffed to event2",
+// )
+// Assert.deepEqual(
+// entityDeleteB,
+// Mocks.Entity.entityId2,
+// ~message="Second history item should have diffed a delete of entityId2",
+// )
+// | _ => Assert.fail("Should have a set and delete history item in diff")
+// }
+// })
+
+// Async.it("Returns expected diff for unordered multichain mode", async () => {
+// let unorderedMultichainRollbackDiff = try await Db.sql->DbFunctions.EntityHistory.getRollbackDiff(
+// Mocks.Chain1.unorderedMultichainArg,
+// ~entityConfig=module(TestEntity)->Entities.entityModToInternal,
+// ) catch {
+// | exn =>
+// Js.log2("getRollbackDiff exn", exn)
+// Assert.fail("Failed to get rollback diff")
+// }
+
+// switch unorderedMultichainRollbackDiff {
+// | [{current: currentA, entityData: Set(entitySetA)}] =>
+// Assert.deepEqual(
+// currentA,
+// Mocks.Chain1.event2,
+// ~message="First history item should have diffed to event2",
+// )
+// Assert.deepEqual(
+// entitySetA,
+// Mocks.Entity.mockEntity2->TestEntity.castToInternal,
+// ~message="First history item should have diffed to mockEntity2",
+// )
+// | _ => Assert.fail("Should have only chain 1 item in diff")
+// }
+// })
+
+// Async.it("Deletes current history after rollback ordered", async () => {
+// let _ =
+// await Db.sql->DbFunctions.EntityHistory.deleteAllEntityHistoryAfterEventIdentifier(
+// ~isUnorderedMultichainMode=false,
+// ~eventIdentifier=Mocks.Chain1.rollbackEventIdentifier,
+// ~allEntities=[module(TestEntity)->Entities.entityModToInternal],
+// )
+
+// let currentHistoryItems = await Db.sql->getAllMockEntityHistory
+// let parsedHistoryItems =
+// currentHistoryItems->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows)
+
+// let expectedHistoryItems = Mocks.historyRows->Belt.Array.slice(~offset=0, ~len=4)
+
+// Assert.deepEqual(
+// parsedHistoryItems->stripUndefinedFieldsInPlace,
+// expectedHistoryItems->stripUndefinedFieldsInPlace,
+// ~message="Should have deleted last 2 items in history",
+// )
+// })
+
+// Async.it("Deletes current history after rollback unordered", async () => {
+// let _ =
+// await Db.sql->DbFunctions.EntityHistory.deleteAllEntityHistoryAfterEventIdentifier(
+// ~isUnorderedMultichainMode=true,
+// ~eventIdentifier=Mocks.Chain1.rollbackEventIdentifier,
+// ~allEntities=[module(TestEntity)->Entities.entityModToInternal],
+// )
+
+// let currentHistoryItems = await Db.sql->getAllMockEntityHistory
+// let parsedHistoryItems =
+// currentHistoryItems->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows)
+
+// let expectedHistoryItems = Mocks.historyRows->Belt.Array.slice(~offset=0, ~len=5)
+
+// 
Assert.deepEqual( +// parsedHistoryItems->stripUndefinedFieldsInPlace, +// expectedHistoryItems->stripUndefinedFieldsInPlace, +// ~message="Should have deleted just the last item in history", +// ) +// }) + +// Async.it("Prunes history correctly with items in reorg threshold", async () => { +// let () = await Db.sql->EntityHistory.pruneStaleEntityHistory( +// ~entityName=(module(TestEntity)->Entities.entityModToInternal).name, +// ~pgSchema=Env.Db.publicSchema, +// ~safeReorgBlocks={ +// chainIds: [1, 2], +// blockNumbers: [3, 2], +// }, +// ) +// let currentHistoryItems = await Db.sql->getAllMockEntityHistory + +// let parsedHistoryItems = +// currentHistoryItems->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) + +// let expectedHistoryItems = [ +// Mocks.Chain1.historyRow2, +// Mocks.Chain1.historyRow3, +// Mocks.Chain2.historyRow2, +// Mocks.Chain2.historyRow3, +// ] + +// let sort = arr => +// arr->Js.Array2.sortInPlaceWith( +// (a, b) => a.EntityHistory.current.block_number - b.current.block_number, +// ) + +// Assert.deepEqual( +// parsedHistoryItems->sort->stripUndefinedFieldsInPlace, +// expectedHistoryItems->sort->stripUndefinedFieldsInPlace, +// ~message="Should have deleted the unneeded first items in history", +// ) +// }) + +// Async.it("Prunes history correctly with items in reorg threshold", async () => { +// let () = await Db.sql->EntityHistory.pruneStaleEntityHistory( +// ~entityName=(module(TestEntity)->Entities.entityModToInternal).name, +// ~pgSchema=Env.Db.publicSchema, +// ~safeReorgBlocks={ +// chainIds: [1, 2], +// blockNumbers: [3, 2], +// }, +// ) +// let currentHistoryItems = await Db.sql->getAllMockEntityHistory + +// let parsedHistoryItems = +// currentHistoryItems->S.parseJsonOrThrow(TestEntity.entityHistory.schemaRows) + +// let sort = arr => +// arr->Js.Array2.sortInPlaceWith( +// (a, b) => a.EntityHistory.current.block_number - b.current.block_number, +// ) + +// Assert.deepEqual( +// parsedHistoryItems->sort->stripUndefinedFieldsInPlace, +// [ +// Mocks.Chain1.historyRow2, +// Mocks.Chain2.historyRow2, +// Mocks.Chain2.historyRow3, +// Mocks.Chain1.historyRow3, +// ]->stripUndefinedFieldsInPlace, +// ~message="Should have deleted the unneeded first items in history", +// ) +// }) + +// Async.it("Prunes history correctly with no items in reorg threshold", async () => { +// let () = await Db.sql->EntityHistory.pruneStaleEntityHistory( +// ~entityName=(module(TestEntity)->Entities.entityModToInternal).name, +// ~pgSchema=Env.Db.publicSchema, +// ~safeReorgBlocks={ +// chainIds: [1, 2], +// blockNumbers: [4, 3], +// }, +// ) +// let currentHistoryItems = await Db.sql->getAllMockEntityHistory + +// Assert.ok( +// currentHistoryItems->Array.length == 0, +// ~message="Should have deleted all items in history", +// ) +// }) +// }) + +// describe_skip("Prune performance test", () => { +// Async.it("Print benchmark of prune function", async () => { +// let _ = DbHelpers.resetPostgresClient() +// let storage = PgStorage.make( +// ~sql=Db.sql, +// ~pgSchema=Env.Db.publicSchema, +// ~pgUser=Env.Db.user, +// ~pgDatabase=Env.Db.database, +// ~pgPassword=Env.Db.password, +// ~pgHost=Env.Db.host, +// ~pgPort=Env.Db.port, +// ) +// let _ = await storage.initialize( +// ~entities=[module(TestEntity)->Entities.entityModToInternal], +// ~chainConfigs=[], +// ~enums=[Persistence.entityHistoryActionEnumConfig->Internal.fromGenericEnumConfig], +// ) + +// let _ = +// await Db.sql->Postgres.unsafe(TestEntity.entityHistory.makeInsertFnQuery(~pgSchema="public")) + +// let 
rows: array = [] +// for i in 0 to 1000 { +// let mockEntity: TestEntity.t = { +// id: i->mod(10)->Belt.Int.toString, +// fieldA: i, +// fieldB: None, +// } + +// let historyRow: testEntityHistory = { +// current: { +// chain_id: 1, +// block_timestamp: i * 5, +// block_number: i, +// log_index: 0, +// }, +// previous: None, +// entityData: Set(mockEntity), +// } +// rows->Js.Array2.push(historyRow)->ignore +// } + +// try await Db.sql->Postgres.beginSql( +// sql => +// sql +// ->PgStorage.setEntityHistoryOrThrow(~entityHistory=TestEntity.entityHistory, ~rows) +// ->Promise.all +// ->Promise.ignoreValue, +// ) catch { +// | exn => +// Js.log2("insert mock rows exn", exn) +// Assert.fail("Failed to insert mock rows") +// } + +// let startTime = Hrtime.makeTimer() + +// try { +// let () = await Db.sql->EntityHistory.pruneStaleEntityHistory( +// ~entityName=(module(TestEntity)->Entities.entityModToInternal).name, +// ~pgSchema=Env.Db.publicSchema, +// ~safeReorgBlocks={ +// chainIds: [1], +// blockNumbers: [500], +// }, +// ) +// } catch { +// | exn => +// Js.log2("prune stale entity history exn", exn) +// Assert.fail("Failed to prune stale entity history") +// } + +// let elapsedTime = Hrtime.timeSince(startTime)->Hrtime.toMillis->Hrtime.intFromMillis +// Js.log2("Elapsed time", elapsedTime) +// }) +// }) + diff --git a/scenarios/test_codegen/test/lib_tests/EventRouter_test.res b/scenarios/test_codegen/test/lib_tests/EventRouter_test.res index a073def7f..998ac2c4a 100644 --- a/scenarios/test_codegen/test/lib_tests/EventRouter_test.res +++ b/scenarios/test_codegen/test/lib_tests/EventRouter_test.res @@ -155,10 +155,10 @@ describe("EventRouter", () => { indexingContracts->Js.Dict.set( nonWildcardContractAddress->Address.toString, { - FetchState.startBlock: 0, + Internal.startBlock: 0, contractName: nonWildcardContractName, address: nonWildcardContractAddress, - register: Config, + registrationBlock: None, }, ) diff --git a/scenarios/test_codegen/test/lib_tests/FetchState_onBlock_test.res b/scenarios/test_codegen/test/lib_tests/FetchState_onBlock_test.res index ff596a289..d8aa7f139 100644 --- a/scenarios/test_codegen/test/lib_tests/FetchState_onBlock_test.res +++ b/scenarios/test_codegen/test/lib_tests/FetchState_onBlock_test.res @@ -38,10 +38,10 @@ let makeInitialWithOnBlock = (~startBlock=0, ~onBlockConfigs) => { ~eventConfigs=[baseEventConfig], ~contracts=[ { - FetchState.address: mockAddress0, + Internal.address: mockAddress0, contractName: "Gravatar", startBlock, - register: Config, + registrationBlock: None, }, ], ~startBlock, diff --git a/scenarios/test_codegen/test/lib_tests/FetchState_test.res b/scenarios/test_codegen/test/lib_tests/FetchState_test.res index 3bbf797c9..b7e29f939 100644 --- a/scenarios/test_codegen/test/lib_tests/FetchState_test.res +++ b/scenarios/test_codegen/test/lib_tests/FetchState_test.res @@ -45,32 +45,22 @@ let getBlockData = (~blockNumber): FetchState.blockNumberAndTimestamp => { let makeDynContractRegistration = ( ~contractAddress, ~blockNumber, - ~logIndex=0, ~contractType=Gravatar, - ~registeringEventContractName="MockGravatarFactory", - ~registeringEventName="MockCreateGravatar", - ~registeringEventSrcAddress=mockFactoryAddress, -): FetchState.indexingContract => { +): Internal.indexingContract => { { address: contractAddress, contractName: (contractType :> string), startBlock: blockNumber, - register: DC({ - registeringEventLogIndex: logIndex, - registeringEventBlockTimestamp: getTimestamp(~blockNumber), - registeringEventContractName, - 
registeringEventName, - registeringEventSrcAddress, - }), + registrationBlock: Some(blockNumber), } } -let makeConfigContract = (contractName, address): FetchState.indexingContract => { +let makeConfigContract = (contractName, address): Internal.indexingContract => { { address, contractName, startBlock: 0, - register: Config, + registrationBlock: None, } } @@ -83,6 +73,12 @@ let mockEvent = (~blockNumber, ~logIndex=0, ~chainId=1): Internal.item => Intern event: Utils.magic("Mock event in fetchstate test"), }) +let dcToItem = (dc: Internal.indexingContract) => { + let item = mockEvent(~blockNumber=dc.startBlock) + item->Internal.setItemDcs([dc]) + item +} + let baseEventConfig = (Mock.evmEventConfig( ~id="0", ~contractName="Gravatar", @@ -103,10 +99,10 @@ let makeInitial = ( ~eventConfigs=[baseEventConfig, baseEventConfig2], ~contracts=[ { - FetchState.address: mockAddress0, + Internal.address: mockAddress0, contractName: "Gravatar", startBlock, - register: Config, + registrationBlock: None, }, ], ~startBlock, @@ -120,7 +116,7 @@ let makeInitial = ( // Helper to build indexingContracts dict for test expectations // Note: dynamic contract info is now only tracked by the register field (DC variant) -let makeIndexingContractsWithDynamics = (dcs: array, ~static=[]) => { +let makeIndexingContractsWithDynamics = (dcs: array, ~static=[]) => { let dict = Js.Dict.empty() dcs->Array.forEach(dc => { dict->Js.Dict.set(dc.address->Address.toString, dc) @@ -132,7 +128,7 @@ let makeIndexingContractsWithDynamics = (dcs: array address, contractName: (Gravatar :> string), startBlock: 0, - register: Config, + registrationBlock: None, }, ) }) @@ -177,7 +173,6 @@ describe("FetchState.make", () => { chainId: 0, indexingContracts: fetchState.indexingContracts, contractConfigs: fetchState.contractConfigs, - dcsToStore: [], blockLag: 0, onBlockConfigs: [], }, @@ -250,7 +245,6 @@ describe("FetchState.make", () => { chainId, indexingContracts: fetchState.indexingContracts, contractConfigs: fetchState.contractConfigs, - dcsToStore: [], blockLag: 0, onBlockConfigs: [], }, @@ -316,7 +310,6 @@ describe("FetchState.make", () => { chainId, indexingContracts: fetchState.indexingContracts, contractConfigs: fetchState.contractConfigs, - dcsToStore: [], blockLag: 0, onBlockConfigs: [], }, @@ -414,7 +407,6 @@ describe("FetchState.make", () => { chainId, indexingContracts: fetchState.indexingContracts, contractConfigs: fetchState.contractConfigs, - dcsToStore: [], blockLag: 0, onBlockConfigs: [], }, @@ -440,7 +432,7 @@ describe("FetchState.registerDynamicContracts", () => { Assert.equal( fetchState->FetchState.registerDynamicContracts([ - makeDynContractRegistration(~blockNumber=0, ~contractAddress=mockAddress0), + makeDynContractRegistration(~blockNumber=0, ~contractAddress=mockAddress0)->dcToItem, ]), fetchState, ~message="Should return fetchState without updating it", @@ -454,7 +446,7 @@ describe("FetchState.registerDynamicContracts", () => { let dc1 = makeDynContractRegistration(~blockNumber=2, ~contractAddress=mockAddress1) - let fetchStateWithDc1 = fetchState->FetchState.registerDynamicContracts([dc1]) + let fetchStateWithDc1 = fetchState->FetchState.registerDynamicContracts([dc1->dcToItem]) Assert.deepEqual( (fetchState.partitions->Array.length, fetchStateWithDc1.partitions->Array.length), @@ -463,14 +455,14 @@ describe("FetchState.registerDynamicContracts", () => { ) Assert.equal( - fetchStateWithDc1->FetchState.registerDynamicContracts([dc1]), + fetchStateWithDc1->FetchState.registerDynamicContracts([dc1->dcToItem]), 
fetchStateWithDc1, ~message="Calling it with the same dc for the second time shouldn't change anything", ) Assert.equal( fetchStateWithDc1->FetchState.registerDynamicContracts([ - makeDynContractRegistration(~blockNumber=0, ~contractAddress=mockAddress1), + makeDynContractRegistration(~blockNumber=0, ~contractAddress=mockAddress1)->dcToItem, ]), fetchStateWithDc1, ~message=`BROKEN: Calling it with the same dc @@ -490,7 +482,13 @@ describe("FetchState.registerDynamicContracts", () => { let dc3 = makeDynContractRegistration(~blockNumber=2, ~contractAddress=mockAddress3) let dc4 = makeDynContractRegistration(~blockNumber=2, ~contractAddress=mockAddress4) - let updatedFetchState = fetchState->FetchState.registerDynamicContracts([dc1, dc2, dc3, dc4]) + let updatedFetchState = + fetchState->FetchState.registerDynamicContracts([ + dc1->dcToItem, + dc2->dcToItem, + dc3->dcToItem, + dc4->dcToItem, + ]) Assert.deepEqual( updatedFetchState.partitions, @@ -533,10 +531,10 @@ describe("FetchState.registerDynamicContracts", () => { ) let updatedFetchState = fetchState->FetchState.registerDynamicContracts([ - dc1FromAnotherContract, - dc2, - dc3, - dc4FromAnotherContract, + dc1FromAnotherContract->dcToItem, + dc2->dcToItem, + dc3->dcToItem, + dc4FromAnotherContract->dcToItem, ]) Assert.deepEqual( @@ -634,7 +632,13 @@ describe("FetchState.registerDynamicContracts", () => { ) let updatedFetchState = - fetchState->FetchState.registerDynamicContracts([dc1, dc2, dc3, dc4, dc5]) + fetchState->FetchState.registerDynamicContracts([ + dc1->dcToItem, + dc2->dcToItem, + dc3->dcToItem, + dc4->dcToItem, + dc5->dcToItem, + ]) Assert.deepEqual( updatedFetchState.partitions, @@ -715,13 +719,16 @@ describe("FetchState.registerDynamicContracts", () => { let dc1 = makeDynContractRegistration(~blockNumber=20, ~contractAddress=mockAddress1) let dc2 = makeDynContractRegistration(~blockNumber=10, ~contractAddress=mockAddress1) + let dcItem1 = dc1->dcToItem + let dcItem2 = dc2->dcToItem - let updatedFetchState = fetchState->FetchState.registerDynamicContracts([dc1, dc2]) + let updatedFetchState = fetchState->FetchState.registerDynamicContracts([dcItem2, dcItem1]) Assert.deepEqual( - updatedFetchState.dcsToStore, - [dc2], - ~message="Should choose the earliest dc from the batch", + (dcItem1->Internal.getItemDcs, dcItem2->Internal.getItemDcs), + (Some([]), Some([dc2])), + ~message=`Should choose the earliest dc from the batch +End remove the dc from the later one, so they are not duplicated in the db`, ) Assert.deepEqual( updatedFetchState.indexingContracts, @@ -764,13 +771,12 @@ describe("FetchState.registerDynamicContracts", () => { let updatedFetchState = fetchState->FetchState.registerDynamicContracts(// Order of dcs doesn't matter // but they are not sorted in fetch state - [dc1, dc3, dc2]) + [dc1->dcToItem, dc3->dcToItem, dc2->dcToItem]) Assert.equal(updatedFetchState.indexingContracts->Utils.Dict.size, 4) Assert.deepEqual( updatedFetchState, { ...fetchState, - dcsToStore: [dc1, dc3, dc2], indexingContracts: updatedFetchState.indexingContracts, nextPartitionIndex: 2, partitions: fetchState.partitions->Array.concat([ @@ -890,7 +896,6 @@ describe("FetchState.registerDynamicContracts", () => { chainId, indexingContracts: fetchState.indexingContracts, contractConfigs: fetchState.contractConfigs, - dcsToStore: [], blockLag: 0, onBlockConfigs: [], }, @@ -931,7 +936,6 @@ describe("FetchState.getNextQuery & integration", () => { buffer: [mockEvent(~blockNumber=1), mockEvent(~blockNumber=2)], startBlock: 0, endBlock: None, - 
dcsToStore: [], blockLag: 0, normalSelection, chainId, @@ -939,10 +943,10 @@ describe("FetchState.getNextQuery & integration", () => { ( mockAddress0->Address.toString, { - FetchState.contractName: (Gravatar :> string), + Internal.contractName: (Gravatar :> string), startBlock: 0, address: mockAddress0, - register: Config, + registrationBlock: None, }, ), ]), @@ -954,7 +958,6 @@ describe("FetchState.getNextQuery & integration", () => { let makeIntermidiateDcMerge = (): FetchState.t => { let normalSelection = makeInitial().normalSelection { - dcsToStore: [dc2, dc1, dc3], partitions: [ { id: "0", @@ -1231,14 +1234,13 @@ describe("FetchState.getNextQuery & integration", () => { let fetchStateWithDcs = fetchState - ->FetchState.registerDynamicContracts([dc2, dc1]) - ->FetchState.registerDynamicContracts([dc3]) + ->FetchState.registerDynamicContracts([dc2->dcToItem, dc1->dcToItem]) + ->FetchState.registerDynamicContracts([dc3->dcToItem]) Assert.deepEqual( fetchStateWithDcs, { ...fetchState, - dcsToStore: [dc2, dc1, dc3], indexingContracts: makeIndexingContractsWithDynamics( [dc2, dc1, dc3], ~static=[mockAddress0], @@ -1626,7 +1628,7 @@ describe("FetchState.getNextQuery & integration", () => { ~targetBufferSize=10, ~chainId, )->FetchState.registerDynamicContracts([ - makeDynContractRegistration(~blockNumber=2, ~contractAddress=mockAddress2), + makeDynContractRegistration(~blockNumber=2, ~contractAddress=mockAddress2)->dcToItem, ]) Assert.deepEqual(fetchState.partitions->Array.length, 3) @@ -1667,14 +1669,12 @@ describe("FetchState.getNextQuery & integration", () => { it("Correctly rollbacks fetch state", () => { let fetchState = makeIntermidiateDcMerge() - let fetchStateAfterRollback1 = - fetchState->FetchState.rollback(~firstChangeEvent={blockNumber: 2, logIndex: 0}) + let fetchStateAfterRollback1 = fetchState->FetchState.rollback(~targetBlockNumber=1) Assert.deepEqual( fetchStateAfterRollback1, { ...fetchState, - dcsToStore: [dc1], indexingContracts: makeIndexingContractsWithDynamics([dc1], ~static=[mockAddress0]), partitions: [ { @@ -1708,13 +1708,12 @@ describe("FetchState.getNextQuery & integration", () => { // Rollback even more to see the removal of partition "2" let fetchStateAfterRollback2 = - fetchStateAfterRollback1->FetchState.rollback(~firstChangeEvent={blockNumber: 0, logIndex: 0}) + fetchStateAfterRollback1->FetchState.rollback(~targetBlockNumber=-1) Assert.deepEqual( fetchStateAfterRollback2, { ...fetchStateAfterRollback1, - dcsToStore: [], indexingContracts: makeIndexingContractsWithDynamics([], ~static=[mockAddress0]), partitions: [ { @@ -1761,7 +1760,7 @@ describe("FetchState.getNextQuery & integration", () => { ~targetBufferSize=10, ~chainId, )->FetchState.registerDynamicContracts([ - makeDynContractRegistration(~blockNumber=2, ~contractAddress=mockAddress2), + makeDynContractRegistration(~blockNumber=2, ~contractAddress=mockAddress2)->dcToItem, ]) // Additionally test that state being reset @@ -1788,14 +1787,12 @@ describe("FetchState.getNextQuery & integration", () => { ~message=`Should have 2 partitions before rollback`, ) - let fetchStateAfterRollback = - fetchState->FetchState.rollback(~firstChangeEvent={blockNumber: 2, logIndex: 0}) + let fetchStateAfterRollback = fetchState->FetchState.rollback(~targetBlockNumber=1) Assert.deepEqual( fetchStateAfterRollback, { ...fetchState, - dcsToStore: [], indexingContracts: Js.Dict.empty(), partitions: [ { @@ -1882,7 +1879,6 @@ describe("FetchState unit tests for specific cases", () => { updatedFetchState, { ...fetchState, - 
dcsToStore: [], partitions: [ { id: "0", @@ -2080,7 +2076,7 @@ describe("FetchState unit tests for specific cases", () => { makeDynContractRegistration( ~contractAddress=mockAddress1, ~blockNumber=registeringBlockNumber, - ), + )->dcToItem, ]) Assert.deepEqual( @@ -2182,7 +2178,7 @@ describe("FetchState unit tests for specific cases", () => { Assert.deepEqual( fetchState ->FetchState.registerDynamicContracts([ - makeDynContractRegistration(~contractAddress=mockAddress1, ~blockNumber=2), + makeDynContractRegistration(~contractAddress=mockAddress1, ~blockNumber=2)->dcToItem, ]) ->getEarliestEvent, NoItem({ @@ -2277,7 +2273,7 @@ describe("FetchState unit tests for specific cases", () => { //Dynamic contract A registered at block 100 let dcA = makeDynContractRegistration(~contractAddress=mockAddress2, ~blockNumber=100) - let fetchStateWithDcA = fetchState->FetchState.registerDynamicContracts([dcA]) + let fetchStateWithDcA = fetchState->FetchState.registerDynamicContracts([dcA->dcToItem]) let queryA = switch fetchStateWithDcA->FetchState.getNextQuery( ~concurrencyLimit=10, @@ -2314,7 +2310,7 @@ describe("FetchState unit tests for specific cases", () => { //Next registration happens at block 200, between the first register and the upperbound of it's query let fetchStateWithDcB = fetchStateWithDcA->FetchState.registerDynamicContracts([ - makeDynContractRegistration(~contractAddress=mockAddress3, ~blockNumber=200), + makeDynContractRegistration(~contractAddress=mockAddress3, ~blockNumber=200)->dcToItem, ]) Assert.deepEqual( @@ -2362,53 +2358,51 @@ describe("FetchState unit tests for specific cases", () => { ) }) -describe("FetchState.filterAndSortForUnorderedBatch", () => { - it( - "Filters out states without eligible items and sorts by earliest timestamp (public API)", - () => { - let mk = () => makeInitial() - let mkQuery = (fetchState: FetchState.t) => { - { - FetchState.partitionId: "0", - target: Head, - selection: fetchState.normalSelection, - addressesByContractName: Js.Dict.empty(), - fromBlock: 0, - indexingContracts: fetchState.indexingContracts, - } +describe("FetchState.sortForUnorderedBatch", () => { + it("Sorts by earliest timestamp. 
Chains without eligible items should go last", () => { + let mk = () => makeInitial() + let mkQuery = (fetchState: FetchState.t) => { + { + FetchState.partitionId: "0", + target: Head, + selection: fetchState.normalSelection, + addressesByContractName: Js.Dict.empty(), + fromBlock: 0, + indexingContracts: fetchState.indexingContracts, } + } - // Helper: create a fetch state with desired latestFetchedBlock and queue items via public API - let makeFsWith = (~latestBlock: int, ~queueBlocks: array): FetchState.t => { - let fs0 = mk() - let query = mkQuery(fs0) - fs0 - ->FetchState.handleQueryResult( - ~query, - ~latestFetchedBlock={blockNumber: latestBlock, blockTimestamp: latestBlock}, - ~newItems=queueBlocks->Array.map(b => mockEvent(~blockNumber=b)), - ) - ->Result.getExn - } + // Helper: create a fetch state with desired latestFetchedBlock and queue items via public API + let makeFsWith = (~latestBlock: int, ~queueBlocks: array): FetchState.t => { + let fs0 = mk() + let query = mkQuery(fs0) + fs0 + ->FetchState.handleQueryResult( + ~query, + ~latestFetchedBlock={blockNumber: latestBlock, blockTimestamp: latestBlock}, + ~newItems=queueBlocks->Array.map(b => mockEvent(~blockNumber=b)), + ) + ->Result.getExn + } - // Included: last queue item at block 1, latestFullyFetchedBlock = 10 - let fsEarly = makeFsWith(~latestBlock=10, ~queueBlocks=[2, 1]) - // Included: last queue item at block 5, latestFullyFetchedBlock = 10 - let fsLate = makeFsWith(~latestBlock=10, ~queueBlocks=[5]) - // Excluded: last queue item at block 11 (> latestFullyFetchedBlock = 10) - let fsExcluded = makeFsWith(~latestBlock=10, ~queueBlocks=[11]) + // Included: last queue item at block 1, latestFullyFetchedBlock = 10 + let fsEarly = makeFsWith(~latestBlock=10, ~queueBlocks=[2, 1]) + // Included: last queue item at block 5, latestFullyFetchedBlock = 10 + let fsLate = makeFsWith(~latestBlock=10, ~queueBlocks=[5]) + // Excluded: last queue item at block 11 (> latestFullyFetchedBlock = 10) + // UPD: Starting from 2.30.1+ it should go last instead of filtered + let fsExcluded = makeFsWith(~latestBlock=10, ~queueBlocks=[11]) - let prepared = FetchState.filterAndSortForUnorderedBatch( - [fsLate, fsExcluded, fsEarly], - ~batchSizeTarget=3, - ) + let prepared = FetchState.sortForUnorderedBatch( + [fsLate, fsExcluded, fsEarly], + ~batchSizeTarget=3, + ) - Assert.deepEqual( - prepared->Array.map(fs => fs.buffer->Belt.Array.getUnsafe(0)->Internal.getItemBlockNumber), - [1, 5], - ) - }, - ) + Assert.deepEqual( + prepared->Array.map(fs => fs.buffer->Belt.Array.getUnsafe(0)->Internal.getItemBlockNumber), + [1, 5, 11], + ) + }) it("Prioritizes full batches over half full ones", () => { let mk = () => makeInitial() @@ -2440,7 +2434,7 @@ describe("FetchState.filterAndSortForUnorderedBatch", () => { // Half-full batch (1 item) but earlier earliest item (block 1) let fsHalfEarlier = makeFsWith(~latestBlock=10, ~queueBlocks=[1]) - let prepared = FetchState.filterAndSortForUnorderedBatch( + let prepared = FetchState.sortForUnorderedBatch( [fsHalfEarlier, fsFullLater], ~batchSizeTarget=2, ) @@ -2481,7 +2475,7 @@ describe("FetchState.filterAndSortForUnorderedBatch", () => { // Half-full (1 item) but earlier earliest item let fsHalfEarlier = makeFsWith(~latestBlock=10, ~queueBlocks=[1]) - let prepared = FetchState.filterAndSortForUnorderedBatch( + let prepared = FetchState.sortForUnorderedBatch( [fsHalfEarlier, fsExactFull], ~batchSizeTarget=2, ) @@ -2517,10 +2511,10 @@ describe("FetchState.isReadyToEnterReorgThreshold", () => { 
~eventConfigs=[baseEventConfig, baseEventConfig2], ~contracts=[ { - FetchState.address: mockAddress0, + Internal.address: mockAddress0, contractName: "Gravatar", startBlock: 6, - register: Config, + registrationBlock: None, }, ], ~startBlock=6, @@ -2539,10 +2533,10 @@ describe("FetchState.isReadyToEnterReorgThreshold", () => { ~eventConfigs=[baseEventConfig, baseEventConfig2], ~contracts=[ { - FetchState.address: mockAddress0, + Internal.address: mockAddress0, contractName: "Gravatar", startBlock: 50, - register: Config, + registrationBlock: None, }, ], ~startBlock=50, @@ -2561,10 +2555,10 @@ describe("FetchState.isReadyToEnterReorgThreshold", () => { ~eventConfigs=[baseEventConfig, baseEventConfig2], ~contracts=[ { - FetchState.address: mockAddress0, + Internal.address: mockAddress0, contractName: "Gravatar", startBlock: 50, - register: Config, + registrationBlock: None, }, ], ~startBlock=50, @@ -2583,10 +2577,10 @@ describe("FetchState.isReadyToEnterReorgThreshold", () => { ~eventConfigs=[baseEventConfig, baseEventConfig2], ~contracts=[ { - FetchState.address: mockAddress0, + Internal.address: mockAddress0, contractName: "Gravatar", startBlock: 51, - register: Config, + registrationBlock: None, }, ], ~startBlock=51, @@ -2605,10 +2599,10 @@ describe("FetchState.isReadyToEnterReorgThreshold", () => { ~eventConfigs=[baseEventConfig, baseEventConfig2], ~contracts=[ { - FetchState.address: mockAddress0, + Internal.address: mockAddress0, contractName: "Gravatar", startBlock: 50, - register: Config, + registrationBlock: None, }, ], ~startBlock=50, @@ -2627,10 +2621,10 @@ describe("FetchState.isReadyToEnterReorgThreshold", () => { ~eventConfigs=[baseEventConfig, baseEventConfig2], ~contracts=[ { - FetchState.address: mockAddress0, + Internal.address: mockAddress0, contractName: "Gravatar", startBlock: 6, - register: Config, + registrationBlock: None, }, ], ~startBlock=6, @@ -2652,10 +2646,10 @@ describe("FetchState.isReadyToEnterReorgThreshold", () => { ~eventConfigs=[baseEventConfig, baseEventConfig2], ~contracts=[ { - FetchState.address: mockAddress0, + Internal.address: mockAddress0, contractName: "Gravatar", startBlock: 6, - register: Config, + registrationBlock: None, }, ], ~startBlock=6, @@ -2681,7 +2675,8 @@ describe("Dynamic contracts with start blocks", () => { ) // Register the contract at block 100 (before its startBlock) - let updatedFetchState = fetchState->FetchState.registerDynamicContracts([dynamicContract]) + let updatedFetchState = + fetchState->FetchState.registerDynamicContracts([dynamicContract->dcToItem]) // The contract should be registered in indexingContracts Assert.ok( @@ -2721,7 +2716,8 @@ describe("Dynamic contracts with start blocks", () => { ~contractType=Gravatar, ) - let updatedFetchState = fetchState->FetchState.registerDynamicContracts([contract1, contract2]) + let updatedFetchState = + fetchState->FetchState.registerDynamicContracts([contract1->dcToItem, contract2->dcToItem]) // Verify both contracts are registered with correct startBlocks let contract1Registered = @@ -2771,7 +2767,7 @@ describe("FetchState progress tracking", () => { let fetchStateEmpty = makeFetchStateWith(~latestBlock=100, ~queueBlocks=[]) Assert.equal( - fetchStateEmpty->FetchState.getProgressBlockNumber, + fetchStateEmpty->FetchState.getUnorderedMultichainProgressBlockNumberAt(~index=0), 100, ~message="Should return latestFullyFetchedBlock.blockNumber when queue is empty", ) @@ -2781,7 +2777,7 @@ describe("FetchState progress tracking", () => { let fetchStateSingleItem = 
makeFetchStateWith(~latestBlock=55, ~queueBlocks=[(55, 0)]) Assert.equal( - fetchStateSingleItem->FetchState.getProgressBlockNumber, + fetchStateSingleItem->FetchState.getUnorderedMultichainProgressBlockNumberAt(~index=0), 54, ~message="Should return single queue item blockNumber - 1", ) @@ -2791,7 +2787,7 @@ describe("FetchState progress tracking", () => { let fetchStateSingleItem = makeFetchStateWith(~latestBlock=55, ~queueBlocks=[(55, 5)]) Assert.equal( - fetchStateSingleItem->FetchState.getProgressBlockNumber, + fetchStateSingleItem->FetchState.getUnorderedMultichainProgressBlockNumberAt(~index=0), 54, ~message="Should return single queue item blockNumber - 1", ) @@ -2804,7 +2800,7 @@ describe("FetchState progress tracking", () => { ) Assert.equal( - fetchStateWithQueue->FetchState.getProgressBlockNumber, + fetchStateWithQueue->FetchState.getUnorderedMultichainProgressBlockNumberAt(~index=0), 90, ~message="Should return latest fetched block number", ) @@ -2820,7 +2816,8 @@ describe("FetchState buffer overflow prevention", () => { // Create a second partition to ensure buffer limiting logic is exercised across partitions // Register at a later block, so partition "0" remains the earliest and is selected let dc = makeDynContractRegistration(~blockNumber=0, ~contractAddress=mockAddress1) - let fetchStateWithTwoPartitions = fetchState->FetchState.registerDynamicContracts([dc]) + let fetchStateWithTwoPartitions = + fetchState->FetchState.registerDynamicContracts([dc->dcToItem]) // Build up a large queue using public API (handleQueryResult) // queue.length = 15, targetBufferSize = 10 diff --git a/scenarios/test_codegen/test/lib_tests/Persistence_test.res b/scenarios/test_codegen/test/lib_tests/Persistence_test.res index 6159565b1..d698036b8 100644 --- a/scenarios/test_codegen/test/lib_tests/Persistence_test.res +++ b/scenarios/test_codegen/test/lib_tests/Persistence_test.res @@ -78,6 +78,8 @@ describe("Test Persistence layer init", () => { cleanRun: true, chains: [], cache: Js.Dict.empty(), + reorgCheckpoints: [], + checkpointId: 0, } storageMock.resolveInitialize(initialState) let _ = await Promise.resolve() @@ -142,6 +144,8 @@ describe("Test Persistence layer init", () => { cleanRun: false, chains: [], cache: Js.Dict.empty(), + reorgCheckpoints: [], + checkpointId: 0, } storageMock.resolveLoadInitialState(initialState) let _ = await Promise.resolve() diff --git a/scenarios/test_codegen/test/lib_tests/PgStorage_test.res b/scenarios/test_codegen/test/lib_tests/PgStorage_test.res index 4fddda1c3..1df64d79b 100644 --- a/scenarios/test_codegen/test/lib_tests/PgStorage_test.res +++ b/scenarios/test_codegen/test/lib_tests/PgStorage_test.res @@ -63,7 +63,11 @@ describe("Test PgStorage SQL generation functions", () => { Async.it( "Should create SQL for A entity table", async () => { - let query = PgStorage.makeCreateTableQuery(Entities.A.table, ~pgSchema="test_schema") + let query = PgStorage.makeCreateTableQuery( + Entities.A.table, + ~pgSchema="test_schema", + ~isNumericArrayAsText=false, + ) let expectedTableSql = `CREATE TABLE IF NOT EXISTS "test_schema"."A"("b_id" TEXT NOT NULL, "id" TEXT NOT NULL, "optionalStringToTestLinkedEntities" TEXT, PRIMARY KEY("id"));` Assert.equal(query, expectedTableSql, ~message="A table SQL should match exactly") @@ -73,7 +77,11 @@ describe("Test PgStorage SQL generation functions", () => { Async.it( "Should create SQL for B entity table with derived fields", async () => { - let query = PgStorage.makeCreateTableQuery(Entities.B.table, ~pgSchema="test_schema") + 
let query = PgStorage.makeCreateTableQuery( + Entities.B.table, + ~pgSchema="test_schema", + ~isNumericArrayAsText=false, + ) let expectedBTableSql = `CREATE TABLE IF NOT EXISTS "test_schema"."B"("c_id" TEXT, "id" TEXT NOT NULL, PRIMARY KEY("id"));` Assert.equal(query, expectedBTableSql, ~message="B table SQL should match exactly") @@ -83,7 +91,11 @@ describe("Test PgStorage SQL generation functions", () => { Async.it( "Should handle default values", async () => { - let query = PgStorage.makeCreateTableQuery(Entities.A.table, ~pgSchema="test_schema") + let query = PgStorage.makeCreateTableQuery( + Entities.A.table, + ~pgSchema="test_schema", + ~isNumericArrayAsText=false, + ) let expectedDefaultTestSql = `CREATE TABLE IF NOT EXISTS "test_schema"."A"("b_id" TEXT NOT NULL, "id" TEXT NOT NULL, "optionalStringToTestLinkedEntities" TEXT, PRIMARY KEY("id"));` Assert.equal( @@ -102,6 +114,13 @@ describe("Test PgStorage SQL generation functions", () => { let entities = [ module(Entities.A)->Entities.entityModToInternal, module(Entities.B)->Entities.entityModToInternal, + module( + Entities.EntityWith63LenghtName______________________________________one + )->Entities.entityModToInternal, + module( + Entities.EntityWith63LenghtName______________________________________two + )->Entities.entityModToInternal, + module(Entities.EntityWithAllTypes)->Entities.entityModToInternal, ] let enums = [Enums.EntityType.config->Internal.fromGenericEnumConfig] @@ -115,18 +134,21 @@ describe("Test PgStorage SQL generation functions", () => { id: 1, startBlock: 100, endBlock: 200, - confirmedBlockThreshold: 10, + maxReorgDepth: 10, contracts: [], sources: [], }, { id: 137, startBlock: 0, - confirmedBlockThreshold: 200, + maxReorgDepth: 200, contracts: [], sources: [], }, ], + // Because of the line arrayOfBigInts and arrayOfBigDecimals should become TEXT[] instead of NUMERIC[] + // Related to https://github.com/enviodev/hyperindex/issues/788 + ~isHasuraEnabled=true, ) // Should return exactly 2 queries: main DDL + functions @@ -137,73 +159,65 @@ describe("Test PgStorage SQL generation functions", () => { ) let mainQuery = queries->Belt.Array.get(0)->Belt.Option.getExn - let functionsQuery = queries->Belt.Array.get(1)->Belt.Option.getExn let expectedMainQuery = `DROP SCHEMA IF EXISTS "test_schema" CASCADE; CREATE SCHEMA "test_schema"; GRANT ALL ON SCHEMA "test_schema" TO "postgres"; GRANT ALL ON SCHEMA "test_schema" TO public; -CREATE TYPE "test_schema".ENTITY_TYPE AS ENUM('A', 'B', 'C', 'CustomSelectionTestPass', 'D', 'EntityWithAllNonArrayTypes', 'EntityWithAllTypes', 'EntityWithBigDecimal', 'EntityWithTimestamp', 'Gravatar', 'NftCollection', 'PostgresNumericPrecisionEntityTester', 'SimpleEntity', 'Token', 'User', 'dynamic_contract_registry'); -CREATE TABLE IF NOT EXISTS "test_schema"."envio_chains"("id" INTEGER NOT NULL, "start_block" INTEGER NOT NULL, "end_block" INTEGER, "buffer_block" INTEGER NOT NULL, "source_block" INTEGER NOT NULL, "first_event_block" INTEGER, "ready_at" TIMESTAMP WITH TIME ZONE NULL, "events_processed" INTEGER NOT NULL, "_is_hyper_sync" BOOLEAN NOT NULL, "progress_block" INTEGER NOT NULL, "_num_batches_fetched" INTEGER NOT NULL, PRIMARY KEY("id")); +CREATE TYPE "test_schema".ENTITY_TYPE AS ENUM('A', 'B', 'C', 'CustomSelectionTestPass', 'D', 'EntityWith63LenghtName______________________________________one', 'EntityWith63LenghtName______________________________________two', 'EntityWithAllNonArrayTypes', 'EntityWithAllTypes', 'EntityWithBigDecimal', 'EntityWithTimestamp', 'Gravatar', 
'NftCollection', 'PostgresNumericPrecisionEntityTester', 'SimpleEntity', 'Token', 'User', 'dynamic_contract_registry'); +CREATE TABLE IF NOT EXISTS "test_schema"."envio_chains"("id" INTEGER NOT NULL, "start_block" INTEGER NOT NULL, "end_block" INTEGER, "max_reorg_depth" INTEGER NOT NULL, "buffer_block" INTEGER NOT NULL, "source_block" INTEGER NOT NULL, "first_event_block" INTEGER, "ready_at" TIMESTAMP WITH TIME ZONE NULL, "events_processed" INTEGER NOT NULL, "_is_hyper_sync" BOOLEAN NOT NULL, "progress_block" INTEGER NOT NULL, "_num_batches_fetched" INTEGER NOT NULL, PRIMARY KEY("id")); CREATE TABLE IF NOT EXISTS "test_schema"."persisted_state"("id" SERIAL NOT NULL, "envio_version" TEXT NOT NULL, "config_hash" TEXT NOT NULL, "schema_hash" TEXT NOT NULL, "handler_files_hash" TEXT NOT NULL, "abi_files_hash" TEXT NOT NULL, PRIMARY KEY("id")); -CREATE TABLE IF NOT EXISTS "test_schema"."end_of_block_range_scanned_data"("chain_id" INTEGER NOT NULL, "block_number" INTEGER NOT NULL, "block_hash" TEXT NOT NULL, PRIMARY KEY("chain_id", "block_number")); +CREATE TABLE IF NOT EXISTS "test_schema"."envio_checkpoints"("id" INTEGER NOT NULL, "chain_id" INTEGER NOT NULL, "block_number" INTEGER NOT NULL, "block_hash" TEXT, "events_processed" INTEGER NOT NULL, PRIMARY KEY("id")); CREATE TABLE IF NOT EXISTS "test_schema"."raw_events"("chain_id" INTEGER NOT NULL, "event_id" NUMERIC NOT NULL, "event_name" TEXT NOT NULL, "contract_name" TEXT NOT NULL, "block_number" INTEGER NOT NULL, "log_index" INTEGER NOT NULL, "src_address" TEXT NOT NULL, "block_hash" TEXT NOT NULL, "block_timestamp" INTEGER NOT NULL, "block_fields" JSONB NOT NULL, "transaction_fields" JSONB NOT NULL, "params" JSONB NOT NULL, "serial" SERIAL, PRIMARY KEY("serial")); CREATE TABLE IF NOT EXISTS "test_schema"."A"("b_id" TEXT NOT NULL, "id" TEXT NOT NULL, "optionalStringToTestLinkedEntities" TEXT, PRIMARY KEY("id")); -CREATE TABLE IF NOT EXISTS "test_schema"."A_history"("entity_history_block_timestamp" INTEGER NOT NULL, "entity_history_chain_id" INTEGER NOT NULL, "entity_history_block_number" INTEGER NOT NULL, "entity_history_log_index" INTEGER NOT NULL, "previous_entity_history_block_timestamp" INTEGER, "previous_entity_history_chain_id" INTEGER, "previous_entity_history_block_number" INTEGER, "previous_entity_history_log_index" INTEGER, "b_id" TEXT, "id" TEXT NOT NULL, "optionalStringToTestLinkedEntities" TEXT, "action" "test_schema".ENTITY_HISTORY_ROW_ACTION NOT NULL, "serial" SERIAL, PRIMARY KEY("entity_history_block_timestamp", "entity_history_chain_id", "entity_history_block_number", "entity_history_log_index", "id")); +CREATE TABLE IF NOT EXISTS "test_schema"."envio_history_A"("b_id" TEXT, "id" TEXT NOT NULL, "optionalStringToTestLinkedEntities" TEXT, "checkpoint_id" INTEGER NOT NULL, "envio_change" "test_schema".ENVIO_HISTORY_CHANGE NOT NULL, PRIMARY KEY("id", "checkpoint_id")); CREATE TABLE IF NOT EXISTS "test_schema"."B"("c_id" TEXT, "id" TEXT NOT NULL, PRIMARY KEY("id")); -CREATE TABLE IF NOT EXISTS "test_schema"."B_history"("entity_history_block_timestamp" INTEGER NOT NULL, "entity_history_chain_id" INTEGER NOT NULL, "entity_history_block_number" INTEGER NOT NULL, "entity_history_log_index" INTEGER NOT NULL, "previous_entity_history_block_timestamp" INTEGER, "previous_entity_history_chain_id" INTEGER, "previous_entity_history_block_number" INTEGER, "previous_entity_history_log_index" INTEGER, "c_id" TEXT, "id" TEXT NOT NULL, "action" "test_schema".ENTITY_HISTORY_ROW_ACTION NOT NULL, "serial" SERIAL, PRIMARY 
KEY("entity_history_block_timestamp", "entity_history_chain_id", "entity_history_block_number", "entity_history_log_index", "id")); +CREATE TABLE IF NOT EXISTS "test_schema"."envio_history_B"("c_id" TEXT, "id" TEXT NOT NULL, "checkpoint_id" INTEGER NOT NULL, "envio_change" "test_schema".ENVIO_HISTORY_CHANGE NOT NULL, PRIMARY KEY("id", "checkpoint_id")); +CREATE TABLE IF NOT EXISTS "test_schema"."EntityWith63LenghtName______________________________________one"("id" TEXT NOT NULL, PRIMARY KEY("id")); +CREATE TABLE IF NOT EXISTS "test_schema"."envio_history_EntityWith63LenghtName__________________________5"("id" TEXT NOT NULL, "checkpoint_id" INTEGER NOT NULL, "envio_change" "test_schema".ENVIO_HISTORY_CHANGE NOT NULL, PRIMARY KEY("id", "checkpoint_id")); +CREATE TABLE IF NOT EXISTS "test_schema"."EntityWith63LenghtName______________________________________two"("id" TEXT NOT NULL, PRIMARY KEY("id")); +CREATE TABLE IF NOT EXISTS "test_schema"."envio_history_EntityWith63LenghtName__________________________6"("id" TEXT NOT NULL, "checkpoint_id" INTEGER NOT NULL, "envio_change" "test_schema".ENVIO_HISTORY_CHANGE NOT NULL, PRIMARY KEY("id", "checkpoint_id")); +CREATE TABLE IF NOT EXISTS "test_schema"."EntityWithAllTypes"("arrayOfBigDecimals" TEXT[] NOT NULL, "arrayOfBigInts" TEXT[] NOT NULL, "arrayOfFloats" DOUBLE PRECISION[] NOT NULL, "arrayOfInts" INTEGER[] NOT NULL, "arrayOfStrings" TEXT[] NOT NULL, "bigDecimal" NUMERIC NOT NULL, "bigDecimalWithConfig" NUMERIC(10, 8) NOT NULL, "bigInt" NUMERIC NOT NULL, "bool" BOOLEAN NOT NULL, "enumField" "test_schema".AccountType NOT NULL, "float_" DOUBLE PRECISION NOT NULL, "id" TEXT NOT NULL, "int_" INTEGER NOT NULL, "json" JSONB NOT NULL, "optBigDecimal" NUMERIC, "optBigInt" NUMERIC, "optBool" BOOLEAN, "optEnumField" "test_schema".AccountType, "optFloat" DOUBLE PRECISION, "optInt" INTEGER, "optString" TEXT, "string" TEXT NOT NULL, PRIMARY KEY("id")); +CREATE TABLE IF NOT EXISTS "test_schema"."envio_history_EntityWithAllTypes"("arrayOfBigDecimals" TEXT[], "arrayOfBigInts" TEXT[], "arrayOfFloats" DOUBLE PRECISION[], "arrayOfInts" INTEGER[], "arrayOfStrings" TEXT[], "bigDecimal" NUMERIC, "bigDecimalWithConfig" NUMERIC(10, 8), "bigInt" NUMERIC, "bool" BOOLEAN, "enumField" "test_schema".AccountType, "float_" DOUBLE PRECISION, "id" TEXT NOT NULL, "int_" INTEGER, "json" JSONB, "optBigDecimal" NUMERIC, "optBigInt" NUMERIC, "optBool" BOOLEAN, "optEnumField" "test_schema".AccountType, "optFloat" DOUBLE PRECISION, "optInt" INTEGER, "optString" TEXT, "string" TEXT, "checkpoint_id" INTEGER NOT NULL, "envio_change" "test_schema".ENVIO_HISTORY_CHANGE NOT NULL, PRIMARY KEY("id", "checkpoint_id")); CREATE INDEX IF NOT EXISTS "A_b_id" ON "test_schema"."A"("b_id"); -CREATE INDEX IF NOT EXISTS "A_history_serial" ON "test_schema"."A_history"("serial"); -CREATE INDEX IF NOT EXISTS "B_history_serial" ON "test_schema"."B_history"("serial"); CREATE INDEX IF NOT EXISTS "A_b_id" ON "test_schema"."A"("b_id"); CREATE VIEW "test_schema"."_meta" AS - SELECT - "id" AS "chainId", - "start_block" AS "startBlock", - "end_block" AS "endBlock", - "progress_block" AS "progressBlock", - "buffer_block" AS "bufferBlock", - "first_event_block" AS "firstEventBlock", - "events_processed" AS "eventsProcessed", - "source_block" AS "sourceBlock", - "ready_at" AS "readyAt", - ("ready_at" IS NOT NULL) AS "isReady" - FROM "test_schema"."envio_chains" - ORDER BY "id"; +SELECT + "id" AS "chainId", + "start_block" AS "startBlock", + "end_block" AS "endBlock", + "progress_block" AS "progressBlock", + 
"buffer_block" AS "bufferBlock", + "first_event_block" AS "firstEventBlock", + "events_processed" AS "eventsProcessed", + "source_block" AS "sourceBlock", + "ready_at" AS "readyAt", + ("ready_at" IS NOT NULL) AS "isReady" +FROM "test_schema"."envio_chains" +ORDER BY "id"; CREATE VIEW "test_schema"."chain_metadata" AS - SELECT - "source_block" AS "block_height", - "id" AS "chain_id", - "end_block" AS "end_block", - "first_event_block" AS "first_event_block_number", - "_is_hyper_sync" AS "is_hyper_sync", - "buffer_block" AS "latest_fetched_block_number", - "progress_block" AS "latest_processed_block", - "_num_batches_fetched" AS "num_batches_fetched", - "events_processed" AS "num_events_processed", - "start_block" AS "start_block", - "ready_at" AS "timestamp_caught_up_to_head_or_endblock" - FROM "test_schema"."envio_chains"; -INSERT INTO "test_schema"."envio_chains" ("id", "start_block", "end_block", "source_block", "first_event_block", "buffer_block", "progress_block", "ready_at", "events_processed", "_is_hyper_sync", "_num_batches_fetched") -VALUES (1, 100, 200, 0, NULL, -1, -1, NULL, 0, false, 0), - (137, 0, NULL, 0, NULL, -1, -1, NULL, 0, false, 0);` +SELECT + "source_block" AS "block_height", + "id" AS "chain_id", + "end_block" AS "end_block", + "first_event_block" AS "first_event_block_number", + "_is_hyper_sync" AS "is_hyper_sync", + "buffer_block" AS "latest_fetched_block_number", + "progress_block" AS "latest_processed_block", + "_num_batches_fetched" AS "num_batches_fetched", + "events_processed" AS "num_events_processed", + "start_block" AS "start_block", + "ready_at" AS "timestamp_caught_up_to_head_or_endblock" +FROM "test_schema"."envio_chains"; +INSERT INTO "test_schema"."envio_chains" ("id", "start_block", "end_block", "max_reorg_depth", "source_block", "first_event_block", "buffer_block", "progress_block", "ready_at", "events_processed", "_is_hyper_sync", "_num_batches_fetched") +VALUES (1, 100, 200, 10, 0, NULL, -1, -1, NULL, 0, false, 0), + (137, 0, NULL, 200, 0, NULL, -1, -1, NULL, 0, false, 0);` Assert.equal( mainQuery, expectedMainQuery, ~message="Main query should match expected SQL exactly", ) - - // Functions query should contain both A and B history functions - Assert.ok( - functionsQuery->Js.String2.includes(`CREATE OR REPLACE FUNCTION "insert_A_history"`), - ~message="Should contain A history function", - ) - - Assert.ok( - functionsQuery->Js.String2.includes(`CREATE OR REPLACE FUNCTION "insert_B_history"`), - ~message="Should contain B history function", - ) }, ) @@ -214,6 +228,7 @@ VALUES (1, 100, 200, 0, NULL, -1, -1, NULL, 0, false, 0), ~pgSchema="test_schema", ~pgUser="postgres", ~enums=[], + ~isHasuraEnabled=false, ) // Should return exactly 2 query (main DDL, and a function for cache) @@ -229,38 +244,38 @@ VALUES (1, 100, 200, 0, NULL, -1, -1, NULL, 0, false, 0), CREATE SCHEMA "test_schema"; GRANT ALL ON SCHEMA "test_schema" TO "postgres"; GRANT ALL ON SCHEMA "test_schema" TO public; -CREATE TABLE IF NOT EXISTS "test_schema"."envio_chains"("id" INTEGER NOT NULL, "start_block" INTEGER NOT NULL, "end_block" INTEGER, "buffer_block" INTEGER NOT NULL, "source_block" INTEGER NOT NULL, "first_event_block" INTEGER, "ready_at" TIMESTAMP WITH TIME ZONE NULL, "events_processed" INTEGER NOT NULL, "_is_hyper_sync" BOOLEAN NOT NULL, "progress_block" INTEGER NOT NULL, "_num_batches_fetched" INTEGER NOT NULL, PRIMARY KEY("id")); +CREATE TABLE IF NOT EXISTS "test_schema"."envio_chains"("id" INTEGER NOT NULL, "start_block" INTEGER NOT NULL, "end_block" INTEGER, 
"max_reorg_depth" INTEGER NOT NULL, "buffer_block" INTEGER NOT NULL, "source_block" INTEGER NOT NULL, "first_event_block" INTEGER, "ready_at" TIMESTAMP WITH TIME ZONE NULL, "events_processed" INTEGER NOT NULL, "_is_hyper_sync" BOOLEAN NOT NULL, "progress_block" INTEGER NOT NULL, "_num_batches_fetched" INTEGER NOT NULL, PRIMARY KEY("id")); CREATE TABLE IF NOT EXISTS "test_schema"."persisted_state"("id" SERIAL NOT NULL, "envio_version" TEXT NOT NULL, "config_hash" TEXT NOT NULL, "schema_hash" TEXT NOT NULL, "handler_files_hash" TEXT NOT NULL, "abi_files_hash" TEXT NOT NULL, PRIMARY KEY("id")); -CREATE TABLE IF NOT EXISTS "test_schema"."end_of_block_range_scanned_data"("chain_id" INTEGER NOT NULL, "block_number" INTEGER NOT NULL, "block_hash" TEXT NOT NULL, PRIMARY KEY("chain_id", "block_number")); +CREATE TABLE IF NOT EXISTS "test_schema"."envio_checkpoints"("id" INTEGER NOT NULL, "chain_id" INTEGER NOT NULL, "block_number" INTEGER NOT NULL, "block_hash" TEXT, "events_processed" INTEGER NOT NULL, PRIMARY KEY("id")); CREATE TABLE IF NOT EXISTS "test_schema"."raw_events"("chain_id" INTEGER NOT NULL, "event_id" NUMERIC NOT NULL, "event_name" TEXT NOT NULL, "contract_name" TEXT NOT NULL, "block_number" INTEGER NOT NULL, "log_index" INTEGER NOT NULL, "src_address" TEXT NOT NULL, "block_hash" TEXT NOT NULL, "block_timestamp" INTEGER NOT NULL, "block_fields" JSONB NOT NULL, "transaction_fields" JSONB NOT NULL, "params" JSONB NOT NULL, "serial" SERIAL, PRIMARY KEY("serial")); CREATE VIEW "test_schema"."_meta" AS - SELECT - "id" AS "chainId", - "start_block" AS "startBlock", - "end_block" AS "endBlock", - "progress_block" AS "progressBlock", - "buffer_block" AS "bufferBlock", - "first_event_block" AS "firstEventBlock", - "events_processed" AS "eventsProcessed", - "source_block" AS "sourceBlock", - "ready_at" AS "readyAt", - ("ready_at" IS NOT NULL) AS "isReady" - FROM "test_schema"."envio_chains" - ORDER BY "id"; +SELECT + "id" AS "chainId", + "start_block" AS "startBlock", + "end_block" AS "endBlock", + "progress_block" AS "progressBlock", + "buffer_block" AS "bufferBlock", + "first_event_block" AS "firstEventBlock", + "events_processed" AS "eventsProcessed", + "source_block" AS "sourceBlock", + "ready_at" AS "readyAt", + ("ready_at" IS NOT NULL) AS "isReady" +FROM "test_schema"."envio_chains" +ORDER BY "id"; CREATE VIEW "test_schema"."chain_metadata" AS - SELECT - "source_block" AS "block_height", - "id" AS "chain_id", - "end_block" AS "end_block", - "first_event_block" AS "first_event_block_number", - "_is_hyper_sync" AS "is_hyper_sync", - "buffer_block" AS "latest_fetched_block_number", - "progress_block" AS "latest_processed_block", - "_num_batches_fetched" AS "num_batches_fetched", - "events_processed" AS "num_events_processed", - "start_block" AS "start_block", - "ready_at" AS "timestamp_caught_up_to_head_or_endblock" - FROM "test_schema"."envio_chains";` +SELECT + "source_block" AS "block_height", + "id" AS "chain_id", + "end_block" AS "end_block", + "first_event_block" AS "first_event_block_number", + "_is_hyper_sync" AS "is_hyper_sync", + "buffer_block" AS "latest_fetched_block_number", + "progress_block" AS "latest_processed_block", + "_num_batches_fetched" AS "num_batches_fetched", + "events_processed" AS "num_events_processed", + "start_block" AS "start_block", + "ready_at" AS "timestamp_caught_up_to_head_or_endblock" +FROM "test_schema"."envio_chains";` Assert.equal( mainQuery, @@ -270,8 +285,7 @@ CREATE VIEW "test_schema"."chain_metadata" AS Assert.equal( 
queries->Belt.Array.get(1)->Belt.Option.getExn, - ` -CREATE OR REPLACE FUNCTION get_cache_row_count(table_name text) + `CREATE OR REPLACE FUNCTION get_cache_row_count(table_name text) RETURNS integer AS $$ DECLARE result integer; @@ -296,6 +310,7 @@ $$ LANGUAGE plpgsql;`, ~pgUser="postgres", ~entities, ~enums=[], + ~isHasuraEnabled=false, ) Assert.equal( @@ -311,42 +326,41 @@ $$ LANGUAGE plpgsql;`, CREATE SCHEMA "public"; GRANT ALL ON SCHEMA "public" TO "postgres"; GRANT ALL ON SCHEMA "public" TO public; -CREATE TABLE IF NOT EXISTS "public"."envio_chains"("id" INTEGER NOT NULL, "start_block" INTEGER NOT NULL, "end_block" INTEGER, "buffer_block" INTEGER NOT NULL, "source_block" INTEGER NOT NULL, "first_event_block" INTEGER, "ready_at" TIMESTAMP WITH TIME ZONE NULL, "events_processed" INTEGER NOT NULL, "_is_hyper_sync" BOOLEAN NOT NULL, "progress_block" INTEGER NOT NULL, "_num_batches_fetched" INTEGER NOT NULL, PRIMARY KEY("id")); +CREATE TABLE IF NOT EXISTS "public"."envio_chains"("id" INTEGER NOT NULL, "start_block" INTEGER NOT NULL, "end_block" INTEGER, "max_reorg_depth" INTEGER NOT NULL, "buffer_block" INTEGER NOT NULL, "source_block" INTEGER NOT NULL, "first_event_block" INTEGER, "ready_at" TIMESTAMP WITH TIME ZONE NULL, "events_processed" INTEGER NOT NULL, "_is_hyper_sync" BOOLEAN NOT NULL, "progress_block" INTEGER NOT NULL, "_num_batches_fetched" INTEGER NOT NULL, PRIMARY KEY("id")); CREATE TABLE IF NOT EXISTS "public"."persisted_state"("id" SERIAL NOT NULL, "envio_version" TEXT NOT NULL, "config_hash" TEXT NOT NULL, "schema_hash" TEXT NOT NULL, "handler_files_hash" TEXT NOT NULL, "abi_files_hash" TEXT NOT NULL, PRIMARY KEY("id")); -CREATE TABLE IF NOT EXISTS "public"."end_of_block_range_scanned_data"("chain_id" INTEGER NOT NULL, "block_number" INTEGER NOT NULL, "block_hash" TEXT NOT NULL, PRIMARY KEY("chain_id", "block_number")); +CREATE TABLE IF NOT EXISTS "public"."envio_checkpoints"("id" INTEGER NOT NULL, "chain_id" INTEGER NOT NULL, "block_number" INTEGER NOT NULL, "block_hash" TEXT, "events_processed" INTEGER NOT NULL, PRIMARY KEY("id")); CREATE TABLE IF NOT EXISTS "public"."raw_events"("chain_id" INTEGER NOT NULL, "event_id" NUMERIC NOT NULL, "event_name" TEXT NOT NULL, "contract_name" TEXT NOT NULL, "block_number" INTEGER NOT NULL, "log_index" INTEGER NOT NULL, "src_address" TEXT NOT NULL, "block_hash" TEXT NOT NULL, "block_timestamp" INTEGER NOT NULL, "block_fields" JSONB NOT NULL, "transaction_fields" JSONB NOT NULL, "params" JSONB NOT NULL, "serial" SERIAL, PRIMARY KEY("serial")); CREATE TABLE IF NOT EXISTS "public"."A"("b_id" TEXT NOT NULL, "id" TEXT NOT NULL, "optionalStringToTestLinkedEntities" TEXT, PRIMARY KEY("id")); -CREATE TABLE IF NOT EXISTS "public"."A_history"("entity_history_block_timestamp" INTEGER NOT NULL, "entity_history_chain_id" INTEGER NOT NULL, "entity_history_block_number" INTEGER NOT NULL, "entity_history_log_index" INTEGER NOT NULL, "previous_entity_history_block_timestamp" INTEGER, "previous_entity_history_chain_id" INTEGER, "previous_entity_history_block_number" INTEGER, "previous_entity_history_log_index" INTEGER, "b_id" TEXT, "id" TEXT NOT NULL, "optionalStringToTestLinkedEntities" TEXT, "action" "public".ENTITY_HISTORY_ROW_ACTION NOT NULL, "serial" SERIAL, PRIMARY KEY("entity_history_block_timestamp", "entity_history_chain_id", "entity_history_block_number", "entity_history_log_index", "id")); +CREATE TABLE IF NOT EXISTS "public"."envio_history_A"("b_id" TEXT, "id" TEXT NOT NULL, "optionalStringToTestLinkedEntities" TEXT, "checkpoint_id" INTEGER 
NOT NULL, "envio_change" "public".ENVIO_HISTORY_CHANGE NOT NULL, PRIMARY KEY("id", "checkpoint_id")); CREATE INDEX IF NOT EXISTS "A_b_id" ON "public"."A"("b_id"); -CREATE INDEX IF NOT EXISTS "A_history_serial" ON "public"."A_history"("serial"); CREATE VIEW "public"."_meta" AS - SELECT - "id" AS "chainId", - "start_block" AS "startBlock", - "end_block" AS "endBlock", - "progress_block" AS "progressBlock", - "buffer_block" AS "bufferBlock", - "first_event_block" AS "firstEventBlock", - "events_processed" AS "eventsProcessed", - "source_block" AS "sourceBlock", - "ready_at" AS "readyAt", - ("ready_at" IS NOT NULL) AS "isReady" - FROM "public"."envio_chains" - ORDER BY "id"; +SELECT + "id" AS "chainId", + "start_block" AS "startBlock", + "end_block" AS "endBlock", + "progress_block" AS "progressBlock", + "buffer_block" AS "bufferBlock", + "first_event_block" AS "firstEventBlock", + "events_processed" AS "eventsProcessed", + "source_block" AS "sourceBlock", + "ready_at" AS "readyAt", + ("ready_at" IS NOT NULL) AS "isReady" +FROM "public"."envio_chains" +ORDER BY "id"; CREATE VIEW "public"."chain_metadata" AS - SELECT - "source_block" AS "block_height", - "id" AS "chain_id", - "end_block" AS "end_block", - "first_event_block" AS "first_event_block_number", - "_is_hyper_sync" AS "is_hyper_sync", - "buffer_block" AS "latest_fetched_block_number", - "progress_block" AS "latest_processed_block", - "_num_batches_fetched" AS "num_batches_fetched", - "events_processed" AS "num_events_processed", - "start_block" AS "start_block", - "ready_at" AS "timestamp_caught_up_to_head_or_endblock" - FROM "public"."envio_chains";` +SELECT + "source_block" AS "block_height", + "id" AS "chain_id", + "end_block" AS "end_block", + "first_event_block" AS "first_event_block_number", + "_is_hyper_sync" AS "is_hyper_sync", + "buffer_block" AS "latest_fetched_block_number", + "progress_block" AS "latest_processed_block", + "_num_batches_fetched" AS "num_batches_fetched", + "events_processed" AS "num_events_processed", + "start_block" AS "start_block", + "ready_at" AS "timestamp_caught_up_to_head_or_endblock" +FROM "public"."envio_chains";` Assert.equal( mainQuery, @@ -355,9 +369,18 @@ CREATE VIEW "public"."chain_metadata" AS ) // Verify functions query contains the A history function - Assert.ok( - functionsQuery->Js.String2.includes(`CREATE OR REPLACE FUNCTION "insert_A_history"`), - ~message="Should contain A history function definition", + Assert.equal( + functionsQuery, + `CREATE OR REPLACE FUNCTION get_cache_row_count(table_name text) +RETURNS integer AS $$ +DECLARE + result integer; +BEGIN + EXECUTE format('SELECT COUNT(*) FROM "public".%I', table_name) INTO result; + RETURN result; +END; +$$ LANGUAGE plpgsql;`, + ~message="Should contain cache row count function definition", ) }, ) @@ -544,6 +567,41 @@ WHERE "id" = $1;` ) }) + describe("InternalTable.Checkpoints.makeGetReorgCheckpointsQuery", () => { + Async.it( + "Should generate optimized SQL query with CTE", + async () => { + let query = InternalTable.Checkpoints.makeGetReorgCheckpointsQuery(~pgSchema="test_schema") + + // The query should use a CTE to pre-filter chains and compute safe_block + let expectedQuery = `WITH reorg_chains AS ( + SELECT + "id" as id, + "source_block" - "max_reorg_depth" AS safe_block + FROM "test_schema"."envio_chains" + WHERE "max_reorg_depth" > 0 + AND "progress_block" > "source_block" - "max_reorg_depth" +) +SELECT + cp."id", + cp."chain_id", + cp."block_number", + cp."block_hash" +FROM "test_schema"."envio_checkpoints" cp +INNER JOIN 
reorg_chains rc + ON cp."chain_id" = rc.id +WHERE cp."block_hash" IS NOT NULL + AND cp."block_number" >= rc.safe_block;` + + Assert.equal( + query, + expectedQuery, + ~message="Should generate optimized CTE query filtering chains outside reorg threshold", + ) + }, + ) + }) + describe("InternalTable.Chains.makeInitialValuesQuery", () => { Async.it( "Should return empty string for empty chain configs", @@ -568,7 +626,7 @@ WHERE "id" = $1;` id: 1, startBlock: 100, endBlock: 200, - confirmedBlockThreshold: 5, + maxReorgDepth: 5, contracts: [], sources: [], } @@ -578,8 +636,8 @@ WHERE "id" = $1;` ~chainConfigs=[chainConfig], ) - let expectedQuery = `INSERT INTO "test_schema"."envio_chains" ("id", "start_block", "end_block", "source_block", "first_event_block", "buffer_block", "progress_block", "ready_at", "events_processed", "_is_hyper_sync", "_num_batches_fetched") -VALUES (1, 100, 200, 0, NULL, -1, -1, NULL, 0, false, 0);` + let expectedQuery = `INSERT INTO "test_schema"."envio_chains" ("id", "start_block", "end_block", "max_reorg_depth", "source_block", "first_event_block", "buffer_block", "progress_block", "ready_at", "events_processed", "_is_hyper_sync", "_num_batches_fetched") +VALUES (1, 100, 200, 5, 0, NULL, -1, -1, NULL, 0, false, 0);` Assert.equal( query, @@ -595,7 +653,7 @@ VALUES (1, 100, 200, 0, NULL, -1, -1, NULL, 0, false, 0);` let chainConfig: InternalConfig.chain = { id: 1, startBlock: 100, - confirmedBlockThreshold: 5, + maxReorgDepth: 5, contracts: [], sources: [], } @@ -605,8 +663,8 @@ VALUES (1, 100, 200, 0, NULL, -1, -1, NULL, 0, false, 0);` ~chainConfigs=[chainConfig], ) - let expectedQuery = `INSERT INTO "public"."envio_chains" ("id", "start_block", "end_block", "source_block", "first_event_block", "buffer_block", "progress_block", "ready_at", "events_processed", "_is_hyper_sync", "_num_batches_fetched") -VALUES (1, 100, NULL, 0, NULL, -1, -1, NULL, 0, false, 0);` + let expectedQuery = `INSERT INTO "public"."envio_chains" ("id", "start_block", "end_block", "max_reorg_depth", "source_block", "first_event_block", "buffer_block", "progress_block", "ready_at", "events_processed", "_is_hyper_sync", "_num_batches_fetched") +VALUES (1, 100, NULL, 5, 0, NULL, -1, -1, NULL, 0, false, 0);` Assert.equal( query, @@ -623,7 +681,7 @@ VALUES (1, 100, NULL, 0, NULL, -1, -1, NULL, 0, false, 0);` id: 1, startBlock: 100, endBlock: 200, - confirmedBlockThreshold: 5, + maxReorgDepth: 5, contracts: [], sources: [], } @@ -631,7 +689,7 @@ VALUES (1, 100, NULL, 0, NULL, -1, -1, NULL, 0, false, 0);` let chainConfig2: InternalConfig.chain = { id: 42, startBlock: 500, - confirmedBlockThreshold: 0, + maxReorgDepth: 0, contracts: [], sources: [], } @@ -641,9 +699,9 @@ VALUES (1, 100, NULL, 0, NULL, -1, -1, NULL, 0, false, 0);` ~chainConfigs=[chainConfig1, chainConfig2], ) - let expectedQuery = `INSERT INTO "production"."envio_chains" ("id", "start_block", "end_block", "source_block", "first_event_block", "buffer_block", "progress_block", "ready_at", "events_processed", "_is_hyper_sync", "_num_batches_fetched") -VALUES (1, 100, 200, 0, NULL, -1, -1, NULL, 0, false, 0), - (42, 500, NULL, 0, NULL, -1, -1, NULL, 0, false, 0);` + let expectedQuery = `INSERT INTO "production"."envio_chains" ("id", "start_block", "end_block", "max_reorg_depth", "source_block", "first_event_block", "buffer_block", "progress_block", "ready_at", "events_processed", "_is_hyper_sync", "_num_batches_fetched") +VALUES (1, 100, 200, 5, 0, NULL, -1, -1, NULL, 0, false, 0), + (42, 500, NULL, 0, 0, NULL, -1, -1, NULL, 0, false, 0);` 
Assert.equal( query, @@ -653,4 +711,130 @@ VALUES (1, 100, 200, 0, NULL, -1, -1, NULL, 0, false, 0), }, ) }) + + describe("InternalTable.Chains.makeGetInitialStateQuery", () => { + Async.it( + "Should create correct SQL for initial state query", + async () => { + let query = InternalTable.Chains.makeGetInitialStateQuery(~pgSchema="test_schema") + + let expectedQuery = `SELECT "id" as "id", +"start_block" as "startBlock", +"end_block" as "endBlock", +"max_reorg_depth" as "maxReorgDepth", +"first_event_block" as "firstEventBlockNumber", +"ready_at" as "timestampCaughtUpToHeadOrEndblock", +"events_processed" as "numEventsProcessed", +"progress_block" as "progressBlockNumber", +( + SELECT COALESCE(json_agg(json_build_object( + 'address', "contract_address", + 'contractName', "contract_name", + 'startBlock', "registering_event_block_number", + 'registrationBlock', "registering_event_block_number" + )), '[]'::json) + FROM "test_schema"."dynamic_contract_registry" + WHERE "chain_id" = chains."id" +) as "dynamicContracts" +FROM "test_schema"."envio_chains" as chains;` + + Assert.equal(query, expectedQuery, ~message="Initial state SQL should match exactly") + }, + ) + }) + + describe("InternalTable.Checkpoints.makeCommitedCheckpointIdQuery", () => { + Async.it( + "Should create correct SQL to get committed checkpoint id", + async () => { + let query = InternalTable.Checkpoints.makeCommitedCheckpointIdQuery(~pgSchema="test_schema") + + Assert.equal( + query, + `SELECT COALESCE(MAX(id), 0) AS id FROM "test_schema"."envio_checkpoints";`, + ~message="Committed checkpoint id SQL should match exactly", + ) + }, + ) + }) + + describe("InternalTable.Checkpoints.makeInsertCheckpointQuery", () => { + Async.it( + "Should create correct SQL for inserting checkpoints with unnest", + async () => { + let query = InternalTable.Checkpoints.makeInsertCheckpointQuery(~pgSchema="test_schema") + + let expectedQuery = `INSERT INTO "test_schema"."envio_checkpoints" ("id", "chain_id", "block_number", "block_hash", "events_processed") +SELECT * FROM unnest($1::INTEGER[],$2::INTEGER[],$3::INTEGER[],$4::TEXT[],$5::INTEGER[]);` + + Assert.equal(query, expectedQuery, ~message="Insert checkpoints SQL should match exactly") + }, + ) + }) + + describe("InternalTable.Checkpoints.makePruneStaleCheckpointsQuery", () => { + Async.it( + "Should create correct SQL for pruning stale checkpoints", + async () => { + let query = InternalTable.Checkpoints.makePruneStaleCheckpointsQuery( + ~pgSchema="test_schema", + ) + + Assert.equal( + query, + `DELETE FROM "test_schema"."envio_checkpoints" WHERE "id" < $1;`, + ~message="Prune stale checkpoints SQL should match exactly", + ) + }, + ) + }) + + describe("InternalTable.Checkpoints.makeGetRollbackTargetCheckpointQuery", () => { + Async.it( + "Should create correct SQL for rollback target checkpoint", + async () => { + let query = InternalTable.Checkpoints.makeGetRollbackTargetCheckpointQuery( + ~pgSchema="test_schema", + ) + + let expectedQuery = `SELECT "id" FROM "test_schema"."envio_checkpoints" +WHERE + "chain_id" = $1 AND + "block_number" <= $2 +ORDER BY "id" DESC +LIMIT 1;` + + Assert.equal( + query, + expectedQuery, + ~message="Rollback target checkpoint SQL should match exactly", + ) + }, + ) + }) + + describe("InternalTable.Checkpoints.makeGetRollbackProgressDiffQuery", () => { + Async.it( + "Should create correct SQL for rollback progress diff", + async () => { + let query = InternalTable.Checkpoints.makeGetRollbackProgressDiffQuery( + ~pgSchema="test_schema", + ) + + let 
expectedQuery = `SELECT + "chain_id", + SUM("events_processed") as events_processed_diff, + MIN("block_number") - 1 as new_progress_block_number +FROM "test_schema"."envio_checkpoints" +WHERE "id" > $1 +GROUP BY "chain_id";` + + Assert.equal( + query, + expectedQuery, + ~message="Rollback progress diff SQL should match exactly", + ) + }, + ) + }) }) diff --git a/scenarios/test_codegen/test/lib_tests/SourceManager_test.res b/scenarios/test_codegen/test/lib_tests/SourceManager_test.res index aa781affa..f0be6ac5a 100644 --- a/scenarios/test_codegen/test/lib_tests/SourceManager_test.res +++ b/scenarios/test_codegen/test/lib_tests/SourceManager_test.res @@ -168,10 +168,10 @@ describe("SourceManager fetchNext", () => { indexingContracts->Js.Dict.set( address->Address.toString, { - FetchState.contractName, + Internal.contractName, startBlock: 0, address, - register: Config, + registrationBlock: None, }, ) }, @@ -194,7 +194,6 @@ describe("SourceManager fetchNext", () => { chainId: 0, indexingContracts, contractConfigs: Js.Dict.empty(), - dcsToStore: [], blockLag: 0, onBlockConfigs: [], // All the null values should be computed during updateInternal diff --git a/scenarios/test_codegen/test/rollback/MockChainData.res b/scenarios/test_codegen/test/rollback/MockChainData.res index 0c7fddf83..8c7e16288 100644 --- a/scenarios/test_codegen/test/rollback/MockChainData.res +++ b/scenarios/test_codegen/test/rollback/MockChainData.res @@ -1,9 +1 @@ -module Indexer = { - module ErrorHandling = ErrorHandling - module Types = Types - module Config = Config - module Source = Source - module FetchState = FetchState -} - -include Helpers.ChainMocking.Make(Indexer) +include Helpers.ChainMocking.Make() diff --git a/scenarios/test_codegen/test/rollback/Rollback_test.res b/scenarios/test_codegen/test/rollback/Rollback_test.res index 08d049694..4889e029e 100644 --- a/scenarios/test_codegen/test/rollback/Rollback_test.res +++ b/scenarios/test_codegen/test/rollback/Rollback_test.res @@ -112,10 +112,10 @@ module Stubs = { let dispatchAllTasks = async (gsManager, mockChainData) => { let tasksToRun = tasks.contents tasks := [] - let _ = - await tasksToRun - ->Array.map(task => dispatchTask(gsManager, mockChainData, task)) - ->Js.Promise.all + for idx in 0 to tasksToRun->Array.length - 1 { + let taskToRun = tasksToRun->Array.getUnsafe(idx) + await dispatchTask(gsManager, mockChainData, taskToRun) + } } } @@ -158,31 +158,23 @@ describe("Single Chain Simple Rollback", () => { await dispatchTaskInitalChain(NextQuery(Chain(chain))) - Assert.deepEqual( - tasks.contents, - [NextQuery(Chain(chain))], - ~message="should only be one task of next query now that currentBlockHeight is set", - ) + Assert.deepEqual(tasks.contents, [NextQuery(CheckAllChains)]) await dispatchAllTasksInitalChain() - let block2 = Mock.mockChainData->MockChainData.getBlock(~blockNumber=2)->Option.getUnsafe - Assert.deepEqual( - tasks.contents->Utils.getVariantsTags, - ["UpdateEndOfBlockRangeScannedData", "ProcessPartitionQueryResponse"], - ) - Assert.deepEqual( - tasks.contents->Js.Array2.unsafe_get(0), - UpdateEndOfBlockRangeScannedData({ - blockNumberThreshold: -198, - chain: MockConfig.chain1337, - nextEndOfBlockRangeScannedData: { - blockHash: block2.blockHash, - blockNumber: block2.blockNumber, - chainId: 1337, - }, - }), - ) + Assert.deepEqual(tasks.contents->Utils.getVariantsTags, ["ProcessPartitionQueryResponse"]) + // Assert.deepEqual( + // tasks.contents->Js.Array2.unsafe_get(0), + // UpdateEndOfBlockRangeScannedData({ + // blockNumberThreshold: 
-198, + // chain: MockConfig.chain1337, + // nextEndOfBlockRangeScannedData: { + // blockHash: block2.blockHash, + // blockNumber: block2.blockNumber, + // chainId: 1337, + // }, + // }), + // ) await dispatchAllTasksInitalChain() @@ -224,31 +216,23 @@ await dispatchTaskInitalChain(NextQuery(Chain(chain))) - Assert.deepEqual( - tasks.contents, - [NextQuery(Chain(chain))], - ~message="should only be one task of next query now that currentBlockHeight is set", - ) + Assert.deepEqual(tasks.contents, [NextQuery(CheckAllChains)]) await dispatchAllTasksInitalChain() - let block2 = Mock.mockChainData->MockChainData.getBlock(~blockNumber=2)->Option.getUnsafe - Assert.deepEqual( - tasks.contents->Utils.getVariantsTags, - ["UpdateEndOfBlockRangeScannedData", "ProcessPartitionQueryResponse"], - ) - Assert.deepEqual( - tasks.contents->Js.Array2.unsafe_get(0), - UpdateEndOfBlockRangeScannedData({ - blockNumberThreshold: -198, - chain: MockConfig.chain1337, - nextEndOfBlockRangeScannedData: { - blockHash: block2.blockHash, - blockNumber: block2.blockNumber, - chainId: 1337, - }, - }), - ) + Assert.deepEqual(tasks.contents->Utils.getVariantsTags, ["ProcessPartitionQueryResponse"]) + // Assert.deepEqual( + // tasks.contents->Js.Array2.unsafe_get(0), + // UpdateEndOfBlockRangeScannedData({ + // blockNumberThreshold: -198, + // chain: MockConfig.chain1337, + // nextEndOfBlockRangeScannedData: { + // blockHash: block2.blockHash, + // blockNumber: block2.blockNumber, + // chainId: 1337, + // }, + // }), + // ) await dispatchAllTasksInitalChain() @@ -305,10 +289,10 @@ tasks.contents, [ GlobalState.NextQuery(CheckAllChains), - Rollback, UpdateChainMetaDataAndCheckForExit(NoExit), ProcessEventBatch, PruneStaleEntityHistory, + Rollback, ], ~message="should detect rollback with reorg chain", ) @@ -317,36 +301,36 @@ Assert.deepEqual( tasks.contents, - [GlobalState.NextQuery(CheckAllChains), ProcessEventBatch], - ~message="Rollback should have actioned, and now next queries and process event batch should action", + [Rollback], + ~message="Should finish processing current batch and fire rollback again", ) await dispatchAllTasksReorgChain() - let block2 = - Mock.mockChainDataReorg - ->MockChainData.getBlock(~blockNumber=2) - ->Option.getUnsafe - Assert.deepEqual( - tasks.contents->Utils.getVariantsTags, - ["UpdateEndOfBlockRangeScannedData", "ProcessPartitionQueryResponse"], - ) - Assert.deepEqual( - tasks.contents->Js.Array2.unsafe_get(0), - GlobalState.UpdateEndOfBlockRangeScannedData({ - blockNumberThreshold: -198, - chain: MockConfig.chain1337, - nextEndOfBlockRangeScannedData: { - blockHash: block2.blockHash, - blockNumber: block2.blockNumber, - chainId: 1337, - }, - }), + tasks.contents, + [GlobalState.NextQuery(CheckAllChains), ProcessEventBatch], + ~message="Rollback should have actioned, and now next queries and process event batch should action", ) await dispatchAllTasksReorgChain() + Assert.deepEqual(tasks.contents->Utils.getVariantsTags, ["ProcessPartitionQueryResponse"]) + // Assert.deepEqual( + // tasks.contents->Js.Array2.unsafe_get(0), + // GlobalState.UpdateEndOfBlockRangeScannedData({ + // blockNumberThreshold: -198, + // chain: MockConfig.chain1337, + // nextEndOfBlockRangeScannedData: { + // blockHash: block2.blockHash, + // blockNumber: block2.blockNumber, + // chainId: 1337, + // }, + // }), + // ) + + await dispatchAllTasksReorgChain() + 
Assert.deepEqual( tasks.contents, [UpdateChainMetaDataAndCheckForExit(NoExit), ProcessEventBatch, NextQuery(Chain(chain))], @@ -355,34 +339,28 @@ describe("Single Chain Simple Rollback", () => { await dispatchAllTasksReorgChain() - let block4 = - Mock.mockChainDataReorg - ->MockChainData.getBlock(~blockNumber=4) - ->Option.getUnsafe - Assert.deepEqual( tasks.contents->Utils.getVariantsTags, [ "NextQuery", - "UpdateEndOfBlockRangeScannedData", - "ProcessPartitionQueryResponse", "UpdateChainMetaDataAndCheckForExit", "ProcessEventBatch", "PruneStaleEntityHistory", + "ProcessPartitionQueryResponse", ], ) - Assert.deepEqual( - tasks.contents->Js.Array2.unsafe_get(1), - GlobalState.UpdateEndOfBlockRangeScannedData({ - blockNumberThreshold: -196, - chain: MockConfig.chain1337, - nextEndOfBlockRangeScannedData: { - blockHash: block4.blockHash, - blockNumber: block4.blockNumber, - chainId: 1337, - }, - }), - ) + // Assert.deepEqual( + // tasks.contents->Js.Array2.unsafe_get(1), + // GlobalState.UpdateEndOfBlockRangeScannedData({ + // blockNumberThreshold: -196, + // chain: MockConfig.chain1337, + // nextEndOfBlockRangeScannedData: { + // blockHash: block4.blockHash, + // blockNumber: block4.blockNumber, + // chainId: 1337, + // }, + // }), + // ) let expectedGravatars: array = [ { @@ -417,7 +395,11 @@ describe("Single Chain Simple Rollback", () => { let undefined = (%raw(`undefined`): option<'a>) describe("E2E rollback tests", () => { - let testSingleChainRollback = async (~sourceMock: M.Source.t, ~indexerMock: M.Indexer.t) => { + let testSingleChainRollback = async ( + ~sourceMock: M.Source.t, + ~indexerMock: M.Indexer.t, + ~firstHistoryCheckpointId=2, + ) => { Assert.deepEqual( sourceMock.getItemsOrThrowCalls->Utils.Array.last, Some({ @@ -455,11 +437,18 @@ describe("E2E rollback tests", () => { blockNumber: 101, logIndex: 1, handler: async ({context}) => { - // This should create a new history row + // This should overwrite the previous value + // set on log index 0. No history rows should be created + // since they are per batch now. 
context.simpleEntity.set({ id: "2", value: "value-2", }) + + context.simpleEntity.set({ + id: "4", + value: "value-1", + }) }, }, { @@ -471,6 +460,9 @@ describe("E2E rollback tests", () => { id: "3", value: "value-1", }) + + // Test rollback of creating + deleting an entity + context.simpleEntity.deleteUnsafe("4") }, }, { @@ -490,12 +482,30 @@ describe("E2E rollback tests", () => { ) await indexerMock.getBatchWritePromise() + Assert.deepEqual( - await Promise.all2(( + await Promise.all3(( + indexerMock.queryCheckpoints(), indexerMock.query(module(Entities.SimpleEntity)), indexerMock.queryHistory(module(Entities.SimpleEntity)), )), ( + [ + { + id: firstHistoryCheckpointId, + blockHash: Js.Null.empty, + blockNumber: 101, + chainId: 1337, + eventsProcessed: 2, + }, + { + id: firstHistoryCheckpointId + 1, + blockHash: Js.Null.Value("0x102"), + blockNumber: 102, + chainId: 1337, + eventsProcessed: 1, + }, + ], [ { Entities.SimpleEntity.id: "1", @@ -512,62 +522,42 @@ describe("E2E rollback tests", () => { ], [ { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 0, - }, - previous: undefined, - entityData: Set({ + checkpointId: firstHistoryCheckpointId, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "value-2", }), }, { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 0, - }, - previous: undefined, - entityData: Set({ + checkpointId: firstHistoryCheckpointId, + entityId: "2", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "2", - value: "value-1", + value: "value-2", }), }, { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 1, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 0, - }), - entityData: Set({ - Entities.SimpleEntity.id: "2", - value: "value-2", + checkpointId: firstHistoryCheckpointId + 1, + entityId: "3", + entityUpdateAction: Set({ + Entities.SimpleEntity.id: "3", + value: "value-1", }), }, { - current: { - chain_id: 1337, - block_timestamp: 102, - block_number: 102, - log_index: 0, - }, - previous: undefined, - entityData: Set({ - Entities.SimpleEntity.id: "3", + checkpointId: firstHistoryCheckpointId, + entityId: "4", + entityUpdateAction: Set({ + Entities.SimpleEntity.id: "4", value: "value-1", }), }, + { + checkpointId: firstHistoryCheckpointId + 1, + entityId: "4", + entityUpdateAction: Delete, + }, ], ), ~message="Should have two entities in the db", @@ -650,11 +640,21 @@ describe("E2E rollback tests", () => { await indexerMock.getBatchWritePromise() Assert.deepEqual( - await Promise.all2(( + await Promise.all3(( + indexerMock.queryCheckpoints(), indexerMock.query(module(Entities.SimpleEntity)), indexerMock.queryHistory(module(Entities.SimpleEntity)), )), ( + [ + { + id: 1, + blockHash: Js.Null.Value("0x101"), + blockNumber: 101, + chainId: 1337, + eventsProcessed: 1, + }, + ], [ { Entities.SimpleEntity.id: "1", @@ -667,27 +667,17 @@ describe("E2E rollback tests", () => { ], [ { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 1, - }, - previous: undefined, - entityData: Set({ + checkpointId: 1, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "value-1", }), }, { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 1, - }, - previous: undefined, - entityData: Set({ + checkpointId: 1, + entityId: "2", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "2", 
value: "value-2", }), @@ -698,6 +688,105 @@ describe("E2E rollback tests", () => { ) } + Async.it("Should re-enter reorg threshold on restart", async () => { + let sourceMock1337 = M.Source.make( + [#getHeightOrThrow, #getItemsOrThrow, #getBlockHashes], + ~chain=#1337, + ) + let sourceMock100 = M.Source.make( + [#getHeightOrThrow, #getItemsOrThrow, #getBlockHashes], + ~chain=#100, + ) + let chains = [ + { + M.Indexer.chain: #1337, + sources: [sourceMock1337.source], + }, + { + M.Indexer.chain: #100, + sources: [sourceMock100.source], + }, + ] + let indexerMock = await M.Indexer.make(~chains) + await Utils.delay(0) + + let _ = await Promise.all2(( + M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock=sourceMock1337), + M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock=sourceMock100), + )) + + Assert.deepEqual( + sourceMock1337.getItemsOrThrowCalls->Utils.Array.last, + Some({ + "fromBlock": 101, + "toBlock": None, + "retry": 0, + }), + ~message="Should enter reorg threshold and request now to the latest block", + ) + sourceMock1337.resolveGetItemsOrThrow([], ~latestFetchedBlockNumber=110) + await indexerMock.getBatchWritePromise() + + Assert.deepEqual( + await indexerMock.metric("envio_reorg_threshold"), + [{value: "1", labels: Js.Dict.empty()}], + ) + + let indexerMock = await indexerMock.restart() + + sourceMock1337.getHeightOrThrowCalls->Utils.Array.clearInPlace + sourceMock1337.getItemsOrThrowCalls->Utils.Array.clearInPlace + + await Utils.delay(0) + + Assert.deepEqual( + await indexerMock.metric("envio_reorg_threshold"), + [{value: "0", labels: Js.Dict.empty()}], + ) + + Assert.deepEqual( + sourceMock1337.getHeightOrThrowCalls->Array.length, + 1, + ~message="should have called getHeightOrThrow on restart", + ) + sourceMock1337.resolveGetHeightOrThrow(300) + await Utils.delay(0) + await Utils.delay(0) + + Assert.deepEqual( + sourceMock1337.getItemsOrThrowCalls->Utils.Array.last, + Some({ + "fromBlock": 111, + "toBlock": None, + "retry": 0, + }), + ~message="Should enter reorg threshold for the second time and request now to the latest block", + ) + + sourceMock1337.resolveGetItemsOrThrow( + [], + ~latestFetchedBlockNumber=200, + ~currentBlockHeight=320, + ) + + await indexerMock.getBatchWritePromise() + + Assert.deepEqual( + sourceMock1337.getItemsOrThrowCalls->Utils.Array.last, + Some({ + "fromBlock": 201, + "toBlock": None, + "retry": 0, + }), + ~message="Should enter reorg threshold for the second time and request now to the latest block", + ) + + Assert.deepEqual( + await indexerMock.metric("envio_reorg_threshold"), + [{value: "1", labels: Js.Dict.empty()}], + ) + }) + Async.it("Rollback of a single chain indexer", async () => { let sourceMock = M.Source.make( [#getHeightOrThrow, #getItemsOrThrow, #getBlockHashes], @@ -713,10 +802,128 @@ describe("E2E rollback tests", () => { ) await Utils.delay(0) - await M.Helper.initialEnterReorgThreshold(~sourceMock) + await M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock) await testSingleChainRollback(~sourceMock, ~indexerMock) }) + Async.it( + "Stores checkpoints inside of the reorg threshold for batches without items", + async () => { + let sourceMock = M.Source.make( + [#getHeightOrThrow, #getItemsOrThrow, #getBlockHashes], + ~chain=#1337, + ) + let indexerMock = await M.Indexer.make( + ~chains=[ + { + chain: #1337, + sources: [sourceMock.source], + }, + ], + ) + await Utils.delay(0) + + await M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock) + + sourceMock.resolveGetItemsOrThrow([], 
~latestFetchedBlockNumber=102) + + await indexerMock.getBatchWritePromise() + + Assert.deepEqual( + await indexerMock.queryCheckpoints(), + [ + { + id: 2, + eventsProcessed: 0, + chainId: 1337, + blockNumber: 102, + blockHash: Js.Null.Value("0x102"), + }, + ], + ~message="Should have added a checkpoint even though there are no items in the batch", + ) + }, + ) + + Async.it("Shouldn't detect reorg for rollbacked block", async () => { + let sourceMock = M.Source.make( + [#getHeightOrThrow, #getItemsOrThrow, #getBlockHashes], + ~chain=#1337, + ) + let indexerMock = await M.Indexer.make( + ~chains=[ + { + chain: #1337, + sources: [sourceMock.source], + }, + ], + ) + await Utils.delay(0) + + await M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock) + + sourceMock.resolveGetItemsOrThrow([], ~latestFetchedBlockNumber=102) + await indexerMock.getBatchWritePromise() + + sourceMock.resolveGetItemsOrThrow( + [], + ~latestFetchedBlockNumber=103, + ~prevRangeLastBlock={ + blockNumber: 102, + blockHash: "0x102-reorged", + }, + ) + await Utils.delay(0) + await Utils.delay(0) + + Assert.deepEqual( + sourceMock.getBlockHashesCalls, + [[100, 102]], + ~message="Should have called getBlockHashes to find rollback depth", + ) + sourceMock.resolveGetBlockHashes([ + // The block 100 is untouched so we can rollback to it + {blockNumber: 100, blockHash: "0x100", blockTimestamp: 100}, + {blockNumber: 102, blockHash: "0x102-reorged", blockTimestamp: 102}, + ]) + + sourceMock.getItemsOrThrowCalls->Utils.Array.clearInPlace + + await indexerMock.getRollbackReadyPromise() + Assert.deepEqual( + sourceMock.getItemsOrThrowCalls, + [ + { + "fromBlock": 101, + "toBlock": None, + "retry": 0, + }, + ], + ~message="Should rollback fetch state and re-request items", + ) + + sourceMock.resolveGetItemsOrThrow( + [], + ~latestFetchedBlockNumber=102, + ~latestFetchedBlockHash="0x102-reorged", + ) + await indexerMock.getBatchWritePromise() + + Assert.deepEqual( + await indexerMock.queryCheckpoints(), + [ + { + id: 1, + eventsProcessed: 0, + chainId: 1337, + blockNumber: 102, + blockHash: Js.Null.Value("0x102-reorged"), + }, + ], + ~message="Should update the checkpoint without retriggering a reorg", + ) + }) + Async.it( "Single chain rollback should also work for unordered multichain indexer when another chains are stale", async () => { @@ -743,11 +950,15 @@ describe("E2E rollback tests", () => { await Utils.delay(0) let _ = await Promise.all2(( - M.Helper.initialEnterReorgThreshold(~sourceMock=sourceMock1), - M.Helper.initialEnterReorgThreshold(~sourceMock=sourceMock2), + M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock=sourceMock1), + M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock=sourceMock2), )) - await testSingleChainRollback(~sourceMock=sourceMock1, ~indexerMock) + await testSingleChainRollback( + ~sourceMock=sourceMock1, + ~indexerMock, + ~firstHistoryCheckpointId=3, + ) }, ) @@ -766,7 +977,7 @@ describe("E2E rollback tests", () => { ) await Utils.delay(0) - await M.Helper.initialEnterReorgThreshold(~sourceMock) + await M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock) let calls = [] let handler: Types.HandlerTypes.loader = async ({event}) => { @@ -1027,8 +1238,8 @@ This might be wrong after we start exposing a block hash for progress block.`, await Utils.delay(0) let _ = await Promise.all2(( - M.Helper.initialEnterReorgThreshold(~sourceMock=sourceMock1337), - M.Helper.initialEnterReorgThreshold(~sourceMock=sourceMock100), + M.Helper.initialEnterReorgThreshold(~indexerMock, 
~sourceMock=sourceMock1337), + M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock=sourceMock100), )) let callCount = ref(0) @@ -1083,21 +1294,71 @@ }, ]) await indexerMock.getBatchWritePromise() - sourceMock1337.resolveGetItemsOrThrow([ - { - blockNumber: 102, - logIndex: 4, - handler, - }, - ]) + sourceMock1337.resolveGetItemsOrThrow( + [ + { + blockNumber: 103, + logIndex: 4, + handler, + }, + ], + ~latestFetchedBlockNumber=105, + ) await indexerMock.getBatchWritePromise() Assert.deepEqual( - await Promise.all2(( + await Promise.all3(( + indexerMock.queryCheckpoints(), indexerMock.query(module(Entities.SimpleEntity)), indexerMock.queryHistory(module(Entities.SimpleEntity)), )), ( + [ + { + id: 3, + eventsProcessed: 1, + chainId: 100, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + { + id: 4, + eventsProcessed: 2, + chainId: 1337, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + { + id: 5, + eventsProcessed: 1, + chainId: 1337, + blockNumber: 102, + blockHash: Js.Null.Value("0x102"), + }, + { + id: 6, + eventsProcessed: 1, + chainId: 100, + blockNumber: 102, + blockHash: Js.Null.Value("0x102"), + }, + { + id: 7, + eventsProcessed: 1, + chainId: 1337, + blockNumber: 103, + blockHash: Js.Null.Null, + }, + // Block 104 is skipped, since we don't have + // either events processed or block hash for it + { + id: 8, + eventsProcessed: 0, + chainId: 1337, + blockNumber: 105, + blockHash: Js.Null.Value("0x105"), + }, + ], [ { Entities.SimpleEntity.id: "1", @@ -1106,114 +1367,75 @@ ], [ { - current: { - chain_id: 100, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }, - previous: undefined, - entityData: Set({ + checkpointId: 3, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-0", }), }, { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 1, - }, - previous: Some({ - chain_id: 100, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }), - entityData: Set({ - Entities.SimpleEntity.id: "1", - value: "call-1", - }), - }, - { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 1, - }), - entityData: Set({ + checkpointId: 4, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-2", }), }, { - current: { - chain_id: 1337, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }), - entityData: Set({ + checkpointId: 5, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-3", }), }, { - current: { - chain_id: 100, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }), - entityData: Set({ + checkpointId: 6, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-4", }), }, { - current: { - chain_id: 1337, - block_timestamp: 102, - block_number: 102, - log_index: 4, - }, - // FIXME: This looks wrong - previous: Some({ - chain_id: 1337, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }), - entityData: Set({ + checkpointId: 7, + 
entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-5", }), }, ], ), - ~message=`Should create multiple history rows: -Sorted for the batch for block number 101 -Different batches for block number 102`, + ~message=`Should create history rows and checkpoints`, + ) + + Assert.deepEqual( + await indexerMock.metric("envio_progress_events_count"), + [ + {value: "2", labels: Js.Dict.fromArray([("chainId", "100")])}, + {value: "4", labels: Js.Dict.fromArray([("chainId", "1337")])}, + ], + ~message="Events count before rollback", + ) + Assert.deepEqual( + await indexerMock.metric("envio_progress_block_number"), + [ + {value: "102", labels: Js.Dict.fromArray([("chainId", "100")])}, + {value: "105", labels: Js.Dict.fromArray([("chainId", "1337")])}, + ], + ~message="Progress block number before rollback", + ) + Assert.deepEqual( + await indexerMock.metric("envio_rollback_events_count"), + [{value: "0", labels: Js.Dict.empty()}], + ~message="Rollbacked events count before rollback", + ) + Assert.deepEqual( + await indexerMock.metric("envio_rollback_count"), + [{value: "0", labels: Js.Dict.empty()}], + ~message="Rollbacks count before rollback", ) // Should trigger rollback @@ -1229,7 +1451,7 @@ Different batches for block number 102`, Assert.deepEqual( sourceMock1337.getBlockHashesCalls, - [[100, 101, 102, 103]], + [[100, 101, 102, 105]], ~message="Should have called getBlockHashes to find rollback depth", ) sourceMock1337.resolveGetBlockHashes([ @@ -1237,11 +1459,38 @@ Different batches for block number 102`, {blockNumber: 100, blockHash: "0x100", blockTimestamp: 100}, {blockNumber: 101, blockHash: "0x101", blockTimestamp: 101}, {blockNumber: 102, blockHash: "0x102-reorged", blockTimestamp: 102}, - {blockNumber: 103, blockHash: "0x103-reorged", blockTimestamp: 103}, + {blockNumber: 105, blockHash: "0x105-reorged", blockTimestamp: 105}, ]) await indexerMock.getRollbackReadyPromise() + Assert.deepEqual( + await indexerMock.metric("envio_progress_events_count"), + [ + {value: "1", labels: Js.Dict.fromArray([("chainId", "100")])}, + {value: "2", labels: Js.Dict.fromArray([("chainId", "1337")])}, + ], + ~message="Events count after rollback", + ) + Assert.deepEqual( + await indexerMock.metric("envio_progress_block_number"), + [ + {value: "101", labels: Js.Dict.fromArray([("chainId", "100")])}, + {value: "101", labels: Js.Dict.fromArray([("chainId", "1337")])}, + ], + ~message="Progress block number after rollback", + ) + Assert.deepEqual( + await indexerMock.metric("envio_rollback_events_count"), + [{value: "3", labels: Js.Dict.empty()}], + ~message="Rollbacked events count after rollback", + ) + Assert.deepEqual( + await indexerMock.metric("envio_rollback_count"), + [{value: "1", labels: Js.Dict.empty()}], + ~message="Rollbacks count after rollback", + ) + Assert.deepEqual( ( sourceMock1337.getItemsOrThrowCalls->Utils.Array.last, @@ -1289,11 +1538,38 @@ Different batches for block number 102`, await indexerMock.getBatchWritePromise() Assert.deepEqual( - await Promise.all2(( + await Promise.all3(( + indexerMock.queryCheckpoints(), indexerMock.query(module(Entities.SimpleEntity)), indexerMock.queryHistory(module(Entities.SimpleEntity)), )), ( + [ + { + id: 3, + eventsProcessed: 1, + chainId: 100, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + { + id: 4, + eventsProcessed: 2, + chainId: 1337, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + // Reorg checkpoint id was checkpoint id 5 + // for chain 1337. 
After rollback it was removed + // and replaced with chain id 100 + { + id: 5, + eventsProcessed: 2, + chainId: 100, + blockNumber: 102, + blockHash: Js.Null.Value("0x102"), + }, + ], [ { Entities.SimpleEntity.id: "1", @@ -1302,68 +1578,25 @@ ], [ { - current: { - chain_id: 100, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }, - previous: undefined, - entityData: Set({ + checkpointId: 3, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-0", }), }, { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 1, - }, - previous: Some({ - chain_id: 100, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }), - entityData: Set({ - Entities.SimpleEntity.id: "1", - value: "call-1", - }), - }, - { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 1, - }), - entityData: Set({ + checkpointId: 4, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-2", }), }, { - current: { - chain_id: 100, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }), - entityData: Set({ + checkpointId: 5, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-4", }), @@ -1373,8 +1606,8 @@ ) }) - // Fixes duplicate history bug before 2.29.3 - Async.it_skip( + // Fixes duplicate history bug before 2.31 + Async.it( "Rollback of unordered multichain indexer (single entity id change + another entity on non-reorg chain)", async () => { let sourceMock1337 = M.Source.make( @@ -1400,8 +1633,8 @@ await Utils.delay(0) let _ = await Promise.all2(( - M.Helper.initialEnterReorgThreshold(~sourceMock=sourceMock1337), - M.Helper.initialEnterReorgThreshold(~sourceMock=sourceMock100), + M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock=sourceMock1337), + M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock=sourceMock100), )) let callCount = ref(0) @@ -1466,21 +1699,71 @@ }, ]) await indexerMock.getBatchWritePromise() - sourceMock1337.resolveGetItemsOrThrow([ - { - blockNumber: 102, - logIndex: 4, - handler, - }, - ]) + sourceMock1337.resolveGetItemsOrThrow( + [ + { + blockNumber: 103, + logIndex: 4, + handler, + }, + ], + ~latestFetchedBlockNumber=105, + ) await indexerMock.getBatchWritePromise() Assert.deepEqual( - await Promise.all2(( + await Promise.all3(( + indexerMock.queryCheckpoints(), indexerMock.query(module(Entities.SimpleEntity)), indexerMock.queryHistory(module(Entities.SimpleEntity)), )), ( + [ + { + id: 3, + eventsProcessed: 1, + chainId: 100, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + { + id: 4, + eventsProcessed: 2, + chainId: 1337, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + { + id: 5, + eventsProcessed: 1, + chainId: 1337, + blockNumber: 102, + blockHash: Js.Null.Value("0x102"), + }, + { + id: 6, + eventsProcessed: 2, + chainId: 100, + blockNumber: 102, + blockHash: Js.Null.Value("0x102"), + }, + { + id: 7, + eventsProcessed: 1, + chainId: 1337, + blockNumber: 103, + blockHash: Js.Null.Null, + }, + // Block 104 is skipped, since we don't have + // either events processed or block hash 
for it + { + id: 8, + eventsProcessed: 0, + chainId: 1337, + blockNumber: 105, + blockHash: Js.Null.Value("0x105"), + }, + ], [ { Entities.SimpleEntity.id: "1", @@ -1489,114 +1772,48 @@ Different batches for block number 102`, ], [ { - current: { - chain_id: 100, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }, - previous: undefined, - entityData: Set({ + checkpointId: 3, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-0", }), }, { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 1, - }, - previous: Some({ - chain_id: 100, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }), - entityData: Set({ - Entities.SimpleEntity.id: "1", - value: "call-1", - }), - }, - { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 1, - }), - entityData: Set({ + checkpointId: 4, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-2", }), }, { - current: { - chain_id: 1337, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }), - entityData: Set({ + checkpointId: 5, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-3", }), }, { - current: { - chain_id: 100, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }), - entityData: Set({ + checkpointId: 6, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-4", }), }, { - current: { - chain_id: 1337, - block_timestamp: 102, - block_number: 102, - log_index: 4, - }, - // FIXME: This looks wrong - previous: Some({ - chain_id: 1337, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }), - entityData: Set({ + checkpointId: 7, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-5", }), }, ], ), - ~message=`Should create multiple history rows: -Sorted for the batch for block number 101 -Different batches for block number 102`, + ~message=`Should create history rows and checkpoints`, ) Assert.deepEqual( await Promise.all2(( @@ -1612,21 +1829,16 @@ Different batches for block number 102`, ], [ { - current: { - chain_id: 100, - block_timestamp: 102, - block_number: 102, - log_index: 3, - }, - previous: undefined, - entityData: Set({ + checkpointId: 6, + entityId: "foo", + entityUpdateAction: Set({ Entities.EntityWithBigDecimal.id: "foo", bigDecimal: BigDecimal.fromFloat(0.), }), }, ], ), - ~message="Should also add another entity for a non-reorg chain, which should also be rollbacked (theoretically)", + ~message="Should also add another entity for a non-reorg chain, which should also be rollbacked", ) // Should trigger rollback @@ -1642,7 +1854,7 @@ Different batches for block number 102`, Assert.deepEqual( sourceMock1337.getBlockHashesCalls, - [[100, 101, 102, 103]], + [[100, 101, 102, 105]], ~message="Should have called getBlockHashes to find rollback depth", ) sourceMock1337.resolveGetBlockHashes([ @@ -1650,7 +1862,7 @@ Different batches for block number 102`, {blockNumber: 100, blockHash: "0x100", blockTimestamp: 100}, {blockNumber: 101, blockHash: "0x101", blockTimestamp: 101}, {blockNumber: 102, blockHash: "0x102-reorged", blockTimestamp: 
102}, - {blockNumber: 103, blockHash: "0x103-reorged", blockTimestamp: 103}, + {blockNumber: 105, blockHash: "0x105-reorged", blockTimestamp: 105}, ]) await indexerMock.getRollbackReadyPromise() @@ -1702,11 +1914,38 @@ Different batches for block number 102`, await indexerMock.getBatchWritePromise() Assert.deepEqual( - await Promise.all2(( + await Promise.all3(( + indexerMock.queryCheckpoints(), indexerMock.query(module(Entities.SimpleEntity)), indexerMock.queryHistory(module(Entities.SimpleEntity)), )), ( + [ + { + id: 3, + eventsProcessed: 1, + chainId: 100, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + { + id: 4, + eventsProcessed: 2, + chainId: 1337, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + // Reorg checkpoint id was checkpoint id 5 + // for chain 1337. After rollback it was removed + // and replaced with chain id 100 + { + id: 5, + eventsProcessed: 2, + chainId: 100, + blockNumber: 102, + blockHash: Js.Null.Value("0x102"), + }, + ], [ { Entities.SimpleEntity.id: "1", @@ -1715,68 +1954,25 @@ Different batches for block number 102`, ], [ { - current: { - chain_id: 100, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }, - previous: undefined, - entityData: Set({ + checkpointId: 3, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-0", }), }, { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 1, - }, - previous: Some({ - chain_id: 100, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }), - entityData: Set({ - Entities.SimpleEntity.id: "1", - value: "call-1", - }), - }, - { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 1, - }), - entityData: Set({ + checkpointId: 4, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-2", }), }, { - current: { - chain_id: 100, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }), - entityData: Set({ + checkpointId: 5, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-4", }), @@ -1798,14 +1994,9 @@ Different batches for block number 102`, ], [ { - current: { - chain_id: 100, - block_timestamp: 102, - block_number: 102, - log_index: 3, - }, - previous: undefined, - entityData: Set({ + checkpointId: 5, + entityId: "foo", + entityUpdateAction: Set({ Entities.EntityWithBigDecimal.id: "foo", bigDecimal: BigDecimal.fromFloat(0.), }), @@ -1844,8 +2035,8 @@ Different batches for block number 102`, await Utils.delay(0) let _ = await Promise.all2(( - M.Helper.initialEnterReorgThreshold(~sourceMock=sourceMock1337), - M.Helper.initialEnterReorgThreshold(~sourceMock=sourceMock100), + M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock=sourceMock1337), + M.Helper.initialEnterReorgThreshold(~indexerMock, ~sourceMock=sourceMock100), )) let callCount = ref(0) @@ -1905,11 +2096,56 @@ Different batches for block number 102`, await indexerMock.getBatchWritePromise() Assert.deepEqual( - await Promise.all2(( + await Promise.all3(( + indexerMock.queryCheckpoints(), indexerMock.query(module(Entities.SimpleEntity)), indexerMock.queryHistory(module(Entities.SimpleEntity)), )), ( + [ + { + id: 2, + eventsProcessed: 0, + chainId: 100, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + { 
+ id: 3, + eventsProcessed: 0, + chainId: 1337, + blockNumber: 100, + blockHash: Js.Null.Value("0x100"), + }, + { + id: 4, + eventsProcessed: 1, + chainId: 1337, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + { + id: 5, + eventsProcessed: 1, + chainId: 100, + blockNumber: 102, + blockHash: Js.Null.Null, + }, + { + id: 6, + eventsProcessed: 1, + chainId: 1337, + blockNumber: 102, + blockHash: Js.Null.Value("0x102"), + }, + { + id: 7, + eventsProcessed: 1, + chainId: 100, + blockNumber: 103, + blockHash: Js.Null.Value("0x103"), + }, + ], [ { Entities.SimpleEntity.id: "1", @@ -1918,50 +2154,25 @@ Different batches for block number 102`, ], [ { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }, - previous: undefined, - entityData: Set({ + checkpointId: 4, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-0", }), }, { - current: { - chain_id: 1337, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }), - entityData: Set({ + checkpointId: 6, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-1", }), }, { - current: { - chain_id: 100, - block_timestamp: 103, - block_number: 103, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }), - entityData: Set({ + checkpointId: 7, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-2", }), @@ -1985,14 +2196,9 @@ Sorted by timestamp and chain id`, ], [ { - current: { - chain_id: 100, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }, - previous: undefined, - entityData: Set({ + checkpointId: 5, + entityId: "foo", + entityUpdateAction: Set({ Entities.EntityWithBigDecimal.id: "foo", bigDecimal: BigDecimal.fromFloat(0.), }), @@ -2002,6 +2208,23 @@ Sorted by timestamp and chain id`, ~message="Should also add another entity for a non-reorg chain, which should also be rollbacked (theoretically)", ) + Assert.deepEqual( + await indexerMock.metric("envio_progress_events_count"), + [ + {value: "2", labels: Js.Dict.fromArray([("chainId", "100")])}, + {value: "2", labels: Js.Dict.fromArray([("chainId", "1337")])}, + ], + ~message="Events count before rollback", + ) + Assert.deepEqual( + await indexerMock.metric("envio_progress_block_number"), + [ + {value: "103", labels: Js.Dict.fromArray([("chainId", "100")])}, + {value: "102", labels: Js.Dict.fromArray([("chainId", "1337")])}, + ], + ~message="Progress block number before rollback", + ) + // Should trigger rollback sourceMock1337.resolveGetItemsOrThrow( [], @@ -2028,6 +2251,23 @@ Sorted by timestamp and chain id`, await indexerMock.getRollbackReadyPromise() + Assert.deepEqual( + await indexerMock.metric("envio_progress_events_count"), + [ + {value: "0", labels: Js.Dict.fromArray([("chainId", "100")])}, + {value: "1", labels: Js.Dict.fromArray([("chainId", "1337")])}, + ], + ~message="Events count after rollback", + ) + Assert.deepEqual( + await indexerMock.metric("envio_progress_block_number"), + [ + {value: "101", labels: Js.Dict.fromArray([("chainId", "100")])}, + {value: "101", labels: Js.Dict.fromArray([("chainId", "1337")])}, + ], + ~message="Progress block number after rollback", + ) + Assert.deepEqual( ( sourceMock1337.getItemsOrThrowCalls->Utils.Array.last, @@ -2075,11 +2315,51 @@ Sorted by timestamp and chain id`, await 
indexerMock.getBatchWritePromise() Assert.deepEqual( - await Promise.all2(( + await Promise.all3(( + indexerMock.queryCheckpoints(), indexerMock.query(module(Entities.SimpleEntity)), indexerMock.queryHistory(module(Entities.SimpleEntity)), )), ( + [ + { + id: 2, + eventsProcessed: 0, + chainId: 100, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + { + id: 3, + eventsProcessed: 0, + chainId: 1337, + blockNumber: 100, + blockHash: Js.Null.Value("0x100"), + }, + { + id: 4, + eventsProcessed: 1, + chainId: 1337, + blockNumber: 101, + blockHash: Js.Null.Value("0x101"), + }, + // Block 101 for chain 100 is skipped, + // since it doesn't have events processed or block hash + { + id: 5, + eventsProcessed: 1, + chainId: 100, + blockNumber: 102, + blockHash: Js.Null.Null, + }, + { + id: 6, + eventsProcessed: 1, + chainId: 100, + blockNumber: 103, + blockHash: Js.Null.Value("0x103"), + }, + ], [ { Entities.SimpleEntity.id: "1", @@ -2088,32 +2368,17 @@ Sorted by timestamp and chain id`, ], [ { - current: { - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }, - previous: undefined, - entityData: Set({ + checkpointId: 4, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-0", }), }, { - current: { - chain_id: 100, - block_timestamp: 103, - block_number: 103, - log_index: 2, - }, - previous: Some({ - chain_id: 1337, - block_timestamp: 101, - block_number: 101, - log_index: 2, - }), - entityData: Set({ + checkpointId: 6, + entityId: "1", + entityUpdateAction: Set({ Entities.SimpleEntity.id: "1", value: "call-3", }), @@ -2135,14 +2400,9 @@ Sorted by timestamp and chain id`, ], [ { - current: { - chain_id: 100, - block_timestamp: 102, - block_number: 102, - log_index: 2, - }, - previous: undefined, - entityData: Set({ + checkpointId: 5, + entityId: "foo", + entityUpdateAction: Set({ Entities.EntityWithBigDecimal.id: "foo", bigDecimal: BigDecimal.fromFloat(0.), }), diff --git a/scenarios/test_codegen/test/schema_types/BigDecimal_test.res b/scenarios/test_codegen/test/schema_types/BigDecimal_test.res index c47550ef1..3b8b9121f 100644 --- a/scenarios/test_codegen/test/schema_types/BigDecimal_test.res +++ b/scenarios/test_codegen/test/schema_types/BigDecimal_test.res @@ -49,6 +49,7 @@ describe("Load and save an entity with a BigDecimal from DB", () => { inMemoryStore, shouldSaveHistory: false, isPreload: false, + checkpointId: 0, chains, })->(Utils.magic: Internal.handlerContext => Types.loaderContext) diff --git a/scenarios/test_codegen/test/schema_types/Timestamp_test.res b/scenarios/test_codegen/test/schema_types/Timestamp_test.res index 8b554d3c3..3091e7faf 100644 --- a/scenarios/test_codegen/test/schema_types/Timestamp_test.res +++ b/scenarios/test_codegen/test/schema_types/Timestamp_test.res @@ -41,6 +41,7 @@ describe("Load and save an entity with a Timestamp from DB", () => { inMemoryStore, shouldSaveHistory: false, isPreload: false, + checkpointId: 0, chains, })->(Utils.magic: Internal.handlerContext => Types.loaderContext)