diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml new file mode 100644 index 0000000..ac449d3 --- /dev/null +++ b/.github/actions/setup/action.yml @@ -0,0 +1,37 @@ +name: Setup +description: Perform standard setup and install dependencies using pnpm. +inputs: + node-version: + description: The version of Node.js to install + required: false + deno-version: + description: The version of Deno to install + required: false + bun-version: + description: The version of Bun to install + required: false + +runs: + using: composite + steps: + - name: Install pnpm + uses: pnpm/action-setup@v3 + - name: Install node + uses: actions/setup-node@v4 + if: ${{ inputs.node-version != '' }} + with: + cache: pnpm + node-version: ${{ inputs.node-version }} + - name: Install deno + uses: denoland/setup-deno@v2 + if: ${{ inputs.deno-version != '' }} + with: + deno-version: ${{ inputs.deno-version }} + - name: Install bun + uses: oven-sh/setup-bun@v2 + if: ${{ inputs.bun-version != '' }} + with: + bun-version: ${{ inputs.bun-version }} + - name: Install dependencies + shell: bash + run: pnpm install diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml new file mode 100644 index 0000000..b6abda5 --- /dev/null +++ b/.github/workflows/check.yml @@ -0,0 +1,106 @@ +name: Check + +on: + workflow_dispatch: + pull_request: + branches: [main] + push: + branches: [main] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: {} + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + permissions: + contents: read + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - name: Install dependencies + uses: ./.github/actions/setup + with: + node-version: 24.10.0 + - run: pnpm lint + + types: + name: Types + runs-on: ubuntu-latest + permissions: + contents: read + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - name: Install dependencies + uses: ./.github/actions/setup + with: + node-version: 24.10.0 + - run: pnpm check + + # TODO: discuss with Sebastian whether we should add Deno / Bun configuration / checks + # types-deno: + # name: Types on Deno + # runs-on: ubuntu-latest + # permissions: + # contents: read + # timeout-minutes: 10 + # steps: + # - uses: actions/checkout@v4 + # - name: Install dependencies + # uses: ./.github/actions/setup + # with: + # deno-version: v2.5.x + # - run: deno check . 
+ + test: + name: Test + runs-on: ubuntu-latest + permissions: + contents: read + timeout-minutes: 10 + strategy: + fail-fast: false + matrix: + shard: [1/1] + runtime: [Node] # TODO: Deno + steps: + - uses: actions/checkout@v4 + + - name: Install dependencies + if: matrix.runtime == 'Node' + uses: ./.github/actions/setup + with: + node-version: 24.10.0 + - name: Test + if: matrix.runtime == 'Node' + run: pnpm test --shard ${{ matrix.shard }} + + - name: Install dependencies + if: matrix.runtime == 'Deno' + uses: ./.github/actions/setup + with: + deno-version: v2.5.x + - name: Test + if: matrix.runtime == 'Deno' + run: deno task test --shard ${{ matrix.shard }} + + # TODO: discuss with Sebastian whether we want circularity checks + # circular: + # name: Circular Dependencies + # runs-on: ubuntu-latest + # permissions: + # contents: read + # timeout-minutes: 10 + # steps: + # - uses: actions/checkout@v4 + # - name: Install dependencies + # uses: ./.github/actions/setup + # with: + # node-version: 24.10.0 + # - name: Check for circular dependencies + # run: pnpm circular diff --git a/.gitignore b/.gitignore index 17ecf7a..2c3bbb8 100644 --- a/.gitignore +++ b/.gitignore @@ -14,4 +14,6 @@ node_modules/ .DS_Store # Scratchpad Files +scratchpad/**/*.md scratchpad/**/*.ts +!scratchpad/index.ts diff --git a/eslint.config.js b/eslint.config.js index c05c9ae..3ad77ad 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -20,8 +20,7 @@ export default defineConfig( { plugins: { "simple-import-sort": simpleImportSort, - "sort-destructure-keys": sortDestructureKeys, - "unused-imports": unusedImports + "sort-destructure-keys": sortDestructureKeys }, languageOptions: { @@ -71,7 +70,6 @@ export default defineConfig( "import-x/order": "off", "simple-import-sort/imports": "off", "sort-destructure-keys/sort-destructure-keys": "error", - "unused-imports/no-unused-imports": "error", "deprecation/deprecation": "off", "@typescript-eslint/array-type": [ @@ -130,5 +128,28 @@ export default defineConfig( rules: { "no-console": "error" } + }, + { + files: ["scratchpad/eslint/**/*"], + plugins: { + "unused-imports": unusedImports + }, + rules: { + "unused-imports/no-unused-imports": "error", + "@effect/dprint": [ + "error", + { + config: { + indentWidth: 2, + lineWidth: 80, + semiColons: "asi", + quoteStyle: "alwaysDouble", + trailingCommas: "never", + operatorPosition: "maintain", + "arrowFunction.useParentheses": "force" + } + } + ] + } } ) diff --git a/package.json b/package.json index 39099ef..0eeecba 100644 --- a/package.json +++ b/package.json @@ -1,25 +1,36 @@ { "private": true, "type": "module", - "packageManager": "pnpm@10.24.0+sha512.01ff8ae71b4419903b65c60fb2dc9d34cf8bb6e06d03bde112ef38f7a34d6904c424ba66bea5cdcf12890230bf39f9580473140ed9c946fef328b6e5238a345a", + "packageManager": "pnpm@10.25.0+sha512.5e82639027af37cf832061bcc6d639c219634488e0f2baebe785028a793de7b525ffcd3f7ff574f5e9860654e098fe852ba8ac5dd5cefe1767d23a020a92f501", "scripts": { "check": "tspc -b tsconfig.json", + "clean": "node scripts/clean.mjs", "lint": "eslint \"**/{src,test,examples,dtslint}/**/*.{ts,mjs}\"", - "lint-fix": "pnpm lint --fix" + "lint-fix": "pnpm lint --fix", + "test": "vitest" }, "devDependencies": { "@effect/eslint-plugin": "^0.3.2", - "@effect/language-service": "^0.57.1", + "@effect/language-service": "^0.62.0", + "@effect/vitest": "^0.27.0", "@eslint/js": "^9.39.1", - "@types/node": "^24.10.1", + "@types/node": "^25.0.0", + "@vitest/coverage-v8": "^4.0.15", + "@vitest/ui": "^4.0.15", + "effect": "^3.19.11", "eslint": 
"^9.39.1", "eslint-import-resolver-typescript": "^4.4.4", "eslint-plugin-import-x": "^4.16.1", "eslint-plugin-simple-import-sort": "^12.1.1", "eslint-plugin-sort-destructure-keys": "^2.0.0", "eslint-plugin-unused-imports": "^4.3.0", + "glob": "^13.0.0", + "globals": "^16.5.0", "ts-patch": "^3.3.0", "typescript": "^5.9.3", - "typescript-eslint": "^8.48.0" + "typescript-eslint": "^8.49.0", + "vite-tsconfig-paths": "^5.1.4", + "vitest": "^4.0.15", + "vitest-mock-express": "^2.2.0" } } diff --git a/packages/amp/README.md b/packages/amp/README.md new file mode 100644 index 0000000..cf3f630 --- /dev/null +++ b/packages/amp/README.md @@ -0,0 +1,9 @@ +# Amp TypeScript SDK + +## Known Issues + +**Enabling TypeScript's `erasableSyntaxOnly` Feature** + +For some completely absurd reason, the `@bufbuild/protoc-gen-es` plugin does not support generating enum values as anything other than TypeScript enums at this time, so we cannot yet enable this flag. + +See the [related `protobuf-es` issue](https://github.com/bufbuild/protobuf-es/issues/1139) for more information. diff --git a/packages/amp/buf.gen.yaml b/packages/amp/buf.gen.yaml new file mode 100644 index 0000000..01f306f --- /dev/null +++ b/packages/amp/buf.gen.yaml @@ -0,0 +1,12 @@ +version: v2 + +inputs: + - git_repo: https://github.com/apache/arrow + subdir: format + +plugins: + - local: protoc-gen-es + out: src/Protobuf + opt: + - target=ts + - import_extension=ts diff --git a/packages/amp/package.json b/packages/amp/package.json index ab5de4e..8c4deda 100644 --- a/packages/amp/package.json +++ b/packages/amp/package.json @@ -10,10 +10,12 @@ "url": "https://github.com/edgeandnode/amp-typescript", "directory": "packages/amp" }, + "sideEffects": [], "exports": { "./package.json": "./package.json", ".": "./src/index.ts", - "./*": "./src/*.ts" + "./*": "./src/*.ts", + "./internal/*.ts": null }, "files": [ "src/**/*.ts", @@ -22,10 +24,27 @@ "dist/**/*.d.ts", "dist/**/*.d.ts.map" ], + "publishConfig": { + "provenance": true, + "exports": { + "./package.json": "./package.json", + ".": "./dist/index.js", + "./*": "./dist/*.js", + "./internal/*.ts": null + } + }, "peerDependencies": { - "effect": "^3.19.8" + "@bufbuild/protobuf": "^2.10.1", + "@connectrpc/connect": "^2.1.1", + "@connectrpc/connect-node": "^2.1.1", + "effect": "^3.19.11" }, "devDependencies": { - "effect": "^3.19.8" + "@bufbuild/buf": "^1.61.0", + "@bufbuild/protobuf": "^2.10.1", + "@bufbuild/protoc-gen-es": "^2.10.1", + "@connectrpc/connect": "^2.1.1", + "@connectrpc/connect-node": "^2.1.1", + "effect": "^3.19.11" } } diff --git a/packages/amp/src/ArrowFlight.ts b/packages/amp/src/ArrowFlight.ts new file mode 100644 index 0000000..975b6ad --- /dev/null +++ b/packages/amp/src/ArrowFlight.ts @@ -0,0 +1,254 @@ +import { create, toBinary } from "@bufbuild/protobuf" +import { anyPack, AnySchema } from "@bufbuild/protobuf/wkt" +import { type Client, createClient, type Transport as ConnectTransport } from "@connectrpc/connect" +import * as Console from "effect/Console" +import * as Context from "effect/Context" +import * as Effect from "effect/Effect" +import * as Layer from "effect/Layer" +import * as Schema from "effect/Schema" +import * as Stream from "effect/Stream" +import { decodeDictionaryBatch, decodeRecordBatch, DictionaryRegistry } from "./internal/arrow-flight-ipc/Decoder.ts" +import { recordBatchToJson } from "./internal/arrow-flight-ipc/Json.ts" +import { readColumnValues } from "./internal/arrow-flight-ipc/Readers.ts" +import { parseDictionaryBatch, parseRecordBatch } from 
"./internal/arrow-flight-ipc/RecordBatch.ts" +import { type ArrowSchema, getMessageType, MessageHeaderType, parseSchema } from "./internal/arrow-flight-ipc/Schema.ts" +import { FlightDescriptor_DescriptorType, FlightDescriptorSchema, FlightService } from "./Protobuf/Flight_pb.ts" +import { CommandStatementQuerySchema } from "./Protobuf/FlightSql_pb.ts" + +// ============================================================================= +// Errors +// ============================================================================= + +// TODO: improve the error model +/** + * Represents the possible errors that can occur when executing an Arrow Flight + * query. + */ +export type ArrowFlightQueryError = + | RpcError + | NoEndpointsError + | MultipleEndpointsError + | TicketNotFoundError + | ParseRecordBatchError + | ParseDictionaryBatchError + | ParseSchemaError + +/** + * Represents an Arrow Flight RPC request that failed. + */ +export class RpcError extends Schema.TaggedError( + "Amp/RpcError" +)("RpcError", { + method: Schema.String, + /** + * The underlying reason for the failed RPC request. + */ + cause: Schema.Defect +}) {} + +/** + * Represents an error that occurred as a result of a `FlightInfo` request + * returning an empty list of endpoints from which data can be acquired. + */ +export class NoEndpointsError extends Schema.TaggedError( + "Amp/NoEndpointsError" +)("NoEndpointsError", { + /** + * The SQL query that was requested. + */ + query: Schema.String +}) {} + +// TODO: determine if this is _really_ a logical error case +/** + * Represents an error that occured as a result of a `FlightInfo` request + * returning multiple endpoints from which data can be acquired. + * + * For Amp queries, there should only ever be **one** authoritative source + * of data. + */ +export class MultipleEndpointsError extends Schema.TaggedError( + "Amp/MultipleEndpointsError" +)("MultipleEndpointsError", { + /** + * The SQL query that was requested. + */ + query: Schema.String +}) {} + +/** + * Represents an error that occurred as a result of a `FlightInfo` request + * whose endpoint did not have a ticket. + */ +export class TicketNotFoundError extends Schema.TaggedError( + "Amp/TicketNotFoundError" +)("TicketNotFoundError", { + /** + * The SQL query that was requested. + */ + query: Schema.String +}) {} + +/** + * Represents an error that occurred as a result of failing to parse an Apache + * Arrow RecordBatch. + */ +export class ParseRecordBatchError extends Schema.TaggedError( + "Amp/ParseRecordBatchError" +)("ParseRecordBatchError", { + /** + * The underlying reason for the failure to parse a record batch. + */ + cause: Schema.Defect +}) {} + +/** + * Represents an error that occurred as a result of failing to parse an Apache + * Arrow DictionaryBatch. + */ +export class ParseDictionaryBatchError extends Schema.TaggedError( + "Amp/ParseDictionaryBatchError" +)("ParseDictionaryBatchError", { + /** + * The underlying reason for the failure to parse a dictionary batch. + */ + cause: Schema.Defect +}) {} + +/** + * Represents an error that occurred as a result of failing to parse an Apache + * Arrow Schema. + */ +export class ParseSchemaError extends Schema.TaggedError( + "Amp/ParseSchemaError" +)("ParseSchemaError", { + /** + * The underlying reason for the failure to parse a schema. 
+ +// ============================================================================= +// Arrow Flight Service +// ============================================================================= + +// TODO: cleanup service interface (just implemented as is for testing right now) +/** + * A service which can be used to execute queries against an Arrow Flight API. + */ +export class ArrowFlight extends Context.Tag("@edgeandnode/amp/ArrowFlight")< + ArrowFlight, + { + readonly client: Client<typeof FlightService> + readonly query: (query: string) => Effect.Effect<void, ArrowFlightQueryError> + } +>() {} + +const make = Effect.gen(function*() { + const transport = yield* Transport + const client = createClient(FlightService, transport) + + /** + * Execute a SQL query, decoding the resulting stream of record batches. + */ + const query = Effect.fn("ArrowFlight.query")(function*(query: string) { + const cmd = create(CommandStatementQuerySchema, { query }) + const any = anyPack(CommandStatementQuerySchema, cmd) + const desc = create(FlightDescriptorSchema, { + type: FlightDescriptor_DescriptorType.CMD, + cmd: toBinary(AnySchema, any) + }) + + const flightInfo = yield* Effect.tryPromise({ + try: (signal) => client.getFlightInfo(desc, { signal }), + catch: (cause) => new RpcError({ cause, method: "getFlightInfo" }) + }) + + if (flightInfo.endpoint.length !== 1) { + return yield* flightInfo.endpoint.length <= 0 + ? new NoEndpointsError({ query }) + : new MultipleEndpointsError({ query }) + } + + const { ticket } = flightInfo.endpoint[0]! + + if (ticket === undefined) { + return yield* new TicketNotFoundError({ query }) + } + + const flightDataStream = Stream.unwrapScoped(Effect.gen(function*() { + const controller = yield* Effect.acquireRelease( + Effect.sync(() => new AbortController()), + (controller) => Effect.sync(() => controller.abort()) + ) + return Stream.fromAsyncIterable( + client.doGet(ticket, { signal: controller.signal }), + (cause) => new RpcError({ cause, method: "doGet" }) + ) + })) + + let schema: ArrowSchema | undefined + const dictionaryRegistry = new DictionaryRegistry() + + // Consume the FlightData stream, decoding each message and printing decoded record batches as JSON (temporary behavior for testing) + return yield* flightDataStream.pipe( + Stream.runForEach(Effect.fnUntraced(function*(flightData) { + const messageType = yield* Effect.orDie(getMessageType(flightData)) + + switch (messageType) { + case MessageHeaderType.SCHEMA: { + schema = yield* parseSchema(flightData).pipe( + Effect.mapError((cause) => new ParseSchemaError({ cause })) + ) + break + } + case MessageHeaderType.DICTIONARY_BATCH: { + const dictionaryBatch = yield* parseDictionaryBatch(flightData).pipe( + Effect.mapError((cause) => new ParseDictionaryBatchError({ cause })) + ) + decodeDictionaryBatch(dictionaryBatch, flightData.dataBody, schema!, dictionaryRegistry, readColumnValues) + break + } + case MessageHeaderType.RECORD_BATCH: { + const recordBatch = yield* parseRecordBatch(flightData).pipe( + Effect.mapError((cause) => new ParseRecordBatchError({ cause })) + ) + const decodedRecordBatch = decodeRecordBatch(recordBatch, flightData.dataBody, schema!) + const json = recordBatchToJson(decodedRecordBatch, { dictionaryRegistry }) + yield* Console.dir(json, { depth: null, colors: true }) + break + } + } + + return yield* Effect.void + })) + ) + }) + + return { + client, + query + } as const +}) + +/** + * A layer which constructs a concrete implementation of an `ArrowFlight` + * service and depends upon some implementation of a `Transport`. + */ +export const layer: Layer.Layer<ArrowFlight, never, Transport> = Layer.effect(ArrowFlight, make)
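For reference, a minimal end-to-end sketch (the endpoint URL is a placeholder) that satisfies the `Transport` dependency with the Node.js gRPC transport from `ArrowFlight/Node.ts` just below, and then runs a query:

```ts
import * as Effect from "effect/Effect"
import * as Layer from "effect/Layer"
import { ArrowFlight, layer } from "@edgeandnode/amp/ArrowFlight"
import { layerTransportGrpc } from "@edgeandnode/amp/ArrowFlight/Node"

// Feed the Transport layer into `layer`, yielding a self-contained
// Layer<ArrowFlight>. The baseUrl points at a hypothetical Flight server.
const ArrowFlightLive = Layer.provide(
  layer,
  layerTransportGrpc({ baseUrl: "http://localhost:50051" })
)

const program = Effect.gen(function*() {
  const flight = yield* ArrowFlight
  yield* flight.query("SELECT 1")
})

Effect.runPromise(program.pipe(Effect.provide(ArrowFlightLive)))
```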
+ +// ============================================================================= +// Transport Service +// ============================================================================= + +/** + * A service which abstracts the underlying transport for a given client. + * + * A transport implements a protocol, such as Connect or gRPC-web, and allows + * for the concrete clients to be independent of the protocol. + */ +export class Transport extends Context.Tag("@edgeandnode/amp/Transport")< + Transport, + ConnectTransport +>() {} diff --git a/packages/amp/src/ArrowFlight/Node.ts b/packages/amp/src/ArrowFlight/Node.ts new file mode 100644 index 0000000..f2e77d0 --- /dev/null +++ b/packages/amp/src/ArrowFlight/Node.ts @@ -0,0 +1,9 @@ +import { createGrpcTransport, type GrpcTransportOptions } from "@connectrpc/connect-node" +import * as Layer from "effect/Layer" +import { Transport } from "../ArrowFlight.ts" + +/** + * Create a `Transport` for the gRPC protocol using the Node.js `http2` module. + */ +export const layerTransportGrpc = (options: GrpcTransportOptions): Layer.Layer<Transport> => + Layer.sync(Transport, () => createGrpcTransport(options)) diff --git a/packages/amp/src/Protobuf/FlightSql_pb.ts b/packages/amp/src/Protobuf/FlightSql_pb.ts new file mode 100644 index 0000000..9b8c840 --- /dev/null +++ b/packages/amp/src/Protobuf/FlightSql_pb.ts @@ -0,0 +1,3632 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +//

+// http://www.apache.org/licenses/LICENSE-2.0 +//

+// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts,import_extension=ts" +// @generated from file FlightSql.proto (package arrow.flight.protocol.sql, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenExtension, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; +import { enumDesc, extDesc, fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv2"; +import type { MessageOptions } from "@bufbuild/protobuf/wkt"; +import { file_google_protobuf_descriptor } from "@bufbuild/protobuf/wkt"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file FlightSql.proto. + */ +export const file_FlightSql: GenFile = /*@__PURE__*/ + fileDesc("Cg9GbGlnaHRTcWwucHJvdG8SGWFycm93LmZsaWdodC5wcm90b2NvbC5zcWwiIQoRQ29tbWFuZEdldFNxbEluZm8SDAoEaW5mbxgBIAMoDSI+ChZDb21tYW5kR2V0WGRiY1R5cGVJbmZvEhYKCWRhdGFfdHlwZRgBIAEoBUgAiAEBQgwKCl9kYXRhX3R5cGUiFAoSQ29tbWFuZEdldENhdGFsb2dzInsKE0NvbW1hbmRHZXREYlNjaGVtYXMSFAoHY2F0YWxvZxgBIAEoCUgAiAEBEiUKGGRiX3NjaGVtYV9maWx0ZXJfcGF0dGVybhgCIAEoCUgBiAEBQgoKCF9jYXRhbG9nQhsKGV9kYl9zY2hlbWFfZmlsdGVyX3BhdHRlcm4i6wEKEENvbW1hbmRHZXRUYWJsZXMSFAoHY2F0YWxvZxgBIAEoCUgAiAEBEiUKGGRiX3NjaGVtYV9maWx0ZXJfcGF0dGVybhgCIAEoCUgBiAEBEiYKGXRhYmxlX25hbWVfZmlsdGVyX3BhdHRlcm4YAyABKAlIAogBARITCgt0YWJsZV90eXBlcxgEIAMoCRIWCg5pbmNsdWRlX3NjaGVtYRgFIAEoCEIKCghfY2F0YWxvZ0IbChlfZGJfc2NoZW1hX2ZpbHRlcl9wYXR0ZXJuQhwKGl90YWJsZV9uYW1lX2ZpbHRlcl9wYXR0ZXJuIhYKFENvbW1hbmRHZXRUYWJsZVR5cGVzIm4KFUNvbW1hbmRHZXRQcmltYXJ5S2V5cxIUCgdjYXRhbG9nGAEgASgJSACIAQESFgoJZGJfc2NoZW1hGAIgASgJSAGIAQESDQoFdGFibGUYAyABKAlCCgoIX2NhdGFsb2dCDAoKX2RiX3NjaGVtYSJvChZDb21tYW5kR2V0RXhwb3J0ZWRLZXlzEhQKB2NhdGFsb2cYASABKAlIAIgBARIWCglkYl9zY2hlbWEYAiABKAlIAYgBARINCgV0YWJsZRgDIAEoCUIKCghfY2F0YWxvZ0IMCgpfZGJfc2NoZW1hIm8KFkNvbW1hbmRHZXRJbXBvcnRlZEtleXMSFAoHY2F0YWxvZxgBIAEoCUgAiAEBEhYKCWRiX3NjaGVtYRgCIAEoCUgBiAEBEg0KBXRhYmxlGAMgASgJQgoKCF9jYXRhbG9nQgwKCl9kYl9zY2hlbWEi5gEKGENvbW1hbmRHZXRDcm9zc1JlZmVyZW5jZRIXCgpwa19jYXRhbG9nGAEgASgJSACIAQESGQoMcGtfZGJfc2NoZW1hGAIgASgJSAGIAQESEAoIcGtfdGFibGUYAyABKAkSFwoKZmtfY2F0YWxvZxgEIAEoCUgCiAEBEhkKDGZrX2RiX3NjaGVtYRgFIAEoCUgDiAEBEhAKCGZrX3RhYmxlGAYgASgJQg0KC19wa19jYXRhbG9nQg8KDV9wa19kYl9zY2hlbWFCDQoLX2ZrX2NhdGFsb2dCDwoNX2ZrX2RiX3NjaGVtYSJlCiRBY3Rpb25DcmVhdGVQcmVwYXJlZFN0YXRlbWVudFJlcXVlc3QSDQoFcXVlcnkYASABKAkSGwoOdHJhbnNhY3Rpb25faWQYAiABKAxIAIgBAUIRCg9fdHJhbnNhY3Rpb25faWQiLgoNU3Vic3RyYWl0UGxhbhIMCgRwbGFuGAEgASgMEg8KB3ZlcnNpb24YAiABKAkikgEKKEFjdGlvbkNyZWF0ZVByZXBhcmVkU3Vic3RyYWl0UGxhblJlcXVlc3QSNgoEcGxhbhgBIAEoCzIoLmFycm93LmZsaWdodC5wcm90b2NvbC5zcWwuU3Vic3RyYWl0UGxhbhIbCg50cmFuc2FjdGlvbl9pZBgCIAEoDEgAiAEBQhEKD190cmFuc2FjdGlvbl9pZCJ6CiNBY3Rpb25DcmVhdGVQcmVwYXJlZFN0YXRlbWVudFJlc3VsdBIhChlwcmVwYXJlZF9zdGF0ZW1lbnRfaGFuZGxlGAEgASgMEhYKDmRhdGFzZXRfc2NoZW1hGAIgASgMEhgKEHBhcmFtZXRlcl9zY2hlbWEYAyABKAwiSAojQWN0aW9uQ2xvc2VQcmVwYXJlZFN0YXRlbWVudFJlcXVlc3QSIQoZcHJlcGFyZWRfc3RhdGVtZW50X2hhbmRsZRgBIAEoDCIfCh1BY3Rpb25CZWdpblRyYW5zYWN0aW9uUmVxdWVzdCJDChtBY3Rpb25CZWdpblNhdmVwb2ludFJlcXVlc3QSFgoOdHJhbnNhY3Rpb25faWQYASABKAwSDAoEbmFtZRgCIAEoCSI2ChxBY3Rpb25CZWdpblRyYW5zYWN0aW9uUmVzdWx0EhYKDnRyYW5zYWN0aW9uX2lkGAEgASgMIjIKGkFjdGlvbkJlZ2luU2F2ZXBvaW50UmVzdWx0EhQKDHNhdmVwb2ludF9pZBgBIAEoDCL5AQobQWN0aW9uRW5kVHJhbnNhY3Rpb25SZXF1ZXN0EhYKDnRyYW5zYWN0aW9uX2lkGAEgASgMElUKBmFjdGlvb
hgCIAEoDjJFLmFycm93LmZsaWdodC5wcm90b2NvbC5zcWwuQWN0aW9uRW5kVHJhbnNhY3Rpb25SZXF1ZXN0LkVuZFRyYW5zYWN0aW9uImsKDkVuZFRyYW5zYWN0aW9uEh8KG0VORF9UUkFOU0FDVElPTl9VTlNQRUNJRklFRBAAEhoKFkVORF9UUkFOU0FDVElPTl9DT01NSVQQARIcChhFTkRfVFJBTlNBQ1RJT05fUk9MTEJBQ0sQAiLqAQoZQWN0aW9uRW5kU2F2ZXBvaW50UmVxdWVzdBIUCgxzYXZlcG9pbnRfaWQYASABKAwSUQoGYWN0aW9uGAIgASgOMkEuYXJyb3cuZmxpZ2h0LnByb3RvY29sLnNxbC5BY3Rpb25FbmRTYXZlcG9pbnRSZXF1ZXN0LkVuZFNhdmVwb2ludCJkCgxFbmRTYXZlcG9pbnQSHQoZRU5EX1NBVkVQT0lOVF9VTlNQRUNJRklFRBAAEhkKFUVORF9TQVZFUE9JTlRfUkVMRUFTRRABEhoKFkVORF9TQVZFUE9JTlRfUk9MTEJBQ0sQAiJWChVDb21tYW5kU3RhdGVtZW50UXVlcnkSDQoFcXVlcnkYASABKAkSGwoOdHJhbnNhY3Rpb25faWQYAiABKAxIAIgBAUIRCg9fdHJhbnNhY3Rpb25faWQihwEKHUNvbW1hbmRTdGF0ZW1lbnRTdWJzdHJhaXRQbGFuEjYKBHBsYW4YASABKAsyKC5hcnJvdy5mbGlnaHQucHJvdG9jb2wuc3FsLlN1YnN0cmFpdFBsYW4SGwoOdHJhbnNhY3Rpb25faWQYAiABKAxIAIgBAUIRCg9fdHJhbnNhY3Rpb25faWQiMAoUVGlja2V0U3RhdGVtZW50UXVlcnkSGAoQc3RhdGVtZW50X2hhbmRsZRgBIAEoDCJCCh1Db21tYW5kUHJlcGFyZWRTdGF0ZW1lbnRRdWVyeRIhChlwcmVwYXJlZF9zdGF0ZW1lbnRfaGFuZGxlGAEgASgMIlcKFkNvbW1hbmRTdGF0ZW1lbnRVcGRhdGUSDQoFcXVlcnkYASABKAkSGwoOdHJhbnNhY3Rpb25faWQYAiABKAxIAIgBAUIRCg9fdHJhbnNhY3Rpb25faWQiQwoeQ29tbWFuZFByZXBhcmVkU3RhdGVtZW50VXBkYXRlEiEKGXByZXBhcmVkX3N0YXRlbWVudF9oYW5kbGUYASABKAwitgcKFkNvbW1hbmRTdGF0ZW1lbnRJbmdlc3QSagoYdGFibGVfZGVmaW5pdGlvbl9vcHRpb25zGAEgASgLMkguYXJyb3cuZmxpZ2h0LnByb3RvY29sLnNxbC5Db21tYW5kU3RhdGVtZW50SW5nZXN0LlRhYmxlRGVmaW5pdGlvbk9wdGlvbnMSDQoFdGFibGUYAiABKAkSEwoGc2NoZW1hGAMgASgJSACIAQESFAoHY2F0YWxvZxgEIAEoCUgBiAEBEhEKCXRlbXBvcmFyeRgFIAEoCBIbCg50cmFuc2FjdGlvbl9pZBgGIAEoDEgCiAEBElAKB29wdGlvbnMY6AcgAygLMj4uYXJyb3cuZmxpZ2h0LnByb3RvY29sLnNxbC5Db21tYW5kU3RhdGVtZW50SW5nZXN0Lk9wdGlvbnNFbnRyeRqZBAoWVGFibGVEZWZpbml0aW9uT3B0aW9ucxJyCgxpZl9ub3RfZXhpc3QYASABKA4yXC5hcnJvdy5mbGlnaHQucHJvdG9jb2wuc3FsLkNvbW1hbmRTdGF0ZW1lbnRJbmdlc3QuVGFibGVEZWZpbml0aW9uT3B0aW9ucy5UYWJsZU5vdEV4aXN0T3B0aW9uEm0KCWlmX2V4aXN0cxgCIAEoDjJaLmFycm93LmZsaWdodC5wcm90b2NvbC5zcWwuQ29tbWFuZFN0YXRlbWVudEluZ2VzdC5UYWJsZURlZmluaXRpb25PcHRpb25zLlRhYmxlRXhpc3RzT3B0aW9uIoEBChNUYWJsZU5vdEV4aXN0T3B0aW9uEiYKIlRBQkxFX05PVF9FWElTVF9PUFRJT05fVU5TUEVDSUZJRUQQABIhCh1UQUJMRV9OT1RfRVhJU1RfT1BUSU9OX0NSRUFURRABEh8KG1RBQkxFX05PVF9FWElTVF9PUFRJT05fRkFJTBACIpcBChFUYWJsZUV4aXN0c09wdGlvbhIjCh9UQUJMRV9FWElTVFNfT1BUSU9OX1VOU1BFQ0lGSUVEEAASHAoYVEFCTEVfRVhJU1RTX09QVElPTl9GQUlMEAESHgoaVEFCTEVfRVhJU1RTX09QVElPTl9BUFBFTkQQAhIfChtUQUJMRV9FWElTVFNfT1BUSU9OX1JFUExBQ0UQAxouCgxPcHRpb25zRW50cnkSCwoDa2V5GAEgASgJEg0KBXZhbHVlGAIgASgJOgI4AUIJCgdfc2NoZW1hQgoKCF9jYXRhbG9nQhEKD190cmFuc2FjdGlvbl9pZCIpChFEb1B1dFVwZGF0ZVJlc3VsdBIUCgxyZWNvcmRfY291bnQYASABKAMiZAocRG9QdXRQcmVwYXJlZFN0YXRlbWVudFJlc3VsdBImChlwcmVwYXJlZF9zdGF0ZW1lbnRfaGFuZGxlGAEgASgMSACIAQFCHAoaX3ByZXBhcmVkX3N0YXRlbWVudF9oYW5kbGUiLAoYQWN0aW9uQ2FuY2VsUXVlcnlSZXF1ZXN0EgwKBGluZm8YASABKAw6AhgBIvwBChdBY3Rpb25DYW5jZWxRdWVyeVJlc3VsdBJPCgZyZXN1bHQYASABKA4yPy5hcnJvdy5mbGlnaHQucHJvdG9jb2wuc3FsLkFjdGlvbkNhbmNlbFF1ZXJ5UmVzdWx0LkNhbmNlbFJlc3VsdCKLAQoMQ2FuY2VsUmVzdWx0Eh0KGUNBTkNFTF9SRVNVTFRfVU5TUEVDSUZJRUQQABIbChdDQU5DRUxfUkVTVUxUX0NBTkNFTExFRBABEhwKGENBTkNFTF9SRVNVTFRfQ0FOQ0VMTElORxACEiEKHUNBTkNFTF9SRVNVTFRfTk9UX0NBTkNFTExBQkxFEAM6AhgBKpIZCgdTcWxJbmZvEhoKFkZMSUdIVF9TUUxfU0VSVkVSX05BTUUQABIdChlGTElHSFRfU1FMX1NFUlZFUl9WRVJTSU9OEAESIwofRkxJR0hUX1NRTF9TRVJWRVJfQVJST1dfVkVSU0lPThACEh8KG0ZMSUdIVF9TUUxfU0VSVkVSX1JFQURfT05MWRADEhkKFUZMSUdIVF9TUUxfU0VSVkVSX1NRTBAEEh8KG0ZMSUdIVF9TUUxfU0VSVkVSX1NVQlNUUkFJVBAFEisKJ0ZMSUdIVF9TUUxfU0VSVkVSX1NVQlNUUkFJVF9NSU5fVkVSU0lPThAGEisKJ0ZMSUdIVF9TUUxfU0VSVkVSX1NVQlNUUkFJVF9NQVhfVkVSU0lPThAHEiEKHUZMSUdIVF9TUUxfU0VSVkVSX1RSQU5TQUNUSU9OEAgSHAoYRkxJR0hUX1NRTF9T
RVJWRVJfQ0FOQ0VMEAkSJAogRkxJR0hUX1NRTF9TRVJWRVJfQlVMS19JTkdFU1RJT04QChIzCi9GTElHSFRfU1FMX1NFUlZFUl9JTkdFU1RfVFJBTlNBQ1RJT05TX1NVUFBPUlRFRBALEicKI0ZMSUdIVF9TUUxfU0VSVkVSX1NUQVRFTUVOVF9USU1FT1VUEGQSKQolRkxJR0hUX1NRTF9TRVJWRVJfVFJBTlNBQ1RJT05fVElNRU9VVBBlEhQKD1NRTF9ERExfQ0FUQUxPRxD0AxITCg5TUUxfRERMX1NDSEVNQRD1AxISCg1TUUxfRERMX1RBQkxFEPYDEhgKE1NRTF9JREVOVElGSUVSX0NBU0UQ9wMSHgoZU1FMX0lERU5USUZJRVJfUVVPVEVfQ0hBUhD4AxIfChpTUUxfUVVPVEVEX0lERU5USUZJRVJfQ0FTRRD5AxIiCh1TUUxfQUxMX1RBQkxFU19BUkVfU0VMRUNUQUJMRRD6AxIWChFTUUxfTlVMTF9PUkRFUklORxD7AxIRCgxTUUxfS0VZV09SRFMQ/AMSGgoVU1FMX05VTUVSSUNfRlVOQ1RJT05TEP0DEhkKFFNRTF9TVFJJTkdfRlVOQ1RJT05TEP4DEhkKFFNRTF9TWVNURU1fRlVOQ1RJT05TEP8DEhsKFlNRTF9EQVRFVElNRV9GVU5DVElPTlMQgAQSHQoYU1FMX1NFQVJDSF9TVFJJTkdfRVNDQVBFEIEEEh4KGVNRTF9FWFRSQV9OQU1FX0NIQVJBQ1RFUlMQggQSIQocU1FMX1NVUFBPUlRTX0NPTFVNTl9BTElBU0lORxCDBBIfChpTUUxfTlVMTF9QTFVTX05VTExfSVNfTlVMTBCEBBIZChRTUUxfU1VQUE9SVFNfQ09OVkVSVBCFBBIpCiRTUUxfU1VQUE9SVFNfVEFCTEVfQ09SUkVMQVRJT05fTkFNRVMQhgQSMwouU1FMX1NVUFBPUlRTX0RJRkZFUkVOVF9UQUJMRV9DT1JSRUxBVElPTl9OQU1FUxCHBBIpCiRTUUxfU1VQUE9SVFNfRVhQUkVTU0lPTlNfSU5fT1JERVJfQlkQiAQSJAofU1FMX1NVUFBPUlRTX09SREVSX0JZX1VOUkVMQVRFRBCJBBIbChZTUUxfU1VQUE9SVEVEX0dST1VQX0JZEIoEEiQKH1NRTF9TVVBQT1JUU19MSUtFX0VTQ0FQRV9DTEFVU0UQiwQSJgohU1FMX1NVUFBPUlRTX05PTl9OVUxMQUJMRV9DT0xVTU5TEIwEEhoKFVNRTF9TVVBQT1JURURfR1JBTU1BUhCNBBIfChpTUUxfQU5TSTkyX1NVUFBPUlRFRF9MRVZFTBCOBBIwCitTUUxfU1VQUE9SVFNfSU5URUdSSVRZX0VOSEFOQ0VNRU5UX0ZBQ0lMSVRZEI8EEiIKHVNRTF9PVVRFUl9KT0lOU19TVVBQT1JUX0xFVkVMEJAEEhQKD1NRTF9TQ0hFTUFfVEVSTRCRBBIXChJTUUxfUFJPQ0VEVVJFX1RFUk0QkgQSFQoQU1FMX0NBVEFMT0dfVEVSTRCTBBIZChRTUUxfQ0FUQUxPR19BVF9TVEFSVBCUBBIiCh1TUUxfU0NIRU1BU19TVVBQT1JURURfQUNUSU9OUxCVBBIjCh5TUUxfQ0FUQUxPR1NfU1VQUE9SVEVEX0FDVElPTlMQlgQSJgohU1FMX1NVUFBPUlRFRF9QT1NJVElPTkVEX0NPTU1BTkRTEJcEEiQKH1NRTF9TRUxFQ1RfRk9SX1VQREFURV9TVVBQT1JURUQQmAQSJAofU1FMX1NUT1JFRF9QUk9DRURVUkVTX1NVUFBPUlRFRBCZBBIdChhTUUxfU1VQUE9SVEVEX1NVQlFVRVJJRVMQmgQSKAojU1FMX0NPUlJFTEFURURfU1VCUVVFUklFU19TVVBQT1JURUQQmwQSGQoUU1FMX1NVUFBPUlRFRF9VTklPTlMQnAQSIgodU1FMX01BWF9CSU5BUllfTElURVJBTF9MRU5HVEgQnQQSIAobU1FMX01BWF9DSEFSX0xJVEVSQUxfTEVOR1RIEJ4EEh8KGlNRTF9NQVhfQ09MVU1OX05BTUVfTEVOR1RIEJ8EEiAKG1NRTF9NQVhfQ09MVU1OU19JTl9HUk9VUF9CWRCgBBIdChhTUUxfTUFYX0NPTFVNTlNfSU5fSU5ERVgQoQQSIAobU1FMX01BWF9DT0xVTU5TX0lOX09SREVSX0JZEKIEEh4KGVNRTF9NQVhfQ09MVU1OU19JTl9TRUxFQ1QQowQSHQoYU1FMX01BWF9DT0xVTU5TX0lOX1RBQkxFEKQEEhgKE1NRTF9NQVhfQ09OTkVDVElPTlMQpQQSHwoaU1FMX01BWF9DVVJTT1JfTkFNRV9MRU5HVEgQpgQSGQoUU1FMX01BWF9JTkRFWF9MRU5HVEgQpwQSHgoZU1FMX0RCX1NDSEVNQV9OQU1FX0xFTkdUSBCoBBIiCh1TUUxfTUFYX1BST0NFRFVSRV9OQU1FX0xFTkdUSBCpBBIgChtTUUxfTUFYX0NBVEFMT0dfTkFNRV9MRU5HVEgQqgQSFQoQU1FMX01BWF9ST1dfU0laRRCrBBIkCh9TUUxfTUFYX1JPV19TSVpFX0lOQ0xVREVTX0JMT0JTEKwEEh0KGFNRTF9NQVhfU1RBVEVNRU5UX0xFTkdUSBCtBBIXChJTUUxfTUFYX1NUQVRFTUVOVFMQrgQSHgoZU1FMX01BWF9UQUJMRV9OQU1FX0xFTkdUSBCvBBIdChhTUUxfTUFYX1RBQkxFU19JTl9TRUxFQ1QQsAQSHAoXU1FMX01BWF9VU0VSTkFNRV9MRU5HVEgQsQQSJgohU1FMX0RFRkFVTFRfVFJBTlNBQ1RJT05fSVNPTEFUSU9OELIEEh8KGlNRTF9UUkFOU0FDVElPTlNfU1VQUE9SVEVEELMEEjAKK1NRTF9TVVBQT1JURURfVFJBTlNBQ1RJT05TX0lTT0xBVElPTl9MRVZFTFMQtAQSMgotU1FMX0RBVEFfREVGSU5JVElPTl9DQVVTRVNfVFJBTlNBQ1RJT05fQ09NTUlUELUEEjEKLFNRTF9EQVRBX0RFRklOSVRJT05TX0lOX1RSQU5TQUNUSU9OU19JR05PUkVEELYEEiMKHlNRTF9TVVBQT1JURURfUkVTVUxUX1NFVF9UWVBFUxC3BBI7CjZTUUxfU1VQUE9SVEVEX0NPTkNVUlJFTkNJRVNfRk9SX1JFU1VMVF9TRVRfVU5TUEVDSUZJRUQQuAQSPAo3U1FMX1NVUFBPUlRFRF9DT05DVVJSRU5DSUVTX0ZPUl9SRVNVTFRfU0VUX0ZPUldBUkRfT05MWRC5BBJACjtTUUxfU1VQUE9SVEVEX0NPTkNVUlJFTkNJRVNfRk9SX1JFU1VMVF9TRVRfU0NST0xMX1NFTlNJVElWRRC6BBJCCj1TUUxfU1VQUE9SVEVEX0NPTkNVUlJFTkNJRVNfRk9SX1JFU1VMVF9
TRVRfU0NST0xMX0lOU0VOU0lUSVZFELsEEiAKG1NRTF9CQVRDSF9VUERBVEVTX1NVUFBPUlRFRBC8BBIdChhTUUxfU0FWRVBPSU5UU19TVVBQT1JURUQQvQQSIwoeU1FMX05BTUVEX1BBUkFNRVRFUlNfU1VQUE9SVEVEEL4EEh0KGFNRTF9MT0NBVE9SU19VUERBVEVfQ09QWRC/BBI1CjBTUUxfU1RPUkVEX0ZVTkNUSU9OU19VU0lOR19DQUxMX1NZTlRBWF9TVVBQT1JURUQQwAQqkQEKF1NxbFN1cHBvcnRlZFRyYW5zYWN0aW9uEiIKHlNRTF9TVVBQT1JURURfVFJBTlNBQ1RJT05fTk9ORRAAEikKJVNRTF9TVVBQT1JURURfVFJBTlNBQ1RJT05fVFJBTlNBQ1RJT04QARInCiNTUUxfU1VQUE9SVEVEX1RSQU5TQUNUSU9OX1NBVkVQT0lOVBACKrIBChtTcWxTdXBwb3J0ZWRDYXNlU2Vuc2l0aXZpdHkSIAocU1FMX0NBU0VfU0VOU0lUSVZJVFlfVU5LTk9XThAAEikKJVNRTF9DQVNFX1NFTlNJVElWSVRZX0NBU0VfSU5TRU5TSVRJVkUQARIiCh5TUUxfQ0FTRV9TRU5TSVRJVklUWV9VUFBFUkNBU0UQAhIiCh5TUUxfQ0FTRV9TRU5TSVRJVklUWV9MT1dFUkNBU0UQAyqCAQoPU3FsTnVsbE9yZGVyaW5nEhkKFVNRTF9OVUxMU19TT1JURURfSElHSBAAEhgKFFNRTF9OVUxMU19TT1JURURfTE9XEAESHQoZU1FMX05VTExTX1NPUlRFRF9BVF9TVEFSVBACEhsKF1NRTF9OVUxMU19TT1JURURfQVRfRU5EEAMqXgoTU3VwcG9ydGVkU3FsR3JhbW1hchIXChNTUUxfTUlOSU1VTV9HUkFNTUFSEAASFAoQU1FMX0NPUkVfR1JBTU1BUhABEhgKFFNRTF9FWFRFTkRFRF9HUkFNTUFSEAIqaAoeU3VwcG9ydGVkQW5zaTkyU3FsR3JhbW1hckxldmVsEhQKEEFOU0k5Ml9FTlRSWV9TUUwQABIbChdBTlNJOTJfSU5URVJNRURJQVRFX1NRTBABEhMKD0FOU0k5Ml9GVUxMX1NRTBACKm0KGVNxbE91dGVySm9pbnNTdXBwb3J0TGV2ZWwSGQoVU1FMX0pPSU5TX1VOU1VQUE9SVEVEEAASGwoXU1FMX0xJTUlURURfT1VURVJfSk9JTlMQARIYChRTUUxfRlVMTF9PVVRFUl9KT0lOUxACKlEKE1NxbFN1cHBvcnRlZEdyb3VwQnkSGgoWU1FMX0dST1VQX0JZX1VOUkVMQVRFRBAAEh4KGlNRTF9HUk9VUF9CWV9CRVlPTkRfU0VMRUNUEAEqkAEKGlNxbFN1cHBvcnRlZEVsZW1lbnRBY3Rpb25zEiIKHlNRTF9FTEVNRU5UX0lOX1BST0NFRFVSRV9DQUxMUxAAEiQKIFNRTF9FTEVNRU5UX0lOX0lOREVYX0RFRklOSVRJT05TEAESKAokU1FMX0VMRU1FTlRfSU5fUFJJVklMRUdFX0RFRklOSVRJT05TEAIqVgoeU3FsU3VwcG9ydGVkUG9zaXRpb25lZENvbW1hbmRzEhkKFVNRTF9QT1NJVElPTkVEX0RFTEVURRAAEhkKFVNRTF9QT1NJVElPTkVEX1VQREFURRABKpcBChZTcWxTdXBwb3J0ZWRTdWJxdWVyaWVzEiEKHVNRTF9TVUJRVUVSSUVTX0lOX0NPTVBBUklTT05TEAASHAoYU1FMX1NVQlFVRVJJRVNfSU5fRVhJU1RTEAESGQoVU1FMX1NVQlFVRVJJRVNfSU5fSU5TEAISIQodU1FMX1NVQlFVRVJJRVNfSU5fUVVBTlRJRklFRFMQAyo2ChJTcWxTdXBwb3J0ZWRVbmlvbnMSDQoJU1FMX1VOSU9OEAASEQoNU1FMX1VOSU9OX0FMTBABKskBChxTcWxUcmFuc2FjdGlvbklzb2xhdGlvbkxldmVsEhgKFFNRTF9UUkFOU0FDVElPTl9OT05FEAASJAogU1FMX1RSQU5TQUNUSU9OX1JFQURfVU5DT01NSVRURUQQARIiCh5TUUxfVFJBTlNBQ1RJT05fUkVBRF9DT01NSVRURUQQAhIjCh9TUUxfVFJBTlNBQ1RJT05fUkVQRUFUQUJMRV9SRUFEEAMSIAocU1FMX1RSQU5TQUNUSU9OX1NFUklBTElaQUJMRRAEKokBChhTcWxTdXBwb3J0ZWRUcmFuc2FjdGlvbnMSHwobU1FMX1RSQU5TQUNUSU9OX1VOU1BFQ0lGSUVEEAASJAogU1FMX0RBVEFfREVGSU5JVElPTl9UUkFOU0FDVElPTlMQARImCiJTUUxfREFUQV9NQU5JUFVMQVRJT05fVFJBTlNBQ1RJT05TEAIqvAEKGVNxbFN1cHBvcnRlZFJlc3VsdFNldFR5cGUSIwofU1FMX1JFU1VMVF9TRVRfVFlQRV9VTlNQRUNJRklFRBAAEiQKIFNRTF9SRVNVTFRfU0VUX1RZUEVfRk9SV0FSRF9PTkxZEAESKgomU1FMX1JFU1VMVF9TRVRfVFlQRV9TQ1JPTExfSU5TRU5TSVRJVkUQAhIoCiRTUUxfUkVTVUxUX1NFVF9UWVBFX1NDUk9MTF9TRU5TSVRJVkUQAyqiAQogU3FsU3VwcG9ydGVkUmVzdWx0U2V0Q29uY3VycmVuY3kSKgomU1FMX1JFU1VMVF9TRVRfQ09OQ1VSUkVOQ1lfVU5TUEVDSUZJRUQQABIoCiRTUUxfUkVTVUxUX1NFVF9DT05DVVJSRU5DWV9SRUFEX09OTFkQARIoCiRTUUxfUkVTVUxUX1NFVF9DT05DVVJSRU5DWV9VUERBVEFCTEUQAiqZBAoSU3FsU3VwcG9ydHNDb252ZXJ0EhYKElNRTF9DT05WRVJUX0JJR0lOVBAAEhYKElNRTF9DT05WRVJUX0JJTkFSWRABEhMKD1NRTF9DT05WRVJUX0JJVBACEhQKEFNRTF9DT05WRVJUX0NIQVIQAxIUChBTUUxfQ09OVkVSVF9EQVRFEAQSFwoTU1FMX0NPTlZFUlRfREVDSU1BTBAFEhUKEVNRTF9DT05WRVJUX0ZMT0FUEAYSFwoTU1FMX0NPTlZFUlRfSU5URUdFUhAHEiEKHVNRTF9DT05WRVJUX0lOVEVSVkFMX0RBWV9USU1FEAgSIwofU1FMX0NPTlZFUlRfSU5URVJWQUxfWUVBUl9NT05USBAJEh0KGVNRTF9DT05WRVJUX0xPTkdWQVJCSU5BUlkQChIbChdTUUxfQ09OVkVSVF9MT05HVkFSQ0hBUhALEhcKE1NRTF9DT05WRVJUX05VTUVSSUMQDBIUChBTUUxfQ09OVkVSVF9SRUFMEA0SGAoUU1FMX0NPTlZFUlRfU01BTExJTlQQDhIUChBTUUxfQ09OVkVSVF9USU1FEA8SGQoVU1FMX0NPTlZFUl
RfVElNRVNUQU1QEBASFwoTU1FMX0NPTlZFUlRfVElOWUlOVBAREhkKFVNRTF9DT05WRVJUX1ZBUkJJTkFSWRASEhcKE1NRTF9DT05WRVJUX1ZBUkNIQVIQEyqPBAoMWGRiY0RhdGFUeXBlEhUKEVhEQkNfVU5LTk9XTl9UWVBFEAASDQoJWERCQ19DSEFSEAESEAoMWERCQ19OVU1FUklDEAISEAoMWERCQ19ERUNJTUFMEAMSEAoMWERCQ19JTlRFR0VSEAQSEQoNWERCQ19TTUFMTElOVBAFEg4KClhEQkNfRkxPQVQQBhINCglYREJDX1JFQUwQBxIPCgtYREJDX0RPVUJMRRAIEhEKDVhEQkNfREFURVRJTUUQCRIRCg1YREJDX0lOVEVSVkFMEAoSEAoMWERCQ19WQVJDSEFSEAwSDQoJWERCQ19EQVRFEFsSDQoJWERCQ19USU1FEFwSEgoOWERCQ19USU1FU1RBTVAQXRIdChBYREJDX0xPTkdWQVJDSEFSEP///////////wESGAoLWERCQ19CSU5BUlkQ/v//////////ARIbCg5YREJDX1ZBUkJJTkFSWRD9//////////8BEh8KElhEQkNfTE9OR1ZBUkJJTkFSWRD8//////////8BEhgKC1hEQkNfQklHSU5UEPv//////////wESGQoMWERCQ19USU5ZSU5UEPr//////////wESFQoIWERCQ19CSVQQ+f//////////ARIXCgpYREJDX1dDSEFSEPj//////////wESGgoNWERCQ19XVkFSQ0hBUhD3//////////8BKqMIChNYZGJjRGF0ZXRpbWVTdWJjb2RlEhgKFFhEQkNfU1VCQ09ERV9VTktOT1dOEAASFQoRWERCQ19TVUJDT0RFX1lFQVIQARIVChFYREJDX1NVQkNPREVfREFURRABEhUKEVhEQkNfU1VCQ09ERV9USU1FEAISFgoSWERCQ19TVUJDT0RFX01PTlRIEAISGgoWWERCQ19TVUJDT0RFX1RJTUVTVEFNUBADEhQKEFhEQkNfU1VCQ09ERV9EQVkQAxIjCh9YREJDX1NVQkNPREVfVElNRV9XSVRIX1RJTUVaT05FEAQSFQoRWERCQ19TVUJDT0RFX0hPVVIQBBIoCiRYREJDX1NVQkNPREVfVElNRVNUQU1QX1dJVEhfVElNRVpPTkUQBRIXChNYREJDX1NVQkNPREVfTUlOVVRFEAUSFwoTWERCQ19TVUJDT0RFX1NFQ09ORBAGEh4KGlhEQkNfU1VCQ09ERV9ZRUFSX1RPX01PTlRIEAcSHAoYWERCQ19TVUJDT0RFX0RBWV9UT19IT1VSEAgSHgoaWERCQ19TVUJDT0RFX0RBWV9UT19NSU5VVEUQCRIeChpYREJDX1NVQkNPREVfREFZX1RPX1NFQ09ORBAKEh8KG1hEQkNfU1VCQ09ERV9IT1VSX1RPX01JTlVURRALEh8KG1hEQkNfU1VCQ09ERV9IT1VSX1RPX1NFQ09ORBAMEiEKHVhEQkNfU1VCQ09ERV9NSU5VVEVfVE9fU0VDT05EEA0SHgoaWERCQ19TVUJDT0RFX0lOVEVSVkFMX1lFQVIQZRIfChtYREJDX1NVQkNPREVfSU5URVJWQUxfTU9OVEgQZhIdChlYREJDX1NVQkNPREVfSU5URVJWQUxfREFZEGcSHgoaWERCQ19TVUJDT0RFX0lOVEVSVkFMX0hPVVIQaBIgChxYREJDX1NVQkNPREVfSU5URVJWQUxfTUlOVVRFEGkSIAocWERCQ19TVUJDT0RFX0lOVEVSVkFMX1NFQ09ORBBqEicKI1hEQkNfU1VCQ09ERV9JTlRFUlZBTF9ZRUFSX1RPX01PTlRIEGsSJQohWERCQ19TVUJDT0RFX0lOVEVSVkFMX0RBWV9UT19IT1VSEGwSJwojWERCQ19TVUJDT0RFX0lOVEVSVkFMX0RBWV9UT19NSU5VVEUQbRInCiNYREJDX1NVQkNPREVfSU5URVJWQUxfREFZX1RPX1NFQ09ORBBuEigKJFhEQkNfU1VCQ09ERV9JTlRFUlZBTF9IT1VSX1RPX01JTlVURRBvEigKJFhEQkNfU1VCQ09ERV9JTlRFUlZBTF9IT1VSX1RPX1NFQ09ORBBwEioKJlhEQkNfU1VCQ09ERV9JTlRFUlZBTF9NSU5VVEVfVE9fU0VDT05EEHEaAhABKlcKCE51bGxhYmxlEhgKFE5VTExBQklMSVRZX05PX05VTExTEAASGAoUTlVMTEFCSUxJVFlfTlVMTEFCTEUQARIXChNOVUxMQUJJTElUWV9VTktOT1dOEAIqYQoKU2VhcmNoYWJsZRITCg9TRUFSQ0hBQkxFX05PTkUQABITCg9TRUFSQ0hBQkxFX0NIQVIQARIUChBTRUFSQ0hBQkxFX0JBU0lDEAISEwoPU0VBUkNIQUJMRV9GVUxMEAMqXAoRVXBkYXRlRGVsZXRlUnVsZXMSCwoHQ0FTQ0FERRAAEgwKCFJFU1RSSUNUEAESDAoIU0VUX05VTEwQAhINCglOT19BQ1RJT04QAxIPCgtTRVRfREVGQVVMVBAEOkQKDGV4cGVyaW1lbnRhbBIfLmdvb2dsZS5wcm90b2J1Zi5NZXNzYWdlT3B0aW9ucxjoByABKAhSDGV4cGVyaW1lbnRhbEJWCiBvcmcuYXBhY2hlLmFycm93LmZsaWdodC5zcWwuaW1wbFoyZ2l0aHViLmNvbS9hcGFjaGUvYXJyb3ctZ28vYXJyb3cvZmxpZ2h0L2dlbi9mbGlnaHRiBnByb3RvMw", [file_google_protobuf_descriptor]); + +/** + * + * Represents a metadata request. Used in the command member of FlightDescriptor + * for the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * - GetFlightInfo: execute the metadata request. + * + * The returned Arrow schema will be: + * < + * info_name: uint32 not null, + * value: dense_union< + * string_value: utf8, + * bool_value: bool, + * bigint_value: int64, + * int32_bitmask: int32, + * string_list: list + * int32_to_int32_list_map: map> + * > + * where there is one row per requested piece of metadata information. 
+ * + * @generated from message arrow.flight.protocol.sql.CommandGetSqlInfo + */ +export type CommandGetSqlInfo = Message<"arrow.flight.protocol.sql.CommandGetSqlInfo"> & { + /** + * + * Values are modelled after ODBC's SQLGetInfo() function. This information is intended to provide + * Flight SQL clients with basic, SQL syntax and SQL functions related information. + * More information types can be added in future releases. + * E.g. more SQL syntax support types, scalar functions support, type conversion support etc. + * + * Note that the set of metadata may expand. + * + * Initially, Flight SQL will support the following information types: + * - Server Information - Range [0-500) + * - Syntax Information - Range [500-1000) + * Range [0-10,000) is reserved for defaults (see SqlInfo enum for default options). + * Custom options should start at 10,000. + * + * If omitted, then all metadata will be retrieved. + * Flight SQL Servers may choose to include additional metadata above and beyond the specified set, however they must + * at least return the specified set. IDs ranging from 0 to 10,000 (exclusive) are reserved for future use. + * If additional metadata is included, the metadata IDs should start from 10,000. + * + * @generated from field: repeated uint32 info = 1; + */ + info: number[]; +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandGetSqlInfo. + * Use `create(CommandGetSqlInfoSchema)` to create a new message. + */ +export const CommandGetSqlInfoSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 0); + +/** + * + * Represents a request to retrieve information about data type supported on a Flight SQL enabled backend. + * Used in the command member of FlightDescriptor for the following RPC calls: + * - GetSchema: return the schema of the query. + * - GetFlightInfo: execute the catalog metadata request. + * + * The returned schema will be: + * < + * type_name: utf8 not null (The name of the data type, for example: VARCHAR, INTEGER, etc), + * data_type: int32 not null (The SQL data type), + * column_size: int32 (The maximum size supported by that column. + * In case of exact numeric types, this represents the maximum precision. + * In case of string types, this represents the character length. + * In case of datetime data types, this represents the length in characters of the string representation. + * NULL is returned for data types where column size is not applicable.), + * literal_prefix: utf8 (Character or characters used to prefix a literal, NULL is returned for + * data types where a literal prefix is not applicable.), + * literal_suffix: utf8 (Character or characters used to terminate a literal, + * NULL is returned for data types where a literal suffix is not applicable.), + * create_params: list + * (A list of keywords corresponding to which parameters can be used when creating + * a column for that specific type. + * NULL is returned if there are no parameters for the data type definition.), + * nullable: int32 not null (Shows if the data type accepts a NULL value. The possible values can be seen in the + * Nullable enum.), + * case_sensitive: bool not null (Shows if a character data type is case-sensitive in collations and comparisons), + * searchable: int32 not null (Shows how the data type is used in a WHERE clause. The possible values can be seen in the + * Searchable enum.), + * unsigned_attribute: bool (Shows if the data type is unsigned. 
NULL is returned if the attribute is + * not applicable to the data type or the data type is not numeric.), + * fixed_prec_scale: bool not null (Shows if the data type has predefined fixed precision and scale.), + * auto_increment: bool (Shows if the data type is auto incremental. NULL is returned if the attribute + * is not applicable to the data type or the data type is not numeric.), + * local_type_name: utf8 (Localized version of the data source-dependent name of the data type. NULL + * is returned if a localized name is not supported by the data source), + * minimum_scale: int32 (The minimum scale of the data type on the data source. + * If a data type has a fixed scale, the MINIMUM_SCALE and MAXIMUM_SCALE + * columns both contain this value. NULL is returned if scale is not applicable.), + * maximum_scale: int32 (The maximum scale of the data type on the data source. + * NULL is returned if scale is not applicable.), + * sql_data_type: int32 not null (The value of the SQL DATA TYPE which has the same values + * as data_type value. Except for interval and datetime, which + * uses generic values. More info about those types can be + * obtained through datetime_subcode. The possible values can be seen + * in the XdbcDataType enum.), + * datetime_subcode: int32 (Only used when the SQL DATA TYPE is interval or datetime. It contains + * its sub types. For type different from interval and datetime, this value + * is NULL. The possible values can be seen in the XdbcDatetimeSubcode enum.), + * num_prec_radix: int32 (If the data type is an approximate numeric type, this column contains + * the value 2 to indicate that COLUMN_SIZE specifies a number of bits. For + * exact numeric types, this column contains the value 10 to indicate that + * column size specifies a number of decimal digits. Otherwise, this column is NULL.), + * interval_precision: int32 (If the data type is an interval data type, then this column contains the value + * of the interval leading precision. Otherwise, this column is NULL. This fields + * is only relevant to be used by ODBC). + * > + * The returned data should be ordered by data_type and then by type_name. + * + * @generated from message arrow.flight.protocol.sql.CommandGetXdbcTypeInfo + */ +export type CommandGetXdbcTypeInfo = Message<"arrow.flight.protocol.sql.CommandGetXdbcTypeInfo"> & { + /** + * + * Specifies the data type to search for the info. + * + * @generated from field: optional int32 data_type = 1; + */ + dataType?: number; +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandGetXdbcTypeInfo. + * Use `create(CommandGetXdbcTypeInfoSchema)` to create a new message. + */ +export const CommandGetXdbcTypeInfoSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 1); + +/** + * + * Represents a request to retrieve the list of catalogs on a Flight SQL enabled backend. + * The definition of a catalog depends on vendor/implementation. It is usually the database itself + * Used in the command member of FlightDescriptor for the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * - GetFlightInfo: execute the catalog metadata request. + * + * The returned Arrow schema will be: + * < + * catalog_name: utf8 not null + * > + * The returned data should be ordered by catalog_name. 
+ * + * @generated from message arrow.flight.protocol.sql.CommandGetCatalogs + */ +export type CommandGetCatalogs = Message<"arrow.flight.protocol.sql.CommandGetCatalogs"> & { +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandGetCatalogs. + * Use `create(CommandGetCatalogsSchema)` to create a new message. + */ +export const CommandGetCatalogsSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 2); + +/** + * + * Represents a request to retrieve the list of database schemas on a Flight SQL enabled backend. + * The definition of a database schema depends on vendor/implementation. It is usually a collection of tables. + * Used in the command member of FlightDescriptor for the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * - GetFlightInfo: execute the catalog metadata request. + * + * The returned Arrow schema will be: + * < + * catalog_name: utf8, + * db_schema_name: utf8 not null + * > + * The returned data should be ordered by catalog_name, then db_schema_name. + * + * @generated from message arrow.flight.protocol.sql.CommandGetDbSchemas + */ +export type CommandGetDbSchemas = Message<"arrow.flight.protocol.sql.CommandGetDbSchemas"> & { + /** + * + * Specifies the Catalog to search for the tables. + * An empty string retrieves those without a catalog. + * If omitted the catalog name should not be used to narrow the search. + * + * @generated from field: optional string catalog = 1; + */ + catalog?: string; + + /** + * + * Specifies a filter pattern for schemas to search for. + * When no db_schema_filter_pattern is provided, the pattern will not be used to narrow the search. + * In the pattern string, two special characters can be used to denote matching rules: + * - "%" means to match any substring with 0 or more characters. + * - "_" means to match any one character. + * + * @generated from field: optional string db_schema_filter_pattern = 2; + */ + dbSchemaFilterPattern?: string; +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandGetDbSchemas. + * Use `create(CommandGetDbSchemasSchema)` to create a new message. + */ +export const CommandGetDbSchemasSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 3); + +/** + * + * Represents a request to retrieve the list of tables, and optionally their schemas, on a Flight SQL enabled backend. + * Used in the command member of FlightDescriptor for the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * - GetFlightInfo: execute the catalog metadata request. + * + * The returned Arrow schema will be: + * < + * catalog_name: utf8, + * db_schema_name: utf8, + * table_name: utf8 not null, + * table_type: utf8 not null, + * [optional] table_schema: bytes not null (schema of the table as described in Schema.fbs::Schema, + * it is serialized as an IPC message.) + * > + * Fields on table_schema may contain the following metadata: + * - ARROW:FLIGHT:SQL:CATALOG_NAME - Table's catalog name + * - ARROW:FLIGHT:SQL:DB_SCHEMA_NAME - Database schema name + * - ARROW:FLIGHT:SQL:TABLE_NAME - Table name + * - ARROW:FLIGHT:SQL:TYPE_NAME - The data source-specific name for the data type of the column. + * - ARROW:FLIGHT:SQL:PRECISION - Column precision/size + * - ARROW:FLIGHT:SQL:SCALE - Column scale/decimal digits if applicable + * - ARROW:FLIGHT:SQL:IS_AUTO_INCREMENT - "1" indicates if the column is auto incremented, "0" otherwise. + * - ARROW:FLIGHT:SQL:IS_CASE_SENSITIVE - "1" indicates if the column is case-sensitive, "0" otherwise. 
+ * - ARROW:FLIGHT:SQL:IS_READ_ONLY - "1" indicates if the column is read only, "0" otherwise. + * - ARROW:FLIGHT:SQL:IS_SEARCHABLE - "1" indicates if the column is searchable via WHERE clause, "0" otherwise. + * - ARROW:FLIGHT:SQL:REMARKS - A comment describing the column. This field has been added after all others, clients should be prepared to find it missing. + * The returned data should be ordered by catalog_name, db_schema_name, table_name, then table_type, followed by table_schema if requested. + * + * @generated from message arrow.flight.protocol.sql.CommandGetTables + */ +export type CommandGetTables = Message<"arrow.flight.protocol.sql.CommandGetTables"> & { + /** + * + * Specifies the Catalog to search for the tables. + * An empty string retrieves those without a catalog. + * If omitted the catalog name should not be used to narrow the search. + * + * @generated from field: optional string catalog = 1; + */ + catalog?: string; + + /** + * + * Specifies a filter pattern for schemas to search for. + * When no db_schema_filter_pattern is provided, all schemas matching other filters are searched. + * In the pattern string, two special characters can be used to denote matching rules: + * - "%" means to match any substring with 0 or more characters. + * - "_" means to match any one character. + * + * @generated from field: optional string db_schema_filter_pattern = 2; + */ + dbSchemaFilterPattern?: string; + + /** + * + * Specifies a filter pattern for tables to search for. + * When no table_name_filter_pattern is provided, all tables matching other filters are searched. + * In the pattern string, two special characters can be used to denote matching rules: + * - "%" means to match any substring with 0 or more characters. + * - "_" means to match any one character. + * + * @generated from field: optional string table_name_filter_pattern = 3; + */ + tableNameFilterPattern?: string; + + /** + * + * Specifies a filter of table types which must match. + * The table types depend on vendor/implementation. It is usually used to separate tables from views or system tables. + * TABLE, VIEW, and SYSTEM TABLE are commonly supported. + * + * @generated from field: repeated string table_types = 4; + */ + tableTypes: string[]; + + /** + * Specifies if the Arrow schema should be returned for found tables. + * + * @generated from field: bool include_schema = 5; + */ + includeSchema: boolean; +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandGetTables. + * Use `create(CommandGetTablesSchema)` to create a new message. + */ +export const CommandGetTablesSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 4); + +/** + * + * Represents a request to retrieve the list of table types on a Flight SQL enabled backend. + * The table types depend on vendor/implementation. It is usually used to separate tables from views or system tables. + * TABLE, VIEW, and SYSTEM TABLE are commonly supported. + * Used in the command member of FlightDescriptor for the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * - GetFlightInfo: execute the catalog metadata request. + * + * The returned Arrow schema will be: + * < + * table_type: utf8 not null + * > + * The returned data should be ordered by table_type. 
+ * + * @generated from message arrow.flight.protocol.sql.CommandGetTableTypes + */ +export type CommandGetTableTypes = Message<"arrow.flight.protocol.sql.CommandGetTableTypes"> & { +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandGetTableTypes. + * Use `create(CommandGetTableTypesSchema)` to create a new message. + */ +export const CommandGetTableTypesSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 5); + +/** + * + * Represents a request to retrieve the primary keys of a table on a Flight SQL enabled backend. + * Used in the command member of FlightDescriptor for the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * - GetFlightInfo: execute the catalog metadata request. + * + * The returned Arrow schema will be: + * < + * catalog_name: utf8, + * db_schema_name: utf8, + * table_name: utf8 not null, + * column_name: utf8 not null, + * key_name: utf8, + * key_sequence: int32 not null + * > + * The returned data should be ordered by catalog_name, db_schema_name, table_name, key_name, then key_sequence. + * + * @generated from message arrow.flight.protocol.sql.CommandGetPrimaryKeys + */ +export type CommandGetPrimaryKeys = Message<"arrow.flight.protocol.sql.CommandGetPrimaryKeys"> & { + /** + * + * Specifies the catalog to search for the table. + * An empty string retrieves those without a catalog. + * If omitted the catalog name should not be used to narrow the search. + * + * @generated from field: optional string catalog = 1; + */ + catalog?: string; + + /** + * + * Specifies the schema to search for the table. + * An empty string retrieves those without a schema. + * If omitted the schema name should not be used to narrow the search. + * + * @generated from field: optional string db_schema = 2; + */ + dbSchema?: string; + + /** + * Specifies the table to get the primary keys for. + * + * @generated from field: string table = 3; + */ + table: string; +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandGetPrimaryKeys. + * Use `create(CommandGetPrimaryKeysSchema)` to create a new message. + */ +export const CommandGetPrimaryKeysSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 6); + +/** + * + * Represents a request to retrieve a description of the foreign key columns that reference the given table's + * primary key columns (the foreign keys exported by a table) of a table on a Flight SQL enabled backend. + * Used in the command member of FlightDescriptor for the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * - GetFlightInfo: execute the catalog metadata request. + * + * The returned Arrow schema will be: + * < + * pk_catalog_name: utf8, + * pk_db_schema_name: utf8, + * pk_table_name: utf8 not null, + * pk_column_name: utf8 not null, + * fk_catalog_name: utf8, + * fk_db_schema_name: utf8, + * fk_table_name: utf8 not null, + * fk_column_name: utf8 not null, + * key_sequence: int32 not null, + * fk_key_name: utf8, + * pk_key_name: utf8, + * update_rule: uint8 not null, + * delete_rule: uint8 not null + * > + * The returned data should be ordered by fk_catalog_name, fk_db_schema_name, fk_table_name, fk_key_name, then key_sequence. + * update_rule and delete_rule returns a byte that is equivalent to actions declared on UpdateDeleteRules enum. 
+ * + * @generated from message arrow.flight.protocol.sql.CommandGetExportedKeys + */ +export type CommandGetExportedKeys = Message<"arrow.flight.protocol.sql.CommandGetExportedKeys"> & { + /** + * + * Specifies the catalog to search for the foreign key table. + * An empty string retrieves those without a catalog. + * If omitted the catalog name should not be used to narrow the search. + * + * @generated from field: optional string catalog = 1; + */ + catalog?: string; + + /** + * + * Specifies the schema to search for the foreign key table. + * An empty string retrieves those without a schema. + * If omitted the schema name should not be used to narrow the search. + * + * @generated from field: optional string db_schema = 2; + */ + dbSchema?: string; + + /** + * Specifies the foreign key table to get the foreign keys for. + * + * @generated from field: string table = 3; + */ + table: string; +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandGetExportedKeys. + * Use `create(CommandGetExportedKeysSchema)` to create a new message. + */ +export const CommandGetExportedKeysSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 7); + +/** + * + * Represents a request to retrieve the foreign keys of a table on a Flight SQL enabled backend. + * Used in the command member of FlightDescriptor for the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * - GetFlightInfo: execute the catalog metadata request. + * + * The returned Arrow schema will be: + * < + * pk_catalog_name: utf8, + * pk_db_schema_name: utf8, + * pk_table_name: utf8 not null, + * pk_column_name: utf8 not null, + * fk_catalog_name: utf8, + * fk_db_schema_name: utf8, + * fk_table_name: utf8 not null, + * fk_column_name: utf8 not null, + * key_sequence: int32 not null, + * fk_key_name: utf8, + * pk_key_name: utf8, + * update_rule: uint8 not null, + * delete_rule: uint8 not null + * > + * The returned data should be ordered by pk_catalog_name, pk_db_schema_name, pk_table_name, pk_key_name, then key_sequence. + * update_rule and delete_rule returns a byte that is equivalent to actions: + * - 0 = CASCADE + * - 1 = RESTRICT + * - 2 = SET NULL + * - 3 = NO ACTION + * - 4 = SET DEFAULT + * + * @generated from message arrow.flight.protocol.sql.CommandGetImportedKeys + */ +export type CommandGetImportedKeys = Message<"arrow.flight.protocol.sql.CommandGetImportedKeys"> & { + /** + * + * Specifies the catalog to search for the primary key table. + * An empty string retrieves those without a catalog. + * If omitted the catalog name should not be used to narrow the search. + * + * @generated from field: optional string catalog = 1; + */ + catalog?: string; + + /** + * + * Specifies the schema to search for the primary key table. + * An empty string retrieves those without a schema. + * If omitted the schema name should not be used to narrow the search. + * + * @generated from field: optional string db_schema = 2; + */ + dbSchema?: string; + + /** + * Specifies the primary key table to get the foreign keys for. + * + * @generated from field: string table = 3; + */ + table: string; +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandGetImportedKeys. + * Use `create(CommandGetImportedKeysSchema)` to create a new message. 
+ */ +export const CommandGetImportedKeysSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 8); + +/** + * + * Represents a request to retrieve a description of the foreign key columns in the given foreign key table that + * reference the primary key or the columns representing a unique constraint of the parent table (could be the same + * or a different table) on a Flight SQL enabled backend. + * Used in the command member of FlightDescriptor for the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * - GetFlightInfo: execute the catalog metadata request. + * + * The returned Arrow schema will be: + * < + * pk_catalog_name: utf8, + * pk_db_schema_name: utf8, + * pk_table_name: utf8 not null, + * pk_column_name: utf8 not null, + * fk_catalog_name: utf8, + * fk_db_schema_name: utf8, + * fk_table_name: utf8 not null, + * fk_column_name: utf8 not null, + * key_sequence: int32 not null, + * fk_key_name: utf8, + * pk_key_name: utf8, + * update_rule: uint8 not null, + * delete_rule: uint8 not null + * > + * The returned data should be ordered by pk_catalog_name, pk_db_schema_name, pk_table_name, pk_key_name, then key_sequence. + * update_rule and delete_rule returns a byte that is equivalent to actions: + * - 0 = CASCADE + * - 1 = RESTRICT + * - 2 = SET NULL + * - 3 = NO ACTION + * - 4 = SET DEFAULT + * + * @generated from message arrow.flight.protocol.sql.CommandGetCrossReference + */ +export type CommandGetCrossReference = Message<"arrow.flight.protocol.sql.CommandGetCrossReference"> & { + /** + * * + * The catalog name where the parent table is. + * An empty string retrieves those without a catalog. + * If omitted the catalog name should not be used to narrow the search. + * + * @generated from field: optional string pk_catalog = 1; + */ + pkCatalog?: string; + + /** + * * + * The Schema name where the parent table is. + * An empty string retrieves those without a schema. + * If omitted the schema name should not be used to narrow the search. + * + * @generated from field: optional string pk_db_schema = 2; + */ + pkDbSchema?: string; + + /** + * * + * The parent table name. It cannot be null. + * + * @generated from field: string pk_table = 3; + */ + pkTable: string; + + /** + * * + * The catalog name where the foreign table is. + * An empty string retrieves those without a catalog. + * If omitted the catalog name should not be used to narrow the search. + * + * @generated from field: optional string fk_catalog = 4; + */ + fkCatalog?: string; + + /** + * * + * The schema name where the foreign table is. + * An empty string retrieves those without a schema. + * If omitted the schema name should not be used to narrow the search. + * + * @generated from field: optional string fk_db_schema = 5; + */ + fkDbSchema?: string; + + /** + * * + * The foreign table name. It cannot be null. + * + * @generated from field: string fk_table = 6; + */ + fkTable: string; +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandGetCrossReference. + * Use `create(CommandGetCrossReferenceSchema)` to create a new message. + */ +export const CommandGetCrossReferenceSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 9); + +/** + * + * Request message for the "CreatePreparedStatement" action on a Flight SQL enabled backend. 
+ * + * @generated from message arrow.flight.protocol.sql.ActionCreatePreparedStatementRequest + */ +export type ActionCreatePreparedStatementRequest = Message<"arrow.flight.protocol.sql.ActionCreatePreparedStatementRequest"> & { + /** + * The valid SQL string to create a prepared statement for. + * + * @generated from field: string query = 1; + */ + query: string; + + /** + * Create/execute the prepared statement as part of this transaction (if + * unset, executions of the prepared statement will be auto-committed). + * + * @generated from field: optional bytes transaction_id = 2; + */ + transactionId?: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionCreatePreparedStatementRequest. + * Use `create(ActionCreatePreparedStatementRequestSchema)` to create a new message. + */ +export const ActionCreatePreparedStatementRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 10); + +/** + * + * An embedded message describing a Substrait plan to execute. + * + * @generated from message arrow.flight.protocol.sql.SubstraitPlan + */ +export type SubstraitPlan = Message<"arrow.flight.protocol.sql.SubstraitPlan"> & { + /** + * The serialized substrait.Plan to create a prepared statement for. + * XXX(ARROW-16902): this is bytes instead of an embedded message + * because Protobuf does not really support one DLL using Protobuf + * definitions from another DLL. + * + * @generated from field: bytes plan = 1; + */ + plan: Uint8Array; + + /** + * The Substrait release, e.g. "0.12.0". This information is not + * tracked in the plan itself, so this is the only way for consumers + * to potentially know if they can handle the plan. + * + * @generated from field: string version = 2; + */ + version: string; +}; + +/** + * Describes the message arrow.flight.protocol.sql.SubstraitPlan. + * Use `create(SubstraitPlanSchema)` to create a new message. + */ +export const SubstraitPlanSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 11); + +/** + * + * Request message for the "CreatePreparedSubstraitPlan" action on a Flight SQL enabled backend. + * + * @generated from message arrow.flight.protocol.sql.ActionCreatePreparedSubstraitPlanRequest + */ +export type ActionCreatePreparedSubstraitPlanRequest = Message<"arrow.flight.protocol.sql.ActionCreatePreparedSubstraitPlanRequest"> & { + /** + * The serialized substrait.Plan to create a prepared statement for. + * + * @generated from field: arrow.flight.protocol.sql.SubstraitPlan plan = 1; + */ + plan?: SubstraitPlan; + + /** + * Create/execute the prepared statement as part of this transaction (if + * unset, executions of the prepared statement will be auto-committed). + * + * @generated from field: optional bytes transaction_id = 2; + */ + transactionId?: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionCreatePreparedSubstraitPlanRequest. + * Use `create(ActionCreatePreparedSubstraitPlanRequestSchema)` to create a new message. + */ +export const ActionCreatePreparedSubstraitPlanRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 12); + +/** + * + * Wrap the result of a "CreatePreparedStatement" or "CreatePreparedSubstraitPlan" action. + * + * The resultant PreparedStatement can be closed either: + * - Manually, through the "ClosePreparedStatement" action; + * - Automatically, by a server timeout. + * + * The result should be wrapped in a google.protobuf.Any message. 
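+ *
+ * Unpacking sketch (assumes `anyUnpack` from `@bufbuild/protobuf/wkt`; `res`
+ * is the google.protobuf.Any payload returned by the action):
+ *
+ * ```ts
+ * import { anyUnpack } from "@bufbuild/protobuf/wkt";
+ *
+ * const result = anyUnpack(res, ActionCreatePreparedStatementResultSchema);
+ * if (result !== undefined) {
+ *   // Opaque handle to use for subsequent prepared-statement commands.
+ *   const handle = result.preparedStatementHandle;
+ * }
+ * ```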
+ * + * @generated from message arrow.flight.protocol.sql.ActionCreatePreparedStatementResult + */ +export type ActionCreatePreparedStatementResult = Message<"arrow.flight.protocol.sql.ActionCreatePreparedStatementResult"> & { + /** + * Opaque handle for the prepared statement on the server. + * + * @generated from field: bytes prepared_statement_handle = 1; + */ + preparedStatementHandle: Uint8Array; + + /** + * If a result set generating query was provided, dataset_schema contains the + * schema of the result set. It should be an IPC-encapsulated Schema, as described in Schema.fbs. + * For some queries, the schema of the results may depend on the schema of the parameters. The server + * should provide its best guess as to the schema at this point. Clients must not assume that this + * schema, if provided, will be accurate. + * + * @generated from field: bytes dataset_schema = 2; + */ + datasetSchema: Uint8Array; + + /** + * If the query provided contained parameters, parameter_schema contains the + * schema of the expected parameters. It should be an IPC-encapsulated Schema, as described in Schema.fbs. + * + * @generated from field: bytes parameter_schema = 3; + */ + parameterSchema: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionCreatePreparedStatementResult. + * Use `create(ActionCreatePreparedStatementResultSchema)` to create a new message. + */ +export const ActionCreatePreparedStatementResultSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 13); + +/** + * + * Request message for the "ClosePreparedStatement" action on a Flight SQL enabled backend. + * Closes server resources associated with the prepared statement handle. + * + * @generated from message arrow.flight.protocol.sql.ActionClosePreparedStatementRequest + */ +export type ActionClosePreparedStatementRequest = Message<"arrow.flight.protocol.sql.ActionClosePreparedStatementRequest"> & { + /** + * Opaque handle for the prepared statement on the server. + * + * @generated from field: bytes prepared_statement_handle = 1; + */ + preparedStatementHandle: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionClosePreparedStatementRequest. + * Use `create(ActionClosePreparedStatementRequestSchema)` to create a new message. + */ +export const ActionClosePreparedStatementRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 14); + +/** + * + * Request message for the "BeginTransaction" action. + * Begins a transaction. + * + * @generated from message arrow.flight.protocol.sql.ActionBeginTransactionRequest + */ +export type ActionBeginTransactionRequest = Message<"arrow.flight.protocol.sql.ActionBeginTransactionRequest"> & { +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionBeginTransactionRequest. + * Use `create(ActionBeginTransactionRequestSchema)` to create a new message. + */ +export const ActionBeginTransactionRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 15); + +/** + * + * Request message for the "BeginSavepoint" action. + * Creates a savepoint within a transaction. + * + * Only supported if FLIGHT_SQL_TRANSACTION is + * FLIGHT_SQL_TRANSACTION_SUPPORT_SAVEPOINT. + * + * @generated from message arrow.flight.protocol.sql.ActionBeginSavepointRequest + */ +export type ActionBeginSavepointRequest = Message<"arrow.flight.protocol.sql.ActionBeginSavepointRequest"> & { + /** + * The transaction to which a savepoint belongs. 
+ * + * @generated from field: bytes transaction_id = 1; + */ + transactionId: Uint8Array; + + /** + * Name for the savepoint. + * + * @generated from field: string name = 2; + */ + name: string; +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionBeginSavepointRequest. + * Use `create(ActionBeginSavepointRequestSchema)` to create a new message. + */ +export const ActionBeginSavepointRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 16); + +/** + * + * The result of a "BeginTransaction" action. + * + * The transaction can be manipulated with the "EndTransaction" action, or + * automatically via server timeout. If the transaction times out, then it is + * automatically rolled back. + * + * The result should be wrapped in a google.protobuf.Any message. + * + * @generated from message arrow.flight.protocol.sql.ActionBeginTransactionResult + */ +export type ActionBeginTransactionResult = Message<"arrow.flight.protocol.sql.ActionBeginTransactionResult"> & { + /** + * Opaque handle for the transaction on the server. + * + * @generated from field: bytes transaction_id = 1; + */ + transactionId: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionBeginTransactionResult. + * Use `create(ActionBeginTransactionResultSchema)` to create a new message. + */ +export const ActionBeginTransactionResultSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 17); + +/** + * + * The result of a "BeginSavepoint" action. + * + * The transaction can be manipulated with the "EndSavepoint" action. + * If the associated transaction is committed, rolled back, or times + * out, then the savepoint is also invalidated. + * + * The result should be wrapped in a google.protobuf.Any message. + * + * @generated from message arrow.flight.protocol.sql.ActionBeginSavepointResult + */ +export type ActionBeginSavepointResult = Message<"arrow.flight.protocol.sql.ActionBeginSavepointResult"> & { + /** + * Opaque handle for the savepoint on the server. + * + * @generated from field: bytes savepoint_id = 1; + */ + savepointId: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionBeginSavepointResult. + * Use `create(ActionBeginSavepointResultSchema)` to create a new message. + */ +export const ActionBeginSavepointResultSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 18); + +/** + * + * Request message for the "EndTransaction" action. + * + * Commit (COMMIT) or rollback (ROLLBACK) the transaction. + * + * If the action completes successfully, the transaction handle is + * invalidated, as are all associated savepoints. + * + * @generated from message arrow.flight.protocol.sql.ActionEndTransactionRequest + */ +export type ActionEndTransactionRequest = Message<"arrow.flight.protocol.sql.ActionEndTransactionRequest"> & { + /** + * Opaque handle for the transaction on the server. + * + * @generated from field: bytes transaction_id = 1; + */ + transactionId: Uint8Array; + + /** + * Whether to commit/rollback the given transaction. + * + * @generated from field: arrow.flight.protocol.sql.ActionEndTransactionRequest.EndTransaction action = 2; + */ + action: ActionEndTransactionRequest_EndTransaction; +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionEndTransactionRequest. + * Use `create(ActionEndTransactionRequestSchema)` to create a new message. 
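+ *
+ * Usage sketch (`txId` is a hypothetical transaction handle):
+ *
+ * ```ts
+ * const req = create(ActionEndTransactionRequestSchema, {
+ *   transactionId: txId,
+ *   action: ActionEndTransactionRequest_EndTransaction.COMMIT
+ * });
+ * ```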
+ */ +export const ActionEndTransactionRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 19); + +/** + * @generated from enum arrow.flight.protocol.sql.ActionEndTransactionRequest.EndTransaction + */ +export enum ActionEndTransactionRequest_EndTransaction { + /** + * @generated from enum value: END_TRANSACTION_UNSPECIFIED = 0; + */ + UNSPECIFIED = 0, + + /** + * Commit the transaction. + * + * @generated from enum value: END_TRANSACTION_COMMIT = 1; + */ + COMMIT = 1, + + /** + * Roll back the transaction. + * + * @generated from enum value: END_TRANSACTION_ROLLBACK = 2; + */ + ROLLBACK = 2, +} + +/** + * Describes the enum arrow.flight.protocol.sql.ActionEndTransactionRequest.EndTransaction. + */ +export const ActionEndTransactionRequest_EndTransactionSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 19, 0); + +/** + * + * Request message for the "EndSavepoint" action. + * + * Release (RELEASE) the savepoint or rollback (ROLLBACK) to the + * savepoint. + * + * Releasing a savepoint invalidates that savepoint. Rolling back to + * a savepoint does not invalidate the savepoint, but invalidates all + * savepoints created after the current savepoint. + * + * @generated from message arrow.flight.protocol.sql.ActionEndSavepointRequest + */ +export type ActionEndSavepointRequest = Message<"arrow.flight.protocol.sql.ActionEndSavepointRequest"> & { + /** + * Opaque handle for the savepoint on the server. + * + * @generated from field: bytes savepoint_id = 1; + */ + savepointId: Uint8Array; + + /** + * Whether to rollback/release the given savepoint. + * + * @generated from field: arrow.flight.protocol.sql.ActionEndSavepointRequest.EndSavepoint action = 2; + */ + action: ActionEndSavepointRequest_EndSavepoint; +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionEndSavepointRequest. + * Use `create(ActionEndSavepointRequestSchema)` to create a new message. + */ +export const ActionEndSavepointRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 20); + +/** + * @generated from enum arrow.flight.protocol.sql.ActionEndSavepointRequest.EndSavepoint + */ +export enum ActionEndSavepointRequest_EndSavepoint { + /** + * @generated from enum value: END_SAVEPOINT_UNSPECIFIED = 0; + */ + UNSPECIFIED = 0, + + /** + * Release the savepoint. + * + * @generated from enum value: END_SAVEPOINT_RELEASE = 1; + */ + RELEASE = 1, + + /** + * Roll back to a savepoint. + * + * @generated from enum value: END_SAVEPOINT_ROLLBACK = 2; + */ + ROLLBACK = 2, +} + +/** + * Describes the enum arrow.flight.protocol.sql.ActionEndSavepointRequest.EndSavepoint. + */ +export const ActionEndSavepointRequest_EndSavepointSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 20, 0); + +/** + * + * Represents a SQL query. Used in the command member of FlightDescriptor + * for the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * Fields on this schema may contain the following metadata: + * - ARROW:FLIGHT:SQL:CATALOG_NAME - Table's catalog name + * - ARROW:FLIGHT:SQL:DB_SCHEMA_NAME - Database schema name + * - ARROW:FLIGHT:SQL:TABLE_NAME - Table name + * - ARROW:FLIGHT:SQL:TYPE_NAME - The data source-specific name for the data type of the column. + * - ARROW:FLIGHT:SQL:PRECISION - Column precision/size + * - ARROW:FLIGHT:SQL:SCALE - Column scale/decimal digits if applicable + * - ARROW:FLIGHT:SQL:IS_AUTO_INCREMENT - "1" indicates if the column is auto incremented, "0" otherwise. 
+ * - ARROW:FLIGHT:SQL:IS_CASE_SENSITIVE - "1" indicates if the column is case-sensitive, "0" otherwise. + * - ARROW:FLIGHT:SQL:IS_READ_ONLY - "1" indicates if the column is read only, "0" otherwise. + * - ARROW:FLIGHT:SQL:IS_SEARCHABLE - "1" indicates if the column is searchable via WHERE clause, "0" otherwise. + * - ARROW:FLIGHT:SQL:REMARKS - A comment describing the column. This field has been added after all others, clients should be prepared to find it missing. + * - GetFlightInfo: execute the query. + * + * @generated from message arrow.flight.protocol.sql.CommandStatementQuery + */ +export type CommandStatementQuery = Message<"arrow.flight.protocol.sql.CommandStatementQuery"> & { + /** + * The SQL syntax. + * + * @generated from field: string query = 1; + */ + query: string; + + /** + * Include the query as part of this transaction (if unset, the query is auto-committed). + * + * @generated from field: optional bytes transaction_id = 2; + */ + transactionId?: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandStatementQuery. + * Use `create(CommandStatementQuerySchema)` to create a new message. + */ +export const CommandStatementQuerySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 21); + +/** + * + * Represents a Substrait plan. Used in the command member of FlightDescriptor + * for the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * Fields on this schema may contain the following metadata: + * - ARROW:FLIGHT:SQL:CATALOG_NAME - Table's catalog name + * - ARROW:FLIGHT:SQL:DB_SCHEMA_NAME - Database schema name + * - ARROW:FLIGHT:SQL:TABLE_NAME - Table name + * - ARROW:FLIGHT:SQL:TYPE_NAME - The data source-specific name for the data type of the column. + * - ARROW:FLIGHT:SQL:PRECISION - Column precision/size + * - ARROW:FLIGHT:SQL:SCALE - Column scale/decimal digits if applicable + * - ARROW:FLIGHT:SQL:IS_AUTO_INCREMENT - "1" indicates if the column is auto incremented, "0" otherwise. + * - ARROW:FLIGHT:SQL:IS_CASE_SENSITIVE - "1" indicates if the column is case-sensitive, "0" otherwise. + * - ARROW:FLIGHT:SQL:IS_READ_ONLY - "1" indicates if the column is read only, "0" otherwise. + * - ARROW:FLIGHT:SQL:IS_SEARCHABLE - "1" indicates if the column is searchable via WHERE clause, "0" otherwise. + * - ARROW:FLIGHT:SQL:REMARKS - A comment describing the column. This field has been added after all others, clients should be prepared to find it missing. + * - GetFlightInfo: execute the query. + * - DoPut: execute the query. + * + * @generated from message arrow.flight.protocol.sql.CommandStatementSubstraitPlan + */ +export type CommandStatementSubstraitPlan = Message<"arrow.flight.protocol.sql.CommandStatementSubstraitPlan"> & { + /** + * A serialized substrait.Plan + * + * @generated from field: arrow.flight.protocol.sql.SubstraitPlan plan = 1; + */ + plan?: SubstraitPlan; + + /** + * Include the query as part of this transaction (if unset, the query is auto-committed). + * + * @generated from field: optional bytes transaction_id = 2; + */ + transactionId?: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandStatementSubstraitPlan. + * Use `create(CommandStatementSubstraitPlanSchema)` to create a new message. + */ +export const CommandStatementSubstraitPlanSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 22); + +/** + * * + * Represents a ticket resulting from GetFlightInfo with a CommandStatementQuery. 
+ * This should be used only once and treated as an opaque value, that is, clients should not attempt to parse this. + * + * @generated from message arrow.flight.protocol.sql.TicketStatementQuery + */ +export type TicketStatementQuery = Message<"arrow.flight.protocol.sql.TicketStatementQuery"> & { + /** + * Unique identifier for the instance of the statement to execute. + * + * @generated from field: bytes statement_handle = 1; + */ + statementHandle: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.sql.TicketStatementQuery. + * Use `create(TicketStatementQuerySchema)` to create a new message. + */ +export const TicketStatementQuerySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 23); + +/** + * + * Represents an instance of executing a prepared statement. Used in the command member of FlightDescriptor for + * the following RPC calls: + * - GetSchema: return the Arrow schema of the query. + * Fields on this schema may contain the following metadata: + * - ARROW:FLIGHT:SQL:CATALOG_NAME - Table's catalog name + * - ARROW:FLIGHT:SQL:DB_SCHEMA_NAME - Database schema name + * - ARROW:FLIGHT:SQL:TABLE_NAME - Table name + * - ARROW:FLIGHT:SQL:TYPE_NAME - The data source-specific name for the data type of the column. + * - ARROW:FLIGHT:SQL:PRECISION - Column precision/size + * - ARROW:FLIGHT:SQL:SCALE - Column scale/decimal digits if applicable + * - ARROW:FLIGHT:SQL:IS_AUTO_INCREMENT - "1" indicates if the column is auto incremented, "0" otherwise. + * - ARROW:FLIGHT:SQL:IS_CASE_SENSITIVE - "1" indicates if the column is case-sensitive, "0" otherwise. + * - ARROW:FLIGHT:SQL:IS_READ_ONLY - "1" indicates if the column is read only, "0" otherwise. + * - ARROW:FLIGHT:SQL:IS_SEARCHABLE - "1" indicates if the column is searchable via WHERE clause, "0" otherwise. + * - ARROW:FLIGHT:SQL:REMARKS - A comment describing the column. This field has been added after all others, clients should be prepared to find it missing. + * + * If the schema is retrieved after parameter values have been bound with DoPut, then the server should account + * for the parameters when determining the schema. + * - DoPut: bind parameter values. All of the bound parameter sets will be executed as a single atomic execution. + * - GetFlightInfo: execute the prepared statement instance. + * + * @generated from message arrow.flight.protocol.sql.CommandPreparedStatementQuery + */ +export type CommandPreparedStatementQuery = Message<"arrow.flight.protocol.sql.CommandPreparedStatementQuery"> & { + /** + * Opaque handle for the prepared statement on the server. + * + * @generated from field: bytes prepared_statement_handle = 1; + */ + preparedStatementHandle: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.sql.CommandPreparedStatementQuery. + * Use `create(CommandPreparedStatementQuerySchema)` to create a new message. + */ +export const CommandPreparedStatementQuerySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 24); + +/** + * + * Represents a SQL update query. Used in the command member of FlightDescriptor + * for the RPC call DoPut to cause the server to execute the included SQL update. + * + * @generated from message arrow.flight.protocol.sql.CommandStatementUpdate + */ +export type CommandStatementUpdate = Message<"arrow.flight.protocol.sql.CommandStatementUpdate"> & { + /** + * The SQL syntax. 
+   *
+   * @generated from field: string query = 1;
+   */
+  query: string;
+
+  /**
+   * Include the query as part of this transaction (if unset, the query is auto-committed).
+   *
+   * @generated from field: optional bytes transaction_id = 2;
+   */
+  transactionId?: Uint8Array;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.sql.CommandStatementUpdate.
+ * Use `create(CommandStatementUpdateSchema)` to create a new message.
+ */
+export const CommandStatementUpdateSchema: GenMessage<CommandStatementUpdate> = /*@__PURE__*/
+  messageDesc(file_FlightSql, 25);
+
+/**
+ *
+ * Represents a SQL update query. Used in the command member of FlightDescriptor
+ * for the RPC call DoPut to cause the server to execute the included
+ * prepared statement handle as an update.
+ *
+ * @generated from message arrow.flight.protocol.sql.CommandPreparedStatementUpdate
+ */
+export type CommandPreparedStatementUpdate = Message<"arrow.flight.protocol.sql.CommandPreparedStatementUpdate"> & {
+  /**
+   * Opaque handle for the prepared statement on the server.
+   *
+   * @generated from field: bytes prepared_statement_handle = 1;
+   */
+  preparedStatementHandle: Uint8Array;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.sql.CommandPreparedStatementUpdate.
+ * Use `create(CommandPreparedStatementUpdateSchema)` to create a new message.
+ */
+export const CommandPreparedStatementUpdateSchema: GenMessage<CommandPreparedStatementUpdate> = /*@__PURE__*/
+  messageDesc(file_FlightSql, 26);
+
+/**
+ *
+ * Represents a bulk ingestion request. Used in the command member of FlightDescriptor
+ * for the RPC call DoPut to cause the server to load the contents of the stream's
+ * FlightData into the target destination.
+ *
+ * @generated from message arrow.flight.protocol.sql.CommandStatementIngest
+ */
+export type CommandStatementIngest = Message<"arrow.flight.protocol.sql.CommandStatementIngest"> & {
+  /**
+   * The behavior for handling the table definition.
+   *
+   * @generated from field: arrow.flight.protocol.sql.CommandStatementIngest.TableDefinitionOptions table_definition_options = 1;
+   */
+  tableDefinitionOptions?: CommandStatementIngest_TableDefinitionOptions;
+
+  /**
+   * The table to load data into.
+   *
+   * @generated from field: string table = 2;
+   */
+  table: string;
+
+  /**
+   * The db_schema of the destination table to load data into. If unset, a backend-specific default may be used.
+   *
+   * @generated from field: optional string schema = 3;
+   */
+  schema?: string;
+
+  /**
+   * The catalog of the destination table to load data into. If unset, a backend-specific default may be used.
+   *
+   * @generated from field: optional string catalog = 4;
+   */
+  catalog?: string;
+
+  /**
+   *
+   * Store ingested data in a temporary table.
+   * The effect of setting temporary is to place the table in a backend-defined namespace, and to drop the table at the end of the session.
+   * The namespacing may make use of a backend-specific schema and/or catalog.
+   * The server should return an error if an explicit choice of schema or catalog is incompatible with the server's namespacing decision.
+   *
+   * @generated from field: bool temporary = 5;
+   */
+  temporary: boolean;
+
+  /**
+   * Perform the ingestion as part of this transaction. If specified, results should not be committed in the event of an error/cancellation.
+   *
+   * @generated from field: optional bytes transaction_id = 6;
+   */
+  transactionId?: Uint8Array;
+
+  /**
+   * Backend-specific options.
+   *
+   * @generated from field: map<string, string> options = 1000;
+   */
+  options: { [key: string]: string };
+};
+
+/**
+ * Describes the message arrow.flight.protocol.sql.CommandStatementIngest.
+ * Use `create(CommandStatementIngestSchema)` to create a new message.
+ */
+export const CommandStatementIngestSchema: GenMessage<CommandStatementIngest> = /*@__PURE__*/
+  messageDesc(file_FlightSql, 27);
+
+/**
+ * Options for table definition behavior
+ *
+ * @generated from message arrow.flight.protocol.sql.CommandStatementIngest.TableDefinitionOptions
+ */
+export type CommandStatementIngest_TableDefinitionOptions = Message<"arrow.flight.protocol.sql.CommandStatementIngest.TableDefinitionOptions"> & {
+  /**
+   * @generated from field: arrow.flight.protocol.sql.CommandStatementIngest.TableDefinitionOptions.TableNotExistOption if_not_exist = 1;
+   */
+  ifNotExist: CommandStatementIngest_TableDefinitionOptions_TableNotExistOption;
+
+  /**
+   * @generated from field: arrow.flight.protocol.sql.CommandStatementIngest.TableDefinitionOptions.TableExistsOption if_exists = 2;
+   */
+  ifExists: CommandStatementIngest_TableDefinitionOptions_TableExistsOption;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.sql.CommandStatementIngest.TableDefinitionOptions.
+ * Use `create(CommandStatementIngest_TableDefinitionOptionsSchema)` to create a new message.
+ */
+export const CommandStatementIngest_TableDefinitionOptionsSchema: GenMessage<CommandStatementIngest_TableDefinitionOptions> = /*@__PURE__*/
+  messageDesc(file_FlightSql, 27, 0);
+
+/**
+ * The action to take if the target table does not exist
+ *
+ * @generated from enum arrow.flight.protocol.sql.CommandStatementIngest.TableDefinitionOptions.TableNotExistOption
+ */
+export enum CommandStatementIngest_TableDefinitionOptions_TableNotExistOption {
+  /**
+   * Do not use. Servers should error if this is specified by a client.
+   *
+   * @generated from enum value: TABLE_NOT_EXIST_OPTION_UNSPECIFIED = 0;
+   */
+  UNSPECIFIED = 0,
+
+  /**
+   * Create the table if it does not exist
+   *
+   * @generated from enum value: TABLE_NOT_EXIST_OPTION_CREATE = 1;
+   */
+  CREATE = 1,
+
+  /**
+   * Fail if the table does not exist
+   *
+   * @generated from enum value: TABLE_NOT_EXIST_OPTION_FAIL = 2;
+   */
+  FAIL = 2,
+}
+
+/**
+ * Describes the enum arrow.flight.protocol.sql.CommandStatementIngest.TableDefinitionOptions.TableNotExistOption.
+ */
+export const CommandStatementIngest_TableDefinitionOptions_TableNotExistOptionSchema: GenEnum<CommandStatementIngest_TableDefinitionOptions_TableNotExistOption> = /*@__PURE__*/
+  enumDesc(file_FlightSql, 27, 0, 0);
+
+/**
+ * The action to take if the target table already exists
+ *
+ * @generated from enum arrow.flight.protocol.sql.CommandStatementIngest.TableDefinitionOptions.TableExistsOption
+ */
+export enum CommandStatementIngest_TableDefinitionOptions_TableExistsOption {
+  /**
+   * Do not use. Servers should error if this is specified by a client.
+   *
+   * @generated from enum value: TABLE_EXISTS_OPTION_UNSPECIFIED = 0;
+   */
+  UNSPECIFIED = 0,
+
+  /**
+   * Fail if the table already exists
+   *
+   * @generated from enum value: TABLE_EXISTS_OPTION_FAIL = 1;
+   */
+  FAIL = 1,
+
+  /**
+   * Append to the table if it already exists
+   *
+   * @generated from enum value: TABLE_EXISTS_OPTION_APPEND = 2;
+   */
+  APPEND = 2,
+
+  /**
+   * Drop and recreate the table if it already exists
+   *
+   * @generated from enum value: TABLE_EXISTS_OPTION_REPLACE = 3;
+   */
+  REPLACE = 3,
+}
+
+/**
+ * Describes the enum arrow.flight.protocol.sql.CommandStatementIngest.TableDefinitionOptions.TableExistsOption.
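+ *
+ * Usage sketch (illustrative choice of options): create the table if it is
+ * missing, append if it already exists.
+ *
+ * ```ts
+ * const tableOpts = create(CommandStatementIngest_TableDefinitionOptionsSchema, {
+ *   ifNotExist: CommandStatementIngest_TableDefinitionOptions_TableNotExistOption.CREATE,
+ *   ifExists: CommandStatementIngest_TableDefinitionOptions_TableExistsOption.APPEND
+ * });
+ * ```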
+ */
+export const CommandStatementIngest_TableDefinitionOptions_TableExistsOptionSchema: GenEnum<CommandStatementIngest_TableDefinitionOptions_TableExistsOption> = /*@__PURE__*/
+  enumDesc(file_FlightSql, 27, 0, 1);
+
+/**
+ *
+ * Returned from the RPC call DoPut when a CommandStatementUpdate,
+ * CommandPreparedStatementUpdate, or CommandStatementIngest was
+ * in the request, containing results from the update.
+ *
+ * @generated from message arrow.flight.protocol.sql.DoPutUpdateResult
+ */
+export type DoPutUpdateResult = Message<"arrow.flight.protocol.sql.DoPutUpdateResult"> & {
+  /**
+   * The number of records updated. A return value of -1 represents
+   * an unknown updated record count.
+   *
+   * @generated from field: int64 record_count = 1;
+   */
+  recordCount: bigint;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.sql.DoPutUpdateResult.
+ * Use `create(DoPutUpdateResultSchema)` to create a new message.
+ */
+export const DoPutUpdateResultSchema: GenMessage<DoPutUpdateResult> = /*@__PURE__*/
+  messageDesc(file_FlightSql, 28);
+
+/**
+ * An *optional* response returned when `DoPut` is called with `CommandPreparedStatementQuery`.
+ *
+ * *Note on legacy behavior*: previous versions of the protocol did not return any result for
+ * this command, and that behavior should still be supported by clients. In that case, the client
+ * can continue as though the fields in this message were not provided or set to sensible default values.
+ *
+ * @generated from message arrow.flight.protocol.sql.DoPutPreparedStatementResult
+ */
+export type DoPutPreparedStatementResult = Message<"arrow.flight.protocol.sql.DoPutPreparedStatementResult"> & {
+  /**
+   * Represents a (potentially updated) opaque handle for the prepared statement on the server.
+   * Because the handle could potentially be updated, any previous handles for this prepared
+   * statement should be considered invalid, and all subsequent requests for this prepared
+   * statement must use this new handle.
+   * The updated handle allows implementing query parameters with stateless services.
+   *
+   * When an updated handle is not provided by the server, clients should continue
+   * using the previous handle provided by `ActionCreatePreparedStatementResult`.
+   *
+   * @generated from field: optional bytes prepared_statement_handle = 1;
+   */
+  preparedStatementHandle?: Uint8Array;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.sql.DoPutPreparedStatementResult.
+ * Use `create(DoPutPreparedStatementResultSchema)` to create a new message.
+ */
+export const DoPutPreparedStatementResultSchema: GenMessage<DoPutPreparedStatementResult> = /*@__PURE__*/
+  messageDesc(file_FlightSql, 29);
+
+/**
+ *
+ * Request message for the "CancelQuery" action.
+ *
+ * Explicitly cancel a running query.
+ *
+ * This lets a single client explicitly cancel work, no matter how many clients
+ * are involved or whether the query is distributed, given server support.
+ * The transaction/statement is not rolled back; it is the application's job to
+ * commit or rollback as appropriate. This only indicates the client no longer
+ * wishes to read the remainder of the query results or continue submitting
+ * data.
+ *
+ * This command is idempotent.
+ *
+ * This command is deprecated since 13.0.0. Use the "CancelFlightInfo"
+ * action with DoAction instead.
+ *
+ * @generated from message arrow.flight.protocol.sql.ActionCancelQueryRequest
+ * @deprecated
+ */
+export type ActionCancelQueryRequest = Message<"arrow.flight.protocol.sql.ActionCancelQueryRequest"> & {
+  /**
+   * The result of the GetFlightInfo RPC that initiated the query.
+ * XXX(ARROW-16902): this must be a serialized FlightInfo, but is + * rendered as bytes because Protobuf does not really support one + * DLL using Protobuf definitions from another DLL. + * + * @generated from field: bytes info = 1; + */ + info: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionCancelQueryRequest. + * Use `create(ActionCancelQueryRequestSchema)` to create a new message. + * @deprecated + */ +export const ActionCancelQueryRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 30); + +/** + * + * The result of cancelling a query. + * + * The result should be wrapped in a google.protobuf.Any message. + * + * This command is deprecated since 13.0.0. Use the "CancelFlightInfo" + * action with DoAction instead. + * + * @generated from message arrow.flight.protocol.sql.ActionCancelQueryResult + * @deprecated + */ +export type ActionCancelQueryResult = Message<"arrow.flight.protocol.sql.ActionCancelQueryResult"> & { + /** + * @generated from field: arrow.flight.protocol.sql.ActionCancelQueryResult.CancelResult result = 1; + */ + result: ActionCancelQueryResult_CancelResult; +}; + +/** + * Describes the message arrow.flight.protocol.sql.ActionCancelQueryResult. + * Use `create(ActionCancelQueryResultSchema)` to create a new message. + * @deprecated + */ +export const ActionCancelQueryResultSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_FlightSql, 31); + +/** + * @generated from enum arrow.flight.protocol.sql.ActionCancelQueryResult.CancelResult + * @deprecated + */ +export enum ActionCancelQueryResult_CancelResult { + /** + * The cancellation status is unknown. Servers should avoid using + * this value (send a NOT_FOUND error if the requested query is + * not known). Clients can retry the request. + * + * @generated from enum value: CANCEL_RESULT_UNSPECIFIED = 0; + */ + UNSPECIFIED = 0, + + /** + * The cancellation request is complete. Subsequent requests with + * the same payload may return CANCELLED or a NOT_FOUND error. + * + * @generated from enum value: CANCEL_RESULT_CANCELLED = 1; + */ + CANCELLED = 1, + + /** + * The cancellation request is in progress. The client may retry + * the cancellation request. + * + * @generated from enum value: CANCEL_RESULT_CANCELLING = 2; + */ + CANCELLING = 2, + + /** + * The query is not cancellable. The client should not retry the + * cancellation request. + * + * @generated from enum value: CANCEL_RESULT_NOT_CANCELLABLE = 3; + */ + NOT_CANCELLABLE = 3, +} + +/** + * Describes the enum arrow.flight.protocol.sql.ActionCancelQueryResult.CancelResult. + * @deprecated + */ +export const ActionCancelQueryResult_CancelResultSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 31, 0); + +/** + * Options for CommandGetSqlInfo. + * + * @generated from enum arrow.flight.protocol.sql.SqlInfo + */ +export enum SqlInfo { + /** + * Retrieves a UTF-8 string with the name of the Flight SQL Server. + * + * @generated from enum value: FLIGHT_SQL_SERVER_NAME = 0; + */ + FLIGHT_SQL_SERVER_NAME = 0, + + /** + * Retrieves a UTF-8 string with the native version of the Flight SQL Server. + * + * @generated from enum value: FLIGHT_SQL_SERVER_VERSION = 1; + */ + FLIGHT_SQL_SERVER_VERSION = 1, + + /** + * Retrieves a UTF-8 string with the Arrow format version of the Flight SQL Server. + * + * @generated from enum value: FLIGHT_SQL_SERVER_ARROW_VERSION = 2; + */ + FLIGHT_SQL_SERVER_ARROW_VERSION = 2, + + /** + * + * Retrieves a boolean value indicating whether the Flight SQL Server is read only. 
+ * + * Returns: + * - false: if read-write + * - true: if read only + * + * @generated from enum value: FLIGHT_SQL_SERVER_READ_ONLY = 3; + */ + FLIGHT_SQL_SERVER_READ_ONLY = 3, + + /** + * + * Retrieves a boolean value indicating whether the Flight SQL Server supports executing + * SQL queries. + * + * Note that the absence of this info (as opposed to a false value) does not necessarily + * mean that SQL is not supported, as this property was not originally defined. + * + * @generated from enum value: FLIGHT_SQL_SERVER_SQL = 4; + */ + FLIGHT_SQL_SERVER_SQL = 4, + + /** + * + * Retrieves a boolean value indicating whether the Flight SQL Server supports executing + * Substrait plans. + * + * @generated from enum value: FLIGHT_SQL_SERVER_SUBSTRAIT = 5; + */ + FLIGHT_SQL_SERVER_SUBSTRAIT = 5, + + /** + * + * Retrieves a string value indicating the minimum supported Substrait version, or null + * if Substrait is not supported. + * + * @generated from enum value: FLIGHT_SQL_SERVER_SUBSTRAIT_MIN_VERSION = 6; + */ + FLIGHT_SQL_SERVER_SUBSTRAIT_MIN_VERSION = 6, + + /** + * + * Retrieves a string value indicating the maximum supported Substrait version, or null + * if Substrait is not supported. + * + * @generated from enum value: FLIGHT_SQL_SERVER_SUBSTRAIT_MAX_VERSION = 7; + */ + FLIGHT_SQL_SERVER_SUBSTRAIT_MAX_VERSION = 7, + + /** + * + * Retrieves an int32 indicating whether the Flight SQL Server supports the + * BeginTransaction/EndTransaction/BeginSavepoint/EndSavepoint actions. + * + * Even if this is not supported, the database may still support explicit "BEGIN + * TRANSACTION"/"COMMIT" SQL statements (see SQL_TRANSACTIONS_SUPPORTED); this property + * is only about whether the server implements the Flight SQL API endpoints. + * + * The possible values are listed in `SqlSupportedTransaction`. + * + * @generated from enum value: FLIGHT_SQL_SERVER_TRANSACTION = 8; + */ + FLIGHT_SQL_SERVER_TRANSACTION = 8, + + /** + * + * Retrieves a boolean value indicating whether the Flight SQL Server supports explicit + * query cancellation (the CancelQuery action). + * + * @generated from enum value: FLIGHT_SQL_SERVER_CANCEL = 9; + */ + FLIGHT_SQL_SERVER_CANCEL = 9, + + /** + * + * Retrieves a boolean value indicating whether the Flight SQL Server supports executing + * bulk ingestion. + * + * @generated from enum value: FLIGHT_SQL_SERVER_BULK_INGESTION = 10; + */ + FLIGHT_SQL_SERVER_BULK_INGESTION = 10, + + /** + * + * Retrieves a boolean value indicating whether transactions are supported for bulk ingestion. If not, invoking + * the method commit in the context of a bulk ingestion is a noop, and the isolation level is + * `arrow.flight.protocol.sql.SqlTransactionIsolationLevel.TRANSACTION_NONE`. + * + * Returns: + * - false: if bulk ingestion transactions are unsupported; + * - true: if bulk ingestion transactions are supported. + * + * @generated from enum value: FLIGHT_SQL_SERVER_INGEST_TRANSACTIONS_SUPPORTED = 11; + */ + FLIGHT_SQL_SERVER_INGEST_TRANSACTIONS_SUPPORTED = 11, + + /** + * + * Retrieves an int32 indicating the timeout (in milliseconds) for prepared statement handles. + * + * If 0, there is no timeout. Servers should reset the timeout when the handle is used in a command. + * + * @generated from enum value: FLIGHT_SQL_SERVER_STATEMENT_TIMEOUT = 100; + */ + FLIGHT_SQL_SERVER_STATEMENT_TIMEOUT = 100, + + /** + * + * Retrieves an int32 indicating the timeout (in milliseconds) for transactions, since transactions are not tied to a connection. + * + * If 0, there is no timeout. 
Servers should reset the timeout when the handle is used in a command. + * + * @generated from enum value: FLIGHT_SQL_SERVER_TRANSACTION_TIMEOUT = 101; + */ + FLIGHT_SQL_SERVER_TRANSACTION_TIMEOUT = 101, + + /** + * + * Retrieves a boolean value indicating whether the Flight SQL Server supports CREATE and DROP of catalogs. + * + * Returns: + * - false: if it doesn't support CREATE and DROP of catalogs. + * - true: if it supports CREATE and DROP of catalogs. + * + * @generated from enum value: SQL_DDL_CATALOG = 500; + */ + SQL_DDL_CATALOG = 500, + + /** + * + * Retrieves a boolean value indicating whether the Flight SQL Server supports CREATE and DROP of schemas. + * + * Returns: + * - false: if it doesn't support CREATE and DROP of schemas. + * - true: if it supports CREATE and DROP of schemas. + * + * @generated from enum value: SQL_DDL_SCHEMA = 501; + */ + SQL_DDL_SCHEMA = 501, + + /** + * + * Indicates whether the Flight SQL Server supports CREATE and DROP of tables. + * + * Returns: + * - false: if it doesn't support CREATE and DROP of tables. + * - true: if it supports CREATE and DROP of tables. + * + * @generated from enum value: SQL_DDL_TABLE = 502; + */ + SQL_DDL_TABLE = 502, + + /** + * + * Retrieves a int32 ordinal representing the case sensitivity of catalog, table, schema and table names. + * + * The possible values are listed in `arrow.flight.protocol.sql.SqlSupportedCaseSensitivity`. + * + * @generated from enum value: SQL_IDENTIFIER_CASE = 503; + */ + SQL_IDENTIFIER_CASE = 503, + + /** + * Retrieves a UTF-8 string with the supported character(s) used to surround a delimited identifier. + * + * @generated from enum value: SQL_IDENTIFIER_QUOTE_CHAR = 504; + */ + SQL_IDENTIFIER_QUOTE_CHAR = 504, + + /** + * + * Retrieves a int32 describing the case sensitivity of quoted identifiers. + * + * The possible values are listed in `arrow.flight.protocol.sql.SqlSupportedCaseSensitivity`. + * + * @generated from enum value: SQL_QUOTED_IDENTIFIER_CASE = 505; + */ + SQL_QUOTED_IDENTIFIER_CASE = 505, + + /** + * + * Retrieves a boolean value indicating whether all tables are selectable. + * + * Returns: + * - false: if not all tables are selectable or if none are; + * - true: if all tables are selectable. + * + * @generated from enum value: SQL_ALL_TABLES_ARE_SELECTABLE = 506; + */ + SQL_ALL_TABLES_ARE_SELECTABLE = 506, + + /** + * + * Retrieves the null ordering. + * + * Returns a int32 ordinal for the null ordering being used, as described in + * `arrow.flight.protocol.sql.SqlNullOrdering`. + * + * @generated from enum value: SQL_NULL_ORDERING = 507; + */ + SQL_NULL_ORDERING = 507, + + /** + * Retrieves a UTF-8 string list with values of the supported keywords. + * + * @generated from enum value: SQL_KEYWORDS = 508; + */ + SQL_KEYWORDS = 508, + + /** + * Retrieves a UTF-8 string list with values of the supported numeric functions. + * + * @generated from enum value: SQL_NUMERIC_FUNCTIONS = 509; + */ + SQL_NUMERIC_FUNCTIONS = 509, + + /** + * Retrieves a UTF-8 string list with values of the supported string functions. + * + * @generated from enum value: SQL_STRING_FUNCTIONS = 510; + */ + SQL_STRING_FUNCTIONS = 510, + + /** + * Retrieves a UTF-8 string list with values of the supported system functions. + * + * @generated from enum value: SQL_SYSTEM_FUNCTIONS = 511; + */ + SQL_SYSTEM_FUNCTIONS = 511, + + /** + * Retrieves a UTF-8 string list with values of the supported datetime functions. 
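+   *
+   * Request sketch (CommandGetSqlInfo and its schema are defined earlier in
+   * this file; an empty `info` list asks for all values):
+   *
+   * ```ts
+   * const cmd = create(CommandGetSqlInfoSchema, {
+   *   info: [SqlInfo.SQL_DATETIME_FUNCTIONS]
+   * });
+   * ```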
+   *
+   * @generated from enum value: SQL_DATETIME_FUNCTIONS = 512;
+   */
+  SQL_DATETIME_FUNCTIONS = 512,
+
+  /**
+   *
+   * Retrieves the UTF-8 string that can be used to escape wildcard characters.
+   * This is the string that can be used to escape '_' or '%' in the catalog search parameters that are a pattern
+   * (and therefore use one of the wildcard characters).
+   * The '_' character represents any single character; the '%' character represents any sequence of zero or more
+   * characters.
+   *
+   * @generated from enum value: SQL_SEARCH_STRING_ESCAPE = 513;
+   */
+  SQL_SEARCH_STRING_ESCAPE = 513,
+
+  /**
+   *
+   * Retrieves a UTF-8 string with all the "extra" characters that can be used in unquoted identifier names
+   * (those beyond a-z, A-Z, 0-9 and _).
+   *
+   * @generated from enum value: SQL_EXTRA_NAME_CHARACTERS = 514;
+   */
+  SQL_EXTRA_NAME_CHARACTERS = 514,
+
+  /**
+   *
+   * Retrieves a boolean value indicating whether column aliasing is supported.
+   * If so, the SQL AS clause can be used to provide names for computed columns or to provide alias names for columns
+   * as required.
+   *
+   * Returns:
+   * - false: if column aliasing is unsupported;
+   * - true: if column aliasing is supported.
+   *
+   * @generated from enum value: SQL_SUPPORTS_COLUMN_ALIASING = 515;
+   */
+  SQL_SUPPORTS_COLUMN_ALIASING = 515,
+
+  /**
+   *
+   * Retrieves a boolean value indicating whether concatenations between null and non-null values resulting
+   * in null are supported.
+   *
+   * Returns:
+   * - false: if concatenations between null and non-null values resulting in null are unsupported;
+   * - true: if concatenations between null and non-null values resulting in null are supported.
+   *
+   * @generated from enum value: SQL_NULL_PLUS_NULL_IS_NULL = 516;
+   */
+  SQL_NULL_PLUS_NULL_IS_NULL = 516,
+
+  /**
+   *
+   * Retrieves a map where the key is the type to convert from and the value is a list with the types to convert to,
+   * indicating the supported conversions. Each key and each item on the list value is a value to a predefined type on
+   * SqlSupportsConvert enum.
+   * The returned map will be: map<int32, list<int32>>
+   *
+   * @generated from enum value: SQL_SUPPORTS_CONVERT = 517;
+   */
+  SQL_SUPPORTS_CONVERT = 517,
+
+  /**
+   *
+   * Retrieves a boolean value indicating whether table correlation names are supported.
+   *
+   * Returns:
+   * - false: if table correlation names are unsupported;
+   * - true: if table correlation names are supported.
+   *
+   * @generated from enum value: SQL_SUPPORTS_TABLE_CORRELATION_NAMES = 518;
+   */
+  SQL_SUPPORTS_TABLE_CORRELATION_NAMES = 518,
+
+  /**
+   *
+   * Retrieves a boolean value indicating whether, when table correlation names are supported,
+   * they are restricted to being different from the names of the tables.
+   *
+   * Returns:
+   * - false: if different table correlation names are unsupported;
+   * - true: if different table correlation names are supported.
+   *
+   * @generated from enum value: SQL_SUPPORTS_DIFFERENT_TABLE_CORRELATION_NAMES = 519;
+   */
+  SQL_SUPPORTS_DIFFERENT_TABLE_CORRELATION_NAMES = 519,
+
+  /**
+   *
+   * Retrieves a boolean value indicating whether expressions in ORDER BY lists are supported.
+ * + * Returns: + * - false: if expressions in ORDER BY are unsupported; + * - true: if expressions in ORDER BY are supported; + * + * @generated from enum value: SQL_SUPPORTS_EXPRESSIONS_IN_ORDER_BY = 520; + */ + SQL_SUPPORTS_EXPRESSIONS_IN_ORDER_BY = 520, + + /** + * + * Retrieves a boolean value indicating whether using a column that is not in the SELECT statement in a GROUP BY + * clause is supported. + * + * Returns: + * - false: if using a column that is not in the SELECT statement in a GROUP BY clause is unsupported; + * - true: if using a column that is not in the SELECT statement in a GROUP BY clause is supported. + * + * @generated from enum value: SQL_SUPPORTS_ORDER_BY_UNRELATED = 521; + */ + SQL_SUPPORTS_ORDER_BY_UNRELATED = 521, + + /** + * + * Retrieves the supported GROUP BY commands; + * + * Returns an int32 bitmask value representing the supported commands. + * The returned bitmask should be parsed in order to retrieve the supported commands. + * + * For instance: + * - return 0 (\b0) => [] (GROUP BY is unsupported); + * - return 1 (\b1) => [SQL_GROUP_BY_UNRELATED]; + * - return 2 (\b10) => [SQL_GROUP_BY_BEYOND_SELECT]; + * - return 3 (\b11) => [SQL_GROUP_BY_UNRELATED, SQL_GROUP_BY_BEYOND_SELECT]. + * Valid GROUP BY types are described under `arrow.flight.protocol.sql.SqlSupportedGroupBy`. + * + * @generated from enum value: SQL_SUPPORTED_GROUP_BY = 522; + */ + SQL_SUPPORTED_GROUP_BY = 522, + + /** + * + * Retrieves a boolean value indicating whether specifying a LIKE escape clause is supported. + * + * Returns: + * - false: if specifying a LIKE escape clause is unsupported; + * - true: if specifying a LIKE escape clause is supported. + * + * @generated from enum value: SQL_SUPPORTS_LIKE_ESCAPE_CLAUSE = 523; + */ + SQL_SUPPORTS_LIKE_ESCAPE_CLAUSE = 523, + + /** + * + * Retrieves a boolean value indicating whether columns may be defined as non-nullable. + * + * Returns: + * - false: if columns cannot be defined as non-nullable; + * - true: if columns may be defined as non-nullable. + * + * @generated from enum value: SQL_SUPPORTS_NON_NULLABLE_COLUMNS = 524; + */ + SQL_SUPPORTS_NON_NULLABLE_COLUMNS = 524, + + /** + * + * Retrieves the supported SQL grammar level as per the ODBC specification. + * + * Returns an int32 bitmask value representing the supported SQL grammar level. + * The returned bitmask should be parsed in order to retrieve the supported grammar levels. + * + * For instance: + * - return 0 (\b0) => [] (SQL grammar is unsupported); + * - return 1 (\b1) => [SQL_MINIMUM_GRAMMAR]; + * - return 2 (\b10) => [SQL_CORE_GRAMMAR]; + * - return 3 (\b11) => [SQL_MINIMUM_GRAMMAR, SQL_CORE_GRAMMAR]; + * - return 4 (\b100) => [SQL_EXTENDED_GRAMMAR]; + * - return 5 (\b101) => [SQL_MINIMUM_GRAMMAR, SQL_EXTENDED_GRAMMAR]; + * - return 6 (\b110) => [SQL_CORE_GRAMMAR, SQL_EXTENDED_GRAMMAR]; + * - return 7 (\b111) => [SQL_MINIMUM_GRAMMAR, SQL_CORE_GRAMMAR, SQL_EXTENDED_GRAMMAR]. + * Valid SQL grammar levels are described under `arrow.flight.protocol.sql.SupportedSqlGrammar`. + * + * @generated from enum value: SQL_SUPPORTED_GRAMMAR = 525; + */ + SQL_SUPPORTED_GRAMMAR = 525, + + /** + * + * Retrieves the supported ANSI92 SQL grammar level. + * + * Returns an int32 bitmask value representing the supported ANSI92 SQL grammar level. + * The returned bitmask should be parsed in order to retrieve the supported commands. 
+   *
+   * For instance:
+   * - return 0 (\b0) => [] (ANSI92 SQL grammar is unsupported);
+   * - return 1 (\b1) => [ANSI92_ENTRY_SQL];
+   * - return 2 (\b10) => [ANSI92_INTERMEDIATE_SQL];
+   * - return 3 (\b11) => [ANSI92_ENTRY_SQL, ANSI92_INTERMEDIATE_SQL];
+   * - return 4 (\b100) => [ANSI92_FULL_SQL];
+   * - return 5 (\b101) => [ANSI92_ENTRY_SQL, ANSI92_FULL_SQL];
+   * - return 6 (\b110) => [ANSI92_INTERMEDIATE_SQL, ANSI92_FULL_SQL];
+   * - return 7 (\b111) => [ANSI92_ENTRY_SQL, ANSI92_INTERMEDIATE_SQL, ANSI92_FULL_SQL].
+   * Valid ANSI92 SQL grammar levels are described under `arrow.flight.protocol.sql.SupportedAnsi92SqlGrammarLevel`.
+   *
+   * @generated from enum value: SQL_ANSI92_SUPPORTED_LEVEL = 526;
+   */
+  SQL_ANSI92_SUPPORTED_LEVEL = 526,
+
+  /**
+   *
+   * Retrieves a boolean value indicating whether the SQL Integrity Enhancement Facility is supported.
+   *
+   * Returns:
+   * - false: if the SQL Integrity Enhancement Facility is unsupported;
+   * - true: if the SQL Integrity Enhancement Facility is supported.
+   *
+   * @generated from enum value: SQL_SUPPORTS_INTEGRITY_ENHANCEMENT_FACILITY = 527;
+   */
+  SQL_SUPPORTS_INTEGRITY_ENHANCEMENT_FACILITY = 527,
+
+  /**
+   *
+   * Retrieves the support level for SQL OUTER JOINs.
+   *
+   * Returns an int32 ordinal for the SQL ordering being used, as described in
+   * `arrow.flight.protocol.sql.SqlOuterJoinsSupportLevel`.
+   *
+   * @generated from enum value: SQL_OUTER_JOINS_SUPPORT_LEVEL = 528;
+   */
+  SQL_OUTER_JOINS_SUPPORT_LEVEL = 528,
+
+  /**
+   * Retrieves a UTF-8 string with the preferred term for "schema".
+   *
+   * @generated from enum value: SQL_SCHEMA_TERM = 529;
+   */
+  SQL_SCHEMA_TERM = 529,
+
+  /**
+   * Retrieves a UTF-8 string with the preferred term for "procedure".
+   *
+   * @generated from enum value: SQL_PROCEDURE_TERM = 530;
+   */
+  SQL_PROCEDURE_TERM = 530,
+
+  /**
+   *
+   * Retrieves a UTF-8 string with the preferred term for "catalog".
+   * If an empty string is returned, it is assumed that the server does NOT support catalogs.
+   *
+   * @generated from enum value: SQL_CATALOG_TERM = 531;
+   */
+  SQL_CATALOG_TERM = 531,
+
+  /**
+   *
+   * Retrieves a boolean value indicating whether a catalog appears at the start of a fully qualified table name.
+   *
+   * Returns:
+   * - false: if a catalog does not appear at the start of a fully qualified table name;
+   * - true: if a catalog appears at the start of a fully qualified table name.
+   *
+   * @generated from enum value: SQL_CATALOG_AT_START = 532;
+   */
+  SQL_CATALOG_AT_START = 532,
+
+  /**
+   *
+   * Retrieves the supported actions for a SQL schema.
+   *
+   * Returns an int32 bitmask value representing the supported actions for a SQL schema.
+   * The returned bitmask should be parsed in order to retrieve the supported actions for a SQL schema.
+   *
+   * For instance:
+   * - return 0 (\b0) => [] (no supported actions for SQL schema);
+   * - return 1 (\b1) => [SQL_ELEMENT_IN_PROCEDURE_CALLS];
+   * - return 2 (\b10) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS];
+   * - return 3 (\b11) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS];
+   * - return 4 (\b100) => [SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS];
+   * - return 5 (\b101) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS];
+   * - return 6 (\b110) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS];
+   * - return 7 (\b111) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS].
+   * Valid actions for a SQL schema are described under `arrow.flight.protocol.sql.SqlSupportedElementActions`.
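+   *
+   * Decoding sketch (`value` is the int32 bitmask returned for this info
+   * value; the bit positions follow the list above):
+   *
+   * ```ts
+   * const inProcedureCalls = (value & (1 << 0)) !== 0;       // SQL_ELEMENT_IN_PROCEDURE_CALLS
+   * const inIndexDefinitions = (value & (1 << 1)) !== 0;     // SQL_ELEMENT_IN_INDEX_DEFINITIONS
+   * const inPrivilegeDefinitions = (value & (1 << 2)) !== 0; // SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS
+   * ```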
+   *
+   * @generated from enum value: SQL_SCHEMAS_SUPPORTED_ACTIONS = 533;
+   */
+  SQL_SCHEMAS_SUPPORTED_ACTIONS = 533,
+
+  /**
+   *
+   * Retrieves the supported actions for a SQL catalog.
+   *
+   * Returns an int32 bitmask value representing the supported actions for a SQL catalog.
+   * The returned bitmask should be parsed in order to retrieve the supported actions for a SQL catalog.
+   *
+   * For instance:
+   * - return 0 (\b0) => [] (no supported actions for SQL catalog);
+   * - return 1 (\b1) => [SQL_ELEMENT_IN_PROCEDURE_CALLS];
+   * - return 2 (\b10) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS];
+   * - return 3 (\b11) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS];
+   * - return 4 (\b100) => [SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS];
+   * - return 5 (\b101) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS];
+   * - return 6 (\b110) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS];
+   * - return 7 (\b111) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS].
+   * Valid actions for a SQL catalog are described under `arrow.flight.protocol.sql.SqlSupportedElementActions`.
+   *
+   * @generated from enum value: SQL_CATALOGS_SUPPORTED_ACTIONS = 534;
+   */
+  SQL_CATALOGS_SUPPORTED_ACTIONS = 534,
+
+  /**
+   *
+   * Retrieves the supported SQL positioned commands.
+   *
+   * Returns an int32 bitmask value representing the supported SQL positioned commands.
+   * The returned bitmask should be parsed in order to retrieve the supported SQL positioned commands.
+   *
+   * For instance:
+   * - return 0 (\b0) => [] (no supported SQL positioned commands);
+   * - return 1 (\b1) => [SQL_POSITIONED_DELETE];
+   * - return 2 (\b10) => [SQL_POSITIONED_UPDATE];
+   * - return 3 (\b11) => [SQL_POSITIONED_DELETE, SQL_POSITIONED_UPDATE].
+   * Valid SQL positioned commands are described under `arrow.flight.protocol.sql.SqlSupportedPositionedCommands`.
+   *
+   * @generated from enum value: SQL_SUPPORTED_POSITIONED_COMMANDS = 535;
+   */
+  SQL_SUPPORTED_POSITIONED_COMMANDS = 535,
+
+  /**
+   *
+   * Retrieves a boolean value indicating whether SELECT FOR UPDATE statements are supported.
+   *
+   * Returns:
+   * - false: if SELECT FOR UPDATE statements are unsupported;
+   * - true: if SELECT FOR UPDATE statements are supported.
+   *
+   * @generated from enum value: SQL_SELECT_FOR_UPDATE_SUPPORTED = 536;
+   */
+  SQL_SELECT_FOR_UPDATE_SUPPORTED = 536,
+
+  /**
+   *
+   * Retrieves a boolean value indicating whether stored procedure calls that use the stored procedure escape syntax
+   * are supported.
+   *
+   * Returns:
+   * - false: if stored procedure calls that use the stored procedure escape syntax are unsupported;
+   * - true: if stored procedure calls that use the stored procedure escape syntax are supported.
+   *
+   * @generated from enum value: SQL_STORED_PROCEDURES_SUPPORTED = 537;
+   */
+  SQL_STORED_PROCEDURES_SUPPORTED = 537,
+
+  /**
+   *
+   * Retrieves the supported SQL subqueries.
+   *
+   * Returns an int32 bitmask value representing the supported SQL subqueries.
+   * The returned bitmask should be parsed in order to retrieve the supported SQL subqueries.
+   *
+   * For instance:
+   * - return 0 (\b0) => [] (no supported SQL subqueries);
+   * - return 1 (\b1) => [SQL_SUBQUERIES_IN_COMPARISONS];
+   * - return 2 (\b10) => [SQL_SUBQUERIES_IN_EXISTS];
+   * - return 3 (\b11) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS];
+   * - return 4 (\b100) => [SQL_SUBQUERIES_IN_INS];
+   * - return 5 (\b101) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_INS];
+   * - return 6 (\b110) => [SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_EXISTS];
+   * - return 7 (\b111) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS];
+   * - return 8 (\b1000) => [SQL_SUBQUERIES_IN_QUANTIFIEDS];
+   * - return 9 (\b1001) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_QUANTIFIEDS];
+   * - return 10 (\b1010) => [SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_QUANTIFIEDS];
+   * - return 11 (\b1011) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_QUANTIFIEDS];
+   * - return 12 (\b1100) => [SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS];
+   * - return 13 (\b1101) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS];
+   * - return 14 (\b1110) => [SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS];
+   * - return 15 (\b1111) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS];
+   * - ...
+   * Valid SQL subqueries are described under `arrow.flight.protocol.sql.SqlSupportedSubqueries`.
+   *
+   * @generated from enum value: SQL_SUPPORTED_SUBQUERIES = 538;
+   */
+  SQL_SUPPORTED_SUBQUERIES = 538,
+
+  /**
+   *
+   * Retrieves a boolean value indicating whether correlated subqueries are supported.
+   *
+   * Returns:
+   * - false: if correlated subqueries are unsupported;
+   * - true: if correlated subqueries are supported.
+   *
+   * @generated from enum value: SQL_CORRELATED_SUBQUERIES_SUPPORTED = 539;
+   */
+  SQL_CORRELATED_SUBQUERIES_SUPPORTED = 539,
+
+  /**
+   *
+   * Retrieves the supported SQL UNIONs.
+   *
+   * Returns an int32 bitmask value representing the supported SQL UNIONs.
+   * The returned bitmask should be parsed in order to retrieve the supported SQL UNIONs.
+   *
+   * For instance:
+   * - return 0 (\b0) => [] (no supported SQL unions);
+   * - return 1 (\b1) => [SQL_UNION];
+   * - return 2 (\b10) => [SQL_UNION_ALL];
+   * - return 3 (\b11) => [SQL_UNION, SQL_UNION_ALL].
+   * Valid SQL unions are described under `arrow.flight.protocol.sql.SqlSupportedUnions`.
+   *
+   * @generated from enum value: SQL_SUPPORTED_UNIONS = 540;
+   */
+  SQL_SUPPORTED_UNIONS = 540,
+
+  /**
+   * Retrieves an int64 value representing the maximum number of hex characters allowed in an inline binary literal.
+   *
+   * @generated from enum value: SQL_MAX_BINARY_LITERAL_LENGTH = 541;
+   */
+  SQL_MAX_BINARY_LITERAL_LENGTH = 541,
+
+  /**
+   * Retrieves an int64 value representing the maximum number of characters allowed for a character literal.
+   *
+   * @generated from enum value: SQL_MAX_CHAR_LITERAL_LENGTH = 542;
+   */
+  SQL_MAX_CHAR_LITERAL_LENGTH = 542,
+
+  /**
+   * Retrieves an int64 value representing the maximum number of characters allowed for a column name.
+   *
+   * @generated from enum value: SQL_MAX_COLUMN_NAME_LENGTH = 543;
+   */
+  SQL_MAX_COLUMN_NAME_LENGTH = 543,
+
+  /**
+   * Retrieves an int64 value representing the maximum number of columns allowed in a GROUP BY clause.
+ *
+ * @generated from enum value: SQL_MAX_COLUMNS_IN_GROUP_BY = 544;
+ */
+ SQL_MAX_COLUMNS_IN_GROUP_BY = 544,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of columns allowed in an index.
+ *
+ * @generated from enum value: SQL_MAX_COLUMNS_IN_INDEX = 545;
+ */
+ SQL_MAX_COLUMNS_IN_INDEX = 545,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of columns allowed in an ORDER BY clause.
+ *
+ * @generated from enum value: SQL_MAX_COLUMNS_IN_ORDER_BY = 546;
+ */
+ SQL_MAX_COLUMNS_IN_ORDER_BY = 546,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of columns allowed in a SELECT list.
+ *
+ * @generated from enum value: SQL_MAX_COLUMNS_IN_SELECT = 547;
+ */
+ SQL_MAX_COLUMNS_IN_SELECT = 547,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of columns allowed in a table.
+ *
+ * @generated from enum value: SQL_MAX_COLUMNS_IN_TABLE = 548;
+ */
+ SQL_MAX_COLUMNS_IN_TABLE = 548,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of concurrent connections possible.
+ *
+ * @generated from enum value: SQL_MAX_CONNECTIONS = 549;
+ */
+ SQL_MAX_CONNECTIONS = 549,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of characters allowed in a cursor name.
+ *
+ * @generated from enum value: SQL_MAX_CURSOR_NAME_LENGTH = 550;
+ */
+ SQL_MAX_CURSOR_NAME_LENGTH = 550,
+
+ /**
+ *
+ * Retrieves an int64 value representing the maximum number of bytes allowed for an index,
+ * including all of the parts of the index.
+ *
+ * @generated from enum value: SQL_MAX_INDEX_LENGTH = 551;
+ */
+ SQL_MAX_INDEX_LENGTH = 551,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of characters allowed in a schema name.
+ *
+ * @generated from enum value: SQL_DB_SCHEMA_NAME_LENGTH = 552;
+ */
+ SQL_DB_SCHEMA_NAME_LENGTH = 552,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of characters allowed in a procedure name.
+ *
+ * @generated from enum value: SQL_MAX_PROCEDURE_NAME_LENGTH = 553;
+ */
+ SQL_MAX_PROCEDURE_NAME_LENGTH = 553,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of characters allowed in a catalog name.
+ *
+ * @generated from enum value: SQL_MAX_CATALOG_NAME_LENGTH = 554;
+ */
+ SQL_MAX_CATALOG_NAME_LENGTH = 554,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of bytes allowed in a single row.
+ *
+ * @generated from enum value: SQL_MAX_ROW_SIZE = 555;
+ */
+ SQL_MAX_ROW_SIZE = 555,
+
+ /**
+ *
+ * Retrieves a boolean indicating whether the return value for the JDBC method getMaxRowSize includes the SQL
+ * data types LONGVARCHAR and LONGVARBINARY.
+ *
+ * Returns:
+ * - false: if the return value for the JDBC method getMaxRowSize does
+ * not include the SQL data types LONGVARCHAR and LONGVARBINARY;
+ * - true: if the return value for the JDBC method getMaxRowSize includes
+ * the SQL data types LONGVARCHAR and LONGVARBINARY.
+ *
+ * @generated from enum value: SQL_MAX_ROW_SIZE_INCLUDES_BLOBS = 556;
+ */
+ SQL_MAX_ROW_SIZE_INCLUDES_BLOBS = 556,
+
+ /**
+ *
+ * Retrieves an int64 value representing the maximum number of characters allowed for an SQL statement;
+ * a result of 0 (zero) means that there is no limit or the limit is not known.
+ *
+ * @generated from enum value: SQL_MAX_STATEMENT_LENGTH = 557;
+ */
+ SQL_MAX_STATEMENT_LENGTH = 557,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of active statements that can be open at the same time.
+ *
+ * @generated from enum value: SQL_MAX_STATEMENTS = 558;
+ */
+ SQL_MAX_STATEMENTS = 558,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of characters allowed in a table name.
+ *
+ * @generated from enum value: SQL_MAX_TABLE_NAME_LENGTH = 559;
+ */
+ SQL_MAX_TABLE_NAME_LENGTH = 559,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of tables allowed in a SELECT statement.
+ *
+ * @generated from enum value: SQL_MAX_TABLES_IN_SELECT = 560;
+ */
+ SQL_MAX_TABLES_IN_SELECT = 560,
+
+ /**
+ * Retrieves an int64 value representing the maximum number of characters allowed in a user name.
+ *
+ * @generated from enum value: SQL_MAX_USERNAME_LENGTH = 561;
+ */
+ SQL_MAX_USERNAME_LENGTH = 561,
+
+ /**
+ *
+ * Retrieves this database's default transaction isolation level as described in
+ * `arrow.flight.protocol.sql.SqlTransactionIsolationLevel`.
+ *
+ * Returns an int32 ordinal for the SQL transaction isolation level.
+ *
+ * @generated from enum value: SQL_DEFAULT_TRANSACTION_ISOLATION = 562;
+ */
+ SQL_DEFAULT_TRANSACTION_ISOLATION = 562,
+
+ /**
+ *
+ * Retrieves a boolean value indicating whether transactions are supported. If not, invoking the method commit is a
+ * noop, and the isolation level is `arrow.flight.protocol.sql.SqlTransactionIsolationLevel.SQL_TRANSACTION_NONE`.
+ *
+ * Returns:
+ * - false: if transactions are unsupported;
+ * - true: if transactions are supported.
+ *
+ * @generated from enum value: SQL_TRANSACTIONS_SUPPORTED = 563;
+ */
+ SQL_TRANSACTIONS_SUPPORTED = 563,
+
+ /**
+ *
+ * Retrieves the supported transaction isolation levels.
+ *
+ * Returns an int32 bitmask value representing the supported transaction isolation levels.
+ * The returned bitmask should be parsed in order to retrieve the supported transaction isolation levels.
+ *
+ * For instance:
+ * - return 0 (\b0) => [] (no supported SQL transaction isolation levels);
+ * - return 1 (\b1) => [SQL_TRANSACTION_NONE];
+ * - return 2 (\b10) => [SQL_TRANSACTION_READ_UNCOMMITTED];
+ * - return 3 (\b11) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED];
+ * - return 4 (\b100) => [SQL_TRANSACTION_READ_COMMITTED];
+ * - return 5 (\b101) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_COMMITTED];
+ * - return 6 (\b110) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED];
+ * - return 7 (\b111) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED];
+ * - return 8 (\b1000) => [SQL_TRANSACTION_REPEATABLE_READ];
+ * - return 9 (\b1001) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_REPEATABLE_READ];
+ * - return 10 (\b1010) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ];
+ * - return 11 (\b1011) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ];
+ * - return 12 (\b1100) => [SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ];
+ * - return 13 (\b1101) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ];
+ * - return 14 (\b1110) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ];
+ * - return 15 (\b1111) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ];
+ * - return 16 (\b10000) => [SQL_TRANSACTION_SERIALIZABLE];
+ * - ...
+ * Valid SQL transaction isolation levels are described under `arrow.flight.protocol.sql.SqlTransactionIsolationLevel`.
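+ *
+ * As an illustrative sketch (not part of the generated API; `bitmask` is a
+ * hypothetical int32 read from a GetSqlInfo result), a client could decode
+ * such a bitmask into `SqlTransactionIsolationLevel` ordinals like so:
+ *
+ *   const supported: SqlTransactionIsolationLevel[] = [];
+ *   for (let ordinal = 0; ordinal <= 4; ordinal++) {
+ *     // bit k of the mask corresponds to the enum ordinal k
+ *     if (bitmask & (1 << ordinal)) supported.push(ordinal as SqlTransactionIsolationLevel);
+ *   }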
+ *
+ * @generated from enum value: SQL_SUPPORTED_TRANSACTIONS_ISOLATION_LEVELS = 564;
+ */
+ SQL_SUPPORTED_TRANSACTIONS_ISOLATION_LEVELS = 564,
+
+ /**
+ *
+ * Retrieves a boolean value indicating whether a data definition statement within a transaction forces
+ * the transaction to commit.
+ *
+ * Returns:
+ * - false: if a data definition statement within a transaction does not force the transaction to commit;
+ * - true: if a data definition statement within a transaction forces the transaction to commit.
+ *
+ * @generated from enum value: SQL_DATA_DEFINITION_CAUSES_TRANSACTION_COMMIT = 565;
+ */
+ SQL_DATA_DEFINITION_CAUSES_TRANSACTION_COMMIT = 565,
+
+ /**
+ *
+ * Retrieves a boolean value indicating whether a data definition statement within a transaction is ignored.
+ *
+ * Returns:
+ * - false: if a data definition statement within a transaction is taken into account;
+ * - true: if a data definition statement within a transaction is ignored.
+ *
+ * @generated from enum value: SQL_DATA_DEFINITIONS_IN_TRANSACTIONS_IGNORED = 566;
+ */
+ SQL_DATA_DEFINITIONS_IN_TRANSACTIONS_IGNORED = 566,
+
+ /**
+ *
+ * Retrieves an int32 bitmask value representing the supported result set types.
+ * The returned bitmask should be parsed in order to retrieve the supported result set types.
+ *
+ * For instance:
+ * - return 0 (\b0) => [] (no supported result set types);
+ * - return 1 (\b1) => [SQL_RESULT_SET_TYPE_UNSPECIFIED];
+ * - return 2 (\b10) => [SQL_RESULT_SET_TYPE_FORWARD_ONLY];
+ * - return 3 (\b11) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_FORWARD_ONLY];
+ * - return 4 (\b100) => [SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE];
+ * - return 5 (\b101) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE];
+ * - return 6 (\b110) => [SQL_RESULT_SET_TYPE_FORWARD_ONLY, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE];
+ * - return 7 (\b111) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_FORWARD_ONLY, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE];
+ * - return 8 (\b1000) => [SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE];
+ * - ...
+ * Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetType`.
+ *
+ * @generated from enum value: SQL_SUPPORTED_RESULT_SET_TYPES = 567;
+ */
+ SQL_SUPPORTED_RESULT_SET_TYPES = 567,
+
+ /**
+ *
+ * Returns an int32 bitmask value representing the concurrency types supported for
+ * `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_UNSPECIFIED`.
+ *
+ * For instance:
+ * - return 0 (\b0) => [] (no supported concurrency types for this result set type)
+ * - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED]
+ * - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY]
+ * - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY]
+ * - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * Valid concurrency types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`.
+ *
+ * @generated from enum value: SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_UNSPECIFIED = 568;
+ */
+ SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_UNSPECIFIED = 568,
+
+ /**
+ *
+ * Returns an int32 bitmask value representing the concurrency types supported for
+ * `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_FORWARD_ONLY`.
+ *
+ * For instance:
+ * - return 0 (\b0) => [] (no supported concurrency types for this result set type)
+ * - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED]
+ * - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY]
+ * - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY]
+ * - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * Valid concurrency types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`.
+ *
+ * @generated from enum value: SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_FORWARD_ONLY = 569;
+ */
+ SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_FORWARD_ONLY = 569,
+
+ /**
+ *
+ * Returns an int32 bitmask value representing the concurrency types supported for
+ * `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE`.
+ *
+ * For instance:
+ * - return 0 (\b0) => [] (no supported concurrency types for this result set type)
+ * - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED]
+ * - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY]
+ * - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY]
+ * - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * Valid concurrency types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`.
+ *
+ * @generated from enum value: SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_SCROLL_SENSITIVE = 570;
+ */
+ SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_SCROLL_SENSITIVE = 570,
+
+ /**
+ *
+ * Returns an int32 bitmask value representing the concurrency types supported for
+ * `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE`.
+ *
+ * For instance:
+ * - return 0 (\b0) => [] (no supported concurrency types for this result set type)
+ * - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED]
+ * - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY]
+ * - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY]
+ * - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE]
+ * Valid concurrency types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`.
+ *
+ * @generated from enum value: SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_SCROLL_INSENSITIVE = 571;
+ */
+ SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_SCROLL_INSENSITIVE = 571,
+
+ /**
+ *
+ * Retrieves a boolean value indicating whether this database supports batch updates.
+ *
+ * Returns:
+ * - false: if this database does not support batch updates;
+ * - true: if this database supports batch updates.
+ *
+ * @generated from enum value: SQL_BATCH_UPDATES_SUPPORTED = 572;
+ */
+ SQL_BATCH_UPDATES_SUPPORTED = 572,
+
+ /**
+ *
+ * Retrieves a boolean value indicating whether this database supports savepoints.
+ *
+ * Returns:
+ * - false: if this database does not support savepoints;
+ * - true: if this database supports savepoints.
+ *
+ * @generated from enum value: SQL_SAVEPOINTS_SUPPORTED = 573;
+ */
+ SQL_SAVEPOINTS_SUPPORTED = 573,
+
+ /**
+ *
+ * Retrieves a boolean value indicating whether named parameters are supported in callable statements.
+ *
+ * Returns:
+ * - false: if named parameters in callable statements are unsupported;
+ * - true: if named parameters in callable statements are supported.
+ *
+ * @generated from enum value: SQL_NAMED_PARAMETERS_SUPPORTED = 574;
+ */
+ SQL_NAMED_PARAMETERS_SUPPORTED = 574,
+
+ /**
+ *
+ * Retrieves a boolean value indicating whether updates made to a LOB are made on a copy or directly to the LOB.
+ *
+ * Returns:
+ * - false: if updates made to a LOB are made directly to the LOB;
+ * - true: if updates made to a LOB are made on a copy.
+ *
+ * @generated from enum value: SQL_LOCATORS_UPDATE_COPY = 575;
+ */
+ SQL_LOCATORS_UPDATE_COPY = 575,
+
+ /**
+ *
+ * Retrieves a boolean value indicating whether invoking user-defined or vendor functions
+ * using the stored procedure escape syntax is supported.
+ *
+ * Returns:
+ * - false: if invoking user-defined or vendor functions using the stored procedure escape syntax is unsupported;
+ * - true: if invoking user-defined or vendor functions using the stored procedure escape syntax is supported.
+ *
+ * @generated from enum value: SQL_STORED_FUNCTIONS_USING_CALL_SYNTAX_SUPPORTED = 576;
+ */
+ SQL_STORED_FUNCTIONS_USING_CALL_SYNTAX_SUPPORTED = 576,
+}
+
+/**
+ * Describes the enum arrow.flight.protocol.sql.SqlInfo.
+ */
+export const SqlInfoSchema: GenEnum = /*@__PURE__*/
+ enumDesc(file_FlightSql, 0);
+
+/**
+ * The level of support for Flight SQL transaction RPCs.
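+ *
+ * A hedged illustration (`level` is a hypothetical value reported by a
+ * server): the variants below are ordered by capability, NONE < TRANSACTION
+ * < SAVEPOINT, so a client could gate savepoint usage with a comparison:
+ *
+ *   const canSavepoint = level >= SqlSupportedTransaction.SAVEPOINT;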
+ * + * @generated from enum arrow.flight.protocol.sql.SqlSupportedTransaction + */ +export enum SqlSupportedTransaction { + /** + * Unknown/not indicated/no support + * + * @generated from enum value: SQL_SUPPORTED_TRANSACTION_NONE = 0; + */ + NONE = 0, + + /** + * Transactions, but not savepoints. + * A savepoint is a mark within a transaction that can be individually + * rolled back to. Not all databases support savepoints. + * + * @generated from enum value: SQL_SUPPORTED_TRANSACTION_TRANSACTION = 1; + */ + TRANSACTION = 1, + + /** + * Transactions and savepoints + * + * @generated from enum value: SQL_SUPPORTED_TRANSACTION_SAVEPOINT = 2; + */ + SAVEPOINT = 2, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlSupportedTransaction. + */ +export const SqlSupportedTransactionSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 1); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlSupportedCaseSensitivity + */ +export enum SqlSupportedCaseSensitivity { + /** + * @generated from enum value: SQL_CASE_SENSITIVITY_UNKNOWN = 0; + */ + SQL_CASE_SENSITIVITY_UNKNOWN = 0, + + /** + * @generated from enum value: SQL_CASE_SENSITIVITY_CASE_INSENSITIVE = 1; + */ + SQL_CASE_SENSITIVITY_CASE_INSENSITIVE = 1, + + /** + * @generated from enum value: SQL_CASE_SENSITIVITY_UPPERCASE = 2; + */ + SQL_CASE_SENSITIVITY_UPPERCASE = 2, + + /** + * @generated from enum value: SQL_CASE_SENSITIVITY_LOWERCASE = 3; + */ + SQL_CASE_SENSITIVITY_LOWERCASE = 3, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlSupportedCaseSensitivity. + */ +export const SqlSupportedCaseSensitivitySchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 2); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlNullOrdering + */ +export enum SqlNullOrdering { + /** + * @generated from enum value: SQL_NULLS_SORTED_HIGH = 0; + */ + SQL_NULLS_SORTED_HIGH = 0, + + /** + * @generated from enum value: SQL_NULLS_SORTED_LOW = 1; + */ + SQL_NULLS_SORTED_LOW = 1, + + /** + * @generated from enum value: SQL_NULLS_SORTED_AT_START = 2; + */ + SQL_NULLS_SORTED_AT_START = 2, + + /** + * @generated from enum value: SQL_NULLS_SORTED_AT_END = 3; + */ + SQL_NULLS_SORTED_AT_END = 3, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlNullOrdering. + */ +export const SqlNullOrderingSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 3); + +/** + * @generated from enum arrow.flight.protocol.sql.SupportedSqlGrammar + */ +export enum SupportedSqlGrammar { + /** + * @generated from enum value: SQL_MINIMUM_GRAMMAR = 0; + */ + SQL_MINIMUM_GRAMMAR = 0, + + /** + * @generated from enum value: SQL_CORE_GRAMMAR = 1; + */ + SQL_CORE_GRAMMAR = 1, + + /** + * @generated from enum value: SQL_EXTENDED_GRAMMAR = 2; + */ + SQL_EXTENDED_GRAMMAR = 2, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SupportedSqlGrammar. + */ +export const SupportedSqlGrammarSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 4); + +/** + * @generated from enum arrow.flight.protocol.sql.SupportedAnsi92SqlGrammarLevel + */ +export enum SupportedAnsi92SqlGrammarLevel { + /** + * @generated from enum value: ANSI92_ENTRY_SQL = 0; + */ + ANSI92_ENTRY_SQL = 0, + + /** + * @generated from enum value: ANSI92_INTERMEDIATE_SQL = 1; + */ + ANSI92_INTERMEDIATE_SQL = 1, + + /** + * @generated from enum value: ANSI92_FULL_SQL = 2; + */ + ANSI92_FULL_SQL = 2, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SupportedAnsi92SqlGrammarLevel. 
+ */ +export const SupportedAnsi92SqlGrammarLevelSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 5); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlOuterJoinsSupportLevel + */ +export enum SqlOuterJoinsSupportLevel { + /** + * @generated from enum value: SQL_JOINS_UNSUPPORTED = 0; + */ + SQL_JOINS_UNSUPPORTED = 0, + + /** + * @generated from enum value: SQL_LIMITED_OUTER_JOINS = 1; + */ + SQL_LIMITED_OUTER_JOINS = 1, + + /** + * @generated from enum value: SQL_FULL_OUTER_JOINS = 2; + */ + SQL_FULL_OUTER_JOINS = 2, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlOuterJoinsSupportLevel. + */ +export const SqlOuterJoinsSupportLevelSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 6); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlSupportedGroupBy + */ +export enum SqlSupportedGroupBy { + /** + * @generated from enum value: SQL_GROUP_BY_UNRELATED = 0; + */ + SQL_GROUP_BY_UNRELATED = 0, + + /** + * @generated from enum value: SQL_GROUP_BY_BEYOND_SELECT = 1; + */ + SQL_GROUP_BY_BEYOND_SELECT = 1, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlSupportedGroupBy. + */ +export const SqlSupportedGroupBySchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 7); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlSupportedElementActions + */ +export enum SqlSupportedElementActions { + /** + * @generated from enum value: SQL_ELEMENT_IN_PROCEDURE_CALLS = 0; + */ + SQL_ELEMENT_IN_PROCEDURE_CALLS = 0, + + /** + * @generated from enum value: SQL_ELEMENT_IN_INDEX_DEFINITIONS = 1; + */ + SQL_ELEMENT_IN_INDEX_DEFINITIONS = 1, + + /** + * @generated from enum value: SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS = 2; + */ + SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS = 2, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlSupportedElementActions. + */ +export const SqlSupportedElementActionsSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 8); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlSupportedPositionedCommands + */ +export enum SqlSupportedPositionedCommands { + /** + * @generated from enum value: SQL_POSITIONED_DELETE = 0; + */ + SQL_POSITIONED_DELETE = 0, + + /** + * @generated from enum value: SQL_POSITIONED_UPDATE = 1; + */ + SQL_POSITIONED_UPDATE = 1, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlSupportedPositionedCommands. + */ +export const SqlSupportedPositionedCommandsSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 9); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlSupportedSubqueries + */ +export enum SqlSupportedSubqueries { + /** + * @generated from enum value: SQL_SUBQUERIES_IN_COMPARISONS = 0; + */ + SQL_SUBQUERIES_IN_COMPARISONS = 0, + + /** + * @generated from enum value: SQL_SUBQUERIES_IN_EXISTS = 1; + */ + SQL_SUBQUERIES_IN_EXISTS = 1, + + /** + * @generated from enum value: SQL_SUBQUERIES_IN_INS = 2; + */ + SQL_SUBQUERIES_IN_INS = 2, + + /** + * @generated from enum value: SQL_SUBQUERIES_IN_QUANTIFIEDS = 3; + */ + SQL_SUBQUERIES_IN_QUANTIFIEDS = 3, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlSupportedSubqueries. 
+ */ +export const SqlSupportedSubqueriesSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 10); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlSupportedUnions + */ +export enum SqlSupportedUnions { + /** + * @generated from enum value: SQL_UNION = 0; + */ + SQL_UNION = 0, + + /** + * @generated from enum value: SQL_UNION_ALL = 1; + */ + SQL_UNION_ALL = 1, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlSupportedUnions. + */ +export const SqlSupportedUnionsSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 11); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlTransactionIsolationLevel + */ +export enum SqlTransactionIsolationLevel { + /** + * @generated from enum value: SQL_TRANSACTION_NONE = 0; + */ + SQL_TRANSACTION_NONE = 0, + + /** + * @generated from enum value: SQL_TRANSACTION_READ_UNCOMMITTED = 1; + */ + SQL_TRANSACTION_READ_UNCOMMITTED = 1, + + /** + * @generated from enum value: SQL_TRANSACTION_READ_COMMITTED = 2; + */ + SQL_TRANSACTION_READ_COMMITTED = 2, + + /** + * @generated from enum value: SQL_TRANSACTION_REPEATABLE_READ = 3; + */ + SQL_TRANSACTION_REPEATABLE_READ = 3, + + /** + * @generated from enum value: SQL_TRANSACTION_SERIALIZABLE = 4; + */ + SQL_TRANSACTION_SERIALIZABLE = 4, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlTransactionIsolationLevel. + */ +export const SqlTransactionIsolationLevelSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 12); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlSupportedTransactions + */ +export enum SqlSupportedTransactions { + /** + * @generated from enum value: SQL_TRANSACTION_UNSPECIFIED = 0; + */ + SQL_TRANSACTION_UNSPECIFIED = 0, + + /** + * @generated from enum value: SQL_DATA_DEFINITION_TRANSACTIONS = 1; + */ + SQL_DATA_DEFINITION_TRANSACTIONS = 1, + + /** + * @generated from enum value: SQL_DATA_MANIPULATION_TRANSACTIONS = 2; + */ + SQL_DATA_MANIPULATION_TRANSACTIONS = 2, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlSupportedTransactions. + */ +export const SqlSupportedTransactionsSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 13); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlSupportedResultSetType + */ +export enum SqlSupportedResultSetType { + /** + * @generated from enum value: SQL_RESULT_SET_TYPE_UNSPECIFIED = 0; + */ + SQL_RESULT_SET_TYPE_UNSPECIFIED = 0, + + /** + * @generated from enum value: SQL_RESULT_SET_TYPE_FORWARD_ONLY = 1; + */ + SQL_RESULT_SET_TYPE_FORWARD_ONLY = 1, + + /** + * @generated from enum value: SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE = 2; + */ + SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE = 2, + + /** + * @generated from enum value: SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE = 3; + */ + SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE = 3, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlSupportedResultSetType. 
+ */ +export const SqlSupportedResultSetTypeSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 14); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency + */ +export enum SqlSupportedResultSetConcurrency { + /** + * @generated from enum value: SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED = 0; + */ + SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED = 0, + + /** + * @generated from enum value: SQL_RESULT_SET_CONCURRENCY_READ_ONLY = 1; + */ + SQL_RESULT_SET_CONCURRENCY_READ_ONLY = 1, + + /** + * @generated from enum value: SQL_RESULT_SET_CONCURRENCY_UPDATABLE = 2; + */ + SQL_RESULT_SET_CONCURRENCY_UPDATABLE = 2, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency. + */ +export const SqlSupportedResultSetConcurrencySchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 15); + +/** + * @generated from enum arrow.flight.protocol.sql.SqlSupportsConvert + */ +export enum SqlSupportsConvert { + /** + * @generated from enum value: SQL_CONVERT_BIGINT = 0; + */ + SQL_CONVERT_BIGINT = 0, + + /** + * @generated from enum value: SQL_CONVERT_BINARY = 1; + */ + SQL_CONVERT_BINARY = 1, + + /** + * @generated from enum value: SQL_CONVERT_BIT = 2; + */ + SQL_CONVERT_BIT = 2, + + /** + * @generated from enum value: SQL_CONVERT_CHAR = 3; + */ + SQL_CONVERT_CHAR = 3, + + /** + * @generated from enum value: SQL_CONVERT_DATE = 4; + */ + SQL_CONVERT_DATE = 4, + + /** + * @generated from enum value: SQL_CONVERT_DECIMAL = 5; + */ + SQL_CONVERT_DECIMAL = 5, + + /** + * @generated from enum value: SQL_CONVERT_FLOAT = 6; + */ + SQL_CONVERT_FLOAT = 6, + + /** + * @generated from enum value: SQL_CONVERT_INTEGER = 7; + */ + SQL_CONVERT_INTEGER = 7, + + /** + * @generated from enum value: SQL_CONVERT_INTERVAL_DAY_TIME = 8; + */ + SQL_CONVERT_INTERVAL_DAY_TIME = 8, + + /** + * @generated from enum value: SQL_CONVERT_INTERVAL_YEAR_MONTH = 9; + */ + SQL_CONVERT_INTERVAL_YEAR_MONTH = 9, + + /** + * @generated from enum value: SQL_CONVERT_LONGVARBINARY = 10; + */ + SQL_CONVERT_LONGVARBINARY = 10, + + /** + * @generated from enum value: SQL_CONVERT_LONGVARCHAR = 11; + */ + SQL_CONVERT_LONGVARCHAR = 11, + + /** + * @generated from enum value: SQL_CONVERT_NUMERIC = 12; + */ + SQL_CONVERT_NUMERIC = 12, + + /** + * @generated from enum value: SQL_CONVERT_REAL = 13; + */ + SQL_CONVERT_REAL = 13, + + /** + * @generated from enum value: SQL_CONVERT_SMALLINT = 14; + */ + SQL_CONVERT_SMALLINT = 14, + + /** + * @generated from enum value: SQL_CONVERT_TIME = 15; + */ + SQL_CONVERT_TIME = 15, + + /** + * @generated from enum value: SQL_CONVERT_TIMESTAMP = 16; + */ + SQL_CONVERT_TIMESTAMP = 16, + + /** + * @generated from enum value: SQL_CONVERT_TINYINT = 17; + */ + SQL_CONVERT_TINYINT = 17, + + /** + * @generated from enum value: SQL_CONVERT_VARBINARY = 18; + */ + SQL_CONVERT_VARBINARY = 18, + + /** + * @generated from enum value: SQL_CONVERT_VARCHAR = 19; + */ + SQL_CONVERT_VARCHAR = 19, +} + +/** + * Describes the enum arrow.flight.protocol.sql.SqlSupportsConvert. + */ +export const SqlSupportsConvertSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 16); + +/** + * * + * The JDBC/ODBC-defined type of any object. + * All the values here are the same as in the JDBC and ODBC specs. 
+ * + * @generated from enum arrow.flight.protocol.sql.XdbcDataType + */ +export enum XdbcDataType { + /** + * @generated from enum value: XDBC_UNKNOWN_TYPE = 0; + */ + XDBC_UNKNOWN_TYPE = 0, + + /** + * @generated from enum value: XDBC_CHAR = 1; + */ + XDBC_CHAR = 1, + + /** + * @generated from enum value: XDBC_NUMERIC = 2; + */ + XDBC_NUMERIC = 2, + + /** + * @generated from enum value: XDBC_DECIMAL = 3; + */ + XDBC_DECIMAL = 3, + + /** + * @generated from enum value: XDBC_INTEGER = 4; + */ + XDBC_INTEGER = 4, + + /** + * @generated from enum value: XDBC_SMALLINT = 5; + */ + XDBC_SMALLINT = 5, + + /** + * @generated from enum value: XDBC_FLOAT = 6; + */ + XDBC_FLOAT = 6, + + /** + * @generated from enum value: XDBC_REAL = 7; + */ + XDBC_REAL = 7, + + /** + * @generated from enum value: XDBC_DOUBLE = 8; + */ + XDBC_DOUBLE = 8, + + /** + * @generated from enum value: XDBC_DATETIME = 9; + */ + XDBC_DATETIME = 9, + + /** + * @generated from enum value: XDBC_INTERVAL = 10; + */ + XDBC_INTERVAL = 10, + + /** + * @generated from enum value: XDBC_VARCHAR = 12; + */ + XDBC_VARCHAR = 12, + + /** + * @generated from enum value: XDBC_DATE = 91; + */ + XDBC_DATE = 91, + + /** + * @generated from enum value: XDBC_TIME = 92; + */ + XDBC_TIME = 92, + + /** + * @generated from enum value: XDBC_TIMESTAMP = 93; + */ + XDBC_TIMESTAMP = 93, + + /** + * @generated from enum value: XDBC_LONGVARCHAR = -1; + */ + XDBC_LONGVARCHAR = -1, + + /** + * @generated from enum value: XDBC_BINARY = -2; + */ + XDBC_BINARY = -2, + + /** + * @generated from enum value: XDBC_VARBINARY = -3; + */ + XDBC_VARBINARY = -3, + + /** + * @generated from enum value: XDBC_LONGVARBINARY = -4; + */ + XDBC_LONGVARBINARY = -4, + + /** + * @generated from enum value: XDBC_BIGINT = -5; + */ + XDBC_BIGINT = -5, + + /** + * @generated from enum value: XDBC_TINYINT = -6; + */ + XDBC_TINYINT = -6, + + /** + * @generated from enum value: XDBC_BIT = -7; + */ + XDBC_BIT = -7, + + /** + * @generated from enum value: XDBC_WCHAR = -8; + */ + XDBC_WCHAR = -8, + + /** + * @generated from enum value: XDBC_WVARCHAR = -9; + */ + XDBC_WVARCHAR = -9, +} + +/** + * Describes the enum arrow.flight.protocol.sql.XdbcDataType. + */ +export const XdbcDataTypeSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_FlightSql, 17); + +/** + * * + * Detailed subtype information for XDBC_TYPE_DATETIME and XDBC_TYPE_INTERVAL. 
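+ *
+ * Several names below intentionally share a numeric value (for example,
+ * XDBC_SUBCODE_YEAR and XDBC_SUBCODE_DATE are both 1); which name applies
+ * depends on whether the owning type is the datetime or the interval type.
+ * A hedged sketch (`dataType` and `subcode` are hypothetical values taken
+ * from a type-info result):
+ *
+ *   const isDate = dataType === XdbcDataType.XDBC_DATETIME &&
+ *     subcode === XdbcDatetimeSubcode.XDBC_SUBCODE_DATE;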
+ * + * @generated from enum arrow.flight.protocol.sql.XdbcDatetimeSubcode + */ +export enum XdbcDatetimeSubcode { + /** + * @generated from enum value: XDBC_SUBCODE_UNKNOWN = 0; + */ + XDBC_SUBCODE_UNKNOWN = 0, + + /** + * @generated from enum value: XDBC_SUBCODE_YEAR = 1; + */ + XDBC_SUBCODE_YEAR = 1, + + /** + * @generated from enum value: XDBC_SUBCODE_DATE = 1; + */ + XDBC_SUBCODE_DATE = 1, + + /** + * @generated from enum value: XDBC_SUBCODE_TIME = 2; + */ + XDBC_SUBCODE_TIME = 2, + + /** + * @generated from enum value: XDBC_SUBCODE_MONTH = 2; + */ + XDBC_SUBCODE_MONTH = 2, + + /** + * @generated from enum value: XDBC_SUBCODE_TIMESTAMP = 3; + */ + XDBC_SUBCODE_TIMESTAMP = 3, + + /** + * @generated from enum value: XDBC_SUBCODE_DAY = 3; + */ + XDBC_SUBCODE_DAY = 3, + + /** + * @generated from enum value: XDBC_SUBCODE_TIME_WITH_TIMEZONE = 4; + */ + XDBC_SUBCODE_TIME_WITH_TIMEZONE = 4, + + /** + * @generated from enum value: XDBC_SUBCODE_HOUR = 4; + */ + XDBC_SUBCODE_HOUR = 4, + + /** + * @generated from enum value: XDBC_SUBCODE_TIMESTAMP_WITH_TIMEZONE = 5; + */ + XDBC_SUBCODE_TIMESTAMP_WITH_TIMEZONE = 5, + + /** + * @generated from enum value: XDBC_SUBCODE_MINUTE = 5; + */ + XDBC_SUBCODE_MINUTE = 5, + + /** + * @generated from enum value: XDBC_SUBCODE_SECOND = 6; + */ + XDBC_SUBCODE_SECOND = 6, + + /** + * @generated from enum value: XDBC_SUBCODE_YEAR_TO_MONTH = 7; + */ + XDBC_SUBCODE_YEAR_TO_MONTH = 7, + + /** + * @generated from enum value: XDBC_SUBCODE_DAY_TO_HOUR = 8; + */ + XDBC_SUBCODE_DAY_TO_HOUR = 8, + + /** + * @generated from enum value: XDBC_SUBCODE_DAY_TO_MINUTE = 9; + */ + XDBC_SUBCODE_DAY_TO_MINUTE = 9, + + /** + * @generated from enum value: XDBC_SUBCODE_DAY_TO_SECOND = 10; + */ + XDBC_SUBCODE_DAY_TO_SECOND = 10, + + /** + * @generated from enum value: XDBC_SUBCODE_HOUR_TO_MINUTE = 11; + */ + XDBC_SUBCODE_HOUR_TO_MINUTE = 11, + + /** + * @generated from enum value: XDBC_SUBCODE_HOUR_TO_SECOND = 12; + */ + XDBC_SUBCODE_HOUR_TO_SECOND = 12, + + /** + * @generated from enum value: XDBC_SUBCODE_MINUTE_TO_SECOND = 13; + */ + XDBC_SUBCODE_MINUTE_TO_SECOND = 13, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_YEAR = 101; + */ + XDBC_SUBCODE_INTERVAL_YEAR = 101, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_MONTH = 102; + */ + XDBC_SUBCODE_INTERVAL_MONTH = 102, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_DAY = 103; + */ + XDBC_SUBCODE_INTERVAL_DAY = 103, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_HOUR = 104; + */ + XDBC_SUBCODE_INTERVAL_HOUR = 104, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_MINUTE = 105; + */ + XDBC_SUBCODE_INTERVAL_MINUTE = 105, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_SECOND = 106; + */ + XDBC_SUBCODE_INTERVAL_SECOND = 106, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_YEAR_TO_MONTH = 107; + */ + XDBC_SUBCODE_INTERVAL_YEAR_TO_MONTH = 107, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_DAY_TO_HOUR = 108; + */ + XDBC_SUBCODE_INTERVAL_DAY_TO_HOUR = 108, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_DAY_TO_MINUTE = 109; + */ + XDBC_SUBCODE_INTERVAL_DAY_TO_MINUTE = 109, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_DAY_TO_SECOND = 110; + */ + XDBC_SUBCODE_INTERVAL_DAY_TO_SECOND = 110, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_HOUR_TO_MINUTE = 111; + */ + XDBC_SUBCODE_INTERVAL_HOUR_TO_MINUTE = 111, + + /** + * @generated from enum value: XDBC_SUBCODE_INTERVAL_HOUR_TO_SECOND = 112; 
+ */
+ XDBC_SUBCODE_INTERVAL_HOUR_TO_SECOND = 112,
+
+ /**
+ * @generated from enum value: XDBC_SUBCODE_INTERVAL_MINUTE_TO_SECOND = 113;
+ */
+ XDBC_SUBCODE_INTERVAL_MINUTE_TO_SECOND = 113,
+}
+
+/**
+ * Describes the enum arrow.flight.protocol.sql.XdbcDatetimeSubcode.
+ */
+export const XdbcDatetimeSubcodeSchema: GenEnum = /*@__PURE__*/
+ enumDesc(file_FlightSql, 18);
+
+/**
+ * @generated from enum arrow.flight.protocol.sql.Nullable
+ */
+export enum Nullable {
+ /**
+ * *
+ * Indicates that the field does not allow the use of null values.
+ *
+ * @generated from enum value: NULLABILITY_NO_NULLS = 0;
+ */
+ NULLABILITY_NO_NULLS = 0,
+
+ /**
+ * *
+ * Indicates that the field allows the use of null values.
+ *
+ * @generated from enum value: NULLABILITY_NULLABLE = 1;
+ */
+ NULLABILITY_NULLABLE = 1,
+
+ /**
+ * *
+ * Indicates that the nullability of the field cannot be determined.
+ *
+ * @generated from enum value: NULLABILITY_UNKNOWN = 2;
+ */
+ NULLABILITY_UNKNOWN = 2,
+}
+
+/**
+ * Describes the enum arrow.flight.protocol.sql.Nullable.
+ */
+export const NullableSchema: GenEnum = /*@__PURE__*/
+ enumDesc(file_FlightSql, 19);
+
+/**
+ * @generated from enum arrow.flight.protocol.sql.Searchable
+ */
+export enum Searchable {
+ /**
+ * *
+ * Indicates that the column cannot be used in a WHERE clause.
+ *
+ * @generated from enum value: SEARCHABLE_NONE = 0;
+ */
+ NONE = 0,
+
+ /**
+ * *
+ * Indicates that the column can be used in a WHERE clause if it is using a
+ * LIKE operator.
+ *
+ * @generated from enum value: SEARCHABLE_CHAR = 1;
+ */
+ CHAR = 1,
+
+ /**
+ * *
+ * Indicates that the column can be used in a WHERE clause with any
+ * operator other than LIKE.
+ *
+ * - Allowed operators: comparison, quantified comparison, BETWEEN,
+ * DISTINCT, IN, MATCH, and UNIQUE.
+ *
+ * @generated from enum value: SEARCHABLE_BASIC = 2;
+ */
+ BASIC = 2,
+
+ /**
+ * *
+ * Indicates that the column can be used in a WHERE clause using any operator.
+ *
+ * @generated from enum value: SEARCHABLE_FULL = 3;
+ */
+ FULL = 3,
+}
+
+/**
+ * Describes the enum arrow.flight.protocol.sql.Searchable.
+ */
+export const SearchableSchema: GenEnum = /*@__PURE__*/
+ enumDesc(file_FlightSql, 20);
+
+/**
+ * @generated from enum arrow.flight.protocol.sql.UpdateDeleteRules
+ */
+export enum UpdateDeleteRules {
+ /**
+ * @generated from enum value: CASCADE = 0;
+ */
+ CASCADE = 0,
+
+ /**
+ * @generated from enum value: RESTRICT = 1;
+ */
+ RESTRICT = 1,
+
+ /**
+ * @generated from enum value: SET_NULL = 2;
+ */
+ SET_NULL = 2,
+
+ /**
+ * @generated from enum value: NO_ACTION = 3;
+ */
+ NO_ACTION = 3,
+
+ /**
+ * @generated from enum value: SET_DEFAULT = 4;
+ */
+ SET_DEFAULT = 4,
+}
+
+/**
+ * Describes the enum arrow.flight.protocol.sql.UpdateDeleteRules.
+ */
+export const UpdateDeleteRulesSchema: GenEnum = /*@__PURE__*/
+ enumDesc(file_FlightSql, 21);
+
+/**
+ * @generated from extension: bool experimental = 1000;
+ */
+export const experimental: GenExtension = /*@__PURE__*/
+ extDesc(file_FlightSql, 0);
+
diff --git a/packages/amp/src/Protobuf/Flight_pb.ts b/packages/amp/src/Protobuf/Flight_pb.ts
new file mode 100644
index 0000000..c733700
--- /dev/null
+++ b/packages/amp/src/Protobuf/Flight_pb.ts
@@ -0,0 +1,1312 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +//

+// http://www.apache.org/licenses/LICENSE-2.0 +//

+// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.10.1 with parameter "target=ts,import_extension=ts" +// @generated from file Flight.proto (package arrow.flight.protocol, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; +import { enumDesc, fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2"; +import type { Timestamp } from "@bufbuild/protobuf/wkt"; +import { file_google_protobuf_timestamp } from "@bufbuild/protobuf/wkt"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file Flight.proto. + */ +export const file_Flight: GenFile = /*@__PURE__*/ + fileDesc("CgxGbGlnaHQucHJvdG8SFWFycm93LmZsaWdodC5wcm90b2NvbCI9ChBIYW5kc2hha2VSZXF1ZXN0EhgKEHByb3RvY29sX3ZlcnNpb24YASABKAQSDwoHcGF5bG9hZBgCIAEoDCI+ChFIYW5kc2hha2VSZXNwb25zZRIYChBwcm90b2NvbF92ZXJzaW9uGAEgASgEEg8KB3BheWxvYWQYAiABKAwiLwoJQmFzaWNBdXRoEhAKCHVzZXJuYW1lGAIgASgJEhAKCHBhc3N3b3JkGAMgASgJIgcKBUVtcHR5Ii8KCkFjdGlvblR5cGUSDAoEdHlwZRgBIAEoCRITCgtkZXNjcmlwdGlvbhgCIAEoCSIeCghDcml0ZXJpYRISCgpleHByZXNzaW9uGAEgASgMIiQKBkFjdGlvbhIMCgR0eXBlGAEgASgJEgwKBGJvZHkYAiABKAwiFgoGUmVzdWx0EgwKBGJvZHkYASABKAwiHgoMU2NoZW1hUmVzdWx0Eg4KBnNjaGVtYRgBIAEoDCKlAQoQRmxpZ2h0RGVzY3JpcHRvchJECgR0eXBlGAEgASgOMjYuYXJyb3cuZmxpZ2h0LnByb3RvY29sLkZsaWdodERlc2NyaXB0b3IuRGVzY3JpcHRvclR5cGUSCwoDY21kGAIgASgMEgwKBHBhdGgYAyADKAkiMAoORGVzY3JpcHRvclR5cGUSCwoHVU5LTk9XThAAEggKBFBBVEgQARIHCgNDTUQQAiLsAQoKRmxpZ2h0SW5mbxIOCgZzY2hlbWEYASABKAwSQgoRZmxpZ2h0X2Rlc2NyaXB0b3IYAiABKAsyJy5hcnJvdy5mbGlnaHQucHJvdG9jb2wuRmxpZ2h0RGVzY3JpcHRvchI3CghlbmRwb2ludBgDIAMoCzIlLmFycm93LmZsaWdodC5wcm90b2NvbC5GbGlnaHRFbmRwb2ludBIVCg10b3RhbF9yZWNvcmRzGAQgASgDEhMKC3RvdGFsX2J5dGVzGAUgASgDEg8KB29yZGVyZWQYBiABKAgSFAoMYXBwX21ldGFkYXRhGAcgASgMItgBCghQb2xsSW5mbxIvCgRpbmZvGAEgASgLMiEuYXJyb3cuZmxpZ2h0LnByb3RvY29sLkZsaWdodEluZm8SQgoRZmxpZ2h0X2Rlc2NyaXB0b3IYAiABKAsyJy5hcnJvdy5mbGlnaHQucHJvdG9jb2wuRmxpZ2h0RGVzY3JpcHRvchIVCghwcm9ncmVzcxgDIAEoAUgAiAEBEjMKD2V4cGlyYXRpb25fdGltZRgEIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5UaW1lc3RhbXBCCwoJX3Byb2dyZXNzIkoKF0NhbmNlbEZsaWdodEluZm9SZXF1ZXN0Ei8KBGluZm8YASABKAsyIS5hcnJvdy5mbGlnaHQucHJvdG9jb2wuRmxpZ2h0SW5mbyJNChZDYW5jZWxGbGlnaHRJbmZvUmVzdWx0EjMKBnN0YXR1cxgBIAEoDjIjLmFycm93LmZsaWdodC5wcm90b2NvbC5DYW5jZWxTdGF0dXMiGAoGVGlja2V0Eg4KBnRpY2tldBgBIAEoDCIXCghMb2NhdGlvbhILCgN1cmkYASABKAkivQEKDkZsaWdodEVuZHBvaW50Ei0KBnRpY2tldBgBIAEoCzIdLmFycm93LmZsaWdodC5wcm90b2NvbC5UaWNrZXQSMQoIbG9jYXRpb24YAiADKAsyHy5hcnJvdy5mbGlnaHQucHJvdG9jb2wuTG9jYXRpb24SMwoPZXhwaXJhdGlvbl90aW1lGAMgASgLMhouZ29vZ2xlLnByb3RvYnVmLlRpbWVzdGFtcBIUCgxhcHBfbWV0YWRhdGEYBCABKAwiVQoaUmVuZXdGbGlnaHRFbmRwb2ludFJlcXVlc3QSNwoIZW5kcG9pbnQYASABKAsyJS5hcnJvdy5mbGlnaHQucHJvdG9jb2wuRmxpZ2h0RW5kcG9pbnQijwEKCkZsaWdodERhdGESQgoRZmxpZ2h0X2Rlc2NyaXB0b3IYASABKAsyJy5hcnJvdy5mbGlnaHQucHJvdG9jb2wuRmxpZ2h0RGVzY3JpcHRvchITCgtkYXRhX2hlYWRlchgCIAEoDBIUCgxhcHBfbWV0YWRhdGEYAyABKAwSEgoJZGF0YV9ib2R5GOgHIAEoDCIhCglQdXRSZXN1bHQSFAoMYXBwX21ldGFkYXRhGAEgASgMIvwBChJTZXNzaW9uT3B0aW9uVmFsdWUSFgoMc3RyaW5nX3ZhbHVlGAEgASgJSAASFAoKYm9vbF92YWx1ZRgCIAEoCEgAEhUKC2ludDY0X3ZhbHVlGAMgASgQSAASFgoMZG91YmxlX3ZhbHVlGAQgASgBSAASVgoRc3RyaW5nX2xpc3RfdmFsdWUYBSABKAsyOS5hcnJvdy5mbGlnaHQucHJvdG9jb2wuU2Vzc2lvbk9wdGlvblZhbHVlLlN0cmluZ0xpc3RWYWx1ZUgAGiEKD1N0cmluZ0xpc3RWYW
x1ZRIOCgZ2YWx1ZXMYASADKAlCDgoMb3B0aW9uX3ZhbHVlItoBChhTZXRTZXNzaW9uT3B0aW9uc1JlcXVlc3QSXAoPc2Vzc2lvbl9vcHRpb25zGAEgAygLMkMuYXJyb3cuZmxpZ2h0LnByb3RvY29sLlNldFNlc3Npb25PcHRpb25zUmVxdWVzdC5TZXNzaW9uT3B0aW9uc0VudHJ5GmAKE1Nlc3Npb25PcHRpb25zRW50cnkSCwoDa2V5GAEgASgJEjgKBXZhbHVlGAIgASgLMikuYXJyb3cuZmxpZ2h0LnByb3RvY29sLlNlc3Npb25PcHRpb25WYWx1ZToCOAEi7AIKF1NldFNlc3Npb25PcHRpb25zUmVzdWx0EkoKBmVycm9ycxgBIAMoCzI6LmFycm93LmZsaWdodC5wcm90b2NvbC5TZXRTZXNzaW9uT3B0aW9uc1Jlc3VsdC5FcnJvcnNFbnRyeRpRCgVFcnJvchJICgV2YWx1ZRgBIAEoDjI5LmFycm93LmZsaWdodC5wcm90b2NvbC5TZXRTZXNzaW9uT3B0aW9uc1Jlc3VsdC5FcnJvclZhbHVlGmMKC0Vycm9yc0VudHJ5EgsKA2tleRgBIAEoCRJDCgV2YWx1ZRgCIAEoCzI0LmFycm93LmZsaWdodC5wcm90b2NvbC5TZXRTZXNzaW9uT3B0aW9uc1Jlc3VsdC5FcnJvcjoCOAEiTQoKRXJyb3JWYWx1ZRIPCgtVTlNQRUNJRklFRBAAEhAKDElOVkFMSURfTkFNRRABEhEKDUlOVkFMSURfVkFMVUUQAhIJCgVFUlJPUhADIhoKGEdldFNlc3Npb25PcHRpb25zUmVxdWVzdCLYAQoXR2V0U2Vzc2lvbk9wdGlvbnNSZXN1bHQSWwoPc2Vzc2lvbl9vcHRpb25zGAEgAygLMkIuYXJyb3cuZmxpZ2h0LnByb3RvY29sLkdldFNlc3Npb25PcHRpb25zUmVzdWx0LlNlc3Npb25PcHRpb25zRW50cnkaYAoTU2Vzc2lvbk9wdGlvbnNFbnRyeRILCgNrZXkYASABKAkSOAoFdmFsdWUYAiABKAsyKS5hcnJvdy5mbGlnaHQucHJvdG9jb2wuU2Vzc2lvbk9wdGlvblZhbHVlOgI4ASIVChNDbG9zZVNlc3Npb25SZXF1ZXN0Ip0BChJDbG9zZVNlc3Npb25SZXN1bHQSQAoGc3RhdHVzGAEgASgOMjAuYXJyb3cuZmxpZ2h0LnByb3RvY29sLkNsb3NlU2Vzc2lvblJlc3VsdC5TdGF0dXMiRQoGU3RhdHVzEg8KC1VOU1BFQ0lGSUVEEAASCgoGQ0xPU0VEEAESCwoHQ0xPU0lORxACEhEKDU5PVF9DTE9TRUFCTEUQAyqLAQoMQ2FuY2VsU3RhdHVzEh0KGUNBTkNFTF9TVEFUVVNfVU5TUEVDSUZJRUQQABIbChdDQU5DRUxfU1RBVFVTX0NBTkNFTExFRBABEhwKGENBTkNFTF9TVEFUVVNfQ0FOQ0VMTElORxACEiEKHUNBTkNFTF9TVEFUVVNfTk9UX0NBTkNFTExBQkxFEAMyhQcKDUZsaWdodFNlcnZpY2USZAoJSGFuZHNoYWtlEicuYXJyb3cuZmxpZ2h0LnByb3RvY29sLkhhbmRzaGFrZVJlcXVlc3QaKC5hcnJvdy5mbGlnaHQucHJvdG9jb2wuSGFuZHNoYWtlUmVzcG9uc2UiACgBMAESVQoLTGlzdEZsaWdodHMSHy5hcnJvdy5mbGlnaHQucHJvdG9jb2wuQ3JpdGVyaWEaIS5hcnJvdy5mbGlnaHQucHJvdG9jb2wuRmxpZ2h0SW5mbyIAMAESXQoNR2V0RmxpZ2h0SW5mbxInLmFycm93LmZsaWdodC5wcm90b2NvbC5GbGlnaHREZXNjcmlwdG9yGiEuYXJyb3cuZmxpZ2h0LnByb3RvY29sLkZsaWdodEluZm8iABJcCg5Qb2xsRmxpZ2h0SW5mbxInLmFycm93LmZsaWdodC5wcm90b2NvbC5GbGlnaHREZXNjcmlwdG9yGh8uYXJyb3cuZmxpZ2h0LnByb3RvY29sLlBvbGxJbmZvIgASWwoJR2V0U2NoZW1hEicuYXJyb3cuZmxpZ2h0LnByb3RvY29sLkZsaWdodERlc2NyaXB0b3IaIy5hcnJvdy5mbGlnaHQucHJvdG9jb2wuU2NoZW1hUmVzdWx0IgASTQoFRG9HZXQSHS5hcnJvdy5mbGlnaHQucHJvdG9jb2wuVGlja2V0GiEuYXJyb3cuZmxpZ2h0LnByb3RvY29sLkZsaWdodERhdGEiADABElIKBURvUHV0EiEuYXJyb3cuZmxpZ2h0LnByb3RvY29sLkZsaWdodERhdGEaIC5hcnJvdy5mbGlnaHQucHJvdG9jb2wuUHV0UmVzdWx0IgAoATABElgKCkRvRXhjaGFuZ2USIS5hcnJvdy5mbGlnaHQucHJvdG9jb2wuRmxpZ2h0RGF0YRohLmFycm93LmZsaWdodC5wcm90b2NvbC5GbGlnaHREYXRhIgAoATABEkwKCERvQWN0aW9uEh0uYXJyb3cuZmxpZ2h0LnByb3RvY29sLkFjdGlvbhodLmFycm93LmZsaWdodC5wcm90b2NvbC5SZXN1bHQiADABElIKC0xpc3RBY3Rpb25zEhwuYXJyb3cuZmxpZ2h0LnByb3RvY29sLkVtcHR5GiEuYXJyb3cuZmxpZ2h0LnByb3RvY29sLkFjdGlvblR5cGUiADABQnEKHG9yZy5hcGFjaGUuYXJyb3cuZmxpZ2h0LmltcGxaMmdpdGh1Yi5jb20vYXBhY2hlL2Fycm93LWdvL2Fycm93L2ZsaWdodC9nZW4vZmxpZ2h0qgIcQXBhY2hlLkFycm93LkZsaWdodC5Qcm90b2NvbGIGcHJvdG8z", [file_google_protobuf_timestamp]); + +/** + * + * The request that a client provides to a server on handshake. + * + * @generated from message arrow.flight.protocol.HandshakeRequest + */ +export type HandshakeRequest = Message<"arrow.flight.protocol.HandshakeRequest"> & { + /** + * + * A defined protocol version + * + * @generated from field: uint64 protocol_version = 1; + */ + protocolVersion: bigint; + + /** + * + * Arbitrary auth/handshake info. 
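+ *
+ * A hedged usage sketch (values are illustrative): a request can be built
+ * with the `create` helper from "@bufbuild/protobuf", as referenced by the
+ * schema constants in this file:
+ *
+ *   const request = create(HandshakeRequestSchema, {
+ *     protocolVersion: 0n,
+ *     payload: new TextEncoder().encode("opaque auth payload")
+ *   });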
+ * + * @generated from field: bytes payload = 2; + */ + payload: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.HandshakeRequest. + * Use `create(HandshakeRequestSchema)` to create a new message. + */ +export const HandshakeRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 0); + +/** + * @generated from message arrow.flight.protocol.HandshakeResponse + */ +export type HandshakeResponse = Message<"arrow.flight.protocol.HandshakeResponse"> & { + /** + * + * A defined protocol version + * + * @generated from field: uint64 protocol_version = 1; + */ + protocolVersion: bigint; + + /** + * + * Arbitrary auth/handshake info. + * + * @generated from field: bytes payload = 2; + */ + payload: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.HandshakeResponse. + * Use `create(HandshakeResponseSchema)` to create a new message. + */ +export const HandshakeResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 1); + +/** + * + * A message for doing simple auth. + * + * @generated from message arrow.flight.protocol.BasicAuth + */ +export type BasicAuth = Message<"arrow.flight.protocol.BasicAuth"> & { + /** + * @generated from field: string username = 2; + */ + username: string; + + /** + * @generated from field: string password = 3; + */ + password: string; +}; + +/** + * Describes the message arrow.flight.protocol.BasicAuth. + * Use `create(BasicAuthSchema)` to create a new message. + */ +export const BasicAuthSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 2); + +/** + * @generated from message arrow.flight.protocol.Empty + */ +export type Empty = Message<"arrow.flight.protocol.Empty"> & { +}; + +/** + * Describes the message arrow.flight.protocol.Empty. + * Use `create(EmptySchema)` to create a new message. + */ +export const EmptySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 3); + +/** + * + * Describes an available action, including both the name used for execution + * along with a short description of the purpose of the action. + * + * @generated from message arrow.flight.protocol.ActionType + */ +export type ActionType = Message<"arrow.flight.protocol.ActionType"> & { + /** + * @generated from field: string type = 1; + */ + type: string; + + /** + * @generated from field: string description = 2; + */ + description: string; +}; + +/** + * Describes the message arrow.flight.protocol.ActionType. + * Use `create(ActionTypeSchema)` to create a new message. + */ +export const ActionTypeSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 4); + +/** + * + * A service specific expression that can be used to return a limited set + * of available Arrow Flight streams. + * + * @generated from message arrow.flight.protocol.Criteria + */ +export type Criteria = Message<"arrow.flight.protocol.Criteria"> & { + /** + * @generated from field: bytes expression = 1; + */ + expression: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.Criteria. + * Use `create(CriteriaSchema)` to create a new message. + */ +export const CriteriaSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 5); + +/** + * + * An opaque action specific for the service. 
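+ *
+ * A hedged sketch (the action name and `info` value are illustrative): the
+ * CancelFlightInfo request defined later in this file notes that its payload
+ * "should be stored in Action.body", e.g. using `create` and `toBinary` from
+ * "@bufbuild/protobuf":
+ *
+ *   const action = create(ActionSchema, {
+ *     type: "CancelFlightInfo",
+ *     body: toBinary(CancelFlightInfoRequestSchema,
+ *       create(CancelFlightInfoRequestSchema, { info }))
+ *   });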
+ * + * @generated from message arrow.flight.protocol.Action + */ +export type Action = Message<"arrow.flight.protocol.Action"> & { + /** + * @generated from field: string type = 1; + */ + type: string; + + /** + * @generated from field: bytes body = 2; + */ + body: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.Action. + * Use `create(ActionSchema)` to create a new message. + */ +export const ActionSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 6); + +/** + * + * An opaque result returned after executing an action. + * + * @generated from message arrow.flight.protocol.Result + */ +export type Result = Message<"arrow.flight.protocol.Result"> & { + /** + * @generated from field: bytes body = 1; + */ + body: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.Result. + * Use `create(ResultSchema)` to create a new message. + */ +export const ResultSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 7); + +/** + * + * Wrap the result of a getSchema call + * + * @generated from message arrow.flight.protocol.SchemaResult + */ +export type SchemaResult = Message<"arrow.flight.protocol.SchemaResult"> & { + /** + * The schema of the dataset in its IPC form: + * 4 bytes - an optional IPC_CONTINUATION_TOKEN prefix + * 4 bytes - the byte length of the payload + * a flatbuffer Message whose header is the Schema + * + * @generated from field: bytes schema = 1; + */ + schema: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.SchemaResult. + * Use `create(SchemaResultSchema)` to create a new message. + */ +export const SchemaResultSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 8); + +/** + * + * The name or tag for a Flight. May be used as a way to retrieve or generate + * a flight or be used to expose a set of previously defined flights. + * + * @generated from message arrow.flight.protocol.FlightDescriptor + */ +export type FlightDescriptor = Message<"arrow.flight.protocol.FlightDescriptor"> & { + /** + * @generated from field: arrow.flight.protocol.FlightDescriptor.DescriptorType type = 1; + */ + type: FlightDescriptor_DescriptorType; + + /** + * + * Opaque value used to express a command. Should only be defined when + * type = CMD. + * + * @generated from field: bytes cmd = 2; + */ + cmd: Uint8Array; + + /** + * + * List of strings identifying a particular dataset. Should only be defined + * when type = PATH. + * + * @generated from field: repeated string path = 3; + */ + path: string[]; +}; + +/** + * Describes the message arrow.flight.protocol.FlightDescriptor. + * Use `create(FlightDescriptorSchema)` to create a new message. + */ +export const FlightDescriptorSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 9); + +/** + * + * Describes what type of descriptor is defined. + * + * @generated from enum arrow.flight.protocol.FlightDescriptor.DescriptorType + */ +export enum FlightDescriptor_DescriptorType { + /** + * Protobuf pattern, not used. + * + * @generated from enum value: UNKNOWN = 0; + */ + UNKNOWN = 0, + + /** + * + * A named path that identifies a dataset. A path is composed of a string + * or list of strings describing a particular dataset. This is conceptually + * similar to a path inside a filesystem. + * + * @generated from enum value: PATH = 1; + */ + PATH = 1, + + /** + * + * An opaque command to generate a dataset. + * + * @generated from enum value: CMD = 2; + */ + CMD = 2, +} + +/** + * Describes the enum arrow.flight.protocol.FlightDescriptor.DescriptorType. 
+ */
+export const FlightDescriptor_DescriptorTypeSchema: GenEnum = /*@__PURE__*/
+ enumDesc(file_Flight, 9, 0);
+
+/**
+ *
+ * The access coordinates for retrieval of a dataset. With a FlightInfo, a
+ * consumer is able to determine how to retrieve a dataset.
+ *
+ * @generated from message arrow.flight.protocol.FlightInfo
+ */
+export type FlightInfo = Message<"arrow.flight.protocol.FlightInfo"> & {
+ /**
+ * The schema of the dataset in its IPC form:
+ * 4 bytes - an optional IPC_CONTINUATION_TOKEN prefix
+ * 4 bytes - the byte length of the payload
+ * a flatbuffer Message whose header is the Schema
+ *
+ * @generated from field: bytes schema = 1;
+ */
+ schema: Uint8Array;
+
+ /**
+ *
+ * The descriptor associated with this info.
+ *
+ * @generated from field: arrow.flight.protocol.FlightDescriptor flight_descriptor = 2;
+ */
+ flightDescriptor?: FlightDescriptor;
+
+ /**
+ *
+ * A list of endpoints associated with the flight. To consume the
+ * whole flight, all endpoints (and hence all Tickets) must be
+ * consumed. Endpoints can be consumed in any order.
+ *
+ * In other words, an application can use multiple endpoints to
+ * represent partitioned data.
+ *
+ * If the returned data has an ordering, an application can use
+ * "FlightInfo.ordered = true" or should return all the data in a
+ * single endpoint. Otherwise, there is no ordering defined on
+ * endpoints or the data within.
+ *
+ * A client can read ordered data by reading data from returned
+ * endpoints, in order, from front to back.
+ *
+ * Note that a client may ignore "FlightInfo.ordered = true". If an
+ * ordering is important for an application, an application must
+ * choose one of the following:
+ *
+ * * An application requires that all clients must read data in
+ * returned endpoints order.
+ * * An application must return all the data in a single endpoint.
+ *
+ * @generated from field: repeated arrow.flight.protocol.FlightEndpoint endpoint = 3;
+ */
+ endpoint: FlightEndpoint[];
+
+ /**
+ * Set these to -1 if unknown.
+ *
+ * @generated from field: int64 total_records = 4;
+ */
+ totalRecords: bigint;
+
+ /**
+ * @generated from field: int64 total_bytes = 5;
+ */
+ totalBytes: bigint;
+
+ /**
+ *
+ * FlightEndpoints are in the same order as the data.
+ *
+ * @generated from field: bool ordered = 6;
+ */
+ ordered: boolean;
+
+ /**
+ *
+ * Application-defined metadata.
+ *
+ * There is no inherent or required relationship between this
+ * and the app_metadata fields in the FlightEndpoints or resulting
+ * FlightData messages. Since this metadata is application-defined,
+ * a given application could define there to be a relationship,
+ * but there is none required by the spec.
+ *
+ * @generated from field: bytes app_metadata = 7;
+ */
+ appMetadata: Uint8Array;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.FlightInfo.
+ * Use `create(FlightInfoSchema)` to create a new message.
+ */
+export const FlightInfoSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_Flight, 10);
+
+/**
+ *
+ * The information to process a long-running query.
+ *
+ * @generated from message arrow.flight.protocol.PollInfo
+ */
+export type PollInfo = Message<"arrow.flight.protocol.PollInfo"> & {
+ /**
+ *
+ * The currently available results.
+ *
+ * If "flight_descriptor" is not specified, the query is complete
+ * and "info" specifies all results. Otherwise, "info" contains
+ * partial query results.
+ * + * Note that each PollInfo response contains a complete + * FlightInfo (not just the delta between the previous and current + * FlightInfo). + * + * Subsequent PollInfo responses may only append new endpoints to + * info. + * + * Clients can begin fetching results via DoGet(Ticket) with the + * ticket in the info before the query is + * completed. FlightInfo.ordered is also valid. + * + * @generated from field: arrow.flight.protocol.FlightInfo info = 1; + */ + info?: FlightInfo; + + /** + * + * The descriptor the client should use on the next try. + * If unset, the query is complete. + * + * @generated from field: arrow.flight.protocol.FlightDescriptor flight_descriptor = 2; + */ + flightDescriptor?: FlightDescriptor; + + /** + * + * Query progress. If known, must be in [0.0, 1.0] but need not be + * monotonic or nondecreasing. If unknown, do not set. + * + * @generated from field: optional double progress = 3; + */ + progress?: number; + + /** + * + * Expiration time for this request. After this passes, the server + * might not accept the retry descriptor anymore (and the query may + * be cancelled). This may be updated on a call to PollFlightInfo. + * + * @generated from field: google.protobuf.Timestamp expiration_time = 4; + */ + expirationTime?: Timestamp; +}; + +/** + * Describes the message arrow.flight.protocol.PollInfo. + * Use `create(PollInfoSchema)` to create a new message. + */ +export const PollInfoSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 11); + +/** + * + * The request of the CancelFlightInfo action. + * + * The request should be stored in Action.body. + * + * @generated from message arrow.flight.protocol.CancelFlightInfoRequest + */ +export type CancelFlightInfoRequest = Message<"arrow.flight.protocol.CancelFlightInfoRequest"> & { + /** + * @generated from field: arrow.flight.protocol.FlightInfo info = 1; + */ + info?: FlightInfo; +}; + +/** + * Describes the message arrow.flight.protocol.CancelFlightInfoRequest. + * Use `create(CancelFlightInfoRequestSchema)` to create a new message. + */ +export const CancelFlightInfoRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 12); + +/** + * + * The result of the CancelFlightInfo action. + * + * The result should be stored in Result.body. + * + * @generated from message arrow.flight.protocol.CancelFlightInfoResult + */ +export type CancelFlightInfoResult = Message<"arrow.flight.protocol.CancelFlightInfoResult"> & { + /** + * @generated from field: arrow.flight.protocol.CancelStatus status = 1; + */ + status: CancelStatus; +}; + +/** + * Describes the message arrow.flight.protocol.CancelFlightInfoResult. + * Use `create(CancelFlightInfoResultSchema)` to create a new message. + */ +export const CancelFlightInfoResultSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 13); + +/** + * + * An opaque identifier that the service can use to retrieve a particular + * portion of a stream. + * + * Tickets are meant to be single use. It is an error/application-defined + * behavior to reuse a ticket. + * + * @generated from message arrow.flight.protocol.Ticket + */ +export type Ticket = Message<"arrow.flight.protocol.Ticket"> & { + /** + * @generated from field: bytes ticket = 1; + */ + ticket: Uint8Array; +}; + +/** + * Describes the message arrow.flight.protocol.Ticket. + * Use `create(TicketSchema)` to create a new message. + */ +export const TicketSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_Flight, 14); + +/** + * + * A location to retrieve a particular stream from. 
+ * the following:
+ * - An empty string or the string 'arrow-flight-reuse-connection://?':
+ *   indicating that the ticket can be redeemed on the service where the
+ *   ticket was generated via a DoGet request.
+ * - A valid grpc URI (grpc://, grpc+tls://, grpc+unix://, etc.):
+ *   indicating that the ticket can be redeemed on the service at the given
+ *   URI via a DoGet request.
+ * - A valid HTTP URI (http://, https://, etc.):
+ *   indicating that the client should perform a GET request against the
+ *   given URI to retrieve the stream. The ticket should be empty
+ *   in this case and should be ignored by the client. Cloud object storage
+ *   can be utilized via presigned URLs or by mediating the auth separately
+ *   and returning the full URL (e.g. https://amzn-s3-demo-bucket.s3.us-west-2.amazonaws.com/...).
+ *
+ * We allow non-Flight URIs for the purpose of allowing Flight services to indicate that
+ * results can be downloaded in formats other than Arrow (such as Parquet) or to allow
+ * direct fetching of results from a URI to reduce excess copying and data movement.
+ * In these cases, the following conventions should be followed by servers and clients:
+ *
+ * - Unless otherwise specified by the 'Content-Type' header of the response,
+ *   a client should assume the response is using the Arrow IPC Streaming format.
+ *   Usage of an IANA media type like 'application/octet-stream' should be assumed to
+ *   be using the Arrow IPC Streaming format.
+ * - The server may allow the client to choose a specific response format by
+ *   specifying an 'Accept' header in the request, such as 'application/vnd.apache.parquet'
+ *   or 'application/vnd.apache.arrow.stream'. If multiple types are requested and
+ *   supported by the server, the choice of which to use is server-specific. If
+ *   none of the requested content-types are supported, the server may respond with
+ *   either 406 (Not Acceptable) or 415 (Unsupported Media Type), or successfully
+ *   respond with a different format that it does support along with the correct
+ *   'Content-Type' header.
+ *
+ * Note: new schemes may be proposed in the future to allow for more flexibility based
+ * on community requests.
+ *
+ * @generated from message arrow.flight.protocol.Location
+ */
+export type Location = Message<"arrow.flight.protocol.Location"> & {
+  /**
+   * @generated from field: string uri = 1;
+   */
+  uri: string;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.Location.
+ * Use `create(LocationSchema)` to create a new message.
+ */
+export const LocationSchema: GenMessage<Location> = /*@__PURE__*/
+  messageDesc(file_Flight, 15);
+
+/**
+ *
+ * A particular stream or split associated with a flight.
+ *
+ * @generated from message arrow.flight.protocol.FlightEndpoint
+ */
+export type FlightEndpoint = Message<"arrow.flight.protocol.FlightEndpoint"> & {
+  /**
+   *
+   * Token used to retrieve this stream.
+   *
+   * @generated from field: arrow.flight.protocol.Ticket ticket = 1;
+   */
+  ticket?: Ticket;
+
+  /**
+   *
+   * A list of URIs where this ticket can be redeemed via DoGet().
+   *
+   * If the list is empty, the expectation is that the ticket can only
+   * be redeemed on the current service where the ticket was
+   * generated.
+   *
+   * If the list is not empty, the expectation is that the ticket can be
+   * redeemed at any of the locations, and that the data returned will be
+   * equivalent. In this case, the ticket may only be redeemed at one of the
+   * given locations, and not (necessarily) on the current service. If one
+   * of the given locations is "arrow-flight-reuse-connection://?", the
+   * client may redeem the ticket on the service where the ticket was
+   * generated (i.e., the same as above), in addition to the other
+   * locations. (This URI was chosen to maximize compatibility, as 'scheme:'
+   * or 'scheme://' are not accepted by Java's java.net.URI.)
+   *
+   * In other words, an application can use multiple locations to
+   * represent redundant and/or load-balanced services.
+   *
+   * @generated from field: repeated arrow.flight.protocol.Location location = 2;
+   */
+  location: Location[];
+
+  /**
+   *
+   * Expiration time of this stream. If present, clients may assume
+   * they can retry DoGet requests. Otherwise, it is
+   * application-defined whether DoGet requests may be retried.
+   *
+   * @generated from field: google.protobuf.Timestamp expiration_time = 3;
+   */
+  expirationTime?: Timestamp;
+
+  /**
+   *
+   * Application-defined metadata.
+   *
+   * There is no inherent or required relationship between this
+   * and the app_metadata fields in the FlightInfo or resulting
+   * FlightData messages. Since this metadata is application-defined,
+   * a given application could define there to be a relationship,
+   * but there is none required by the spec.
+   *
+   * @generated from field: bytes app_metadata = 4;
+   */
+  appMetadata: Uint8Array;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.FlightEndpoint.
+ * Use `create(FlightEndpointSchema)` to create a new message.
+ */
+export const FlightEndpointSchema: GenMessage<FlightEndpoint> = /*@__PURE__*/
+  messageDesc(file_Flight, 16);
+
+/**
+ *
+ * The request of the RenewFlightEndpoint action.
+ *
+ * The request should be stored in Action.body.
+ *
+ * @generated from message arrow.flight.protocol.RenewFlightEndpointRequest
+ */
+export type RenewFlightEndpointRequest = Message<"arrow.flight.protocol.RenewFlightEndpointRequest"> & {
+  /**
+   * @generated from field: arrow.flight.protocol.FlightEndpoint endpoint = 1;
+   */
+  endpoint?: FlightEndpoint;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.RenewFlightEndpointRequest.
+ * Use `create(RenewFlightEndpointRequestSchema)` to create a new message.
+ */
+export const RenewFlightEndpointRequestSchema: GenMessage<RenewFlightEndpointRequest> = /*@__PURE__*/
+  messageDesc(file_Flight, 17);
+
+/**
+ *
+ * A batch of Arrow data as part of a stream of batches.
+ *
+ * @generated from message arrow.flight.protocol.FlightData
+ */
+export type FlightData = Message<"arrow.flight.protocol.FlightData"> & {
+  /**
+   *
+   * The descriptor of the data. This is only relevant when a client is
+   * starting a new DoPut stream.
+   *
+   * @generated from field: arrow.flight.protocol.FlightDescriptor flight_descriptor = 1;
+   */
+  flightDescriptor?: FlightDescriptor;
+
+  /**
+   *
+   * Header for message data as described in Message.fbs::Message.
+   *
+   * @generated from field: bytes data_header = 2;
+   */
+  dataHeader: Uint8Array;
+
+  /**
+   *
+   * Application-defined metadata.
+   *
+   * @generated from field: bytes app_metadata = 3;
+   */
+  appMetadata: Uint8Array;
+
+  /**
+   *
+   * The actual batch of Arrow data. Preferably handled with minimal copies,
+   * it comes last in the definition to help with sidecar patterns (it is
+   * expected that some implementations will fetch this field off the wire
+   * with specialized code to avoid extra memory copies).
+   *
+   * @generated from field: bytes data_body = 1000;
+   */
+  dataBody: Uint8Array;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.FlightData.
+ * Use `create(FlightDataSchema)` to create a new message.
+ */
+export const FlightDataSchema: GenMessage<FlightData> = /*@__PURE__*/
+  messageDesc(file_Flight, 18);
+
+/**
+ *
+ * The response message associated with the submission of a DoPut.
+ *
+ * @generated from message arrow.flight.protocol.PutResult
+ */
+export type PutResult = Message<"arrow.flight.protocol.PutResult"> & {
+  /**
+   * @generated from field: bytes app_metadata = 1;
+   */
+  appMetadata: Uint8Array;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.PutResult.
+ * Use `create(PutResultSchema)` to create a new message.
+ */
+export const PutResultSchema: GenMessage<PutResult> = /*@__PURE__*/
+  messageDesc(file_Flight, 19);
+
+/**
+ *
+ * EXPERIMENTAL: Union of possible value types for a Session Option to be set to.
+ *
+ * By convention, an attempt to set a valueless SessionOptionValue should
+ * attempt to unset or clear the named option value on the server.
+ *
+ * @generated from message arrow.flight.protocol.SessionOptionValue
+ */
+export type SessionOptionValue = Message<"arrow.flight.protocol.SessionOptionValue"> & {
+  /**
+   * @generated from oneof arrow.flight.protocol.SessionOptionValue.option_value
+   */
+  optionValue: {
+    /**
+     * @generated from field: string string_value = 1;
+     */
+    value: string;
+    case: "stringValue";
+  } | {
+    /**
+     * @generated from field: bool bool_value = 2;
+     */
+    value: boolean;
+    case: "boolValue";
+  } | {
+    /**
+     * @generated from field: sfixed64 int64_value = 3;
+     */
+    value: bigint;
+    case: "int64Value";
+  } | {
+    /**
+     * @generated from field: double double_value = 4;
+     */
+    value: number;
+    case: "doubleValue";
+  } | {
+    /**
+     * @generated from field: arrow.flight.protocol.SessionOptionValue.StringListValue string_list_value = 5;
+     */
+    value: SessionOptionValue_StringListValue;
+    case: "stringListValue";
+  } | { case: undefined; value?: undefined };
+};
+
+/**
+ * Describes the message arrow.flight.protocol.SessionOptionValue.
+ * Use `create(SessionOptionValueSchema)` to create a new message.
+ */
+export const SessionOptionValueSchema: GenMessage<SessionOptionValue> = /*@__PURE__*/
+  messageDesc(file_Flight, 20);
+
+/**
+ * @generated from message arrow.flight.protocol.SessionOptionValue.StringListValue
+ */
+export type SessionOptionValue_StringListValue = Message<"arrow.flight.protocol.SessionOptionValue.StringListValue"> & {
+  /**
+   * @generated from field: repeated string values = 1;
+   */
+  values: string[];
+};
+
+/**
+ * Describes the message arrow.flight.protocol.SessionOptionValue.StringListValue.
+ * Use `create(SessionOptionValue_StringListValueSchema)` to create a new message.
+ */
+export const SessionOptionValue_StringListValueSchema: GenMessage<SessionOptionValue_StringListValue> = /*@__PURE__*/
+  messageDesc(file_Flight, 20, 0);
+
+/**
+ *
+ * EXPERIMENTAL: A request to set session options for an existing or new (implicit)
+ * server session.
+ *
+ * Sessions are persisted and referenced via a transport-level state management, typically
+ * RFC 6265 HTTP cookies when using an HTTP transport. The suggested cookie name or state
+ * context key is 'arrow_flight_session_id', although implementations may freely choose their
+ * own name.
+ *
+ * Session creation (if one does not already exist) is implied by this RPC request, however
+ * server implementations may choose to initiate a session that also contains client-provided
+ * session options at any other time, e.g. on authentication, or when any other call is made
+ * and the server wishes to use a session to persist any state (or lack thereof).
+ *
+ * @generated from message arrow.flight.protocol.SetSessionOptionsRequest
+ */
+export type SetSessionOptionsRequest = Message<"arrow.flight.protocol.SetSessionOptionsRequest"> & {
+  /**
+   * @generated from field: map<string, arrow.flight.protocol.SessionOptionValue> session_options = 1;
+   */
+  sessionOptions: { [key: string]: SessionOptionValue };
+};
+
+/**
+ * Describes the message arrow.flight.protocol.SetSessionOptionsRequest.
+ * Use `create(SetSessionOptionsRequestSchema)` to create a new message.
+ */
+export const SetSessionOptionsRequestSchema: GenMessage<SetSessionOptionsRequest> = /*@__PURE__*/
+  messageDesc(file_Flight, 21);
+
+/**
+ *
+ * EXPERIMENTAL: The results (individually) of setting a set of session options.
+ *
+ * Option names should only be present in the response if they were not successfully
+ * set on the server; that is, a response without an Error for a name provided in the
+ * SetSessionOptionsRequest implies that the named option value was set successfully.
+ *
+ * @generated from message arrow.flight.protocol.SetSessionOptionsResult
+ */
+export type SetSessionOptionsResult = Message<"arrow.flight.protocol.SetSessionOptionsResult"> & {
+  /**
+   * @generated from field: map<string, arrow.flight.protocol.SetSessionOptionsResult.Error> errors = 1;
+   */
+  errors: { [key: string]: SetSessionOptionsResult_Error };
+};
+
+/**
+ * Describes the message arrow.flight.protocol.SetSessionOptionsResult.
+ * Use `create(SetSessionOptionsResultSchema)` to create a new message.
+ */
+export const SetSessionOptionsResultSchema: GenMessage<SetSessionOptionsResult> = /*@__PURE__*/
+  messageDesc(file_Flight, 22);
+
+/**
+ * @generated from message arrow.flight.protocol.SetSessionOptionsResult.Error
+ */
+export type SetSessionOptionsResult_Error = Message<"arrow.flight.protocol.SetSessionOptionsResult.Error"> & {
+  /**
+   * @generated from field: arrow.flight.protocol.SetSessionOptionsResult.ErrorValue value = 1;
+   */
+  value: SetSessionOptionsResult_ErrorValue;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.SetSessionOptionsResult.Error.
+ * Use `create(SetSessionOptionsResult_ErrorSchema)` to create a new message.
+ */
+export const SetSessionOptionsResult_ErrorSchema: GenMessage<SetSessionOptionsResult_Error> = /*@__PURE__*/
+  messageDesc(file_Flight, 22, 0);
+
+/**
+ * @generated from enum arrow.flight.protocol.SetSessionOptionsResult.ErrorValue
+ */
+export enum SetSessionOptionsResult_ErrorValue {
+  /**
+   * Protobuf deserialization fallback value: The status is unknown or unrecognized.
+   * Servers should avoid using this value. The request may be retried by the client.
+   *
+   * @generated from enum value: UNSPECIFIED = 0;
+   */
+  UNSPECIFIED = 0,
+
+  /**
+   * The given session option name is invalid.
+   *
+   * @generated from enum value: INVALID_NAME = 1;
+   */
+  INVALID_NAME = 1,
+
+  /**
+   * The session option value or type is invalid.
+   *
+   * @generated from enum value: INVALID_VALUE = 2;
+   */
+  INVALID_VALUE = 2,
+
+  /**
+   * The session option cannot be set.
+   *
+   * @generated from enum value: ERROR = 3;
+   */
+  ERROR = 3,
+}
+
+/**
+ * Describes the enum arrow.flight.protocol.SetSessionOptionsResult.ErrorValue.
+ */
+export const SetSessionOptionsResult_ErrorValueSchema: GenEnum<SetSessionOptionsResult_ErrorValue> = /*@__PURE__*/
+  enumDesc(file_Flight, 22, 0);
+
+/**
+ *
+ * EXPERIMENTAL: A request to access the session options for the current server session.
+ *
+ * The existing session is referenced via a cookie header or similar (see
+ * SetSessionOptionsRequest above); it is an error to make this request with a missing,
+ * invalid, or expired session cookie header or other implementation-defined session
+ * reference token.
+ *
+ * @generated from message arrow.flight.protocol.GetSessionOptionsRequest
+ */
+export type GetSessionOptionsRequest = Message<"arrow.flight.protocol.GetSessionOptionsRequest"> & {
+};
+
+/**
+ * Describes the message arrow.flight.protocol.GetSessionOptionsRequest.
+ * Use `create(GetSessionOptionsRequestSchema)` to create a new message.
+ */
+export const GetSessionOptionsRequestSchema: GenMessage<GetSessionOptionsRequest> = /*@__PURE__*/
+  messageDesc(file_Flight, 23);
+
+/**
+ *
+ * EXPERIMENTAL: The result containing the current server session options.
+ *
+ * @generated from message arrow.flight.protocol.GetSessionOptionsResult
+ */
+export type GetSessionOptionsResult = Message<"arrow.flight.protocol.GetSessionOptionsResult"> & {
+  /**
+   * @generated from field: map<string, arrow.flight.protocol.SessionOptionValue> session_options = 1;
+   */
+  sessionOptions: { [key: string]: SessionOptionValue };
+};
+
+/**
+ * Describes the message arrow.flight.protocol.GetSessionOptionsResult.
+ * Use `create(GetSessionOptionsResultSchema)` to create a new message.
+ */
+export const GetSessionOptionsResultSchema: GenMessage<GetSessionOptionsResult> = /*@__PURE__*/
+  messageDesc(file_Flight, 24);
+
+/**
+ *
+ * Request message for the "Close Session" action.
+ *
+ * The existing session is referenced via a cookie header.
+ *
+ * @generated from message arrow.flight.protocol.CloseSessionRequest
+ */
+export type CloseSessionRequest = Message<"arrow.flight.protocol.CloseSessionRequest"> & {
+};
+
+/**
+ * Describes the message arrow.flight.protocol.CloseSessionRequest.
+ * Use `create(CloseSessionRequestSchema)` to create a new message.
+ */
+export const CloseSessionRequestSchema: GenMessage<CloseSessionRequest> = /*@__PURE__*/
+  messageDesc(file_Flight, 25);
+
+/**
+ *
+ * The result of closing a session.
+ *
+ * @generated from message arrow.flight.protocol.CloseSessionResult
+ */
+export type CloseSessionResult = Message<"arrow.flight.protocol.CloseSessionResult"> & {
+  /**
+   * @generated from field: arrow.flight.protocol.CloseSessionResult.Status status = 1;
+   */
+  status: CloseSessionResult_Status;
+};
+
+/**
+ * Describes the message arrow.flight.protocol.CloseSessionResult.
+ * Use `create(CloseSessionResultSchema)` to create a new message.
+ */
+export const CloseSessionResultSchema: GenMessage<CloseSessionResult> = /*@__PURE__*/
+  messageDesc(file_Flight, 26);
+
+/**
+ * @generated from enum arrow.flight.protocol.CloseSessionResult.Status
+ */
+export enum CloseSessionResult_Status {
+  /**
+   * Protobuf deserialization fallback value: The session close status is unknown or
+   * not recognized. Servers should avoid using this value (send a NOT_FOUND error if
+   * the requested session is not known or expired). Clients can retry the request.
+   *
+   * @generated from enum value: UNSPECIFIED = 0;
+   */
+  UNSPECIFIED = 0,
+
+  /**
+   * The session close request is complete. Subsequent requests with
+   * the same session produce a NOT_FOUND error.
+   *
+   * @generated from enum value: CLOSED = 1;
+   */
+  CLOSED = 1,
+
+  /**
+   * The session close request is in progress. The client may retry
+   * the close request.
+   *
+   * @generated from enum value: CLOSING = 2;
+   */
+  CLOSING = 2,
+
+  /**
+   * The session is not closeable. The client should not retry the
+   * close request.
+   *
+   * @generated from enum value: NOT_CLOSEABLE = 3;
+   */
+  NOT_CLOSEABLE = 3,
+}
+
+/**
+ * Describes the enum arrow.flight.protocol.CloseSessionResult.Status.
+ */
+export const CloseSessionResult_StatusSchema: GenEnum<CloseSessionResult_Status> = /*@__PURE__*/
+  enumDesc(file_Flight, 26, 0);
+
+/**
+ *
+ * The result of a cancel operation.
+ *
+ * This is used by CancelFlightInfoResult.status.
+ *
+ * @generated from enum arrow.flight.protocol.CancelStatus
+ */
+export enum CancelStatus {
+  /**
+   * The cancellation status is unknown. Servers should avoid using
+   * this value (send a NOT_FOUND error if the requested query is
+   * not known). Clients can retry the request.
+   *
+   * @generated from enum value: CANCEL_STATUS_UNSPECIFIED = 0;
+   */
+  UNSPECIFIED = 0,
+
+  /**
+   * The cancellation request is complete. Subsequent requests with
+   * the same payload may return CANCELLED or a NOT_FOUND error.
+   *
+   * @generated from enum value: CANCEL_STATUS_CANCELLED = 1;
+   */
+  CANCELLED = 1,
+
+  /**
+   * The cancellation request is in progress. The client may retry
+   * the cancellation request.
+   *
+   * @generated from enum value: CANCEL_STATUS_CANCELLING = 2;
+   */
+  CANCELLING = 2,
+
+  /**
+   * The query is not cancellable. The client should not retry the
+   * cancellation request.
+   *
+   * @generated from enum value: CANCEL_STATUS_NOT_CANCELLABLE = 3;
+   */
+  NOT_CANCELLABLE = 3,
+}
+
+/**
+ * Describes the enum arrow.flight.protocol.CancelStatus.
+ */
+export const CancelStatusSchema: GenEnum<CancelStatus> = /*@__PURE__*/
+  enumDesc(file_Flight, 0);
+
+/**
+ *
+ * A flight service is an endpoint for retrieving or storing Arrow data. A
+ * flight service can expose one or more predefined endpoints that can be
+ * accessed using the Arrow Flight Protocol. Additionally, a flight service
+ * can expose a set of actions that are available.
+ *
+ * @generated from service arrow.flight.protocol.FlightService
+ */
+export const FlightService: GenService<{
+  /**
+   *
+   * Handshake between client and server. Depending on the server, the
+   * handshake may be required to determine the token that should be used for
+   * future operations. Both request and response are streams to allow multiple
+   * round-trips depending on the auth mechanism.
+   *
+   * @generated from rpc arrow.flight.protocol.FlightService.Handshake
+   */
+  handshake: {
+    methodKind: "bidi_streaming";
+    input: typeof HandshakeRequestSchema;
+    output: typeof HandshakeResponseSchema;
+  },
+  /**
+   *
+   * Get a list of available streams given particular criteria. Most flight
+   * services will expose one or more streams that are readily available for
+   * retrieval. This API allows listing the streams available for
+   * consumption. A user can also provide criteria to limit the subset of
+   * streams that can be listed via this interface. Each flight service
+   * allows its own definition of how to consume criteria.
+   *
+   * @generated from rpc arrow.flight.protocol.FlightService.ListFlights
+   */
+  listFlights: {
+    methodKind: "server_streaming";
+    input: typeof CriteriaSchema;
+    output: typeof FlightInfoSchema;
+  },
+  /**
+   *
+   * For a given FlightDescriptor, get information about how the flight can be
+   * consumed. This is a useful interface if the consumer of the interface
+   * can already identify the specific flight to consume. This interface can
+   * also allow a consumer to generate a flight stream through a specified
+   * descriptor. For example, a flight descriptor might be something that
+   * includes a SQL statement or a Pickled Python operation that will be
+   * executed. In those cases, the descriptor will not be previously available
+   * within the list of available streams provided by ListFlights but will be
+   * available for consumption for the duration defined by the specific flight
+   * service.
+   *
+   * @generated from rpc arrow.flight.protocol.FlightService.GetFlightInfo
+   */
+  getFlightInfo: {
+    methodKind: "unary";
+    input: typeof FlightDescriptorSchema;
+    output: typeof FlightInfoSchema;
+  },
+  /**
+   *
+   * For a given FlightDescriptor, start a query and get information
+   * to poll its execution status. This is a useful interface if the
+   * query may be a long-running query. The first PollFlightInfo call
+   * should return as quickly as possible. (GetFlightInfo doesn't
+   * return until the query is complete.)
+   *
+   * A client can consume any available results before
+   * the query is completed. See PollInfo.info for details.
+   *
+   * A client can poll the updated query status by calling
+   * PollFlightInfo() with PollInfo.flight_descriptor. A server
+   * should not respond until the result would be different from last
+   * time. That way, the client can "long poll" for updates
+   * without constantly making requests. Clients can set a short timeout
+   * to avoid blocking calls if desired.
+   *
+   * A client can't use PollInfo.flight_descriptor after
+   * PollInfo.expiration_time passes. A server might not accept the
+   * retry descriptor anymore and the query may be cancelled.
+   *
+   * A client may use the CancelFlightInfo action with
+   * PollInfo.info to cancel the running query.
+   *
+   * @generated from rpc arrow.flight.protocol.FlightService.PollFlightInfo
+   */
+  pollFlightInfo: {
+    methodKind: "unary";
+    input: typeof FlightDescriptorSchema;
+    output: typeof PollInfoSchema;
+  },
+  /**
+   *
+   * For a given FlightDescriptor, get the Schema as described in Schema.fbs::Schema.
+   * This is used when a consumer needs the Schema of a flight stream. Similar to
+   * GetFlightInfo, this interface may generate a new flight that was not previously
+   * available in ListFlights.
+   *
+   * @generated from rpc arrow.flight.protocol.FlightService.GetSchema
+   */
+  getSchema: {
+    methodKind: "unary";
+    input: typeof FlightDescriptorSchema;
+    output: typeof SchemaResultSchema;
+  },
+  /**
+   *
+   * Retrieve a single stream associated with a particular descriptor
+   * associated with the referenced ticket. A Flight can be composed of one or
+   * more streams where each stream can be retrieved using a separate opaque
+   * ticket that the flight service uses for managing a collection of streams.
+   *
+   * @generated from rpc arrow.flight.protocol.FlightService.DoGet
+   */
+  doGet: {
+    methodKind: "server_streaming";
+    input: typeof TicketSchema;
+    output: typeof FlightDataSchema;
+  },
+  /**
+   *
+   * Push a stream to the flight service associated with a particular
+   * flight stream. This allows a client of a flight service to upload a stream
+   * of data. Depending on the particular flight service, a client consumer
+   * could be allowed to upload a single stream per descriptor or an unlimited
+   * number. In the latter case, the service might implement a 'seal' action that
+   * can be applied to a descriptor once all streams are uploaded.
+   *
+   * @generated from rpc arrow.flight.protocol.FlightService.DoPut
+   */
+  doPut: {
+    methodKind: "bidi_streaming";
+    input: typeof FlightDataSchema;
+    output: typeof PutResultSchema;
+  },
+  /**
+   *
+   * Open a bidirectional data channel for a given descriptor. This
+   * allows clients to send and receive arbitrary Arrow data and
+   * application-specific metadata in a single logical stream. In
+   * contrast to DoGet/DoPut, this is more suited for clients
+   * offloading computation (rather than storage) to a Flight service.
+   *
+   * @generated from rpc arrow.flight.protocol.FlightService.DoExchange
+   */
+  doExchange: {
+    methodKind: "bidi_streaming";
+    input: typeof FlightDataSchema;
+    output: typeof FlightDataSchema;
+  },
+  /**
+   *
+   * Flight services can support an arbitrary number of simple actions in
+   * addition to the possible ListFlights, GetFlightInfo, DoGet, DoPut
+   * operations that are potentially available. DoAction allows a flight client
+   * to do a specific action against a flight service. An action includes
+   * opaque request and response objects that are specific to the type of
+   * action being undertaken.
+   *
+   * @generated from rpc arrow.flight.protocol.FlightService.DoAction
+   */
+  doAction: {
+    methodKind: "server_streaming";
+    input: typeof ActionSchema;
+    output: typeof ResultSchema;
+  },
+  /**
+   *
+   * A flight service exposes all of the available action types that it has
+   * along with descriptions. This allows different flight consumers to
+   * understand the capabilities of the flight service.
+   *
+   * @generated from rpc arrow.flight.protocol.FlightService.ListActions
+   */
+  listActions: {
+    methodKind: "server_streaming";
+    input: typeof EmptySchema;
+    output: typeof ActionTypeSchema;
+  },
+}> = /*@__PURE__*/
+  serviceDesc(file_Flight, 0);
+
diff --git a/packages/amp/src/index.ts b/packages/amp/src/index.ts
index 077e16b..ba9ee30 100644
--- a/packages/amp/src/index.ts
+++ b/packages/amp/src/index.ts
@@ -1,3 +1,4 @@
-import * as Effect from "effect/Effect"
-
-export const program = Effect.void
+/**
+ * An implementation of the Arrow Flight protocol.
+ */
+export * as ArrowFlight from "./ArrowFlight.ts"
diff --git a/packages/amp/src/internal/arrow-flight-ipc/Decoder.ts b/packages/amp/src/internal/arrow-flight-ipc/Decoder.ts
new file mode 100644
index 0000000..03f5b89
--- /dev/null
+++ b/packages/amp/src/internal/arrow-flight-ipc/Decoder.ts
@@ -0,0 +1,270 @@
+/**
+ * Arrow RecordBatch Decoder
+ *
+ * This module provides utilities for decoding Arrow RecordBatch messages
+ * by combining metadata with body data.
+ *
+ * @internal
+ */
+import {
+  type BufferType,
+  DecodedColumn,
+  DecodedRecordBatch,
+  type DictionaryBatch,
+  getBufferTypesForType,
+  type RecordBatch
+} from "./RecordBatch.ts"
+import type { ArrowField, ArrowSchema, IntType } from "./Schema.ts"
+
+// =============================================================================
+// Dictionary Registry
+// =============================================================================
+
+/**
+ * A decoded dictionary containing the values that dictionary-encoded columns
+ * reference via indices.
+ */
+export class DecodedDictionary {
+  /**
+   * The dictionary ID that matches the field's `dictionaryEncoding.id`.
+   */
+  readonly id: bigint
+
+  /**
+   * The decoded values that indices reference into.
+   */
+  readonly values: ReadonlyArray<unknown>
+
+  /**
+   * The value type of this dictionary (e.g., utf8, int, etc.).
+   */
+  readonly valueType: ArrowField
+
+  constructor(id: bigint, values: ReadonlyArray<unknown>, valueType: ArrowField) {
+    this.id = id
+    this.values = values
+    this.valueType = valueType
+  }
+}
+
+/**
+ * A registry for storing decoded dictionaries. Dictionary-encoded columns
+ * reference dictionaries by ID, and this registry maintains the mapping
+ * from ID to decoded dictionary values.
+ *
+ * This class is mutable and should be used within a streaming context where
+ * dictionary batches may arrive before or interleaved with record batches.
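+ *
+ * @example
+ * ```ts
+ * // A minimal sketch of replace vs. delta registration. The id, values,
+ * // and `valueType` below are illustrative, not taken from a real stream.
+ * declare const valueType: ArrowField
+ *
+ * const registry = new DictionaryRegistry()
+ * registry.register(new DecodedDictionary(1n, ["a", "b"], valueType), false)
+ * registry.register(new DecodedDictionary(1n, ["c"], valueType), true) // delta
+ * registry.get(1n)?.values // => ["a", "b", "c"]
+ * ```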
+ */
+export class DictionaryRegistry {
+  private readonly dictionaries = new Map<bigint, DecodedDictionary>()
+
+  /**
+   * Register a decoded dictionary. If `isDelta` is true, the values are
+   * appended to an existing dictionary with the same ID. Otherwise, the
+   * dictionary replaces any existing one.
+   */
+  register(dictionary: DecodedDictionary, isDelta: boolean): void {
+    if (isDelta) {
+      const existing = this.dictionaries.get(dictionary.id)
+      if (existing) {
+        // Append delta values to the existing dictionary
+        const combinedValues = [...existing.values, ...dictionary.values]
+        this.dictionaries.set(
+          dictionary.id,
+          new DecodedDictionary(dictionary.id, combinedValues, dictionary.valueType)
+        )
+      } else {
+        // No existing dictionary, just set it
+        this.dictionaries.set(dictionary.id, dictionary)
+      }
+    } else {
+      // Replace any existing dictionary
+      this.dictionaries.set(dictionary.id, dictionary)
+    }
+  }
+
+  /**
+   * Get a dictionary by ID.
+   */
+  get(id: bigint): DecodedDictionary | undefined {
+    return this.dictionaries.get(id)
+  }
+
+  /**
+   * Check if a dictionary with the given ID exists.
+   */
+  has(id: bigint): boolean {
+    return this.dictionaries.has(id)
+  }
+
+  /**
+   * Clear all dictionaries from the registry.
+   */
+  clear(): void {
+    this.dictionaries.clear()
+  }
+}
+
+// =============================================================================
+// Dictionary Batch Decoding
+// =============================================================================
+
+/**
+ * Find a field in the schema that references the given dictionary ID.
+ * Dictionary batches need to know the value type, which is stored in the
+ * schema field's type definition.
+ */
+const findFieldByDictionaryId = (
+  fields: ReadonlyArray<ArrowField>,
+  dictionaryId: bigint
+): ArrowField | undefined => {
+  for (const field of fields) {
+    if (field.dictionaryEncoding?.id === dictionaryId) {
+      return field
+    }
+    // Check children recursively
+    const found = findFieldByDictionaryId(field.children, dictionaryId)
+    if (found) {
+      return found
+    }
+  }
+  return undefined
+}
+
+/**
+ * A function type for reading column values from a decoded column.
+ * This is passed as a parameter to avoid circular dependencies with Readers.ts.
+ */
+export type ColumnValueReader = (column: DecodedColumn) => ReadonlyArray<unknown>
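+
+// An illustrative sketch of how these pieces combine when consuming an IPC
+// stream (the `dictMessage`/`recordMessage` values and their bodies are
+// assumed to come from an already-parsed stream; `readColumnValues` is the
+// reader exported by Readers.ts):
+//
+//   const registry = new DictionaryRegistry()
+//   decodeDictionaryBatch(dictMessage, dictBody, schema, registry, readColumnValues)
+//   const decoded = decodeRecordBatch(recordMessage, recordBody, schema)
+//   const columns = decoded.columns.map((c) => readColumnValues(c))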
+
+/**
+ * Decodes a `DictionaryBatch` by combining metadata with body data and
+ * registers it in the provided dictionary registry.
+ *
+ * @param dictionaryBatch - The parsed dictionary batch metadata
+ * @param body - The binary body data containing the dictionary values
+ * @param schema - The Arrow schema (used to find the field that references this dictionary)
+ * @param registry - The dictionary registry to store the decoded dictionary
+ * @param readColumnValues - Function to read column values (passed to avoid circular deps)
+ */
+export const decodeDictionaryBatch = (
+  dictionaryBatch: DictionaryBatch,
+  body: Uint8Array,
+  schema: ArrowSchema,
+  registry: DictionaryRegistry,
+  readColumnValues: ColumnValueReader
+): void => {
+  const { data, id, isDelta } = dictionaryBatch
+
+  // Find the field that references this dictionary to get the value type
+  const field = findFieldByDictionaryId(schema.fields, id)
+  if (!field) {
+    throw new Error(`No field found referencing dictionary ID ${id}`)
+  }
+
+  // Extract buffers from the dictionary batch body
+  const extractedBuffers = data.buffers.map((descriptor) => {
+    const start = Number(descriptor.offset)
+    const end = start + Number(descriptor.length)
+    return body.subarray(start, end)
+  })
+
+  // Dictionary batches have a single field with the dictionary values.
+  // The field type is the actual value type (e.g., utf8 for string dictionaries).
+  const node = data.nodes[0]
+  const bufferTypes = getBufferTypesForType(field.type)
+
+  const buffers: Array<Uint8Array> = []
+  for (let i = 0; i < bufferTypes.length; i++) {
+    buffers.push(extractedBuffers[i])
+  }
+
+  // Create a decoded column for the dictionary values
+  const dictionaryColumn = new DecodedColumn(field, node, buffers, [])
+
+  // Read the dictionary values using the provided reader function
+  const values = readColumnValues(dictionaryColumn)
+
+  // Register the dictionary
+  const decodedDictionary = new DecodedDictionary(id, values, field)
+  registry.register(decodedDictionary, isDelta)
+}
+
+// =============================================================================
+// Record Batch Decoding
+// =============================================================================
+
+/**
+ * Decodes a `RecordBatch` by combining metadata with body data.
+ *
+ * @throws {Error} If the record batch uses compression (not currently supported)
+ */
+export const decodeRecordBatch = (
+  recordBatch: RecordBatch,
+  body: Uint8Array,
+  schema: ArrowSchema
+): DecodedRecordBatch => {
+  // Check for compression - not currently supported
+  if (recordBatch.compression) {
+    const codecName = recordBatch.compression.codec === 0 ? "LZ4_FRAME" : "ZSTD"
+    throw new Error(
+      `Compressed record batches are not currently supported. ` +
+        `This batch uses ${codecName} compression. ` +
+        `To process this data, the server should be configured to send uncompressed data, ` +
+        `or compression support needs to be added to this library.`
+    )
+  }
+
+  const extractedBuffers = recordBatch.buffers.map((descriptor) => {
+    // TODO: figure out if this can lead to bugs due to loss of precision when
+    // converting bigint to number
+    const start = Number(descriptor.offset)
+    const end = start + Number(descriptor.length)
+    return body.subarray(start, end)
+  })
+
+  let nodeIndex = 0
+  let bufferIndex = 0
+
+  function decodeField(field: ArrowField): DecodedColumn {
+    const node = recordBatch.nodes[nodeIndex++]
+
+    // For dictionary-encoded fields, the buffer layout is based on the index type,
+    // not the value type. Dictionary-encoded columns have a [validity, data] layout
+    // where data contains integer indices.
+    const bufferTypes = field.dictionaryEncoding
+      ? getBufferTypesForDictionaryEncoding(field.dictionaryEncoding.indexType)
+      : getBufferTypesForType(field.type)
+
+    const buffers: Array<Uint8Array> = []
+    for (let i = 0; i < bufferTypes.length; i++) {
+      buffers.push(extractedBuffers[bufferIndex++])
+    }
+
+    // Decode children recursively
+    const children: ReadonlyArray<DecodedColumn> = field.children.map((field) => decodeField(field))
+
+    return new DecodedColumn(field, node, buffers, children)
+  }
+
+  const numRows = recordBatch.length
+  const columns = schema.fields.map((field) => decodeField(field))
+
+  return new DecodedRecordBatch(schema, numRows, columns)
+}
+
+/**
+ * Get the buffer types for a dictionary-encoded field.
+ * Dictionary-encoded fields store integer indices, so they have the same
+ * buffer layout as an integer type: [validity, data].
+ */
+const getBufferTypesForDictionaryEncoding = (
+  _indexType: IntType
+): ReadonlyArray<BufferType> => {
+  // Dictionary-encoded columns always have: validity bitmap + index data.
+  // The index type determines the bit width but not the buffer layout.
+  return [
+    0, // BufferType.VALIDITY
+    3 // BufferType.DATA
+  ]
+}
diff --git a/packages/amp/src/internal/arrow-flight-ipc/Errors.ts b/packages/amp/src/internal/arrow-flight-ipc/Errors.ts
new file mode 100644
index 0000000..f23124e
--- /dev/null
+++ b/packages/amp/src/internal/arrow-flight-ipc/Errors.ts
@@ -0,0 +1,64 @@
+import * as Schema from "effect/Schema"
+
+/**
+ * An error which occurs when attempting to parse an invalid Arrow data type.
+ *
+ * @internal
+ */
+export class InvalidArrowDataTypeError extends Schema.TaggedError<InvalidArrowDataTypeError>(
+  "Amp/InvalidArrowDataTypeError"
+)("InvalidArrowDataTypeError", {
+  type: Schema.Number,
+  offset: Schema.Number
+}) {}
+
+/**
+ * An error which occurs when the expected FlatBuffer message header type is not
+ * valid.
+ *
+ * @internal
+ */
+export class InvalidMessageTypeError extends Schema.TaggedError<InvalidMessageTypeError>(
+  "Amp/InvalidMessageTypeError"
+)("InvalidMessageTypeError", {
+  value: Schema.Number
+}) {
+  override get message(): string {
+    return `Received invalid value for Arrow Flight message type: ${this.value}`
+  }
+}
+
+/**
+ * An error which occurs when attempting to access a FlatBuffers field which
+ * does not exist.
+ *
+ * @internal
+ */
+export class MissingFieldError extends Schema.TaggedError<MissingFieldError>(
+  "Amp/MissingFieldError"
+)("MissingFieldError", {
+  fieldName: Schema.String,
+  fieldIndex: Schema.Number,
+  tableOffset: Schema.Number
+}) {
+  override get message(): string {
+    return `Failed to find message field '${this.fieldName}' at index ${this.fieldIndex} (offset: ${this.tableOffset})`
+  }
+}
+
+/**
+ * An error which occurs when the expected FlatBuffer message header type is not
+ * the same as the received message header type.
+ *
+ * @internal
+ */
+export class UnexpectedMessageTypeError extends Schema.TaggedError<UnexpectedMessageTypeError>(
+  "Amp/UnexpectedMessageTypeError"
+)("UnexpectedMessageTypeError", {
+  expected: Schema.Number,
+  received: Schema.Number
+}) {
+  override get message(): string {
+    return `Expected to receive message header type ${this.expected} - received: ${this.received}`
+  }
+}
diff --git a/packages/amp/src/internal/arrow-flight-ipc/FlatBufferReader.ts b/packages/amp/src/internal/arrow-flight-ipc/FlatBufferReader.ts
new file mode 100644
index 0000000..cfb4487
--- /dev/null
+++ b/packages/amp/src/internal/arrow-flight-ipc/FlatBufferReader.ts
@@ -0,0 +1,98 @@
+/**
+ * A utility class for reading FlatBuffer-encoded data.
+ *
+ * FlatBuffers are a serialization format that allows efficient reading of
+ * structured data without parsing or unpacking.
+ *
+ * @internal
+ */
+export class FlatBufferReader {
+  private view: DataView
+  private bytes: Uint8Array
+
+  constructor(bytes: Uint8Array) {
+    this.bytes = bytes
+    this.view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength)
+  }
+
+  readInt8(offset: number): number {
+    return this.view.getInt8(offset)
+  }
+
+  readUint8(offset: number): number {
+    return this.view.getUint8(offset)
+  }
+
+  readInt16(offset: number): number {
+    return this.view.getInt16(offset, true)
+  }
+
+  readUint16(offset: number): number {
+    return this.view.getUint16(offset, true)
+  }
+
+  readInt32(offset: number): number {
+    return this.view.getInt32(offset, true)
+  }
+
+  readUint32(offset: number): number {
+    return this.view.getUint32(offset, true)
+  }
+
+  readInt64(offset: number): bigint {
+    return this.view.getBigInt64(offset, true)
+  }
+
+  readUint64(offset: number): bigint {
+    return this.view.getBigUint64(offset, true)
+  }
+
+  /**
+   * Read offset to a table/vector (indirect offset)
+   */
+  readOffset(offset: number): number {
+    return offset + this.readInt32(offset)
+  }
+
+  /**
+   * Read a string from a FlatBuffer offset
+   */
+  readString(offset: number): string {
+    const stringOffset = this.readOffset(offset)
+    const length = this.readInt32(stringOffset)
+    const stringStart = stringOffset + 4
+    const stringBytes = this.bytes.subarray(stringStart, stringStart + length)
+    return new TextDecoder().decode(stringBytes)
+  }
+
+  /**
+   * Read vector length
+   */
+  readVectorLength(offset: number): number {
+    return this.readInt32(offset)
+  }
+
+  /**
+   * Get table field offset using the vtable
+   */
+  getFieldOffset(tableOffset: number, fieldIndex: number): number {
+    const vtableOffset = tableOffset - this.readInt32(tableOffset)
+    const vtableSize = this.readInt16(vtableOffset)
+    const fieldVtableOffset = 4 + fieldIndex * 2
+
+    if (fieldVtableOffset >= vtableSize) {
+      return 0 // Field not present
+    }
+
+    return this.readInt16(vtableOffset + fieldVtableOffset)
+  }
+
+  /**
+   * Get absolute position of a field in a table
+   */
+  getFieldPosition(tableOffset: number, fieldIndex: number): number | null {
+    const fieldOffset = this.getFieldOffset(tableOffset, fieldIndex)
+    if (fieldOffset === 0) {
+      return null
+    }
+    return tableOffset + fieldOffset
+  }
+}
diff --git a/packages/amp/src/internal/arrow-flight-ipc/Json.ts b/packages/amp/src/internal/arrow-flight-ipc/Json.ts
new file mode 100644
index 0000000..a84a39f
--- /dev/null
+++ b/packages/amp/src/internal/arrow-flight-ipc/Json.ts
@@ -0,0 +1,105 @@
+/**
+ * Arrow RecordBatch to JSON Conversion
+ *
+ * This module provides utilities for converting decoded Arrow RecordBatch
+ * data to JSON-compatible row objects.
+ *
+ * @internal
+ */
+import type { DictionaryRegistry } from "./Decoder.ts"
+import { readColumnValues } from "./Readers.ts"
+import type { DecodedRecordBatch } from "./RecordBatch.ts"
+
+export interface RecordBatchToJsonOptions {
+  bigIntHandling?: "string" | "number" | "bigint"
+  dateHandling?: "iso" | "timestamp" | "date"
+  binaryHandling?: "base64" | "hex" | "array"
+  includeNulls?: boolean
+  /**
+   * Dictionary registry for resolving dictionary-encoded columns.
+   * Required if the record batch contains dictionary-encoded fields.
+   */
+  dictionaryRegistry?: DictionaryRegistry
+}
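+
+// An illustrative example of the options' effect (see recordBatchToJson
+// below; `batch` is assumed to be an already-decoded record batch):
+//
+//   const rows = recordBatchToJson(batch, {
+//     bigIntHandling: "number",
+//     dateHandling: "timestamp",
+//     binaryHandling: "hex"
+//   })
+//
+// With the defaults ("string", "iso", "base64"), int64 columns become
+// decimal strings, dates become ISO-8601 strings, and binary columns
+// become base64 strings.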
+
+const uint8ArrayToBase64 = (bytes: Uint8Array): string => {
+  let binary = ""
+  for (let i = 0; i < bytes.length; i++) binary += String.fromCharCode(bytes[i])
+  return btoa(binary)
+}
+
+const uint8ArrayToHex = (bytes: Uint8Array): string => {
+  return Array.from(bytes).map((b) => b.toString(16).padStart(2, "0")).join("")
+}
+
+const convertValue = (
+  value: unknown,
+  opts: { bigIntHandling: string; dateHandling: string; binaryHandling: string }
+): unknown => {
+  if (value === null || value === undefined) return null
+  if (typeof value === "bigint") {
+    return opts.bigIntHandling === "string"
+      ? value.toString()
+      : opts.bigIntHandling === "number"
+      ? Number(value)
+      : value
+  }
+  if (value instanceof Date) {
+    return opts.dateHandling === "iso"
+      ? value.toISOString()
+      : opts.dateHandling === "timestamp"
+      ? value.getTime()
+      : value
+  }
+  if (value instanceof Uint8Array) {
+    return opts.binaryHandling === "base64"
+      ? uint8ArrayToBase64(value)
+      : opts.binaryHandling === "hex"
+      ? uint8ArrayToHex(value)
+      : Array.from(value)
+  }
+  if (Array.isArray(value)) {
+    return value.map((v) => convertValue(v, opts))
+  }
+  if (typeof value === "object") {
+    const result: Record<string, unknown> = {}
+    for (const [k, v] of Object.entries(value)) result[k] = convertValue(v, opts)
+    return result
+  }
+  return value
+}
+
+/**
+ * Convert a decoded record batch to an array of JSON-compatible row objects.
+ */
+export const recordBatchToJson = (
+  batch: DecodedRecordBatch,
+  options: RecordBatchToJsonOptions = {}
+): Array<Record<string, unknown>> => {
+  const {
+    bigIntHandling = "string",
+    binaryHandling = "base64",
+    dateHandling = "iso",
+    dictionaryRegistry,
+    includeNulls = true
+  } = options
+
+  const opts = { bigIntHandling, dateHandling, binaryHandling }
+  const numRows = Number(batch.numRows)
+  const rows: Array<Record<string, unknown>> = []
+  const columnValues = new Map<string, ReadonlyArray<unknown>>(
+    batch.columns.map((c) => [c.field.name, readColumnValues(c, dictionaryRegistry)])
+  )
+
+  for (let i = 0; i < numRows; i++) {
+    const row: Record<string, unknown> = {}
+    for (const col of batch.columns) {
+      const value = columnValues.get(col.field.name)![i]
+      if (value === null) {
+        if (includeNulls) row[col.field.name] = null
+      } else {
+        row[col.field.name] = convertValue(value, opts)
+      }
+    }
+    rows.push(row)
+  }
+  return rows
+}
diff --git a/packages/amp/src/internal/arrow-flight-ipc/Readers.ts b/packages/amp/src/internal/arrow-flight-ipc/Readers.ts
new file mode 100644
index 0000000..a974148
--- /dev/null
+++ b/packages/amp/src/internal/arrow-flight-ipc/Readers.ts
@@ -0,0 +1,1680 @@
+/**
+ * Arrow Value Readers
+ *
+ * This module provides utilities for reading typed values from Arrow buffers.
+ *
+ * @internal
+ */
+import type { Predicate } from "effect/Predicate"
+import type { DictionaryRegistry } from "./Decoder.ts"
+import type { DecodedColumn } from "./RecordBatch.ts"
+import type { DictionaryEncoding, IntBitWidth, TimeBitWidth, TimeUnit, UnionMode } from "./Schema.ts"
+
+// =============================================================================
+// Constants
+// =============================================================================
+
+/**
+ * The maximum safe integer value that can be precisely represented in JavaScript.
+ * Values larger than this will lose precision when converted from bigint to number.
+ */
+const MAX_SAFE_INTEGER = BigInt(Number.MAX_SAFE_INTEGER)
+const MIN_SAFE_INTEGER = BigInt(Number.MIN_SAFE_INTEGER)
+
+// -----------------------------------------------------------------------------
+// Byte Sizes
+// -----------------------------------------------------------------------------
+
+/** Number of bytes per 16-bit integer */
+const BYTES_PER_INT16 = 2
+/** Number of bytes per 32-bit integer */
+const BYTES_PER_INT32 = 4
+/** Number of bytes per 64-bit integer */
+const BYTES_PER_INT64 = 8
+/** Number of bytes per interval (month-day-nano) value: 4 + 4 + 8 = 16 */
+const BYTES_PER_INTERVAL_MONTH_DAY_NANO = 16
+/** Number of bits per byte (for validity bitmap calculations) */
+const BITS_PER_BYTE = 8
+
+// -----------------------------------------------------------------------------
+// Time Constants
+// -----------------------------------------------------------------------------
+
+/** Milliseconds per day for date conversions */
+const MS_PER_DAY = 86400000
+
+/**
+ * Module-level singleton TextDecoder for UTF-8 decoding.
+ * Reusing a single instance avoids repeated allocations.
+ */
+const textDecoder = new TextDecoder()
+
+/**
+ * Lookup table for converting time units to milliseconds.
+ * Defined at module level to avoid repeated object creation.
+ */
+const TIME_UNIT_TO_MS: Record<TimeUnit, number> = {
+  MICROSECOND: 0.001,
+  MILLISECOND: 1,
+  NANOSECOND: 0.000001,
+  SECOND: 1000
+}
+
+/**
+ * Lookup table for converting timestamp units to milliseconds.
+ * Uses functions because SECOND and MILLISECOND need overflow checking.
+ */
+const TIMESTAMP_UNIT_TO_MS: Record<TimeUnit, (v: bigint, idx: number) => number> = {
+  MICROSECOND: (v) => Number(v / 1000n),
+  MILLISECOND: (v, idx) => bigintToNumberSafe(v, `timestamp[${idx}] milliseconds`),
+  NANOSECOND: (v) => Number(v / 1000000n),
+  SECOND: (v, idx) => bigintToNumberSafe(v, `timestamp[${idx}] seconds`) * 1000
+}
+
+// =============================================================================
+// Utilities
+// =============================================================================
+
+/**
+ * Safely converts a bigint to a number, throwing an error if precision would be lost.
+ *
+ * JavaScript's `Number` type can only precisely represent integers up to 2^53 - 1.
+ * This function ensures that conversions from bigint to number don't silently
+ * lose precision for large values.
+ *
+ * @throws {RangeError} If the value exceeds Number.MAX_SAFE_INTEGER or is less than Number.MIN_SAFE_INTEGER
+ */
+const bigintToNumberSafe = (value: bigint, context?: string): number => {
+  if (value > MAX_SAFE_INTEGER || value < MIN_SAFE_INTEGER) {
+    const contextMsg = context ? ` (${context})` : ""
+    throw new RangeError(
+      `Value ${value}${contextMsg} exceeds safe integer range for Number conversion. ` +
+        `Use BigInt-based APIs for values outside the range [${Number.MIN_SAFE_INTEGER}, ${Number.MAX_SAFE_INTEGER}].`
+    )
+  }
+  return Number(value)
+}
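+
+// For example, bigintToNumberSafe(9007199254740991n) returns the number
+// 9007199254740991, while bigintToNumberSafe(2n ** 53n) throws a RangeError
+// because it exceeds Number.MAX_SAFE_INTEGER.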
+
+// =============================================================================
+// Validity
+// =============================================================================
+
+/**
+ * Creates a predicate function that checks whether a value at a given index is
+ * valid (non-null) based on an Arrow validity bitmap buffer.
+ *
+ * In the Arrow columnar format, null values are tracked using a validity bitmap
+ * where each bit corresponds to one value in the column. A bit value of 1 indicates
+ * the value is valid (non-null), while 0 indicates the value is null.
+ *
+ * The bitmap uses least-significant bit (LSB) numbering within each byte, meaning
+ * bit 0 of byte 0 corresponds to index 0, bit 1 of byte 0 corresponds to index 1,
+ * and so on. After 8 values, we move to the next byte.
+ *
+ * @param validityBuffer - The validity bitmap buffer from an Arrow column. If this
+ *   buffer is empty (length 0), all values are considered valid per the Arrow spec.
+ *   The buffer should contain ceil(length / 8) bytes to cover all values.
+ *
+ * @returns A predicate function that takes an index and returns `true` if the value
+ *   at that index is valid (non-null), or `false` if the value is null. For indices
+ *   that exceed the buffer's range, returns `false` (treated as null) for safety.
+ *
+ * @example
+ * ```ts
+ * // Buffer where first value is valid, second is null, third is valid
+ * // Binary: 00000101 = bits 0 and 2 are set
+ * const bitmap = new Uint8Array([0b00000101])
+ * const isValid = createValidityChecker(bitmap)
+ *
+ * isValid(0) // true - bit 0 is set
+ * isValid(1) // false - bit 1 is not set
+ * isValid(2) // true - bit 2 is set
+ * ```
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#validity-bitmaps | Arrow Validity Bitmaps}
+ */
+export const createValidityChecker = (validityBuffer: Uint8Array): Predicate<number> => {
+  if (validityBuffer.length === 0) {
+    // No validity buffer means all values are valid
+    return () => true
+  }
+
+  const bufferLength = validityBuffer.length
+
+  return (index) => {
+    const byteIndex = Math.floor(index / BITS_PER_BYTE)
+    // Out of bounds access - treat as invalid (null)
+    if (byteIndex >= bufferLength) {
+      return false
+    }
+    const bitIndex = index % BITS_PER_BYTE
+    return (validityBuffer[byteIndex] & (1 << bitIndex)) !== 0
+  }
+}
+
+// =============================================================================
+// Booleans
+// =============================================================================
+
+/**
+ * Reads boolean values from an Arrow boolean column's buffers.
+ *
+ * Arrow stores boolean values in a packed bit format where each bit represents
+ * one boolean value. Like the validity bitmap, boolean data uses LSB (least-significant
+ * bit) numbering: bit 0 of byte 0 is value 0, bit 7 of byte 0 is value 7, bit 0 of
+ * byte 1 is value 8, and so on.
+ *
+ * The function respects the validity bitmap, returning `null` for values where
+ * the corresponding validity bit is 0.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ *   An empty buffer indicates all values are valid.
+ * @param dataBuffer - The packed boolean data buffer where each bit represents one
+ *   boolean value. Should contain at least ceil(length / 8) bytes.
+ * @param length - The number of boolean values to read from the buffers.
+ *
+ * @returns An array of boolean values (or null for invalid/null entries) with the
+ *   specified length. The array is marked readonly to prevent mutation.
+ *
+ * @example
+ * ```ts
+ * // Data buffer: 0b00000101 = values [true, false, true, false, false, false, false, false]
+ * // Validity buffer: 0b00000111 = first 3 values valid, rest null
+ * const validity = new Uint8Array([0b00000111])
+ * const data = new Uint8Array([0b00000101])
+ * const values = readBoolValues(validity, data, 4)
+ * // Result: [true, false, true, null]
+ * ```
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readBoolValues = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number
+): ReadonlyArray<boolean | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const values = new Array<boolean | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (validityChecker(i)) {
+      const byteIndex = Math.floor(i / BITS_PER_BYTE)
+      const bitIndex = i % BITS_PER_BYTE
+      values[i] = (dataBuffer[byteIndex] & (1 << bitIndex)) !== 0
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+// =============================================================================
+// Integers
+// =============================================================================
+
+/**
+ * Reads 8-bit integer values from an Arrow Int8/UInt8 column's buffers.
+ *
+ * Arrow stores fixed-width integers in a contiguous buffer with native byte order.
+ * Each 8-bit integer occupies exactly 1 byte, with values stored sequentially
+ * starting at byte offset 0.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ *   An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing packed 8-bit integers. Should contain
+ *   at least `length` bytes.
+ * @param length - The number of integer values to read.
+ * @param signed - If `true`, interprets values as signed integers (Int8, range -128 to 127).
+ *   If `false`, interprets as unsigned integers (UInt8, range 0 to 255).
+ *
+ * @returns An array of number values (or null for invalid entries). All values fit
+ *   safely within JavaScript's number type without precision loss.
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readInt8Values = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number,
+  signed: boolean
+): ReadonlyArray<number | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<number | null>(length)
+  for (let i = 0; i < length; i++) {
+    values[i] = validityChecker(i) ? (signed ? view.getInt8(i) : view.getUint8(i)) : null
+  }
+  return values
+}
+
+/**
+ * Reads 16-bit integer values from an Arrow Int16/UInt16 column's buffers.
+ *
+ * Arrow stores 16-bit integers in little-endian byte order. Each value occupies
+ * 2 bytes, with values stored contiguously starting at byte offset 0.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ *   An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing packed 16-bit integers. Should contain
+ *   at least `length * 2` bytes.
+ * @param length - The number of integer values to read.
+ * @param signed - If `true`, interprets values as signed integers (Int16, range -32768 to 32767).
+ *   If `false`, interprets as unsigned integers (UInt16, range 0 to 65535).
+ *
+ * @returns An array of number values (or null for invalid entries). All values fit
+ *   safely within JavaScript's number type without precision loss.
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readInt16Values = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number,
+  signed: boolean
+): ReadonlyArray<number | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<number | null>(length)
+  for (let i = 0; i < length; i++) {
+    const offset = i * BYTES_PER_INT16
+    values[i] = validityChecker(i) ? (signed ? view.getInt16(offset, true) : view.getUint16(offset, true)) : null
+  }
+  return values
+}
+
+/**
+ * Reads 32-bit integer values from an Arrow Int32/UInt32 column's buffers.
+ *
+ * Arrow stores 32-bit integers in little-endian byte order. Each value occupies
+ * 4 bytes, with values stored contiguously starting at byte offset 0.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ *   An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing packed 32-bit integers. Should contain
+ *   at least `length * 4` bytes.
+ * @param length - The number of integer values to read.
+ * @param signed - If `true`, interprets values as signed integers (Int32, range -2^31 to 2^31-1).
+ *   If `false`, interprets as unsigned integers (UInt32, range 0 to 2^32-1). Defaults to `true`.
+ *
+ * @returns An array of number values (or null for invalid entries). All values fit
+ *   safely within JavaScript's number type without precision loss.
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readInt32Values = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number,
+  signed: boolean = true
+): ReadonlyArray<number | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<number | null>(length)
+  for (let i = 0; i < length; i++) {
+    const offset = i * BYTES_PER_INT32
+    values[i] = validityChecker(i) ? (signed ? view.getInt32(offset, true) : view.getUint32(offset, true)) : null
+  }
+  return values
+}
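+
+// For example, the little-endian bytes [0x01, 0x00, 0x00, 0x00, 0xff, 0xff,
+// 0xff, 0xff] decode as two int32 values:
+//
+//   readInt32Values(new Uint8Array(0), new Uint8Array([1, 0, 0, 0, 255, 255, 255, 255]), 2, true)
+//   // => [1, -1]  (an empty validity buffer means all values are valid)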
+ * `bigint` to preserve full 64-bit precision.
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readInt64Values = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number,
+  signed: boolean = true
+): ReadonlyArray<bigint | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<bigint | null>(length)
+  for (let i = 0; i < length; i++) {
+    const offset = i * BYTES_PER_INT64
+    values[i] = validityChecker(i) ? (signed ? view.getBigInt64(offset, true) : view.getBigUint64(offset, true)) : null
+  }
+  return values
+}
+
+// =============================================================================
+// Floats
+// =============================================================================
+
+/**
+ * Decodes a 16-bit half-precision floating point value (IEEE 754 binary16)
+ * from its bit representation to a JavaScript number.
+ *
+ * Half-precision floats use:
+ * - 1 bit for sign
+ * - 5 bits for exponent (biased by 15)
+ * - 10 bits for fraction/mantissa
+ *
+ * @param bits - The 16-bit unsigned integer representing the half-precision float
+ * @returns The decoded floating point number
+ */
+const decodeFloat16 = (bits: number): number => {
+  const sign = (bits >> 15) & 1
+  const exponent = (bits >> 10) & 0x1f
+  const fraction = bits & 0x3ff
+  if (exponent === 0) return (sign ? -1 : 1) * Math.pow(2, -14) * (fraction / 1024)
+  if (exponent === 31) return fraction === 0 ? (sign ? -Infinity : Infinity) : NaN
+  return (sign ? -1 : 1) * Math.pow(2, exponent - 15) * (1 + fraction / 1024)
+}
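+
+// Worked example (illustrative): 0x3c00 has sign 0, exponent 15, fraction 0,
+// so decodeFloat16(0x3c00) = 2^(15 - 15) * (1 + 0/1024) = 1.0. Likewise
+// decodeFloat16(0xc000) = -2.0, and decodeFloat16(0x7c00) = Infinity.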
+
+/**
+ * Reads 16-bit half-precision floating point values from an Arrow Float16 column's buffers.
+ *
+ * Arrow stores Float16 values as 2 bytes per value in little-endian byte order.
+ * Half-precision floats (IEEE 754 binary16) offer reduced precision but smaller
+ * storage compared to single/double precision. They are commonly used in machine
+ * learning and graphics applications.
+ *
+ * The values are decoded from their bit representation to JavaScript `number` values
+ * (which are internally double-precision floats).
+ *
+ * **Precision note**: Float16 has limited precision (about 3-4 significant decimal digits)
+ * and a smaller range than Float32/Float64. Values outside the representable range
+ * become Infinity or -Infinity.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing 16-bit float values. Should contain
+ * at least `length * 2` bytes.
+ * @param length - The number of float values to read.
+ *
+ * @returns An array of number values (or null for invalid entries).
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readFloat16Values = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number
+): ReadonlyArray<number | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<number | null>(length)
+  for (let i = 0; i < length; i++) {
+    values[i] = validityChecker(i) ? decodeFloat16(view.getUint16(i * BYTES_PER_INT16, true)) : null
+  }
+  return values
+}
+
+/**
+ * Reads 32-bit single-precision floating point values from an Arrow Float32 column's buffers.
+ *
+ * Arrow stores Float32 values as 4 bytes per value in little-endian byte order,
+ * following the IEEE 754 binary32 standard. Single-precision floats offer about
+ * 6-7 significant decimal digits of precision.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing 32-bit float values. Should contain
+ * at least `length * 4` bytes.
+ * @param length - The number of float values to read.
+ *
+ * @returns An array of number values (or null for invalid entries). Note that JavaScript
+ * numbers are internally 64-bit doubles, so Float32 values are promoted to double
+ * precision upon reading.
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readFloat32Values = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number
+): ReadonlyArray<number | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<number | null>(length)
+  for (let i = 0; i < length; i++) {
+    values[i] = validityChecker(i) ? view.getFloat32(i * BYTES_PER_INT32, true) : null
+  }
+  return values
+}
+
+/**
+ * Reads 64-bit double-precision floating point values from an Arrow Float64 column's buffers.
+ *
+ * Arrow stores Float64 values as 8 bytes per value in little-endian byte order,
+ * following the IEEE 754 binary64 standard. Double-precision floats offer about
+ * 15-17 significant decimal digits of precision and are the native precision of
+ * JavaScript's `number` type.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing 64-bit float values. Should contain
+ * at least `length * 8` bytes.
+ * @param length - The number of float values to read.
+ *
+ * @returns An array of number values (or null for invalid entries).
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readFloat64Values = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number
+): ReadonlyArray<number | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<number | null>(length)
+  for (let i = 0; i < length; i++) {
+    values[i] = validityChecker(i) ? view.getFloat64(i * BYTES_PER_INT64, true) : null
+  }
+  return values
+}
+
+// =============================================================================
+// Decimals
+// =============================================================================
+
+/**
+ * Converts a sequence of bytes (little-endian) to a bigint value.
+ *
+ * Arrow stores decimal values as two's complement integers in little-endian byte
+ * order. This function reconstructs the bigint value from those bytes, handling
+ * both signed and unsigned interpretations.
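+ *
+ * @example
+ * ```ts
+ * // Hypothetical inputs to illustrate the two's complement handling:
+ * bytesToBigInt(new Uint8Array([0x2a, 0x00]), true)  // 42n
+ * bytesToBigInt(new Uint8Array([0xff, 0xff]), true)  // -1n
+ * bytesToBigInt(new Uint8Array([0xff, 0xff]), false) // 65535n
+ * ```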
+ * + * @param bytes - The byte array in little-endian order + * @param signed - If true, interprets as signed (two's complement) + * @returns The bigint representation of the bytes + */ +const bytesToBigInt = (bytes: Uint8Array, signed: boolean): bigint => { + let result = 0n + for (let i = bytes.length - 1; i >= 0; i--) { + result = (result << 8n) | BigInt(bytes[i]) + } + if (signed && bytes.length > 0 && (bytes[bytes.length - 1] & 0x80) !== 0) { + result = result - (1n << BigInt(bytes.length * 8)) + } + return result +} + +/** + * Formats a decimal integer value as a string with the proper decimal point placement. + * + * Arrow Decimal types store values as scaled integers. For example, the value 123.45 + * with scale=2 is stored as the integer 12345. This function converts the integer + * back to a properly formatted decimal string. + * + * @param value - The unscaled bigint value representing the decimal + * @param scale - The number of digits after the decimal point + * @returns A string representation of the decimal value (e.g., "123.45") + */ +const formatDecimal = (value: bigint, scale: number): string => { + const isNegative = value < 0n + const absValue = isNegative ? -value : value + const str = absValue.toString() + if (scale === 0) return isNegative ? `-${str}` : str + const paddedStr = str.padStart(scale + 1, "0") + const intPart = paddedStr.slice(0, -scale) || "0" + const fracPart = paddedStr.slice(-scale) + return `${isNegative ? "-" : ""}${intPart}.${fracPart}` +} + +/** + * Reads decimal values from an Arrow Decimal128 or Decimal256 column's buffers. + * + * Arrow represents decimal numbers as fixed-point integers with a specified precision + * (total number of digits) and scale (digits after the decimal point). The values are + * stored as little-endian two's complement integers using 128 bits (16 bytes) or + * 256 bits (32 bytes) depending on the decimal type. + * + * Values are returned as strings to preserve exact precision, since JavaScript numbers + * cannot accurately represent all decimal values (especially those with many significant + * digits or large magnitudes). + * + * @param validityBuffer - The validity bitmap indicating which values are non-null. + * An empty buffer indicates all values are valid. + * @param dataBuffer - The data buffer containing the decimal values. Should contain + * at least `length * (bitWidth / 8)` bytes. + * @param length - The number of decimal values to read. + * @param scale - The number of digits after the decimal point. For example, a scale + * of 2 means values like "123.45". + * @param bitWidth - The bit width of the decimal type (128 or 256 bits). + * + * @returns An array of string values representing the decimals (or null for invalid + * entries). Strings preserve exact precision without floating-point rounding errors. 
+ *
+ * @example
+ * ```ts
+ * // Reading Decimal128 with precision=10, scale=2
+ * // Value 12345 with scale 2 becomes "123.45"
+ * const values = readDecimalValues(validity, data, 1, 2, 128)
+ * // Result: ["123.45"]
+ * ```
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readDecimalValues = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number,
+  /* precision: number, */
+  scale: number,
+  bitWidth: number
+): ReadonlyArray<string | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const byteWidth = bitWidth / 8
+  const values = new Array<string | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (validityChecker(i)) {
+      const start = i * byteWidth
+      const bigIntValue = bytesToBigInt(dataBuffer.subarray(start, start + byteWidth), true)
+      values[i] = formatDecimal(bigIntValue, scale)
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+// =============================================================================
+// Strings
+// =============================================================================
+
+/**
+ * Reads UTF-8 encoded string values from an Arrow Utf8 column's buffers.
+ *
+ * Arrow's Utf8 type uses a variable-length binary layout consisting of three buffers:
+ * 1. **Validity buffer**: Bitmap indicating which values are non-null
+ * 2. **Offset buffer**: Array of 32-bit integers where offset[i] gives the starting
+ * byte position of string i, and offset[i+1] - offset[i] gives its length
+ * 3. **Data buffer**: Contiguous UTF-8 encoded string data
+ *
+ * The offset buffer contains `length + 1` offsets (one extra to define the end of
+ * the last string). All strings are stored back-to-back in the data buffer.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param offsetBuffer - The offset buffer containing 32-bit integer offsets into the
+ * data buffer. Should contain `(length + 1) * 4` bytes.
+ * @param dataBuffer - The data buffer containing UTF-8 encoded string bytes.
+ * @param length - The number of string values to read.
+ *
+ * @returns An array of string values (or null for invalid entries).
+ *
+ * @example
+ * ```ts
+ * // Offsets: [0, 5, 10] means string 0 is bytes 0-4, string 1 is bytes 5-9
+ * // Data: "HelloWorld" (UTF-8 bytes)
+ * const values = readUtf8Values(validity, offsets, data, 2)
+ * // Result: ["Hello", "World"]
+ * ```
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#variable-size-binary-layout | Arrow Variable-Size Binary Layout}
+ */
+export const readUtf8Values = (
+  validityBuffer: Uint8Array,
+  offsetBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number
+): ReadonlyArray<string | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const offsetView = new DataView(offsetBuffer.buffer, offsetBuffer.byteOffset, offsetBuffer.byteLength)
+  const values = new Array<string | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (validityChecker(i)) {
+      const start = offsetView.getInt32(i * BYTES_PER_INT32, true)
+      const end = offsetView.getInt32((i + 1) * BYTES_PER_INT32, true)
+      values[i] = textDecoder.decode(dataBuffer.subarray(start, end))
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
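+
+// Usage sketch (illustrative): building concrete buffers with TypedArray views.
+// Three offsets delimit two strings in the shared data buffer.
+//
+//   const offsets = new Uint8Array(new Int32Array([0, 2, 4]).buffer)
+//   const data = new TextEncoder().encode("hiya")
+//   readUtf8Values(new Uint8Array([]), offsets, data, 2) // ⇒ ["hi", "ya"]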
+
+/**
+ * Reads UTF-8 encoded string values from an Arrow LargeUtf8 column's buffers.
+ *
+ * LargeUtf8 is identical to Utf8 except it uses 64-bit offsets instead of 32-bit,
+ * allowing for strings that exceed 2GB in total size. The layout is:
+ * 1. **Validity buffer**: Bitmap indicating which values are non-null
+ * 2. **Offset buffer**: Array of 64-bit integers (as `bigint`) for byte positions
+ * 3. **Data buffer**: Contiguous UTF-8 encoded string data
+ *
+ * **Important**: While Arrow supports 64-bit offsets, this implementation converts
+ * them to JavaScript numbers for use with TypedArray methods. Offsets exceeding
+ * `Number.MAX_SAFE_INTEGER` (2^53 - 1) will throw a `RangeError` to prevent silent
+ * precision loss.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param offsetBuffer - The offset buffer containing 64-bit integer offsets into the
+ * data buffer. Should contain `(length + 1) * 8` bytes.
+ * @param dataBuffer - The data buffer containing UTF-8 encoded string bytes.
+ * @param length - The number of string values to read.
+ *
+ * @returns An array of string values (or null for invalid entries).
+ *
+ * @throws {RangeError} If any offset exceeds `Number.MAX_SAFE_INTEGER`
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#variable-size-binary-layout | Arrow Variable-Size Binary Layout}
+ */
+export const readLargeUtf8Values = (
+  validityBuffer: Uint8Array,
+  offsetBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number
+): ReadonlyArray<string | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const offsetView = new DataView(offsetBuffer.buffer, offsetBuffer.byteOffset, offsetBuffer.byteLength)
+  const values = new Array<string | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (validityChecker(i)) {
+      const start = bigintToNumberSafe(offsetView.getBigInt64(i * BYTES_PER_INT64, true), `large-utf8 offset[${i}]`)
+      const end = bigintToNumberSafe(
+        offsetView.getBigInt64((i + 1) * BYTES_PER_INT64, true),
+        `large-utf8 offset[${i + 1}]`
+      )
+      values[i] = textDecoder.decode(dataBuffer.subarray(start, end))
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+// =============================================================================
+// Binary
+// =============================================================================
+
+/**
+ * Reads binary (byte array) values from an Arrow Binary column's buffers.
+ *
+ * Arrow's Binary type uses a variable-length layout identical to Utf8, but the
+ * data is treated as raw bytes rather than UTF-8 encoded text:
+ * 1. **Validity buffer**: Bitmap indicating which values are non-null
+ * 2. **Offset buffer**: Array of 32-bit integers for byte positions
+ * 3. **Data buffer**: Contiguous binary data
+ *
+ * Unlike string readers that decode to JavaScript strings, binary readers return
+ * `Uint8Array` slices. The returned arrays are **copies** of the data (via `slice()`)
+ * to ensure they remain valid even if the underlying buffer is modified or freed.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param offsetBuffer - The offset buffer containing 32-bit integer offsets into the
+ * data buffer. Should contain `(length + 1) * 4` bytes.
+ * @param dataBuffer - The data buffer containing the binary data.
+ * @param length - The number of binary values to read.
+ *
+ * @returns An array of `Uint8Array` values (or null for invalid entries). Each
+ * `Uint8Array` is an independent copy of the binary data.
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#variable-size-binary-layout | Arrow Variable-Size Binary Layout}
+ */
+export const readBinaryValues = (
+  validityBuffer: Uint8Array,
+  offsetBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number
+): ReadonlyArray<Uint8Array | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const offsetView = new DataView(offsetBuffer.buffer, offsetBuffer.byteOffset, offsetBuffer.byteLength)
+  const values = new Array<Uint8Array | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (validityChecker(i)) {
+      const start = offsetView.getInt32(i * BYTES_PER_INT32, true)
+      const end = offsetView.getInt32((i + 1) * BYTES_PER_INT32, true)
+      values[i] = dataBuffer.slice(start, end)
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+/**
+ * Reads binary (byte array) values from an Arrow LargeBinary column's buffers.
+ *
+ * LargeBinary is identical to Binary except it uses 64-bit offsets instead of 32-bit,
+ * allowing for binary data that exceeds 2GB in total size.
+ *
+ * **Important**: While Arrow supports 64-bit offsets, this implementation converts
+ * them to JavaScript numbers for use with TypedArray methods. Offsets exceeding
+ * `Number.MAX_SAFE_INTEGER` (2^53 - 1) will throw a `RangeError` to prevent silent
+ * precision loss.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param offsetBuffer - The offset buffer containing 64-bit integer offsets into the
+ * data buffer. Should contain `(length + 1) * 8` bytes.
+ * @param dataBuffer - The data buffer containing the binary data.
+ * @param length - The number of binary values to read.
+ *
+ * @returns An array of `Uint8Array` values (or null for invalid entries). Each
+ * `Uint8Array` is an independent copy of the binary data.
+ *
+ * @throws {RangeError} If any offset exceeds `Number.MAX_SAFE_INTEGER`
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#variable-size-binary-layout | Arrow Variable-Size Binary Layout}
+ */
+export const readLargeBinaryValues = (
+  validityBuffer: Uint8Array,
+  offsetBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number
+): ReadonlyArray<Uint8Array | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const offsetView = new DataView(offsetBuffer.buffer, offsetBuffer.byteOffset, offsetBuffer.byteLength)
+  const values = new Array<Uint8Array | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (validityChecker(i)) {
+      const start = bigintToNumberSafe(offsetView.getBigInt64(i * BYTES_PER_INT64, true), `large-binary offset[${i}]`)
+      const end = bigintToNumberSafe(
+        offsetView.getBigInt64((i + 1) * BYTES_PER_INT64, true),
+        `large-binary offset[${i + 1}]`
+      )
+      values[i] = dataBuffer.slice(start, end)
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+/**
+ * Reads fixed-size binary values from an Arrow FixedSizeBinary column's buffers.
+ *
+ * Unlike variable-length Binary, FixedSizeBinary stores values of a fixed byte width.
+ * All values occupy exactly `byteWidth` bytes, stored contiguously in the data buffer.
+ * This layout is more memory-efficient for fixed-size data like UUIDs (16 bytes),
+ * IPv6 addresses (16 bytes), or hashes (32 bytes for SHA-256).
+ *
+ * The layout consists of just two buffers:
+ * 1. **Validity buffer**: Bitmap indicating which values are non-null
+ * 2. **Data buffer**: Contiguous fixed-size binary values
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing fixed-size binary values. Should
+ * contain at least `length * byteWidth` bytes.
+ * @param length - The number of binary values to read.
+ * @param byteWidth - The fixed size in bytes of each value.
+ *
+ * @returns An array of `Uint8Array` values (or null for invalid entries). Each
+ * `Uint8Array` has exactly `byteWidth` bytes and is an independent copy.
+ *
+ * @example
+ * ```ts
+ * // Reading UUIDs (16 bytes each)
+ * const uuids = readFixedSizeBinaryValues(validity, data, 3, 16)
+ * // Each element is a 16-byte Uint8Array
+ * ```
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Fixed-Size Layout}
+ */
+export const readFixedSizeBinaryValues = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number,
+  byteWidth: number
+): ReadonlyArray<Uint8Array | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const values = new Array<Uint8Array | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (validityChecker(i)) {
+      const start = i * byteWidth
+      values[i] = dataBuffer.slice(start, start + byteWidth)
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+// =============================================================================
+// Temporal
+// =============================================================================
+
+/**
+ * Reads date values from an Arrow Date (DAY unit) column's buffers.
+ *
+ * Arrow's Date type with DAY unit stores dates as 32-bit signed integers representing
+ * the number of days since the Unix epoch (January 1, 1970). This compact representation
+ * is useful for storing dates without time components.
+ *
+ * Values are converted to JavaScript `Date` objects set to midnight UTC on the
+ * corresponding date.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing 32-bit day counts. Should contain
+ * at least `length * 4` bytes.
+ * @param length - The number of date values to read.
+ *
+ * @returns An array of JavaScript `Date` objects (or null for invalid entries).
+ * Each date is set to midnight UTC on the corresponding day.
+ *
+ * @example
+ * ```ts
+ * // Day 0 = 1970-01-01, Day 1 = 1970-01-02, Day 18628 = 2021-01-01
+ * const dates = readDateDayValues(validity, data, 1)
+ * // Result: [Date("2021-01-01T00:00:00.000Z")]
+ * ```
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readDateDayValues = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number
+): ReadonlyArray<Date | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<Date | null>(length)
+  for (let i = 0; i < length; i++) {
+    values[i] = validityChecker(i) ? new Date(view.getInt32(i * BYTES_PER_INT32, true) * MS_PER_DAY) : null
+  }
+  return values
+}
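+
+// Usage sketch (illustrative): building the day-count buffer directly with an
+// Int32Array view; day 0 maps to the Unix epoch itself.
+//
+//   const data = new Uint8Array(new Int32Array([0, 1]).buffer)
+//   readDateDayValues(new Uint8Array([]), data, 2)
+//   // ⇒ [1970-01-01T00:00:00.000Z, 1970-01-02T00:00:00.000Z]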
+
+/**
+ * Reads date values from an Arrow Date (MILLISECOND unit) column's buffers.
+ *
+ * Arrow's Date type with MILLISECOND unit stores dates as 64-bit signed integers
+ * representing milliseconds since the Unix epoch (January 1, 1970 00:00:00 UTC).
+ * This allows sub-day precision while still being a date-oriented type.
+ *
+ * **Important**: The 64-bit millisecond values are converted to JavaScript numbers
+ * for use with the `Date` constructor. Values exceeding `Number.MAX_SAFE_INTEGER`
+ * will throw a `RangeError`.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing 64-bit millisecond counts. Should
+ * contain at least `length * 8` bytes.
+ * @param length - The number of date values to read.
+ *
+ * @returns An array of JavaScript `Date` objects (or null for invalid entries).
+ *
+ * @throws {RangeError} If any millisecond value exceeds `Number.MAX_SAFE_INTEGER`
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readDateMillisecondValues = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number
+): ReadonlyArray<Date | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<Date | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (validityChecker(i)) {
+      const ms = bigintToNumberSafe(view.getBigInt64(i * BYTES_PER_INT64, true), `date-millisecond[${i}]`)
+      values[i] = new Date(ms)
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+/**
+ * Reads timestamp values from an Arrow Timestamp column's buffers.
+ *
+ * Arrow's Timestamp type stores date/time values as 64-bit signed integers with a
+ * configurable time unit (SECOND, MILLISECOND, MICROSECOND, or NANOSECOND). Timestamps
+ * may optionally have an associated timezone, though this reader does not adjust for
+ * timezone; it returns JavaScript `Date` objects in UTC.
+ *
+ * The conversion to JavaScript `Date` varies by unit:
+ * - **SECOND**: Multiply by 1000 to get milliseconds
+ * - **MILLISECOND**: Use directly (with overflow checking)
+ * - **MICROSECOND**: Divide by 1000 (loses sub-millisecond precision)
+ * - **NANOSECOND**: Divide by 1,000,000 (loses sub-millisecond precision)
+ *
+ * **Note**: JavaScript Date only supports millisecond precision. Sub-millisecond
+ * precision in MICROSECOND and NANOSECOND timestamps is truncated, not rounded.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing 64-bit timestamp values. Should
+ * contain at least `length * 8` bytes.
+ * @param length - The number of timestamp values to read.
+ * @param unit - The time unit of the stored values: "SECOND", "MILLISECOND",
+ * "MICROSECOND", or "NANOSECOND".
+ *
+ * @returns An array of JavaScript `Date` objects (or null for invalid entries).
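+ *
+ * @example
+ * ```ts
+ * // Illustrative: 1_500_000n microseconds ⇒ 1.5 s after the epoch
+ * const data = new Uint8Array(new BigInt64Array([1_500_000n]).buffer)
+ * readTimestampValues(new Uint8Array([]), data, 1, "MICROSECOND")
+ * // ⇒ [Date("1970-01-01T00:00:01.500Z")]
+ * ```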
+ *
+ * @throws {RangeError} If SECOND or MILLISECOND values exceed safe integer range
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readTimestampValues = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number,
+  unit: keyof typeof TimeUnit
+): ReadonlyArray<Date | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<Date | null>(length)
+  const converter = TIMESTAMP_UNIT_TO_MS[unit]
+
+  for (let i = 0; i < length; i++) {
+    if (validityChecker(i)) {
+      const rawValue = view.getBigInt64(i * BYTES_PER_INT64, true)
+      values[i] = new Date(converter(rawValue, i))
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+/**
+ * Reads time-of-day values from an Arrow Time column's buffers.
+ *
+ * Arrow's Time type represents a time of day (without date) with configurable
+ * precision. It stores values as either 32-bit or 64-bit integers depending on
+ * the unit:
+ * - **SECOND** (32-bit): Seconds since midnight
+ * - **MILLISECOND** (32-bit): Milliseconds since midnight
+ * - **MICROSECOND** (64-bit): Microseconds since midnight
+ * - **NANOSECOND** (64-bit): Nanoseconds since midnight
+ *
+ * Values are returned as numbers representing milliseconds since midnight, suitable
+ * for time calculations or displaying as time strings.
+ *
+ * **Note**: When converting from MICROSECOND or NANOSECOND, the result may have
+ * fractional milliseconds. For NANOSECOND times, this preserves sub-millisecond
+ * precision in the number representation.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing time values. Should contain at
+ * least `length * (bitWidth / 8)` bytes.
+ * @param length - The number of time values to read.
+ * @param unit - The time unit: "SECOND", "MILLISECOND", "MICROSECOND", or "NANOSECOND".
+ * @param bitWidth - The bit width of stored values: 32 for SECOND/MILLISECOND,
+ * 64 for MICROSECOND/NANOSECOND.
+ *
+ * @returns An array of numbers representing milliseconds since midnight (or null
+ * for invalid entries). May include fractional milliseconds for high-precision units.
+ *
+ * @throws {RangeError} If 64-bit time values exceed `Number.MAX_SAFE_INTEGER`
+ *
+ * @example
+ * ```ts
+ * // Time 12:30:45.123 in milliseconds = 45045123
+ * const times = readTimeValues(validity, data, 1, "MILLISECOND", 32)
+ * // Result: [45045123]
+ * ```
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readTimeValues = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number,
+  unit: keyof typeof TimeUnit,
+  bitWidth: TimeBitWidth
+): ReadonlyArray<number | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const multiplier = TIME_UNIT_TO_MS[unit]
+  const values = new Array<number | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (validityChecker(i)) {
+      const raw = bitWidth === 32
+        ? 
view.getInt32(i * BYTES_PER_INT32, true) + : bigintToNumberSafe(view.getBigInt64(i * BYTES_PER_INT64, true), `time[${i}]`) + values[i] = raw * multiplier + } else { + values[i] = null + } + } + return values +} + +/** + * Reads duration values from an Arrow Duration column's buffers. + * + * Arrow's Duration type represents a length of time (not tied to any calendar or + * timezone) as a 64-bit signed integer with a configurable time unit. Unlike timestamps, + * durations are relative time spans suitable for representing intervals like "5 seconds" + * or "100 nanoseconds". + * + * Values are returned as objects containing the raw `bigint` value and the unit string, + * allowing consumers to perform precise calculations or convert to other representations + * as needed. + * + * @param validityBuffer - The validity bitmap indicating which values are non-null. + * An empty buffer indicates all values are valid. + * @param dataBuffer - The data buffer containing 64-bit duration values. Should + * contain at least `length * 8` bytes. + * @param length - The number of duration values to read. + * @param unit - The time unit: "SECOND", "MILLISECOND", "MICROSECOND", or "NANOSECOND". + * + * @returns An array of objects with `{ value: bigint, unit: string }` (or null for + * invalid entries). The `value` is the raw count in the specified unit. + * + * @example + * ```ts + * const durations = readDurationValues(validity, data, 2, "MILLISECOND") + * // Result: [{ value: 5000n, unit: "MILLISECOND" }, { value: 10000n, unit: "MILLISECOND" }] + * ``` + * + * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout} + */ +export const readDurationValues = ( + validityBuffer: Uint8Array, + dataBuffer: Uint8Array, + length: number, + unit: keyof typeof TimeUnit +): ReadonlyArray<{ value: bigint; unit: string } | null> => { + const validityChecker = createValidityChecker(validityBuffer) + const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength) + const values = new Array<{ value: bigint; unit: string } | null>(length) + for (let i = 0; i < length; i++) { + values[i] = validityChecker(i) ? { unit, value: view.getBigInt64(i * BYTES_PER_INT64, true) } : null + } + return values +} + +/** + * Reads interval values from an Arrow Interval (YEAR_MONTH unit) column's buffers. + * + * Arrow's Interval type with YEAR_MONTH unit stores calendar intervals as 32-bit + * signed integers representing a number of months. This is useful for date arithmetic + * that should respect calendar boundaries (e.g., "add 1 month" should go from + * Jan 31 to Feb 28/29, not exactly 30 days). + * + * Values are returned as objects with a `months` property, which can be positive + * (future) or negative (past). + * + * @param validityBuffer - The validity bitmap indicating which values are non-null. + * An empty buffer indicates all values are valid. + * @param dataBuffer - The data buffer containing 32-bit month counts. Should contain + * at least `length * 4` bytes. + * @param length - The number of interval values to read. + * + * @returns An array of objects with `{ months: number }` (or null for invalid entries). + * For example, 14 months = 1 year and 2 months. 
+ * + * @example + * ```ts + * const intervals = readIntervalYearMonthValues(validity, data, 2) + * // Result: [{ months: 12 }, { months: -3 }] // 1 year, negative 3 months + * ``` + * + * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout} + */ +export const readIntervalYearMonthValues = ( + validityBuffer: Uint8Array, + dataBuffer: Uint8Array, + length: number +): ReadonlyArray<{ months: number } | null> => { + const validityChecker = createValidityChecker(validityBuffer) + const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength) + const values = new Array<{ months: number } | null>(length) + for (let i = 0; i < length; i++) { + values[i] = validityChecker(i) ? { months: view.getInt32(i * BYTES_PER_INT32, true) } : null + } + return values +} + +/** + * Reads interval values from an Arrow Interval (DAY_TIME unit) column's buffers. + * + * Arrow's Interval type with DAY_TIME unit stores intervals as pairs of 32-bit + * signed integers: one for days and one for milliseconds within a day. This provides + * more precision than YEAR_MONTH while still separating day-level and sub-day components. + * + * Each value occupies 8 bytes: + * - Bytes 0-3: Days (32-bit signed int) + * - Bytes 4-7: Milliseconds (32-bit signed int) + * + * Values are returned as objects with `days` and `milliseconds` properties. Both + * can be negative for intervals in the past. + * + * @param validityBuffer - The validity bitmap indicating which values are non-null. + * An empty buffer indicates all values are valid. + * @param dataBuffer - The data buffer containing 8-byte day/time pairs. Should contain + * at least `length * 8` bytes. + * @param length - The number of interval values to read. + * + * @returns An array of objects with `{ days: number, milliseconds: number }` (or null + * for invalid entries). + * + * @example + * ```ts + * const intervals = readIntervalDayTimeValues(validity, data, 1) + * // Result: [{ days: 5, milliseconds: 43200000 }] // 5 days and 12 hours + * ``` + * + * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout} + */ +export const readIntervalDayTimeValues = ( + validityBuffer: Uint8Array, + dataBuffer: Uint8Array, + length: number +): ReadonlyArray<{ days: number; milliseconds: number } | null> => { + const validityChecker = createValidityChecker(validityBuffer) + const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength) + const values = new Array<{ days: number; milliseconds: number } | null>(length) + for (let i = 0; i < length; i++) { + if (validityChecker(i)) { + const offset = i * BYTES_PER_INT64 + values[i] = { days: view.getInt32(offset, true), milliseconds: view.getInt32(offset + BYTES_PER_INT32, true) } + } else { + values[i] = null + } + } + return values +} + +/** + * Reads interval values from an Arrow Interval (MONTH_DAY_NANO unit) column's buffers. + * + * Arrow's Interval type with MONTH_DAY_NANO unit provides the most comprehensive + * interval representation, storing months, days, and nanoseconds separately. This + * allows expressing intervals like "1 month, 2 days, and 500 nanoseconds" without + * ambiguity about month lengths or day durations. 
+ *
+ * Each value occupies 16 bytes:
+ * - Bytes 0-3: Months (32-bit signed int)
+ * - Bytes 4-7: Days (32-bit signed int)
+ * - Bytes 8-15: Nanoseconds (64-bit signed int)
+ *
+ * The nanosecond component is returned as a `bigint` to preserve full precision,
+ * as it can represent values too large for JavaScript's number type.
+ *
+ * @param validityBuffer - The validity bitmap indicating which values are non-null.
+ * An empty buffer indicates all values are valid.
+ * @param dataBuffer - The data buffer containing 16-byte interval values. Should
+ * contain at least `length * 16` bytes.
+ * @param length - The number of interval values to read.
+ *
+ * @returns An array of objects with `{ months: number, days: number, nanoseconds: bigint }`
+ * (or null for invalid entries).
+ *
+ * @example
+ * ```ts
+ * const intervals = readIntervalMonthDayNanoValues(validity, data, 1)
+ * // Result: [{ months: 1, days: 15, nanoseconds: 3600000000000n }] // 1 month, 15 days, 1 hour
+ * ```
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html#fixed-size-primitive-layout | Arrow Primitive Layout}
+ */
+export const readIntervalMonthDayNanoValues = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number
+): ReadonlyArray<{ months: number; days: number; nanoseconds: bigint } | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<{ months: number; days: number; nanoseconds: bigint } | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (validityChecker(i)) {
+      const offset = i * BYTES_PER_INTERVAL_MONTH_DAY_NANO
+      values[i] = {
+        days: view.getInt32(offset + BYTES_PER_INT32, true),
+        months: view.getInt32(offset, true),
+        nanoseconds: view.getBigInt64(offset + BYTES_PER_INT64, true)
+      }
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+// =============================================================================
+// Lists
+// =============================================================================
+
+const readListValues = (
+  column: DecodedColumn,
+  registry?: DictionaryRegistry
+): ReadonlyArray<ReadonlyArray<unknown> | null> => {
+  const { buffers, children, node } = column
+  const length = Number(node.length)
+  const vc = createValidityChecker(buffers[0])
+  const ov = new DataView(buffers[1].buffer, buffers[1].byteOffset, buffers[1].byteLength)
+  const childValues = readColumnValues(children[0], registry)
+  const values = new Array<ReadonlyArray<unknown> | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (vc(i)) {
+      values[i] = childValues.slice(
+        ov.getInt32(i * BYTES_PER_INT32, true),
+        ov.getInt32((i + 1) * BYTES_PER_INT32, true)
+      )
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
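+
+// Usage sketch (illustrative): with offsets [0, 2, 3], row 0 spans child values
+// 0-1 and row 1 spans child value 2, so a child column decoding to [1, 2, 3]
+// yields the nested rows [[1, 2], [3]]. Null rows skip the offset lookup.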
+
+const readLargeListValues = (
+  column: DecodedColumn,
+  registry?: DictionaryRegistry
+): ReadonlyArray<ReadonlyArray<unknown> | null> => {
+  const { buffers, children, node } = column
+  const length = Number(node.length)
+  const vc = createValidityChecker(buffers[0])
+  const ov = new DataView(buffers[1].buffer, buffers[1].byteOffset, buffers[1].byteLength)
+  const childValues = readColumnValues(children[0], registry)
+  const values = new Array<ReadonlyArray<unknown> | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (vc(i)) {
+      const start = bigintToNumberSafe(ov.getBigInt64(i * BYTES_PER_INT64, true), `large-list offset[${i}]`)
+      const end = bigintToNumberSafe(ov.getBigInt64((i + 1) * BYTES_PER_INT64, true), `large-list offset[${i + 1}]`)
+      values[i] = childValues.slice(start, end)
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+const readFixedSizeListValues = (
+  column: DecodedColumn,
+  listSize: number,
+  registry?: DictionaryRegistry
+): ReadonlyArray<ReadonlyArray<unknown> | null> => {
+  const { buffers, children, node } = column
+  const length = Number(node.length)
+  const vc = createValidityChecker(buffers[0])
+  const childValues = readColumnValues(children[0], registry)
+  const values = new Array<ReadonlyArray<unknown> | null>(length)
+  for (let i = 0; i < length; i++) {
+    values[i] = vc(i) ? childValues.slice(i * listSize, (i + 1) * listSize) : null
+  }
+  return values
+}
+
+// =============================================================================
+// Structs
+// =============================================================================
+
+const readStructValues = (
+  column: DecodedColumn,
+  registry?: DictionaryRegistry
+): ReadonlyArray<Record<string, unknown> | null> => {
+  const { buffers, children, node } = column
+  const length = Number(node.length)
+  const vc = createValidityChecker(buffers[0])
+  const childValuesMap = new Map<string, ReadonlyArray<unknown>>(
+    children.map((c) => [c.field.name, readColumnValues(c, registry)])
+  )
+  const values = new Array<Record<string, unknown> | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (vc(i)) {
+      const obj: Record<string, unknown> = {}
+      for (const c of children) obj[c.field.name] = childValuesMap.get(c.field.name)![i]
+      values[i] = obj
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+// =============================================================================
+// Maps
+// =============================================================================
+
+const readMapValues = (
+  column: DecodedColumn,
+  registry?: DictionaryRegistry
+): ReadonlyArray<
+  ReadonlyArray<{
+    readonly key: unknown
+    readonly value: unknown
+  }> | null
+> => {
+  const { buffers, children, node } = column
+  const length = Number(node.length)
+  const vc = createValidityChecker(buffers[0])
+  const ov = new DataView(buffers[1].buffer, buffers[1].byteOffset, buffers[1].byteLength)
+  const entriesValues = readColumnValues(children[0], registry) as Array<{ key: unknown; value: unknown } | null>
+  const values = new Array<Array<{ key: unknown; value: unknown }> | null>(length)
+  for (let i = 0; i < length; i++) {
+    if (vc(i)) {
+      const start = ov.getInt32(i * BYTES_PER_INT32, true)
+      const end = ov.getInt32((i + 1) * BYTES_PER_INT32, true)
+      const entries = new Array<{ key: unknown; value: unknown }>(end - start)
+      let idx = 0
+      for (let j = start; j < end; j++) {
+        const e = entriesValues[j]
+        if (e) entries[idx++] = { key: e.key, value: e.value }
+      }
+      values[i] = entries.length === idx ? entries : entries.slice(0, idx)
+    } else {
+      values[i] = null
+    }
+  }
+  return values
+}
+
+// =============================================================================
+// Unions
+// =============================================================================
+
+const readUnionValues = (
+  column: DecodedColumn,
+  mode: keyof typeof UnionMode,
+  typeIds: ReadonlyArray<number>,
+  registry?: DictionaryRegistry
+): ReadonlyArray<unknown> => {
+  const { buffers, children, node } = column
+  const length = Number(node.length)
+  const typeIdBuffer = buffers[0]
+  const offsetView = mode === "DENSE"
+    ? new DataView(buffers[1].buffer, buffers[1].byteOffset, buffers[1].byteLength)
+    : null
+  const childValuesArrays = children.map((c) => readColumnValues(c, registry))
+  const typeIdToChild = new Map<number, number>(typeIds.map((id, idx) => [id, idx]))
+  const values = new Array<unknown>(length)
+  for (let i = 0; i < length; i++) {
+    const childIdx = typeIdToChild.get(typeIdBuffer[i])
+    if (childIdx === undefined) {
+      values[i] = null
+    } else if (mode === "DENSE" && offsetView) {
+      values[i] = childValuesArrays[childIdx][offsetView.getInt32(i * BYTES_PER_INT32, true)]
+    } else {
+      values[i] = childValuesArrays[childIdx][i]
+    }
+  }
+  return values
+}
+
+// =============================================================================
+// Dictionary Encoding
+// =============================================================================
+
+/**
+ * Read dictionary indices from a dictionary-encoded column.
+ * Dictionary-encoded columns store integer indices that reference into a dictionary.
+ */
+const readDictionaryIndices = (
+  validityBuffer: Uint8Array,
+  dataBuffer: Uint8Array,
+  length: number,
+  indexType: { bitWidth: IntBitWidth; isSigned: boolean }
+): ReadonlyArray<number | null> => {
+  const validityChecker = createValidityChecker(validityBuffer)
+  const view = new DataView(dataBuffer.buffer, dataBuffer.byteOffset, dataBuffer.byteLength)
+  const values = new Array<number | null>(length)
+
+  for (let i = 0; i < length; i++) {
+    if (!validityChecker(i)) {
+      values[i] = null
+      continue
+    }
+
+    let index: number
+    switch (indexType.bitWidth) {
+      case 8:
+        index = indexType.isSigned ? view.getInt8(i) : view.getUint8(i)
+        break
+      case 16:
+        index = indexType.isSigned
+          ? view.getInt16(i * BYTES_PER_INT16, true)
+          : view.getUint16(i * BYTES_PER_INT16, true)
+        break
+      case 32:
+        index = indexType.isSigned
+          ? view.getInt32(i * BYTES_PER_INT32, true)
+          : view.getUint32(i * BYTES_PER_INT32, true)
+        break
+      case 64: {
+        // 64-bit indices are converted to number; bigintToNumberSafe throws a
+        // RangeError instead of silently losing precision for very large indices
+        const bigIndex = indexType.isSigned
+          ? view.getBigInt64(i * BYTES_PER_INT64, true)
+          : view.getBigUint64(i * BYTES_PER_INT64, true)
+        index = bigintToNumberSafe(bigIndex, `dictionary index[${i}]`)
+        break
+      }
+    }
+    values[i] = index
+  }
+
+  return values
+}
+
+/**
+ * Read values from a dictionary-encoded column by looking up indices in the dictionary.
+ */
+const readDictionaryEncodedValues = (
+  column: DecodedColumn,
+  dictionaryEncoding: DictionaryEncoding,
+  registry: DictionaryRegistry
+): ReadonlyArray<unknown> => {
+  const { buffers, node } = column
+  const length = Number(node.length)
+
+  // Get the dictionary from the registry
+  const dictionary = registry.get(dictionaryEncoding.id)
+  if (!dictionary) {
+    throw new Error(
+      `Dictionary with ID ${dictionaryEncoding.id} not found. ` +
+        `Make sure dictionary batches are processed before record batches that reference them.`
+    )
+  }
+
+  // Read the indices
+  const indices = readDictionaryIndices(
+    buffers[0],
+    buffers[1],
+    length,
+    dictionaryEncoding.indexType
+  )
+
+  // Look up values in the dictionary
+  const dictValues = dictionary.values
+  const values = new Array<unknown>(length)
+  for (let i = 0; i < length; i++) {
+    const index = indices[i]
+    values[i] = index === null ? null : dictValues[index]
+  }
+
+  return values
+}
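+
+// Flow sketch (hypothetical IDs): a dictionary batch registers { 0: "red", 1: "blue" }
+// under dictionary ID 7; a record batch column with indices [1, 0, null, 1] then
+// decodes to ["blue", "red", null, "blue"] via the registry lookup above.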
+
+// =============================================================================
+// Columns
+// =============================================================================
+
+/**
+ * Reads and decodes values from a decoded Arrow column based on its data type.
+ *
+ * This is the primary entry point for extracting typed values from Arrow columnar data.
+ * It dispatches to the appropriate specialized reader function based on the column's
+ * Arrow data type, handling all supported Arrow types including:
+ *
+ * **Primitive types**: Null, Bool, Int8/16/32/64, UInt8/16/32/64, Float16/32/64, Decimal128/256
+ *
+ * **String/Binary types**: Utf8, LargeUtf8, Binary, LargeBinary, FixedSizeBinary
+ *
+ * **Temporal types**: Date (day/millisecond), Time (second/milli/micro/nano),
+ * Timestamp (second/milli/micro/nano), Duration, Interval (year-month/day-time/month-day-nano)
+ *
+ * **Nested types**: List, LargeList, FixedSizeList, Struct, Map, Union (sparse/dense)
+ *
+ * **Dictionary encoding**: Automatically resolves dictionary indices to actual values
+ * when a `DictionaryRegistry` is provided.
+ *
+ * The function handles validity bitmaps automatically, returning `null` for invalid
+ * (null) values in the column.
+ *
+ * @param column - The decoded column containing field metadata, node information,
+ * and extracted buffers. Obtained from `decodeRecordBatch`.
+ * @param registry - Optional dictionary registry for resolving dictionary-encoded
+ * columns. **Required** if the column or any nested child column uses dictionary
+ * encoding; an error will be thrown if dictionary encoding is detected but no
+ * registry is provided.
+ *
+ * @returns An array of decoded values corresponding to the column's data type.
+ * Returns `ReadonlyArray<unknown>` to accommodate all possible Arrow types.
+ * The actual runtime types depend on the column's Arrow type:
+ * - Primitives: `number | null`, `bigint | null`, `boolean | null`, `string | null`
+ * - Temporal: `Date | null`, `number | null`, `{ value: bigint, unit: string } | null`
+ * - Intervals: Objects with months/days/milliseconds/nanoseconds properties
+ * - Nested: Arrays and objects containing recursively decoded values
+ *
+ * @throws {Error} If a dictionary-encoded column is encountered without a registry
+ * @throws {Error} If a referenced dictionary ID is not found in the registry
+ * @throws {RangeError} If 64-bit offsets or values exceed `Number.MAX_SAFE_INTEGER`
+ *
+ * @example
+ * ```ts
+ * // Basic usage with a decoded record batch
+ * const decodedBatch = decodeRecordBatch(recordBatch, body, schema)
+ * const nameColumn = decodedBatch.columns[0]
+ * const names = readColumnValues(nameColumn) // values are `string | null` at runtime
+ *
+ * // With dictionary encoding
+ * const registry = new DictionaryRegistry()
+ * decodeDictionaryBatch(dictBatch, body, schema, registry, readColumnValues)
+ * const categoryColumn = decodedBatch.columns[1]
+ * const categories = readColumnValues(categoryColumn, registry)
+ * ```
+ *
+ * @see {@link https://arrow.apache.org/docs/format/Columnar.html | Arrow Columnar Format}
+ */
+export const readColumnValues = (
+  column: DecodedColumn,
+  registry?: DictionaryRegistry
+): ReadonlyArray<unknown> => {
+  const { buffers, field, node } = column
+  const length = Number(node.length)
+  const type = field.type
+
+  // Handle dictionary-encoded columns
+  if (field.dictionaryEncoding) {
+    if (!registry) {
+      throw new Error(
+        `Column "${field.name}" is dictionary-encoded but no dictionary registry was provided. 
` + + `Pass a DictionaryRegistry to readColumnValues for dictionary-encoded columns.` + ) + } + return readDictionaryEncodedValues(column, field.dictionaryEncoding, registry) + } + + switch (type.typeId) { + case "null": { + return new Array(length).fill(null) + } + case "bool": { + return readBoolValues(buffers[0], buffers[1], length) + } + case "int": { + switch (type.bitWidth) { + case 8: { + return readInt8Values(buffers[0], buffers[1], length, type.isSigned) + } + case 16: { + return readInt16Values(buffers[0], buffers[1], length, type.isSigned) + } + case 32: { + return readInt32Values(buffers[0], buffers[1], length, type.isSigned) + } + case 64: { + return readInt64Values(buffers[0], buffers[1], length, type.isSigned) + } + } + } + case "float": { + switch (type.precision) { + case "HALF": { + return readFloat16Values(buffers[0], buffers[1], length) + } + case "SINGLE": { + return readFloat32Values(buffers[0], buffers[1], length) + } + case "DOUBLE": { + return readFloat64Values(buffers[0], buffers[1], length) + } + } + } + case "decimal": { + return readDecimalValues(buffers[0], buffers[1], length, /* type.precision, */ type.scale, type.bitWidth) + } + case "utf8": { + return readUtf8Values(buffers[0], buffers[1], buffers[2], length) + } + case "large-utf8": { + return readLargeUtf8Values(buffers[0], buffers[1], buffers[2], length) + } + case "binary": { + return readBinaryValues(buffers[0], buffers[1], buffers[2], length) + } + case "large-binary": { + return readLargeBinaryValues(buffers[0], buffers[1], buffers[2], length) + } + case "fixed-size-binary": { + return readFixedSizeBinaryValues(buffers[0], buffers[1], length, type.byteWidth) + } + case "date": { + return type.unit === "DAY" + ? readDateDayValues(buffers[0], buffers[1], length) + : readDateMillisecondValues(buffers[0], buffers[1], length) + } + case "time": { + return readTimeValues(buffers[0], buffers[1], length, type.unit, type.bitWidth) + } + case "timestamp": { + return readTimestampValues(buffers[0], buffers[1], length, type.unit) + } + case "duration": { + return readDurationValues(buffers[0], buffers[1], length, type.unit) + } + case "interval": { + switch (type.unit) { + case "YEAR_MONTH": { + return readIntervalYearMonthValues(buffers[0], buffers[1], length) + } + case "DAY_TIME": { + return readIntervalDayTimeValues(buffers[0], buffers[1], length) + } + case "MONTH_DAY_NANO": { + return readIntervalMonthDayNanoValues(buffers[0], buffers[1], length) + } + } + } + case "list": { + return readListValues(column, registry) + } + case "large-list": { + return readLargeListValues(column, registry) + } + case "fixed-size-list": { + return readFixedSizeListValues(column, type.listSize, registry) + } + case "struct": { + return readStructValues(column, registry) + } + case "map": { + return readMapValues(column, registry) + } + case "union": { + return readUnionValues(column, type.mode, type.typeIds, registry) + } + } +} diff --git a/packages/amp/src/internal/arrow-flight-ipc/RecordBatch.ts b/packages/amp/src/internal/arrow-flight-ipc/RecordBatch.ts new file mode 100644 index 0000000..165a879 --- /dev/null +++ b/packages/amp/src/internal/arrow-flight-ipc/RecordBatch.ts @@ -0,0 +1,660 @@ +/** + * Arrow RecordBatch Types and Parser + * + * This module provides types and parsing utilities for Arrow IPC RecordBatch messages. + * RecordBatch messages contain the metadata describing how to interpret + * the binary body data (buffers and their layouts). 
+ *
+ * References:
+ * - Message.fbs (defines RecordBatch): https://github.com/apache/arrow/blob/main/format/Message.fbs
+ * - IPC Format: https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format
+ *
+ * @internal
+ */
+import * as Effect from "effect/Effect"
+import * as Predicate from "effect/Predicate"
+import { MissingFieldError, UnexpectedMessageTypeError } from "./Errors.ts"
+import { FlatBufferReader } from "./FlatBufferReader.ts"
+import {
+  type ArrowDataType,
+  type ArrowField,
+  type ArrowSchema,
+  type FlightData,
+  getMessageType,
+  MessageHeaderType
+} from "./Schema.ts"
+
+// =============================================================================
+// Constants
+// =============================================================================
+
+export const BufferType = {
+  /** Validity bitmap (null / non-null) */
+  VALIDITY: 0,
+  /** Offset buffer for variable-length types */
+  OFFSET: 1,
+  /** Large offset buffer (int64) for large types */
+  LARGE_OFFSET: 2,
+  /** Data buffer containing actual values */
+  DATA: 3,
+  /** Type IDs for union types */
+  TYPE_ID: 4
+} as const
+export type BufferType = typeof BufferType[keyof typeof BufferType]
+
+// =============================================================================
+// Record Batch Types
+// =============================================================================
+
+/**
+ * Represents a parsed `RecordBatch` message.
+ *
+ * The `RecordBatch` contains metadata about how to interpret the body:
+ * - `nodes`: One `FieldNode` per field (including nested), describing validity/length
+ * - `buffers`: Buffer locations within the body for all field data
+ * - `compression`: Optional compression info
+ */
+export class RecordBatch {
+  /**
+   * The number of rows in this batch.
+   */
+  readonly length: bigint
+
+  /**
+   * One field node per field in depth-first order. For nested types, the
+   * parent comes before its children.
+   */
+  readonly nodes: ReadonlyArray<FieldNode>
+
+  /**
+   * The location of all buffers in the message body, the order of which matches
+   * the flattened schema (depth-first).
+   */
+  readonly buffers: ReadonlyArray<BufferDescriptor>
+
+  /**
+   * Optional compression information.
+   */
+  readonly compression: BodyCompression | undefined
+
+  constructor(
+    length: bigint,
+    nodes: ReadonlyArray<FieldNode>,
+    buffers: ReadonlyArray<BufferDescriptor>,
+    compression?: BodyCompression | undefined
+  ) {
+    this.length = length
+    this.nodes = nodes
+    this.buffers = buffers
+    this.compression = compression
+  }
+}
+
+/**
+ * Metadata about a single field's data in the `RecordBatch`. There is one
+ * `FieldNode` per field (including nested children).
+ */
+export class FieldNode {
+  /**
+   * The number of values in this field (may differ from batch length for nested
+   * fields).
+   */
+  readonly length: bigint
+
+  /**
+   * The number of null values.
+   */
+  readonly nullCount: bigint
+
+  constructor(length: bigint, nullCount: bigint) {
+    this.length = length
+    this.nullCount = nullCount
+  }
+}
+
+/**
+ * Describes location of a buffer within the message body. Buffers are stored
+ * contiguously in the body, aligned to 8-byte chunks.
+ */
+export class BufferDescriptor {
+  /**
+   * The offset of the buffer from the start of the message body.
+   */
+  readonly offset: bigint
+
+  /**
+   * The length of the buffer in bytes.
+ */ + readonly length: bigint + + constructor(offset: bigint, length: bigint) { + this.offset = offset + this.length = length + } +} + +export const CompressionCodec = { + LZ4_FRAME: 0, + ZSTD: 1 +} as const +export type CompressionCodec = typeof CompressionCodec[keyof typeof CompressionCodec] + +export const BodyCompressionMethod = { + /** + * Indicates that each buffer is compressed individually. + */ + BUFFER: 0 +} as const +export type BodyCompressionMethod = typeof BodyCompressionMethod[keyof typeof BodyCompressionMethod] + +export class BodyCompression { + /** + * The compression codec that was used. + */ + readonly codec: CompressionCodec + + /** + * The method that was used for compressing buffers. + */ + readonly method: BodyCompressionMethod + + constructor(codec: CompressionCodec, method: BodyCompressionMethod) { + this.codec = codec + this.method = method + } +} + +// ============================================================================= +// Dictionary Batch Types +// ============================================================================= + +/** + * Represents a parsed `DictionaryBatch` message. + * + * A `DictionaryBatch` associates a dictionary ID with dictionary data. The + * dictionary data is stored as a `RecordBatch` containing a single column + * with the dictionary values. + * + * References: + * - https://arrow.apache.org/docs/format/Columnar.html#dictionary-encoding + */ +export class DictionaryBatch { + /** + * The unique identifier for this dictionary. This ID is referenced by + * fields that use this dictionary for encoding. + */ + readonly id: bigint + + /** + * The dictionary data as a `RecordBatch`. This contains a single field + * with the dictionary values. + */ + readonly data: RecordBatch + + /** + * If `true`, this batch is a delta that should be appended to an existing + * dictionary with the same ID. If `false`, this replaces any existing + * dictionary with the same ID. + */ + readonly isDelta: boolean + + constructor(id: bigint, data: RecordBatch, isDelta: boolean) { + this.id = id + this.data = data + this.isDelta = isDelta + } +} + +// ============================================================================= +// Buffer Types +// ============================================================================= + +/** + * Calculates the expected buffer layout for a schema. This is needed to + * correctly interpret the buffers in a RecordBatch. + */ +export class BufferLayout { + /** + * The field that this buffer belongs to. + */ + readonly field: ArrowField + + /** + * The buffer type (validity, offset, etc). + */ + readonly bufferType: BufferType + + /** + * The index of this buffer in the `RecordBatch.buffers` array. + */ + readonly bufferIndex: number + + constructor(field: ArrowField, bufferType: BufferType, bufferIndex: number) { + this.field = field + this.bufferType = bufferType + this.bufferIndex = bufferIndex + } +} + +/** + * Get the buffer types required for a given Arrow data type. The buffer order + * matches the Arrow IPC specification. 
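+ *
+ * @example
+ * ```ts
+ * // Illustrative: a Utf8 column needs validity, offsets, and data buffers
+ * getBufferTypesForType({ typeId: "utf8" } as ArrowDataType)
+ * // ⇒ [BufferType.VALIDITY, BufferType.OFFSET, BufferType.DATA]
+ * ```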
+ */
+export const getBufferTypesForType = (type: ArrowDataType): ReadonlyArray<BufferType> => {
+  switch (type.typeId) {
+    // The null type has no buffers
+    case "null": {
+      return []
+    }
+
+    // Fixed-width types: validity + data
+    case "bool":
+    case "int":
+    case "float":
+    case "decimal":
+    case "date":
+    case "time":
+    case "timestamp":
+    case "interval":
+    case "duration":
+    case "fixed-size-binary": {
+      return [BufferType.VALIDITY, BufferType.DATA]
+    }
+
+    // Variable-length types: validity + offsets + data
+    case "binary":
+    case "utf8": {
+      return [BufferType.VALIDITY, BufferType.OFFSET, BufferType.DATA]
+    }
+
+    // Large variable-length types: validity + large offsets + data
+    case "large-binary":
+    case "large-utf8": {
+      return [BufferType.VALIDITY, BufferType.LARGE_OFFSET, BufferType.DATA]
+    }
+
+    // List: validity + offsets (children handled separately)
+    case "list": {
+      return [BufferType.VALIDITY, BufferType.OFFSET]
+    }
+
+    // Large list: validity + large offsets (children handled separately)
+    case "large-list": {
+      return [BufferType.VALIDITY, BufferType.LARGE_OFFSET]
+    }
+
+    // Fixed-size list: validity only (no offset information needed)
+    case "fixed-size-list": {
+      return [BufferType.VALIDITY]
+    }
+
+    // Struct: validity only (children handled separately)
+    case "struct": {
+      return [BufferType.VALIDITY]
+    }
+
+    // Map: validity + offsets (children handled separately)
+    case "map": {
+      return [BufferType.VALIDITY, BufferType.OFFSET]
+    }
+
+    case "union": {
+      if (type.mode === "SPARSE") {
+        // Sparse union: type IDs only
+        return [BufferType.TYPE_ID]
+      } else {
+        // Dense union: type IDs + offsets
+        return [BufferType.TYPE_ID, BufferType.OFFSET]
+      }
+    }
+  }
+}
+
+// =============================================================================
+// Decoded Record Batch Types
+// =============================================================================
+
+/**
+ * A fully decoded Arrow `RecordBatch`.
+ */
+export class DecodedRecordBatch {
+  /**
+   * The schema for this batch.
+   */
+  readonly schema: ArrowSchema
+
+  /**
+   * The number of rows in this batch.
+   */
+  readonly numRows: bigint
+
+  /**
+   * The columns in this batch.
+   */
+  readonly columns: ReadonlyArray<DecodedColumn>
+
+  constructor(
+    schema: ArrowSchema,
+    numRows: bigint,
+    columns: ReadonlyArray<DecodedColumn>
+  ) {
+    this.schema = schema
+    this.numRows = numRows
+    this.columns = columns
+  }
+
+  /**
+   * Retrieve a column by name.
+   */
+  getColumn(name: string): DecodedColumn | undefined {
+    return this.columns.find((column) => column.field.name === name)
+  }
+
+  /**
+   * Retrieve a column by index.
+   */
+  getColumnAt(index: number): DecodedColumn | undefined {
+    return this.columns[index]
+  }
+}
+
+/**
+ * Represents a decoded column with its data buffers.
+ */
+export class DecodedColumn {
+  /**
+   * The schema field definition.
+   */
+  readonly field: ArrowField
+
+  /**
+   * The field node with length / null count.
+   */
+  readonly node: FieldNode
+
+  /**
+   * The raw data buffers for this column.
+   *
+   * The layout of the buffers depends on the type:
+   * - Primitive: [validity, data]
+   * - Variable-length (utf8, binary): [validity, offsets, data]
+   * - List: [validity, offsets] + child buffers
+   * - Struct: [validity] + child buffers
+   * - etc.
+   */
+  readonly buffers: ReadonlyArray<Uint8Array>
+
+  /**
+   * Child columns for nested data types.
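+   * Children are expected to appear in the same depth-first order as the
+   * corresponding `ArrowField.children` in the schema.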
+   */
+  readonly children: ReadonlyArray<DecodedColumn>
+
+  constructor(
+    field: ArrowField,
+    node: FieldNode,
+    buffers: ReadonlyArray<Uint8Array>,
+    children: ReadonlyArray<DecodedColumn>
+  ) {
+    this.field = field
+    this.node = node
+    this.buffers = buffers
+    this.children = children
+  }
+}
+
+// =============================================================================
+// Parsers
+// =============================================================================
+
+/**
+ * Parse an Arrow RecordBatch from the raw IPC message header bytes (FlatBuffer)
+ * of a `FlightData` message.
+ *
+ * Message table structure (from https://github.com/apache/arrow/blob/main/format/Message.fbs):
+ *   version: MetadataVersion (Int16 enum)
+ *   header: MessageHeader (union)
+ *   bodyLength: long
+ *   custom_metadata: [KeyValue]
+ *
+ * In FlatBuffers, a union field generates TWO vtable entries:
+ * - The type discriminator (UInt8)
+ * - The offset to the union value
+ *
+ * So the vtable field indices are:
+ *   0: version (Int16)
+ *   1: header_type (UInt8 - union type discriminator)
+ *   2: header (offset to union table)
+ *   3: bodyLength (Int64)
+ *   4: custom_metadata (vector offset)
+ */
+export const parseRecordBatch = Effect.fn(function*(flightData: FlightData) {
+  const reader = new FlatBufferReader(flightData.dataHeader)
+
+  // The flatbuffer root table offset is at position 0
+  const rootOffset = reader.readOffset(0)
+
+  // Read the position of the message header union type discriminator
+  const headerTypePosition = reader.getFieldPosition(rootOffset, 1)
+  if (Predicate.isNull(headerTypePosition)) {
+    return yield* new MissingFieldError({
+      fieldName: "header_type",
+      fieldIndex: 1,
+      tableOffset: rootOffset
+    })
+  }
+
+  // Read the actual message header union type discriminator
+  const headerType = reader.readUint8(headerTypePosition)
+  if (headerType !== MessageHeaderType.RECORD_BATCH) {
+    return yield* new UnexpectedMessageTypeError({
+      expected: MessageHeaderType.RECORD_BATCH,
+      received: headerType
+    })
+  }
+
+  // Read the union value offset (field index 2)
+  const headerPosition = reader.getFieldPosition(rootOffset, 2)
+  if (Predicate.isNull(headerPosition)) {
+    return yield* new MissingFieldError({
+      fieldName: "header",
+      fieldIndex: 2,
+      tableOffset: rootOffset
+    })
+  }
+
+  // Read the offset position of the record batch relative to the header position
+  const recordBatchOffset = reader.readOffset(headerPosition)
+
+  return yield* parseRecordBatchTable(reader, recordBatchOffset)
+})
+
+/**
+ * Parses a RecordBatch table.
+ *
+ * The structure of a RecordBatch table is as follows:
+ *   0: length (Int64) - number of rows
+ *   1: nodes ([FieldNode]) - one per field in DFS order
+ *   2: buffers ([Buffer]) - buffer locations
+ *   3: compression (BodyCompression) - optional
+ */
+export const parseRecordBatchTable = Effect.fn(function*(reader: FlatBufferReader, offset: number) {
+  // Parse length
+  const lengthPosition = reader.getFieldPosition(offset, 0)
+  const length = Predicate.isNotNull(lengthPosition)
+    ? reader.readInt64(lengthPosition)
+    : 0n
+
+  // Parse nodes vector
+  const nodes: Array<FieldNode> = []
+  const nodesPos = reader.getFieldPosition(offset, 1)
+  if (Predicate.isNotNull(nodesPos)) {
+    const nodesVectorOffset = reader.readOffset(nodesPos)
+    const nodeCount = reader.readVectorLength(nodesVectorOffset)
+
+    // FieldNode is a struct (inline in vector), not a table
+    // Each FieldNode is 16 bytes: length (Int64) + null_count (Int64)
+    const FIELD_NODE_SIZE = 16
+
+    for (let i = 0; i < nodeCount; i++) {
+      const nodeOffset = nodesVectorOffset + 4 + i * FIELD_NODE_SIZE
+      const nodeLength = reader.readInt64(nodeOffset)
+      const nullCount = reader.readInt64(nodeOffset + 8)
+      nodes.push(new FieldNode(nodeLength, nullCount))
+    }
+  }
+
+  // Parse buffers vector
+  const buffers: Array<BufferDescriptor> = []
+  const buffersPosition = reader.getFieldPosition(offset, 2)
+  if (Predicate.isNotNull(buffersPosition)) {
+    const buffersVectorOffset = reader.readOffset(buffersPosition)
+    const numBuffers = reader.readVectorLength(buffersVectorOffset)
+
+    // Buffer is a struct (inline in vector), not a table
+    // Each Buffer is 16 bytes: offset (Int64) + length (Int64)
+    const BUFFER_SIZE = 16
+
+    for (let i = 0; i < numBuffers; i++) {
+      const bufferOffset = buffersVectorOffset + 4 + i * BUFFER_SIZE
+      const bufOffset = reader.readInt64(bufferOffset)
+      const bufLength = reader.readInt64(bufferOffset + 8)
+      buffers.push(new BufferDescriptor(bufOffset, bufLength))
+    }
+  }
+
+  // Parse optional compression
+  let compression: BodyCompression | undefined
+  const compressionPosition = reader.getFieldPosition(offset, 3)
+  if (Predicate.isNotNull(compressionPosition)) {
+    const compressionOffset = reader.readOffset(compressionPosition)
+    compression = parseBodyCompression(reader, compressionOffset)
+  }
+
+  return new RecordBatch(length, nodes, buffers, compression)
+})
+
+/**
+ * Parses the `BodyCompression` table.
+ *
+ * The structure of the BodyCompression table is as follows:
+ *   0: codec (CompressionType enum, Int8)
+ *   1: method (BodyCompressionMethod enum, Int8)
+ */
+const parseBodyCompression = (reader: FlatBufferReader, offset: number): BodyCompression => {
+  const codecPosition = reader.getFieldPosition(offset, 0)
+  const codec = Predicate.isNotNull(codecPosition)
+    ? reader.readInt8(codecPosition) as CompressionCodec
+    : CompressionCodec.LZ4_FRAME
+
+  const methodPosition = reader.getFieldPosition(offset, 1)
+  const method = Predicate.isNotNull(methodPosition)
+    ? reader.readInt8(methodPosition) as BodyCompressionMethod
+    : BodyCompressionMethod.BUFFER
+
+  return new BodyCompression(codec, method)
+}
+
+/**
+ * Parse an Arrow DictionaryBatch from the raw IPC message header bytes (FlatBuffer)
+ * of a `FlightData` message.
+ *
+ * The structure mirrors `parseRecordBatch` but expects a DICTIONARY_BATCH message type.
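+ *
+ * Note that when the parsed batch has `isDelta` set, its values should be
+ * appended to the previously received dictionary with the same ID rather than
+ * replacing it (see the `DictionaryBatch` class above).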
+ * + * DictionaryBatch table structure: + * 0: id (Int64) - dictionary identifier + * 1: data (RecordBatch) - the dictionary values + * 2: isDelta (Bool) - whether this is a delta update + */ +export const parseDictionaryBatch = Effect.fn(function*(flightData: FlightData) { + const reader = new FlatBufferReader(flightData.dataHeader) + + // The flatbuffer root table offset is at position 0 + const rootOffset = reader.readOffset(0) + + // Read the position of the message header union type discriminator + const headerTypePosition = reader.getFieldPosition(rootOffset, 1) + if (Predicate.isNull(headerTypePosition)) { + return yield* new MissingFieldError({ + fieldName: "header_type", + fieldIndex: 1, + tableOffset: rootOffset + }) + } + + // Read the actual message header union type discriminator + const headerType = reader.readUint8(headerTypePosition) + if (headerType !== MessageHeaderType.DICTIONARY_BATCH) { + return yield* new UnexpectedMessageTypeError({ + expected: MessageHeaderType.DICTIONARY_BATCH, + received: headerType + }) + } + + // Read the union value offset (field index 2) + const headerPosition = reader.getFieldPosition(rootOffset, 2) + if (Predicate.isNull(headerPosition)) { + return yield* new MissingFieldError({ + fieldName: "header", + fieldIndex: 2, + tableOffset: rootOffset + }) + } + + // Read the offset position of the dictionary batch relative to the header position + const dictionaryBatchOffset = reader.readOffset(headerPosition) + + return yield* parseDictionaryBatchTable(reader, dictionaryBatchOffset) +}) + +/** + * Parses a DictionaryBatch table. + * + * The structure of a DictionaryBatch table is as follows: + * 0: id (Int64) - the dictionary identifier + * 1: data (RecordBatch) - the dictionary values as a record batch + * 2: isDelta (Bool) - whether this is a delta update (default: false) + */ +const parseDictionaryBatchTable = Effect.fn(function*(reader: FlatBufferReader, offset: number) { + // Parse id + const idPosition = reader.getFieldPosition(offset, 0) + const id = Predicate.isNotNull(idPosition) ? reader.readInt64(idPosition) : 0n + + // Parse data (RecordBatch) + const dataPosition = reader.getFieldPosition(offset, 1) + if (Predicate.isNull(dataPosition)) { + return yield* new MissingFieldError({ + fieldName: "data", + fieldIndex: 1, + tableOffset: offset + }) + } + const dataOffset = reader.readOffset(dataPosition) + const data = yield* parseRecordBatchTable(reader, dataOffset) + + // Parse isDelta (default: false) + const isDeltaPosition = reader.getFieldPosition(offset, 2) + const isDelta = Predicate.isNotNull(isDeltaPosition) + ? reader.readUint8(isDeltaPosition) !== 0 + : false + + return new DictionaryBatch(id, data, isDelta) +}) + +// ============================================================================= +// Utilities +// ============================================================================= + +/** + * Returns `true` if the provided `FlightData` header data buffer contains a + * record batch message, otherwise returns `false`. + */ +export const isRecordBatchMessage = Effect.fn(function*(flightData: FlightData) { + const messageType = yield* getMessageType(flightData) + return messageType === MessageHeaderType.RECORD_BATCH +}) + +/** + * Returns `true` if the provided `FlightData` header data buffer contains a + * dictionary batch message, otherwise returns `false`. 
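+ *
+ * @example
+ * A minimal usage sketch (assumes `flightData` was received from an Arrow
+ * Flight stream):
+ *
+ *   const program = Effect.gen(function*() {
+ *     if (yield* isDictionaryBatchMessage(flightData)) {
+ *       const dictionary = yield* parseDictionaryBatch(flightData)
+ *     }
+ *   })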
+ */
+export const isDictionaryBatchMessage = Effect.fn(function*(flightData: FlightData) {
+  const messageType = yield* getMessageType(flightData)
+  return messageType === MessageHeaderType.DICTIONARY_BATCH
+})
diff --git a/packages/amp/src/internal/arrow-flight-ipc/Schema.ts b/packages/amp/src/internal/arrow-flight-ipc/Schema.ts
new file mode 100644
index 0000000..81a17ce
--- /dev/null
+++ b/packages/amp/src/internal/arrow-flight-ipc/Schema.ts
@@ -0,0 +1,1003 @@
+/**
+ * Arrow Schema Types and Parser
+ *
+ * This module provides types and parsing utilities for Arrow IPC schema messages.
+ *
+ * References:
+ * - Arrow IPC Format: https://arrow.apache.org/docs/format/Columnar.html#ipc-file-format
+ * - FlatBuffer encoding: https://flatbuffers.dev/flatbuffers_internals.html
+ *
+ * @internal
+ */
+
+import * as Effect from "effect/Effect"
+import * as Predicate from "effect/Predicate"
+import {
+  InvalidArrowDataTypeError,
+  InvalidMessageTypeError,
+  MissingFieldError,
+  UnexpectedMessageTypeError
+} from "./Errors.ts"
+import { FlatBufferReader } from "./FlatBufferReader.ts"
+
+// =============================================================================
+// Message Types
+// =============================================================================
+
+export const MessageHeaderType = {
+  NONE: 0,
+  SCHEMA: 1,
+  DICTIONARY_BATCH: 2,
+  RECORD_BATCH: 3,
+  TENSOR: 4,
+  SPARSE_TENSOR: 5
+} as const
+export type MessageHeaderType = typeof MessageHeaderType[keyof typeof MessageHeaderType]
+
+/**
+ * A partial representation of the `FlightData` type.
+ */
+export interface FlightData {
+  readonly dataHeader: Uint8Array
+  readonly dataBody: Uint8Array
+}
+
+// =============================================================================
+// Arrow Data Type Enums
+// =============================================================================
+
+export const ArrowDataTypeEnum = {
+  NONE: 0,
+  NULL: 1,
+  INT: 2,
+  FLOATING_POINT: 3,
+  BINARY: 4,
+  UTF8: 5,
+  BOOL: 6,
+  DECIMAL: 7,
+  DATE: 8,
+  TIME: 9,
+  TIMESTAMP: 10,
+  INTERVAL: 11,
+  LIST: 12,
+  STRUCT: 13,
+  UNION: 14,
+  FIXED_SIZE_BINARY: 15,
+  FIXED_SIZE_LIST: 16,
+  MAP: 17,
+  DURATION: 18,
+  LARGE_BINARY: 19,
+  LARGE_UTF8: 20,
+  LARGE_LIST: 21
+} as const
+export type ArrowDataTypeEnum = typeof ArrowDataTypeEnum[keyof typeof ArrowDataTypeEnum]
+
+export const DateUnit = {
+  DAY: 0,
+  MILLISECOND: 1
+} as const
+export type DateUnit = typeof DateUnit[keyof typeof DateUnit]
+
+export const Endianness = {
+  LITTLE: 0,
+  BIG: 1
+} as const
+export type Endianness = typeof Endianness[keyof typeof Endianness]
+
+export const IntervalUnit = {
+  YEAR_MONTH: 0,
+  DAY_TIME: 1,
+  MONTH_DAY_NANO: 2
+} as const
+export type IntervalUnit = typeof IntervalUnit[keyof typeof IntervalUnit]
+
+export const Precision = {
+  HALF: 0,
+  SINGLE: 1,
+  DOUBLE: 2
+} as const
+export type Precision = typeof Precision[keyof typeof Precision]
+
+export const TimeUnit = {
+  SECOND: 0,
+  MILLISECOND: 1,
+  MICROSECOND: 2,
+  NANOSECOND: 3
+} as const
+export type TimeUnit = typeof TimeUnit[keyof typeof TimeUnit]
+
+export const UnionMode = {
+  SPARSE: 0,
+  DENSE: 1
+} as const
+export type UnionMode = typeof UnionMode[keyof typeof UnionMode]
+
+// =============================================================================
+// Schema Types
+// =============================================================================
+
+export class ArrowSchema {
+  readonly fields: ReadonlyArray<ArrowField>
+  readonly metadata: ReadonlyMap<string, string>
+  readonly endianness: Endianness
+  constructor(
+    fields: ReadonlyArray<ArrowField>,
+    metadata: ReadonlyMap<string, string>,
+    endianness: Endianness
+  ) {
+    this.fields = fields
+    this.metadata = metadata
+    this.endianness = endianness
+  }
+}
+
+export class ArrowField {
+  readonly name: string
+  readonly type: ArrowDataType
+  readonly nullable: boolean
+  readonly metadata: ReadonlyMap<string, string>
+  readonly children: ReadonlyArray<ArrowField>
+  readonly dictionaryEncoding?: DictionaryEncoding | undefined
+  constructor(
+    name: string,
+    type: ArrowDataType,
+    nullable: boolean,
+    metadata: ReadonlyMap<string, string>,
+    children: ReadonlyArray<ArrowField>,
+    dictionaryEncoding?: DictionaryEncoding | undefined
+  ) {
+    this.name = name
+    this.type = type
+    this.nullable = nullable
+    this.metadata = metadata
+    this.children = children
+    this.dictionaryEncoding = dictionaryEncoding
+  }
+}
+
+export class DictionaryEncoding {
+  readonly id: bigint
+  readonly indexType: IntType
+  readonly isOrdered: boolean
+  constructor(id: bigint, indexType: IntType, isOrdered: boolean) {
+    this.id = id
+    this.indexType = indexType
+    this.isOrdered = isOrdered
+  }
+}
+
+// =============================================================================
+// Arrow Data Types
+// =============================================================================
+
+export type ArrowDataType =
+  | NullType
+  | BoolType
+  | IntType
+  | FloatingPointType
+  | DecimalType
+  | BinaryType
+  | LargeBinaryType
+  | FixedSizeBinaryType
+  | Utf8Type
+  | LargeUtf8Type
+  | DateType
+  | TimeType
+  | TimestampType
+  | IntervalType
+  | DurationType
+  | ListType
+  | LargeListType
+  | FixedSizeListType
+  | StructType
+  | MapType
+  | UnionType
+
+export interface NullType {
+  readonly typeId: "null"
+}
+export const NullType: NullType = { typeId: "null" }
+
+export interface BoolType {
+  readonly typeId: "bool"
+}
+export const BoolType: BoolType = { typeId: "bool" }
+
+export type IntBitWidth = 8 | 16 | 32 | 64
+
+export class IntType {
+  readonly typeId = "int"
+  readonly bitWidth: IntBitWidth
+  readonly isSigned: boolean
+  constructor(bitWidth: IntBitWidth, isSigned: boolean) {
+    this.bitWidth = bitWidth
+    this.isSigned = isSigned
+  }
+}
+
+const PRECISION_MAPPING: Record<Precision, "HALF" | "SINGLE" | "DOUBLE"> = {
+  [Precision.HALF]: "HALF",
+  [Precision.SINGLE]: "SINGLE",
+  [Precision.DOUBLE]: "DOUBLE"
+}
+
+export class FloatingPointType {
+  readonly typeId = "float"
+  readonly precision: "HALF" | "SINGLE" | "DOUBLE"
+  constructor(precision: Precision) {
+    this.precision = PRECISION_MAPPING[precision]
+  }
+}
+
+export class DecimalType {
+  readonly typeId = "decimal"
+  readonly precision: number
+  readonly scale: number
+  readonly bitWidth: number
+  constructor(precision: number, scale: number, bitWidth: number) {
+    this.precision = precision
+    this.scale = scale
+    this.bitWidth = bitWidth
+  }
+}
+
+export interface BinaryType {
+  readonly typeId: "binary"
+}
+export const BinaryType: BinaryType = { typeId: "binary" }
+
+export interface LargeBinaryType {
+  readonly typeId: "large-binary"
+}
+export const LargeBinaryType: LargeBinaryType = { typeId: "large-binary" }
+
+export class FixedSizeBinaryType {
+  readonly typeId = "fixed-size-binary"
+  readonly byteWidth: number
+  constructor(byteWidth: number) {
+    this.byteWidth = byteWidth
+  }
+}
+
+export interface Utf8Type {
+  readonly typeId: "utf8"
+}
+export const Utf8Type: Utf8Type = { typeId: "utf8" }
+
+export interface LargeUtf8Type {
+  readonly typeId: "large-utf8"
+}
+export const LargeUtf8Type: LargeUtf8Type = { typeId: "large-utf8" }
+
+export class DateType {
+  readonly typeId = "date"
+  readonly unit: keyof typeof DateUnit
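+
+  // Per the Arrow columnar spec, DAY-unit dates count days since the UNIX
+  // epoch, while MILLISECOND-unit dates count milliseconds since the epoch.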
+  constructor(unit: DateUnit) {
+    this.unit = unit === DateUnit.DAY ? "DAY" : "MILLISECOND"
+  }
+}
+
+export type TimeBitWidth = 32 | 64
+
+const TIME_UNIT_MAPPING: Record<TimeUnit, keyof typeof TimeUnit> = {
+  [TimeUnit.SECOND]: "SECOND",
+  [TimeUnit.MILLISECOND]: "MILLISECOND",
+  [TimeUnit.MICROSECOND]: "MICROSECOND",
+  [TimeUnit.NANOSECOND]: "NANOSECOND"
+}
+
+export class TimeType {
+  readonly typeId = "time"
+  readonly unit: keyof typeof TimeUnit
+  readonly bitWidth: TimeBitWidth
+  constructor(unit: TimeUnit, bitWidth: TimeBitWidth) {
+    this.unit = TIME_UNIT_MAPPING[unit]
+    this.bitWidth = bitWidth
+  }
+}
+
+export class TimestampType {
+  readonly typeId = "timestamp"
+  readonly unit: keyof typeof TimeUnit
+  readonly timezone: string | null
+  constructor(unit: TimeUnit, timezone: string | null) {
+    this.unit = TIME_UNIT_MAPPING[unit]
+    this.timezone = timezone
+  }
+}
+
+const INTERVAL_UNIT_MAPPING: Record<IntervalUnit, keyof typeof IntervalUnit> = {
+  [IntervalUnit.DAY_TIME]: "DAY_TIME",
+  [IntervalUnit.MONTH_DAY_NANO]: "MONTH_DAY_NANO",
+  [IntervalUnit.YEAR_MONTH]: "YEAR_MONTH"
+}
+
+export class IntervalType {
+  readonly typeId = "interval"
+  readonly unit: keyof typeof IntervalUnit
+  constructor(unit: IntervalUnit) {
+    this.unit = INTERVAL_UNIT_MAPPING[unit]
+  }
+}
+
+export class DurationType {
+  readonly typeId = "duration"
+  readonly unit: keyof typeof TimeUnit
+  constructor(unit: TimeUnit) {
+    this.unit = TIME_UNIT_MAPPING[unit]
+  }
+}
+
+export interface ListType {
+  readonly typeId: "list"
+}
+export const ListType: ListType = { typeId: "list" }
+
+/**
+ * Same as List, but with 64-bit offsets, allowing for representation of
+ * extremely large data values.
+ */
+export interface LargeListType {
+  readonly typeId: "large-list"
+}
+export const LargeListType: LargeListType = { typeId: "large-list" }
+
+export class FixedSizeListType {
+  readonly typeId = "fixed-size-list"
+  readonly listSize: number
+  constructor(listSize: number) {
+    this.listSize = listSize
+  }
+}
+
+/**
+ * A `StructType` in the flatbuffer metadata is the same as an Arrow Struct
+ * (according to the physical memory layout).
+ */
+export interface StructType {
+  readonly typeId: "struct"
+}
+export const StructType: StructType = { typeId: "struct" }
+
+export class MapType {
+  readonly typeId = "map"
+  readonly keysSorted: boolean
+  constructor(keysSorted: boolean) {
+    this.keysSorted = keysSorted
+  }
+}
+
+const UNION_MODE_MAPPING: Record<UnionMode, keyof typeof UnionMode> = {
+  [UnionMode.SPARSE]: "SPARSE",
+  [UnionMode.DENSE]: "DENSE"
+}
+
+export class UnionType {
+  readonly typeId = "union"
+  readonly mode: keyof typeof UnionMode
+  readonly typeIds: ReadonlyArray<number>
+  constructor(mode: UnionMode, typeIds: ReadonlyArray<number>) {
+    this.mode = UNION_MODE_MAPPING[mode]
+    this.typeIds = typeIds
+  }
+}
+
+// =============================================================================
+// Message Type Utilities
+// =============================================================================
+
+/**
+ * Returns the Arrow Flight message type from the `FlightData` header.
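+ *
+ * @example
+ * A minimal dispatch sketch (assumes `flightData` was received from an Arrow
+ * Flight stream):
+ *
+ *   const messageType = yield* getMessageType(flightData)
+ *   if (messageType === MessageHeaderType.SCHEMA) {
+ *     // handle with `parseSchema` below
+ *   }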
+ */
+export const getMessageType = Effect.fn(function*(flightData: FlightData) {
+  const reader = new FlatBufferReader(flightData.dataHeader)
+
+  // The flatbuffer root table offset is at position 0
+  const rootOffset = reader.readOffset(0)
+
+  // Read the position of the message header union type discriminator
+  const headerTypePosition = reader.getFieldPosition(rootOffset, 1)
+
+  if (Predicate.isNull(headerTypePosition)) {
+    return yield* new MissingFieldError({
+      fieldName: "header_type",
+      fieldIndex: 1,
+      tableOffset: rootOffset
+    })
+  }
+
+  const headerType = reader.readUint8(headerTypePosition)
+
+  if (headerType < 0 || headerType > 5) {
+    return yield* new InvalidMessageTypeError({ value: headerType })
+  }
+
+  return headerType as MessageHeaderType
+})
+
+// =============================================================================
+// Schema Parsing
+// =============================================================================
+
+/**
+ * Parse an Arrow Schema from the raw IPC message header bytes (FlatBuffer) of
+ * a `FlightData` message.
+ *
+ * Message table structure (from https://github.com/apache/arrow/blob/main/format/Message.fbs):
+ *   version: MetadataVersion (Int16 enum)
+ *   header: MessageHeader (union)
+ *   bodyLength: long
+ *   custom_metadata: [KeyValue]
+ *
+ * In FlatBuffers, a union field generates TWO vtable entries:
+ * - The type discriminator (UInt8)
+ * - The offset to the union value
+ *
+ * So the vtable field indices are:
+ *   0: version (Int16)
+ *   1: header_type (UInt8 - union type discriminator)
+ *   2: header (offset to union table)
+ *   3: bodyLength (Int64)
+ *   4: custom_metadata (vector offset)
+ */
+export const parseSchema = Effect.fn(function*(flightData: FlightData) {
+  const reader = new FlatBufferReader(flightData.dataHeader)
+
+  // The flatbuffer root table offset is at position 0
+  const rootOffset = reader.readOffset(0)
+
+  // Read the position of the message header union type discriminator
+  const headerTypePosition = reader.getFieldPosition(rootOffset, 1)
+  if (Predicate.isNull(headerTypePosition)) {
+    return yield* new MissingFieldError({
+      fieldName: "header_type",
+      fieldIndex: 1,
+      tableOffset: rootOffset
+    })
+  }
+
+  // Read the actual message header union type discriminator
+  const headerType = reader.readUint8(headerTypePosition)
+  if (headerType !== MessageHeaderType.SCHEMA) {
+    return yield* new UnexpectedMessageTypeError({
+      expected: MessageHeaderType.SCHEMA,
+      received: headerType
+    })
+  }
+
+  // Read the union value offset (field index 2)
+  const headerPosition = reader.getFieldPosition(rootOffset, 2)
+  if (Predicate.isNull(headerPosition)) {
+    return yield* new MissingFieldError({
+      fieldName: "header",
+      fieldIndex: 2,
+      tableOffset: rootOffset
+    })
+  }
+
+  // Read the offset position of the schema relative to the header position
+  const schemaOffset = reader.readOffset(headerPosition)
+
+  return yield* parseSchemaTable(reader, schemaOffset)
+})
+
+/**
+ * Parse the Schema vtable from the FlatBuffer message header.
+ *
+ * The structure of the Schema vtable is as follows:
+ *   0: endianness (Int16)
+ *   1: fields (vector of Field)
+ *   2: custom_metadata (vector of KeyValue)
+ *   3: features (vector of Int64) - optional
+ */
+const parseSchemaTable = Effect.fn(function*(
+  reader: FlatBufferReader,
+  offset: number
+) {
+  // Parse the endianness
+  const endiannessPosition = reader.getFieldPosition(offset, 0)
+  const endianness = Predicate.isNotNull(endiannessPosition)
+    ? (reader.readInt16(endiannessPosition) as Endianness)
+    : Endianness.LITTLE
+
+  // Parse fields
+  const fields: Array<ArrowField> = []
+  const fieldsPosition = reader.getFieldPosition(offset, 1)
+  if (Predicate.isNotNull(fieldsPosition)) {
+    const fieldsVectorOffset = reader.readOffset(fieldsPosition)
+    const fieldCount = reader.readVectorLength(fieldsVectorOffset)
+
+    for (let i = 0; i < fieldCount; i++) {
+      const fieldOffsetPosition = fieldsVectorOffset + 4 + i * 4
+      const fieldOffset = reader.readOffset(fieldOffsetPosition)
+      fields.push(yield* parseField(reader, fieldOffset))
+    }
+  }
+
+  // Parse metadata
+  const metadata = new Map<string, string>()
+  const metadataPosition = reader.getFieldPosition(offset, 2)
+  if (Predicate.isNotNull(metadataPosition)) {
+    parseKeyValueVector(reader, metadataPosition, metadata)
+  }
+
+  return new ArrowSchema(fields, metadata, endianness)
+})
+
+/**
+ * Parse the Field vtable from the FlatBuffer message header.
+ *
+ * The structure of the Field vtable is as follows:
+ *   0: name (string)
+ *   1: nullable (bool)
+ *   2: type_type (Type enum, UInt8)
+ *   3: type (union - type-specific table)
+ *   4: dictionary (DictionaryEncoding table)
+ *   5: children (vector of Field)
+ *   6: custom_metadata (vector of KeyValue)
+ */
+const parseField: (
+  reader: FlatBufferReader,
+  offset: number
+) => Effect.Effect<ArrowField, InvalidArrowDataTypeError> = Effect.fn(
+  function*(reader, offset) {
+    // Parse field name
+    const namePosition = reader.getFieldPosition(offset, 0)
+    const name = Predicate.isNotNull(namePosition)
+      ? reader.readString(namePosition)
+      : ""
+
+    // Parse field nullability
+    const nullabilityPosition = reader.getFieldPosition(offset, 1)
+    const nullable = Predicate.isNotNull(nullabilityPosition)
+      ? reader.readUint8(nullabilityPosition) !== 0
+      : false
+
+    // Parse type
+    const typeEnumPosition = reader.getFieldPosition(offset, 2)
+    const typeEnum = Predicate.isNotNull(typeEnumPosition)
+      ? (reader.readUint8(typeEnumPosition) as ArrowDataTypeEnum)
+      : ArrowDataTypeEnum.NONE
+    const typePosition = reader.getFieldPosition(offset, 3)
+    const typeOffset = Predicate.isNotNull(typePosition)
+      ? reader.readOffset(typePosition)
+      : 0
+    const type = yield* parseType(reader, typeEnum, typeOffset)
+
+    // Parse dictionary encoding
+    let dictionaryEncoding: DictionaryEncoding | undefined
+    const dictPosition = reader.getFieldPosition(offset, 4)
+    if (Predicate.isNotNull(dictPosition)) {
+      const dictOffset = reader.readOffset(dictPosition)
+      dictionaryEncoding = parseDictionaryEncoding(reader, dictOffset)
+    }
+
+    // Parse children
+    const children: Array<ArrowField> = []
+    const childrenPosition = reader.getFieldPosition(offset, 5)
+    if (Predicate.isNotNull(childrenPosition)) {
+      const childrenVectorOffset = reader.readOffset(childrenPosition)
+      const childrenCount = reader.readVectorLength(childrenVectorOffset)
+
+      for (let i = 0; i < childrenCount; i++) {
+        const childOffsetPosition = childrenVectorOffset + 4 + i * 4
+        const childOffset = reader.readOffset(childOffsetPosition)
+        children.push(yield* parseField(reader, childOffset))
+      }
+    }
+
+    // Parse metadata
+    const metadata = new Map<string, string>()
+    const metadataPosition = reader.getFieldPosition(offset, 6)
+    if (Predicate.isNotNull(metadataPosition)) {
+      parseKeyValueVector(reader, metadataPosition, metadata)
+    }
+
+    return new ArrowField(
+      name,
+      type,
+      nullable,
+      metadata,
+      children,
+      dictionaryEncoding
+    )
+  }
+)
+
+/**
+ * Parse type union based on the type enum value.
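+ *
+ * For example, a field whose `type_type` discriminator is
+ * `ArrowDataTypeEnum.INT` has its union value decoded by `parseIntType`,
+ * producing an `IntType` such as `new IntType(32, true)` for a signed
+ * 32-bit integer column.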
+ */ +const parseType = Effect.fn(function*( + reader: FlatBufferReader, + typeEnum: ArrowDataTypeEnum, + offset: number +) { + switch (typeEnum) { + case ArrowDataTypeEnum.NULL: { + return NullType + } + case ArrowDataTypeEnum.BOOL: { + return BoolType + } + case ArrowDataTypeEnum.INT: { + return parseIntType(reader, offset) + } + case ArrowDataTypeEnum.FLOATING_POINT: { + return parseFloatingPointType(reader, offset) + } + case ArrowDataTypeEnum.DECIMAL: { + return parseDecimalType(reader, offset) + } + case ArrowDataTypeEnum.BINARY: { + return BinaryType + } + case ArrowDataTypeEnum.LARGE_BINARY: { + return LargeBinaryType + } + case ArrowDataTypeEnum.FIXED_SIZE_BINARY: { + return parseFixedSizeBinaryType(reader, offset) + } + case ArrowDataTypeEnum.UTF8: { + return Utf8Type + } + case ArrowDataTypeEnum.LARGE_UTF8: { + return LargeUtf8Type + } + case ArrowDataTypeEnum.DATE: { + return parseDateType(reader, offset) + } + case ArrowDataTypeEnum.TIME: { + return parseTimeType(reader, offset) + } + case ArrowDataTypeEnum.TIMESTAMP: { + return parseTimestampType(reader, offset) + } + case ArrowDataTypeEnum.INTERVAL: { + return parseIntervalType(reader, offset) + } + case ArrowDataTypeEnum.DURATION: { + return parseDurationType(reader, offset) + } + case ArrowDataTypeEnum.LIST: { + return ListType + } + case ArrowDataTypeEnum.LARGE_LIST: { + return LargeListType + } + case ArrowDataTypeEnum.FIXED_SIZE_LIST: { + return parseFixedSizeListType(reader, offset) + } + case ArrowDataTypeEnum.STRUCT: { + return StructType + } + case ArrowDataTypeEnum.MAP: { + return parseMapType(reader, offset) + } + case ArrowDataTypeEnum.UNION: { + return parseUnionType(reader, offset) + } + default: { + return yield* new InvalidArrowDataTypeError({ + type: typeEnum, + offset + }) + } + } +}) + +/** + * Parses an `Int` schema. + * + * The structure of the Int vtable is as follows: + * 0: bitWidth (Int32) + * 1: is_signed (Bool) + */ +const parseIntType = (reader: FlatBufferReader, offset: number): IntType => { + const bitWidthPosition = reader.getFieldPosition(offset, 0) + const bitWidth = Predicate.isNotNull(bitWidthPosition) + ? (reader.readInt32(bitWidthPosition) as IntBitWidth) + : 32 + + const isSignedPosition = reader.getFieldPosition(offset, 1) + const isSigned = Predicate.isNotNull(isSignedPosition) + ? reader.readUint8(isSignedPosition) !== 0 + : true + + return new IntType(bitWidth, isSigned) +} + +/** + * Parses a `FloatingPoint` schema. + * + * The structure of the FloatingPoint vtable is as follows: + * 0: precision (Precision enum) + */ +const parseFloatingPointType = ( + reader: FlatBufferReader, + offset: number +): FloatingPointType => { + const precisionPosition = reader.getFieldPosition(offset, 0) + const precisionEnum = Predicate.isNotNull(precisionPosition) + ? (reader.readInt16(precisionPosition) as Precision) + : Precision.DOUBLE + + return new FloatingPointType(precisionEnum) +} + +/** + * Parses a `Decimal` schema. + * + * The structure of the Decimal vtable is as follows: + * 0: precision (Int32) + * 1: scale (Int32) + * 2: bitWidth (Int32) + */ +const parseDecimalType = ( + reader: FlatBufferReader, + offset: number +): DecimalType => { + const precisionPosition = reader.getFieldPosition(offset, 0) + const precision = Predicate.isNotNull(precisionPosition) + ? reader.readInt32(precisionPosition) + : 0 + + const scalePosition = reader.getFieldPosition(offset, 1) + const scale = Predicate.isNotNull(scalePosition) + ? 
reader.readInt32(scalePosition) + : 0 + + const bitWidthPosition = reader.getFieldPosition(offset, 2) + const bitWidth = Predicate.isNotNull(bitWidthPosition) + ? reader.readInt32(bitWidthPosition) + : 128 + + return new DecimalType(precision, scale, bitWidth) +} + +/** + * Parses a `FixedSizeBinary` schema. + * + * The structure of the FixedSizeBinary vtable is as follows: + * 0: byteWidth (Int32) + */ +const parseFixedSizeBinaryType = (reader: FlatBufferReader, offset: number): FixedSizeBinaryType => { + const byteWidthPosition = reader.getFieldPosition(offset, 0) + const byteWidth = Predicate.isNotNull(byteWidthPosition) + ? reader.readInt32(byteWidthPosition) + : 0 + + return new FixedSizeBinaryType(byteWidth) +} + +/** + * Parses a `Date` schema. + * + * The structure of the Date vtable is as follows: + * 0: unit (DateUnit enum) + */ +const parseDateType = (reader: FlatBufferReader, offset: number): DateType => { + const unitPosition = reader.getFieldPosition(offset, 0) + const unitEnum = Predicate.isNotNull(unitPosition) + ? (reader.readInt16(unitPosition) as DateUnit) + : DateUnit.MILLISECOND + + return new DateType(unitEnum) +} + +/** + * Parses a `Time` schema. + * + * The structure of the Time vtable is as follows: + * 0: unit (TimeUnit enum) + * 1: bitWidth (Int32) + */ +const parseTimeType = (reader: FlatBufferReader, offset: number): TimeType => { + const unitPosition = reader.getFieldPosition(offset, 0) + const unitEnum = Predicate.isNotNull(unitPosition) + ? (reader.readInt16(unitPosition) as TimeUnit) + : TimeUnit.MILLISECOND + + const bitWidthPosition = reader.getFieldPosition(offset, 1) + const bitWidth = Predicate.isNotNull(bitWidthPosition) + ? (reader.readInt32(bitWidthPosition) as TimeBitWidth) + : 32 + + return new TimeType(unitEnum, bitWidth) +} + +/** + * Parses a `Timestamp` schema. + * + * The structure of the Timestamp vtable is as follows: + * 0: unit (TimeUnit enum) + * 1: timezone (string) + */ +const parseTimestampType = ( + reader: FlatBufferReader, + offset: number +): TimestampType => { + const unitPosition = reader.getFieldPosition(offset, 0) + const unitEnum = Predicate.isNotNull(unitPosition) + ? (reader.readInt16(unitPosition) as TimeUnit) + : TimeUnit.MICROSECOND + + const timezonePosition = reader.getFieldPosition(offset, 1) + const timezone = Predicate.isNotNull(timezonePosition) + ? reader.readString(timezonePosition) + : null + + return new TimestampType(unitEnum, timezone) +} + +/** + * Parses an `Interval` schema. + * + * The structure of the Interval vtable is as follows: + * 0: unit (IntervalUnit enum) + */ +const parseIntervalType = ( + reader: FlatBufferReader, + offset: number +): IntervalType => { + const unitPosition = reader.getFieldPosition(offset, 0) + const unitEnum = Predicate.isNotNull(unitPosition) + ? (reader.readInt16(unitPosition) as IntervalUnit) + : IntervalUnit.YEAR_MONTH + + return new IntervalType(unitEnum) +} + +/** + * Parses a `Duration` schema. + * + * The structure of the Duration vtable is as follows: + * 0: unit (TimeUnit enum) + */ +const parseDurationType = ( + reader: FlatBufferReader, + offset: number +): DurationType => { + const unitPosition = reader.getFieldPosition(offset, 0) + const unitEnum = Predicate.isNotNull(unitPosition) + ? (reader.readInt16(unitPosition) as TimeUnit) + : TimeUnit.MILLISECOND + + return new DurationType(unitEnum) +} + +/** + * Parses a `FixedSizeList` schema. 
+ *
+ * The structure of the FixedSizeList vtable is as follows:
+ *   0: listSize (Int32)
+ */
+const parseFixedSizeListType = (
+  reader: FlatBufferReader,
+  offset: number
+): FixedSizeListType => {
+  const listSizePosition = reader.getFieldPosition(offset, 0)
+  const listSize = Predicate.isNotNull(listSizePosition)
+    ? reader.readInt32(listSizePosition)
+    : 0
+
+  return new FixedSizeListType(listSize)
+}
+
+/**
+ * Parses a `Map` schema.
+ *
+ * The structure of the Map vtable is as follows:
+ *   0: keysSorted (Bool)
+ */
+const parseMapType = (reader: FlatBufferReader, offset: number): MapType => {
+  const keysSortedPosition = reader.getFieldPosition(offset, 0)
+  const keysSorted = Predicate.isNotNull(keysSortedPosition)
+    ? reader.readUint8(keysSortedPosition) !== 0
+    : false
+
+  return new MapType(keysSorted)
+}
+
+/**
+ * Parses a `Union` schema.
+ *
+ * The structure of the Union vtable is as follows:
+ *   0: mode (UnionMode enum)
+ *   1: typeIds (vector of Int32)
+ */
+const parseUnionType = (
+  reader: FlatBufferReader,
+  offset: number
+): UnionType => {
+  const modePosition = reader.getFieldPosition(offset, 0)
+  const modeEnum = Predicate.isNotNull(modePosition)
+    ? (reader.readInt16(modePosition) as UnionMode)
+    : UnionMode.SPARSE
+
+  const typeIds: Array<number> = []
+  const typeIdsPosition = reader.getFieldPosition(offset, 1)
+  if (Predicate.isNotNull(typeIdsPosition)) {
+    const vectorOffset = reader.readOffset(typeIdsPosition)
+    const typeIdCount = reader.readVectorLength(vectorOffset)
+    for (let i = 0; i < typeIdCount; i++) {
+      typeIds.push(reader.readInt32(vectorOffset + 4 + i * 4))
+    }
+  }
+
+  return new UnionType(modeEnum, typeIds)
+}
+
+/**
+ * Parses a `DictionaryEncoding` schema.
+ *
+ * The structure of the DictionaryEncoding vtable is as follows:
+ *   0: id (Int64)
+ *   1: indexType (Int table)
+ *   2: isOrdered (Bool)
+ */
+const parseDictionaryEncoding = (
+  reader: FlatBufferReader,
+  offset: number
+): DictionaryEncoding => {
+  const idPosition = reader.getFieldPosition(offset, 0)
+  const id = Predicate.isNotNull(idPosition)
+    ? reader.readInt64(idPosition)
+    : 0n
+
+  const indexTypePosition = reader.getFieldPosition(offset, 1)
+  let indexType: IntType
+  if (Predicate.isNotNull(indexTypePosition)) {
+    const indexTypeOffset = reader.readOffset(indexTypePosition)
+    indexType = parseIntType(reader, indexTypeOffset)
+  } else {
+    indexType = new IntType(32, true)
+  }
+
+  const isOrderedPosition = reader.getFieldPosition(offset, 2)
+  const isOrdered = Predicate.isNotNull(isOrderedPosition)
+    ? reader.readUint8(isOrderedPosition) !== 0
+    : false
+
+  return new DictionaryEncoding(id, indexType, isOrdered)
+}
+
+/**
+ * Parses a KeyValue vector into a Map.
+ */
+const parseKeyValueVector = (
+  reader: FlatBufferReader,
+  pos: number,
+  map: Map<string, string>
+): void => {
+  const vectorOffset = reader.readOffset(pos)
+  const itemCount = reader.readVectorLength(vectorOffset)
+
+  for (let i = 0; i < itemCount; i++) {
+    const kvOffsetPosition = vectorOffset + 4 + i * 4
+    const kvOffset = reader.readOffset(kvOffsetPosition)
+
+    // KeyValue table: key (string), value (string)
+    const keyPosition = reader.getFieldPosition(kvOffset, 0)
+    const valuePosition = reader.getFieldPosition(kvOffset, 1)
+
+    if (Predicate.isNotNull(keyPosition)) {
+      const key = reader.readString(keyPosition)
+      const value = Predicate.isNotNull(valuePosition)
+        ?
reader.readString(valuePosition) + : "" + map.set(key, value) + } + } +} + +// ============================================================================= +// Schema Utilities +// ============================================================================= + +/** + * Returns `true` if the provided `FlightData` header data buffer contains a + * schema message, otherwise returns `false`. + */ +export const isSchemaMessage = Effect.fn(function*(flightData: FlightData) { + const messageType = yield* getMessageType(flightData) + return messageType === MessageHeaderType.SCHEMA +}) diff --git a/packages/amp/test/arrow-flight-ipc/roundtrip.test.ts b/packages/amp/test/arrow-flight-ipc/roundtrip.test.ts new file mode 100644 index 0000000..c22f406 --- /dev/null +++ b/packages/amp/test/arrow-flight-ipc/roundtrip.test.ts @@ -0,0 +1,1011 @@ +/** + * Arrow FlightData Roundtrip Tests + * + * Tests that generated FlightData can be parsed by the arrow-flight-ipc package + * and that decoded values match the expected generated values. + */ +import { decodeRecordBatch } from "@edgeandnode/amp/internal/arrow-flight-ipc/Decoder" +import { parseRecordBatch } from "@edgeandnode/amp/internal/arrow-flight-ipc/RecordBatch" +import { parseSchema } from "@edgeandnode/amp/internal/arrow-flight-ipc/Schema" +import { describe, it } from "@effect/vitest" +import * as Effect from "effect/Effect" +import * as FlightDataGenerator from "../arrow-test-harness/FlightDataGenerator.ts" +import * as SchemaBuilder from "../arrow-test-harness/SchemaBuilder.ts" +import { formatComparisonErrors, verifyDecodedValues } from "../arrow-test-harness/ValueComparison.ts" + +describe("FlightData roundtrip", () => { + it.effect("a simple int32 column", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int32("id") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 42, + defaultNullRate: 0 + }) + + // Parse the schema message + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + expect(parsedSchema.fields.length).toBe(1) + expect(parsedSchema.fields[0].name).toBe("id") + expect(parsedSchema.fields[0].type.typeId).toBe("int") + + // Parse the record batch message + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + expect(recordBatch.length).toBe(10n) + + // Decode the record batch + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + expect(decoded.numRows).toBe(10n) + expect(decoded.columns.length).toBe(1) + + // Verify decoded values match expected values + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("nullable int32 column", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int32("id") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 20, + seed: 123, + defaultNullRate: 0.3 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + // Verify null count is tracked correctly + const idColumn = decoded.getColumn("id")! 
+ const expectedNulls = generated.expectedValues.id.filter((v) => v === null).length + expect(idColumn.node.nullCount).toBe(BigInt(expectedNulls)) + + // Verify all values match + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("bool column", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .bool("flag") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 16, + seed: 456, + defaultNullRate: 0 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + // Verify values match + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + + // Should have a mix of true and false + const expectedValues = generated.expectedValues.flag + expect(expectedValues.some((v) => v === true)).toBe(true) + expect(expectedValues.some((v) => v === false)).toBe(true) + })) + + it.effect("utf8 column", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .utf8("name") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 5, + seed: 789, + defaultNullRate: 0 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + // Verify values match + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("multiple columns", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int32("id") + .utf8("name") + .bool("active") + .int64("score") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 999, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + expect(parsedSchema.fields.length).toBe(4) + expect(parsedSchema.fields[0].name).toBe("id") + expect(parsedSchema.fields[1].name).toBe("name") + expect(parsedSchema.fields[2].name).toBe("active") + expect(parsedSchema.fields[3].name).toBe("score") + + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + expect(recordBatch.length).toBe(10n) + + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + expect(decoded.columns.length).toBe(4) + + // Verify each column has correct structure + const idColumn = decoded.getColumn("id")! + expect(idColumn.buffers.length).toBe(2) // validity + data + + const nameColumn = decoded.getColumn("name")! + expect(nameColumn.buffers.length).toBe(3) // validity + offsets + data + + const activeColumn = decoded.getColumn("active")! + expect(activeColumn.buffers.length).toBe(2) // validity + data + + const scoreColumn = decoded.getColumn("score")! 
+ expect(scoreColumn.buffers.length).toBe(2) // validity + data + + // Verify all values match + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("all integer types", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int8("i8") + .int16("i16") + .int32("i32") + .int64("i64") + .uint8("u8") + .uint16("u16") + .uint32("u32") + .uint64("u64") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 15, + seed: 111, + defaultNullRate: 0.15 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("float types", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .float32("f32") + .float64("f64") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 20, + seed: 222, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("binary types", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .binary("bin") + .fixedSizeBinary("fixed", 8) + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 333, + defaultNullRate: 0.2 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("decimal type", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .decimal("amount", 10, 2) + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 444, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("temporal types", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .dateDay("day") + .dateMillisecond("ms_date") + .timestamp("ts") + .build() + + const generated = yield* 
FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 555, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("list type", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .list("items", (b) => b.int32()) + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 8, + seed: 666, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("struct type", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .struct("person", (b) => + b + .utf8("name") + .int32("age")) + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 8, + seed: 777, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("handles empty record batch (0 rows)", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int32("id") + .utf8("name") + .bool("active") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 0, + seed: 1000 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + expect(parsedSchema.fields.length).toBe(3) + + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + expect(recordBatch.length).toBe(0n) + + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + expect(decoded.numRows).toBe(0n) + expect(decoded.columns.length).toBe(3) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("handles empty strings (not null)", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .utf8("text") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 1001, + defaultNullRate: 0, + fields: { + text: { minLength: 0, maxLength: 0 } + } + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, 
parsedSchema) + + // All values should be empty strings, not null + expect(generated.expectedValues.text.every((v) => v === "")).toBe(true) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("handles empty binary (not null)", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .binary("data") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 1002, + defaultNullRate: 0, + fields: { + data: { minLength: 0, maxLength: 0 } + } + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + // All values should be empty Uint8Arrays + expect(generated.expectedValues.data.every((v) => v instanceof Uint8Array && v.length === 0)).toBe(true) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("handles all-null columns", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int32("id") + .utf8("name") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 1003, + defaultNullRate: 1.0 // 100% nulls + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + // Verify all values are null + expect(generated.expectedValues.id.every((v) => v === null)).toBe(true) + expect(generated.expectedValues.name.every((v) => v === null)).toBe(true) + + // Verify null counts + expect(decoded.getColumn("id")!.node.nullCount).toBe(10n) + expect(decoded.getColumn("name")!.node.nullCount).toBe(10n) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("handles empty lists (not null)", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .list("items", (b) => b.int32()) + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 5, + seed: 1004, + defaultNullRate: 0, + fields: { + items: { minLength: 0, maxLength: 0 } + } + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + // All values should be empty arrays + expect(generated.expectedValues.items.every((v) => Array.isArray(v) && v.length === 0)).toBe(true) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("handles deeply nested structures", ({ expect }) => + Effect.gen(function*() { + // Struct containing a list of structs + const testSchema = SchemaBuilder.schema() + .struct("outer", (s) => + s + .utf8("name") + .list("items", (l) => 
l.int32()) + .struct("nested", (n) => + n + .int32("value") + .utf8("label"))) + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 5, + seed: 1005, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("handles single row", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int32("id") + .utf8("name") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 1, + seed: 1006, + defaultNullRate: 0 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + expect(decoded.numRows).toBe(1n) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("handles large number of rows", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int32("id") + .utf8("name") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10000, + seed: 1007, + defaultNullRate: 0.05 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + expect(decoded.numRows).toBe(10000n) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("handles large number of columns", ({ expect }) => + Effect.gen(function*() { + // Create schema with 50 columns + let builder = SchemaBuilder.schema() + for (let i = 0; i < 50; i++) { + builder = builder.int32(`col_${i}`) + } + const testSchema = builder.build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 100, + seed: 1008, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + expect(parsedSchema.fields.length).toBe(50) + + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + expect(decoded.columns.length).toBe(50) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("handles map type", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .map("attributes", (k) => k.utf8(), (v) => v.int32()) + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 5, + seed: 1009, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* 
parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("multiple batches with varying row counts", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int32("id") + .utf8("name") + .bool("active") + .build() + + const generated = yield* FlightDataGenerator.generateMultiBatchFlightData(testSchema, { + rowsPerBatch: [5, 20, 1, 100, 50], + seed: 2001, + defaultNullRate: 0.15 + }) + + expect(generated.batches.length).toBe(5) + expect(generated.totalRows).toBe(176) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + + for (let i = 0; i < generated.batches.length; i++) { + const batch = generated.batches[i] + const recordBatch = yield* parseRecordBatch(batch.flightData) + const decoded = decodeRecordBatch(recordBatch, batch.flightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, batch.expectedValues) + expect(comparison.success, `Batch ${i}: ${formatComparisonErrors(comparison.errors)}`).toBe(true) + } + })) + + it.effect("batch with empty batch in middle", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int32("id") + .utf8("name") + .build() + + const generated = yield* FlightDataGenerator.generateMultiBatchFlightData(testSchema, { + rowsPerBatch: [10, 0, 10], + seed: 2002, + defaultNullRate: 0 + }) + + expect(generated.batches.length).toBe(3) + expect(generated.totalRows).toBe(20) + expect(generated.batches[1].numRows).toBe(0) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + + for (let i = 0; i < generated.batches.length; i++) { + const batch = generated.batches[i] + const recordBatch = yield* parseRecordBatch(batch.flightData) + const decoded = decodeRecordBatch(recordBatch, batch.flightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, batch.expectedValues) + expect(comparison.success, `Batch ${i}: ${formatComparisonErrors(comparison.errors)}`).toBe(true) + } + })) + + it.effect("many small batches", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int32("value") + .build() + + // 100 batches of 10 rows each + const generated = yield* FlightDataGenerator.generateMultiBatchFlightData(testSchema, { + rowsPerBatch: Array(100).fill(10), + seed: 2003, + defaultNullRate: 0.05 + }) + + expect(generated.batches.length).toBe(100) + expect(generated.totalRows).toBe(1000) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + + for (let i = 0; i < generated.batches.length; i++) { + const batch = generated.batches[i] + const recordBatch = yield* parseRecordBatch(batch.flightData) + const decoded = decodeRecordBatch(recordBatch, batch.flightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, batch.expectedValues) + expect(comparison.success, `Batch ${i}: ${formatComparisonErrors(comparison.errors)}`).toBe(true) + } + })) + + it.effect("multi-batch with complex nested types", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .int32("id") + .list("tags", (b) => b.utf8()) + .struct("metadata", (s) => + s + 
.utf8("key")
+            .int32("count"))
+        .build()
+
+      const generated = yield* FlightDataGenerator.generateMultiBatchFlightData(testSchema, {
+        rowsPerBatch: [5, 10, 5],
+        seed: 2004,
+        defaultNullRate: 0.1
+      })
+
+      const parsedSchema = yield* parseSchema(generated.schemaFlightData)
+
+      for (let i = 0; i < generated.batches.length; i++) {
+        const batch = generated.batches[i]
+        const recordBatch = yield* parseRecordBatch(batch.flightData)
+        const decoded = decodeRecordBatch(recordBatch, batch.flightData.dataBody, parsedSchema)
+
+        const comparison = verifyDecodedValues(testSchema, decoded, batch.expectedValues)
+        expect(comparison.success, `Batch ${i}: ${formatComparisonErrors(comparison.errors)}`).toBe(true)
+      }
+    }))
+
+  it.effect("float32 with special values (NaN, Infinity, -0)", ({ expect }) =>
+    Effect.gen(function*() {
+      const testSchema = SchemaBuilder.schema()
+        .float32("value")
+        .build()
+
+      const generated = yield* FlightDataGenerator.generateFlightData(testSchema, {
+        numRows: 100,
+        seed: 3000,
+        defaultNullRate: 0.1,
+        fields: {
+          value: { includeSpecialFloats: true, specialFloatRate: 0.2 }
+        }
+      })
+
+      const parsedSchema = yield* parseSchema(generated.schemaFlightData)
+      const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData)
+      const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema)
+
+      // Verify we have some special values
+      const values = generated.expectedValues.value as Array<number | null>
+      const hasNaN = values.some((v) => v !== null && Number.isNaN(v))
+      const hasInfinity = values.some((v) => v === Infinity || v === -Infinity)
+
+      expect(hasNaN || hasInfinity).toBe(true)
+
+      const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues)
+      expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true)
+    }))
+
+  it.effect("float64 with special values", ({ expect }) =>
+    Effect.gen(function*() {
+      const testSchema = SchemaBuilder.schema()
+        .float64("value")
+        .build()
+
+      const generated = yield* FlightDataGenerator.generateFlightData(testSchema, {
+        numRows: 100,
+        seed: 3001,
+        defaultNullRate: 0.1,
+        fields: {
+          value: { includeSpecialFloats: true, specialFloatRate: 0.2 }
+        }
+      })
+
+      const parsedSchema = yield* parseSchema(generated.schemaFlightData)
+      const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData)
+      const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema)
+
+      const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues)
+      expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true)
+    }))
+
+  it.effect("floats with extreme values", ({ expect }) =>
+    Effect.gen(function*() {
+      const testSchema = SchemaBuilder.schema()
+        .float32("f32")
+        .float64("f64")
+        .build()
+
+      // Generate with special floats to test extreme values like MAX_VALUE, MIN_VALUE
+      const generated = yield* FlightDataGenerator.generateFlightData(testSchema, {
+        numRows: 200,
+        seed: 3002,
+        defaultNullRate: 0,
+        fields: {
+          f32: { includeSpecialFloats: true, specialFloatRate: 0.3 },
+          f64: { includeSpecialFloats: true, specialFloatRate: 0.3 }
+        }
+      })
+
+      const parsedSchema = yield* parseSchema(generated.schemaFlightData)
+      const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData)
+      const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema)
+
+      const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues)
+      expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true)
+    }))
+
+  it.effect("float32 with wide range values", ({ expect }) =>
+    Effect.gen(function*() {
+      const testSchema = SchemaBuilder.schema()
+        .float32("value")
+        .build()
+
+      // Test without special floats but with the expanded range
+      const generated = yield* FlightDataGenerator.generateFlightData(testSchema, {
+        numRows: 100,
+        seed: 3003,
+        defaultNullRate: 0
+      })
+
+      const parsedSchema = yield* parseSchema(generated.schemaFlightData)
+      const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData)
+      const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema)
+
+      // Verify values span a wide range (not just -1000 to 1000)
+      const values = generated.expectedValues.value as Array<number>
+      const maxAbs = Math.max(...values.map(Math.abs))
+      expect(maxAbs).toBeGreaterThan(1000)
+
+      const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues)
+      expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true)
+    }))
+
+  it.effect("float16 type", ({ expect }) =>
+    Effect.gen(function*() {
+      const testSchema = SchemaBuilder.schema()
+        .float16("half")
+        .build()
+
+      const generated = yield* FlightDataGenerator.generateFlightData(testSchema, {
+        numRows: 20,
+        seed: 4000,
+        defaultNullRate: 0.1
+      })
+
+      const parsedSchema = yield* parseSchema(generated.schemaFlightData)
+      const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData)
+      const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema)
+
+      const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues)
+      expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true)
+    }))
+
+  it.effect("large-utf8 type", ({ expect }) =>
+    Effect.gen(function*() {
+      const testSchema = SchemaBuilder.schema()
+        .largeUtf8("bigText")
+        .build()
+
+      const generated = yield* FlightDataGenerator.generateFlightData(testSchema, {
+        numRows: 10,
+        seed: 4001,
+        defaultNullRate: 0.2
+      })
+
+      const parsedSchema = yield* parseSchema(generated.schemaFlightData)
+      const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData)
+      const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema)
+
+      const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues)
+      expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true)
+    }))
+
+  it.effect("large-binary type", ({ expect }) =>
+    Effect.gen(function*() {
+      const testSchema = SchemaBuilder.schema()
+        .largeBinary("bigData")
+        .build()
+
+      const generated = yield* FlightDataGenerator.generateFlightData(testSchema, {
+        numRows: 10,
+        seed: 4002,
+        defaultNullRate: 0.2
+      })
+
+      const parsedSchema = yield* parseSchema(generated.schemaFlightData)
+      const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData)
+      const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema)
+
+      const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues)
+      expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true)
+    }))
+
+  it.effect("large-list type", ({ expect }) =>
+    Effect.gen(function*() {
+      const testSchema = SchemaBuilder.schema()
+        .largeList("bigList", (b) => b.int32())
+        .build()
+
+      const generated = yield* FlightDataGenerator.generateFlightData(testSchema, {
+        numRows: 8,
+        
seed: 4003, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("fixed-size-list type", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .fixedSizeList("fixed3", 3, (b) => b.int32()) + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 4004, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("time types (all units)", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .timeSecond("timeSec") + .timeMillisecond("timeMs") + .timeMicrosecond("timeUs") + .timeNanosecond("timeNs") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 4005, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("duration type", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .duration("durSec", "SECOND") + .duration("durMs", "MILLISECOND") + .duration("durUs", "MICROSECOND") + .duration("durNs", "NANOSECOND") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 4006, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("interval types (all units)", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .intervalYearMonth("intervalYM") + .intervalDayTime("intervalDT") + .intervalMonthDayNano("intervalMDN") + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 4007, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const 
comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("sparse union type", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .union("choice", "SPARSE", (u) => + u + .variant("intVal", (b) => b.int32()) + .variant("strVal", (b) => b.utf8())) + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 10, + seed: 4008, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) + + it.effect("dense union type", ({ expect }) => + Effect.gen(function*() { + const testSchema = SchemaBuilder.schema() + .union("tagged", "DENSE", (u) => + u + .variant("number", (b) => b.float64()) + .variant("text", (b) => b.utf8()) + .variant("flag", (b) => b.bool())) + .build() + + const generated = yield* FlightDataGenerator.generateFlightData(testSchema, { + numRows: 15, + seed: 4009, + defaultNullRate: 0.1 + }) + + const parsedSchema = yield* parseSchema(generated.schemaFlightData) + const recordBatch = yield* parseRecordBatch(generated.recordBatchFlightData) + const decoded = decodeRecordBatch(recordBatch, generated.recordBatchFlightData.dataBody, parsedSchema) + + const comparison = verifyDecodedValues(testSchema, decoded, generated.expectedValues) + expect(comparison.success, formatComparisonErrors(comparison.errors)).toBe(true) + })) +}) diff --git a/packages/amp/test/arrow-test-harness/BufferUtils.ts b/packages/amp/test/arrow-test-harness/BufferUtils.ts new file mode 100644 index 0000000..b52a28d --- /dev/null +++ b/packages/amp/test/arrow-test-harness/BufferUtils.ts @@ -0,0 +1,355 @@ +/** + * Buffer Utilities for Arrow Test Harness + * @internal + */ + +// ============================================================================= +// Alignment +// ============================================================================= + +export const ARROW_ALIGNMENT = 8 + +export const align8 = (size: number): number => { + const remainder = size % ARROW_ALIGNMENT + return remainder === 0 ? 
size : size + (ARROW_ALIGNMENT - remainder)
+}
+
+export const padToAlignment = (buffer: Uint8Array): Uint8Array => {
+  const alignedSize = align8(buffer.length)
+  if (alignedSize === buffer.length) {
+    return buffer
+  }
+  const padded = new Uint8Array(alignedSize)
+  padded.set(buffer)
+  return padded
+}
+
+// =============================================================================
+// Validity Bitmaps
+// =============================================================================
+
+export const createValidityBitmap = (
+  values: ReadonlyArray<unknown>
+): { bitmap: Uint8Array; nullCount: number } => {
+  let nullCount = 0
+  for (const value of values) {
+    if (value === null) nullCount++
+  }
+
+  // If no nulls, return empty buffer (Arrow optimization)
+  if (nullCount === 0) {
+    return { bitmap: new Uint8Array(0), nullCount: 0 }
+  }
+
+  const numBytes = Math.ceil(values.length / 8)
+  const bitmap = new Uint8Array(align8(numBytes))
+
+  for (let i = 0; i < values.length; i++) {
+    if (values[i] !== null) {
+      const byteIndex = Math.floor(i / 8)
+      const bitIndex = i % 8
+      bitmap[byteIndex] |= 1 << bitIndex
+    }
+  }
+
+  return { bitmap, nullCount }
+}
+
+export const createValidityBitmapFromFlags = (validity: ReadonlyArray<boolean>): Uint8Array => {
+  const numBytes = Math.ceil(validity.length / 8)
+  const bitmap = new Uint8Array(align8(numBytes))
+
+  for (let i = 0; i < validity.length; i++) {
+    if (validity[i]) {
+      const byteIndex = Math.floor(i / 8)
+      const bitIndex = i % 8
+      bitmap[byteIndex] |= 1 << bitIndex
+    }
+  }
+
+  return bitmap
+}
+
+// =============================================================================
+// Offset Buffers
+// =============================================================================
+
+export const createInt32OffsetBuffer = (offsets: ReadonlyArray<number>): Uint8Array => {
+  const buffer = new Uint8Array(align8(offsets.length * 4))
+  const view = new DataView(buffer.buffer)
+
+  for (let i = 0; i < offsets.length; i++) {
+    view.setInt32(i * 4, offsets[i], true)
+  }
+
+  return buffer
+}
+
+export const createInt64OffsetBuffer = (offsets: ReadonlyArray<bigint>): Uint8Array => {
+  const buffer = new Uint8Array(align8(offsets.length * 8))
+  const view = new DataView(buffer.buffer)
+
+  for (let i = 0; i < offsets.length; i++) {
+    view.setBigInt64(i * 8, offsets[i], true)
+  }
+
+  return buffer
+}
+
+// =============================================================================
+// Variable-Length Data Buffers
+// =============================================================================
+
+export const createVariableLengthBuffers = (
+  values: ReadonlyArray<string | Uint8Array | null>,
+  large: boolean
+): { offsets: Uint8Array; data: Uint8Array } => {
+  const encoder = new TextEncoder()
+  const encodedValues: Array<Uint8Array> = []
+  const offsets: Array<number | bigint> = large ? [0n] : [0]
+  let currentOffset = large ? 0n : 0
+
+  for (const value of values) {
+    if (value === null) {
+      offsets.push(currentOffset)
+    } else {
+      const bytes = typeof value === "string" ? encoder.encode(value) : value
+      encodedValues.push(bytes)
+      if (large) {
+        currentOffset = (currentOffset as bigint) + BigInt(bytes.length)
+      } else {
+        currentOffset = (currentOffset as number) + bytes.length
+      }
+      offsets.push(currentOffset)
+    }
+  }
+
+  const totalLength = large ? Number(currentOffset as bigint) : (currentOffset as number)
+  const data = new Uint8Array(align8(totalLength))
+  let pos = 0
+  for (const bytes of encodedValues) {
+    data.set(bytes, pos)
+    pos += bytes.length
+  }
+
+  const offsetBuffer = large
+    ? createInt64OffsetBuffer(offsets as Array<bigint>)
+    : createInt32OffsetBuffer(offsets as Array<number>)
+
+  return { offsets: offsetBuffer, data }
+}
+
+// =============================================================================
+// Fixed-Width Data Buffers
+// =============================================================================
+
+export const createBoolDataBuffer = (values: ReadonlyArray<boolean | null>): Uint8Array => {
+  const numBytes = Math.ceil(values.length / 8)
+  const buffer = new Uint8Array(align8(numBytes))
+
+  for (let i = 0; i < values.length; i++) {
+    if (values[i] === true) {
+      const byteIndex = Math.floor(i / 8)
+      const bitIndex = i % 8
+      buffer[byteIndex] |= 1 << bitIndex
+    }
+  }
+
+  return buffer
+}
+
+export const createInt8DataBuffer = (values: ReadonlyArray<number | null>, signed: boolean): Uint8Array => {
+  const buffer = new Uint8Array(align8(values.length))
+  const view = new DataView(buffer.buffer)
+
+  for (let i = 0; i < values.length; i++) {
+    const value = values[i] ?? 0
+    if (signed) {
+      view.setInt8(i, value)
+    } else {
+      view.setUint8(i, value)
+    }
+  }
+
+  return buffer
+}
+
+export const createInt16DataBuffer = (values: ReadonlyArray<number | null>, signed: boolean): Uint8Array => {
+  const buffer = new Uint8Array(align8(values.length * 2))
+  const view = new DataView(buffer.buffer)
+
+  for (let i = 0; i < values.length; i++) {
+    const value = values[i] ?? 0
+    if (signed) {
+      view.setInt16(i * 2, value, true)
+    } else {
+      view.setUint16(i * 2, value, true)
+    }
+  }
+
+  return buffer
+}
+
+export const createInt32DataBuffer = (values: ReadonlyArray<number | null>, signed: boolean): Uint8Array => {
+  const buffer = new Uint8Array(align8(values.length * 4))
+  const view = new DataView(buffer.buffer)
+
+  for (let i = 0; i < values.length; i++) {
+    const value = values[i] ?? 0
+    if (signed) {
+      view.setInt32(i * 4, value, true)
+    } else {
+      view.setUint32(i * 4, value, true)
+    }
+  }
+
+  return buffer
+}
+
+export const createInt64DataBuffer = (values: ReadonlyArray<bigint | null>, signed: boolean): Uint8Array => {
+  const buffer = new Uint8Array(align8(values.length * 8))
+  const view = new DataView(buffer.buffer)
+
+  for (let i = 0; i < values.length; i++) {
+    const value = values[i] ?? 0n
+    if (signed) {
+      view.setBigInt64(i * 8, value, true)
+    } else {
+      view.setBigUint64(i * 8, value, true)
+    }
+  }
+
+  return buffer
+}
+
+export const createFloat32DataBuffer = (values: ReadonlyArray<number | null>): Uint8Array => {
+  const buffer = new Uint8Array(align8(values.length * 4))
+  const view = new DataView(buffer.buffer)
+
+  for (let i = 0; i < values.length; i++) {
+    view.setFloat32(i * 4, values[i] ?? 0, true)
+  }
+
+  return buffer
+}
+
+export const createFloat64DataBuffer = (values: ReadonlyArray<number | null>): Uint8Array => {
+  const buffer = new Uint8Array(align8(values.length * 8))
+  const view = new DataView(buffer.buffer)
+
+  for (let i = 0; i < values.length; i++) {
+    view.setFloat64(i * 8, values[i] ?? 0, true)
+  }
+
+  return buffer
+}
+
+const encodeFloat16 = (value: number): number => {
+  if (value === 0) return 0
+  if (!Number.isFinite(value)) {
+    if (Number.isNaN(value)) return 0x7E00
+    return value > 0 ? 0x7C00 : 0xFC00
+  }
+
+  const sign = value < 0 ? 1 : 0
+  const absValue = Math.abs(value)
+
+  if (absValue < 6.103515625e-5) {
+    const mantissa = Math.round(absValue / 5.960464477539063e-8)
+    return (sign << 15) | mantissa
+  }
+
+  let exponent = Math.floor(Math.log2(absValue))
+  let mantissa = absValue / Math.pow(2, exponent) - 1
+
+  exponent += 15
+  if (exponent >= 31) return (sign << 15) | 0x7C00
+  if (exponent <= 0) return (sign << 15)
+
+  mantissa = Math.round(mantissa * 1024)
+  return (sign << 15) | (exponent << 10) | (mantissa & 0x3FF)
+}
+
+export const createFloat16DataBuffer = (values: ReadonlyArray<number | null>): Uint8Array => {
+  const buffer = new Uint8Array(align8(values.length * 2))
+  const view = new DataView(buffer.buffer)
+
+  for (let i = 0; i < values.length; i++) {
+    const encoded = encodeFloat16(values[i] ?? 0)
+    view.setUint16(i * 2, encoded, true)
+  }
+
+  return buffer
+}
+
+export const createFixedSizeBinaryDataBuffer = (
+  values: ReadonlyArray<Uint8Array | null>,
+  byteWidth: number
+): Uint8Array => {
+  const buffer = new Uint8Array(align8(values.length * byteWidth))
+
+  for (let i = 0; i < values.length; i++) {
+    const value = values[i]
+    if (value !== null) {
+      buffer.set(value.subarray(0, byteWidth), i * byteWidth)
+    }
+  }
+
+  return buffer
+}
+
+// =============================================================================
+// Decimal Data Buffers
+// =============================================================================
+
+const bigIntToBytes = (value: bigint, byteWidth: number): Uint8Array => {
+  const bytes = new Uint8Array(byteWidth)
+  let v = value < 0n ? -value : value
+  const isNegative = value < 0n
+
+  for (let i = 0; i < byteWidth; i++) {
+    bytes[i] = Number(v & 0xFFn)
+    v >>= 8n
+  }
+
+  if (isNegative) {
+    let carry = 1
+    for (let i = 0; i < byteWidth; i++) {
+      const inverted = (~bytes[i] & 0xFF) + carry
+      bytes[i] = inverted & 0xFF
+      carry = inverted >> 8
+    }
+  }
+
+  return bytes
+}
+
+export const createDecimalDataBuffer = (
+  values: ReadonlyArray<bigint | null>,
+  bitWidth: 128 | 256
+): Uint8Array => {
+  const byteWidth = bitWidth / 8
+  const buffer = new Uint8Array(align8(values.length * byteWidth))
+
+  for (let i = 0; i < values.length; i++) {
+    const value = values[i] ?? 0n
+    const bytes = bigIntToBytes(value, byteWidth)
+    buffer.set(bytes, i * byteWidth)
+  }
+
+  return buffer
+}
+
+// =============================================================================
+// Union Type Buffers
+// =============================================================================
+
+export const createTypeIdBuffer = (typeIds: ReadonlyArray<number>): Uint8Array => {
+  const buffer = new Uint8Array(align8(typeIds.length))
+
+  for (let i = 0; i < typeIds.length; i++) {
+    buffer[i] = typeIds[i]
+  }
+
+  return buffer
+}
diff --git a/packages/amp/test/arrow-test-harness/FlatBufferWriter.ts b/packages/amp/test/arrow-test-harness/FlatBufferWriter.ts
new file mode 100644
index 0000000..bde5481
--- /dev/null
+++ b/packages/amp/test/arrow-test-harness/FlatBufferWriter.ts
@@ -0,0 +1,299 @@
+/**
+ * FlatBuffer Writer for Arrow Test Harness
+ *
+ * Writes FlatBuffer-encoded data that can be read by FlatBufferReader.
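+ *
+ * A sketch of the wire layout the methods below emit: every table begins with
+ * an i32 soffset pointing back to its vtable; the vtable is
+ * [vtableSize: i16][tableSize: i16] followed by one i16 per field giving that
+ * field's byte offset within the table (0 = field absent). Identical vtables
+ * are deduplicated via `vtableCache`, and `finish` prefixes the whole buffer
+ * with a u32 offset to the root table. All values are little-endian.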
+ *
+ * @internal
+ */
+
+export class FlatBufferWriter {
+  private buffer: Uint8Array
+  private position: number = 0
+  private vtableCache: Map<string, number> = new Map()
+
+  constructor(initialSize: number = 4096) {
+    this.buffer = new Uint8Array(initialSize)
+  }
+
+  private grow(needed: number): void {
+    if (this.position + needed <= this.buffer.length) return
+    const newSize = Math.max(this.buffer.length * 2, this.position + needed)
+    const newBuffer = new Uint8Array(newSize)
+    newBuffer.set(this.buffer)
+    this.buffer = newBuffer
+  }
+
+  private pad(alignment: number): void {
+    const mask = alignment - 1
+    while (this.position & mask) {
+      this.grow(1)
+      this.buffer[this.position++] = 0
+    }
+  }
+
+  currentOffset(): number {
+    return this.position
+  }
+
+  writeByte(value: number): void {
+    this.grow(1)
+    this.buffer[this.position++] = value & 0xff
+  }
+
+  writeInt16(value: number): void {
+    this.pad(2)
+    this.grow(2)
+    new DataView(this.buffer.buffer, this.position, 2).setInt16(0, value, true)
+    this.position += 2
+  }
+
+  writeInt32(value: number): void {
+    this.pad(4)
+    this.grow(4)
+    new DataView(this.buffer.buffer, this.position, 4).setInt32(0, value, true)
+    this.position += 4
+  }
+
+  writeInt64(value: bigint): void {
+    this.pad(8)
+    this.grow(8)
+    new DataView(this.buffer.buffer, this.position, 8).setBigInt64(0, value, true)
+    this.position += 8
+  }
+
+  // Raw write methods without alignment (for table field writing)
+  private writeInt16Raw(value: number): void {
+    this.grow(2)
+    new DataView(this.buffer.buffer, this.position, 2).setInt16(0, value, true)
+    this.position += 2
+  }
+
+  private writeInt32Raw(value: number): void {
+    this.grow(4)
+    new DataView(this.buffer.buffer, this.position, 4).setInt32(0, value, true)
+    this.position += 4
+  }
+
+  private writeInt64Raw(value: bigint): void {
+    this.grow(8)
+    new DataView(this.buffer.buffer, this.position, 8).setBigInt64(0, value, true)
+    this.position += 8
+  }
+
+  writeBytes(bytes: Uint8Array): void {
+    this.grow(bytes.length)
+    this.buffer.set(bytes, this.position)
+    this.position += bytes.length
+  }
+
+  writeString(value: string): number {
+    const bytes = new TextEncoder().encode(value)
+    this.pad(4)
+    const offset = this.position
+
+    // Length prefix
+    this.writeInt32(bytes.length)
+
+    // String bytes + null terminator
+    this.grow(bytes.length + 1)
+    this.buffer.set(bytes, this.position)
+    this.position += bytes.length
+    this.buffer[this.position++] = 0
+
+    this.pad(4)
+    return offset
+  }
+
+  writeOffsetVector(offsets: ReadonlyArray<number>): number {
+    this.pad(4)
+    const vectorOffset = this.position
+
+    // Write count
+    this.writeInt32(offsets.length)
+
+    // Write each offset as relative offset from its position
+    for (const target of offsets) {
+      const relOffset = target - this.position
+      this.writeInt32(relOffset)
+    }
+
+    return vectorOffset
+  }
+
+  writeStructVector(data: Uint8Array, structSize: number): number {
+    this.pad(4)
+    const offset = this.position
+    const count = data.length / structSize
+
+    this.writeInt32(count)
+    this.writeBytes(data)
+
+    return offset
+  }
+
+  startTable(): TableBuilder {
+    return new TableBuilder()
+  }
+
+  finishTable(builder: TableBuilder): number {
+    const { fields, maxFieldIndex } = builder.build()
+
+    if (maxFieldIndex < 0) {
+      // Empty table
+      this.pad(2)
+      const vtableOffset = this.position
+      this.writeInt16(4) // vtable size
+      this.writeInt16(4) // table size
+
+      this.pad(4)
+      const tableOffset = this.position
+      this.writeInt32(tableOffset - vtableOffset) // soffset to vtable
+      return tableOffset
+    }
+
+    // Calculate field layout within table (after the 4-byte vtable pointer)
+    const fieldCount = maxFieldIndex + 1
+    const fieldLayout: Array<{ index: number; offset: number; field: TableField }> = []
+
+    // Sort by size descending for alignment efficiency
+    const sortedFields = [...fields.entries()].sort((a, b) => b[1].size - a[1].size)
+
+    let tableContentOffset = 4 // Start after vtable pointer
+    for (const [index, field] of sortedFields) {
+      // Align to field size
+      const align = field.size
+      tableContentOffset = (tableContentOffset + align - 1) & ~(align - 1)
+      fieldLayout.push({ index, offset: tableContentOffset, field })
+      tableContentOffset += field.size
+    }
+
+    const tableSize = tableContentOffset
+
+    // Build vtable: [vtableSize: i16] [tableSize: i16] [field0: i16] [field1: i16] ...
+    const vtableSize = 4 + fieldCount * 2
+    const vtable = new Uint8Array(vtableSize)
+    const vtableView = new DataView(vtable.buffer)
+    vtableView.setInt16(0, vtableSize, true)
+    vtableView.setInt16(2, tableSize, true)
+
+    // Fill field offsets in vtable
+    for (const { index, offset } of fieldLayout) {
+      vtableView.setInt16(4 + index * 2, offset, true)
+    }
+
+    // Check vtable cache for deduplication
+    const vtableKey = Array.from(vtable).join(",")
+    let vtableOffset = this.vtableCache.get(vtableKey)
+
+    if (vtableOffset === undefined) {
+      this.pad(2)
+      vtableOffset = this.position
+      this.writeBytes(vtable)
+      this.vtableCache.set(vtableKey, vtableOffset)
+    }
+
+    // Write table
+    this.pad(4)
+    const tableOffset = this.position
+
+    // soffset_t pointing back to vtable
+    this.writeInt32(tableOffset - vtableOffset)
+
+    // Write fields in offset order
+    const byOffset = [...fieldLayout].sort((a, b) => a.offset - b.offset)
+
+    for (const { field, offset } of byOffset) {
+      const targetPos = tableOffset + offset
+
+      // Pad to target position
+      while (this.position < targetPos) {
+        this.writeByte(0)
+      }
+
+      // Write field value (using raw methods to avoid re-alignment)
+      if (field.type === "offset") {
+        const relOffset = (field.value as number) - this.position
+        this.writeInt32Raw(relOffset)
+      } else if (field.size === 1) {
+        this.writeByte(field.value as number)
+      } else if (field.size === 2) {
+        this.writeInt16Raw(field.value as number)
+      } else if (field.size === 4) {
+        this.writeInt32Raw(field.value as number)
+      } else if (field.size === 8) {
+        this.writeInt64Raw(field.value as bigint)
+      }
+    }
+
+    // Pad to full table size
+    while (this.position < tableOffset + tableSize) {
+      this.writeByte(0)
+    }
+
+    return tableOffset
+  }
+
+  finish(rootTableOffset: number): Uint8Array {
+    this.pad(4)
+
+    // FlatBuffer: first 4 bytes are uoffset_t to root table
+    const finalBuffer = new Uint8Array(this.position + 4)
+    const view = new DataView(finalBuffer.buffer)
+
+    // Root offset is relative to byte 0, pointing to rootTableOffset + 4
+    view.setUint32(0, rootTableOffset + 4, true)
+    finalBuffer.set(this.buffer.subarray(0, this.position), 4)
+
+    return finalBuffer
+  }
+}
+
+interface TableField {
+  value: number | bigint
+  size: number
+  type: "scalar" | "offset"
+}
+
+export class TableBuilder {
+  private fields: Map<number, TableField> = new Map()
+
+  addInt8(fieldIndex: number, value: number): this {
+    this.fields.set(fieldIndex, { value, size: 1, type: "scalar" })
+    return this
+  }
+
+  addUint8(fieldIndex: number, value: number): this {
+    this.fields.set(fieldIndex, { value, size: 1, type: "scalar" })
+    return this
+  }
+
+  addInt16(fieldIndex: number, value: number): this {
+    this.fields.set(fieldIndex, { value, size: 2, type: "scalar" })
+    return this
+  }
+
+  addInt32(fieldIndex: number, value: number): this {
+    this.fields.set(fieldIndex, { value, size: 4, type: "scalar" })
+    return this
+  }
+
+  addInt64(fieldIndex: number, value: bigint): this {
+    this.fields.set(fieldIndex, { value, size: 8, type: "scalar" })
+    return this
+  }
+
+  addOffset(fieldIndex: number, offset: number): this {
+    this.fields.set(fieldIndex, { value: offset, size: 4, type: "offset" })
+    return this
+  }
+
+  addBool(fieldIndex: number, value: boolean): this {
+    this.fields.set(fieldIndex, { value: value ? 1 : 0, size: 1, type: "scalar" })
+    return this
+  }
+
+  build(): { fields: Map<number, TableField>; maxFieldIndex: number } {
+    const maxFieldIndex = this.fields.size > 0 ? Math.max(...this.fields.keys()) : -1
+    return { fields: this.fields, maxFieldIndex }
+  }
+}
diff --git a/packages/amp/test/arrow-test-harness/FlightDataGenerator.ts b/packages/amp/test/arrow-test-harness/FlightDataGenerator.ts
new file mode 100644
index 0000000..212da19
--- /dev/null
+++ b/packages/amp/test/arrow-test-harness/FlightDataGenerator.ts
@@ -0,0 +1,265 @@
+/**
+ * FlightData Generator - Main Orchestrator
+ * @internal
+ */
+import type { ArrowField, ArrowSchema } from "@edgeandnode/amp/internal/arrow-flight-ipc/Schema"
+import * as Effect from "effect/Effect"
+import * as Layer from "effect/Layer"
+import * as Random from "effect/Random"
+import * as BufferUtils from "./BufferUtils.ts"
+import * as GeneratorRegistry from "./GeneratorRegistry.ts"
+import * as MessageEncoder from "./MessageEncoder.ts"
+import * as Types from "./Types.ts"
+
+export const generateFlightData = (
+  schema: ArrowSchema,
+  options: Types.FlightDataGeneratorOptions = {}
+): Effect.Effect<Types.GeneratedFlightData> => {
+  const numRows = options.numRows ?? 100
+  const seed = typeof options.seed === "string" ? hashString(options.seed) : (options.seed ?? 42)
+
+  return Effect.gen(function*() {
+    const registry = yield* Types.GeneratorRegistry
+
+    // Generate data for each field
+    const columnResults: Array<Types.GeneratedBuffers> = []
+    const expectedValues: Record<string, Array<unknown>> = {}
+
+    for (const field of schema.fields) {
+      const config = getFieldConfig(field.name, options)
+      const generator = registry.getGenerator(field.type.typeId)
+      const result = yield* generator.generate(field, numRows, config)
+      columnResults.push(result)
+      expectedValues[field.name] = result.values
+    }
+
+    // Collect field nodes and buffers in depth-first order
+    const fieldNodes: Array<MessageEncoder.FieldNodeData> = []
+    const buffers: Array<{ data: Uint8Array }> = []
+
+    for (let i = 0; i < schema.fields.length; i++) {
+      collectBuffers(schema.fields[i], columnResults[i], fieldNodes, buffers)
+    }
+
+    // Calculate aligned offsets and build body
+    let bodyLength = 0n
+    const bufferDescriptors: Array<MessageEncoder.BufferData> = []
+
+    for (const buf of buffers) {
+      const alignedLength = BufferUtils.align8(buf.data.length)
+      bufferDescriptors.push({
+        offset: bodyLength,
+        length: BigInt(buf.data.length)
+      })
+      bodyLength += BigInt(alignedLength)
+    }
+
+    // Concatenate buffers into body (with alignment padding)
+    const body = new Uint8Array(Number(bodyLength))
+    let offset = 0
+    for (const buf of buffers) {
+      body.set(buf.data, offset)
+      offset += BufferUtils.align8(buf.data.length)
+    }
+
+    // Encode messages
+    const schemaHeader = MessageEncoder.encodeSchemaMessage(schema)
+    const recordBatchHeader = MessageEncoder.encodeRecordBatchMessage(
+      BigInt(numRows),
+      fieldNodes,
+      bufferDescriptors,
+      bodyLength
+    )
+
+    return {
+      schemaFlightData: {
+        dataHeader: schemaHeader,
+        dataBody: new Uint8Array(0)
+      },
+      recordBatchFlightData: {
+        dataHeader: recordBatchHeader,
+        dataBody: body
+      },
+      expectedValues,
+      schema
+    }
+  }).pipe(
+    Effect.provide(Layer.mergeAll(
+      Layer.succeed(Random.Random, Random.make(seed)),
+      GeneratorRegistry.Live
+    ))
+  )
+}
+
+const collectBuffers = (
+  field: ArrowField,
+  result: Types.GeneratedBuffers,
+  fieldNodes: Array<MessageEncoder.FieldNodeData>,
+  buffers: Array<{ data: Uint8Array }>
+): void => {
+  // Add field node
+  fieldNodes.push(result.fieldNode)
+
+  // Add buffers based on type
+  const typeId = field.type.typeId
+
+  // Validity bitmap (most types have this)
+  if (typeId !== "null") {
+    buffers.push({ data: result.validity })
+  }
+
+  // Offset buffer (variable-length and some nested types)
+  if (result.offsets !== null) {
+    buffers.push({ data: result.offsets })
+  }
+
+  // Data buffer (for types that have inline data)
+  if (hasDataBuffer(typeId)) {
+    buffers.push({ data: result.data })
+  }
+
+  // Recurse into children
+  for (let i = 0; i < result.children.length; i++) {
+    const childField = field.children[i]
+    collectBuffers(childField, result.children[i], fieldNodes, buffers)
+  }
+}
+
+const hasDataBuffer = (typeId: string): boolean => {
+  switch (typeId) {
+    case "null":
+    case "list":
+    case "large-list":
+    case "fixed-size-list":
+    case "struct":
+    case "map":
+    case "union":
+      return false
+    default:
+      return true
+  }
+}
+
+const getFieldConfig = (
+  fieldName: string,
+  options: Types.BaseGeneratorOptions
+): Types.FieldGeneratorConfig => {
+  const fieldConfig = options.fields?.[fieldName] ?? {}
+  return {
+    nullRate: fieldConfig.nullRate ?? options.defaultNullRate ?? 0.2,
+    ...fieldConfig
+  }
+}
+
+/**
+ * Generate multiple record batches with the same schema.
+ *
+ * Useful for testing streaming/chunked scenarios where data arrives in batches.
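+ *
+ * Example (parameters mirror the multi-batch tests in this change; `schema`
+ * is assumed to be built via SchemaBuilder):
+ *
+ * ```ts
+ * const generated = yield* generateMultiBatchFlightData(schema, {
+ *   rowsPerBatch: [5, 20, 1],
+ *   seed: 2001,
+ *   defaultNullRate: 0.15
+ * })
+ * // generated.batches[i].flightData, .expectedValues and .numRows describe batch i
+ * ```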
+ */
+export const generateMultiBatchFlightData = (
+  schema: ArrowSchema,
+  options: Types.MultiBatchGeneratorOptions
+): Effect.Effect<Types.GeneratedMultiBatchFlightData> => {
+  const rowsPerBatch = Array.isArray(options.rowsPerBatch)
+    ? options.rowsPerBatch
+    : [options.rowsPerBatch]
+  const seed = typeof options.seed === "string" ? hashString(options.seed) : (options.seed ?? 42)
+
+  return Effect.gen(function*() {
+    const registry = yield* Types.GeneratorRegistry
+
+    const batches: Array<Types.GeneratedBatch> = []
+    let totalRows = 0
+
+    for (let batchIndex = 0; batchIndex < rowsPerBatch.length; batchIndex++) {
+      const numRows = rowsPerBatch[batchIndex]
+      totalRows += numRows
+
+      // Generate data for each field
+      const columnResults: Array<Types.GeneratedBuffers> = []
+      const expectedValues: Record<string, Array<unknown>> = {}
+
+      for (const field of schema.fields) {
+        const config = getFieldConfig(field.name, options)
+        const generator = registry.getGenerator(field.type.typeId)
+        const result = yield* generator.generate(field, numRows, config)
+        columnResults.push(result)
+        expectedValues[field.name] = result.values
+      }
+
+      // Collect field nodes and buffers
+      const fieldNodes: Array<MessageEncoder.FieldNodeData> = []
+      const buffers: Array<{ data: Uint8Array }> = []
+
+      for (let i = 0; i < schema.fields.length; i++) {
+        collectBuffers(schema.fields[i], columnResults[i], fieldNodes, buffers)
+      }
+
+      // Calculate aligned offsets and build body
+      let bodyLength = 0n
+      const bufferDescriptors: Array<MessageEncoder.BufferData> = []
+
+      for (const buf of buffers) {
+        const alignedLength = BufferUtils.align8(buf.data.length)
+        bufferDescriptors.push({
+          offset: bodyLength,
+          length: BigInt(buf.data.length)
+        })
+        bodyLength += BigInt(alignedLength)
+      }
+
+      // Concatenate buffers into body
+      const body = new Uint8Array(Number(bodyLength))
+      let offset = 0
+      for (const buf of buffers) {
+        body.set(buf.data, offset)
+        offset += BufferUtils.align8(buf.data.length)
+      }
+
+      // Encode record batch message
+      const recordBatchHeader = MessageEncoder.encodeRecordBatchMessage(
+        BigInt(numRows),
+        fieldNodes,
+        bufferDescriptors,
+        bodyLength
+      )
+
+      batches.push({
+        flightData: {
+          dataHeader: recordBatchHeader,
+          dataBody: body
+        },
+        expectedValues,
+        numRows
+      })
+    }
+
+    // Encode schema message
+    const schemaHeader = MessageEncoder.encodeSchemaMessage(schema)
+
+    return {
+      schemaFlightData: {
+        dataHeader: schemaHeader,
+        dataBody: new Uint8Array(0)
+      },
+      batches,
+      schema,
+      totalRows
+    }
+  }).pipe(
+    Effect.provide(Layer.mergeAll(
+      Layer.succeed(Random.Random, Random.make(seed)),
+      GeneratorRegistry.Live
+    ))
+  )
+}
+
+const hashString = (str: string): number => {
+  let hash = 0
+  for (let i = 0; i < str.length; i++) {
+    const char = str.charCodeAt(i)
+    hash = ((hash << 5) - hash) + char
+    hash = hash & hash // Convert to 32-bit integer
+  }
+  return Math.abs(hash)
+}
diff --git a/packages/amp/test/arrow-test-harness/GeneratorRegistry.ts b/packages/amp/test/arrow-test-harness/GeneratorRegistry.ts
new file mode 100644
index 0000000..b788a7d
--- /dev/null
+++ b/packages/amp/test/arrow-test-harness/GeneratorRegistry.ts
@@ -0,0 +1,60 @@
+/**
+ * Generator Registry - maps type IDs to data generators
+ * @internal
+ */
+import * as Layer from "effect/Layer"
+import * as Decimal from "./generators/decimal.ts"
+import * as Nested from "./generators/nested.ts"
+import * as Primitives from "./generators/primitives.ts"
+import * as Strings from "./generators/strings.ts"
+import * as Temporal from "./generators/temporal.ts"
+import * as Types from "./Types.ts"
+
+const generators: Record<string, Types.DataGenerator> = {
+  // Primitives (typeId values from Schema.ts)
+  "null": Primitives.nullGenerator,
+  "bool": Primitives.boolGenerator,
+  "int": Primitives.intGenerator,
+  "float": Primitives.floatGenerator,
+
+  // Decimal
+  "decimal": Decimal.decimalGenerator,
+
+  // Strings & Binary
+  "utf8": Strings.utf8Generator,
+  "large-utf8": Strings.largeUtf8Generator,
+  "binary": Strings.binaryGenerator,
+  "large-binary": Strings.largeBinaryGenerator,
+  "fixed-size-binary": Strings.fixedSizeBinaryGenerator,
+
+  // Temporal
+  "date": Temporal.dateGenerator,
+  "time": Temporal.timeGenerator,
+  "timestamp": Temporal.timestampGenerator,
+  "duration": Temporal.durationGenerator,
+  "interval": Temporal.intervalGenerator,
+
+  // Nested
+  "list": Nested.listGenerator,
+  "large-list": Nested.largeListGenerator,
+  "fixed-size-list": Nested.fixedSizeListGenerator,
+  "struct": Nested.structGenerator,
+  "map": Nested.mapGenerator,
+  "union": Nested.unionGenerator
+}
+
+const getGenerator = (typeId: string): Types.DataGenerator => {
+  const generator = generators[typeId]
+  if (!generator) {
+    throw new Error(`No generator for type: ${typeId}`)
+  }
+  return generator
+}
+
+/**
+ * Live layer providing the GeneratorRegistry service.
+ */
+export const Live: Layer.Layer<Types.GeneratorRegistry> = Layer.succeed(
+  Types.GeneratorRegistry,
+  Types.GeneratorRegistry.of({ getGenerator })
+)
diff --git a/packages/amp/test/arrow-test-harness/MessageEncoder.ts b/packages/amp/test/arrow-test-harness/MessageEncoder.ts
new file mode 100644
index 0000000..4c8d6d2
--- /dev/null
+++ b/packages/amp/test/arrow-test-harness/MessageEncoder.ts
@@ -0,0 +1,289 @@
+/**
+ * Message Encoder for Arrow Test Harness
+ * @internal
+ */
+import {
+  type ArrowDataType,
+  ArrowDataTypeEnum,
+  type ArrowField,
+  type ArrowSchema,
+  DateUnit,
+  IntervalUnit,
+  MessageHeaderType,
+  Precision,
+  TimeUnit,
+  UnionMode
+} from "@edgeandnode/amp/internal/arrow-flight-ipc/Schema"
+import { FlatBufferWriter } from "./FlatBufferWriter.ts"
+
+const METADATA_VERSION = 4
+
+// =============================================================================
+// Schema Message
+// =============================================================================
+
+export const encodeSchemaMessage = (schema: ArrowSchema): Uint8Array => {
+  const writer = new FlatBufferWriter()
+
+  const fieldOffsets = schema.fields.map((field) => encodeField(writer, field))
+  const fieldsVectorOffset = writer.writeOffsetVector(fieldOffsets)
+
+  let metadataOffset: number | null = null
+  if (schema.metadata.size > 0) {
+    metadataOffset = encodeKeyValueVector(writer, schema.metadata)
+  }
+
+  const schemaBuilder = writer.startTable()
+  schemaBuilder.addInt16(0, schema.endianness)
+  schemaBuilder.addOffset(1, fieldsVectorOffset)
+  if (metadataOffset !== null) schemaBuilder.addOffset(2, metadataOffset)
+  const schemaOffset = writer.finishTable(schemaBuilder)
+
+  const messageBuilder = writer.startTable()
+  messageBuilder.addInt16(0, METADATA_VERSION)
+  messageBuilder.addUint8(1, MessageHeaderType.SCHEMA)
+  messageBuilder.addOffset(2, schemaOffset)
+  messageBuilder.addInt64(3, 0n)
+  const messageOffset = writer.finishTable(messageBuilder)
+
+  return writer.finish(messageOffset)
+}
+
+// =============================================================================
+// RecordBatch Message
+// =============================================================================
+
+export interface FieldNodeData {
+  readonly length: bigint
+  readonly nullCount: bigint
+}
+
+export interface BufferData {
+  readonly offset: bigint
+  readonly length: bigint
+}
+
+export const encodeRecordBatchMessage = (
+  numRows: bigint,
+  fieldNodes: ReadonlyArray<FieldNodeData>,
+  buffers: ReadonlyArray<BufferData>,
+  bodyLength: bigint
+): Uint8Array => {
+  const writer = new FlatBufferWriter()
+
+  // FieldNode structs (16 bytes: length i64 + nullCount i64)
+  let nodesOffset: number | null = null
+  if (fieldNodes.length > 0) {
+    const nodesData = new Uint8Array(fieldNodes.length * 16)
+    const nodesView = new DataView(nodesData.buffer)
+    fieldNodes.forEach((node, i) => {
+      nodesView.setBigInt64(i * 16, node.length, true)
+      nodesView.setBigInt64(i * 16 + 8, node.nullCount, true)
+    })
+    nodesOffset = writer.writeStructVector(nodesData, 16)
+  }
+
+  // Buffer structs (16 bytes: offset i64 + length i64)
+  let buffersOffset: number | null = null
+  if (buffers.length > 0) {
+    const buffersData = new Uint8Array(buffers.length * 16)
+    const buffersView = new DataView(buffersData.buffer)
+    buffers.forEach((buf, i) => {
+      buffersView.setBigInt64(i * 16, buf.offset, true)
+      buffersView.setBigInt64(i * 16 + 8, buf.length, true)
+    })
+    buffersOffset = writer.writeStructVector(buffersData, 16)
+  }
+
+  const rbBuilder = writer.startTable()
+  rbBuilder.addInt64(0, numRows)
+  if (nodesOffset !== null) rbBuilder.addOffset(1, nodesOffset)
+  if (buffersOffset !== null) rbBuilder.addOffset(2, buffersOffset)
+  const rbOffset = writer.finishTable(rbBuilder)
+
+  const messageBuilder = writer.startTable()
+  messageBuilder.addInt16(0, METADATA_VERSION)
+  messageBuilder.addUint8(1, MessageHeaderType.RECORD_BATCH)
+  messageBuilder.addOffset(2, rbOffset)
+  messageBuilder.addInt64(3, bodyLength)
+  const messageOffset = writer.finishTable(messageBuilder)
+
+  return writer.finish(messageOffset)
+}
+
+// =============================================================================
+// Field Encoding
+// =============================================================================
+
+const encodeField = (writer: FlatBufferWriter, field: ArrowField): number => {
+  const childOffsets = field.children.map((child) => encodeField(writer, child))
+  const childrenVectorOffset = childOffsets.length > 0 ? writer.writeOffsetVector(childOffsets) : null
+
+  const nameOffset = writer.writeString(field.name)
+  const [typeEnumValue, typeOffset] = encodeType(writer, field.type)
+
+  let metadataOffset: number | null = null
+  if (field.metadata.size > 0) {
+    metadataOffset = encodeKeyValueVector(writer, field.metadata)
+  }
+
+  const fieldBuilder = writer.startTable()
+  fieldBuilder.addOffset(0, nameOffset)
+  fieldBuilder.addBool(1, field.nullable)
+  fieldBuilder.addUint8(2, typeEnumValue)
+  if (typeOffset !== null) fieldBuilder.addOffset(3, typeOffset)
+  if (childrenVectorOffset !== null) fieldBuilder.addOffset(5, childrenVectorOffset)
+  if (metadataOffset !== null) fieldBuilder.addOffset(6, metadataOffset)
+
+  return writer.finishTable(fieldBuilder)
+}
+
+// =============================================================================
+// Type Encoding
+// =============================================================================
+
+const PRECISION_MAP: Record<string, Precision> = {
+  HALF: Precision.HALF,
+  SINGLE: Precision.SINGLE,
+  DOUBLE: Precision.DOUBLE
+}
+const DATE_UNIT_MAP: Record<string, DateUnit> = { DAY: DateUnit.DAY, MILLISECOND: DateUnit.MILLISECOND }
+const TIME_UNIT_MAP: Record<string, TimeUnit> = {
+  SECOND: TimeUnit.SECOND,
+  MILLISECOND: TimeUnit.MILLISECOND,
+  MICROSECOND: TimeUnit.MICROSECOND,
+  NANOSECOND: TimeUnit.NANOSECOND
+}
+const INTERVAL_UNIT_MAP: Record<string, IntervalUnit> = {
+  YEAR_MONTH: IntervalUnit.YEAR_MONTH,
+  DAY_TIME: IntervalUnit.DAY_TIME,
+  MONTH_DAY_NANO: IntervalUnit.MONTH_DAY_NANO
+}
+const UNION_MODE_MAP: Record<string, UnionMode> = { SPARSE: UnionMode.SPARSE, DENSE: UnionMode.DENSE }
+
+const encodeType = (writer: FlatBufferWriter, type: ArrowDataType): [ArrowDataTypeEnum, number | null] => {
+  const emptyTable = () => writer.finishTable(writer.startTable())
+
+  switch (type.typeId) {
+    case "null":
+      return [ArrowDataTypeEnum.NULL, emptyTable()]
+    case "bool":
+      return [ArrowDataTypeEnum.BOOL, emptyTable()]
+    case "binary":
+      return [ArrowDataTypeEnum.BINARY, emptyTable()]
+    case "large-binary":
+      return [ArrowDataTypeEnum.LARGE_BINARY, emptyTable()]
+    case "utf8":
+      return [ArrowDataTypeEnum.UTF8, emptyTable()]
+    case "large-utf8":
+      return [ArrowDataTypeEnum.LARGE_UTF8, emptyTable()]
+    case "list":
+      return [ArrowDataTypeEnum.LIST, emptyTable()]
+    case "large-list":
+      return [ArrowDataTypeEnum.LARGE_LIST, emptyTable()]
+    case "struct":
+      return [ArrowDataTypeEnum.STRUCT, emptyTable()]
+
+    case "int": {
+      const b = writer.startTable()
+      b.addInt32(0, type.bitWidth)
+      b.addBool(1, type.isSigned)
+      return [ArrowDataTypeEnum.INT, writer.finishTable(b)]
+    }
+
+    case "float": {
+      const b = writer.startTable()
+      b.addInt16(0, PRECISION_MAP[type.precision])
+      return [ArrowDataTypeEnum.FLOATING_POINT, writer.finishTable(b)]
+    }
+
+    case "decimal": {
+      const b = writer.startTable()
+      b.addInt32(0, type.precision)
+      b.addInt32(1, type.scale)
+      b.addInt32(2, type.bitWidth)
+      return [ArrowDataTypeEnum.DECIMAL, writer.finishTable(b)]
+    }
+
+    case "fixed-size-binary": {
+      const b = writer.startTable()
+      b.addInt32(0, type.byteWidth)
+      return [ArrowDataTypeEnum.FIXED_SIZE_BINARY, writer.finishTable(b)]
+    }
+
+    case "date": {
+      const b = writer.startTable()
+      b.addInt16(0, DATE_UNIT_MAP[type.unit])
+      return [ArrowDataTypeEnum.DATE, writer.finishTable(b)]
+    }
+
+    case "time": {
+      const b = writer.startTable()
+      b.addInt16(0, TIME_UNIT_MAP[type.unit])
+      b.addInt32(1, type.bitWidth)
+      return [ArrowDataTypeEnum.TIME, writer.finishTable(b)]
+    }
+
+    case "timestamp": {
+      let tzOffset: number | null = null
+      if (type.timezone !== null) tzOffset = writer.writeString(type.timezone)
+      const b = writer.startTable()
+      b.addInt16(0, TIME_UNIT_MAP[type.unit])
+      if (tzOffset !== null) b.addOffset(1, tzOffset)
+      return [ArrowDataTypeEnum.TIMESTAMP, writer.finishTable(b)]
+    }
+
+    case "interval": {
+      const b = writer.startTable()
+      b.addInt16(0, INTERVAL_UNIT_MAP[type.unit])
+      return [ArrowDataTypeEnum.INTERVAL, writer.finishTable(b)]
+    }
+
+    case "duration": {
+      const b = writer.startTable()
+      b.addInt16(0, TIME_UNIT_MAP[type.unit])
+      return [ArrowDataTypeEnum.DURATION, writer.finishTable(b)]
+    }
+
+    case "fixed-size-list": {
+      const b = writer.startTable()
+      b.addInt32(0, type.listSize)
+      return [ArrowDataTypeEnum.FIXED_SIZE_LIST, writer.finishTable(b)]
+    }
+
+    case "map": {
+      const b = writer.startTable()
+      b.addBool(0, type.keysSorted)
+      return [ArrowDataTypeEnum.MAP, writer.finishTable(b)]
+    }
+
+    case "union": {
+      // Write typeIds as int32 vector: [length: u32] [id0: i32] [id1: i32] ...
+      const typeIdsData = new Uint8Array(type.typeIds.length * 4)
+      const typeIdsView = new DataView(typeIdsData.buffer)
+      type.typeIds.forEach((id, i) => typeIdsView.setInt32(i * 4, id, true))
+      const typeIdsOffset = writer.writeStructVector(typeIdsData, 4)
+      const b = writer.startTable()
+      b.addInt16(0, UNION_MODE_MAP[type.mode])
+      b.addOffset(1, typeIdsOffset)
+      return [ArrowDataTypeEnum.UNION, writer.finishTable(b)]
+    }
+  }
+}
+
+// =============================================================================
+// Metadata Encoding
+// =============================================================================
+
+const encodeKeyValueVector = (writer: FlatBufferWriter, metadata: ReadonlyMap<string, string>): number => {
+  const kvOffsets: Array<number> = []
+  for (const [key, value] of metadata.entries()) {
+    const keyOffset = writer.writeString(key)
+    const valueOffset = writer.writeString(value)
+    const kvBuilder = writer.startTable()
+    kvBuilder.addOffset(0, keyOffset)
+    kvBuilder.addOffset(1, valueOffset)
+    kvOffsets.push(writer.finishTable(kvBuilder))
+  }
+  return writer.writeOffsetVector(kvOffsets)
+}
diff --git a/packages/amp/test/arrow-test-harness/RandomUtils.ts b/packages/amp/test/arrow-test-harness/RandomUtils.ts
new file mode 100644
index 0000000..b64e3d3
--- /dev/null
+++ b/packages/amp/test/arrow-test-harness/RandomUtils.ts
@@ -0,0 +1,219 @@
+/**
+ * Random Utilities for Arrow Test Harness
+ *
+ * Convenient helpers wrapping Effect's Random service for generating
+ * Arrow-specific test data types.
+ *
+ * @internal
+ */
+import * as Effect from "effect/Effect"
+import * as Random from "effect/Random"
+
+// =============================================================================
+// Basic Random Generators
+// =============================================================================
+
+/**
+ * Generate a random bigint in the range [min, max] inclusive.
+ */
+export const nextBigInt = (min: bigint, max: bigint): Effect.Effect<bigint, never, Random.Random> =>
+  Effect.gen(function*() {
+    const range = max - min + 1n
+    // For ranges that fit in a safe integer, use simple approach
+    if (range <= BigInt(Number.MAX_SAFE_INTEGER)) {
+      const randomValue = yield* Random.nextIntBetween(0, Number(range) - 1)
+      return min + BigInt(randomValue)
+    }
+    // For larger ranges, combine multiple random values
+    const high = yield* Random.nextIntBetween(0, 0x7FFFFFFF)
+    const low = yield* Random.nextIntBetween(0, 0xFFFFFFFF)
+    const combined = ((BigInt(high) << 32n) | BigInt(low >>> 0)) % range
+    return min + combined
+  })
+
+/**
+ * Generate a random float in the range [min, max).
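+ *
+ * Note: the draw is uniform in value, so over ranges spanning many orders of
+ * magnitude (e.g. the full float32 range used by nextFloat32 below) most
+ * samples land at large magnitudes rather than near zero.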
+ */ +export const nextFloat = (min: number, max: number): Effect.Effect => + Effect.map(Random.next, (n) => n * (max - min) + min) + +/** + * Returns true with the given probability. + */ +export const nextBoolWithProbability = (probability: number): Effect.Effect => + Effect.map(Random.next, (n) => n < probability) + +// ============================================================================= +// String and Bytes Generators +// ============================================================================= + +const alphanumericChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + +/** + * Generate a random alphanumeric string of the given length. + */ +export const nextString = (length: number): Effect.Effect => + Effect.gen(function*() { + let result = "" + for (let i = 0; i < length; i++) { + const idx = yield* Random.nextIntBetween(0, alphanumericChars.length - 1) + result += alphanumericChars[idx] + } + return result + }) + +/** + * Generate a random string with length between min and max. + */ +export const nextStringBetween = (minLength: number, maxLength: number): Effect.Effect => + Effect.gen(function*() { + const length = yield* Random.nextIntBetween(minLength, maxLength) + return yield* nextString(length) + }) + +/** + * Generate random bytes of the given length. + */ +export const nextBytes = (length: number): Effect.Effect => + Effect.gen(function*() { + const bytes = new Uint8Array(length) + for (let i = 0; i < length; i++) { + bytes[i] = yield* Random.nextIntBetween(0, 255) + } + return bytes + }) + +/** + * Generate random bytes with length between min and max. + */ +export const nextBytesBetween = ( + minLength: number, + maxLength: number +): Effect.Effect => + Effect.gen(function*() { + const length = yield* Random.nextIntBetween(minLength, maxLength) + return yield* nextBytes(length) + }) + +// ============================================================================= +// Date/Time Generators +// ============================================================================= + +/** + * Generate a random Date within a reasonable range (1970-2100). + */ +export const nextDate = (): Effect.Effect => + Effect.gen(function*() { + // Random timestamp between 1970-01-01 and 2100-01-01 + const minMs = 0 + const maxMs = 4102444800000 // 2100-01-01 + const timestamp = yield* Random.nextIntBetween(minMs, maxMs) + return new Date(timestamp) + }) + +/** + * Generate a random timestamp in milliseconds since epoch. + */ +export const nextTimestampMs = (): Effect.Effect => + Random.nextIntBetween(0, 4102444800000) + +/** + * Generate a random time of day in milliseconds (0 to 86399999). + */ +export const nextTimeOfDayMs = (): Effect.Effect => Random.nextIntBetween(0, 86399999) + +// ============================================================================= +// Array Generators +// ============================================================================= + +/** + * Generate an array of random values. + */ +export const nextArray = ( + length: number, + generator: Effect.Effect +): Effect.Effect, never, Random.Random> => + Effect.gen(function*() { + const result: Array = [] + for (let i = 0; i < length; i++) { + result.push(yield* generator) + } + return result + }) + +/** + * Generate an array of nullable values based on null rate. 
+ */ +export const nextNullableArray = ( + length: number, + generator: Effect.Effect, + nullRate: number +): Effect.Effect, never, Random.Random> => + Effect.gen(function*() { + const result: Array = [] + for (let i = 0; i < length; i++) { + const isNull = yield* nextBoolWithProbability(nullRate) + if (isNull) { + result.push(null) + } else { + result.push(yield* generator) + } + } + return result + }) + +// ============================================================================= +// Type-Specific Generators +// ============================================================================= + +/** + * Generate a random signed 8-bit integer. + */ +export const nextInt8 = (): Effect.Effect => Random.nextIntBetween(-128, 127) + +/** + * Generate a random unsigned 8-bit integer. + */ +export const nextUint8 = (): Effect.Effect => Random.nextIntBetween(0, 255) + +/** + * Generate a random signed 16-bit integer. + */ +export const nextInt16 = (): Effect.Effect => Random.nextIntBetween(-32768, 32767) + +/** + * Generate a random unsigned 16-bit integer. + */ +export const nextUint16 = (): Effect.Effect => Random.nextIntBetween(0, 65535) + +/** + * Generate a random signed 32-bit integer. + */ +export const nextInt32 = (): Effect.Effect => + Random.nextIntBetween(-2147483648, 2147483647) + +/** + * Generate a random unsigned 32-bit integer. + */ +export const nextUint32 = (): Effect.Effect => Random.nextIntBetween(0, 4294967295) + +/** + * Generate a random signed 64-bit integer. + */ +export const nextInt64 = (): Effect.Effect => + nextBigInt(-9223372036854775808n, 9223372036854775807n) + +/** + * Generate a random unsigned 64-bit integer. + */ +export const nextUint64 = (): Effect.Effect => nextBigInt(0n, 18446744073709551615n) + +/** + * Generate a random 32-bit float. + */ +export const nextFloat32 = (): Effect.Effect => nextFloat(-3.4028235e38, 3.4028235e38) + +/** + * Generate a random 64-bit float. 
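
A usage sketch (not part of the patch): the helpers above can be run deterministically by supplying a seeded Random service. This assumes Effect's `Random.make` and `Effect.withRandom`, which exist in recent Effect 3.x releases; only the helper names defined in this file are taken from the diff.

```ts
import * as Effect from "effect/Effect"
import * as Random from "effect/Random"
import * as Rand from "./RandomUtils.ts"

const sample = Effect.gen(function*() {
  const id = yield* Rand.nextUint32()
  const name = yield* Rand.nextStringBetween(3, 12)
  const createdAt = yield* Rand.nextTimestampMs()
  return { id, name, createdAt }
})

// The same seed always produces the same record.
const record = Effect.runSync(Effect.withRandom(sample, Random.make(42)))
```
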
+ */ +export const nextFloat64 = (): Effect.Effect => nextFloat(-1e100, 1e100) diff --git a/packages/amp/test/arrow-test-harness/SchemaBuilder.ts b/packages/amp/test/arrow-test-harness/SchemaBuilder.ts new file mode 100644 index 0000000..b9d79fe --- /dev/null +++ b/packages/amp/test/arrow-test-harness/SchemaBuilder.ts @@ -0,0 +1,413 @@ +/** + * Fluent Schema Builder + * @internal + */ +import { + ArrowField, + ArrowSchema, + BinaryType, + BoolType, + DateType, + DateUnit, + DecimalType, + DurationType, + Endianness, + FixedSizeBinaryType, + FixedSizeListType, + FloatingPointType, + IntervalType, + IntervalUnit, + IntType, + LargeBinaryType, + LargeListType, + LargeUtf8Type, + ListType, + MapType, + NullType, + Precision, + StructType, + TimestampType, + TimeType, + TimeUnit, + UnionMode, + UnionType, + Utf8Type +} from "@edgeandnode/amp/internal/arrow-flight-ipc/Schema" + +export class SchemaBuilder { + private readonly fields: Array = [] + private readonly schemaMetadata: Map = new Map() + + null(name: string, options?: FieldOptions): this { + return this.addField(name, NullType, options) + } + + bool(name: string, options?: FieldOptions): this { + return this.addField(name, BoolType, options) + } + + int8(name: string, options?: FieldOptions): this { + return this.addField(name, new IntType(8, true), options) + } + + int16(name: string, options?: FieldOptions): this { + return this.addField(name, new IntType(16, true), options) + } + + int32(name: string, options?: FieldOptions): this { + return this.addField(name, new IntType(32, true), options) + } + + int64(name: string, options?: FieldOptions): this { + return this.addField(name, new IntType(64, true), options) + } + + uint8(name: string, options?: FieldOptions): this { + return this.addField(name, new IntType(8, false), options) + } + + uint16(name: string, options?: FieldOptions): this { + return this.addField(name, new IntType(16, false), options) + } + + uint32(name: string, options?: FieldOptions): this { + return this.addField(name, new IntType(32, false), options) + } + + uint64(name: string, options?: FieldOptions): this { + return this.addField(name, new IntType(64, false), options) + } + + float16(name: string, options?: FieldOptions): this { + return this.addField(name, new FloatingPointType(Precision.HALF), options) + } + + float32(name: string, options?: FieldOptions): this { + return this.addField(name, new FloatingPointType(Precision.SINGLE), options) + } + + float64(name: string, options?: FieldOptions): this { + return this.addField(name, new FloatingPointType(Precision.DOUBLE), options) + } + + decimal(name: string, precision: number, scale: number, bitWidth: 128 | 256 = 128, options?: FieldOptions): this { + return this.addField(name, new DecimalType(precision, scale, bitWidth), options) + } + + utf8(name: string, options?: FieldOptions): this { + return this.addField(name, Utf8Type, options) + } + + largeUtf8(name: string, options?: FieldOptions): this { + return this.addField(name, LargeUtf8Type, options) + } + + binary(name: string, options?: FieldOptions): this { + return this.addField(name, BinaryType, options) + } + + largeBinary(name: string, options?: FieldOptions): this { + return this.addField(name, LargeBinaryType, options) + } + + fixedSizeBinary(name: string, byteWidth: number, options?: FieldOptions): this { + return this.addField(name, new FixedSizeBinaryType(byteWidth), options) + } + + dateDay(name: string, options?: FieldOptions): this { + return this.addField(name, new DateType(DateUnit.DAY), 
options) + } + + dateMillisecond(name: string, options?: FieldOptions): this { + return this.addField(name, new DateType(DateUnit.MILLISECOND), options) + } + + timeSecond(name: string, options?: FieldOptions): this { + return this.addField(name, new TimeType(TimeUnit.SECOND, 32), options) + } + + timeMillisecond(name: string, options?: FieldOptions): this { + return this.addField(name, new TimeType(TimeUnit.MILLISECOND, 32), options) + } + + timeMicrosecond(name: string, options?: FieldOptions): this { + return this.addField(name, new TimeType(TimeUnit.MICROSECOND, 64), options) + } + + timeNanosecond(name: string, options?: FieldOptions): this { + return this.addField(name, new TimeType(TimeUnit.NANOSECOND, 64), options) + } + + timestamp( + name: string, + unit: "SECOND" | "MILLISECOND" | "MICROSECOND" | "NANOSECOND" = "MICROSECOND", + timezone: string | null = null, + options?: FieldOptions + ): this { + const unitEnum = TimeUnit[unit] + return this.addField(name, new TimestampType(unitEnum, timezone), options) + } + + duration( + name: string, + unit: "SECOND" | "MILLISECOND" | "MICROSECOND" | "NANOSECOND" = "MICROSECOND", + options?: FieldOptions + ): this { + const unitEnum = TimeUnit[unit] + return this.addField(name, new DurationType(unitEnum), options) + } + + intervalYearMonth(name: string, options?: FieldOptions): this { + return this.addField(name, new IntervalType(IntervalUnit.YEAR_MONTH), options) + } + + intervalDayTime(name: string, options?: FieldOptions): this { + return this.addField(name, new IntervalType(IntervalUnit.DAY_TIME), options) + } + + intervalMonthDayNano(name: string, options?: FieldOptions): this { + return this.addField(name, new IntervalType(IntervalUnit.MONTH_DAY_NANO), options) + } + + list(name: string, itemBuilder: (b: FieldBuilder) => FieldBuilder, options?: FieldOptions): this { + const itemField = itemBuilder(new FieldBuilder("item")).build() + return this.addField(name, ListType, { ...options, children: [itemField] }) + } + + largeList(name: string, itemBuilder: (b: FieldBuilder) => FieldBuilder, options?: FieldOptions): this { + const itemField = itemBuilder(new FieldBuilder("item")).build() + return this.addField(name, LargeListType, { ...options, children: [itemField] }) + } + + fixedSizeList( + name: string, + listSize: number, + itemBuilder: (b: FieldBuilder) => FieldBuilder, + options?: FieldOptions + ): this { + const itemField = itemBuilder(new FieldBuilder("item")).build() + return this.addField(name, new FixedSizeListType(listSize), { ...options, children: [itemField] }) + } + + struct(name: string, structBuilder: (b: SchemaBuilder) => SchemaBuilder, options?: FieldOptions): this { + const nestedBuilder = structBuilder(new SchemaBuilder()) + return this.addField(name, StructType, { ...options, children: nestedBuilder.fields }) + } + + map( + name: string, + keyBuilder: (b: FieldBuilder) => FieldBuilder, + valueBuilder: (b: FieldBuilder) => FieldBuilder, + options?: MapFieldOptions + ): this { + const keyField = keyBuilder(new FieldBuilder("key")).nullable(false).build() + const valueField = valueBuilder(new FieldBuilder("value")).build() + + // Map's child is an "entries" struct with "key" and "value" fields + const entriesField = new ArrowField( + "entries", + StructType, + false, + new Map(), + [keyField, valueField], + undefined + ) + + return this.addField(name, new MapType(options?.keysSorted ?? 
false), { ...options, children: [entriesField] }) + } + + union( + name: string, + mode: "SPARSE" | "DENSE", + variantBuilder: (b: UnionBuilder) => UnionBuilder, + options?: FieldOptions + ): this { + const builder = variantBuilder(new UnionBuilder()) + const { children, typeIds } = builder.build() + const modeEnum = mode === "SPARSE" ? UnionMode.SPARSE : UnionMode.DENSE + return this.addField(name, new UnionType(modeEnum, typeIds), { ...options, children }) + } + + metadata(key: string, value: string): this { + this.schemaMetadata.set(key, value) + return this + } + + build(): ArrowSchema { + return new ArrowSchema(this.fields, this.schemaMetadata, Endianness.LITTLE) + } + + private addField( + name: string, + type: ArrowField["type"], + options?: FieldOptions & { children?: ReadonlyArray } + ): this { + const field = new ArrowField( + name, + type, + options?.nullable ?? true, + options?.metadata ?? new Map(), + options?.children ?? [], + undefined + ) + this.fields.push(field) + return this + } +} + +export class FieldBuilder { + private fieldName: string + private fieldType: ArrowField["type"] = NullType + private fieldNullable: boolean = true + private fieldMetadata: Map = new Map() + private fieldChildren: ReadonlyArray = [] + + constructor(name: string) { + this.fieldName = name + } + + null(): this { + this.fieldType = NullType + return this + } + + bool(): this { + this.fieldType = BoolType + return this + } + + int8(): this { + this.fieldType = new IntType(8, true) + return this + } + + int16(): this { + this.fieldType = new IntType(16, true) + return this + } + + int32(): this { + this.fieldType = new IntType(32, true) + return this + } + + int64(): this { + this.fieldType = new IntType(64, true) + return this + } + + uint8(): this { + this.fieldType = new IntType(8, false) + return this + } + + uint16(): this { + this.fieldType = new IntType(16, false) + return this + } + + uint32(): this { + this.fieldType = new IntType(32, false) + return this + } + + uint64(): this { + this.fieldType = new IntType(64, false) + return this + } + + float16(): this { + this.fieldType = new FloatingPointType(Precision.HALF) + return this + } + + float32(): this { + this.fieldType = new FloatingPointType(Precision.SINGLE) + return this + } + + float64(): this { + this.fieldType = new FloatingPointType(Precision.DOUBLE) + return this + } + + decimal(precision: number, scale: number, bitWidth: 128 | 256 = 128): this { + this.fieldType = new DecimalType(precision, scale, bitWidth) + return this + } + + utf8(): this { + this.fieldType = Utf8Type + return this + } + + largeUtf8(): this { + this.fieldType = LargeUtf8Type + return this + } + + binary(): this { + this.fieldType = BinaryType + return this + } + + largeBinary(): this { + this.fieldType = LargeBinaryType + return this + } + + fixedSizeBinary(byteWidth: number): this { + this.fieldType = new FixedSizeBinaryType(byteWidth) + return this + } + + nullable(value: boolean = true): this { + this.fieldNullable = value + return this + } + + metadata(key: string, value: string): this { + this.fieldMetadata.set(key, value) + return this + } + + build(): ArrowField { + return new ArrowField( + this.fieldName, + this.fieldType, + this.fieldNullable, + this.fieldMetadata, + this.fieldChildren, + undefined + ) + } +} + +export class UnionBuilder { + private variants: Array = [] + private ids: Array = [] + private nextId: number = 0 + + variant(name: string, fieldBuilder: (b: FieldBuilder) => FieldBuilder, typeId?: number): this { + const id = typeId ?? 
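
For orientation, a sketch of how the fluent API defined in this file composes; `schema()` is the factory exported at the bottom of the file, and every method shown is defined above or just below.

```ts
const personSchema = schema()
  .int64("id", { nullable: false })
  .utf8("name")
  .timestamp("created_at", "MILLISECOND", "UTC")
  .list("tags", (b) => b.utf8())
  .struct("location", (b) => b.float64("lat").float64("lon"))
  .map("attributes", (k) => k.utf8(), (v) => v.utf8(), { keysSorted: false })
  .union("value", "DENSE", (u) => u.variant("as_int", (b) => b.int32()).variant("as_text", (b) => b.utf8()))
  .metadata("source", "test-harness")
  .build()
```
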
this.nextId++ + const field = fieldBuilder(new FieldBuilder(name)).build() + this.variants.push(field) + this.ids.push(id) + return this + } + + build(): { children: ReadonlyArray; typeIds: ReadonlyArray } { + return { children: this.variants, typeIds: this.ids } + } +} + +interface FieldOptions { + readonly nullable?: boolean + readonly metadata?: Map +} + +interface MapFieldOptions extends FieldOptions { + readonly keysSorted?: boolean +} + +export const schema = (): SchemaBuilder => new SchemaBuilder() diff --git a/packages/amp/test/arrow-test-harness/Types.ts b/packages/amp/test/arrow-test-harness/Types.ts new file mode 100644 index 0000000..9a9d440 --- /dev/null +++ b/packages/amp/test/arrow-test-harness/Types.ts @@ -0,0 +1,193 @@ +/** + * Arrow Test Harness Types + * + * Shared interfaces for the Arrow RecordBatch test data producer. + * + * @internal + */ +import type { ArrowField, ArrowSchema, FlightData } from "@edgeandnode/amp/internal/arrow-flight-ipc/Schema" +import * as Context from "effect/Context" +import type * as Effect from "effect/Effect" +import type * as Random from "effect/Random" + +// Re-export FlightData for convenience +export type { FlightData } + +// ============================================================================= +// Generator Configuration +// ============================================================================= + +/** + * Configuration for generating test data for a single field. + */ +export interface FieldGeneratorConfig { + /** Probability of generating null values (0-1). Default: 0.2 */ + readonly nullRate?: number + /** Minimum number of items for variable-length types (strings, lists). Default: 0 */ + readonly minLength?: number + /** Maximum number of items for variable-length types. Default: varies by type */ + readonly maxLength?: number + /** Custom value generator function (overrides default random generation) */ + readonly valueGenerator?: (index: number) => Effect.Effect + /** Include special float values (NaN, Infinity, -Infinity, -0). Default: false */ + readonly includeSpecialFloats?: boolean + /** Probability of generating a special float value when includeSpecialFloats is true. Default: 0.1 */ + readonly specialFloatRate?: number +} + +/** + * Configuration for nested field generation (lists, structs, maps, unions). + */ +export interface NestedFieldConfig extends FieldGeneratorConfig { + /** Configuration for child fields, keyed by field name */ + readonly children?: Record +} + +/** + * Base options shared by all generator option types. + */ +export interface BaseGeneratorOptions { + /** Seed for deterministic random generation. Default: 42 */ + readonly seed?: number | string + /** Per-field configuration, keyed by field name */ + readonly fields?: Record + /** Global default null rate for all fields. Default: 0.2 */ + readonly defaultNullRate?: number +} + +/** + * Options for generating FlightData. + */ +export interface FlightDataGeneratorOptions extends BaseGeneratorOptions { + /** Number of rows to generate. Default: 100 */ + readonly numRows?: number +} + +/** + * Options for generating multi-batch FlightData. + */ +export interface MultiBatchGeneratorOptions extends BaseGeneratorOptions { + /** Number of rows per batch. Can be a single number (same for all) or array. 
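
A configuration sketch tying these options together (field names are illustrative; `item` is the child name the SchemaBuilder assigns to list elements):

```ts
const options: FlightDataGeneratorOptions = {
  seed: "batch-1",
  numRows: 256,
  defaultNullRate: 0.1,
  fields: {
    name: { minLength: 1, maxLength: 8 },
    score: { includeSpecialFloats: true, specialFloatRate: 0.05 },
    tags: { maxLength: 4, children: { item: { nullRate: 0 } } }
  }
}
```
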
*/ + readonly rowsPerBatch: number | ReadonlyArray +} + +// ============================================================================= +// Generated Data Types +// ============================================================================= + +/** + * Result of generating test FlightData. + */ +export interface GeneratedFlightData { + /** FlightData containing Schema message */ + readonly schemaFlightData: FlightData + /** FlightData containing RecordBatch message */ + readonly recordBatchFlightData: FlightData + /** The generated values for verification, keyed by field name */ + readonly expectedValues: Record> + /** The schema used for generation */ + readonly schema: ArrowSchema +} + +/** + * A single generated record batch with its expected values. + */ +export interface GeneratedBatch { + /** FlightData containing RecordBatch message */ + readonly flightData: FlightData + /** The generated values for verification, keyed by field name */ + readonly expectedValues: Record> + /** Number of rows in this batch */ + readonly numRows: number +} + +/** + * Result of generating multi-batch test FlightData. + */ +export interface GeneratedMultiBatchFlightData { + /** FlightData containing Schema message */ + readonly schemaFlightData: FlightData + /** Array of generated record batches */ + readonly batches: ReadonlyArray + /** The schema used for generation */ + readonly schema: ArrowSchema + /** Total number of rows across all batches */ + readonly totalRows: number +} + +/** + * Buffer layout and metadata for a generated column. + */ +export interface GeneratedBuffers { + /** Validity bitmap (empty Uint8Array if all values are valid) */ + readonly validity: Uint8Array + /** Offset buffer for variable-length types (null if not applicable) */ + readonly offsets: Uint8Array | null + /** Data buffer containing the actual values */ + readonly data: Uint8Array + /** Child buffers for nested types (empty array for leaf types) */ + readonly children: ReadonlyArray + /** Field node metadata (length and null count) */ + readonly fieldNode: FieldNodeInfo +} + +/** + * Field node information for RecordBatch metadata. + */ +export interface FieldNodeInfo { + /** Number of values in this field */ + readonly length: bigint + /** Number of null values */ + readonly nullCount: bigint +} + +/** + * Result from a data generator including both buffers and decoded values. + */ +export interface GeneratorResult extends GeneratedBuffers { + /** The generated values (for test verification) */ + readonly values: ReadonlyArray +} + +// ============================================================================= +// Generator Registry Service +// ============================================================================= + +/** + * Service for looking up data generators by Arrow type ID. + * + * This eliminates the circular dependency between the registry and nested + * generators by using Effect's service pattern for dependency injection. + */ +export interface GeneratorRegistry { + readonly getGenerator: (typeId: string) => DataGenerator +} + +export const GeneratorRegistry = Context.GenericTag("GeneratorRegistry") + +// ============================================================================= +// Data Generator Interface +// ============================================================================= + +/** + * Interface for type-specific data generators. + * + * Generators use Effect's Random service for deterministic random generation. 
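
A minimal sketch of how the registry service is provided, assuming a lookup table from Arrow typeId to generator (the real registry lives elsewhere in the harness):

```ts
import * as Effect from "effect/Effect"

declare const generatorsByTypeId: Record<string, DataGenerator> // assumed lookup table
declare const someGenerator: DataGenerator
declare const field: ArrowField

const registry: GeneratorRegistry = {
  getGenerator: (typeId) => {
    const generator = generatorsByTypeId[typeId]
    if (generator === undefined) throw new Error(`No generator for type "${typeId}"`)
    return generator
  }
}

// Nested generators resolve their children through the injected service.
const run = someGenerator.generate(field, 100, {}).pipe(
  Effect.provideService(GeneratorRegistry, registry)
)
```
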
+ * Nested type generators also require the GeneratorRegistry service to look up
+ * child type generators.
+ */
+export interface DataGenerator {
+  /**
+   * Generate buffers and values for a specific Arrow data type.
+   *
+   * @param field - The Arrow field definition
+   * @param numRows - Number of rows to generate
+   * @param config - Field generation configuration
+   * @returns Effect that produces generated buffers and expected values
+   */
+  generate(
+    field: ArrowField,
+    numRows: number,
+    config: FieldGeneratorConfig
+  ): Effect.Effect<GeneratorResult, never, Random.Random | GeneratorRegistry>
+}
diff --git a/packages/amp/test/arrow-test-harness/ValueComparison.ts b/packages/amp/test/arrow-test-harness/ValueComparison.ts
new file mode 100644
index 0000000..8ec04f1
--- /dev/null
+++ b/packages/amp/test/arrow-test-harness/ValueComparison.ts
@@ -0,0 +1,414 @@
+/**
+ * Value Comparison Utilities
+ *
+ * Provides type-aware comparison between expected values (from generators)
+ * and actual decoded values (from readers).
+ *
+ * @internal
+ */
+import { readColumnValues } from "@edgeandnode/amp/internal/arrow-flight-ipc/Readers"
+import type { DecodedRecordBatch } from "@edgeandnode/amp/internal/arrow-flight-ipc/RecordBatch"
+import type { ArrowDataType, ArrowField, ArrowSchema } from "@edgeandnode/amp/internal/arrow-flight-ipc/Schema"
+
+/**
+ * Result of comparing values.
+ */
+export interface ComparisonResult {
+  readonly success: boolean
+  readonly errors: ReadonlyArray<ComparisonError>
+}
+
+export interface ComparisonError {
+  readonly field: string
+  readonly index: number
+  readonly expected: unknown
+  readonly actual: unknown
+  readonly message: string
+}
+
+/**
+ * Compare expected values from generation against decoded values from parsing.
+ */
+export const compareColumnValues = (
+  fieldName: string,
+  fieldType: ArrowDataType,
+  expected: ReadonlyArray<unknown>,
+  actual: ReadonlyArray<unknown>
+): ComparisonResult => {
+  const errors: Array<ComparisonError> = []
+
+  if (expected.length !== actual.length) {
+    errors.push({
+      field: fieldName,
+      index: -1,
+      expected: expected.length,
+      actual: actual.length,
+      message: `Length mismatch: expected ${expected.length}, got ${actual.length}`
+    })
+    return { success: false, errors }
+  }
+
+  for (let i = 0; i < expected.length; i++) {
+    const exp = expected[i]
+    const act = actual[i]
+
+    if (!valuesEqual(fieldType, exp, act)) {
+      errors.push({
+        field: fieldName,
+        index: i,
+        expected: exp,
+        actual: act,
+        message: `Value mismatch at index ${i}`
+      })
+    }
+  }
+
+  return { success: errors.length === 0, errors }
+}
+
+/**
+ * Compare a single expected value against an actual decoded value.
+ * Handles type-specific conversions between generator and reader formats.
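
A usage sketch of the comparison entry point above; `field`, `expected`, and `actualValues` are assumed to come from the generator and reader, and `formatComparisonErrors` is defined later in this file.

```ts
declare const field: ArrowField
declare const expected: Record<string, ReadonlyArray<unknown>>
declare const actualValues: ReadonlyArray<unknown>

const result = compareColumnValues("score", field.type, expected["score"], actualValues)
if (!result.success) {
  throw new Error(formatComparisonErrors(result.errors))
}
```
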
+ */ +const valuesEqual = (type: ArrowDataType, expected: unknown, actual: unknown): boolean => { + // Both null + if (expected === null && actual === null) return true + // One null, one not + if (expected === null || actual === null) return false + + switch (type.typeId) { + case "null": + return true + + case "bool": + case "int": + return expected === actual + + case "float": + return floatsEqual(expected as number, actual as number, type.precision) + + case "decimal": + return decimalsEqual(expected as bigint, actual as string, type.scale) + + case "utf8": + case "large-utf8": + return expected === actual + + case "binary": + case "large-binary": + case "fixed-size-binary": + return uint8ArraysEqual(expected as Uint8Array, actual as Uint8Array) + + case "date": + return datesEqual(expected, actual as Date, type.unit) + + case "time": + return timesEqual(expected as number | bigint, actual as number, type.unit) + + case "timestamp": + return timestampsEqual(expected as bigint, actual as Date, type.unit) + + case "duration": + return durationsEqual(expected as bigint, actual as { value: bigint; unit: string }) + + case "interval": + return intervalsEqual(expected, actual, type.unit) + + case "list": + case "large-list": + return listsEqual(type, expected as Array, actual as Array) + + case "fixed-size-list": + return listsEqual(type, expected as Array, actual as Array) + + case "struct": + return structsEqual(type, expected as Record, actual as Record) + + case "map": + return mapsEqual(type, expected as Array, actual as Array) + + case "union": + return unionsEqual(type, expected, actual) + + default: + return expected === actual + } +} + +const floatsEqual = (expected: number, actual: number, precision: string): boolean => { + // Handle NaN (NaN !== NaN in JS, so use Number.isNaN) + if (Number.isNaN(expected) && Number.isNaN(actual)) return true + if (Number.isNaN(expected) || Number.isNaN(actual)) return false + + // Handle infinities + if (!Number.isFinite(expected) && !Number.isFinite(actual)) return expected === actual + if (!Number.isFinite(expected) || !Number.isFinite(actual)) return false + + // Handle negative zero (0 === -0 in JS, but they're distinct in IEEE754) + if (expected === 0 && actual === 0) { + return Object.is(expected, actual) + } + + // Float16 has very low precision + if (precision === "HALF") { + const tolerance = Math.abs(expected) * 0.01 + 0.1 + return Math.abs(expected - actual) <= tolerance + } + + // Float32 comparison with tolerance + if (precision === "SINGLE") { + const tolerance = Math.abs(expected) * 1e-6 + 1e-6 + return Math.abs(expected - actual) <= tolerance + } + + // Float64 comparison + const tolerance = Math.abs(expected) * 1e-14 + 1e-14 + return Math.abs(expected - actual) <= tolerance +} + +const decimalsEqual = (expected: bigint, actual: string, scale: number): boolean => { + // Convert expected bigint to formatted decimal string + const isNegative = expected < 0n + const absValue = isNegative ? -expected : expected + const str = absValue.toString() + + let expectedStr: string + if (scale === 0) { + expectedStr = isNegative ? `-${str}` : str + } else { + const paddedStr = str.padStart(scale + 1, "0") + const intPart = paddedStr.slice(0, -scale) || "0" + const fracPart = paddedStr.slice(-scale) + expectedStr = `${isNegative ? 
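
Worked examples of the scale conversion implemented in `decimalsEqual` (the helper is module-private; these are illustrations, not exported API):

```ts
decimalsEqual(12345n, "123.45", 2) // true: unscaled 12345 with scale 2 renders as "123.45"
decimalsEqual(-5n, "-0.005", 3)    // true: "5" is padded to "0005" before splitting
```
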
"-" : ""}${intPart}.${fracPart}` + } + + return expectedStr === actual +} + +const uint8ArraysEqual = (expected: Uint8Array, actual: Uint8Array): boolean => { + if (expected.length !== actual.length) return false + for (let i = 0; i < expected.length; i++) { + if (expected[i] !== actual[i]) return false + } + return true +} + +const MS_PER_DAY = 86400000 + +const datesEqual = (expected: unknown, actual: Date, unit: string): boolean => { + if (unit === "DAY") { + // Generator stores days since epoch as number + const expectedMs = (expected as number) * MS_PER_DAY + return expectedMs === actual.getTime() + } + // MILLISECOND: generator stores ms since epoch as bigint + return Number(expected as bigint) === actual.getTime() +} + +const timesEqual = ( + expected: number | bigint, + actual: number, + unit: string +): boolean => { + // Generator stores raw time value, reader converts to ms + const toMs: Record = { + "SECOND": 1000, + "MILLISECOND": 1, + "MICROSECOND": 0.001, + "NANOSECOND": 0.000001 + } + const expectedMs = Number(expected) * toMs[unit] + return Math.abs(expectedMs - actual) < 0.001 +} + +const timestampsEqual = (expected: bigint, actual: Date, unit: string): boolean => { + const toMs: Record number> = { + "SECOND": (v) => Number(v) * 1000, + "MILLISECOND": (v) => Number(v), + "MICROSECOND": (v) => Number(v / 1000n), + "NANOSECOND": (v) => Number(v / 1000000n) + } + const expectedMs = toMs[unit](expected) + return expectedMs === actual.getTime() +} + +const durationsEqual = ( + expected: bigint, + actual: { value: bigint; unit: string } +): boolean => { + return expected === actual.value +} + +const intervalsEqual = (expected: unknown, actual: unknown, unit: string): boolean => { + if (unit === "YEAR_MONTH") { + // Generator stores number of months directly + return expected === (actual as { months: number }).months + } + if (unit === "DAY_TIME") { + const exp = expected as { days: number; milliseconds: number } + const act = actual as { days: number; milliseconds: number } + return exp.days === act.days && exp.milliseconds === act.milliseconds + } + // MONTH_DAY_NANO + const exp = expected as { months: number; days: number; nanoseconds: bigint } + const act = actual as { months: number; days: number; nanoseconds: bigint } + return exp.months === act.months && exp.days === act.days && exp.nanoseconds === act.nanoseconds +} + +const listsEqual = ( + type: ArrowDataType, + expected: Array, + actual: Array +): boolean => { + if (expected.length !== actual.length) return false + + // Get child type - for lists, the child is the item type + const childType = "children" in type + ? ((type as { children?: ReadonlyArray }).children?.[0]?.type) + : undefined + + if (!childType) { + // Fallback to simple comparison + return JSON.stringify(expected) === JSON.stringify(actual) + } + + for (let i = 0; i < expected.length; i++) { + if (!valuesEqual(childType, expected[i], actual[i])) return false + } + return true +} + +const structsEqual = ( + type: ArrowDataType, + expected: Record, + actual: Record +): boolean => { + const children = "children" in type + ? ((type as { children?: ReadonlyArray }).children ?? 
[]) + : [] + + for (const child of children) { + if (!valuesEqual(child.type, expected[child.name], actual[child.name])) { + return false + } + } + return true +} + +const mapsEqual = ( + type: ArrowDataType, + expected: Array, + actual: Array +): boolean => { + if (expected.length !== actual.length) return false + + for (let i = 0; i < expected.length; i++) { + const expEntry = expected[i] as { key: unknown; value: unknown } + const actEntry = actual[i] as { key: unknown; value: unknown } + + // Get key/value types from map's entries child + const entriesField = "children" in type + ? ((type as { children?: ReadonlyArray }).children?.[0]) + : undefined + + if (entriesField && entriesField.children.length >= 2) { + const keyType = entriesField.children[0].type + const valueType = entriesField.children[1].type + + if (!valuesEqual(keyType, expEntry.key, actEntry.key)) return false + if (!valuesEqual(valueType, expEntry.value, actEntry.value)) return false + } else { + if (expEntry.key !== actEntry.key) return false + if (expEntry.value !== actEntry.value) return false + } + } + return true +} + +const unionsEqual = ( + type: ArrowDataType, + expected: unknown, + actual: unknown +): boolean => { + // For unions, we just compare the selected values directly + // The type information of the selected variant would be needed for deep comparison + // For now, use JSON comparison as a fallback + return JSON.stringify(expected) === JSON.stringify(actual) +} + +/** + * Verify that all columns in a decoded record batch match expected values. + */ +export const verifyDecodedValues = ( + schema: ArrowSchema, + decoded: DecodedRecordBatch, + expectedValues: Record> +): ComparisonResult => { + const allErrors: Array = [] + + for (const field of schema.fields) { + const column = decoded.getColumn(field.name) + if (!column) { + allErrors.push({ + field: field.name, + index: -1, + expected: "column", + actual: null, + message: `Column "${field.name}" not found in decoded batch` + }) + continue + } + + const actualValues = readColumnValues(column) + const expected = expectedValues[field.name] + + if (!expected) { + allErrors.push({ + field: field.name, + index: -1, + expected: "expected values", + actual: null, + message: `No expected values for column "${field.name}"` + }) + continue + } + + const result = compareColumnValues(field.name, field.type, expected, actualValues) + for (const err of result.errors) { + allErrors.push(err) + } + } + + return { success: allErrors.length === 0, errors: allErrors } +} + +/** + * Format comparison errors for test output. + */ +export const formatComparisonErrors = (errors: ReadonlyArray): string => { + if (errors.length === 0) return "No errors" + + const lines = errors.slice(0, 10).map((e) => { + if (e.index === -1) { + return ` ${e.field}: ${e.message}` + } + return ` ${e.field}[${e.index}]: expected ${formatValue(e.expected)}, got ${formatValue(e.actual)}` + }) + + if (errors.length > 10) { + lines.push(` ... 
and ${errors.length - 10} more errors`) + } + + return lines.join("\n") +} + +const formatValue = (v: unknown): string => { + if (v === null) return "null" + if (v === undefined) return "undefined" + if (typeof v === "bigint") return `${v}n` + if (v instanceof Date) return v.toISOString() + if (v instanceof Uint8Array) return `Uint8Array(${v.length})` + if (Array.isArray(v)) return `Array(${v.length})` + if (typeof v === "object") return JSON.stringify(v) + return String(v) +} diff --git a/packages/amp/test/arrow-test-harness/generators/decimal.ts b/packages/amp/test/arrow-test-harness/generators/decimal.ts new file mode 100644 index 0000000..74bfc70 --- /dev/null +++ b/packages/amp/test/arrow-test-harness/generators/decimal.ts @@ -0,0 +1,44 @@ +/** + * Decimal Type Generator + * @internal + */ +import type * as Schema from "@edgeandnode/amp/internal/arrow-flight-ipc/Schema" +import * as Effect from "effect/Effect" +import * as BufferUtils from "../BufferUtils.ts" +import * as Rand from "../RandomUtils.ts" +import type * as Types from "../Types.ts" + +export const decimalGenerator: Types.DataGenerator = { + generate: (field, numRows, config) => + Effect.gen(function*() { + const type = field.type as Schema.DecimalType + const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0 + + // Calculate max value based on precision + // e.g., precision=5 means max is 99999 + const maxUnscaled = 10n ** BigInt(type.precision) - 1n + const values: Array = [] + + for (let i = 0; i < numRows; i++) { + const isNull = yield* Rand.nextBoolWithProbability(nullRate) + if (isNull) { + values.push(null) + } else { + // Generate a random value within the precision bounds + values.push(yield* Rand.nextBigInt(-maxUnscaled, maxUnscaled)) + } + } + + const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values) + const data = BufferUtils.createDecimalDataBuffer(values, type.bitWidth as 128 | 256) + + return { + validity: bitmap, + offsets: null, + data, + children: [], + fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) }, + values + } + }) +} diff --git a/packages/amp/test/arrow-test-harness/generators/index.ts b/packages/amp/test/arrow-test-harness/generators/index.ts new file mode 100644 index 0000000..5e2b9c6 --- /dev/null +++ b/packages/amp/test/arrow-test-harness/generators/index.ts @@ -0,0 +1,9 @@ +/** + * Data Generators + * @internal + */ +export * as Decimal from "./decimal.ts" +export * as Nested from "./nested.ts" +export * as Primitives from "./primitives.ts" +export * as Strings from "./strings.ts" +export * as Temporal from "./temporal.ts" diff --git a/packages/amp/test/arrow-test-harness/generators/nested.ts b/packages/amp/test/arrow-test-harness/generators/nested.ts new file mode 100644 index 0000000..a3d614a --- /dev/null +++ b/packages/amp/test/arrow-test-harness/generators/nested.ts @@ -0,0 +1,413 @@ +/** + * Nested Type Generators (List, Struct, Map, Union) + * @internal + */ +import type * as Schema from "@edgeandnode/amp/internal/arrow-flight-ipc/Schema" +import * as Effect from "effect/Effect" +import * as Random from "effect/Random" +import * as BufferUtils from "../BufferUtils.ts" +import * as Rand from "../RandomUtils.ts" +import * as Types from "../Types.ts" + +// ============================================================================= +// List Generator +// ============================================================================= + +export const listGenerator: Types.DataGenerator = { + generate: (field, numRows, config) => + Effect.gen(function*() 
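
An illustration of the Arrow list layout the generator below produces in its two passes: offsets first, then a single flattened child column.

```ts
// For rows [[1, 2], null, [3]]:
const offsets = [0, 2, 2, 3]  // a null row repeats the previous offset
const childValues = [1, 2, 3] // flattened items, generated in one pass
```
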
{ + const registry = yield* Types.GeneratorRegistry + const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0 + const minItems = config.minLength ?? 0 + const maxItems = config.maxLength ?? 10 + + const childField = field.children[0] + const childConfig = (config as Types.NestedFieldConfig).children?.[childField.name] ?? {} + + const values: Array | null> = [] + const offsets: Array = [0] + let currentOffset = 0 + + // First pass: determine list lengths + for (let i = 0; i < numRows; i++) { + const isNull = yield* Rand.nextBoolWithProbability(nullRate) + if (isNull) { + values.push(null) + offsets.push(currentOffset) + } else { + // Handle case where minItems === maxItems (avoids empty range in nextIntBetween) + const listLength = minItems === maxItems + ? minItems + : yield* Random.nextIntBetween(minItems, maxItems) + currentOffset += listLength + offsets.push(currentOffset) + values.push([]) // placeholder + } + } + + // Generate child data + const totalChildElements = currentOffset + const childGenerator = registry.getGenerator(childField.type.typeId) + const childResult = yield* childGenerator.generate(childField, totalChildElements, childConfig) + + // Populate list values from child values + for (let i = 0; i < numRows; i++) { + if (values[i] !== null) { + const start = offsets[i] + const end = offsets[i + 1] + values[i] = childResult.values.slice(start, end) as Array + } + } + + const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values) + const offsetBuffer = BufferUtils.createInt32OffsetBuffer(offsets) + + return { + validity: bitmap, + offsets: offsetBuffer, + data: new Uint8Array(0), + children: [childResult], + fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) }, + values + } + }) +} + +// ============================================================================= +// Large List Generator +// ============================================================================= + +export const largeListGenerator: Types.DataGenerator = { + generate: (field, numRows, config) => + Effect.gen(function*() { + const registry = yield* Types.GeneratorRegistry + const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0 + const minItems = config.minLength ?? 0 + const maxItems = config.maxLength ?? 10 + + const childField = field.children[0] + const childConfig = (config as Types.NestedFieldConfig).children?.[childField.name] ?? {} + + const values: Array | null> = [] + const offsets: Array = [0n] + let currentOffset = 0n + + for (let i = 0; i < numRows; i++) { + const isNull = yield* Rand.nextBoolWithProbability(nullRate) + if (isNull) { + values.push(null) + offsets.push(currentOffset) + } else { + // Handle case where minItems === maxItems (avoids empty range in nextIntBetween) + const listLength = minItems === maxItems + ? 
minItems + : yield* Random.nextIntBetween(minItems, maxItems) + currentOffset += BigInt(listLength) + offsets.push(currentOffset) + values.push([]) + } + } + + const totalChildElements = Number(currentOffset) + const childGenerator = registry.getGenerator(childField.type.typeId) + const childResult = yield* childGenerator.generate(childField, totalChildElements, childConfig) + + for (let i = 0; i < numRows; i++) { + if (values[i] !== null) { + const start = Number(offsets[i]) + const end = Number(offsets[i + 1]) + values[i] = childResult.values.slice(start, end) as Array + } + } + + const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values) + const offsetBuffer = BufferUtils.createInt64OffsetBuffer(offsets) + + return { + validity: bitmap, + offsets: offsetBuffer, + data: new Uint8Array(0), + children: [childResult], + fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) }, + values + } + }) +} + +// ============================================================================= +// Fixed-Size List Generator +// ============================================================================= + +export const fixedSizeListGenerator: Types.DataGenerator = { + generate: (field, numRows, config) => + Effect.gen(function*() { + const registry = yield* Types.GeneratorRegistry + const type = field.type as Schema.FixedSizeListType + const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0 + + const childField = field.children[0] + const childConfig = (config as Types.NestedFieldConfig).children?.[childField.name] ?? {} + + // Generate validity first + const validity: Array = [] + for (let i = 0; i < numRows; i++) { + const isNull = yield* Rand.nextBoolWithProbability(nullRate) + validity.push(!isNull) + } + + // Generate all child elements (even for null parents, they exist in memory) + const totalChildElements = numRows * type.listSize + const childGenerator = registry.getGenerator(childField.type.typeId) + const childResult = yield* childGenerator.generate(childField, totalChildElements, childConfig) + + // Build values + const values: Array | null> = [] + for (let i = 0; i < numRows; i++) { + if (!validity[i]) { + values.push(null) + } else { + const start = i * type.listSize + const end = start + type.listSize + values.push(childResult.values.slice(start, end) as Array) + } + } + + const nullCount = validity.filter((v) => !v).length + const bitmap = BufferUtils.createValidityBitmapFromFlags(validity) + + return { + validity: bitmap, + offsets: null, + data: new Uint8Array(0), + children: [childResult], + fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) }, + values + } + }) +} + +// ============================================================================= +// Struct Generator +// ============================================================================= + +export const structGenerator: Types.DataGenerator = { + generate: (field, numRows, config) => + Effect.gen(function*() { + const registry = yield* Types.GeneratorRegistry + const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0 + const nestedConfig = config as Types.NestedFieldConfig + + // Determine row validity + const validity: Array = [] + for (let i = 0; i < numRows; i++) { + const isNull = yield* Rand.nextBoolWithProbability(nullRate) + validity.push(!isNull) + } + + // Generate all child columns + const childResults: Array = [] + for (const childField of field.children) { + const childConfig = nestedConfig.children?.[childField.name] ?? 
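
A note on the fixed-size list arithmetic above: the child column length is always `numRows * listSize`, and null parent rows still own (ignored) child slots.

```ts
const numRows = 4
const listSize = 3
const totalChildElements = numRows * listSize // 12, allocated even for null rows
```
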
{} + const childGenerator = registry.getGenerator(childField.type.typeId) + childResults.push(yield* childGenerator.generate(childField, numRows, childConfig)) + } + + // Build struct values + const values: Array | null> = [] + for (let i = 0; i < numRows; i++) { + if (!validity[i]) { + values.push(null) + } else { + const struct: Record = {} + for (let j = 0; j < field.children.length; j++) { + struct[field.children[j].name] = childResults[j].values[i] + } + values.push(struct) + } + } + + const nullCount = validity.filter((v) => !v).length + const bitmap = BufferUtils.createValidityBitmapFromFlags(validity) + + return { + validity: bitmap, + offsets: null, + data: new Uint8Array(0), + children: childResults, + fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) }, + values + } + }) +} + +// ============================================================================= +// Map Generator +// ============================================================================= + +export const mapGenerator: Types.DataGenerator = { + generate: (field, numRows, config) => + Effect.gen(function*() { + const registry = yield* Types.GeneratorRegistry + const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0 + const minItems = config.minLength ?? 0 + const maxItems = config.maxLength ?? 5 + + // Map has one child: entries struct with "key" and "value" fields + const entriesField = field.children[0] + const keyField = entriesField.children[0] + const valueField = entriesField.children[1] + + const values: Array | null> = [] + const offsets: Array = [0] + let currentOffset = 0 + + for (let i = 0; i < numRows; i++) { + const isNull = yield* Rand.nextBoolWithProbability(nullRate) + if (isNull) { + values.push(null) + offsets.push(currentOffset) + } else { + // Handle case where minItems === maxItems (avoids empty range in nextIntBetween) + const mapSize = minItems === maxItems + ? 
minItems + : yield* Random.nextIntBetween(minItems, maxItems) + currentOffset += mapSize + offsets.push(currentOffset) + values.push([]) + } + } + + // Generate keys and values + const totalEntries = currentOffset + const keyGenerator = registry.getGenerator(keyField.type.typeId) + const valueGenerator = registry.getGenerator(valueField.type.typeId) + + const keyResult = yield* keyGenerator.generate(keyField, totalEntries, {}) + const valueResult = yield* valueGenerator.generate(valueField, totalEntries, {}) + + // Build entries struct result + const entriesValues: Array<{ key: unknown; value: unknown } | null> = [] + for (let i = 0; i < totalEntries; i++) { + entriesValues.push({ key: keyResult.values[i], value: valueResult.values[i] }) + } + + const entriesResult: Types.GeneratorResult = { + validity: new Uint8Array(0), + offsets: null, + data: new Uint8Array(0), + children: [keyResult, valueResult], + fieldNode: { length: BigInt(totalEntries), nullCount: 0n }, + values: entriesValues + } + + // Populate map values + for (let i = 0; i < numRows; i++) { + if (values[i] !== null) { + const start = offsets[i] + const end = offsets[i + 1] + values[i] = entriesValues.slice(start, end) as Array<{ key: unknown; value: unknown }> + } + } + + const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values) + const offsetBuffer = BufferUtils.createInt32OffsetBuffer(offsets) + + return { + validity: bitmap, + offsets: offsetBuffer, + data: new Uint8Array(0), + children: [entriesResult], + fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) }, + values + } + }) +} + +// ============================================================================= +// Union Generator +// ============================================================================= + +export const unionGenerator: Types.DataGenerator = { + generate: (field, numRows) => + Effect.gen(function*() { + const registry = yield* Types.GeneratorRegistry + const type = field.type as Schema.UnionType + const isSparse = type.mode === "SPARSE" + + // Generate type IDs for each row + const typeIds: Array = [] + for (let i = 0; i < numRows; i++) { + const idx = yield* Random.nextIntBetween(0, type.typeIds.length - 1) + typeIds.push(type.typeIds[idx]) + } + + const typeIdBuffer = BufferUtils.createTypeIdBuffer(typeIds) + + if (isSparse) { + // Sparse union: all children have full length + const childResults: Array = [] + for (const childField of field.children) { + const childGenerator = registry.getGenerator(childField.type.typeId) + childResults.push(yield* childGenerator.generate(childField, numRows, {})) + } + + // Build values based on type IDs + const typeIdToChildIdx = new Map(type.typeIds.map((id, idx) => [id, idx])) + const values: Array = [] + for (let i = 0; i < numRows; i++) { + const childIdx = typeIdToChildIdx.get(typeIds[i])! + values.push(childResults[childIdx].values[i]) + } + + return { + validity: typeIdBuffer, + offsets: null, + data: new Uint8Array(0), + children: childResults, + fieldNode: { length: BigInt(numRows), nullCount: 0n }, + values + } + } + + // Dense union: children have variable lengths, need offset buffer + const childCounts = new Map() + for (const id of type.typeIds) childCounts.set(id, 0) + + const denseOffsets: Array = [] + for (let i = 0; i < numRows; i++) { + const typeId = typeIds[i] + denseOffsets.push(childCounts.get(typeId)!) + childCounts.set(typeId, childCounts.get(typeId)! 
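
An illustration of the dense-union offset scheme computed below: each row points at the next free slot in its own variant's child column.

```ts
const typeIds = [0, 1, 0]
const offsets = [0, 0, 1] // row 0 -> child 0 slot 0, row 1 -> child 1 slot 0, row 2 -> child 0 slot 1
```
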
+ 1) + } + + const offsetBuffer = BufferUtils.createInt32OffsetBuffer(denseOffsets) + + // Generate child data with appropriate lengths + const childResults: Array = [] + for (let i = 0; i < field.children.length; i++) { + const childField = field.children[i] + const childTypeId = type.typeIds[i] + const childLength = childCounts.get(childTypeId)! + const childGenerator = registry.getGenerator(childField.type.typeId) + childResults.push(yield* childGenerator.generate(childField, childLength, {})) + } + + // Build values + const typeIdToChildIdx = new Map(type.typeIds.map((id, idx) => [id, idx])) + const values: Array = [] + for (let i = 0; i < numRows; i++) { + const childIdx = typeIdToChildIdx.get(typeIds[i])! + const offset = denseOffsets[i] + values.push(childResults[childIdx].values[offset]) + } + + return { + validity: typeIdBuffer, + offsets: offsetBuffer, + data: new Uint8Array(0), + children: childResults, + fieldNode: { length: BigInt(numRows), nullCount: 0n }, + values + } + }) +} diff --git a/packages/amp/test/arrow-test-harness/generators/primitives.ts b/packages/amp/test/arrow-test-harness/generators/primitives.ts new file mode 100644 index 0000000..f1ad105 --- /dev/null +++ b/packages/amp/test/arrow-test-harness/generators/primitives.ts @@ -0,0 +1,223 @@ +/** + * Primitive Type Generators + * @internal + */ +import type * as Schema from "@edgeandnode/amp/internal/arrow-flight-ipc/Schema" +import * as Effect from "effect/Effect" +import * as Random from "effect/Random" +import * as BufferUtils from "../BufferUtils.ts" +import * as Rand from "../RandomUtils.ts" +import type * as Types from "../Types.ts" + +// ============================================================================= +// Null Generator +// ============================================================================= + +export const nullGenerator: Types.DataGenerator = { + generate: (_field, numRows, _config) => + Effect.succeed({ + validity: new Uint8Array(0), + offsets: null, + data: new Uint8Array(0), + children: [], + fieldNode: { length: BigInt(numRows), nullCount: BigInt(numRows) }, + values: new Array(numRows).fill(null) + }) +} + +// ============================================================================= +// Bool Generator +// ============================================================================= + +export const boolGenerator: Types.DataGenerator = { + generate: (field, numRows, config) => + Effect.gen(function*() { + const nullRate = field.nullable ? (config.nullRate ?? 
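
`BufferUtils.createValidityBitmap` is defined elsewhere in the harness; as a reference, a minimal sketch of the Arrow convention it is assumed to implement (LSB-first bit-packing plus a null count):

```ts
const createValidityBitmapSketch = (values: ReadonlyArray<unknown>) => {
  const bitmap = new Uint8Array(Math.ceil(values.length / 8))
  let nullCount = 0
  values.forEach((value, i) => {
    if (value === null) nullCount++
    else bitmap[i >> 3] |= 1 << (i & 7) // bit i set => row i is non-null
  })
  return { bitmap, nullCount }
}
```
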
0.2) : 0 + const values: Array = [] + + for (let i = 0; i < numRows; i++) { + const isNull = yield* Rand.nextBoolWithProbability(nullRate) + if (isNull) { + values.push(null) + } else { + values.push(yield* Random.nextBoolean) + } + } + + const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values) + const data = BufferUtils.createBoolDataBuffer(values) + + return { + validity: bitmap, + offsets: null, + data, + children: [], + fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) }, + values + } + }) +} + +// ============================================================================= +// Int Generator +// ============================================================================= + +const intRanges = { + 8: { signed: { min: -128, max: 127 }, unsigned: { min: 0, max: 255 } }, + 16: { signed: { min: -32768, max: 32767 }, unsigned: { min: 0, max: 65535 } }, + 32: { signed: { min: -2147483648, max: 2147483647 }, unsigned: { min: 0, max: 4294967295 } } +} as const + +const int64Ranges = { + signed: { min: -9223372036854775808n, max: 9223372036854775807n }, + unsigned: { min: 0n, max: 18446744073709551615n } +} as const + +export const intGenerator: Types.DataGenerator = { + generate: (field, numRows, config) => + Effect.gen(function*() { + const type = field.type as Schema.IntType + const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0 + + if (type.bitWidth === 64) { + const range = int64Ranges[type.isSigned ? "signed" : "unsigned"] + const values: Array = [] + + for (let i = 0; i < numRows; i++) { + const isNull = yield* Rand.nextBoolWithProbability(nullRate) + if (isNull) { + values.push(null) + } else { + values.push(yield* Rand.nextBigInt(range.min, range.max)) + } + } + + const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values) + const data = BufferUtils.createInt64DataBuffer(values, type.isSigned) + + return { + validity: bitmap, + offsets: null, + data, + children: [], + fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) }, + values + } + } + + const range = intRanges[type.bitWidth as 8 | 16 | 32][type.isSigned ? "signed" : "unsigned"] + const values: Array = [] + + for (let i = 0; i < numRows; i++) { + const isNull = yield* Rand.nextBoolWithProbability(nullRate) + if (isNull) { + values.push(null) + } else { + values.push(yield* Random.nextIntBetween(range.min, range.max)) + } + } + + const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values) + const data = type.bitWidth === 8 + ? BufferUtils.createInt8DataBuffer(values, type.isSigned) + : type.bitWidth === 16 + ? 
BufferUtils.createInt16DataBuffer(values, type.isSigned)
+          : BufferUtils.createInt32DataBuffer(values, type.isSigned)
+
+      return {
+        validity: bitmap,
+        offsets: null,
+        data,
+        children: [],
+        fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+        values
+      }
+    })
+}
+
+// =============================================================================
+// Float Generator
+// =============================================================================
+
+// Special float values by precision (precision-appropriate to avoid overflow)
+const SPECIAL_FLOATS_BY_PRECISION = {
+  HALF: [
+    Number.NaN,
+    Number.POSITIVE_INFINITY,
+    Number.NEGATIVE_INFINITY,
+    -0,
+    6.1e-5, // Smallest positive float16 subnormal (approx)
+    65504, // Largest finite float16
+    -65504
+  ],
+  SINGLE: [
+    Number.NaN,
+    Number.POSITIVE_INFINITY,
+    Number.NEGATIVE_INFINITY,
+    -0,
+    1.4e-45, // Smallest positive float32 subnormal
+    3.4028235e38, // Largest finite float32
+    -3.4028235e38,
+    1.1920929e-7 // Float32 epsilon
+  ],
+  DOUBLE: [
+    Number.NaN,
+    Number.POSITIVE_INFINITY,
+    Number.NEGATIVE_INFINITY,
+    -0,
+    Number.MIN_VALUE, // Smallest positive float64 subnormal (5e-324)
+    Number.MAX_VALUE, // Largest finite float64 (1.7e308)
+    -Number.MAX_VALUE,
+    Number.EPSILON // Float64 epsilon (2.2e-16)
+  ]
+} as const
+
+// Float ranges by precision (conservative ranges that avoid overflow)
+const FLOAT_RANGES = {
+  HALF: { min: -65504, max: 65504 }, // Float16 max
+  SINGLE: { min: -3.4e38, max: 3.4e38 }, // Float32 approximate max
+  DOUBLE: { min: -1.7e308, max: 1.7e308 } // Float64 approximate max
+} as const
+
+export const floatGenerator: Types.DataGenerator = {
+  generate: (field, numRows, config) =>
+    Effect.gen(function*() {
+      const type = field.type as Schema.FloatingPointType
+      const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0
+      const includeSpecial = config.includeSpecialFloats ?? false
+      const specialRate = config.specialFloatRate ?? 0.1
+      const range = FLOAT_RANGES[type.precision]
+      const specialFloats = SPECIAL_FLOATS_BY_PRECISION[type.precision]
+      const values: Array<number | null> = []
+
+      for (let i = 0; i < numRows; i++) {
+        const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+        if (isNull) {
+          values.push(null)
+        } else if (includeSpecial && (yield* Rand.nextBoolWithProbability(specialRate))) {
+          // Generate a precision-appropriate special float value
+          // (inclusive upper bound, matching the convention used elsewhere)
+          const idx = yield* Random.nextIntBetween(0, specialFloats.length - 1)
+          values.push(specialFloats[idx])
+        } else {
+          // Generate a normal float in the valid range for this precision
+          values.push(yield* Rand.nextFloat(range.min, range.max))
+        }
+      }
+
+      const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+      const data = type.precision === "HALF"
+        ? BufferUtils.createFloat16DataBuffer(values)
+        : type.precision === "SINGLE"
+          ?
diff --git a/packages/amp/test/arrow-test-harness/generators/strings.ts b/packages/amp/test/arrow-test-harness/generators/strings.ts
new file mode 100644
index 0000000..4110641
--- /dev/null
+++ b/packages/amp/test/arrow-test-harness/generators/strings.ts
@@ -0,0 +1,183 @@
+/**
+ * String and Binary Type Generators
+ * @internal
+ */
+import type * as Schema from "@edgeandnode/amp/internal/arrow-flight-ipc/Schema"
+import * as Effect from "effect/Effect"
+import * as BufferUtils from "../BufferUtils.ts"
+import * as Rand from "../RandomUtils.ts"
+import type * as Types from "../Types.ts"
+
+// =============================================================================
+// UTF-8 Generator
+// =============================================================================
+
+export const utf8Generator: Types.DataGenerator = {
+  generate: (field, numRows, config) =>
+    Effect.gen(function*() {
+      const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0
+      const minLength = config.minLength ?? 0
+      const maxLength = config.maxLength ?? 50
+      const values: Array<string | null> = []
+
+      for (let i = 0; i < numRows; i++) {
+        const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+        if (isNull) {
+          values.push(null)
+        } else {
+          values.push(yield* Rand.nextStringBetween(minLength, maxLength))
+        }
+      }
+
+      const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+      const { data, offsets } = BufferUtils.createVariableLengthBuffers(values, false)
+
+      return {
+        validity: bitmap,
+        offsets,
+        data,
+        children: [],
+        fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+        values
+      }
+    })
+}
+
+// =============================================================================
+// Large UTF-8 Generator
+// =============================================================================
+
+export const largeUtf8Generator: Types.DataGenerator = {
+  generate: (field, numRows, config) =>
+    Effect.gen(function*() {
+      const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0
+      const minLength = config.minLength ?? 0
+      const maxLength = config.maxLength ?? 50
+      const values: Array<string | null> = []
+
+      for (let i = 0; i < numRows; i++) {
+        const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+        if (isNull) {
+          values.push(null)
+        } else {
+          values.push(yield* Rand.nextStringBetween(minLength, maxLength))
+        }
+      }
+
+      const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+      const { data, offsets } = BufferUtils.createVariableLengthBuffers(values, true)
+
+      return {
+        validity: bitmap,
+        offsets,
+        data,
+        children: [],
+        fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+        values
+      }
+    })
+}
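The only difference between the two UTF-8 variants above is the offsets width (32-bit vs 64-bit, selected by the boolean passed to createVariableLengthBuffers); the layout is otherwise identical. A hand-worked sketch of that layout, assuming the standard Arrow variable-length encoding:

// For ["ab", null, "cde"] the variable-length layout is numRows + 1
// offsets plus one concatenated data buffer; null rows contribute no bytes.
const enc = new TextEncoder()
const sample = ["ab", null, "cde"]
const offsets = [0]
let total = 0
for (const v of sample) {
  total += v === null ? 0 : enc.encode(v).length
  offsets.push(total)
}
console.log(offsets) // [0, 2, 2, 5] — the data buffer holds the bytes of "abcde"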
+
+// =============================================================================
+// Binary Generator
+// =============================================================================
+
+export const binaryGenerator: Types.DataGenerator = {
+  generate: (field, numRows, config) =>
+    Effect.gen(function*() {
+      const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0
+      const minLength = config.minLength ?? 0
+      const maxLength = config.maxLength ?? 50
+      const values: Array<Uint8Array | null> = []
+
+      for (let i = 0; i < numRows; i++) {
+        const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+        if (isNull) {
+          values.push(null)
+        } else {
+          values.push(yield* Rand.nextBytesBetween(minLength, maxLength))
+        }
+      }
+
+      const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+      const { data, offsets } = BufferUtils.createVariableLengthBuffers(values, false)
+
+      return {
+        validity: bitmap,
+        offsets,
+        data,
+        children: [],
+        fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+        values
+      }
+    })
+}
+
+// =============================================================================
+// Large Binary Generator
+// =============================================================================
+
+export const largeBinaryGenerator: Types.DataGenerator = {
+  generate: (field, numRows, config) =>
+    Effect.gen(function*() {
+      const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0
+      const minLength = config.minLength ?? 0
+      const maxLength = config.maxLength ?? 50
+      const values: Array<Uint8Array | null> = []
+
+      for (let i = 0; i < numRows; i++) {
+        const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+        if (isNull) {
+          values.push(null)
+        } else {
+          values.push(yield* Rand.nextBytesBetween(minLength, maxLength))
+        }
+      }
+
+      const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+      const { data, offsets } = BufferUtils.createVariableLengthBuffers(values, true)
+
+      return {
+        validity: bitmap,
+        offsets,
+        data,
+        children: [],
+        fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+        values
+      }
+    })
+}
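The fixed-size binary generator that follows needs no offsets buffer at all, because every row occupies a slot of the same width. A sketch of that packing under the same assumed layout (zero-filling null slots, which is one common choice; the helper name is illustrative):

// Row i occupies bytes [i * byteWidth, (i + 1) * byteWidth); null rows
// still consume their slot, so positions are computable without offsets.
const packFixedSize = (
  rows: Array<Uint8Array | null>,
  byteWidth: number
): Uint8Array => {
  const out = new Uint8Array(rows.length * byteWidth)
  rows.forEach((row, i) => {
    if (row !== null) out.set(row, i * byteWidth)
  })
  return out
}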
+
+// =============================================================================
+// Fixed-Size Binary Generator
+// =============================================================================
+
+export const fixedSizeBinaryGenerator: Types.DataGenerator = {
+  generate: (field, numRows, config) =>
+    Effect.gen(function*() {
+      const type = field.type as Schema.FixedSizeBinaryType
+      const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0
+      const values: Array<Uint8Array | null> = []
+
+      for (let i = 0; i < numRows; i++) {
+        const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+        if (isNull) {
+          values.push(null)
+        } else {
+          values.push(yield* Rand.nextBytes(type.byteWidth))
+        }
+      }
+
+      const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+      const data = BufferUtils.createFixedSizeBinaryDataBuffer(values, type.byteWidth)
+
+      return {
+        validity: bitmap,
+        offsets: null,
+        data,
+        children: [],
+        fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+        values
+      }
+    })
+}
diff --git a/packages/amp/test/arrow-test-harness/generators/temporal.ts b/packages/amp/test/arrow-test-harness/generators/temporal.ts
new file mode 100644
index 0000000..3c291a7
--- /dev/null
+++ b/packages/amp/test/arrow-test-harness/generators/temporal.ts
@@ -0,0 +1,295 @@
+/**
+ * Temporal Type Generators
+ * @internal
+ */
+import type * as Schema from "@edgeandnode/amp/internal/arrow-flight-ipc/Schema"
+import * as Effect from "effect/Effect"
+import * as Random from "effect/Random"
+import * as BufferUtils from "../BufferUtils.ts"
+import * as Rand from "../RandomUtils.ts"
+import type * as Types from "../Types.ts"
+
+// =============================================================================
+// Date Generator
+// =============================================================================
+
+export const dateGenerator: Types.DataGenerator = {
+  generate: (field, numRows, config) =>
+    Effect.gen(function*() {
+      const type = field.type as Schema.DateType
+      const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0
+
+      if (type.unit === "DAY") {
+        const values: Array<number | null> = []
+        for (let i = 0; i < numRows; i++) {
+          const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+          if (isNull) {
+            values.push(null)
+          } else {
+            values.push(yield* Random.nextIntBetween(0, 47482))
+          }
+        }
+        const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+        const data = BufferUtils.createInt32DataBuffer(values, true)
+        return {
+          validity: bitmap,
+          offsets: null,
+          data,
+          children: [],
+          fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+          values
+        }
+      }
+
+      const values: Array<bigint | null> = []
+      for (let i = 0; i < numRows; i++) {
+        const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+        if (isNull) {
+          values.push(null)
+        } else {
+          values.push(yield* Rand.nextBigInt(0n, 4102444800000n))
+        }
+      }
+      const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+      const data = BufferUtils.createInt64DataBuffer(values, true)
+      return {
+        validity: bitmap,
+        offsets: null,
+        data,
+        children: [],
+        fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+        values
+      }
+    })
+}
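The two bounds in the date generator are consistent with each other: 47482 days and 4102444800000 milliseconds both land on 2100-01-01T00:00:00Z. A standalone check:

console.log(new Date(47482 * 86_400_000).toISOString()) // "2100-01-01T00:00:00.000Z"
console.log(new Date(4_102_444_800_000).toISOString())  // "2100-01-01T00:00:00.000Z"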
+
+// =============================================================================
+// Time Generator
+// =============================================================================
+
+export const timeGenerator: Types.DataGenerator = {
+  generate: (field, numRows, config) =>
+    Effect.gen(function*() {
+      const type = field.type as Schema.TimeType
+      const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0
+
+      if (type.bitWidth === 32) {
+        const maxValue = type.unit === "SECOND" ? 86400 : 86400000
+        const values: Array<number | null> = []
+        for (let i = 0; i < numRows; i++) {
+          const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+          if (isNull) {
+            values.push(null)
+          } else {
+            values.push(yield* Random.nextIntBetween(0, maxValue - 1))
+          }
+        }
+        const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+        const data = BufferUtils.createInt32DataBuffer(values, true)
+        return {
+          validity: bitmap,
+          offsets: null,
+          data,
+          children: [],
+          fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+          values
+        }
+      }
+
+      const maxValue = type.unit === "MICROSECOND" ? 86400000000n : 86400000000000n
+      const values: Array<bigint | null> = []
+      for (let i = 0; i < numRows; i++) {
+        const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+        if (isNull) {
+          values.push(null)
+        } else {
+          values.push(yield* Rand.nextBigInt(0n, maxValue - 1n))
+        }
+      }
+      const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+      const data = BufferUtils.createInt64DataBuffer(values, true)
+      return {
+        validity: bitmap,
+        offsets: null,
+        data,
+        children: [],
+        fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+        values
+      }
+    })
+}
+
+// =============================================================================
+// Timestamp Generator
+// =============================================================================
+
+export const timestampGenerator: Types.DataGenerator = {
+  generate: (field, numRows, config) =>
+    Effect.gen(function*() {
+      const type = field.type as Schema.TimestampType
+      const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0
+
+      const multipliers: Record<string, bigint> = {
+        SECOND: 1n,
+        MILLISECOND: 1000n,
+        MICROSECOND: 1000000n,
+        NANOSECOND: 1000000000n
+      }
+      const mult = multipliers[type.unit]
+      const maxSeconds = 4102444800n
+
+      const values: Array<bigint | null> = []
+      for (let i = 0; i < numRows; i++) {
+        const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+        if (isNull) {
+          values.push(null)
+        } else {
+          const seconds = yield* Rand.nextBigInt(0n, maxSeconds)
+          values.push(seconds * mult)
+        }
+      }
+
+      const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+      const data = BufferUtils.createInt64DataBuffer(values, true)
+      return {
+        validity: bitmap,
+        offsets: null,
+        data,
+        children: [],
+        fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+        values
+      }
+    })
+}
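Generating whole seconds and then scaling by the unit multiplier keeps even the nanosecond case representable: the largest generated value is 4102444800 × 10⁹ ≈ 4.1 × 10¹⁸, comfortably below the int64 maximum of roughly 9.22 × 10¹⁸. A standalone check:

const maxTimestampNanos = 4_102_444_800n * 1_000_000_000n
console.log(maxTimestampNanos < 2n ** 63n - 1n) // true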
+
+// =============================================================================
+// Duration Generator
+// =============================================================================
+
+export const durationGenerator: Types.DataGenerator = {
+  generate: (field, numRows, config) =>
+    Effect.gen(function*() {
+      const type = field.type as Schema.DurationType
+      const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0
+
+      const values: Array<bigint | null> = []
+      for (let i = 0; i < numRows; i++) {
+        const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+        if (isNull) {
+          values.push(null)
+        } else {
+          const maxValue = {
+            SECOND: 31536000n,
+            MILLISECOND: 31536000000n,
+            MICROSECOND: 31536000000000n,
+            NANOSECOND: 31536000000000000n
+          }[type.unit]!
+          values.push(yield* Rand.nextBigInt(0n, maxValue))
+        }
+      }
+
+      const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+      const data = BufferUtils.createInt64DataBuffer(values, true)
+      return {
+        validity: bitmap,
+        offsets: null,
+        data,
+        children: [],
+        fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+        values
+      }
+    })
+}
+
+// =============================================================================
+// Interval Generator
+// =============================================================================
+
+export const intervalGenerator: Types.DataGenerator = {
+  generate: (field, numRows, config) =>
+    Effect.gen(function*() {
+      const type = field.type as Schema.IntervalType
+      const nullRate = field.nullable ? (config.nullRate ?? 0.2) : 0
+
+      if (type.unit === "YEAR_MONTH") {
+        const values: Array<number | null> = []
+        for (let i = 0; i < numRows; i++) {
+          const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+          if (isNull) {
+            values.push(null)
+          } else {
+            values.push(yield* Random.nextIntBetween(-1200, 1200))
+          }
+        }
+        const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+        const data = BufferUtils.createInt32DataBuffer(values, true)
+        return {
+          validity: bitmap,
+          offsets: null,
+          data,
+          children: [],
+          fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+          values
+        }
+      }
+
+      if (type.unit === "DAY_TIME") {
+        const values: Array<{ days: number; milliseconds: number } | null> = []
+        const buffer = new Uint8Array(BufferUtils.align8(numRows * 8))
+        const view = new DataView(buffer.buffer)
+
+        for (let i = 0; i < numRows; i++) {
+          const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+          if (isNull) {
+            values.push(null)
+          } else {
+            const days = yield* Random.nextIntBetween(-3650, 3650)
+            const milliseconds = yield* Random.nextIntBetween(0, 86400000)
+            view.setInt32(i * 8, days, true)
+            view.setInt32(i * 8 + 4, milliseconds, true)
+            values.push({ days, milliseconds })
+          }
+        }
+
+        const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+        return {
+          validity: bitmap,
+          offsets: null,
+          data: buffer,
+          children: [],
+          fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+          values
+        }
+      }
+
+      // MONTH_DAY_NANO
+      const values: Array<{ months: number; days: number; nanoseconds: bigint } | null> = []
+      const buffer = new Uint8Array(BufferUtils.align8(numRows * 16))
+      const view = new DataView(buffer.buffer)
+
+      for (let i = 0; i < numRows; i++) {
+        const isNull = yield* Rand.nextBoolWithProbability(nullRate)
+        if (isNull) {
+          values.push(null)
+        } else {
+          const months = yield* Random.nextIntBetween(-1200, 1200)
+          const days = yield* Random.nextIntBetween(-3650, 3650)
+          const nanoseconds = yield* Rand.nextBigInt(0n, 86400000000000n)
+          view.setInt32(i * 16, months, true)
+          view.setInt32(i * 16 + 4, days, true)
+          view.setBigInt64(i * 16 + 8, nanoseconds, true)
+          values.push({ months, days, nanoseconds })
+        }
+      }
+
+      const { bitmap, nullCount } = BufferUtils.createValidityBitmap(values)
+      return {
+        validity: bitmap,
+        offsets: null,
+        data: buffer,
+        children: [],
+        fieldNode: { length: BigInt(numRows), nullCount: BigInt(nullCount) },
+        values
+      }
+    })
+}
diff --git a/packages/amp/test/arrow-test-harness/index.ts b/packages/amp/test/arrow-test-harness/index.ts
new file mode 100644
index 0000000..57b0e57
--- /dev/null
+++ b/packages/amp/test/arrow-test-harness/index.ts
@@ -0,0 +1,14 @@
+/**
+ * Arrow Test Harness
+ *
+ * Generates valid Apache Arrow FlightData for testing the arrow-flight-ipc package.
+ *
+ * @internal
+ */
+export * as BufferUtils from "./BufferUtils.ts"
+export * as FlightDataGenerator from "./FlightDataGenerator.ts"
+export * as Generators from "./generators/index.ts"
+export * as MessageEncoder from "./MessageEncoder.ts"
+export * as SchemaBuilder from "./SchemaBuilder.ts"
+export * as Types from "./Types.ts"
+export * as ValueComparison from "./ValueComparison.ts"
diff --git a/packages/amp/tsconfig.build.json b/packages/amp/tsconfig.build.json
index 02c325c..dcdf446 100644
--- a/packages/amp/tsconfig.build.json
+++ b/packages/amp/tsconfig.build.json
@@ -1,10 +1,11 @@
 {
   "$schema": "http://json.schemastore.org/tsconfig",
-  "extends": "./tsconfig.src.json",
+  "extends": "./tsconfig.src.jsonc",
   "compilerOptions": {
     "tsBuildInfoFile": ".tsbuildinfo/build.tsbuildinfo",
     "outDir": "dist",
     "types": ["node"],
+    "erasableSyntaxOnly": false,
     "stripInternal": true
   }
 }
diff --git a/packages/amp/tsconfig.src.json b/packages/amp/tsconfig.src.json
index 9867b22..1144997 100644
--- a/packages/amp/tsconfig.src.json
+++ b/packages/amp/tsconfig.src.json
@@ -6,6 +6,7 @@
     "tsBuildInfoFile": ".tsbuildinfo/src.tsbuildinfo",
     "outDir": ".tsbuildinfo/src",
     "types": ["node"],
-    "rootDir": "src"
+    "rootDir": "src",
+    "erasableSyntaxOnly": false
   }
 }
diff --git a/packages/amp/tsconfig.test.json b/packages/amp/tsconfig.test.json
index bcf92ac..c579c26 100644
--- a/packages/amp/tsconfig.test.json
+++ b/packages/amp/tsconfig.test.json
@@ -9,14 +9,13 @@
     "noEmit": true,
     "baseUrl": ".",
     "paths": {
-      "effect": ["src/index.ts"],
-      "effect/*": ["src/*/index.ts", "src/*.ts"]
+      "@edgeandnode/amp": ["src/index.ts"],
+      "@edgeandnode/amp/*": ["src/*/index.ts", "src/*.ts"]
     },
     "types": ["node"],
     "plugins": [
       {
         "name": "@effect/language-service",
-        "transform": "@effect/language-service/transform",
         "namespaceImportPackages": []
       }
     ]
diff --git a/packages/amp/vitest.config.ts b/packages/amp/vitest.config.ts
new file mode 100644
index 0000000..fb966ae
--- /dev/null
+++ b/packages/amp/vitest.config.ts
@@ -0,0 +1,6 @@
+import { mergeConfig, type ViteUserConfig } from "vitest/config"
+import shared from "../../vitest.shared.ts"
+
+const config: ViteUserConfig = {}
+
+export default mergeConfig(shared, config)
diff --git a/packages/arrow-flight-json/LICENSE b/packages/arrow-flight-json/LICENSE
deleted file mode 100644
index b7ad282..0000000
--- a/packages/arrow-flight-json/LICENSE
+++ /dev/null
@@ -1,70 +0,0 @@
-License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved.
-"Business Source License" is a trademark of MariaDB Corporation Ab.
-
-Parameters
-
-Licensor: Edge & Node Ventures, Inc.
-Licensed Work: The Licensed Work is the specific version of the source code
-               with (c) 2025 Edge & Node Ventures, Inc. that this License is
-               included with.
-Additional Use Grant: You may make production use of the Licensed Work, provided
-                      Your use does not compete with Edge & Node Ventures’ offerings
-                      of the Licensed Work.
-
-                      For purposes of this license: A “competitive offering” is a
-                      Product that is offered to third parties on a paid basis, or a
-                      Product that is offered to third parties and significantly
-                      overlaps with the functionality and capabilities of Edge & Node
-                      Ventures’ version(s) of the Licensed Work. Additionally,
-                      Products that are not provided on a paid basis, or that do not
-                      significantly overlap with the functionality and capabilities
-                      of Edge & Node Ventures’ version(s) of the Licensed Work are
-                      not competitive.
- - “Product” means software that is offered to end users and - managed by the end users, or offered to end users through a - third party service that makes use of the Licensed Work. - -Change Date: Three years, or earlier, from the date the Licensed Work is published -Change License: Apache 2.0 - -Terms - -The Licensor hereby grants you the right to copy, modify, create derivative -works, redistribute, and make non-production use of the Licensed Work. The -Licensor may make an Additional Use Grant, above, permitting limited production use. - -Effective on the Change Date, or the fourth anniversary of the first publicly -available distribution of a specific version of the Licensed Work under this -License, whichever comes first, the Licensor hereby grants you rights under -the terms of the Change License, and the rights granted in the paragraph -above terminate. - -If your use of the Licensed Work does not comply with the requirements -currently in effect as described in this License, you must purchase a -commercial license from the Licensor, its affiliated entities, or authorized -resellers, or you must refrain from using the Licensed Work. - -All copies of the original and modified Licensed Work, and derivative works -of the Licensed Work, are subject to this License. This License applies -separately for each version of the Licensed Work and the Change Date may vary -for each version of the Licensed Work released by Licensor. - -You must conspicuously display this License on each original or modified copy -of the Licensed Work. If you receive the Licensed Work in original or -modified form from a third party, the terms and conditions set forth in this -License apply to your use of that work. - -Any use of the Licensed Work in violation of this License will automatically -terminate your rights under this License for the current and all other -versions of the Licensed Work. - -This License does not grant you any right in any trademark or logo of -Licensor or its affiliates (provided that you may use a trademark or logo of -Licensor as expressly required by this License). - -TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON -AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, -EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND -TITLE. 
diff --git a/packages/arrow-flight-json/package.json b/packages/arrow-flight-json/package.json deleted file mode 100644 index 1e0bd40..0000000 --- a/packages/arrow-flight-json/package.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "name": "@edgeandnode/arrow-flight-json", - "version": "0.0.0", - "type": "module", - "license": "BUSL-1.1", - "description": "Decode Arrow Flight responses into JSON", - "homepage": "https://www.edgeandnode.com/amp-dev", - "repository": { - "type": "git", - "url": "https://github.com/edgeandnode/amp-typescript", - "directory": "packages/arrow-flight-json" - }, - "exports": { - "./package.json": "./package.json", - ".": "./src/index.ts", - "./*": "./src/*.ts" - }, - "files": [ - "src/**/*.ts", - "dist/**/*.js", - "dist/**/*.js.map", - "dist/**/*.d.ts", - "dist/**/*.d.ts.map" - ] -} diff --git a/packages/arrow-flight-json/src/index.ts b/packages/arrow-flight-json/src/index.ts deleted file mode 100644 index f14f4a6..0000000 --- a/packages/arrow-flight-json/src/index.ts +++ /dev/null @@ -1 +0,0 @@ -export const test = () => {} diff --git a/packages/arrow-flight-json/tsconfig.build.json b/packages/arrow-flight-json/tsconfig.build.json deleted file mode 100644 index 02c325c..0000000 --- a/packages/arrow-flight-json/tsconfig.build.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "$schema": "http://json.schemastore.org/tsconfig", - "extends": "./tsconfig.src.json", - "compilerOptions": { - "tsBuildInfoFile": ".tsbuildinfo/build.tsbuildinfo", - "outDir": "dist", - "types": ["node"], - "stripInternal": true - } -} diff --git a/packages/arrow-flight-json/tsconfig.json b/packages/arrow-flight-json/tsconfig.json deleted file mode 100644 index 86025d1..0000000 --- a/packages/arrow-flight-json/tsconfig.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "$schema": "http://json.schemastore.org/tsconfig", - "extends": "../../tsconfig.base.jsonc", - "include": [], - "references": [ - { "path": "tsconfig.src.json" }, - { "path": "tsconfig.test.json" } - ] -} diff --git a/packages/arrow-flight-json/tsconfig.src.json b/packages/arrow-flight-json/tsconfig.src.json deleted file mode 100644 index 9867b22..0000000 --- a/packages/arrow-flight-json/tsconfig.src.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "$schema": "http://json.schemastore.org/tsconfig", - "extends": "../../tsconfig.base.jsonc", - "include": ["src"], - "compilerOptions": { - "tsBuildInfoFile": ".tsbuildinfo/src.tsbuildinfo", - "outDir": ".tsbuildinfo/src", - "types": ["node"], - "rootDir": "src" - } -} diff --git a/packages/arrow-flight-json/tsconfig.test.json b/packages/arrow-flight-json/tsconfig.test.json deleted file mode 100644 index bcf92ac..0000000 --- a/packages/arrow-flight-json/tsconfig.test.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "$schema": "http://json.schemastore.org/tsconfig", - "extends": "../../tsconfig.base.jsonc", - "include": ["test"], - "references": [{ "path": "tsconfig.src.json" }], - "compilerOptions": { - "tsBuildInfoFile": ".tsbuildinfo/test.tsbuildinfo", - "outDir": ".tsbuildinfo/test", - "noEmit": true, - "baseUrl": ".", - "paths": { - "effect": ["src/index.ts"], - "effect/*": ["src/*/index.ts", "src/*.ts"] - }, - "types": ["node"], - "plugins": [ - { - "name": "@effect/language-service", - "transform": "@effect/language-service/transform", - "namespaceImportPackages": [] - } - ] - } -} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 98fc6a4..c66182c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -12,23 +12,35 @@ importers: specifier: ^0.3.2 version: 0.3.2 '@effect/language-service': - specifier: ^0.57.1 - version: 
0.57.1 + specifier: ^0.62.0 + version: 0.62.0 + '@effect/vitest': + specifier: ^0.27.0 + version: 0.27.0(effect@3.19.11)(vitest@4.0.15) '@eslint/js': specifier: ^9.39.1 version: 9.39.1 '@types/node': - specifier: ^24.10.1 - version: 24.10.1 + specifier: ^25.0.0 + version: 25.0.0 + '@vitest/coverage-v8': + specifier: ^4.0.15 + version: 4.0.15(vitest@4.0.15) + '@vitest/ui': + specifier: ^4.0.15 + version: 4.0.15(vitest@4.0.15) + effect: + specifier: ^3.19.11 + version: 3.19.11 eslint: specifier: ^9.39.1 version: 9.39.1 eslint-import-resolver-typescript: specifier: ^4.4.4 - version: 4.4.4(eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.48.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1))(eslint@9.39.1) + version: 4.4.4(eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.49.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1))(eslint@9.39.1) eslint-plugin-import-x: specifier: ^4.16.1 - version: 4.16.1(@typescript-eslint/utils@8.48.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1) + version: 4.16.1(@typescript-eslint/utils@8.49.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1) eslint-plugin-simple-import-sort: specifier: ^12.1.1 version: 12.1.1(eslint@9.39.1) @@ -37,7 +49,13 @@ importers: version: 2.0.0(eslint@9.39.1) eslint-plugin-unused-imports: specifier: ^4.3.0 - version: 4.3.0(@typescript-eslint/eslint-plugin@8.48.0(@typescript-eslint/parser@8.48.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1) + version: 4.3.0(@typescript-eslint/eslint-plugin@8.49.0(@typescript-eslint/parser@8.49.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1) + glob: + specifier: ^13.0.0 + version: 13.0.0 + globals: + specifier: ^16.5.0 + version: 16.5.0 ts-patch: specifier: ^3.3.0 version: 3.3.0 @@ -45,17 +63,143 @@ importers: specifier: ^5.9.3 version: 5.9.3 typescript-eslint: - specifier: ^8.48.0 - version: 8.48.0(eslint@9.39.1)(typescript@5.9.3) + specifier: ^8.49.0 + version: 8.49.0(eslint@9.39.1)(typescript@5.9.3) + vite-tsconfig-paths: + specifier: ^5.1.4 + version: 5.1.4(typescript@5.9.3)(vite@7.2.7(@types/node@25.0.0)) + vitest: + specifier: ^4.0.15 + version: 4.0.15(@types/node@25.0.0)(@vitest/ui@4.0.15) + vitest-mock-express: + specifier: ^2.2.0 + version: 2.2.0 packages/amp: devDependencies: + '@bufbuild/buf': + specifier: ^1.61.0 + version: 1.61.0 + '@bufbuild/protobuf': + specifier: ^2.10.1 + version: 2.10.1 + '@bufbuild/protoc-gen-es': + specifier: ^2.10.1 + version: 2.10.1(@bufbuild/protobuf@2.10.1) + '@connectrpc/connect': + specifier: ^2.1.1 + version: 2.1.1(@bufbuild/protobuf@2.10.1) + '@connectrpc/connect-node': + specifier: ^2.1.1 + version: 2.1.1(@bufbuild/protobuf@2.10.1)(@connectrpc/connect@2.1.1(@bufbuild/protobuf@2.10.1)) effect: - specifier: ^3.19.8 - version: 3.19.8 + specifier: ^3.19.11 + version: 3.19.11 + + scratchpad: + dependencies: + '@edgeandnode/amp': + specifier: workspace:* + version: link:../packages/amp packages: + '@babel/helper-string-parser@7.27.1': + resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.28.5': + resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.28.5': + resolution: {integrity: sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==} + engines: {node: '>=6.0.0'} + hasBin: true + + 
'@babel/types@7.28.5': + resolution: {integrity: sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==} + engines: {node: '>=6.9.0'} + + '@bcoe/v8-coverage@1.0.2': + resolution: {integrity: sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==} + engines: {node: '>=18'} + + '@bufbuild/buf-darwin-arm64@1.61.0': + resolution: {integrity: sha512-8vUGNV65LNPp+HT3NuCT9i/mCUEyLrSFctJ2Dz8JqnUu8fVPm4f8lVBSCT0TYLoQ8o8xb/A7bwWu14aKfXxgCg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + + '@bufbuild/buf-darwin-x64@1.61.0': + resolution: {integrity: sha512-dyJghwTYXT6e/Ec+2iPWijquTzyT+5vTItX4R7hM/soNQOs4eEo2z7EHLqvclHyi7+p7+8xye3z+BFM33ucM8A==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + + '@bufbuild/buf-linux-aarch64@1.61.0': + resolution: {integrity: sha512-9r5DRqwpq3WX0ltzt1p/Oe+8g679Fg4XJLaH/zmjqpwHVO+tNzYy7PHK4qWg83PCoANw6OVizl+iTmnE0vbklg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + + '@bufbuild/buf-linux-armv7@1.61.0': + resolution: {integrity: sha512-P2cSlzu68omZ6kyijMcYezZJFS4XayfLA0ATm1HP0zUaV+xVoeMOGhKvMWUFAa9SGYUs4Drxi2fyK/c2ZFA8sw==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + + '@bufbuild/buf-linux-x64@1.61.0': + resolution: {integrity: sha512-USDSLB0vkrR6Sk/VDtECLdOKHfynenCjnAchr9bdqC137IVJT7TVT8JhBW0UWMmXDZYjwbDrBLITwWj8hwk6Uw==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + + '@bufbuild/buf-win32-arm64@1.61.0': + resolution: {integrity: sha512-36n90c0lzhDMXRKhht8XreUCha4OTY7yR6g+bnkAFUXSbCbR3BpqrenWvl5NBtfX2Y70dmvjKnGwVanSIrb1uA==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + + '@bufbuild/buf-win32-x64@1.61.0': + resolution: {integrity: sha512-uBIU7tQlCBPyoeJZH0NzL6y9Y4ikje4OlgHYGssbaPLsCKELSFDHQkHFabkwnVeXSUf9Intq+U3jtb+qOIlflw==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + + '@bufbuild/buf@1.61.0': + resolution: {integrity: sha512-9kKZK/GjhIDUj50pngmjtC6bGFl6U1rFDH2gZJATRfp2vNnQPsb6BqZ+2lea37bgHBYdAYttlve8wnXszXmftA==} + engines: {node: '>=12'} + hasBin: true + + '@bufbuild/protobuf@2.10.1': + resolution: {integrity: sha512-ckS3+vyJb5qGpEYv/s1OebUHDi/xSNtfgw1wqKZo7MR9F2z+qXr0q5XagafAG/9O0QPVIUfST0smluYSTpYFkg==} + + '@bufbuild/protoc-gen-es@2.10.1': + resolution: {integrity: sha512-vsfbWs1X93oX+sMMJ7910/OwIizAYH5IOAArsxnSTifiop1fVgLFPAvJBLiHZoNMI8B/lbqji2SFwvjK0AWO1Q==} + engines: {node: '>=20'} + hasBin: true + peerDependencies: + '@bufbuild/protobuf': 2.10.1 + peerDependenciesMeta: + '@bufbuild/protobuf': + optional: true + + '@bufbuild/protoplugin@2.10.1': + resolution: {integrity: sha512-imB8dKEjrOnG5+XqVS+CeYn924WGLU/g3wogKhk11XtX9y9NJ7432OS6h24asuBbLrQcPdEZ6QkfM7KeOCeeyQ==} + + '@connectrpc/connect-node@2.1.1': + resolution: {integrity: sha512-s3TfsI1XF+n+1z6MBS9rTnFsxxR4Rw5wmdEnkQINli81ESGxcsfaEet8duzq8LVuuCupmhUsgpRo0Nv9pZkufg==} + engines: {node: '>=20'} + peerDependencies: + '@bufbuild/protobuf': ^2.7.0 + '@connectrpc/connect': 2.1.1 + + '@connectrpc/connect@2.1.1': + resolution: {integrity: sha512-JzhkaTvM73m2K1URT6tv53k2RwngSmCXLZJgK580qNQOXRzZRR/BCMfZw3h+90JpnG6XksP5bYT+cz0rpUzUWQ==} + peerDependencies: + '@bufbuild/protobuf': ^2.7.0 + '@dprint/formatter@0.4.1': resolution: {integrity: sha512-IB/GXdlMOvi0UhQQ9mcY15Fxcrc2JPadmo6tqefCNV0bptFq7YBpggzpqYXldBXDa04CbKJ+rDwO2eNRPE2+/g==} @@ -65,10 +209,16 @@ packages: '@effect/eslint-plugin@0.3.2': resolution: {integrity: sha512-c4Vs9t3r54A4Zpl+wo8+PGzZz3JWYsip41H+UrebRLjQ2Hk/ap63IeCgN/HWcYtxtyhRopjp7gW9nOQ2Snbl+g==} - '@effect/language-service@0.57.1': - 
resolution: {integrity: sha512-uWzYN+aHl4KfZHGmDxa3+OjV/mk9gMUycIyB8SYvyDiny3lXBtmRFdRVnw6TYL1P5EBfg4N09+lO/0ECRREVXQ==} + '@effect/language-service@0.62.0': + resolution: {integrity: sha512-E1OU/jMiSqgl26D8grU3ClI46fxCfICzC6vwuSrNjW56Vlb/fujIKp97mGfhOE97DC4UTk2I9tlqIGK5fwTTPA==} hasBin: true + '@effect/vitest@0.27.0': + resolution: {integrity: sha512-8bM7n9xlMUYw9GqPIVgXFwFm2jf27m/R7psI64PGpwU5+26iwyxp9eAXEsfT5S6lqztYfpQQ1Ubp5o6HfNYzJQ==} + peerDependencies: + effect: ^3.19.0 + vitest: ^3.2.0 + '@emnapi/core@1.7.1': resolution: {integrity: sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==} @@ -78,6 +228,162 @@ packages: '@emnapi/wasi-threads@1.1.0': resolution: {integrity: sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==} + '@esbuild/aix-ppc64@0.25.12': + resolution: {integrity: sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.25.12': + resolution: {integrity: sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.25.12': + resolution: {integrity: sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.25.12': + resolution: {integrity: sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.25.12': + resolution: {integrity: sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.25.12': + resolution: {integrity: sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.25.12': + resolution: {integrity: sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.25.12': + resolution: {integrity: sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.25.12': + resolution: {integrity: sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.25.12': + resolution: {integrity: sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.25.12': + resolution: {integrity: sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.25.12': + resolution: {integrity: sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.25.12': + resolution: {integrity: 
sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.25.12': + resolution: {integrity: sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.25.12': + resolution: {integrity: sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.25.12': + resolution: {integrity: sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.25.12': + resolution: {integrity: sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.25.12': + resolution: {integrity: sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.25.12': + resolution: {integrity: sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.25.12': + resolution: {integrity: sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.25.12': + resolution: {integrity: sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/openharmony-arm64@0.25.12': + resolution: {integrity: sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + + '@esbuild/sunos-x64@0.25.12': + resolution: {integrity: sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.25.12': + resolution: {integrity: sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.25.12': + resolution: {integrity: sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.25.12': + resolution: {integrity: sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + '@eslint-community/eslint-utils@4.9.0': resolution: {integrity: sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} @@ -140,57 +446,219 @@ packages: resolution: {integrity: sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==} engines: {node: 20 || >=22} + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + 
'@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + + '@jridgewell/trace-mapping@0.3.31': + resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + '@napi-rs/wasm-runtime@0.2.12': resolution: {integrity: sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==} + '@polka/url@1.0.0-next.29': + resolution: {integrity: sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==} + + '@rollup/rollup-android-arm-eabi@4.53.3': + resolution: {integrity: sha512-mRSi+4cBjrRLoaal2PnqH82Wqyb+d3HsPUN/W+WslCXsZsyHa9ZeQQX/pQsZaVIWDkPcpV6jJ+3KLbTbgnwv8w==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.53.3': + resolution: {integrity: sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.53.3': + resolution: {integrity: sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.53.3': + resolution: {integrity: sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.53.3': + resolution: {integrity: sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.53.3': + resolution: {integrity: sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.53.3': + resolution: {integrity: sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm-musleabihf@4.53.3': + resolution: {integrity: sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm64-gnu@4.53.3': + resolution: {integrity: sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-arm64-musl@4.53.3': + resolution: {integrity: sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-loong64-gnu@4.53.3': + resolution: {integrity: sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g==} + cpu: [loong64] + os: [linux] + + '@rollup/rollup-linux-ppc64-gnu@4.53.3': + resolution: {integrity: sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw==} + cpu: [ppc64] + os: [linux] + + '@rollup/rollup-linux-riscv64-gnu@4.53.3': + resolution: {integrity: sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-riscv64-musl@4.53.3': + resolution: {integrity: sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-s390x-gnu@4.53.3': + resolution: {integrity: 
sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg==} + cpu: [s390x] + os: [linux] + + '@rollup/rollup-linux-x64-gnu@4.53.3': + resolution: {integrity: sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-linux-x64-musl@4.53.3': + resolution: {integrity: sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-openharmony-arm64@4.53.3': + resolution: {integrity: sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw==} + cpu: [arm64] + os: [openharmony] + + '@rollup/rollup-win32-arm64-msvc@4.53.3': + resolution: {integrity: sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.53.3': + resolution: {integrity: sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-gnu@4.53.3': + resolution: {integrity: sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg==} + cpu: [x64] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.53.3': + resolution: {integrity: sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ==} + cpu: [x64] + os: [win32] + '@standard-schema/spec@1.0.0': resolution: {integrity: sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==} '@tybys/wasm-util@0.10.1': resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==} + '@types/body-parser@1.19.6': + resolution: {integrity: sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==} + + '@types/chai@5.2.3': + resolution: {integrity: sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==} + + '@types/connect@3.4.38': + resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} + + '@types/deep-eql@4.0.2': + resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} + '@types/estree@1.0.8': resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + '@types/express-serve-static-core@4.19.7': + resolution: {integrity: sha512-FvPtiIf1LfhzsaIXhv/PHan/2FeQBbtBDtfX2QfvPxdUelMDEckK08SM6nqo1MIZY3RUlfA+HV8+hFUSio78qg==} + + '@types/express@4.17.25': + resolution: {integrity: sha512-dVd04UKsfpINUnK0yBoYHDF3xu7xVH4BuDotC/xGuycx4CgbP48X/KF/586bcObxT0HENHXEU8Nqtu6NR+eKhw==} + + '@types/http-errors@2.0.5': + resolution: {integrity: sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==} + '@types/json-schema@7.0.15': resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} - '@types/node@24.10.1': - resolution: {integrity: sha512-GNWcUTRBgIRJD5zj+Tq0fKOJ5XZajIiBroOF0yvj2bSU1WvNdYS/dn9UxwsujGW4JX06dnHyjV2y9rRaybH0iQ==} + '@types/mime@1.3.5': + resolution: {integrity: sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==} + + '@types/node@25.0.0': + resolution: {integrity: 
sha512-rl78HwuZlaDIUSeUKkmogkhebA+8K1Hy7tddZuJ3D0xV8pZSfsYGTsliGUol1JPzu9EKnTxPC4L1fiWouStRew==} + + '@types/qs@6.14.0': + resolution: {integrity: sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ==} - '@typescript-eslint/eslint-plugin@8.48.0': - resolution: {integrity: sha512-XxXP5tL1txl13YFtrECECQYeZjBZad4fyd3cFV4a19LkAY/bIp9fev3US4S5fDVV2JaYFiKAZ/GRTOLer+mbyQ==} + '@types/range-parser@1.2.7': + resolution: {integrity: sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==} + + '@types/send@0.17.6': + resolution: {integrity: sha512-Uqt8rPBE8SY0RK8JB1EzVOIZ32uqy8HwdxCnoCOsYrvnswqmFZ/k+9Ikidlk/ImhsdvBsloHbAlewb2IEBV/Og==} + + '@types/send@1.2.1': + resolution: {integrity: sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==} + + '@types/serve-static@1.15.10': + resolution: {integrity: sha512-tRs1dB+g8Itk72rlSI2ZrW6vZg0YrLI81iQSTkMmOqnqCaNr/8Ek4VwWcN5vZgCYWbg/JJSGBlUaYGAOP73qBw==} + + '@typescript-eslint/eslint-plugin@8.49.0': + resolution: {integrity: sha512-JXij0vzIaTtCwu6SxTh8qBc66kmf1xs7pI4UOiMDFVct6q86G0Zs7KRcEoJgY3Cav3x5Tq0MF5jwgpgLqgKG3A==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: - '@typescript-eslint/parser': ^8.48.0 + '@typescript-eslint/parser': ^8.49.0 eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/parser@8.48.0': - resolution: {integrity: sha512-jCzKdm/QK0Kg4V4IK/oMlRZlY+QOcdjv89U2NgKHZk1CYTj82/RVSx1mV/0gqCVMJ/DA+Zf/S4NBWNF8GQ+eqQ==} + '@typescript-eslint/parser@8.49.0': + resolution: {integrity: sha512-N9lBGA9o9aqb1hVMc9hzySbhKibHmB+N3IpoShyV6HyQYRGIhlrO5rQgttypi+yEeKsKI4idxC8Jw6gXKD4THA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/project-service@8.48.0': - resolution: {integrity: sha512-Ne4CTZyRh1BecBf84siv42wv5vQvVmgtk8AuiEffKTUo3DrBaGYZueJSxxBZ8fjk/N3DrgChH4TOdIOwOwiqqw==} + '@typescript-eslint/project-service@8.49.0': + resolution: {integrity: sha512-/wJN0/DKkmRUMXjZUXYZpD1NEQzQAAn9QWfGwo+Ai8gnzqH7tvqS7oNVdTjKqOcPyVIdZdyCMoqN66Ia789e7g==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/scope-manager@8.48.0': - resolution: {integrity: sha512-uGSSsbrtJrLduti0Q1Q9+BF1/iFKaxGoQwjWOIVNJv0o6omrdyR8ct37m4xIl5Zzpkp69Kkmvom7QFTtue89YQ==} + '@typescript-eslint/scope-manager@8.49.0': + resolution: {integrity: sha512-npgS3zi+/30KSOkXNs0LQXtsg9ekZ8OISAOLGWA/ZOEn0ZH74Ginfl7foziV8DT+D98WfQ5Kopwqb/PZOaIJGg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/tsconfig-utils@8.48.0': - resolution: {integrity: sha512-WNebjBdFdyu10sR1M4OXTt2OkMd5KWIL+LLfeH9KhgP+jzfDV/LI3eXzwJ1s9+Yc0Kzo2fQCdY/OpdusCMmh6w==} + '@typescript-eslint/tsconfig-utils@8.49.0': + resolution: {integrity: sha512-8prixNi1/6nawsRYxet4YOhnbW+W9FK/bQPxsGB1D3ZrDzbJ5FXw5XmzxZv82X3B+ZccuSxo/X8q9nQ+mFecWA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/type-utils@8.48.0': - resolution: {integrity: sha512-zbeVaVqeXhhab6QNEKfK96Xyc7UQuoFWERhEnj3mLVnUWrQnv15cJNseUni7f3g557gm0e46LZ6IJ4NJVOgOpw==} + '@typescript-eslint/type-utils@8.49.0': + resolution: {integrity: sha512-KTExJfQ+svY8I10P4HdxKzWsvtVnsuCifU5MvXrRwoP2KOlNZ9ADNEWWsQTJgMxLzS5VLQKDjkCT/YzgsnqmZg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 @@ -200,23 +668,32 @@ packages: 
resolution: {integrity: sha512-cQMcGQQH7kwKoVswD1xdOytxQR60MWKM1di26xSUtxehaDs/32Zpqsu5WJlXTtTTqyAVK8R7hvsUnIXRS+bjvA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript-eslint/typescript-estree@8.48.0': - resolution: {integrity: sha512-ljHab1CSO4rGrQIAyizUS6UGHHCiAYhbfcIZ1zVJr5nMryxlXMVWS3duFPSKvSUbFPwkXMFk1k0EMIjub4sRRQ==} + '@typescript-eslint/types@8.49.0': + resolution: {integrity: sha512-e9k/fneezorUo6WShlQpMxXh8/8wfyc+biu6tnAqA81oWrEic0k21RHzP9uqqpyBBeBKu4T+Bsjy9/b8u7obXQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript-eslint/typescript-estree@8.49.0': + resolution: {integrity: sha512-jrLdRuAbPfPIdYNppHJ/D0wN+wwNfJ32YTAm10eJVsFmrVpXQnDWBn8niCSMlWjvml8jsce5E/O+86IQtTbJWA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/utils@8.48.0': - resolution: {integrity: sha512-yTJO1XuGxCsSfIVt1+1UrLHtue8xz16V8apzPYI06W0HbEbEWHxHXgZaAgavIkoh+GeV6hKKd5jm0sS6OYxWXQ==} + '@typescript-eslint/utils@8.49.0': + resolution: {integrity: sha512-N3W7rJw7Rw+z1tRsHZbK395TWSYvufBXumYtEGzypgMUthlg0/hmCImeA8hgO2d2G4pd7ftpxxul2J8OdtdaFA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/visitor-keys@8.48.0': - resolution: {integrity: sha512-T0XJMaRPOH3+LBbAfzR2jalckP1MSG/L9eUtY0DEzUyVaXJ/t6zN0nR7co5kz0Jko/nkSYCBRkz1djvjajVTTg==} + '@typescript-eslint/visitor-keys@8.49.0': + resolution: {integrity: sha512-LlKaciDe3GmZFphXIc79THF/YYBugZ7FS1pO581E/edlVVNbZKDy93evqmrfQ9/Y4uN0vVhX4iuchq26mK/iiA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript/vfs@1.6.2': + resolution: {integrity: sha512-hoBwJwcbKHmvd2QVebiytN1aELvpk9B74B4L1mFm/XT1Q/VOYAWl2vQ9AWRFtQq8zmz6enTpfTV8WRc4ATjW/g==} + peerDependencies: + typescript: '*' + '@unrs/resolver-binding-android-arm-eabi@1.11.1': resolution: {integrity: sha512-ppLRUgHVaGRWUx0R0Ut06Mjo9gBaBkg3v/8AxusGLhsIotbBLuRk51rAzqLC8gq6NyyAojEXglNjzf6R948DNw==} cpu: [arm] @@ -312,6 +789,49 @@ packages: cpu: [x64] os: [win32] + '@vitest/coverage-v8@4.0.15': + resolution: {integrity: sha512-FUJ+1RkpTFW7rQITdgTi93qOCWJobWhBirEPCeXh2SW2wsTlFxy51apDz5gzG+ZEYt/THvWeNmhdAoS9DTwpCw==} + peerDependencies: + '@vitest/browser': 4.0.15 + vitest: 4.0.15 + peerDependenciesMeta: + '@vitest/browser': + optional: true + + '@vitest/expect@4.0.15': + resolution: {integrity: sha512-Gfyva9/GxPAWXIWjyGDli9O+waHDC0Q0jaLdFP1qPAUUfo1FEXPXUfUkp3eZA0sSq340vPycSyOlYUeM15Ft1w==} + + '@vitest/mocker@4.0.15': + resolution: {integrity: sha512-CZ28GLfOEIFkvCFngN8Sfx5h+Se0zN+h4B7yOsPVCcgtiO7t5jt9xQh2E1UkFep+eb9fjyMfuC5gBypwb07fvQ==} + peerDependencies: + msw: ^2.4.9 + vite: ^6.0.0 || ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + + '@vitest/pretty-format@4.0.15': + resolution: {integrity: sha512-SWdqR8vEv83WtZcrfLNqlqeQXlQLh2iilO1Wk1gv4eiHKjEzvgHb2OVc3mIPyhZE6F+CtfYjNlDJwP5MN6Km7A==} + + '@vitest/runner@4.0.15': + resolution: {integrity: sha512-+A+yMY8dGixUhHmNdPUxOh0la6uVzun86vAbuMT3hIDxMrAOmn5ILBHm8ajrqHE0t8R9T1dGnde1A5DTnmi3qw==} + + '@vitest/snapshot@4.0.15': + resolution: {integrity: sha512-A7Ob8EdFZJIBjLjeO0DZF4lqR6U7Ydi5/5LIZ0xcI+23lYlsYJAfGn8PrIWTYdZQRNnSRlzhg0zyGu37mVdy5g==} + + '@vitest/spy@4.0.15': + resolution: {integrity: sha512-+EIjOJmnY6mIfdXtE/bnozKEvTC4Uczg19yeZ2vtCz5Yyb0QQ31QWVQ8hswJ3Ysx/K2EqaNsVanjr//2+P3FHw==} + + '@vitest/ui@4.0.15': + resolution: {integrity: 
sha512-sxSyJMaKp45zI0u+lHrPuZM1ZJQ8FaVD35k+UxVrha1yyvQ+TZuUYllUixwvQXlB7ixoDc7skf3lQPopZIvaQw==} + peerDependencies: + vitest: 4.0.15 + + '@vitest/utils@4.0.15': + resolution: {integrity: sha512-HXjPW2w5dxhTD0dLwtYHDnelK3j8sR8cWIaLxr22evTyY6q8pRCjZSmhRWVjBaOVXChQd6AwMzi9pucorXCPZA==} + acorn-jsx@5.3.2: resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} peerDependencies: @@ -336,6 +856,13 @@ packages: argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + assertion-error@2.0.1: + resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} + engines: {node: '>=12'} + + ast-v8-to-istanbul@0.3.8: + resolution: {integrity: sha512-szgSZqUxI5T8mLKvS7WTjF9is+MVbOeLADU73IseOcrqhxr/VAvy6wfoVE39KnKzA7JRhjF5eUagNlHwvZPlKQ==} + balanced-match@1.0.2: resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} @@ -349,6 +876,10 @@ packages: resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} engines: {node: '>=6'} + chai@6.2.1: + resolution: {integrity: sha512-p4Z49OGG5W/WBCPSS/dH3jQ73kD6tiMmUM+bckNK6Jr5JHMG3k9bg/BvKR8lKmtVBKmOiuVaV2ws8s9oSbwysg==} + engines: {node: '>=18'} + chalk@4.1.2: resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} engines: {node: '>=10'} @@ -383,8 +914,16 @@ packages: deep-is@0.1.4: resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} - effect@3.19.8: - resolution: {integrity: sha512-OmLw8EfH02vdmyU2fO4uY9He/wepwKI5E/JNpE2pseaWWUbaYOK9UlxIiKP20ZEqQr+S/jSqRDGmpiqD/2DeCQ==} + effect@3.19.11: + resolution: {integrity: sha512-UTEj3c1s41Ha3uzSPKKvFBZaDjZ8ez00Q2NYWVm2mKh2LXeX8j6LTg1HcQHnmdUhOjr79KHmhVWYB/zbegLO1A==} + + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + + esbuild@0.25.12: + resolution: {integrity: sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==} + engines: {node: '>=18'} + hasBin: true escape-string-regexp@4.0.0: resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} @@ -483,10 +1022,17 @@ packages: resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} engines: {node: '>=4.0'} + estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + esutils@2.0.3: resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} engines: {node: '>=0.10.0'} + expect-type@1.3.0: + resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==} + engines: {node: '>=12.0.0'} + fast-check@3.23.2: resolution: {integrity: sha512-h5+1OzzfCC3Ef7VbtKdcv7zsstUQwUDlYpUTvjeUsJAssPgLn7QzbboPtL5ro04Mq0rPOsMzl7q5hIbRs2wD1A==} engines: {node: '>=8.0.0'} @@ -512,6 +1058,9 @@ packages: picomatch: optional: true + fflate@0.8.2: + resolution: {integrity: sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==} + 
file-entry-cache@8.0.0: resolution: {integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==} engines: {node: '>=16.0.0'} @@ -527,6 +1076,11 @@ packages: flatted@3.3.3: resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==} + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + function-bind@1.1.2: resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} @@ -537,6 +1091,10 @@ packages: resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} engines: {node: '>=10.13.0'} + glob@13.0.0: + resolution: {integrity: sha512-tvZgpqk6fz4BaNZ66ZsRaZnbHvP/jG3uKJvAZOwEVUL4RTA5nJeeLYfyN9/VA8NX/V3IBG+hkeuGpKjvELkVhA==} + engines: {node: 20 || >=22} + global-prefix@4.0.0: resolution: {integrity: sha512-w0Uf9Y9/nyHinEk5vMJKRie+wa4kR5hmDbEhGGds/kG1PwGLLHKRoNMeJOyCQjjBkANlnScqgzcFwGHgmgLkVA==} engines: {node: '>=16'} @@ -545,8 +1103,12 @@ packages: resolution: {integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==} engines: {node: '>=18'} - graphemer@1.4.0: - resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + globals@16.5.0: + resolution: {integrity: sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==} + engines: {node: '>=18'} + + globrex@0.1.2: + resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==} has-flag@4.0.0: resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} @@ -556,6 +1118,9 @@ packages: resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} engines: {node: '>= 0.4'} + html-escaper@2.0.2: + resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} + ignore@5.3.2: resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} engines: {node: '>= 4'} @@ -598,6 +1163,25 @@ packages: resolution: {integrity: sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==} engines: {node: '>=16'} + istanbul-lib-coverage@3.2.2: + resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==} + engines: {node: '>=8'} + + istanbul-lib-report@3.0.1: + resolution: {integrity: sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==} + engines: {node: '>=10'} + + istanbul-lib-source-maps@5.0.6: + resolution: {integrity: sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==} + engines: {node: '>=10'} + + istanbul-reports@3.2.0: + resolution: {integrity: sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==} + engines: {node: '>=8'} + + js-tokens@9.0.1: + resolution: {integrity: sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==} + js-yaml@4.1.1: resolution: {integrity: 
sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} hasBin: true @@ -629,6 +1213,20 @@ packages: lodash.merge@4.6.2: resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + lru-cache@11.2.4: + resolution: {integrity: sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==} + engines: {node: 20 || >=22} + + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + + magicast@0.5.1: + resolution: {integrity: sha512-xrHS24IxaLrvuo613F719wvOIv9xPHFWQHuvGUBmPnCA/3MQxKI3b+r7n1jAoDHmsbC5bRhTZYR77invLAxVnw==} + + make-dir@4.0.0: + resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==} + engines: {node: '>=10'} + minimatch@10.1.1: resolution: {integrity: sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==} engines: {node: 20 || >=22} @@ -643,9 +1241,22 @@ packages: minimist@1.2.8: resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + minipass@7.1.2: + resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} + engines: {node: '>=16 || 14 >=14.17'} + + mrmime@2.0.1: + resolution: {integrity: sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ==} + engines: {node: '>=10'} + ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + napi-postinstall@0.3.4: resolution: {integrity: sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ==} engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0} @@ -657,6 +1268,9 @@ packages: natural-compare@1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + obug@2.1.1: + resolution: {integrity: sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==} + optionator@0.9.4: resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} engines: {node: '>= 0.8.0'} @@ -684,10 +1298,24 @@ packages: path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + path-scurry@2.0.1: + resolution: {integrity: sha512-oWyT4gICAu+kaA7QWk/jvCHWarMKNs6pXOGWKDTr7cw4IGcUbW+PeTfbaQiLGheFRpjo6O9J0PmyMfQPjH71oA==} + engines: {node: 20 || >=22} + + pathe@2.0.3: + resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + picomatch@4.0.3: resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} engines: {node: '>=12'} + postcss@8.5.6: + resolution: {integrity: 
sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} + engines: {node: ^10 || ^12 || >=14} + prelude-ls@1.2.1: resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} engines: {node: '>= 0.8.0'} @@ -715,6 +1343,11 @@ packages: engines: {node: '>= 0.4'} hasBin: true + rollup@4.53.3: + resolution: {integrity: sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + semver@7.7.3: resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} engines: {node: '>=10'} @@ -728,10 +1361,27 @@ packages: resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} engines: {node: '>=8'} + siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + + sirv@3.0.2: + resolution: {integrity: sha512-2wcC/oGxHis/BoHkkPwldgiPSYcpZK3JU28WoMVv55yHJgcZ8rlXvuG9iZggz+sU1d4bRgIGASwyWqjxu3FM0g==} + engines: {node: '>=18'} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + stable-hash-x@0.2.0: resolution: {integrity: sha512-o3yWv49B/o4QZk5ZcsALc6t0+eCelPc44zZsLtCQnZPDwFpDYSWcDnrv2TtMmMbQ7uKo3J0HTURCqckw23czNQ==} engines: {node: '>=12.0.0'} + stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + + std-env@3.10.0: + resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + strip-ansi@6.0.1: resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} engines: {node: '>=8'} @@ -748,10 +1398,25 @@ packages: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} + tinybench@2.9.0: + resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} + + tinyexec@1.0.2: + resolution: {integrity: sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==} + engines: {node: '>=18'} + tinyglobby@0.2.15: resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} engines: {node: '>=12.0.0'} + tinyrainbow@3.0.3: + resolution: {integrity: sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==} + engines: {node: '>=14.0.0'} + + totalist@3.0.1: + resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==} + engines: {node: '>=6'} + ts-api-utils@2.1.0: resolution: {integrity: sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==} engines: {node: '>=18.12'} @@ -762,6 +1427,16 @@ packages: resolution: {integrity: sha512-zAOzDnd5qsfEnjd9IGy1IRuvA7ygyyxxdxesbhMdutt8AHFjD8Vw8hU2rMF89HX1BKRWFYqKHrO8Q6lw0NeUZg==} hasBin: true + tsconfck@3.1.6: + resolution: {integrity: sha512-ks6Vjr/jEw0P1gmOVwutM3B7fWxoWBL2KRDb1JfqGVawBmO5UsvmWOQFGHBPl5yxYz4eERr19E6L7NMv+Fej4w==} + engines: {node: ^18 || >=20} + hasBin: true + 
peerDependencies: + typescript: ^5.0.0 + peerDependenciesMeta: + typescript: + optional: true + tslib@2.8.1: resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} @@ -769,13 +1444,18 @@ packages: resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} engines: {node: '>= 0.8.0'} - typescript-eslint@8.48.0: - resolution: {integrity: sha512-fcKOvQD9GUn3Xw63EgiDqhvWJ5jsyZUaekl3KVpGsDJnN46WJTe3jWxtQP9lMZm1LJNkFLlTaWAxK2vUQR+cqw==} + typescript-eslint@8.49.0: + resolution: {integrity: sha512-zRSVH1WXD0uXczCXw+nsdjGPUdx4dfrs5VQoHnUWmv1U3oNlAKv4FUNdLDhVUg+gYn+a5hUESqch//Rv5wVhrg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' + typescript@5.4.5: + resolution: {integrity: sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==} + engines: {node: '>=14.17'} + hasBin: true + typescript@5.9.3: resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} engines: {node: '>=14.17'} @@ -790,6 +1470,91 @@ packages: uri-js@4.4.1: resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + vite-tsconfig-paths@5.1.4: + resolution: {integrity: sha512-cYj0LRuLV2c2sMqhqhGpaO3LretdtMn/BVX4cPLanIZuwwrkVl+lK84E/miEXkCHWXuq65rhNN4rXsBcOB3S4w==} + peerDependencies: + vite: '*' + peerDependenciesMeta: + vite: + optional: true + + vite@7.2.7: + resolution: {integrity: sha512-ITcnkFeR3+fI8P1wMgItjGrR10170d8auB4EpMLPqmx6uxElH3a/hHGQabSHKdqd4FXWO1nFIp9rRn7JQ34ACQ==} + engines: {node: ^20.19.0 || >=22.12.0} + hasBin: true + peerDependencies: + '@types/node': ^20.19.0 || >=22.12.0 + jiti: '>=1.21.0' + less: ^4.0.0 + lightningcss: ^1.21.0 + sass: ^1.70.0 + sass-embedded: ^1.70.0 + stylus: '>=0.54.8' + sugarss: ^5.0.0 + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + + vitest-mock-express@2.2.0: + resolution: {integrity: sha512-JoKVdo9KM4K5wo9GcQ0GabiRFU5XP2yJVMFv9R/C6arcv5wTPoDAiama2jJelO/ieIq+XcGETLr/V4vjCuqofA==} + + vitest@4.0.15: + resolution: {integrity: sha512-n1RxDp8UJm6N0IbJLQo+yzLZ2sQCDyl1o0LeugbPWf8+8Fttp29GghsQBjYJVmWq3gBFfe9Hs1spR44vovn2wA==} + engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@opentelemetry/api': ^1.9.0 + '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 + '@vitest/browser-playwright': 4.0.15 + '@vitest/browser-preview': 4.0.15 + '@vitest/browser-webdriverio': 4.0.15 + '@vitest/ui': 4.0.15 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@opentelemetry/api': + optional: true + '@types/node': + optional: true + '@vitest/browser-playwright': + optional: true + '@vitest/browser-preview': + optional: true + '@vitest/browser-webdriverio': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + which@2.0.2: resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} engines: {node: '>= 8'} @@ 
-800,15 +1565,93 @@ packages: engines: {node: ^16.13.0 || >=18.0.0} hasBin: true - word-wrap@1.2.5: - resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} - engines: {node: '>=0.10.0'} + why-is-node-running@2.3.0: + resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} + engines: {node: '>=8'} + hasBin: true + + word-wrap@1.2.5: + resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} + engines: {node: '>=0.10.0'} + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + +snapshots: + + '@babel/helper-string-parser@7.27.1': {} + + '@babel/helper-validator-identifier@7.28.5': {} + + '@babel/parser@7.28.5': + dependencies: + '@babel/types': 7.28.5 + + '@babel/types@7.28.5': + dependencies: + '@babel/helper-string-parser': 7.27.1 + '@babel/helper-validator-identifier': 7.28.5 + + '@bcoe/v8-coverage@1.0.2': {} + + '@bufbuild/buf-darwin-arm64@1.61.0': + optional: true + + '@bufbuild/buf-darwin-x64@1.61.0': + optional: true + + '@bufbuild/buf-linux-aarch64@1.61.0': + optional: true + + '@bufbuild/buf-linux-armv7@1.61.0': + optional: true + + '@bufbuild/buf-linux-x64@1.61.0': + optional: true + + '@bufbuild/buf-win32-arm64@1.61.0': + optional: true + + '@bufbuild/buf-win32-x64@1.61.0': + optional: true + + '@bufbuild/buf@1.61.0': + optionalDependencies: + '@bufbuild/buf-darwin-arm64': 1.61.0 + '@bufbuild/buf-darwin-x64': 1.61.0 + '@bufbuild/buf-linux-aarch64': 1.61.0 + '@bufbuild/buf-linux-armv7': 1.61.0 + '@bufbuild/buf-linux-x64': 1.61.0 + '@bufbuild/buf-win32-arm64': 1.61.0 + '@bufbuild/buf-win32-x64': 1.61.0 + + '@bufbuild/protobuf@2.10.1': {} + + '@bufbuild/protoc-gen-es@2.10.1(@bufbuild/protobuf@2.10.1)': + dependencies: + '@bufbuild/protoplugin': 2.10.1 + optionalDependencies: + '@bufbuild/protobuf': 2.10.1 + transitivePeerDependencies: + - supports-color + + '@bufbuild/protoplugin@2.10.1': + dependencies: + '@bufbuild/protobuf': 2.10.1 + '@typescript/vfs': 1.6.2(typescript@5.4.5) + typescript: 5.4.5 + transitivePeerDependencies: + - supports-color - yocto-queue@0.1.0: - resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} - engines: {node: '>=10'} + '@connectrpc/connect-node@2.1.1(@bufbuild/protobuf@2.10.1)(@connectrpc/connect@2.1.1(@bufbuild/protobuf@2.10.1))': + dependencies: + '@bufbuild/protobuf': 2.10.1 + '@connectrpc/connect': 2.1.1(@bufbuild/protobuf@2.10.1) -snapshots: + '@connectrpc/connect@2.1.1(@bufbuild/protobuf@2.10.1)': + dependencies: + '@bufbuild/protobuf': 2.10.1 '@dprint/formatter@0.4.1': {} @@ -820,7 +1663,12 @@ snapshots: '@dprint/typescript': 0.91.8 prettier-linter-helpers: 1.0.0 - '@effect/language-service@0.57.1': {} + '@effect/language-service@0.62.0': {} + + '@effect/vitest@0.27.0(effect@3.19.11)(vitest@4.0.15)': + dependencies: + effect: 3.19.11 + vitest: 4.0.15(@types/node@25.0.0)(@vitest/ui@4.0.15) '@emnapi/core@1.7.1': dependencies: @@ -838,6 +1686,84 @@ snapshots: tslib: 2.8.1 optional: true + '@esbuild/aix-ppc64@0.25.12': + optional: true + + '@esbuild/android-arm64@0.25.12': + optional: true + + '@esbuild/android-arm@0.25.12': + optional: true + + '@esbuild/android-x64@0.25.12': + optional: true + + '@esbuild/darwin-arm64@0.25.12': + optional: true + + 
'@esbuild/darwin-x64@0.25.12': + optional: true + + '@esbuild/freebsd-arm64@0.25.12': + optional: true + + '@esbuild/freebsd-x64@0.25.12': + optional: true + + '@esbuild/linux-arm64@0.25.12': + optional: true + + '@esbuild/linux-arm@0.25.12': + optional: true + + '@esbuild/linux-ia32@0.25.12': + optional: true + + '@esbuild/linux-loong64@0.25.12': + optional: true + + '@esbuild/linux-mips64el@0.25.12': + optional: true + + '@esbuild/linux-ppc64@0.25.12': + optional: true + + '@esbuild/linux-riscv64@0.25.12': + optional: true + + '@esbuild/linux-s390x@0.25.12': + optional: true + + '@esbuild/linux-x64@0.25.12': + optional: true + + '@esbuild/netbsd-arm64@0.25.12': + optional: true + + '@esbuild/netbsd-x64@0.25.12': + optional: true + + '@esbuild/openbsd-arm64@0.25.12': + optional: true + + '@esbuild/openbsd-x64@0.25.12': + optional: true + + '@esbuild/openharmony-arm64@0.25.12': + optional: true + + '@esbuild/sunos-x64@0.25.12': + optional: true + + '@esbuild/win32-arm64@0.25.12': + optional: true + + '@esbuild/win32-ia32@0.25.12': + optional: true + + '@esbuild/win32-x64@0.25.12': + optional: true + '@eslint-community/eslint-utils@4.9.0(eslint@9.39.1)': dependencies: eslint: 9.39.1 @@ -901,6 +1827,15 @@ snapshots: dependencies: '@isaacs/balanced-match': 4.0.1 + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/sourcemap-codec@1.5.5': {} + + '@jridgewell/trace-mapping@0.3.31': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.5 + '@napi-rs/wasm-runtime@0.2.12': dependencies: '@emnapi/core': 1.7.1 @@ -908,6 +1843,74 @@ snapshots: '@tybys/wasm-util': 0.10.1 optional: true + '@polka/url@1.0.0-next.29': {} + + '@rollup/rollup-android-arm-eabi@4.53.3': + optional: true + + '@rollup/rollup-android-arm64@4.53.3': + optional: true + + '@rollup/rollup-darwin-arm64@4.53.3': + optional: true + + '@rollup/rollup-darwin-x64@4.53.3': + optional: true + + '@rollup/rollup-freebsd-arm64@4.53.3': + optional: true + + '@rollup/rollup-freebsd-x64@4.53.3': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.53.3': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.53.3': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.53.3': + optional: true + + '@rollup/rollup-linux-loong64-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-ppc64-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-riscv64-musl@4.53.3': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-x64-musl@4.53.3': + optional: true + + '@rollup/rollup-openharmony-arm64@4.53.3': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.53.3': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.53.3': + optional: true + + '@rollup/rollup-win32-x64-gnu@4.53.3': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.53.3': + optional: true + '@standard-schema/spec@1.0.0': {} '@tybys/wasm-util@0.10.1': @@ -915,24 +1918,76 @@ snapshots: tslib: 2.8.1 optional: true + '@types/body-parser@1.19.6': + dependencies: + '@types/connect': 3.4.38 + '@types/node': 25.0.0 + + '@types/chai@5.2.3': + dependencies: + '@types/deep-eql': 4.0.2 + assertion-error: 2.0.1 + + '@types/connect@3.4.38': + dependencies: + '@types/node': 25.0.0 + + '@types/deep-eql@4.0.2': {} + '@types/estree@1.0.8': {} + '@types/express-serve-static-core@4.19.7': + dependencies: + 
'@types/node': 25.0.0 + '@types/qs': 6.14.0 + '@types/range-parser': 1.2.7 + '@types/send': 1.2.1 + + '@types/express@4.17.25': + dependencies: + '@types/body-parser': 1.19.6 + '@types/express-serve-static-core': 4.19.7 + '@types/qs': 6.14.0 + '@types/serve-static': 1.15.10 + + '@types/http-errors@2.0.5': {} + '@types/json-schema@7.0.15': {} - '@types/node@24.10.1': + '@types/mime@1.3.5': {} + + '@types/node@25.0.0': dependencies: undici-types: 7.16.0 - '@typescript-eslint/eslint-plugin@8.48.0(@typescript-eslint/parser@8.48.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1)(typescript@5.9.3)': + '@types/qs@6.14.0': {} + + '@types/range-parser@1.2.7': {} + + '@types/send@0.17.6': + dependencies: + '@types/mime': 1.3.5 + '@types/node': 25.0.0 + + '@types/send@1.2.1': + dependencies: + '@types/node': 25.0.0 + + '@types/serve-static@1.15.10': + dependencies: + '@types/http-errors': 2.0.5 + '@types/node': 25.0.0 + '@types/send': 0.17.6 + + '@typescript-eslint/eslint-plugin@8.49.0(@typescript-eslint/parser@8.49.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1)(typescript@5.9.3)': dependencies: '@eslint-community/regexpp': 4.12.2 - '@typescript-eslint/parser': 8.48.0(eslint@9.39.1)(typescript@5.9.3) - '@typescript-eslint/scope-manager': 8.48.0 - '@typescript-eslint/type-utils': 8.48.0(eslint@9.39.1)(typescript@5.9.3) - '@typescript-eslint/utils': 8.48.0(eslint@9.39.1)(typescript@5.9.3) - '@typescript-eslint/visitor-keys': 8.48.0 + '@typescript-eslint/parser': 8.49.0(eslint@9.39.1)(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.49.0 + '@typescript-eslint/type-utils': 8.49.0(eslint@9.39.1)(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.1)(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.49.0 eslint: 9.39.1 - graphemer: 1.4.0 ignore: 7.0.5 natural-compare: 1.4.0 ts-api-utils: 2.1.0(typescript@5.9.3) @@ -940,41 +1995,41 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.48.0(eslint@9.39.1)(typescript@5.9.3)': + '@typescript-eslint/parser@8.49.0(eslint@9.39.1)(typescript@5.9.3)': dependencies: - '@typescript-eslint/scope-manager': 8.48.0 - '@typescript-eslint/types': 8.48.0 - '@typescript-eslint/typescript-estree': 8.48.0(typescript@5.9.3) - '@typescript-eslint/visitor-keys': 8.48.0 + '@typescript-eslint/scope-manager': 8.49.0 + '@typescript-eslint/types': 8.49.0 + '@typescript-eslint/typescript-estree': 8.49.0(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.49.0 debug: 4.4.3 eslint: 9.39.1 typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/project-service@8.48.0(typescript@5.9.3)': + '@typescript-eslint/project-service@8.49.0(typescript@5.9.3)': dependencies: - '@typescript-eslint/tsconfig-utils': 8.48.0(typescript@5.9.3) - '@typescript-eslint/types': 8.48.0 + '@typescript-eslint/tsconfig-utils': 8.49.0(typescript@5.9.3) + '@typescript-eslint/types': 8.49.0 debug: 4.4.3 typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/scope-manager@8.48.0': + '@typescript-eslint/scope-manager@8.49.0': dependencies: - '@typescript-eslint/types': 8.48.0 - '@typescript-eslint/visitor-keys': 8.48.0 + '@typescript-eslint/types': 8.49.0 + '@typescript-eslint/visitor-keys': 8.49.0 - '@typescript-eslint/tsconfig-utils@8.48.0(typescript@5.9.3)': + '@typescript-eslint/tsconfig-utils@8.49.0(typescript@5.9.3)': dependencies: typescript: 5.9.3 - '@typescript-eslint/type-utils@8.48.0(eslint@9.39.1)(typescript@5.9.3)': + 
'@typescript-eslint/type-utils@8.49.0(eslint@9.39.1)(typescript@5.9.3)': dependencies: - '@typescript-eslint/types': 8.48.0 - '@typescript-eslint/typescript-estree': 8.48.0(typescript@5.9.3) - '@typescript-eslint/utils': 8.48.0(eslint@9.39.1)(typescript@5.9.3) + '@typescript-eslint/types': 8.49.0 + '@typescript-eslint/typescript-estree': 8.49.0(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.1)(typescript@5.9.3) debug: 4.4.3 eslint: 9.39.1 ts-api-utils: 2.1.0(typescript@5.9.3) @@ -984,12 +2039,14 @@ snapshots: '@typescript-eslint/types@8.48.0': {} - '@typescript-eslint/typescript-estree@8.48.0(typescript@5.9.3)': + '@typescript-eslint/types@8.49.0': {} + + '@typescript-eslint/typescript-estree@8.49.0(typescript@5.9.3)': dependencies: - '@typescript-eslint/project-service': 8.48.0(typescript@5.9.3) - '@typescript-eslint/tsconfig-utils': 8.48.0(typescript@5.9.3) - '@typescript-eslint/types': 8.48.0 - '@typescript-eslint/visitor-keys': 8.48.0 + '@typescript-eslint/project-service': 8.49.0(typescript@5.9.3) + '@typescript-eslint/tsconfig-utils': 8.49.0(typescript@5.9.3) + '@typescript-eslint/types': 8.49.0 + '@typescript-eslint/visitor-keys': 8.49.0 debug: 4.4.3 minimatch: 9.0.5 semver: 7.7.3 @@ -999,22 +2056,29 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.48.0(eslint@9.39.1)(typescript@5.9.3)': + '@typescript-eslint/utils@8.49.0(eslint@9.39.1)(typescript@5.9.3)': dependencies: '@eslint-community/eslint-utils': 4.9.0(eslint@9.39.1) - '@typescript-eslint/scope-manager': 8.48.0 - '@typescript-eslint/types': 8.48.0 - '@typescript-eslint/typescript-estree': 8.48.0(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.49.0 + '@typescript-eslint/types': 8.49.0 + '@typescript-eslint/typescript-estree': 8.49.0(typescript@5.9.3) eslint: 9.39.1 typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/visitor-keys@8.48.0': + '@typescript-eslint/visitor-keys@8.49.0': dependencies: - '@typescript-eslint/types': 8.48.0 + '@typescript-eslint/types': 8.49.0 eslint-visitor-keys: 4.2.1 + '@typescript/vfs@1.6.2(typescript@5.4.5)': + dependencies: + debug: 4.4.3 + typescript: 5.4.5 + transitivePeerDependencies: + - supports-color + '@unrs/resolver-binding-android-arm-eabi@1.11.1': optional: true @@ -1074,6 +2138,73 @@ snapshots: '@unrs/resolver-binding-win32-x64-msvc@1.11.1': optional: true + '@vitest/coverage-v8@4.0.15(vitest@4.0.15)': + dependencies: + '@bcoe/v8-coverage': 1.0.2 + '@vitest/utils': 4.0.15 + ast-v8-to-istanbul: 0.3.8 + istanbul-lib-coverage: 3.2.2 + istanbul-lib-report: 3.0.1 + istanbul-lib-source-maps: 5.0.6 + istanbul-reports: 3.2.0 + magicast: 0.5.1 + obug: 2.1.1 + std-env: 3.10.0 + tinyrainbow: 3.0.3 + vitest: 4.0.15(@types/node@25.0.0)(@vitest/ui@4.0.15) + transitivePeerDependencies: + - supports-color + + '@vitest/expect@4.0.15': + dependencies: + '@standard-schema/spec': 1.0.0 + '@types/chai': 5.2.3 + '@vitest/spy': 4.0.15 + '@vitest/utils': 4.0.15 + chai: 6.2.1 + tinyrainbow: 3.0.3 + + '@vitest/mocker@4.0.15(vite@7.2.7(@types/node@25.0.0))': + dependencies: + '@vitest/spy': 4.0.15 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + vite: 7.2.7(@types/node@25.0.0) + + '@vitest/pretty-format@4.0.15': + dependencies: + tinyrainbow: 3.0.3 + + '@vitest/runner@4.0.15': + dependencies: + '@vitest/utils': 4.0.15 + pathe: 2.0.3 + + '@vitest/snapshot@4.0.15': + dependencies: + '@vitest/pretty-format': 4.0.15 + magic-string: 0.30.21 + pathe: 2.0.3 + + '@vitest/spy@4.0.15': {} + + 
'@vitest/ui@4.0.15(vitest@4.0.15)': + dependencies: + '@vitest/utils': 4.0.15 + fflate: 0.8.2 + flatted: 3.3.3 + pathe: 2.0.3 + sirv: 3.0.2 + tinyglobby: 0.2.15 + tinyrainbow: 3.0.3 + vitest: 4.0.15(@types/node@25.0.0)(@vitest/ui@4.0.15) + + '@vitest/utils@4.0.15': + dependencies: + '@vitest/pretty-format': 4.0.15 + tinyrainbow: 3.0.3 + acorn-jsx@5.3.2(acorn@8.15.0): dependencies: acorn: 8.15.0 @@ -1095,6 +2226,14 @@ snapshots: argparse@2.0.1: {} + assertion-error@2.0.1: {} + + ast-v8-to-istanbul@0.3.8: + dependencies: + '@jridgewell/trace-mapping': 0.3.31 + estree-walker: 3.0.3 + js-tokens: 9.0.1 + balanced-match@1.0.2: {} brace-expansion@1.1.12: @@ -1108,6 +2247,8 @@ snapshots: callsites@3.1.0: {} + chai@6.2.1: {} + chalk@4.1.2: dependencies: ansi-styles: 4.3.0 @@ -1135,11 +2276,42 @@ snapshots: deep-is@0.1.4: {} - effect@3.19.8: + effect@3.19.11: dependencies: '@standard-schema/spec': 1.0.0 fast-check: 3.23.2 + es-module-lexer@1.7.0: {} + + esbuild@0.25.12: + optionalDependencies: + '@esbuild/aix-ppc64': 0.25.12 + '@esbuild/android-arm': 0.25.12 + '@esbuild/android-arm64': 0.25.12 + '@esbuild/android-x64': 0.25.12 + '@esbuild/darwin-arm64': 0.25.12 + '@esbuild/darwin-x64': 0.25.12 + '@esbuild/freebsd-arm64': 0.25.12 + '@esbuild/freebsd-x64': 0.25.12 + '@esbuild/linux-arm': 0.25.12 + '@esbuild/linux-arm64': 0.25.12 + '@esbuild/linux-ia32': 0.25.12 + '@esbuild/linux-loong64': 0.25.12 + '@esbuild/linux-mips64el': 0.25.12 + '@esbuild/linux-ppc64': 0.25.12 + '@esbuild/linux-riscv64': 0.25.12 + '@esbuild/linux-s390x': 0.25.12 + '@esbuild/linux-x64': 0.25.12 + '@esbuild/netbsd-arm64': 0.25.12 + '@esbuild/netbsd-x64': 0.25.12 + '@esbuild/openbsd-arm64': 0.25.12 + '@esbuild/openbsd-x64': 0.25.12 + '@esbuild/openharmony-arm64': 0.25.12 + '@esbuild/sunos-x64': 0.25.12 + '@esbuild/win32-arm64': 0.25.12 + '@esbuild/win32-ia32': 0.25.12 + '@esbuild/win32-x64': 0.25.12 + escape-string-regexp@4.0.0: {} eslint-import-context@0.1.9(unrs-resolver@1.11.1): @@ -1149,7 +2321,7 @@ snapshots: optionalDependencies: unrs-resolver: 1.11.1 - eslint-import-resolver-typescript@4.4.4(eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.48.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1))(eslint@9.39.1): + eslint-import-resolver-typescript@4.4.4(eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.49.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1))(eslint@9.39.1): dependencies: debug: 4.4.3 eslint: 9.39.1 @@ -1160,11 +2332,11 @@ snapshots: tinyglobby: 0.2.15 unrs-resolver: 1.11.1 optionalDependencies: - eslint-plugin-import-x: 4.16.1(@typescript-eslint/utils@8.48.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1) + eslint-plugin-import-x: 4.16.1(@typescript-eslint/utils@8.49.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1) transitivePeerDependencies: - supports-color - eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.48.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1): + eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.49.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1): dependencies: '@typescript-eslint/types': 8.48.0 comment-parser: 1.4.1 @@ -1177,7 +2349,7 @@ snapshots: stable-hash-x: 0.2.0 unrs-resolver: 1.11.1 optionalDependencies: - '@typescript-eslint/utils': 8.48.0(eslint@9.39.1)(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.1)(typescript@5.9.3) transitivePeerDependencies: - supports-color @@ -1190,11 +2362,11 @@ snapshots: eslint: 9.39.1 natural-compare-lite: 1.4.0 - 
eslint-plugin-unused-imports@4.3.0(@typescript-eslint/eslint-plugin@8.48.0(@typescript-eslint/parser@8.48.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1): + eslint-plugin-unused-imports@4.3.0(@typescript-eslint/eslint-plugin@8.49.0(@typescript-eslint/parser@8.49.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1): dependencies: eslint: 9.39.1 optionalDependencies: - '@typescript-eslint/eslint-plugin': 8.48.0(@typescript-eslint/parser@8.48.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1)(typescript@5.9.3) + '@typescript-eslint/eslint-plugin': 8.49.0(@typescript-eslint/parser@8.49.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1)(typescript@5.9.3) eslint-scope@8.4.0: dependencies: @@ -1260,8 +2432,14 @@ snapshots: estraverse@5.3.0: {} + estree-walker@3.0.3: + dependencies: + '@types/estree': 1.0.8 + esutils@2.0.3: {} + expect-type@1.3.0: {} + fast-check@3.23.2: dependencies: pure-rand: 6.1.0 @@ -1278,6 +2456,8 @@ snapshots: optionalDependencies: picomatch: 4.0.3 + fflate@0.8.2: {} + file-entry-cache@8.0.0: dependencies: flat-cache: 4.0.1 @@ -1294,6 +2474,9 @@ snapshots: flatted@3.3.3: {} + fsevents@2.3.3: + optional: true + function-bind@1.1.2: {} get-tsconfig@4.13.0: @@ -1304,6 +2487,12 @@ snapshots: dependencies: is-glob: 4.0.3 + glob@13.0.0: + dependencies: + minimatch: 10.1.1 + minipass: 7.1.2 + path-scurry: 2.0.1 + global-prefix@4.0.0: dependencies: ini: 4.1.3 @@ -1312,7 +2501,9 @@ snapshots: globals@14.0.0: {} - graphemer@1.4.0: {} + globals@16.5.0: {} + + globrex@0.1.2: {} has-flag@4.0.0: {} @@ -1320,6 +2511,8 @@ snapshots: dependencies: function-bind: 1.1.2 + html-escaper@2.0.2: {} + ignore@5.3.2: {} ignore@7.0.5: {} @@ -1351,6 +2544,29 @@ snapshots: isexe@3.1.1: {} + istanbul-lib-coverage@3.2.2: {} + + istanbul-lib-report@3.0.1: + dependencies: + istanbul-lib-coverage: 3.2.2 + make-dir: 4.0.0 + supports-color: 7.2.0 + + istanbul-lib-source-maps@5.0.6: + dependencies: + '@jridgewell/trace-mapping': 0.3.31 + debug: 4.4.3 + istanbul-lib-coverage: 3.2.2 + transitivePeerDependencies: + - supports-color + + istanbul-reports@3.2.0: + dependencies: + html-escaper: 2.0.2 + istanbul-lib-report: 3.0.1 + + js-tokens@9.0.1: {} + js-yaml@4.1.1: dependencies: argparse: 2.0.1 @@ -1378,6 +2594,22 @@ snapshots: lodash.merge@4.6.2: {} + lru-cache@11.2.4: {} + + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + + magicast@0.5.1: + dependencies: + '@babel/parser': 7.28.5 + '@babel/types': 7.28.5 + source-map-js: 1.2.1 + + make-dir@4.0.0: + dependencies: + semver: 7.7.3 + minimatch@10.1.1: dependencies: '@isaacs/brace-expansion': 5.0.0 @@ -1392,14 +2624,22 @@ snapshots: minimist@1.2.8: {} + minipass@7.1.2: {} + + mrmime@2.0.1: {} + ms@2.1.3: {} + nanoid@3.3.11: {} + napi-postinstall@0.3.4: {} natural-compare-lite@1.4.0: {} natural-compare@1.4.0: {} + obug@2.1.1: {} + optionator@0.9.4: dependencies: deep-is: 0.1.4 @@ -1427,8 +2667,23 @@ snapshots: path-parse@1.0.7: {} + path-scurry@2.0.1: + dependencies: + lru-cache: 11.2.4 + minipass: 7.1.2 + + pathe@2.0.3: {} + + picocolors@1.1.1: {} + picomatch@4.0.3: {} + postcss@8.5.6: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + prelude-ls@1.2.1: {} prettier-linter-helpers@1.0.0: @@ -1449,6 +2704,34 @@ snapshots: path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 + rollup@4.53.3: + dependencies: + '@types/estree': 1.0.8 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.53.3 + '@rollup/rollup-android-arm64': 4.53.3 
+ '@rollup/rollup-darwin-arm64': 4.53.3 + '@rollup/rollup-darwin-x64': 4.53.3 + '@rollup/rollup-freebsd-arm64': 4.53.3 + '@rollup/rollup-freebsd-x64': 4.53.3 + '@rollup/rollup-linux-arm-gnueabihf': 4.53.3 + '@rollup/rollup-linux-arm-musleabihf': 4.53.3 + '@rollup/rollup-linux-arm64-gnu': 4.53.3 + '@rollup/rollup-linux-arm64-musl': 4.53.3 + '@rollup/rollup-linux-loong64-gnu': 4.53.3 + '@rollup/rollup-linux-ppc64-gnu': 4.53.3 + '@rollup/rollup-linux-riscv64-gnu': 4.53.3 + '@rollup/rollup-linux-riscv64-musl': 4.53.3 + '@rollup/rollup-linux-s390x-gnu': 4.53.3 + '@rollup/rollup-linux-x64-gnu': 4.53.3 + '@rollup/rollup-linux-x64-musl': 4.53.3 + '@rollup/rollup-openharmony-arm64': 4.53.3 + '@rollup/rollup-win32-arm64-msvc': 4.53.3 + '@rollup/rollup-win32-ia32-msvc': 4.53.3 + '@rollup/rollup-win32-x64-gnu': 4.53.3 + '@rollup/rollup-win32-x64-msvc': 4.53.3 + fsevents: 2.3.3 + semver@7.7.3: {} shebang-command@2.0.0: @@ -1457,8 +2740,22 @@ snapshots: shebang-regex@3.0.0: {} + siginfo@2.0.0: {} + + sirv@3.0.2: + dependencies: + '@polka/url': 1.0.0-next.29 + mrmime: 2.0.1 + totalist: 3.0.1 + + source-map-js@1.2.1: {} + stable-hash-x@0.2.0: {} + stackback@0.0.2: {} + + std-env@3.10.0: {} + strip-ansi@6.0.1: dependencies: ansi-regex: 5.0.1 @@ -1471,11 +2768,19 @@ snapshots: supports-preserve-symlinks-flag@1.0.0: {} + tinybench@2.9.0: {} + + tinyexec@1.0.2: {} + tinyglobby@0.2.15: dependencies: fdir: 6.5.0(picomatch@4.0.3) picomatch: 4.0.3 + tinyrainbow@3.0.3: {} + + totalist@3.0.1: {} + ts-api-utils@2.1.0(typescript@5.9.3): dependencies: typescript: 5.9.3 @@ -1489,6 +2794,10 @@ snapshots: semver: 7.7.3 strip-ansi: 6.0.1 + tsconfck@3.1.6(typescript@5.9.3): + optionalDependencies: + typescript: 5.9.3 + tslib@2.8.1: optional: true @@ -1496,17 +2805,19 @@ snapshots: dependencies: prelude-ls: 1.2.1 - typescript-eslint@8.48.0(eslint@9.39.1)(typescript@5.9.3): + typescript-eslint@8.49.0(eslint@9.39.1)(typescript@5.9.3): dependencies: - '@typescript-eslint/eslint-plugin': 8.48.0(@typescript-eslint/parser@8.48.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1)(typescript@5.9.3) - '@typescript-eslint/parser': 8.48.0(eslint@9.39.1)(typescript@5.9.3) - '@typescript-eslint/typescript-estree': 8.48.0(typescript@5.9.3) - '@typescript-eslint/utils': 8.48.0(eslint@9.39.1)(typescript@5.9.3) + '@typescript-eslint/eslint-plugin': 8.49.0(@typescript-eslint/parser@8.49.0(eslint@9.39.1)(typescript@5.9.3))(eslint@9.39.1)(typescript@5.9.3) + '@typescript-eslint/parser': 8.49.0(eslint@9.39.1)(typescript@5.9.3) + '@typescript-eslint/typescript-estree': 8.49.0(typescript@5.9.3) + '@typescript-eslint/utils': 8.49.0(eslint@9.39.1)(typescript@5.9.3) eslint: 9.39.1 typescript: 5.9.3 transitivePeerDependencies: - supports-color + typescript@5.4.5: {} + typescript@5.9.3: {} undici-types@7.16.0: {} @@ -1539,6 +2850,71 @@ snapshots: dependencies: punycode: 2.3.1 + vite-tsconfig-paths@5.1.4(typescript@5.9.3)(vite@7.2.7(@types/node@25.0.0)): + dependencies: + debug: 4.4.3 + globrex: 0.1.2 + tsconfck: 3.1.6(typescript@5.9.3) + optionalDependencies: + vite: 7.2.7(@types/node@25.0.0) + transitivePeerDependencies: + - supports-color + - typescript + + vite@7.2.7(@types/node@25.0.0): + dependencies: + esbuild: 0.25.12 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + postcss: 8.5.6 + rollup: 4.53.3 + tinyglobby: 0.2.15 + optionalDependencies: + '@types/node': 25.0.0 + fsevents: 2.3.3 + + vitest-mock-express@2.2.0: + dependencies: + '@types/express': 4.17.25 + + vitest@4.0.15(@types/node@25.0.0)(@vitest/ui@4.0.15): + dependencies: + 
'@vitest/expect': 4.0.15 + '@vitest/mocker': 4.0.15(vite@7.2.7(@types/node@25.0.0)) + '@vitest/pretty-format': 4.0.15 + '@vitest/runner': 4.0.15 + '@vitest/snapshot': 4.0.15 + '@vitest/spy': 4.0.15 + '@vitest/utils': 4.0.15 + es-module-lexer: 1.7.0 + expect-type: 1.3.0 + magic-string: 0.30.21 + obug: 2.1.1 + pathe: 2.0.3 + picomatch: 4.0.3 + std-env: 3.10.0 + tinybench: 2.9.0 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + tinyrainbow: 3.0.3 + vite: 7.2.7(@types/node@25.0.0) + why-is-node-running: 2.3.0 + optionalDependencies: + '@types/node': 25.0.0 + '@vitest/ui': 4.0.15(vitest@4.0.15) + transitivePeerDependencies: + - jiti + - less + - lightningcss + - msw + - sass + - sass-embedded + - stylus + - sugarss + - terser + - tsx + - yaml + which@2.0.2: dependencies: isexe: 2.0.0 @@ -1547,6 +2923,11 @@ snapshots: + why-is-node-running@2.3.0: + dependencies: + siginfo: 2.0.0 + stackback: 0.0.2 + word-wrap@1.2.5: {} yocto-queue@0.1.0: {} diff --git a/scratchpad/index.ts b/scratchpad/index.ts new file mode 100644 index 0000000..336ce12 --- /dev/null +++ b/scratchpad/index.ts @@ -0,0 +1 @@ +export {} diff --git a/scratchpad/package.json b/scratchpad/package.json new file mode 100644 index 0000000..a388aed --- /dev/null +++ b/scratchpad/package.json @@ -0,0 +1,9 @@ +{ + "name": "scratchpad", + "private": true, + "type": "module", + "version": "0.0.0", + "dependencies": { + "@edgeandnode/amp": "workspace:*" + } +} diff --git a/scratchpad/tsconfig.json b/scratchpad/tsconfig.json new file mode 100644 index 0000000..13a468b --- /dev/null +++ b/scratchpad/tsconfig.json @@ -0,0 +1,20 @@ +{ + "$schema": "http://json.schemastore.org/tsconfig", + "extends": "../tsconfig.base.jsonc", + "include": [ + "**/*.ts" + ], + "compilerOptions": { + "noEmit": true, + "declaration": false, + "declarationMap": false, + "composite": false, + "incremental": false, + "plugins": [ + { + "name": "@effect/language-service", + "namespaceImportPackages": [] + } + ] + } +} diff --git a/scripts/clean.mjs b/scripts/clean.mjs new file mode 100644 index 0000000..fee744d --- /dev/null +++ b/scripts/clean.mjs @@ -0,0 +1,11 @@ +import * as Glob from "glob" +import * as Fs from "node:fs" + +const dirs = [".", ...Glob.sync("packages/*/"), ...Glob.sync("packages/ai/*/")] +dirs.forEach((pkg) => { + const files = [".tsbuildinfo", "build", "dist", "coverage"] + + files.forEach((file) => { + Fs.rmSync(`${pkg}/${file}`, { recursive: true, force: true }) + }) +}) diff --git a/tsconfig.base.jsonc b/tsconfig.base.jsonc index 47236d5..f924861 100644 --- a/tsconfig.base.jsonc +++ b/tsconfig.base.jsonc @@ -30,10 +30,7 @@ { "name": "@effect/language-service", "transform": "@effect/language-service/transform", - "namespaceImportPackages": [ - "effect", - "@effect/*" - ] + "namespaceImportPackages": ["effect", "@effect/*"] } ] } diff --git a/tsconfig.build.json b/tsconfig.build.json index 7fd9315..540bdfc 100644 --- a/tsconfig.build.json +++ b/tsconfig.build.json @@ -6,7 +6,6 @@ "stripInternal": true }, "references": [ - { "path": "packages/amp/tsconfig.build.json" }, - { "path": "packages/arrow-flight-json/tsconfig.build.json" } + { "path": "packages/amp/tsconfig.build.json" } ] } diff --git a/tsconfig.json b/tsconfig.json index 85f9c03..80a019a 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -3,7 +3,6 @@ "extends": "./tsconfig.base.jsonc", "include": [], "references": [ - { "path": "packages/amp" }, - { "path": "packages/arrow-flight-json" } + { "path": "packages/amp" } ] } diff --git a/vitest.config.ts
b/vitest.config.ts new file mode 100644 index 0000000..fda3ed1 --- /dev/null +++ b/vitest.config.ts @@ -0,0 +1,8 @@ +import { defineConfig } from "vitest/config" + +export default defineConfig({ + test: { + globals: true, + projects: ["packages/*/vitest.config.ts"] + } +}) diff --git a/vitest.setup.ts b/vitest.setup.ts new file mode 100644 index 0000000..f12d839 --- /dev/null +++ b/vitest.setup.ts @@ -0,0 +1,21 @@ +import { addEqualityTesters } from "@effect/vitest" + +addEqualityTesters() + +// Ignore warnings from usage of experimental features to declutter test output. +const ignore = ["ExperimentalWarning"] +const emitWarning = process.emitWarning +process.emitWarning = (warning, ...args) => { + const [head] = args + if (head != null) { + if (typeof head === "string" && ignore.includes(head)) { + return + } + + if (typeof head === "object" && ignore.includes(head.type)) { + return + } + } + + return emitWarning(warning, ...args) +} diff --git a/vitest.shared.ts b/vitest.shared.ts new file mode 100644 index 0000000..479b6c4 --- /dev/null +++ b/vitest.shared.ts @@ -0,0 +1,41 @@ +import * as path from "node:path" +import viteTsconfigPaths from "vite-tsconfig-paths" +import type { ViteUserConfig } from "vitest/config" + +const config: ViteUserConfig = { + plugins: [viteTsconfigPaths()], + esbuild: { + target: "es2020" + }, + test: { + setupFiles: [path.join(__dirname, "vitest.setup.ts")], + fakeTimers: { + toFake: undefined + }, + sequence: { + concurrent: true + }, + include: ["test/**/*.{test,spec}.{js,mjs,cjs,ts,mts,cts,jsx,tsx}"], + coverage: { + provider: "v8", + reporter: ["html"], + reportsDirectory: "coverage", + exclude: [ + "node_modules/", + "dist/", + "benchmark/", + "bundle/", + "dtslint/", + "build/", + "coverage/", + "test/utils/", + "**/*.d.ts", + "**/*.config.*", + "**/vitest.setup.*", + "**/vitest.shared.*" + ] + } + } +} + +export default config
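Note: the root vitest.config.ts above discovers test projects through the "packages/*/vitest.config.ts" glob, but no per-package config is included in this diff. A minimal sketch of what one could look like, assuming a packages/amp/vitest.config.ts path and a relative import of the shared defaults (both hypothetical, not taken from this diff):

// packages/amp/vitest.config.ts — hypothetical per-package config that
// layers package-specific overrides on top of vitest.shared.ts.
import { mergeConfig, type ViteUserConfig } from "vitest/config"
import shared from "../../vitest.shared.js"

// Package-specific overrides go here; an empty object means the shared
// defaults (setup file, concurrent sequence, v8 coverage) apply unchanged.
const config: ViteUserConfig = {}

export default mergeConfig(shared, config)

With such a file in place, the root vitest run picks the package up via the projects glob, so each package inherits one consistent test configuration while still being able to override individual settings locally.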