Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions .changeset/green-planes-jump.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
---
"@voltagent/core": patch
---

feat: add estimated prompt context telemetry for observability

- record estimated prompt-context breakdown for system instructions, conversation messages, and tool schemas on LLM spans
- expose cached and reasoning token usage on LLM spans for observability consumers
- add tests for prompt-context estimation helpers
24 changes: 22 additions & 2 deletions packages/core/src/agent/agent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -109,6 +109,10 @@ import {
import type { AgentHooks, OnToolEndHookResult, OnToolErrorHookResult } from "./hooks";
import { stripDanglingOpenAIReasoningFromModelMessages } from "./model-message-normalizer";
import { AgentTraceContext, addModelAttributesToSpan } from "./open-telemetry/trace-context";
import {
estimatePromptContextUsage,
promptContextUsageEstimateToAttributes,
} from "./prompt-context-usage";
import type {
BaseMessage,
BaseTool,
Expand Down Expand Up @@ -4101,7 +4105,16 @@ export class Agent {
},
): Span {
const { label, ...spanParams } = params;
const attributes = this.buildLLMSpanAttributes(spanParams);
const promptContextUsageEstimate = estimatePromptContextUsage({
messages: params.messages,
tools: params.tools,
});
const attributes = {
...this.buildLLMSpanAttributes(spanParams),
...(promptContextUsageEstimate
? promptContextUsageEstimateToAttributes(promptContextUsageEstimate)
: {}),
};
const span = oc.traceContext.createChildSpan(`llm:${params.operation}`, "llm", {
kind: SpanKind.CLIENT,
label,
Expand Down Expand Up @@ -4240,7 +4253,8 @@ export class Agent {
return;
}

const { promptTokens, completionTokens, totalTokens } = normalizedUsage;
const { promptTokens, completionTokens, totalTokens, cachedInputTokens, reasoningTokens } =
normalizedUsage;

if (promptTokens !== undefined) {
span.setAttribute("llm.usage.prompt_tokens", promptTokens);
Expand All @@ -4251,6 +4265,12 @@ export class Agent {
if (totalTokens !== undefined) {
span.setAttribute("llm.usage.total_tokens", totalTokens);
}
if (cachedInputTokens !== undefined) {
span.setAttribute("llm.usage.cached_tokens", cachedInputTokens);
}
if (reasoningTokens !== undefined) {
span.setAttribute("llm.usage.reasoning_tokens", reasoningTokens);
}
}

private recordProviderCost(span: Span, providerMetadata?: unknown): void {
Expand Down
71 changes: 71 additions & 0 deletions packages/core/src/agent/prompt-context-usage.spec.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
import { describe, expect, it } from "vitest";
import { z } from "zod";
import {
estimatePromptContextUsage,
promptContextUsageEstimateToAttributes,
} from "./prompt-context-usage";

// Unit tests for the prompt-context estimation helpers that feed the
// usage.prompt_context.* attributes recorded on LLM spans.
describe("prompt context usage estimation", () => {
  // End-to-end estimate over a mixed conversation plus one tool definition:
  // system tokens/counts are tracked separately, and the aggregate
  // invariants (message = system + non-system, total = message + tool) hold.
  it("estimates system, message, and tool context separately", () => {
    const estimate = estimatePromptContextUsage({
      messages: [
        {
          role: "system",
          content: "You are a careful assistant.",
        },
        {
          role: "user",
          content: "Summarize the latest release notes.",
        },
        {
          // Array-form content (text parts) must be serialized too.
          role: "assistant",
          content: [{ type: "text", text: "Let me inspect them." }],
        },
      ],
      tools: {
        searchDocs: {
          description: "Search the documentation",
          inputSchema: z.object({
            query: z.string(),
            topK: z.number().int().optional(),
          }),
        },
      },
    });

    expect(estimate).toBeDefined();
    expect(estimate?.systemMessageCount).toBe(1);
    expect(estimate?.toolCount).toBe(1);
    expect(estimate?.systemTokensEstimated).toBeGreaterThan(0);
    expect(estimate?.nonSystemMessageTokensEstimated).toBeGreaterThan(0);
    expect(estimate?.toolTokensEstimated).toBeGreaterThan(0);
    // Message tokens are the sum of system and non-system message tokens.
    expect(estimate?.messageTokensEstimated).toBe(
      (estimate?.systemTokensEstimated ?? 0) + (estimate?.nonSystemMessageTokensEstimated ?? 0),
    );
    // Total tokens add the tool-schema estimate on top of the messages.
    expect(estimate?.totalTokensEstimated).toBe(
      (estimate?.messageTokensEstimated ?? 0) + (estimate?.toolTokensEstimated ?? 0),
    );
  });

  // The attribute mapping is a pure rename: every estimate field maps to a
  // usage.prompt_context.* key with its value passed through unchanged.
  it("returns prompt context usage span attributes", () => {
    const attributes = promptContextUsageEstimateToAttributes({
      systemTokensEstimated: 12,
      messageTokensEstimated: 34,
      nonSystemMessageTokensEstimated: 22,
      toolTokensEstimated: 18,
      totalTokensEstimated: 52,
      systemMessageCount: 1,
      toolCount: 2,
    });

    expect(attributes).toEqual({
      "usage.prompt_context.system_tokens_estimated": 12,
      "usage.prompt_context.message_tokens_estimated": 34,
      "usage.prompt_context.non_system_message_tokens_estimated": 22,
      "usage.prompt_context.tool_tokens_estimated": 18,
      "usage.prompt_context.total_tokens_estimated": 52,
      "usage.prompt_context.system_message_count": 1,
      "usage.prompt_context.tool_count": 2,
    });
  });
});
223 changes: 223 additions & 0 deletions packages/core/src/agent/prompt-context-usage.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,223 @@
import { safeStringify } from "@voltagent/internal/utils";
import type { ToolSet } from "ai";
import { zodSchemaToJsonUI } from "../utils/toolParser";

// Heuristic: assume ~4 characters per token when estimating usage.
const ESTIMATED_CHARS_PER_TOKEN = 4;
// Message-part `type` values that carry binary payloads; these are collapsed
// to short placeholders instead of being serialized in full.
const BINARY_PART_TYPES = new Set([
  "audio",
  "file",
  "image",
  "input_audio",
  "input_image",
  "media",
]);
// Object keys whose values are likely large binary blobs (e.g. base64 data);
// their values are redacted before stringification.
const LARGE_BINARY_KEYS = new Set(["audio", "base64", "bytes", "data", "image"]);

// Loose structural view of an inbound prompt message; role and content are
// intentionally untyped because messages may arrive in several shapes.
type PromptMessage = {
  role?: string;
  content?: unknown;
};

/**
 * Estimated prompt-context token breakdown attached to LLM spans.
 * All token figures are character-count heuristics
 * (ESTIMATED_CHARS_PER_TOKEN), not provider-reported counts.
 */
export interface PromptContextUsageEstimate {
  // Tokens attributed to system-role messages only.
  systemTokensEstimated: number;
  // Tokens across all messages (system + non-system).
  messageTokensEstimated: number;
  // Tokens for non-system messages (messageTokensEstimated minus system).
  nonSystemMessageTokensEstimated: number;
  // Tokens for the serialized tool definitions/schemas.
  toolTokensEstimated: number;
  // messageTokensEstimated + toolTokensEstimated.
  totalTokensEstimated: number;
  // Number of system-role messages seen.
  systemMessageCount: number;
  // Number of tool definitions serialized.
  toolCount: number;
}

export function estimatePromptContextUsage(params: {
messages?: PromptMessage[];
tools?: ToolSet;
}): PromptContextUsageEstimate | undefined {
let systemTokensEstimated = 0;
let messageTokensEstimated = 0;
let nonSystemMessageTokensEstimated = 0;
let systemMessageCount = 0;

for (const message of params.messages ?? []) {
const serializedMessage = serializePromptMessage(message);
if (!serializedMessage) {
continue;
}

const estimatedTokens = estimateTokensFromText(serializedMessage);
messageTokensEstimated += estimatedTokens;

if (message.role === "system") {
systemTokensEstimated += estimatedTokens;
systemMessageCount += 1;
continue;
}

nonSystemMessageTokensEstimated += estimatedTokens;
}

const serializedTools = Object.entries(params.tools ?? {}).map(([name, tool]) =>
serializeToolDefinition(name, tool),
);
const toolTokensEstimated =
serializedTools.length > 0 ? estimateTokensFromText(safeStringify(serializedTools)) : 0;
const totalTokensEstimated = messageTokensEstimated + toolTokensEstimated;

if (totalTokensEstimated === 0) {
return undefined;
}

return {
systemTokensEstimated,
messageTokensEstimated,
nonSystemMessageTokensEstimated,
toolTokensEstimated,
totalTokensEstimated,
systemMessageCount,
toolCount: serializedTools.length,
};
}

/**
 * Flattens a prompt-context estimate into span attributes under the
 * `usage.prompt_context.*` naming convention. Pure value pass-through.
 *
 * @param estimate - Breakdown produced by estimatePromptContextUsage.
 * @returns A flat attribute map suitable for Span.setAttributes.
 */
export function promptContextUsageEstimateToAttributes(
  estimate: PromptContextUsageEstimate,
): Record<string, number> {
  const attributes: Record<string, number> = {};
  attributes["usage.prompt_context.system_tokens_estimated"] = estimate.systemTokensEstimated;
  attributes["usage.prompt_context.message_tokens_estimated"] = estimate.messageTokensEstimated;
  attributes["usage.prompt_context.non_system_message_tokens_estimated"] =
    estimate.nonSystemMessageTokensEstimated;
  attributes["usage.prompt_context.tool_tokens_estimated"] = estimate.toolTokensEstimated;
  attributes["usage.prompt_context.total_tokens_estimated"] = estimate.totalTokensEstimated;
  attributes["usage.prompt_context.system_message_count"] = estimate.systemMessageCount;
  attributes["usage.prompt_context.tool_count"] = estimate.toolCount;
  return attributes;
}

/** Crude token estimate: ~ESTIMATED_CHARS_PER_TOKEN characters per token, rounded up. */
function estimateTokensFromText(text: string): number {
  return text.length === 0 ? 0 : Math.ceil(text.length / ESTIMATED_CHARS_PER_TOKEN);
}

/**
 * Renders a message as "ROLE:\n<content>" for token estimation.
 * Messages whose content serializes to nothing yield an empty string so the
 * caller can drop them from the count.
 */
function serializePromptMessage(message: PromptMessage): string {
  const body = serializePromptValue(message.content).trim();
  if (body.length === 0) {
    return "";
  }

  const roleLabel = typeof message.role === "string" ? message.role.toUpperCase() : "MESSAGE";
  return `${roleLabel}:\n${body}`;
}

/**
 * Best-effort textual rendering of arbitrary message content for token
 * estimation. Binary parts collapse to short placeholders and large payload
 * keys are redacted before stringification so estimation stays cheap.
 * Check order matters: an explicit `text` field wins over a binary `type`.
 */
function serializePromptValue(value: unknown): string {
  if (typeof value === "string") {
    return value;
  }
  if (typeof value === "number" || typeof value === "boolean") {
    return `${value}`;
  }
  if (Array.isArray(value)) {
    const parts: string[] = [];
    for (const entry of value) {
      const serialized = serializePromptValue(entry);
      if (serialized.trim().length > 0) {
        parts.push(serialized);
      }
    }
    return parts.join("\n");
  }
  if (value === null || value === undefined || typeof value !== "object") {
    // Functions, symbols, bigints, and nullish values contribute nothing.
    return "";
  }

  const record = value as Record<string, unknown>;
  const partType = typeof record.type === "string" ? record.type : undefined;

  if (typeof record.text === "string") {
    return record.text;
  }

  if (partType !== undefined && BINARY_PART_TYPES.has(partType)) {
    // Collapse binary payloads to a short placeholder like "[image]".
    return `[${partType}]`;
  }

  if (partType === "tool-call") {
    const toolName = typeof record.toolName === "string" ? record.toolName : "tool";
    const input = serializePromptValue(record.input);
    return input.length > 0 ? `tool-call ${toolName}: ${input}` : `tool-call ${toolName}`;
  }

  if (partType === "tool-result") {
    const toolName = typeof record.toolName === "string" ? record.toolName : "tool";
    const output = serializePromptValue(record.output);
    return output.length > 0 ? `tool-result ${toolName}: ${output}` : `tool-result ${toolName}`;
  }

  if ("content" in record) {
    const nested = serializePromptValue(record.content);
    if (nested) {
      return nested;
    }
  }

  // Unknown object shape: stringify with large binary keys redacted.
  return safeStringify(sanitizeRecord(record));
}

function sanitizeRecord(record: Record<string, unknown>): Record<string, unknown> {
const sanitized: Record<string, unknown> = {};

for (const [key, value] of Object.entries(record)) {
sanitized[key] = LARGE_BINARY_KEYS.has(key) ? "[omitted]" : value;
}
Comment on lines +166 to +171
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Deep binary fields are not sanitized, which can still blow up serialization cost.

On Line 170, only top-level keys are redacted. Nested payloads (e.g., content.metadata.data) still pass through and can create large safeStringify inputs in hot paths, skewing estimates and adding avoidable overhead.

Proposed fix (recursive sanitization)
-function sanitizeRecord(record: Record<string, unknown>): Record<string, unknown> {
-  const sanitized: Record<string, unknown> = {};
-
-  for (const [key, value] of Object.entries(record)) {
-    sanitized[key] = LARGE_BINARY_KEYS.has(key) ? "[omitted]" : value;
-  }
-
-  return sanitized;
-}
+function sanitizeValue(value: unknown, depth = 0): unknown {
+  if (depth > 6) return "[omitted]";
+  if (Array.isArray(value)) return value.map((v) => sanitizeValue(v, depth + 1));
+  if (!value || typeof value !== "object") return value;
+
+  const sanitized: Record<string, unknown> = {};
+  for (const [key, entry] of Object.entries(value)) {
+    sanitized[key] = LARGE_BINARY_KEYS.has(key)
+      ? "[omitted]"
+      : sanitizeValue(entry, depth + 1);
+  }
+  return sanitized;
+}
+
+function sanitizeRecord(record: Record<string, unknown>): Record<string, unknown> {
+  return sanitizeValue(record) as Record<string, unknown>;
+}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
function sanitizeRecord(record: Record<string, unknown>): Record<string, unknown> {
const sanitized: Record<string, unknown> = {};
for (const [key, value] of Object.entries(record)) {
sanitized[key] = LARGE_BINARY_KEYS.has(key) ? "[omitted]" : value;
}
function sanitizeValue(value: unknown, depth = 0): unknown {
if (depth > 6) return "[omitted]";
if (Array.isArray(value)) return value.map((v) => sanitizeValue(v, depth + 1));
if (!value || typeof value !== "object") return value;
const sanitized: Record<string, unknown> = {};
for (const [key, entry] of Object.entries(value)) {
sanitized[key] = LARGE_BINARY_KEYS.has(key)
? "[omitted]"
: sanitizeValue(entry, depth + 1);
}
return sanitized;
}
function sanitizeRecord(record: Record<string, unknown>): Record<string, unknown> {
return sanitizeValue(record) as Record<string, unknown>;
}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@packages/core/src/agent/prompt-context-usage.ts` around lines 166 - 171, The
sanitizeRecord function currently only redacts top-level keys and misses nested
binary fields; update sanitizeRecord to recursively traverse objects and arrays
so any property whose key matches LARGE_BINARY_KEYS (at any depth, e.g.,
content.metadata.data) is replaced with "[omitted]" while primitives are kept
as-is; detect and skip non-plain values (null, Date, RegExp) and protect against
circular references (e.g., via a seen Set) to avoid infinite recursion; keep the
function name sanitizeRecord and the LARGE_BINARY_KEYS lookup so callers remain
unchanged.


return sanitized;
}

function serializeToolDefinition(name: string, tool: unknown): Record<string, unknown> {
if (!tool || typeof tool !== "object") {
return { name };
}

const candidate = tool as Record<string, unknown>;

return {
name,
...(typeof candidate.type === "string" ? { type: candidate.type } : {}),
...(typeof candidate.id === "string" ? { id: candidate.id } : {}),
...(typeof candidate.description === "string" ? { description: candidate.description } : {}),
...(candidate.inputSchema || candidate.parameters || candidate.input_schema || candidate.schema
? {
inputSchema: normalizeSchema(
candidate.inputSchema ??
candidate.parameters ??
candidate.input_schema ??
candidate.schema,
),
}
: {}),
...(candidate.outputSchema || candidate.output_schema
? {
outputSchema: normalizeSchema(candidate.outputSchema ?? candidate.output_schema),
}
: {}),
...(candidate.providerOptions ? { providerOptions: candidate.providerOptions } : {}),
...(candidate.args ? { args: sanitizeRecord(candidate.args as Record<string, unknown>) } : {}),
...(candidate.needsApproval !== undefined ? { needsApproval: candidate.needsApproval } : {}),
Comment on lines +203 to +205
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
set -euo pipefail

echo "Check where tool payloads are transformed before provider/model calls:"
rg -nP -C3 '\b(tool|tools)\b.*\b(name|description|parameters|schema|inputSchema|outputSchema)\b' --type=ts

echo
echo "Check whether providerOptions / needsApproval / args are consumed in provider-facing payloads:"
rg -nP -C3 '\b(providerOptions|needsApproval|args)\b' --type=ts

echo
echo "Find function-valued needsApproval declarations (would skew stringify-based estimates):"
rg -nP -C2 'needsApproval\s*:\s*(async\s*)?(\([^)]*\)\s*=>|function\b)' --type=ts

Repository: VoltAgent/voltagent

Length of output: 50376


🏁 Script executed:

# First, let's look at the exact content around lines 203-205 in prompt-context-usage.ts
cat -n packages/core/src/agent/prompt-context-usage.ts | sed -n '190,220p'

Repository: VoltAgent/voltagent

Length of output: 1260


🏁 Script executed:

# Find the serializeToolDefinition function to understand what it does
rg -n "serializeToolDefinition" packages/core/src/agent/prompt-context-usage.ts -A 30

Repository: VoltAgent/voltagent

Length of output: 2684


🏁 Script executed:

# Check the Tool type definition to understand field types
rg -n "interface Tool|type Tool" packages/core/src/tool/ --type=ts -A 20 | head -100

Repository: VoltAgent/voltagent

Length of output: 6483


🏁 Script executed:

# Check if safeStringify is used in this file
rg -n "safeStringify|JSON.stringify" packages/core/src/agent/prompt-context-usage.ts

Repository: VoltAgent/voltagent

Length of output: 271


🏁 Script executed:

# Find Tool interface definition to understand field types
rg -n "interface Tool|type Tool.*=" packages/core/src/tool/ --type=ts -B 2 -A 50 | grep -A 50 "^packages/core/src/tool/index.ts"

Repository: VoltAgent/voltagent

Length of output: 7184


🏁 Script executed:

# Search for needsApproval type definition
rg -n "needsApproval" packages/core/src/tool/index.ts -B 3 -A 3

Repository: VoltAgent/voltagent

Length of output: 900


🏁 Script executed:

# Check how providerOptions, needsApproval are used in provider adapters
rg -n "providerOptions|needsApproval" packages/core/src/tool/manager/ToolManager.ts -B 3 -A 3

Repository: VoltAgent/voltagent

Length of output: 874


🏁 Script executed:

# Check what 'args' field is in ProviderTool definition
rg -n "type ProviderTool|interface ProviderTool" packages/core/src/tool/index.ts -A 10

Repository: VoltAgent/voltagent

Length of output: 324


🏁 Script executed:

# Check the AI SDK's Tool type to understand which fields are model-visible
rg -n "from \"ai\"" packages/core/src/tool/index.ts

Repository: VoltAgent/voltagent

Length of output: 159


🏁 Script executed:

# Look for how serializeToolDefinition output is actually used in token estimation
rg -n "serializeToolDefinition\|toolTokensEstimated" packages/core/src/agent/prompt-context-usage.ts -B 3 -A 3

Repository: VoltAgent/voltagent

Length of output: 45


🏁 Script executed:

# Check if ToolNeedsApprovalFunction is defined and what it looks like
rg -n "ToolNeedsApprovalFunction" packages/core/src/tool/index.ts -B 2 -A 3

Repository: VoltAgent/voltagent

Length of output: 767


🏁 Script executed:

# Check what safeStringify does and how it handles functions
rg -n "function safeStringify" packages/core/src -A 15 --type=ts

Repository: VoltAgent/voltagent

Length of output: 45


🏁 Script executed:

# Look at how sanitizeRecord works
rg -n "function sanitizeRecord|export.*sanitizeRecord" packages/core/src/agent/prompt-context-usage.ts -B 2 -A 10

Repository: VoltAgent/voltagent

Length of output: 494


🏁 Script executed:

# See where args comes from in the tool object and if it should be in schema
rg -n "\.args\s*=" packages/core/src/tool/ --type=ts | head -20

Repository: VoltAgent/voltagent

Length of output: 45


🏁 Script executed:

# Find safeStringify implementation in `@voltagent/internal`
fd -t f "*.ts" | xargs rg -l "safeStringify" | head -5

Repository: VoltAgent/voltagent

Length of output: 566


🏁 Script executed:

# Check how AI SDK's Tool type is used and what fields it expects
rg -n "VercelTool\[" packages/core/src/tool/manager/ToolManager.ts -B 3 -A 5

Repository: VoltAgent/voltagent

Length of output: 45


🏁 Script executed:

# Verify what serializeToolDefinition output is for: check comments around it
rg -n "serializeToolDefinition\|toolTokensEstimated" packages/core/src/agent/prompt-context-usage.ts -B 5 -A 2

Repository: VoltAgent/voltagent

Length of output: 45


🏁 Script executed:

# Look at the AI SDK Tool type definition being used
rg -n "Tool\s*\{|Tool\s*<" packages/core/src/tool/manager/ToolManager.ts -B 2 -A 20 | head -80

Repository: VoltAgent/voltagent

Length of output: 45


🏁 Script executed:

# Check comments in prompt-context-usage.ts about what this serialization is for
cat -n packages/core/src/agent/prompt-context-usage.ts | sed -n '40,80p'

Repository: VoltAgent/voltagent

Length of output: 1507


🏁 Script executed:

# See what fields the AI SDK Tool actually expects/sends to model
rg -n "description.*inputSchema\|parameters.*description" packages/core/src/tool/manager/ToolManager.ts -B 3 -A 3

Repository: VoltAgent/voltagent

Length of output: 45


🏁 Script executed:

# Check the actual AI SDK tool field documentation - look at how tools are used in providers
rg -n "tools\[.*\]\s*=" packages/core/src/agent/providers/ --type=ts -B 2 -A 5 | head -60

Repository: VoltAgent/voltagent

Length of output: 45


🏁 Script executed:

# See how ToolManager constructs tools for the AI SDK
cat -n packages/core/src/tool/manager/ToolManager.ts | sed -n '63,92p'

Repository: VoltAgent/voltagent

Length of output: 1294


🏁 Script executed:

# Verify what fields AI SDK actually sends to model by checking provider integrations
rg -n "generateText|doGenerate" packages/core/src/agent/providers/anthropic/ --type=ts -A 10 | head -40

Repository: VoltAgent/voltagent

Length of output: 147


🏁 Script executed:

# Check if there are any tests or documentation about what serializeToolDefinition should include
rg -n "serializeToolDefinition" packages/core/src/agent/prompt-context-usage.spec.ts -B 5 -A 10

Repository: VoltAgent/voltagent

Length of output: 45


🏁 Script executed:

# Look at test files for prompt-context-usage
fd -type f "prompt-context-usage" packages/core/src/

Repository: VoltAgent/voltagent

Length of output: 233


🏁 Script executed:

# Check what the AI SDK actually sends to the model for tool definitions
rg -n "name.*description.*inputSchema\|description.*inputSchema" packages/core/src/tool/manager/ToolManager.ts -B 3 -A 3

Repository: VoltAgent/voltagent

Length of output: 45


Remove runtime metadata fields from tool schema serialization to avoid inflating token estimates.

Lines 203–205 currently include providerOptions, args, and needsApproval, which are runtime/control metadata rather than model-visible schema fields. Since these are passed to safeStringify for token estimation (line 62), they artificially inflate tool_tokens_estimated. The AI SDK sends only description, inputSchema, and outputSchema to the model; restrict serialization to schema fields only.

For args, add a runtime type guard to ensure it is a plain object (not an array):

Suggested fix
-    ...(candidate.providerOptions ? { providerOptions: candidate.providerOptions } : {}),
-    ...(candidate.args ? { args: sanitizeRecord(candidate.args as Record<string, unknown>) } : {}),
-    ...(candidate.needsApproval !== undefined ? { needsApproval: candidate.needsApproval } : {}),
+    ...(candidate.args && typeof candidate.args === "object" && !Array.isArray(candidate.args)
+      ? { args: sanitizeRecord(candidate.args as Record<string, unknown>) }
+      : {}),
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
...(candidate.providerOptions ? { providerOptions: candidate.providerOptions } : {}),
...(candidate.args ? { args: sanitizeRecord(candidate.args as Record<string, unknown>) } : {}),
...(candidate.needsApproval !== undefined ? { needsApproval: candidate.needsApproval } : {}),
...(candidate.args && typeof candidate.args === "object" && !Array.isArray(candidate.args)
? { args: sanitizeRecord(candidate.args as Record<string, unknown>) }
: {}),
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@packages/core/src/agent/prompt-context-usage.ts` around lines 203 - 205, The
serialization currently includes runtime metadata fields providerOptions, args,
and needsApproval which inflate token estimates; change the object built for
safeStringify (used for token estimation) to include only model-visible schema
fields (description, inputSchema, outputSchema) and remove providerOptions and
needsApproval, and for args add a runtime type guard that only includes args
when it's a plain non-array object (use the existing sanitizeRecord helper only
after confirming !Array.isArray(candidate.args) && typeof candidate.args ===
'object' && candidate.args !== null). Update the code that constructs the object
passed to safeStringify (where candidate.providerOptions, candidate.args, and
candidate.needsApproval are currently spread) so it omits those runtime fields
unless args passes the plain-object guard.

};
}

/**
 * Converts a Zod schema (detected via its `_def` marker) into a JSON-style
 * representation via zodSchemaToJsonUI; anything else passes through
 * untouched. Conversion failures fall back to the raw schema rather than
 * throwing, since this runs on the telemetry path.
 */
function normalizeSchema(schema: unknown): unknown {
  if (!schema || typeof schema !== "object") {
    return schema;
  }

  const looksLikeZodSchema = "_def" in (schema as Record<string, unknown>);
  if (!looksLikeZodSchema) {
    return schema;
  }

  try {
    return zodSchemaToJsonUI(schema);
  } catch (_error) {
    return schema;
  }
}
Loading