diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..fca388a
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,2 @@
+OPENAI_API_KEY=your_openai_api_key
+BRAINTRUST_API_KEY=your_braintrust_api_key
diff --git a/.envrc b/.envrc
new file mode 100644
index 0000000..22992e1
--- /dev/null
+++ b/.envrc
@@ -0,0 +1,2 @@
+source_up
+dotenv
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 5f4f559..61d089e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -23,7 +23,9 @@ repos:
(?x)^(
.*\.(json|prisma|yaml)
)$
- args: [-L rouge]
+ args:
+ - "-L"
+ - "rouge,afterall"
- repo: https://github.com/rbubley/mirrors-prettier
rev: v3.3.2
diff --git a/.tool-versions b/.tool-versions
new file mode 100644
index 0000000..87803d9
--- /dev/null
+++ b/.tool-versions
@@ -0,0 +1 @@
+python 3.9.21
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..9e827fe
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,7 @@
+{
+ "[python]": {
+ "editor.formatOnSave": true,
+ "editor.defaultFormatter": "ms-python.black-formatter"
+ },
+ "black-formatter.path": ["${workspaceFolder}/venv/bin/black"]
+}
diff --git a/Makefile b/Makefile
index d1c6f72..57f4596 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,7 @@ py: ${VENV_PYTHON_PACKAGES}
VENV_INITIALIZED := venv/.initialized
${VENV_INITIALIZED}:
- rm -rf venv && python3 -m venv venv
+ rm -rf venv && python -m venv venv
@touch ${VENV_INITIALIZED}
VENV_PYTHON_PACKAGES := venv/.python_packages
@@ -20,6 +20,7 @@ VENV_PYTHON_PACKAGES := venv/.python_packages
${VENV_PYTHON_PACKAGES}: ${VENV_INITIALIZED}
bash -c 'source venv/bin/activate && python -m pip install --upgrade pip setuptools build twine openai'
bash -c 'source venv/bin/activate && python -m pip install -e ".[dev]"'
+ bash -c 'source venv/bin/activate && python -m pip install -e ".[scipy]"' # for local tests
@touch $@
${VENV_PRE_COMMIT}: ${VENV_PYTHON_PACKAGES}
diff --git a/README.md b/README.md
index 71233e8..fb5e907 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@ Autoevals is a tool to quickly and easily evaluate AI model outputs.
It bundles together a variety of automatic evaluation methods including:
-- LLM-as-a-Judge
+- LLM-as-a-judge
- Heuristic (e.g. Levenshtein distance)
- Statistical (e.g. BLEU)
@@ -18,53 +18,104 @@ their outputs.
You can also create your own model-graded evaluations with Autoevals. It's easy to add custom prompts, parse outputs,
and manage exceptions.
+
+
+### Requirements
+
+- Python 3.9 or higher
+- Compatible with both OpenAI Python SDK v0.x and v1.x
+
+
+
## Installation
-Autoevals is distributed as a [Python library on PyPI](https://pypi.org/project/autoevals/) and
-[Node.js library on NPM](https://www.npmjs.com/package/autoevals).
+
-### Python
+### TypeScript
```bash
-pip install autoevals
+npm install autoevals
```
-### Node.js
+### Python
```bash
-npm install autoevals
+pip install autoevals
```
-## Example
+
-Use Autoevals to model-grade an example LLM completion using the [factuality prompt](templates/factuality.yaml).
+## Getting started
+
+Use Autoevals to model-grade an example LLM completion using the [Factuality prompt](templates/factuality.yaml).
By default, Autoevals uses your `OPENAI_API_KEY` environment variable to authenticate with OpenAI's API.
+
+
### Python
```python
from autoevals.llm import *
+import asyncio
# Create a new LLM-based evaluator
evaluator = Factuality()
-# Evaluate an example LLM completion
+# Synchronous evaluation
input = "Which country has the highest population?"
output = "People's Republic of China"
expected = "China"
+# Using the synchronous API
result = evaluator(output, expected, input=input)
+print(f"Factuality score (sync): {result.score}")
+print(f"Factuality metadata (sync): {result.metadata['rationale']}")
-# The evaluator returns a score from [0,1] and includes the raw outputs from the evaluator
-print(f"Factuality score: {result.score}")
-print(f"Factuality metadata: {result.metadata['rationale']}")
+# Using the asynchronous API
+async def main():
+ result = await evaluator.eval_async(output, expected, input=input)
+ print(f"Factuality score (async): {result.score}")
+ print(f"Factuality metadata (async): {result.metadata['rationale']}")
+
+# Run the async example
+asyncio.run(main())
```
-#### Use with other AI providers through the AI proxy
+### TypeScript
+
+```typescript
+import { Factuality } from "autoevals";
+
+(async () => {
+ const input = "Which country has the highest population?";
+ const output = "People's Republic of China";
+ const expected = "China";
+
+ const result = await Factuality({ output, expected, input });
+ console.log(`Factuality score: ${result.score}`);
+ console.log(`Factuality metadata: ${result.metadata?.rationale}`);
+})();
+```
+
+
+
+## Using other AI providers
+
+When you use Autoevals, it will look for an `OPENAI_BASE_URL` environment variable to use as the base for requests to an OpenAI compatible API. If `OPENAI_BASE_URL` is not set, it will default to the [AI proxy](https://www.braintrust.dev/docs/guides/proxy).
+
+If you choose to use the proxy, you'll also get:
+
+- Simplified access to many AI providers
+- Reduced costs with automatic request caching
+- Increased observability when you enable logging to Braintrust
+
+The proxy is free to use, even if you don't have a Braintrust account.
+
+If you have a Braintrust account, you can optionally set the `BRAINTRUST_API_KEY` environment variable instead of `OPENAI_API_KEY` to unlock additional features like logging and monitoring. You can also route requests to [supported AI providers and models](https://www.braintrust.dev/docs/guides/proxy#supported-models) or custom models you have configured in Braintrust.
-Autoevals will look for an `OPENAI_BASE_URL` environment variable to use as the base for requests to an OpenAI compatible API. If `OPENAI_BASE_URL` is not set, it will default to the [AI proxy](https://www.braintrust.dev/docs/guides/proxy). This provides numerous benefits like simplified access to many AI providers, reduced costs with automatic request caching, and increased observability when you enable logging to Braintrust. The proxy is free to use, even if you don't have a Braintrust account.
+
-If you have a Braintrust account, you can set the `BRAINTUST_API_KEY` environment variable instead of `OPENAI_API_KEY` to unlock additional features like logging and monitoring. Additionally, you can route requests to [supported AI providers and models](https://www.braintrust.dev/docs/guides/proxy#supported-models) or custom models you have configured in Braintrust.
+### Python
```python
# NOTE: ensure BRAINTRUST_API_KEY is set in your environment and OPENAI_API_KEY is not set
@@ -85,133 +136,145 @@ print(f"Factuality score: {result.score}")
print(f"Factuality metadata: {result.metadata['rationale']}")
```
-#### Custom Client
+### TypeScript
-If you need to use a different OpenAI compatible API or require custom behavior, you can initialize the library with a custom client.
+```typescript
+// NOTE: ensure BRAINTRUST_API_KEY is set in your environment and OPENAI_API_KEY is not set
+import { Factuality } from "autoevals";
-```python
-import openai
-from autoevals import init
-from autoevals.oai import LLMClient
+(async () => {
+ const input = "Which country has the highest population?";
+ const output = "People's Republic of China";
+ const expected = "China";
-openai_client = openai.OpenAI(base_url="https://api.openai.com/v1/")
+ // Run an LLM-based evaluator using the Claude 3.5 Sonnet model from Anthropic
+ const result = await Factuality({
+ model: "claude-3-5-sonnet-latest",
+ output,
+ expected,
+ input,
+ });
-class CustomClient(LLMClient):
- openai=openai_client # you can also pass in openai module and we will instantiate it for you
- embed = openai.embeddings.create
- moderation = openai.moderations.create
- RateLimitError = openai.RateLimitError
+ // The evaluator returns a score from [0,1] and includes the raw outputs from the evaluator
+ console.log(`Factuality score: ${result.score}`);
+ console.log(`Factuality metadata: ${result.metadata?.rationale}`);
+})();
+```
- def complete(self, **kwargs):
- # make adjustments as needed
- return self.openai.chat.completions.create(**kwargs)
+
-# Autoevals will now use your custom client
-client = init(client=CustomClient)
-```
+## Custom client configuration
-If you only need to use a custom client for a specific evaluator, you can pass in the client to the evaluator.
+There are two ways you can configure a custom client when you need to use a different OpenAI compatible API:
-```python
-evaluator = Factuality(client=CustomClient)
-```
+1. **Global configuration**: Initialize a client that will be used by all evaluators
+2. **Instance configuration**: Configure a client for a specific evaluator
-### Node.js
+### Global configuration
-```javascript
-import { Factuality } from "autoevals";
+Set up a client that all your evaluators will use:
-(async () => {
- const input = "Which country has the highest population?";
- const output = "People's Republic of China";
- const expected = "China";
+
- const result = await Factuality({ output, expected, input });
- console.log(`Factuality score: ${result.score}`);
- console.log(`Factuality metadata: ${result.metadata.rationale}`);
-})();
+#### Python
+
+```python
+import openai
+import asyncio
+from autoevals import init
+from autoevals.llm import Factuality
+
+client = init(openai.AsyncOpenAI(base_url="https://api.openai.com/v1/"))
+
+async def main():
+ evaluator = Factuality()
+ result = await evaluator.eval_async(
+ input="What is the speed of light in a vacuum?",
+ output="The speed of light in a vacuum is 299,792,458 meters per second.",
+ expected="The speed of light in a vacuum is approximately 300,000 kilometers per second."
+ )
+ print(f"Factuality score: {result.score}")
+
+asyncio.run(main())
```
-#### Use with other AI providers through the AI proxy
+#### TypeScript
-Autoevals will look for an `OPENAI_BASE_URL` environment variable to use as the base for requests to an OpenAI compatible API. If `OPENAI_BASE_URL` is not set, it will default to the [AI proxy](https://www.braintrust.dev/docs/guides/proxy). This provides numerous benefits like simplified access to many AI providers, reduced costs with automatic request caching, and increased observability when you enable logging to Braintrust. The proxy is free to use, even if you don't have a Braintrust account.
+```typescript
+import OpenAI from "openai";
+import { init, Factuality } from "autoevals";
-If you have a Braintrust account, you can set the `BRAINTUST_API_KEY` environment variable instead of `OPENAI_API_KEY` to unlock additional features like logging and monitoring. Additionally, you can route requests to [supported AI providers and models](https://www.braintrust.dev/docs/guides/proxy#supported-models) or custom models you have configured in Braintrust.
+const client = new OpenAI({
+ baseURL: "https://api.openai.com/v1/",
+});
-```javascript
-// NOTE: ensure BRAINTRUST_API_KEY is set in your environment and OPENAI_API_KEY is not set
-import { Factuality } from "autoevals";
+init({ client });
(async () => {
- const input = "Which country has the highest population?";
- const output = "People's Republic of China";
- const expected = "China";
-
- // Run an LLM-based evaluator using the Claude 3.5 Sonnet model from Anthropic
const result = await Factuality({
- model: "claude-3-5-sonnet-latest",
- output,
- expected,
- input,
+ input: "What is the speed of light in a vacuum?",
+ output: "The speed of light in a vacuum is 299,792,458 meters per second.",
+ expected:
+ "The speed of light in a vacuum is approximately 300,000 kilometers per second (or precisely 299,792,458 meters per second).",
});
- // The evaluator returns a score from [0,1] and includes the raw outputs from the evaluator
- console.log(`Factuality score: ${result.score}`);
- console.log(`Factuality metadata: ${result.metadata?.rationale}`);
+ console.log("Factuality Score:", result);
})();
```
-## Using Braintrust with Autoevals
+
-Once you grade an output using Autoevals, it's convenient to use [Braintrust](https://www.braintrust.dev/docs/libs/python) to log and compare your evaluation results.
+### Instance configuration
-### Python
+Configure a client for a specific evaluator instance:
-```python
-from autoevals.llm import *
-import braintrust
+
-# Create a new LLM-based evaluator
-evaluator = Factuality()
+#### Python
-# Set up an example LLM completion
-input = "Which country has the highest population?"
-output = "People's Republic of China"
-expected = "China"
+```python
+import openai
+from autoevals.llm import Factuality
-# Set up a BrainTrust experiment to log our eval to
-experiment = braintrust.init(
- project="Autoevals", api_key="YOUR_BRAINTRUST_API_KEY"
-)
+custom_client = openai.OpenAI(base_url="https://custom-api.example.com/v1/")
+evaluator = Factuality(client=custom_client)
+```
-# Start a span and run our evaluator
-with experiment.start_span() as span:
- result = evaluator(output, expected, input=input)
+#### TypeScript
- # The evaluator returns a score from [0,1] and includes the raw outputs from the evaluator
- print(f"Factuality score: {result.score}")
- print(f"Factuality metadata: {result.metadata['rationale']}")
-
- span.log(
- inputs={"query": input},
- output=output,
- expected=expected,
- scores={
- "factuality": result.score,
- },
- metadata={
- "factuality": result.metadata,
- },
- )
+```typescript
+import OpenAI from "openai";
+import { Factuality } from "autoevals";
-print(experiment.summarize())
+(async () => {
+ const customClient = new OpenAI({
+ baseURL: "https://custom-api.example.com/v1/",
+ });
+
+ const result = await Factuality({
+ client: customClient,
+ output: "Paris is the capital of France",
+ expected:
+ "Paris is the capital of France and has a population of over 2 million",
+ input: "Tell me about Paris",
+ });
+ console.log(result);
+})();
```
-### Node.js
+
+
+## Using Braintrust with Autoevals (optional)
+
+Once you grade an output using Autoevals, you can optionally use [Braintrust](https://www.braintrust.dev/docs/libs/python) to log and compare your evaluation results. This integration is completely optional and not required for using Autoevals.
+
+
+
+### TypeScript
-Create a file named `example.eval.js` (it must end with `.eval.js` or `.eval.js`):
+Create a file named `example.eval.js` (it must take the form `*.eval.[ts|tsx|js|jsx]`):
-```javascript
+```typescript
import { Eval } from "braintrust";
import { Factuality } from "autoevals";
@@ -233,12 +296,35 @@ Then, run
npx braintrust run example.eval.js
```
-## Supported Evaluation Methods
+### Python
+
+Create a file named `eval_example.py` (it must take the form `eval_*.py`):
+
+```python
+from braintrust import Eval
+from autoevals.llm import Factuality
+
+Eval(
+ "Autoevals",
+ data=lambda: [
+ dict(
+ input="Which country has the highest population?",
+ expected="China",
+ ),
+ ],
+ task=lambda *args: "People's Republic of China",
+ scores=[Factuality],
+)
+```
+
+
+
+## Supported evaluation methods
-### LLM-as-a-Judge
+### LLM-as-a-judge evaluations
- Battle
-- ClosedQA
+- Closed QA
- Humor
- Factuality
- Moderation
@@ -248,46 +334,39 @@ npx braintrust run example.eval.js
- Translation
- Fine-tuned binary classifiers
-### RAG
+### RAG evaluations
- Context precision
- Context relevancy
- Context recall
-- Context entities recall
-- Faithfullness
-- Answer relevance
-- Answer semantic similarity
+- Context entity recall
+- Faithfulness
+- Answer relevancy
+- Answer similarity
- Answer correctness
-- Aspect critique
-### Composite
+### Composite evaluations
- Semantic list contains
- JSON validity
-### Embeddings
+### Embedding evaluations
- Embedding similarity
-- BERTScore
-### Heuristic
+### Heuristic evaluations
- Levenshtein distance
- Exact match
- Numeric difference
- JSON diff
-- Jaccard distance
-### Statistical
-
-- BLEU
-- ROUGE
-- METEOR
-
-## Custom Evaluation Prompts
+## Custom evaluation prompts
Autoevals supports custom evaluation prompts for model-graded evaluation. To use them, simply pass in a prompt and scoring mechanism:
+
+
### Python
```python
@@ -323,9 +402,7 @@ page_content = """
As suggested by Nicolo, we should standardize the error responses coming from GoTrue, postgres, and realtime (and any other/future APIs) so that it's better DX when writing a client,
We can make this change on the servers themselves, but since postgrest and gotrue are fully/partially external may be harder to change, it might be an option to transform the errors within the client libraries/supabase-js, could be messy?
Nicolo also dropped this as a reference: http://spec.openapis.org/oas/v3.0.3#openapi-specification"""
-output = (
- "Standardize error responses from GoTrue, Postgres, and Realtime APIs for better DX"
-)
+output = "Standardize error responses from GoTrue, Postgres, and Realtime APIs for better DX"
expected = "Standardize Error Responses across APIs"
response = evaluator(output, expected, input=page_content)
@@ -334,9 +411,9 @@ print(f"Score: {response.score}")
print(f"Metadata: {response.metadata}")
```
-### Node.js
+### TypeScript
-```javascript
+```typescript
import { LLMClassifierFromTemplate } from "autoevals";
(async () => {
@@ -352,15 +429,12 @@ Issue Description: {{input}}
const choiceScores = { 1: 1, 2: 0 };
- const evaluator =
- LLMClassifierFromTemplate <
- { input: string } >
- {
- name: "TitleQuality",
- promptTemplate,
- choiceScores,
- useCoT: true,
- };
+ const evaluator = LLMClassifierFromTemplate<{ input: string }>({
+ name: "TitleQuality",
+ promptTemplate,
+ choiceScores,
+ useCoT: true,
+ });
const input = `As suggested by Nicolo, we should standardize the error responses coming from GoTrue, postgres, and realtime (and any other/future APIs) so that it's better DX when writing a client,
We can make this change on the servers themselves, but since postgrest and gotrue are fully/partially external may be harder to change, it might be an option to transform the errors within the client libraries/supabase-js, could be messy?
@@ -375,21 +449,23 @@ Nicolo also dropped this as a reference: http://spec.openapis.org/oas/v3.0.3#ope
})();
```
+
+
## Creating custom scorers
You can also create your own scoring functions that do not use LLMs. For example, to test whether the word `'banana'`
is in the output, you can use the following:
+
+
### Python
```python
from autoevals import Score
-
def banana_scorer(output, expected, input):
return Score(name="banana_scorer", score=1 if "banana" in output else 0)
-
input = "What is 1 banana + 2 bananas?"
output = "3"
expected = "3 bananas"
@@ -399,9 +475,9 @@ result = banana_scorer(output, expected, input)
print(f"Banana score: {result.score}")
```
-### Node.js
+### TypeScript
-```javascript
+```typescript
import { Score } from "autoevals";
const bananaScorer = ({
@@ -426,6 +502,8 @@ const bananaScorer = ({
})();
```
+
+
## Why does this library exist?
There is nothing particularly novel about the evaluation methods in this library. They are all well-known and well-documented. However, there are a few things that are particularly difficult when evaluating in practice:
@@ -435,6 +513,20 @@ There is nothing particularly novel about the evaluation methods in this library
debug one output at a time, propagate errors, and tweak the prompts. Autoevals makes these tasks easy.
- Collecting metrics behind a uniform interface makes it easy to swap out evaluation methods and compare them. Prior to Autoevals, we couldn't find an open source library where you can simply pass in `input`, `output`, and `expected` values through a bunch of different evaluation methods.
+
+
## Documentation
-The full docs are available [here](https://www.braintrust.dev/docs/reference/autoevals).
+The full docs are available [for your reference](https://www.braintrust.dev/docs/reference/autoevals).
+
+## Contributing
+
+We welcome contributions!
+
+To install the development dependencies, run `make develop`, then run `source env.sh` to activate the environment. Copy `.env.example` to `.env`, fill in your API keys, and run `direnv allow` to load them into your environment.
+
+To run the tests, run `pytest` from the root directory.
+
+Send a PR and we'll review it! We'll take care of versioning and releasing.
+
+
diff --git a/js/index.ts b/js/index.ts
index 9029db9..1bdaac4 100644
--- a/js/index.ts
+++ b/js/index.ts
@@ -29,6 +29,7 @@
export type { Score, ScorerArgs, Scorer } from "@braintrust/core";
export * from "./llm";
+export { init } from "./oai";
export * from "./string";
export * from "./list";
export * from "./moderation";
diff --git a/js/llm.fixtures.ts b/js/llm.fixtures.ts
new file mode 100644
index 0000000..fde37ce
--- /dev/null
+++ b/js/llm.fixtures.ts
@@ -0,0 +1,470 @@
+export const openaiClassifierShouldEvaluateTitles = [
+ {
+ id: "chatcmpl-B7WxpqqPbHYiAOPDl3ViYNalDFbce",
+ object: "chat.completion",
+ created: 1741134709,
+ model: "gpt-3.5-turbo-0125",
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: null,
+ tool_calls: [
+ {
+ id: "call_OlUJAex0cWI84acfE0XydrHz",
+ type: "function",
+ function: {
+ name: "select_choice",
+ arguments:
+ '{"reasons":"Title 1: Pros - Clearly states the goal of standardizing error responses for better developer experience. Cons - Might be too specific and not catchy. Title 2: Pros - Short and simple. Cons - Lacks information about the issue.","choice":"1"}',
+ },
+ },
+ ],
+ refusal: null,
+ },
+ logprobs: null,
+ finish_reason: "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: 354,
+ completion_tokens: 58,
+ total_tokens: 412,
+ prompt_tokens_details: {
+ cached_tokens: 0,
+ audio_tokens: 0,
+ },
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: 0,
+ accepted_prediction_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ },
+ service_tier: "default",
+ system_fingerprint: null,
+ },
+];
+
+export const openaiClassifierShouldEvaluateTitlesWithCoT = [
+ {
+ id: "chatcmpl-B7XFw0OCpCbMVwLizRts3Cl72Obg0",
+ object: "chat.completion",
+ created: 1741135832,
+ model: "gpt-4o-2024-08-06",
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: null,
+ tool_calls: [
+ {
+ id: "call_jUzxFALMTbpzGX4DfFH57VdI",
+ type: "function",
+ function: {
+ name: "select_choice",
+ arguments:
+ '{"reasons":"1. The issue description talks about the need to standardize error responses from GoTrue, Postgres, and Realtime APIs to improve developer experience (DX).\\n2. Title 1 directly mentions the key components involved (GoTrue, Postgres, and Realtime APIs) and the goal (better DX), which aligns well with the issue description.\\n3. Title 2, \\"Good title,\\" is vague and does not provide any information about the issue or its context.\\n4. Therefore, Title 1 is more descriptive and relevant to the issue at hand.","choice":"1"}',
+ },
+ },
+ ],
+ refusal: null,
+ },
+ logprobs: null,
+ finish_reason: "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: 370,
+ completion_tokens: 125,
+ total_tokens: 495,
+ prompt_tokens_details: {
+ cached_tokens: 0,
+ audio_tokens: 0,
+ },
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: 0,
+ accepted_prediction_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ },
+ service_tier: "default",
+ system_fingerprint: "fp_eb9dce56a8",
+ },
+ {
+ id: "chatcmpl-B7YPU81s7cb2uzlwJ8w9aS5qhfhtJ",
+ object: "chat.completion",
+ created: 1741140268,
+ model: "gpt-4o-2024-08-06",
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: null,
+ tool_calls: [
+ {
+ id: "call_3Z63hgrYvLuSZKc2rrHAYLI4",
+ type: "function",
+ function: {
+ name: "select_choice",
+ arguments:
+ '{"reasons":"1. The issue description talks about the need to standardize error responses from GoTrue, Postgres, and Realtime APIs to improve developer experience (DX).\\n2. Title 1, \\"Good title,\\" is vague and does not convey any specific information about the issue. It does not mention the APIs involved or the purpose of the standardization.\\n3. Title 2, \\"Standardize error responses from GoTrue, Postgres, and Realtime APIs for better DX,\\" directly reflects the main goal of the issue, which is to standardize error responses for better developer experience. It also specifies the APIs involved, making it clear and informative.\\n4. Therefore, Title 2 is a better choice as it accurately and clearly describes the issue at hand.","choice":"2"}',
+ },
+ },
+ ],
+ refusal: null,
+ },
+ logprobs: null,
+ finish_reason: "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: 370,
+ completion_tokens: 164,
+ total_tokens: 534,
+ prompt_tokens_details: { cached_tokens: 0, audio_tokens: 0 },
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: 0,
+ accepted_prediction_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ },
+ service_tier: "default",
+ system_fingerprint: "fp_eb9dce56a8",
+ },
+ {
+ id: "chatcmpl-B7YQ9ILZ9DJR2AjY2s4qU15Rc6qII",
+ object: "chat.completion",
+ created: 1741140309,
+ model: "gpt-4o-2024-08-06",
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: null,
+ tool_calls: [
+ {
+ id: "call_CxDdx3i9eaHg81kYjQIICPfd",
+ type: "function",
+ function: { name: "select_choice", arguments: '{"choice":"1"}' },
+ },
+ ],
+ refusal: null,
+ },
+ logprobs: null,
+ finish_reason: "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: 292,
+ completion_tokens: 6,
+ total_tokens: 298,
+ prompt_tokens_details: { cached_tokens: 0, audio_tokens: 0 },
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: 0,
+ accepted_prediction_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ },
+ service_tier: "default",
+ system_fingerprint: "fp_eb9dce56a8",
+ },
+ {
+ id: "chatcmpl-B7YQa80DGu61zUWpdPtXRaJdRQz6l",
+ object: "chat.completion",
+ created: 1741140336,
+ model: "gpt-4o-2024-08-06",
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: null,
+ tool_calls: [
+ {
+ id: "call_ksuniPMn2w99hFt5Z1mzhWMe",
+ type: "function",
+ function: { name: "select_choice", arguments: '{"choice":"2"}' },
+ },
+ ],
+ refusal: null,
+ },
+ logprobs: null,
+ finish_reason: "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: 292,
+ completion_tokens: 6,
+ total_tokens: 298,
+ prompt_tokens_details: { cached_tokens: 0, audio_tokens: 0 },
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: 0,
+ accepted_prediction_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ },
+ service_tier: "default",
+ system_fingerprint: "fp_eb9dce56a8",
+ },
+];
+
+export const openaiClassifierShouldEvaluateArithmeticExpressions = [
+ {
+ id: "chatcmpl-B7YSMVJ7qaQTJ9OtR6zPUEdHxrNbT",
+ object: "chat.completion",
+ created: 1741140446,
+ model: "gpt-4o-2024-08-06",
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: null,
+ tool_calls: [
+ {
+ id: "call_Iatq5uhNc05I95JHjM7v3N5Y",
+ type: "function",
+ function: {
+ name: "select_choice",
+ arguments:
+ '{"reasons":"1. The instruction is to add the numbers 1, 2, and 3.\\n2. The correct sum of these numbers is 1 + 2 + 3 = 6.\\n3. Response 1 provides the answer as 600, which is incorrect.\\n4. Response 2 provides the answer as 6, which is correct.\\n5. Since the task is to evaluate which response is better based on the correctness of the addition, Response 2 is better because it provides the correct sum.\\n6. Therefore, Response 1 is not better than Response 2.","choice":"No"}',
+ },
+ },
+ ],
+ refusal: null,
+ },
+ logprobs: null,
+ finish_reason: "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: 248,
+ completion_tokens: 133,
+ total_tokens: 381,
+ prompt_tokens_details: { cached_tokens: 0, audio_tokens: 0 },
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: 0,
+ accepted_prediction_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ },
+ service_tier: "default",
+ system_fingerprint: "fp_eb9dce56a8",
+ },
+ {
+ id: "chatcmpl-B7YTPWIPOFpRcVOjEnU6s0kZXgPdB",
+ object: "chat.completion",
+ created: 1741140511,
+ model: "gpt-4o-2024-08-06",
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: null,
+ tool_calls: [
+ {
+ id: "call_eYJIS5zb9S0qS3NW2XZ7HtPu",
+ type: "function",
+ function: {
+ name: "select_choice",
+ arguments:
+ '{"reasons":"1. The instruction in both cases is to add the numbers 1, 2, and 3.\\n2. The correct sum of these numbers is 1 + 2 + 3 = 6.\\n3. Response 1 provides the answer as 6, which is the correct sum of the numbers.\\n4. Response 2 provides the answer as 600, which is incorrect as it does not represent the sum of the numbers given in the instruction.\\n5. Since Response 1 correctly answers the instruction and Response 2 does not, Response 1 is objectively better than Response 2.\\n6. Therefore, based on the correctness of the responses, the first response is better than the second.","choice":"Yes"}',
+ },
+ },
+ ],
+ refusal: null,
+ },
+ logprobs: null,
+ finish_reason: "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: 248,
+ completion_tokens: 157,
+ total_tokens: 405,
+ prompt_tokens_details: { cached_tokens: 0, audio_tokens: 0 },
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: 0,
+ accepted_prediction_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ },
+ service_tier: "default",
+ system_fingerprint: "fp_eb9dce56a8",
+ },
+ {
+ id: "chatcmpl-B7YU2qluNL0SenvL1zBiSzrka236n",
+ object: "chat.completion",
+ created: 1741140550,
+ model: "gpt-4o-2024-08-06",
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: null,
+ tool_calls: [
+ {
+ id: "call_kfVuMD09ytJIQVocHTEBrYLW",
+ type: "function",
+ function: {
+ name: "select_choice",
+ arguments:
+ '{"reasons":"1. Both instructions are identical, asking to add the numbers 1, 2, and 3.\\n2. Both responses provide the correct sum of these numbers, which is 6.\\n3. There is no additional context, explanation, or formatting in either response that would differentiate them in terms of quality or clarity.\\n4. Since both responses are identical and correct, there is no basis to claim that one is better than the other.\\n5. Therefore, the first response is not better than the second; they are equally good.","choice":"No"}',
+ },
+ },
+ ],
+ refusal: null,
+ },
+ logprobs: null,
+ finish_reason: "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: 248,
+ completion_tokens: 121,
+ total_tokens: 369,
+ prompt_tokens_details: { cached_tokens: 0, audio_tokens: 0 },
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: 0,
+ accepted_prediction_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ },
+ service_tier: "default",
+ system_fingerprint: "fp_eb9dce56a8",
+ },
+ {
+ id: "chatcmpl-B7YUTk3771FhLlXQNZPaobEC0d8R6",
+ object: "chat.completion",
+ created: 1741140577,
+ model: "gpt-4o-2024-08-06",
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: null,
+ tool_calls: [
+ {
+ id: "call_lbRjfwrJVP8HgLupWflqoCBM",
+ type: "function",
+ function: { name: "select_choice", arguments: '{"choice":"No"}' },
+ },
+ ],
+ refusal: null,
+ },
+ logprobs: null,
+ finish_reason: "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: 170,
+ completion_tokens: 6,
+ total_tokens: 176,
+ prompt_tokens_details: { cached_tokens: 0, audio_tokens: 0 },
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: 0,
+ accepted_prediction_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ },
+ service_tier: "default",
+ system_fingerprint: "fp_eb9dce56a8",
+ },
+ {
+ id: "chatcmpl-B7YUtrpit4RvQCeqfOcZme9L6pMAP",
+ object: "chat.completion",
+ created: 1741140603,
+ model: "gpt-4o-2024-08-06",
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: null,
+ tool_calls: [
+ {
+ id: "call_d3YnOawL5qadUmE46hoKds6B",
+ type: "function",
+ function: {
+ name: "select_choice",
+ arguments: '{"choice":"Yes"}',
+ },
+ },
+ ],
+ refusal: null,
+ },
+ logprobs: null,
+ finish_reason: "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: 170,
+ completion_tokens: 6,
+ total_tokens: 176,
+ prompt_tokens_details: { cached_tokens: 0, audio_tokens: 0 },
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: 0,
+ accepted_prediction_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ },
+ service_tier: "default",
+ system_fingerprint: "fp_eb9dce56a8",
+ },
+ {
+ id: "chatcmpl-B7YV8HHTm4hZU58Zp9gcjwp3MigEl",
+ object: "chat.completion",
+ created: 1741140618,
+ model: "gpt-4o-2024-08-06",
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: null,
+ tool_calls: [
+ {
+ id: "call_l3AonPTlmEhJ95fbq4M6J0sd",
+ type: "function",
+ function: { name: "select_choice", arguments: '{"choice":"No"}' },
+ },
+ ],
+ refusal: null,
+ },
+ logprobs: null,
+ finish_reason: "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: 170,
+ completion_tokens: 6,
+ total_tokens: 176,
+ prompt_tokens_details: { cached_tokens: 0, audio_tokens: 0 },
+ completion_tokens_details: {
+ reasoning_tokens: 0,
+ audio_tokens: 0,
+ accepted_prediction_tokens: 0,
+ rejected_prediction_tokens: 0,
+ },
+ },
+ service_tier: "default",
+ system_fingerprint: "fp_eb9dce56a8",
+ },
+];
diff --git a/js/llm.test.ts b/js/llm.test.ts
new file mode 100644
index 0000000..26fcf02
--- /dev/null
+++ b/js/llm.test.ts
@@ -0,0 +1,211 @@
+import { ChatCompletionMessageParam } from "openai/resources";
+import {
+ Battle,
+ LLMClassifierFromTemplate,
+ OpenAIClassifier,
+ buildClassificationTools,
+} from "../js/llm";
+import { bypass, http, HttpResponse } from "msw";
+import { server } from "./test/setup";
+import { OpenAI } from "openai";
+import { init } from "./oai";
+import {
+ openaiClassifierShouldEvaluateArithmeticExpressions,
+ openaiClassifierShouldEvaluateTitles,
+ openaiClassifierShouldEvaluateTitlesWithCoT,
+} from "./llm.fixtures";
+
+beforeAll(() => {
+ init({
+ client: new OpenAI({
+ apiKey: "test-api-key",
+ baseURL: "https://api.openai.com/v1",
+ }),
+ });
+});
+
+afterAll(() => {
+ init();
+});
+
+describe("LLM Tests", () => {
+ test("openai classifier should evaluate titles", async () => {
+ let callCount = -1;
+ server.use(
+ http.post("https://api.openai.com/v1/chat/completions", async () => {
+ const response = openaiClassifierShouldEvaluateTitles[++callCount];
+ return response
+ ? HttpResponse.json(response)
+ : HttpResponse.json({}, { status: 500 });
+ }),
+ );
+
+ const messages: ChatCompletionMessageParam[] = [
+ {
+ role: "system",
+ content: `You are a technical project manager who helps software engineers generate better titles for their GitHub issues.
+You will look at the issue description, and pick which of two titles better describes it.`,
+ },
+ {
+ role: "user",
+ content: `I'm going to provide you with the issue description, and two possible titles.
+
+Issue Description: {{page_content}}
+
+1: {{output}}
+2: {{expected}}
+
+Please discuss each title briefly (one line for pros, one for cons), and then answer the question by calling
+the select_choice function with "1" or "2".`,
+ },
+ ];
+
+ const page_content = `As suggested by Nicolo, we should standardize the error responses coming from GoTrue, postgres, and realtime (and any other/future APIs) so that it's better DX when writing a client,
+
+We can make this change on the servers themselves, but since postgrest and gotrue are fully/partially external may be harder to change, it might be an option to transform the errors within the client libraries/supabase-js, could be messy?
+
+Nicolo also dropped this as a reference: http://spec.openapis.org/oas/v3.0.3#openapi-specification`;
+
+ const output = `Standardize error responses from GoTrue, Postgres, and Realtime APIs for better DX`;
+ const expected = `Good title`;
+
+ const score = await OpenAIClassifier({
+ name: "titles",
+ output,
+ expected,
+ messages,
+ model: "gpt-3.5-turbo",
+ parseScoreFn: (grade: string) => grade.match(/Winner: (\d+)/)![1],
+ choiceScores: { "1": 1, "2": 0 },
+ classificationTools: buildClassificationTools(true, ["1", "2"]),
+ page_content,
+ maxTokens: 500,
+ openAiApiKey: "test-api-key",
+ });
+
+ expect(score.error).toBeUndefined();
+ });
+
+ test("llm classifier should evaluate with and without chain of thought", async () => {
+ let callCount = -1;
+ server.use(
+ http.post(
+ "https://api.openai.com/v1/chat/completions",
+ async ({ request }) => {
+ const response =
+ openaiClassifierShouldEvaluateTitlesWithCoT[++callCount];
+
+ if (!response) {
+ const res = await fetch(bypass(request));
+ const body = await res.json();
+ return HttpResponse.json(body, {
+ status: res.status,
+ headers: res.headers,
+ });
+ }
+
+ return response
+ ? HttpResponse.json(response)
+ : HttpResponse.json({}, { status: 500 });
+ },
+ ),
+ );
+
+ const pageContent = `As suggested by Nicolo, we should standardize the error responses coming from GoTrue, postgres, and realtime (and any other/future APIs) so that it's better DX when writing a client,
+
+We can make this change on the servers themselves, but since postgrest and gotrue are fully/partially external may be harder to change, it might be an option to transform the errors within the client libraries/supabase-js, could be messy?
+
+Nicolo also dropped this as a reference: http://spec.openapis.org/oas/v3.0.3#openapi-specification`;
+ const genTitle = `Standardize error responses from GoTrue, Postgres, and Realtime APIs for better DX`;
+ const originalTitle = `Good title`;
+
+ for (const useCoT of [true, false]) {
+ const classifier = LLMClassifierFromTemplate<{ page_content: string }>({
+ name: "titles",
+ promptTemplate: `You are a technical project manager who helps software engineers generate better titles for their GitHub issues.
+You will look at the issue description, and pick which of two titles better describes it.
+
+I'm going to provide you with the issue description, and two possible titles.
+
+Issue Description: {{page_content}}
+
+1: {{output}}
+2: {{expected}}`,
+ choiceScores: { "1": 1, "2": 0 },
+ useCoT,
+ });
+
+ let response = await classifier({
+ output: genTitle,
+ expected: originalTitle,
+ page_content: pageContent,
+ openAiApiKey: "test-api-key",
+ });
+
+ expect(response.error).toBeUndefined();
+
+ response = await classifier({
+ output: originalTitle,
+ expected: genTitle,
+ page_content: pageContent,
+ openAiApiKey: "test-api-key",
+ });
+
+ expect(response.error).toBeUndefined();
+ }
+ });
+
+ test("battle should evaluate arithmetic expressions", async () => {
+ let callCount = -1;
+ server.use(
+ http.post("https://api.openai.com/v1/chat/completions", async () => {
+ const response =
+ openaiClassifierShouldEvaluateArithmeticExpressions[++callCount];
+
+ return response
+ ? HttpResponse.json(response)
+ : HttpResponse.json({}, { status: 500 });
+ }),
+ );
+
+ // reset the client to test direct client usage
+ init();
+
+ const client = new OpenAI({
+ apiKey: "test-api-key",
+ baseURL: "https://api.openai.com/v1",
+ });
+
+ for (const useCoT of [true, false]) {
+ let response = await Battle({
+ useCoT,
+ instructions: "Add the following numbers: 1, 2, 3",
+ output: "600",
+ expected: "6",
+ client,
+ });
+
+ expect(response.error).toBeUndefined();
+
+ response = await Battle({
+ useCoT,
+ instructions: "Add the following numbers: 1, 2, 3",
+ output: "6",
+ expected: "600",
+ client,
+ });
+
+ expect(response.error).toBeUndefined();
+
+ response = await Battle({
+ useCoT,
+ instructions: "Add the following numbers: 1, 2, 3",
+ output: "6",
+ expected: "6",
+ client,
+ });
+
+ expect(response.error).toBeUndefined();
+ }
+ });
+});
diff --git a/js/llm.ts b/js/llm.ts
index 2941f2f..584cca1 100644
--- a/js/llm.ts
+++ b/js/llm.ts
@@ -95,6 +95,7 @@ export async function OpenAIClassifier(
openAiDefaultHeaders,
openAiDangerouslyAllowBrowser,
azureOpenAi,
+ client,
...remaining
} = args;
@@ -135,15 +136,17 @@ export async function OpenAIClassifier(
},
...extraArgs,
},
- {
- cache,
- openAiApiKey,
- openAiOrganizationId,
- openAiBaseUrl,
- openAiDefaultHeaders,
- openAiDangerouslyAllowBrowser,
- azureOpenAi,
- },
+ client
+ ? { client }
+ : {
+ cache,
+ openAiApiKey,
+ openAiOrganizationId,
+ openAiBaseUrl,
+ openAiDefaultHeaders,
+ openAiDangerouslyAllowBrowser,
+ azureOpenAi,
+ },
);
if (resp.choices.length > 0) {
diff --git a/js/oai.test.ts b/js/oai.test.ts
index 0f8379f..9a067cd 100644
--- a/js/oai.test.ts
+++ b/js/oai.test.ts
@@ -1,28 +1,255 @@
-import { buildOpenAIClient } from "./oai";
+import { buildOpenAIClient, init } from "./oai";
+import { http, HttpResponse } from "msw";
+import { server } from "./test/setup";
+import OpenAI from "openai";
-describe.skip("OAI", () => {
+const MOCK_OPENAI_COMPLETION_RESPONSE = {
+ choices: [
+ {
+ message: {
+ content: "Hello, I am a mock response!",
+ role: "assistant",
+ },
+ finish_reason: "stop",
+ index: 0,
+ },
+ ],
+ created: Date.now(),
+ id: "mock-id",
+ model: "mock-model",
+ object: "chat.completion",
+ usage: {
+ completion_tokens: 9,
+ prompt_tokens: 5,
+ total_tokens: 14,
+ },
+};
+
+describe("OAI", () => {
test("should use Azure OpenAI", async () => {
- /*
- * You can plug in your own valid Azure OpenAI info
- * to make sure it works.
- */
+ server.use(
+ http.post(
+ "https://*.openai.azure.com/openai/deployments/*/chat/completions*",
+ () => {
+ return HttpResponse.json(MOCK_OPENAI_COMPLETION_RESPONSE);
+ },
+ ),
+ );
+
const client = buildOpenAIClient({
azureOpenAi: {
- apiKey: "",
- endpoint: "https://.openai.azure.com/",
- apiVersion: "",
+ apiKey: "test-api-key",
+ endpoint: "https://test-resource.openai.azure.com",
+ apiVersion: "2024-02-15-preview",
},
});
- const {
- choices: [
- {
- message: { content },
- },
- ],
- } = await client.chat.completions.create({
- model: "",
+
+ const response = await client.chat.completions.create({
+ model: "test-model",
messages: [{ role: "system", content: "Hello" }],
});
- expect(content).toBeTruthy();
+
+ expect(response.choices[0].message.content).toBe(
+ "Hello, I am a mock response!",
+ );
+ expect(response.choices).toHaveLength(1);
+ });
+
+ test("should use regular OpenAI", async () => {
+ server.use(
+ http.post("https://api.openai.com/v1/chat/completions", () => {
+ return HttpResponse.json(MOCK_OPENAI_COMPLETION_RESPONSE);
+ }),
+ );
+
+ const client = buildOpenAIClient({
+ openAiApiKey: "test-api-key",
+ openAiBaseUrl: "https://api.openai.com/v1",
+ });
+
+ const response = await client.chat.completions.create({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "Hello" }],
+ });
+
+ expect(response.choices[0].message.content).toBe(
+ "Hello, I am a mock response!",
+ );
+ });
+
+ test("calls proxy if everything unset", async () => {
+ server.use(
+ http.post("https://api.braintrust.dev/v1/proxy/chat/completions", () => {
+ return HttpResponse.json(MOCK_OPENAI_COMPLETION_RESPONSE);
+ }),
+ );
+
+ const client = buildOpenAIClient({});
+ const response = await client.chat.completions.create({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "Hello" }],
+ });
+
+ expect(response.choices[0].message.content).toBe(
+ "Hello, I am a mock response!",
+ );
+ });
+
+ test("default wraps", async () => {
+ server.use(
+ http.post("https://api.braintrust.dev/v1/proxy/chat/completions", () => {
+ return HttpResponse.json(MOCK_OPENAI_COMPLETION_RESPONSE);
+ }),
+ );
+
+ await withMockWrapper(async ({ createSpy }) => {
+ const client = buildOpenAIClient({});
+
+ await client.chat.completions.create({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "Hello" }],
+ });
+
+ expect(createSpy).toHaveBeenCalledTimes(1);
+ expect(createSpy).toHaveBeenCalledWith({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "Hello" }],
+ });
+ });
+ });
+
+ test("wraps once", async () => {
+ server.use(
+ http.post("https://api.braintrust.dev/v1/proxy/chat/completions", () => {
+ return HttpResponse.json(MOCK_OPENAI_COMPLETION_RESPONSE);
+ }),
+ );
+
+ await withMockWrapper(async ({ wrapperMock, createSpy }) => {
+ const client = wrapperMock(
+ new OpenAI({
+ apiKey: "test-api-key",
+ }),
+ );
+ const builtClient = buildOpenAIClient({ client });
+
+ expect(builtClient).toBe(client);
+
+ await builtClient.chat.completions.create({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "Hello" }],
+ });
+
+ expect(createSpy).toHaveBeenCalledTimes(1);
+ expect(createSpy).toHaveBeenCalledWith({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "Hello" }],
+ });
+ });
+ });
+
+ test("wraps client, if possible", async () => {
+ server.use(
+ http.post("https://api.openai.com/v1/chat/completions", () => {
+ return HttpResponse.json(MOCK_OPENAI_COMPLETION_RESPONSE);
+ }),
+ );
+
+ await withMockWrapper(async ({ wrapperMock, createSpy }) => {
+ const client = new OpenAI({ apiKey: "test-api-key" });
+ const builtClient = buildOpenAIClient({ client });
+
+ await builtClient.chat.completions.create({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "Hello" }],
+ });
+
+ expect(createSpy).toHaveBeenCalledTimes(1);
+ expect(createSpy).toHaveBeenCalledWith({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "Hello" }],
+ });
+ });
+ });
+
+ test("init sets client", async () => {
+ server.use(
+ http.post("https://api.openai.com/v1/chat/completions", () => {
+ return HttpResponse.json(MOCK_OPENAI_COMPLETION_RESPONSE);
+ }),
+ );
+
+ const client = new OpenAI({ apiKey: "test-api-key" });
+
+ init({ client });
+
+ const builtClient = buildOpenAIClient({});
+
+ expect(Object.is(builtClient, client)).toBe(true);
+ });
+
+ test("client wins against init", async () => {
+ server.use(
+ http.post("https://api.openai.com/v1/chat/completions", () => {
+ return HttpResponse.json(MOCK_OPENAI_COMPLETION_RESPONSE);
+ }),
+ );
+
+ const client = new OpenAI({ apiKey: "test-api-key" });
+
+ init({ client });
+
+ const otherClient = new OpenAI({ apiKey: "other-api-key" });
+
+ const builtClient = buildOpenAIClient({ client: otherClient });
+
+ expect(Object.is(builtClient, otherClient)).toBe(true);
});
});
+
+const withMockWrapper = async (
+ fn: (args: {
+ wrapperMock: (client: any) => any;
+ createSpy: jest.Mock;
+  }) => Promise<void>,
+) => {
+ const createSpy = jest.fn();
+ const wrapperMock = (client: any) => {
+ return new Proxy(client, {
+ get(target, prop) {
+ if (prop === "chat") {
+ return new Proxy(
+ {},
+ {
+ get(target, prop) {
+ if (prop === "completions") {
+ return new Proxy(
+ {},
+ {
+ get(target, prop) {
+ if (prop === "create") {
+ return createSpy;
+ }
+ return Reflect.get(target, prop);
+ },
+ },
+ );
+ }
+ return Reflect.get(target, prop);
+ },
+ },
+ );
+ }
+ return Reflect.get(target, prop);
+ },
+ });
+ };
+
+ const originalWrapper = globalThis.__inherited_braintrust_wrap_openai;
+ try {
+ globalThis.__inherited_braintrust_wrap_openai = wrapperMock;
+ await fn({ wrapperMock, createSpy });
+ } finally {
+ globalThis.__inherited_braintrust_wrap_openai = originalWrapper;
+ }
+};
diff --git a/js/oai.ts b/js/oai.ts
index fb0558f..8deb870 100644
--- a/js/oai.ts
+++ b/js/oai.ts
@@ -5,6 +5,7 @@ import {
ChatCompletionToolChoiceOption,
} from "openai/resources";
import { AzureOpenAI, OpenAI } from "openai";
+import { types } from "node:util";
import { Env } from "./env";
@@ -29,18 +30,37 @@ export interface ChatCache {
 set(params: CachedLLMParams, response: ChatCompletion): Promise<void>;
}
-export interface OpenAIAuth {
- openAiApiKey?: string;
- openAiOrganizationId?: string;
- openAiBaseUrl?: string;
- openAiDefaultHeaders?: Record<string, string>;
- openAiDangerouslyAllowBrowser?: boolean;
- /**
- If present, use [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/ai-services/openai/)
- instead of OpenAI.
- */
- azureOpenAi?: AzureOpenAiAuth;
-}
+export type OpenAIAuth =
+ | {
+ /** @deprecated Use the `client` option instead */
+ openAiApiKey?: string;
+ /** @deprecated Use the `client` option instead */
+ openAiOrganizationId?: string;
+ /** @deprecated Use the `client` option instead */
+ openAiBaseUrl?: string;
+ /** @deprecated Use the `client` option instead */
+ openAiDefaultHeaders?: Record<string, string>;
+ /** @deprecated Use the `client` option instead */
+ openAiDangerouslyAllowBrowser?: boolean;
+ /** @deprecated Use the `client` option instead */
+ azureOpenAi?: AzureOpenAiAuth;
+ client?: never;
+ }
+ | {
+ client: OpenAI;
+ /** @deprecated Use the `client` option instead */
+ openAiApiKey?: never;
+ /** @deprecated Use the `client` option instead */
+ openAiOrganizationId?: never;
+ /** @deprecated Use the `client` option instead */
+ openAiBaseUrl?: never;
+ /** @deprecated Use the `client` option instead */
+ openAiDefaultHeaders?: never;
+ /** @deprecated Use the `client` option instead */
+ openAiDangerouslyAllowBrowser?: never;
+ /** @deprecated Use the `client` option instead */
+ azureOpenAi?: never;
+ };
export interface AzureOpenAiAuth {
apiKey: string;
@@ -51,19 +71,21 @@ export interface AzureOpenAiAuth {
export function extractOpenAIArgs<T extends Record<string, unknown>>(
args: OpenAIAuth & T,
): OpenAIAuth {
- return {
- openAiApiKey: args.openAiApiKey,
- openAiOrganizationId: args.openAiOrganizationId,
- openAiBaseUrl: args.openAiBaseUrl,
- openAiDefaultHeaders: args.openAiDefaultHeaders,
- openAiDangerouslyAllowBrowser: args.openAiDangerouslyAllowBrowser,
- azureOpenAi: args.azureOpenAi,
- };
+ return args.client
+ ? { client: args.client }
+ : {
+ openAiApiKey: args.openAiApiKey,
+ openAiOrganizationId: args.openAiOrganizationId,
+ openAiBaseUrl: args.openAiBaseUrl,
+ openAiDefaultHeaders: args.openAiDefaultHeaders,
+ openAiDangerouslyAllowBrowser: args.openAiDangerouslyAllowBrowser,
+ azureOpenAi: args.azureOpenAi,
+ };
}
const PROXY_URL = "https://api.braintrust.dev/v1/proxy";
-export function buildOpenAIClient(options: OpenAIAuth): OpenAI {
+const resolveOpenAIClient = (options: OpenAIAuth): OpenAI => {
const {
openAiApiKey,
openAiOrganizationId,
@@ -73,7 +95,15 @@ export function buildOpenAIClient(options: OpenAIAuth): OpenAI {
azureOpenAi,
} = options;
- const client = azureOpenAi
+ if (options.client) {
+ return options.client;
+ }
+
+ if (globalThis.__client) {
+ return globalThis.__client;
+ }
+
+ return azureOpenAi
? new AzureOpenAI({
apiKey: azureOpenAi.apiKey,
endpoint: azureOpenAi.endpoint,
@@ -88,19 +118,29 @@ export function buildOpenAIClient(options: OpenAIAuth): OpenAI {
defaultHeaders: openAiDefaultHeaders,
dangerouslyAllowBrowser: openAiDangerouslyAllowBrowser,
});
+};
+
+export function buildOpenAIClient(options: OpenAIAuth): OpenAI {
+ const client = resolveOpenAIClient(options);
- if (globalThis.__inherited_braintrust_wrap_openai) {
+ // avoid re-wrapping if the client is already wrapped (proxied)
+ if (globalThis.__inherited_braintrust_wrap_openai && !types.isProxy(client)) {
return globalThis.__inherited_braintrust_wrap_openai(client);
- } else {
- return client;
}
+
+ return client;
}
declare global {
/* eslint-disable no-var */
var __inherited_braintrust_wrap_openai: ((openai: any) => any) | undefined;
+ var __client: OpenAI | undefined;
}
+export const init = ({ client }: { client?: OpenAI } = {}) => {
+ globalThis.__client = client;
+};
+
export async function cachedChatCompletion(
params: CachedLLMParams,
options: { cache?: ChatCache } & OpenAIAuth,
diff --git a/js/ragas.ts b/js/ragas.ts
index 0bb3dd4..7b4e784 100644
--- a/js/ragas.ts
+++ b/js/ragas.ts
@@ -107,7 +107,6 @@ export const ContextEntityRecall: ScorerWithPartial<
const [expectedEntities, contextEntities] = responses.map(mustParseArgs);
const score = await ListContains({
- ...extractOpenAIArgs(args),
pairwiseScorer: args.pairwiseScorer ?? EmbeddingSimilarity,
allowExtraEntities: true,
output: entitySchema.parse(contextEntities).entities,
diff --git a/js/test/setup.ts b/js/test/setup.ts
new file mode 100644
index 0000000..3a28d00
--- /dev/null
+++ b/js/test/setup.ts
@@ -0,0 +1,17 @@
+import { setupServer } from "msw/node";
+
+export const server = setupServer();
+
+beforeAll(() => {
+ server.listen({
+ onUnhandledRequest: "error",
+ });
+});
+
+afterEach(() => {
+ server.resetHandlers();
+});
+
+afterAll(() => {
+ server.close();
+});
diff --git a/node/llm.test.ts b/node/llm.test.ts
deleted file mode 100644
index 7f6a4c5..0000000
--- a/node/llm.test.ts
+++ /dev/null
@@ -1,145 +0,0 @@
-import { ChatCompletionMessageParam } from "openai/resources";
-import {
- Battle,
- LLMClassifierFromTemplate,
- OpenAIClassifier,
- buildClassificationTools,
-} from "../js/llm";
-import { ChatCache } from "../js/oai";
-
-let cache: ChatCache | undefined;
-
-beforeAll(() => {
- cache = undefined;
-});
-
-test("openai", async () => {
- const parseBestTitle = (grade: string) => {
- return grade.match(/Winner: (\d+)/)![1];
- };
-
- const messages: ChatCompletionMessageParam[] = [
- {
- role: "system",
- content: `You are a technical project manager who helps software engineers generate better titles for their GitHub issues.
-You will look at the issue description, and pick which of two titles better describes it.`,
- },
- {
- role: "user",
- content: `I'm going to provide you with the issue description, and two possible titles.
-
-Issue Description: {{page_content}}
-
-1: {{output}}
-2: {{expected}}
-
-Please discuss each title briefly (one line for pros, one for cons), and then answer the question by calling
-the select_choice function with "1" or "2".`,
- },
- ];
-
- const page_content = `As suggested by Nicolo, we should standardize the error responses coming from GoTrue, postgres, and realtime (and any other/future APIs) so that it's better DX when writing a client,
-
-We can make this change on the servers themselves, but since postgrest and gotrue are fully/partially external may be harder to change, it might be an option to transform the errors within the client libraries/supabase-js, could be messy?
-
-Nicolo also dropped this as a reference: http://spec.openapis.org/oas/v3.0.3#openapi-specification`;
-
- const output = `Standardize error responses from GoTrue, Postgres, and Realtime APIs for better DX`;
- const expected = `Good title`;
- const score = await OpenAIClassifier({
- name: "titles",
- output,
- expected,
- messages,
- model: "gpt-3.5-turbo",
- parseScoreFn: parseBestTitle,
- choiceScores: { "1": 1, "2": 0 },
- classificationTools: buildClassificationTools(true, ["1", "2"]),
- page_content,
- maxTokens: 500,
- cache,
- openAiApiKey: process.env.OPENAI_API_KEY!,
- });
-
- expect(score.error).toBeUndefined();
-}, 600000);
-
-test("llm_classifier", async () => {
- const pageContent = `As suggested by Nicolo, we should standardize the error responses coming from GoTrue, postgres, and realtime (and any other/future APIs) so that it's better DX when writing a client,
-
-We can make this change on the servers themselves, but since postgrest and gotrue are fully/partially external may be harder to change, it might be an option to transform the errors within the client libraries/supabase-js, could be messy?
-
-Nicolo also dropped this as a reference: http://spec.openapis.org/oas/v3.0.3#openapi-specification`;
- const genTitle = `Standardize error responses from GoTrue, Postgres, and Realtime APIs for better DX`;
- const originalTitle = `Good title`;
-
- for (const useCoT of [true, false]) {
- const classifier = LLMClassifierFromTemplate<{ page_content: string }>({
- name: "titles",
- promptTemplate: `You are a technical project manager who helps software engineers generate better titles for their GitHub issues.
-You will look at the issue description, and pick which of two titles better describes it.
-
-I'm going to provide you with the issue description, and two possible titles.
-
-Issue Description: {{page_content}}
-
-1: {{output}}
-2: {{expected}}`,
- choiceScores: { "1": 1, "2": 0 },
- useCoT,
- });
-
- let response = await classifier({
- output: genTitle,
- expected: originalTitle,
- page_content: pageContent,
- openAiApiKey: process.env.OPENAI_API_KEY!,
- });
-
- expect(response.error).toBeUndefined();
-
- response = await classifier({
- output: originalTitle,
- expected: genTitle,
- page_content: pageContent,
- openAiApiKey: process.env.OPENAI_API_KEY!,
- });
-
- expect(response.error).toBeUndefined();
- }
-}, 600000);
-
-test("battle", async () => {
- for (const useCoT of [true, false]) {
- console.log("useCoT", useCoT);
- let response = await Battle({
- useCoT,
- instructions: "Add the following numbers: 1, 2, 3",
- output: "600",
- expected: "6",
- openAiApiKey: process.env.OPENAI_API_KEY!,
- });
-
- expect(response.error).toBeUndefined();
-
- response = await Battle({
- useCoT,
- instructions: "Add the following numbers: 1, 2, 3",
- output: "6",
- expected: "600",
- openAiApiKey: process.env.OPENAI_API_KEY!,
- });
-
- expect(response.error).toBeUndefined();
-
- response = await Battle({
- useCoT,
- instructions: "Add the following numbers: 1, 2, 3",
- output: "6",
- expected: "6",
- openAiApiKey: process.env.OPENAI_API_KEY!,
- });
-
- expect(response.error).toBeUndefined();
- }
-}, 600000);
diff --git a/package-lock.json b/package-lock.json
index cf55dd0..a3b688e 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -28,6 +28,7 @@
"@types/node": "^20.10.5",
"jest": "^29.7.0",
"jest-text-transformer": "^1.0.4",
+ "msw": "^2.7.3",
"ts-jest": "^29.1.1",
"tsup": "^8.0.1",
"tsx": "^3.14.0",
@@ -650,6 +651,34 @@
"integrity": "sha512-jAaT2+GGB0OOoO7SI+S7C+i1kf/FkwfUo0bQuBLqtp37R7xK4KrEWawYN5751LsUxrssp9zXVtznqwzDrL5bhg==",
"license": "MIT"
},
+ "node_modules/@bundled-es-modules/cookie": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/@bundled-es-modules/cookie/-/cookie-2.0.1.tgz",
+ "integrity": "sha512-8o+5fRPLNbjbdGRRmJj3h6Hh1AQJf2dk3qQ/5ZFb+PXkRNiSoMGGUKlsgLfrxneb72axVJyIYji64E2+nNfYyw==",
+ "dev": true,
+ "dependencies": {
+ "cookie": "^0.7.2"
+ }
+ },
+ "node_modules/@bundled-es-modules/statuses": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/@bundled-es-modules/statuses/-/statuses-1.0.1.tgz",
+ "integrity": "sha512-yn7BklA5acgcBr+7w064fGV+SGIFySjCKpqjcWgBAIfrAkY+4GQTJJHQMeT3V/sgz23VTEVV8TtOmkvJAhFVfg==",
+ "dev": true,
+ "dependencies": {
+ "statuses": "^2.0.1"
+ }
+ },
+ "node_modules/@bundled-es-modules/tough-cookie": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/@bundled-es-modules/tough-cookie/-/tough-cookie-0.1.6.tgz",
+ "integrity": "sha512-dvMHbL464C0zI+Yqxbz6kZ5TOEp7GLW+pry/RWndAR8MJQAXZ2rPmIs8tziTZjeIyhSNZgZbCePtfSbdWqStJw==",
+ "dev": true,
+ "dependencies": {
+ "@types/tough-cookie": "^4.0.5",
+ "tough-cookie": "^4.1.4"
+ }
+ },
"node_modules/@esbuild/aix-ppc64": {
"version": "0.23.1",
"resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.23.1.tgz",
@@ -1058,6 +1087,106 @@
"node": ">=18"
}
},
+ "node_modules/@inquirer/confirm": {
+ "version": "5.1.6",
+ "resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.1.6.tgz",
+ "integrity": "sha512-6ZXYK3M1XmaVBZX6FCfChgtponnL0R6I7k8Nu+kaoNkT828FVZTcca1MqmWQipaW2oNREQl5AaPCUOOCVNdRMw==",
+ "dev": true,
+ "dependencies": {
+ "@inquirer/core": "^10.1.7",
+ "@inquirer/type": "^3.0.4"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "@types/node": ">=18"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@inquirer/core": {
+ "version": "10.1.7",
+ "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.1.7.tgz",
+ "integrity": "sha512-AA9CQhlrt6ZgiSy6qoAigiA1izOa751ugX6ioSjqgJ+/Gd+tEN/TORk5sUYNjXuHWfW0r1n/a6ak4u/NqHHrtA==",
+ "dev": true,
+ "dependencies": {
+ "@inquirer/figures": "^1.0.10",
+ "@inquirer/type": "^3.0.4",
+ "ansi-escapes": "^4.3.2",
+ "cli-width": "^4.1.0",
+ "mute-stream": "^2.0.0",
+ "signal-exit": "^4.1.0",
+ "wrap-ansi": "^6.2.0",
+ "yoctocolors-cjs": "^2.1.2"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "@types/node": ">=18"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@inquirer/core/node_modules/signal-exit": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
+ "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
+ "dev": true,
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/@inquirer/core/node_modules/wrap-ansi": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz",
+ "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@inquirer/figures": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.10.tgz",
+ "integrity": "sha512-Ey6176gZmeqZuY/W/nZiUyvmb1/qInjcpiZjXWi6nON+nxJpD1bxtSoBxNliGISae32n6OwbY+TSXPZ1CfS4bw==",
+ "dev": true,
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@inquirer/type": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.4.tgz",
+ "integrity": "sha512-2MNFrDY8jkFYc9Il9DgLsHhMzuHnOYM1+CUYVWbzu9oT0hC7V7EcYvdCKeoll/Fcci04A+ERZ9wcc7cQ8lTkIA==",
+ "dev": true,
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "@types/node": ">=18"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ }
+ }
+ },
"node_modules/@isaacs/cliui": {
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
@@ -1557,6 +1686,23 @@
"@jridgewell/sourcemap-codec": "^1.4.14"
}
},
+ "node_modules/@mswjs/interceptors": {
+ "version": "0.37.6",
+ "resolved": "https://registry.npmjs.org/@mswjs/interceptors/-/interceptors-0.37.6.tgz",
+ "integrity": "sha512-wK+5pLK5XFmgtH3aQ2YVvA3HohS3xqV/OxuVOdNx9Wpnz7VE/fnC+e1A7ln6LFYeck7gOJ/dsZV6OLplOtAJ2w==",
+ "dev": true,
+ "dependencies": {
+ "@open-draft/deferred-promise": "^2.2.0",
+ "@open-draft/logger": "^0.3.0",
+ "@open-draft/until": "^2.0.0",
+ "is-node-process": "^1.2.0",
+ "outvariant": "^1.4.3",
+ "strict-event-emitter": "^0.5.1"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
"node_modules/@nodelib/fs.scandir": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
@@ -1595,6 +1741,28 @@
"node": ">= 8"
}
},
+ "node_modules/@open-draft/deferred-promise": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/@open-draft/deferred-promise/-/deferred-promise-2.2.0.tgz",
+ "integrity": "sha512-CecwLWx3rhxVQF6V4bAgPS5t+So2sTbPgAzafKkVizyi7tlwpcFpdFqq+wqF2OwNBmqFuu6tOyouTuxgpMfzmA==",
+ "dev": true
+ },
+ "node_modules/@open-draft/logger": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/@open-draft/logger/-/logger-0.3.0.tgz",
+ "integrity": "sha512-X2g45fzhxH238HKO4xbSr7+wBS8Fvw6ixhTDuvLd5mqh6bJJCFAPwU9mPDxbcrRtfxv4u5IHCEH77BmxvXmmxQ==",
+ "dev": true,
+ "dependencies": {
+ "is-node-process": "^1.2.0",
+ "outvariant": "^1.4.0"
+ }
+ },
+ "node_modules/@open-draft/until": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/@open-draft/until/-/until-2.1.0.tgz",
+ "integrity": "sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==",
+ "dev": true
+ },
"node_modules/@pkgjs/parseargs": {
"version": "0.11.0",
"resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
@@ -1902,6 +2070,12 @@
"@babel/types": "^7.20.7"
}
},
+ "node_modules/@types/cookie": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz",
+ "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==",
+ "dev": true
+ },
"node_modules/@types/estree": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz",
@@ -2004,6 +2178,18 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/@types/statuses": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@types/statuses/-/statuses-2.0.5.tgz",
+ "integrity": "sha512-jmIUGWrAiwu3dZpxntxieC+1n/5c3mjrImkmOSQ2NC5uP6cYO4aAZDdSmRcI5C1oiTmqlZGHC+/NmJrKogbP5A==",
+ "dev": true
+ },
+ "node_modules/@types/tough-cookie": {
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.5.tgz",
+ "integrity": "sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==",
+ "dev": true
+ },
"node_modules/@types/yargs": {
"version": "17.0.33",
"resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.33.tgz",
@@ -2537,6 +2723,15 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/cli-width": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz",
+ "integrity": "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 12"
+ }
+ },
"node_modules/cliui": {
"version": "8.0.1",
"resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
@@ -2665,6 +2860,15 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/cookie": {
+ "version": "0.7.2",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
+ "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
"node_modules/create-jest": {
"version": "29.7.0",
"resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz",
@@ -3341,6 +3545,15 @@
"dev": true,
"license": "ISC"
},
+ "node_modules/graphql": {
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.10.0.tgz",
+ "integrity": "sha512-AjqGKbDGUFRKIRCP9tCKiIGHyriz2oHEbPIbEtcSLSs4YjReZOIPQQWek4+6hjw62H9QShXHyaGivGiYVLeYFQ==",
+ "dev": true,
+ "engines": {
+ "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0"
+ }
+ },
"node_modules/handlebars": {
"version": "4.7.8",
"resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz",
@@ -3386,6 +3599,12 @@
"node": ">= 0.4"
}
},
+ "node_modules/headers-polyfill": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/headers-polyfill/-/headers-polyfill-4.0.3.tgz",
+ "integrity": "sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ==",
+ "dev": true
+ },
"node_modules/html-escaper": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz",
@@ -3565,6 +3784,12 @@
"node": ">=0.10.0"
}
},
+ "node_modules/is-node-process": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/is-node-process/-/is-node-process-1.2.0.tgz",
+ "integrity": "sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==",
+ "dev": true
+ },
"node_modules/is-number": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
@@ -4734,6 +4959,62 @@
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
"license": "MIT"
},
+ "node_modules/msw": {
+ "version": "2.7.3",
+ "resolved": "https://registry.npmjs.org/msw/-/msw-2.7.3.tgz",
+ "integrity": "sha512-+mycXv8l2fEAjFZ5sjrtjJDmm2ceKGjrNbBr1durRg6VkU9fNUE/gsmQ51hWbHqs+l35W1iM+ZsmOD9Fd6lspw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "dependencies": {
+ "@bundled-es-modules/cookie": "^2.0.1",
+ "@bundled-es-modules/statuses": "^1.0.1",
+ "@bundled-es-modules/tough-cookie": "^0.1.6",
+ "@inquirer/confirm": "^5.0.0",
+ "@mswjs/interceptors": "^0.37.0",
+ "@open-draft/deferred-promise": "^2.2.0",
+ "@open-draft/until": "^2.1.0",
+ "@types/cookie": "^0.6.0",
+ "@types/statuses": "^2.0.4",
+ "graphql": "^16.8.1",
+ "headers-polyfill": "^4.0.2",
+ "is-node-process": "^1.2.0",
+ "outvariant": "^1.4.3",
+ "path-to-regexp": "^6.3.0",
+ "picocolors": "^1.1.1",
+ "strict-event-emitter": "^0.5.1",
+ "type-fest": "^4.26.1",
+ "yargs": "^17.7.2"
+ },
+ "bin": {
+ "msw": "cli/index.js"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/mswjs"
+ },
+ "peerDependencies": {
+ "typescript": ">= 4.8.x"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/msw/node_modules/type-fest": {
+ "version": "4.37.0",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.37.0.tgz",
+ "integrity": "sha512-S/5/0kFftkq27FPNye0XM1e2NsnoD/3FS+pBmbjmmtLT6I+i344KoOf7pvXreaFsDamWeaJX55nczA1m5PsBDg==",
+ "dev": true,
+ "engines": {
+ "node": ">=16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
"node_modules/mustache": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz",
@@ -4743,6 +5024,15 @@
"mustache": "bin/mustache"
}
},
+ "node_modules/mute-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-2.0.0.tgz",
+ "integrity": "sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==",
+ "dev": true,
+ "engines": {
+ "node": "^18.17.0 || >=20.5.0"
+ }
+ },
"node_modules/mz": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
@@ -4755,6 +5045,26 @@
"thenify-all": "^1.0.0"
}
},
+ "node_modules/nanoid": {
+ "version": "3.3.8",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz",
+ "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "optional": true,
+ "peer": true,
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
"node_modules/natural-compare": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
@@ -4915,6 +5225,12 @@
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"license": "MIT"
},
+ "node_modules/outvariant": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/outvariant/-/outvariant-1.4.3.tgz",
+ "integrity": "sha512-+Sl2UErvtsoajRDKCE5/dBz4DIvHXQQnAxtQTF04OJxY0+DyZXSo5P5Bb7XYWOh81syohlYL24hbDwxedPUJCA==",
+ "dev": true
+ },
"node_modules/p-limit": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
@@ -5057,6 +5373,12 @@
"dev": true,
"license": "ISC"
},
+ "node_modules/path-to-regexp": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz",
+ "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==",
+ "dev": true
+ },
"node_modules/path-type": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
@@ -5068,11 +5390,10 @@
}
},
"node_modules/picocolors": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz",
- "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==",
- "dev": true,
- "license": "ISC"
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+ "dev": true
},
"node_modules/picomatch": {
"version": "2.3.1",
@@ -5110,6 +5431,36 @@
"node": ">=8"
}
},
+ "node_modules/postcss": {
+ "version": "8.5.3",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz",
+ "integrity": "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "optional": true,
+ "peer": true,
+ "dependencies": {
+ "nanoid": "^3.3.8",
+ "picocolors": "^1.1.1",
+ "source-map-js": "^1.2.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
"node_modules/pretty-format": {
"version": "29.7.0",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz",
@@ -5152,6 +5503,18 @@
"node": ">= 6"
}
},
+ "node_modules/psl": {
+ "version": "1.15.0",
+ "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz",
+ "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==",
+ "dev": true,
+ "dependencies": {
+ "punycode": "^2.3.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/lupomontero"
+ }
+ },
"node_modules/punycode": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
@@ -5179,6 +5542,12 @@
],
"license": "MIT"
},
+ "node_modules/querystringify": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz",
+ "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==",
+ "dev": true
+ },
"node_modules/queue-microtask": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
@@ -5239,6 +5608,12 @@
"node": ">=0.10.0"
}
},
+ "node_modules/requires-port": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
+ "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==",
+ "dev": true
+ },
"node_modules/resolve": {
"version": "1.22.8",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
@@ -5451,6 +5826,17 @@
"node": ">=0.10.0"
}
},
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "dev": true,
+ "optional": true,
+ "peer": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
"node_modules/source-map-support": {
"version": "0.5.13",
"resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz",
@@ -5491,6 +5877,21 @@
"node": ">=10"
}
},
+ "node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/strict-event-emitter": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.5.1.tgz",
+ "integrity": "sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==",
+ "dev": true
+ },
"node_modules/string-length": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz",
@@ -5760,6 +6161,21 @@
"node": ">=8.0"
}
},
+ "node_modules/tough-cookie": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz",
+ "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==",
+ "dev": true,
+ "dependencies": {
+ "psl": "^1.1.33",
+ "punycode": "^2.1.1",
+ "universalify": "^0.2.0",
+ "url-parse": "^1.5.3"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
"node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
@@ -6563,6 +6979,15 @@
"integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==",
"license": "MIT"
},
+ "node_modules/universalify": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz",
+ "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 4.0.0"
+ }
+ },
"node_modules/update-browserslist-db": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz",
@@ -6594,6 +7019,16 @@
"browserslist": ">= 4.21.0"
}
},
+ "node_modules/url-parse": {
+ "version": "1.5.10",
+ "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz",
+ "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==",
+ "dev": true,
+ "dependencies": {
+ "querystringify": "^2.1.1",
+ "requires-port": "^1.0.0"
+ }
+ },
"node_modules/uuid": {
"version": "3.4.0",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz",
@@ -6778,6 +7213,20 @@
"dev": true,
"license": "ISC"
},
+ "node_modules/yaml": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.7.0.tgz",
+ "integrity": "sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA==",
+ "dev": true,
+ "optional": true,
+ "peer": true,
+ "bin": {
+ "yaml": "bin.mjs"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
"node_modules/yargs": {
"version": "17.7.2",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
@@ -6820,22 +7269,32 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/yoctocolors-cjs": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz",
+ "integrity": "sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA==",
+ "dev": true,
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
"node_modules/zod": {
- "version": "3.23.8",
- "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.8.tgz",
- "integrity": "sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==",
- "license": "MIT",
+ "version": "3.24.2",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-3.24.2.tgz",
+ "integrity": "sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
},
"node_modules/zod-to-json-schema": {
- "version": "3.23.2",
- "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.23.2.tgz",
- "integrity": "sha512-uSt90Gzc/tUfyNqxnjlfBs8W6WSGpNBv0rVsNxP/BVSMHMKGdthPYff4xtCHYloJGM0CFxFsb3NbC0eqPhfImw==",
- "license": "ISC",
+ "version": "3.24.3",
+ "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.3.tgz",
+ "integrity": "sha512-HIAfWdYIt1sssHfYZFCXp4rU1w2r8hVVXYIlmoa0r0gABLs5di3RCqPU5DDROogVz1pAdYBaz7HK5n9pSUNs3A==",
"peerDependencies": {
- "zod": "^3.23.3"
+ "zod": "^3.24.1"
}
}
}
diff --git a/package.json b/package.json
index 39cd08d..b241b2c 100644
--- a/package.json
+++ b/package.json
@@ -40,6 +40,7 @@
"@types/node": "^20.10.5",
"jest": "^29.7.0",
"jest-text-transformer": "^1.0.4",
+ "msw": "^2.7.3",
"ts-jest": "^29.1.1",
"tsup": "^8.4.0",
"tsx": "^3.14.0",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index abd90c6..312c348 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -12,7 +12,7 @@ importers:
version: 0.0.8
ajv:
specifier: ^8.13.0
- version: 8.16.0
+ version: 8.17.1
compute-cosine-similarity:
specifier: ^1.1.0
version: 1.1.0
@@ -33,14 +33,14 @@ importers:
version: 4.47.1
zod:
specifier: ^3.22.4
- version: 3.23.8
+ version: 3.24.2
zod-to-json-schema:
specifier: ^3.22.5
- version: 3.23.1(zod@3.23.8)
+ version: 3.24.4(zod@3.24.2)
devDependencies:
"@types/jest":
specifier: ^29.5.11
- version: 29.5.12
+ version: 29.5.14
"@types/js-levenshtein":
specifier: ^1.1.3
version: 1.1.3
@@ -52,31 +52,34 @@ importers:
version: 4.2.5
"@types/node":
specifier: ^20.10.5
- version: 20.14.10
+ version: 20.17.24
jest:
specifier: ^29.7.0
- version: 29.7.0(@types/node@20.14.10)
+ version: 29.7.0(@types/node@20.17.24)
jest-text-transformer:
specifier: ^1.0.4
version: 1.0.4
+ msw:
+ specifier: ^2.7.3
+ version: 2.7.3(@types/node@20.17.24)(typescript@5.8.2)
ts-jest:
specifier: ^29.1.1
- version: 29.1.5(@babel/core@7.24.7)(esbuild@0.25.1)(jest@29.7.0)(typescript@5.5.3)
+ version: 29.2.6(@babel/core@7.26.10)(esbuild@0.25.1)(jest@29.7.0)(typescript@5.8.2)
tsup:
specifier: ^8.4.0
- version: 8.4.0(tsx@3.14.0)(typescript@5.5.3)
+ version: 8.4.0(tsx@3.14.0)(typescript@5.8.2)
tsx:
specifier: ^3.14.0
version: 3.14.0
typedoc:
specifier: ^0.25.4
- version: 0.25.13(typescript@5.5.3)
+ version: 0.25.13(typescript@5.8.2)
typedoc-plugin-markdown:
specifier: ^3.17.1
version: 3.17.1(typedoc@0.25.13)
typescript:
specifier: ^5.3.3
- version: 5.5.3
+ version: 5.8.2
evals:
dependencies:
@@ -88,14 +91,14 @@ importers:
version: 0.0.140
zod:
specifier: ^3.22.4
- version: 3.23.8
+ version: 3.24.2
devDependencies:
"@types/node":
specifier: ^20.10.5
- version: 20.14.10
+ version: 20.17.24
duckdb:
specifier: ^1.0.0
- version: 1.0.0
+ version: 1.2.0
tsx:
specifier: ^3.14.0
version: 3.14.0
@@ -122,7 +125,7 @@ packages:
"@jridgewell/trace-mapping": 0.3.25
dev: true
- /@asteasolutions/zod-to-openapi@6.4.0(zod@3.23.8):
+ /@asteasolutions/zod-to-openapi@6.4.0(zod@3.24.2):
resolution:
{
integrity: sha512-8cxfF7AHHx2PqnN4Cd8/O8CBu/nVYJP9DpnfVLW3BFb66VJDnqI/CczZnkqMc3SNh6J9GiX7JbJ5T4BSP4HZ2Q==,
@@ -130,46 +133,47 @@ packages:
peerDependencies:
zod: ^3.20.2
dependencies:
- openapi3-ts: 4.3.3
- zod: 3.23.8
+ openapi3-ts: 4.4.0
+ zod: 3.24.2
dev: false
- /@babel/code-frame@7.24.7:
+ /@babel/code-frame@7.26.2:
resolution:
{
- integrity: sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==,
+ integrity: sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==,
}
engines: { node: ">=6.9.0" }
dependencies:
- "@babel/highlight": 7.24.7
+ "@babel/helper-validator-identifier": 7.25.9
+ js-tokens: 4.0.0
picocolors: 1.1.1
dev: true
- /@babel/compat-data@7.24.7:
+ /@babel/compat-data@7.26.8:
resolution:
{
- integrity: sha512-qJzAIcv03PyaWqxRgO4mSU3lihncDT296vnyuE2O8uA4w3UHWI4S3hgeZd1L8W1Bft40w9JxJ2b412iDUFFRhw==,
+ integrity: sha512-oH5UPLMWR3L2wEFLnFJ1TZXqHufiTKAiLfqw5zkhS4dKXLJ10yVztfil/twG8EDTA4F/tvVNw9nOl4ZMslB8rQ==,
}
engines: { node: ">=6.9.0" }
dev: true
- /@babel/core@7.24.7:
+ /@babel/core@7.26.10:
resolution:
{
- integrity: sha512-nykK+LEK86ahTkX/3TgauT0ikKoNCfKHEaZYTUVupJdTLzGNvrblu4u6fa7DhZONAltdf8e662t/abY8idrd/g==,
+ integrity: sha512-vMqyb7XCDMPvJFFOaT9kxtiRh42GwlZEg1/uIgtZshS5a/8OaduUfCi7kynKgc3Tw/6Uo2D+db9qBttghhmxwQ==,
}
engines: { node: ">=6.9.0" }
dependencies:
"@ampproject/remapping": 2.3.0
- "@babel/code-frame": 7.24.7
- "@babel/generator": 7.24.7
- "@babel/helper-compilation-targets": 7.24.7
- "@babel/helper-module-transforms": 7.24.7(@babel/core@7.24.7)
- "@babel/helpers": 7.24.7
- "@babel/parser": 7.24.7
- "@babel/template": 7.24.7
- "@babel/traverse": 7.24.7
- "@babel/types": 7.24.7
+ "@babel/code-frame": 7.26.2
+ "@babel/generator": 7.26.10
+ "@babel/helper-compilation-targets": 7.26.5
+ "@babel/helper-module-transforms": 7.26.0(@babel/core@7.26.10)
+ "@babel/helpers": 7.26.10
+ "@babel/parser": 7.26.10
+ "@babel/template": 7.26.9
+ "@babel/traverse": 7.26.10
+ "@babel/types": 7.26.10
convert-source-map: 2.0.0
debug: 4.4.0
gensync: 1.0.0-beta.2
@@ -179,223 +183,181 @@ packages:
- supports-color
dev: true
- /@babel/generator@7.24.7:
+ /@babel/generator@7.26.10:
resolution:
{
- integrity: sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==,
+ integrity: sha512-rRHT8siFIXQrAYOYqZQVsAr8vJ+cBNqcVAY6m5V8/4QqzaPl+zDBe6cLEPRDuNOUf3ww8RfJVlOyQMoSI+5Ang==,
}
engines: { node: ">=6.9.0" }
dependencies:
- "@babel/types": 7.24.7
+ "@babel/parser": 7.26.10
+ "@babel/types": 7.26.10
"@jridgewell/gen-mapping": 0.3.8
"@jridgewell/trace-mapping": 0.3.25
- jsesc: 2.5.2
+ jsesc: 3.1.0
dev: true
- /@babel/helper-compilation-targets@7.24.7:
+ /@babel/helper-compilation-targets@7.26.5:
resolution:
{
- integrity: sha512-ctSdRHBi20qWOfy27RUb4Fhp07KSJ3sXcuSvTrXrc4aG8NSYDo1ici3Vhg9bg69y5bj0Mr1lh0aeEgTvc12rMg==,
+ integrity: sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA==,
}
engines: { node: ">=6.9.0" }
dependencies:
- "@babel/compat-data": 7.24.7
- "@babel/helper-validator-option": 7.24.7
- browserslist: 4.23.1
+ "@babel/compat-data": 7.26.8
+ "@babel/helper-validator-option": 7.25.9
+ browserslist: 4.24.4
lru-cache: 5.1.1
semver: 6.3.1
dev: true
- /@babel/helper-environment-visitor@7.24.7:
- resolution:
- {
- integrity: sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==,
- }
- engines: { node: ">=6.9.0" }
- dependencies:
- "@babel/types": 7.24.7
- dev: true
-
- /@babel/helper-function-name@7.24.7:
- resolution:
- {
- integrity: sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==,
- }
- engines: { node: ">=6.9.0" }
- dependencies:
- "@babel/template": 7.24.7
- "@babel/types": 7.24.7
- dev: true
-
- /@babel/helper-hoist-variables@7.24.7:
- resolution:
- {
- integrity: sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==,
- }
- engines: { node: ">=6.9.0" }
- dependencies:
- "@babel/types": 7.24.7
- dev: true
-
- /@babel/helper-module-imports@7.24.7:
+ /@babel/helper-module-imports@7.25.9:
resolution:
{
- integrity: sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==,
+ integrity: sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==,
}
engines: { node: ">=6.9.0" }
dependencies:
- "@babel/traverse": 7.24.7
- "@babel/types": 7.24.7
+ "@babel/traverse": 7.26.10
+ "@babel/types": 7.26.10
transitivePeerDependencies:
- supports-color
dev: true
- /@babel/helper-module-transforms@7.24.7(@babel/core@7.24.7):
+ /@babel/helper-module-transforms@7.26.0(@babel/core@7.26.10):
resolution:
{
- integrity: sha512-1fuJEwIrp+97rM4RWdO+qrRsZlAeL1lQJoPqtCYWv0NL115XM93hIH4CSRln2w52SqvmY5hqdtauB6QFCDiZNQ==,
+ integrity: sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==,
}
engines: { node: ">=6.9.0" }
peerDependencies:
"@babel/core": ^7.0.0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-environment-visitor": 7.24.7
- "@babel/helper-module-imports": 7.24.7
- "@babel/helper-simple-access": 7.24.7
- "@babel/helper-split-export-declaration": 7.24.7
- "@babel/helper-validator-identifier": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-module-imports": 7.25.9
+ "@babel/helper-validator-identifier": 7.25.9
+ "@babel/traverse": 7.26.10
transitivePeerDependencies:
- supports-color
dev: true
- /@babel/helper-plugin-utils@7.24.7:
+ /@babel/helper-plugin-utils@7.26.5:
resolution:
{
- integrity: sha512-Rq76wjt7yz9AAc1KnlRKNAi/dMSVWgDRx43FHoJEbcYU6xOWaE2dVPwcdTukJrjxS65GITyfbvEYHvkirZ6uEg==,
+ integrity: sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg==,
}
engines: { node: ">=6.9.0" }
dev: true
- /@babel/helper-simple-access@7.24.7:
+ /@babel/helper-string-parser@7.25.9:
resolution:
{
- integrity: sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==,
+ integrity: sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==,
}
engines: { node: ">=6.9.0" }
- dependencies:
- "@babel/traverse": 7.24.7
- "@babel/types": 7.24.7
- transitivePeerDependencies:
- - supports-color
dev: true
- /@babel/helper-split-export-declaration@7.24.7:
+ /@babel/helper-validator-identifier@7.25.9:
resolution:
{
- integrity: sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==,
+ integrity: sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==,
}
engines: { node: ">=6.9.0" }
- dependencies:
- "@babel/types": 7.24.7
dev: true
- /@babel/helper-string-parser@7.24.7:
+ /@babel/helper-validator-option@7.25.9:
resolution:
{
- integrity: sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==,
+ integrity: sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==,
}
engines: { node: ">=6.9.0" }
dev: true
- /@babel/helper-validator-identifier@7.24.7:
+ /@babel/helpers@7.26.10:
resolution:
{
- integrity: sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==,
- }
- engines: { node: ">=6.9.0" }
- dev: true
-
- /@babel/helper-validator-option@7.24.7:
- resolution:
- {
- integrity: sha512-yy1/KvjhV/ZCL+SM7hBrvnZJ3ZuT9OuZgIJAGpPEToANvc3iM6iDvBnRjtElWibHU6n8/LPR/EjX9EtIEYO3pw==,
+ integrity: sha512-UPYc3SauzZ3JGgj87GgZ89JVdC5dj0AoetR5Bw6wj4niittNyFh6+eOGonYvJ1ao6B8lEa3Q3klS7ADZ53bc5g==,
}
engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/template": 7.26.9
+ "@babel/types": 7.26.10
dev: true
- /@babel/helpers@7.24.7:
+ /@babel/parser@7.26.10:
resolution:
{
- integrity: sha512-NlmJJtvcw72yRJRcnCmGvSi+3jDEg8qFu3z0AFoymmzLx5ERVWyzd9kVXr7Th9/8yIJi2Zc6av4Tqz3wFs8QWg==,
+ integrity: sha512-6aQR2zGE/QFi8JpDLjUZEPYOs7+mhKXm86VaKFiLP35JQwQb6bwUE+XbvkH0EptsYhbNBSUGaUBLKqxH1xSgsA==,
}
- engines: { node: ">=6.9.0" }
+ engines: { node: ">=6.0.0" }
+ hasBin: true
dependencies:
- "@babel/template": 7.24.7
- "@babel/types": 7.24.7
+ "@babel/types": 7.26.10
dev: true
- /@babel/highlight@7.24.7:
+ /@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.26.10):
resolution:
{
- integrity: sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==,
+ integrity: sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==,
}
- engines: { node: ">=6.9.0" }
+ peerDependencies:
+ "@babel/core": ^7.0.0-0
dependencies:
- "@babel/helper-validator-identifier": 7.24.7
- chalk: 2.4.2
- js-tokens: 4.0.0
- picocolors: 1.1.1
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/parser@7.24.7:
+ /@babel/plugin-syntax-bigint@7.8.3(@babel/core@7.26.10):
resolution:
{
- integrity: sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==,
+ integrity: sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==,
}
- engines: { node: ">=6.0.0" }
- hasBin: true
+ peerDependencies:
+ "@babel/core": ^7.0.0-0
dependencies:
- "@babel/types": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.24.7):
+ /@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.26.10):
resolution:
{
- integrity: sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==,
+ integrity: sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==,
}
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-bigint@7.8.3(@babel/core@7.24.7):
+ /@babel/plugin-syntax-class-static-block@7.14.5(@babel/core@7.26.10):
resolution:
{
- integrity: sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==,
+ integrity: sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==,
}
+ engines: { node: ">=6.9.0" }
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.24.7):
+ /@babel/plugin-syntax-import-attributes@7.26.0(@babel/core@7.26.10):
resolution:
{
- integrity: sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==,
+ integrity: sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A==,
}
+ engines: { node: ">=6.9.0" }
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.24.7):
+ /@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.26.10):
resolution:
{
integrity: sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==,
@@ -403,11 +365,11 @@ packages:
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.24.7):
+ /@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.26.10):
resolution:
{
integrity: sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==,
@@ -415,24 +377,24 @@ packages:
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-jsx@7.24.7(@babel/core@7.24.7):
+ /@babel/plugin-syntax-jsx@7.25.9(@babel/core@7.26.10):
resolution:
{
- integrity: sha512-6ddciUPe/mpMnOKv/U+RSd2vvVy+Yw/JfBB0ZHYjEZt9NLHmCUylNYlsbqCCS1Bffjlb0fCwC9Vqz+sBz6PsiQ==,
+ integrity: sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA==,
}
engines: { node: ">=6.9.0" }
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.24.7):
+ /@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.26.10):
resolution:
{
integrity: sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==,
@@ -440,11 +402,11 @@ packages:
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.24.7):
+ /@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.26.10):
resolution:
{
integrity: sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==,
@@ -452,11 +414,11 @@ packages:
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.24.7):
+ /@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.26.10):
resolution:
{
integrity: sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==,
@@ -464,11 +426,11 @@ packages:
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.24.7):
+ /@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.26.10):
resolution:
{
integrity: sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==,
@@ -476,11 +438,11 @@ packages:
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.24.7):
+ /@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.26.10):
resolution:
{
integrity: sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==,
@@ -488,11 +450,11 @@ packages:
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.24.7):
+ /@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.26.10):
resolution:
{
integrity: sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==,
@@ -500,11 +462,24 @@ packages:
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.24.7):
+ /@babel/plugin-syntax-private-property-in-object@7.14.5(@babel/core@7.26.10):
+ resolution:
+ {
+ integrity: sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==,
+ }
+ engines: { node: ">=6.9.0" }
+ peerDependencies:
+ "@babel/core": ^7.0.0-0
+ dependencies:
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
+ dev: true
+
+ /@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.26.10):
resolution:
{
integrity: sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==,
@@ -513,66 +488,62 @@ packages:
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/plugin-syntax-typescript@7.24.7(@babel/core@7.24.7):
+ /@babel/plugin-syntax-typescript@7.25.9(@babel/core@7.26.10):
resolution:
{
- integrity: sha512-c/+fVeJBB0FeKsFvwytYiUD+LBvhHjGSI0g446PRGdSVGZLRNArBUno2PETbAly3tpiNAQR5XaZ+JslxkotsbA==,
+ integrity: sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ==,
}
engines: { node: ">=6.9.0" }
peerDependencies:
"@babel/core": ^7.0.0-0
dependencies:
- "@babel/core": 7.24.7
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/helper-plugin-utils": 7.26.5
dev: true
- /@babel/template@7.24.7:
+ /@babel/template@7.26.9:
resolution:
{
- integrity: sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==,
+ integrity: sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA==,
}
engines: { node: ">=6.9.0" }
dependencies:
- "@babel/code-frame": 7.24.7
- "@babel/parser": 7.24.7
- "@babel/types": 7.24.7
+ "@babel/code-frame": 7.26.2
+ "@babel/parser": 7.26.10
+ "@babel/types": 7.26.10
dev: true
- /@babel/traverse@7.24.7:
+ /@babel/traverse@7.26.10:
resolution:
{
- integrity: sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==,
+ integrity: sha512-k8NuDrxr0WrPH5Aupqb2LCVURP/S0vBEn5mK6iH+GIYob66U5EtoZvcdudR2jQ4cmTwhEwW1DLB+Yyas9zjF6A==,
}
engines: { node: ">=6.9.0" }
dependencies:
- "@babel/code-frame": 7.24.7
- "@babel/generator": 7.24.7
- "@babel/helper-environment-visitor": 7.24.7
- "@babel/helper-function-name": 7.24.7
- "@babel/helper-hoist-variables": 7.24.7
- "@babel/helper-split-export-declaration": 7.24.7
- "@babel/parser": 7.24.7
- "@babel/types": 7.24.7
+ "@babel/code-frame": 7.26.2
+ "@babel/generator": 7.26.10
+ "@babel/parser": 7.26.10
+ "@babel/template": 7.26.9
+ "@babel/types": 7.26.10
debug: 4.4.0
globals: 11.12.0
transitivePeerDependencies:
- supports-color
dev: true
- /@babel/types@7.24.7:
+ /@babel/types@7.26.10:
resolution:
{
- integrity: sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==,
+ integrity: sha512-emqcG3vHrpxUKTrxcblR36dcrcoRDvKmnL/dCL6ZsHaShW80qxCAcNhzQZrpeM765VzEos+xOi4s+r4IXzTwdQ==,
}
engines: { node: ">=6.9.0" }
dependencies:
- "@babel/helper-string-parser": 7.24.7
- "@babel/helper-validator-identifier": 7.24.7
- to-fast-properties: 2.0.0
+ "@babel/helper-string-parser": 7.25.9
+ "@babel/helper-validator-identifier": 7.25.9
dev: true
/@bcoe/v8-coverage@0.2.3:
@@ -588,9 +559,9 @@ packages:
integrity: sha512-5aA7A4i9TCt3lr6u/ogpRyZztghVEOuoTnP6nHoUaqvVo9AQHPgh2FarxsVB6yYnbWoV28o5AizO/kZseE8aBA==,
}
dependencies:
- "@asteasolutions/zod-to-openapi": 6.4.0(zod@3.23.8)
+ "@asteasolutions/zod-to-openapi": 6.4.0(zod@3.24.2)
uuid: 9.0.1
- zod: 3.23.8
+ zod: 3.24.2
dev: false
/@braintrust/core@0.0.8:
@@ -600,6 +571,34 @@ packages:
}
dev: false
+ /@bundled-es-modules/cookie@2.0.1:
+ resolution:
+ {
+ integrity: sha512-8o+5fRPLNbjbdGRRmJj3h6Hh1AQJf2dk3qQ/5ZFb+PXkRNiSoMGGUKlsgLfrxneb72axVJyIYji64E2+nNfYyw==,
+ }
+ dependencies:
+ cookie: 0.7.2
+ dev: true
+
+ /@bundled-es-modules/statuses@1.0.1:
+ resolution:
+ {
+ integrity: sha512-yn7BklA5acgcBr+7w064fGV+SGIFySjCKpqjcWgBAIfrAkY+4GQTJJHQMeT3V/sgz23VTEVV8TtOmkvJAhFVfg==,
+ }
+ dependencies:
+ statuses: 2.0.1
+ dev: true
+
+ /@bundled-es-modules/tough-cookie@0.1.6:
+ resolution:
+ {
+ integrity: sha512-dvMHbL464C0zI+Yqxbz6kZ5TOEp7GLW+pry/RWndAR8MJQAXZ2rPmIs8tziTZjeIyhSNZgZbCePtfSbdWqStJw==,
+ }
+ dependencies:
+ "@types/tough-cookie": 4.0.5
+ tough-cookie: 4.1.4
+ dev: true
+
/@esbuild/aix-ppc64@0.25.1:
resolution:
{
@@ -1149,6 +1148,69 @@ packages:
}
dev: true
+ /@inquirer/confirm@5.1.8(@types/node@20.17.24):
+ resolution:
+ {
+ integrity: sha512-dNLWCYZvXDjO3rnQfk2iuJNL4Ivwz/T2+C3+WnNfJKsNGSuOs3wAo2F6e0p946gtSAk31nZMfW+MRmYaplPKsg==,
+ }
+ engines: { node: ">=18" }
+ peerDependencies:
+ "@types/node": ">=18"
+ peerDependenciesMeta:
+ "@types/node":
+ optional: true
+ dependencies:
+ "@inquirer/core": 10.1.9(@types/node@20.17.24)
+ "@inquirer/type": 3.0.5(@types/node@20.17.24)
+ "@types/node": 20.17.24
+ dev: true
+
+ /@inquirer/core@10.1.9(@types/node@20.17.24):
+ resolution:
+ {
+ integrity: sha512-sXhVB8n20NYkUBfDYgizGHlpRVaCRjtuzNZA6xpALIUbkgfd2Hjz+DfEN6+h1BRnuxw0/P4jCIMjMsEOAMwAJw==,
+ }
+ engines: { node: ">=18" }
+ peerDependencies:
+ "@types/node": ">=18"
+ peerDependenciesMeta:
+ "@types/node":
+ optional: true
+ dependencies:
+ "@inquirer/figures": 1.0.11
+ "@inquirer/type": 3.0.5(@types/node@20.17.24)
+ "@types/node": 20.17.24
+ ansi-escapes: 4.3.2
+ cli-width: 4.1.0
+ mute-stream: 2.0.0
+ signal-exit: 4.1.0
+ wrap-ansi: 6.2.0
+ yoctocolors-cjs: 2.1.2
+ dev: true
+
+ /@inquirer/figures@1.0.11:
+ resolution:
+ {
+ integrity: sha512-eOg92lvrn/aRUqbxRyvpEWnrvRuTYRifixHkYVpJiygTgVSBIHDqLh0SrMQXkafvULg3ck11V7xvR+zcgvpHFw==,
+ }
+ engines: { node: ">=18" }
+ dev: true
+
+ /@inquirer/type@3.0.5(@types/node@20.17.24):
+ resolution:
+ {
+ integrity: sha512-ZJpeIYYueOz/i/ONzrfof8g89kNdO2hjGuvULROo3O8rlB2CRtSseE5KeirnyE4t/thAn/EwvS/vuQeJCn+NZg==,
+ }
+ engines: { node: ">=18" }
+ peerDependencies:
+ "@types/node": ">=18"
+ peerDependenciesMeta:
+ "@types/node":
+ optional: true
+ dependencies:
+ "@types/node": 20.17.24
+ dev: true
+
/@isaacs/cliui@8.0.2:
resolution:
{
@@ -1164,6 +1226,16 @@ packages:
wrap-ansi-cjs: /wrap-ansi@7.0.0
dev: true
+ /@isaacs/fs-minipass@4.0.1:
+ resolution:
+ {
+ integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==,
+ }
+ engines: { node: ">=18.0.0" }
+ dependencies:
+ minipass: 7.1.2
+ dev: true
+
/@istanbuljs/load-nyc-config@1.1.0:
resolution:
{
@@ -1194,7 +1266,7 @@ packages:
engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 }
dependencies:
"@jest/types": 29.6.3
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
chalk: 4.1.2
jest-message-util: 29.7.0
jest-util: 29.7.0
@@ -1218,14 +1290,14 @@ packages:
"@jest/test-result": 29.7.0
"@jest/transform": 29.7.0
"@jest/types": 29.6.3
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
ansi-escapes: 4.3.2
chalk: 4.1.2
ci-info: 3.9.0
exit: 0.1.2
graceful-fs: 4.2.11
jest-changed-files: 29.7.0
- jest-config: 29.7.0(@types/node@20.14.10)
+ jest-config: 29.7.0(@types/node@20.17.24)
jest-haste-map: 29.7.0
jest-message-util: 29.7.0
jest-regex-util: 29.6.3
@@ -1237,7 +1309,7 @@ packages:
jest-util: 29.7.0
jest-validate: 29.7.0
jest-watcher: 29.7.0
- micromatch: 4.0.7
+ micromatch: 4.0.8
pretty-format: 29.7.0
slash: 3.0.0
strip-ansi: 6.0.1
@@ -1256,7 +1328,7 @@ packages:
dependencies:
"@jest/fake-timers": 29.7.0
"@jest/types": 29.6.3
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
jest-mock: 29.7.0
dev: true
@@ -1292,7 +1364,7 @@ packages:
dependencies:
"@jest/types": 29.6.3
"@sinonjs/fake-timers": 10.3.0
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
jest-message-util: 29.7.0
jest-mock: 29.7.0
jest-util: 29.7.0
@@ -1331,7 +1403,7 @@ packages:
"@jest/transform": 29.7.0
"@jest/types": 29.6.3
"@jridgewell/trace-mapping": 0.3.25
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
chalk: 4.1.2
collect-v8-coverage: 1.0.2
exit: 0.1.2
@@ -1408,7 +1480,7 @@ packages:
}
engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 }
dependencies:
- "@babel/core": 7.24.7
+ "@babel/core": 7.26.10
"@jest/types": 29.6.3
"@jridgewell/trace-mapping": 0.3.25
babel-plugin-istanbul: 6.1.1
@@ -1419,7 +1491,7 @@ packages:
jest-haste-map: 29.7.0
jest-regex-util: 29.6.3
jest-util: 29.7.0
- micromatch: 4.0.7
+ micromatch: 4.0.8
pirates: 4.0.6
slash: 3.0.0
write-file-atomic: 4.0.2
@@ -1437,8 +1509,8 @@ packages:
"@jest/schemas": 29.6.3
"@types/istanbul-lib-coverage": 2.0.6
"@types/istanbul-reports": 3.0.4
- "@types/node": 20.14.10
- "@types/yargs": 17.0.32
+ "@types/node": 20.17.24
+ "@types/yargs": 17.0.33
chalk: 4.1.2
dev: true
@@ -1505,31 +1577,45 @@ packages:
}
dev: false
- /@mapbox/node-pre-gyp@1.0.11:
+ /@mapbox/node-pre-gyp@2.0.0:
resolution:
{
- integrity: sha512-Yhlar6v9WQgUp/He7BdgzOz8lqMQ8sU+jkCq7Wx8Myc5YFJLbEe7lgui/V7G1qB1DJykHSGwreceSaD60Y0PUQ==,
+ integrity: sha512-llMXd39jtP0HpQLVI37Bf1m2ADlEb35GYSh1SDSLsBhR+5iCxiNGlT31yqbNtVHygHAtMy6dWFERpU2JgufhPg==,
}
+ engines: { node: ">=18" }
hasBin: true
dependencies:
+ consola: 3.4.1
detect-libc: 2.0.3
- https-proxy-agent: 5.0.1
- make-dir: 3.1.0
+ https-proxy-agent: 7.0.6
node-fetch: 2.7.0
- nopt: 5.0.0
- npmlog: 5.0.1
- rimraf: 3.0.2
- semver: 7.6.2
- tar: 6.2.1
+ nopt: 8.1.0
+ semver: 7.7.1
+ tar: 7.4.3
transitivePeerDependencies:
- encoding
- supports-color
dev: true
- /@next/env@14.2.4:
+ /@mswjs/interceptors@0.37.6:
+ resolution:
+ {
+ integrity: sha512-wK+5pLK5XFmgtH3aQ2YVvA3HohS3xqV/OxuVOdNx9Wpnz7VE/fnC+e1A7ln6LFYeck7gOJ/dsZV6OLplOtAJ2w==,
+ }
+ engines: { node: ">=18" }
+ dependencies:
+ "@open-draft/deferred-promise": 2.2.0
+ "@open-draft/logger": 0.3.0
+ "@open-draft/until": 2.1.0
+ is-node-process: 1.2.0
+ outvariant: 1.4.3
+ strict-event-emitter: 0.5.1
+ dev: true
+
+ /@next/env@14.2.24:
resolution:
{
- integrity: sha512-3EtkY5VDkuV2+lNmKlbkibIJxcO4oIHEhBWne6PaAp+76J9KoSsGvNikp6ivzAT8dhhBMYrm6op2pS1ApG0Hzg==,
+ integrity: sha512-LAm0Is2KHTNT6IT16lxT+suD0u+VVfYNQqM+EJTKuFRRuY2z+zj01kueWXPCxbMBDt0B5vONYzabHGUNbZYAhA==,
}
dev: false
@@ -1541,7 +1627,7 @@ packages:
engines: { node: ^12.13.0 || ^14.15.0 || >=16.0.0 }
dependencies:
"@gar/promisify": 1.1.3
- semver: 7.6.2
+ semver: 7.7.1
dev: true
/@npmcli/move-file@2.0.1:
@@ -1556,6 +1642,30 @@ packages:
rimraf: 3.0.2
dev: true
+ /@open-draft/deferred-promise@2.2.0:
+ resolution:
+ {
+ integrity: sha512-CecwLWx3rhxVQF6V4bAgPS5t+So2sTbPgAzafKkVizyi7tlwpcFpdFqq+wqF2OwNBmqFuu6tOyouTuxgpMfzmA==,
+ }
+ dev: true
+
+ /@open-draft/logger@0.3.0:
+ resolution:
+ {
+ integrity: sha512-X2g45fzhxH238HKO4xbSr7+wBS8Fvw6ixhTDuvLd5mqh6bJJCFAPwU9mPDxbcrRtfxv4u5IHCEH77BmxvXmmxQ==,
+ }
+ dependencies:
+ is-node-process: 1.2.0
+ outvariant: 1.4.3
+ dev: true
+
+ /@open-draft/until@2.1.0:
+ resolution:
+ {
+ integrity: sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==,
+ }
+ dev: true
+
/@pkgjs/parseargs@0.11.0:
resolution:
{
@@ -1566,10 +1676,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-android-arm-eabi@4.35.0:
+ /@rollup/rollup-android-arm-eabi@4.36.0:
resolution:
{
- integrity: sha512-uYQ2WfPaqz5QtVgMxfN6NpLD+no0MYHDBywl7itPYd3K5TjjSghNKmX8ic9S8NU8w81NVhJv/XojcHptRly7qQ==,
+ integrity: sha512-jgrXjjcEwN6XpZXL0HUeOVGfjXhPyxAbbhD0BlXUB+abTOpbPiN5Wb3kOT7yb+uEtATNYF5x5gIfwutmuBA26w==,
}
cpu: [arm]
os: [android]
@@ -1577,10 +1687,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-android-arm64@4.35.0:
+ /@rollup/rollup-android-arm64@4.36.0:
resolution:
{
- integrity: sha512-FtKddj9XZudurLhdJnBl9fl6BwCJ3ky8riCXjEw3/UIbjmIY58ppWwPEvU3fNu+W7FUsAsB1CdH+7EQE6CXAPA==,
+ integrity: sha512-NyfuLvdPdNUfUNeYKUwPwKsE5SXa2J6bCt2LdB/N+AxShnkpiczi3tcLJrm5mA+eqpy0HmaIY9F6XCa32N5yzg==,
}
cpu: [arm64]
os: [android]
@@ -1588,10 +1698,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-darwin-arm64@4.35.0:
+ /@rollup/rollup-darwin-arm64@4.36.0:
resolution:
{
- integrity: sha512-Uk+GjOJR6CY844/q6r5DR/6lkPFOw0hjfOIzVx22THJXMxktXG6CbejseJFznU8vHcEBLpiXKY3/6xc+cBm65Q==,
+ integrity: sha512-JQ1Jk5G4bGrD4pWJQzWsD8I1n1mgPXq33+/vP4sk8j/z/C2siRuxZtaUA7yMTf71TCZTZl/4e1bfzwUmFb3+rw==,
}
cpu: [arm64]
os: [darwin]
@@ -1599,10 +1709,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-darwin-x64@4.35.0:
+ /@rollup/rollup-darwin-x64@4.36.0:
resolution:
{
- integrity: sha512-3IrHjfAS6Vkp+5bISNQnPogRAW5GAV1n+bNCrDwXmfMHbPl5EhTmWtfmwlJxFRUCBZ+tZ/OxDyU08aF6NI/N5Q==,
+ integrity: sha512-6c6wMZa1lrtiRsbDziCmjE53YbTkxMYhhnWnSW8R/yqsM7a6mSJ3uAVT0t8Y/DGt7gxUWYuFM4bwWk9XCJrFKA==,
}
cpu: [x64]
os: [darwin]
@@ -1610,10 +1720,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-freebsd-arm64@4.35.0:
+ /@rollup/rollup-freebsd-arm64@4.36.0:
resolution:
{
- integrity: sha512-sxjoD/6F9cDLSELuLNnY0fOrM9WA0KrM0vWm57XhrIMf5FGiN8D0l7fn+bpUeBSU7dCgPV2oX4zHAsAXyHFGcQ==,
+ integrity: sha512-KXVsijKeJXOl8QzXTsA+sHVDsFOmMCdBRgFmBb+mfEb/7geR7+C8ypAml4fquUt14ZyVXaw2o1FWhqAfOvA4sg==,
}
cpu: [arm64]
os: [freebsd]
@@ -1621,10 +1731,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-freebsd-x64@4.35.0:
+ /@rollup/rollup-freebsd-x64@4.36.0:
resolution:
{
- integrity: sha512-2mpHCeRuD1u/2kruUiHSsnjWtHjqVbzhBkNVQ1aVD63CcexKVcQGwJ2g5VphOd84GvxfSvnnlEyBtQCE5hxVVw==,
+ integrity: sha512-dVeWq1ebbvByI+ndz4IJcD4a09RJgRYmLccwlQ8bPd4olz3Y213uf1iwvc7ZaxNn2ab7bjc08PrtBgMu6nb4pQ==,
}
cpu: [x64]
os: [freebsd]
@@ -1632,10 +1742,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-linux-arm-gnueabihf@4.35.0:
+ /@rollup/rollup-linux-arm-gnueabihf@4.36.0:
resolution:
{
- integrity: sha512-mrA0v3QMy6ZSvEuLs0dMxcO2LnaCONs1Z73GUDBHWbY8tFFocM6yl7YyMu7rz4zS81NDSqhrUuolyZXGi8TEqg==,
+ integrity: sha512-bvXVU42mOVcF4le6XSjscdXjqx8okv4n5vmwgzcmtvFdifQ5U4dXFYaCB87namDRKlUL9ybVtLQ9ztnawaSzvg==,
}
cpu: [arm]
os: [linux]
@@ -1643,10 +1753,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-linux-arm-musleabihf@4.35.0:
+ /@rollup/rollup-linux-arm-musleabihf@4.36.0:
resolution:
{
- integrity: sha512-DnYhhzcvTAKNexIql8pFajr0PiDGrIsBYPRvCKlA5ixSS3uwo/CWNZxB09jhIapEIg945KOzcYEAGGSmTSpk7A==,
+ integrity: sha512-JFIQrDJYrxOnyDQGYkqnNBtjDwTgbasdbUiQvcU8JmGDfValfH1lNpng+4FWlhaVIR4KPkeddYjsVVbmJYvDcg==,
}
cpu: [arm]
os: [linux]
@@ -1654,10 +1764,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-linux-arm64-gnu@4.35.0:
+ /@rollup/rollup-linux-arm64-gnu@4.36.0:
resolution:
{
- integrity: sha512-uagpnH2M2g2b5iLsCTZ35CL1FgyuzzJQ8L9VtlJ+FckBXroTwNOaD0z0/UF+k5K3aNQjbm8LIVpxykUOQt1m/A==,
+ integrity: sha512-KqjYVh3oM1bj//5X7k79PSCZ6CvaVzb7Qs7VMWS+SlWB5M8p3FqufLP9VNp4CazJ0CsPDLwVD9r3vX7Ci4J56A==,
}
cpu: [arm64]
os: [linux]
@@ -1665,10 +1775,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-linux-arm64-musl@4.35.0:
+ /@rollup/rollup-linux-arm64-musl@4.36.0:
resolution:
{
- integrity: sha512-XQxVOCd6VJeHQA/7YcqyV0/88N6ysSVzRjJ9I9UA/xXpEsjvAgDTgH3wQYz5bmr7SPtVK2TsP2fQ2N9L4ukoUg==,
+ integrity: sha512-QiGnhScND+mAAtfHqeT+cB1S9yFnNQ/EwCg5yE3MzoaZZnIV0RV9O5alJAoJKX/sBONVKeZdMfO8QSaWEygMhw==,
}
cpu: [arm64]
os: [linux]
@@ -1676,10 +1786,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-linux-loongarch64-gnu@4.35.0:
+ /@rollup/rollup-linux-loongarch64-gnu@4.36.0:
resolution:
{
- integrity: sha512-5pMT5PzfgwcXEwOaSrqVsz/LvjDZt+vQ8RT/70yhPU06PTuq8WaHhfT1LW+cdD7mW6i/J5/XIkX/1tCAkh1W6g==,
+ integrity: sha512-1ZPyEDWF8phd4FQtTzMh8FQwqzvIjLsl6/84gzUxnMNFBtExBtpL51H67mV9xipuxl1AEAerRBgBwFNpkw8+Lg==,
}
cpu: [loong64]
os: [linux]
@@ -1687,10 +1797,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-linux-powerpc64le-gnu@4.35.0:
+ /@rollup/rollup-linux-powerpc64le-gnu@4.36.0:
resolution:
{
- integrity: sha512-c+zkcvbhbXF98f4CtEIP1EBA/lCic5xB0lToneZYvMeKu5Kamq3O8gqrxiYYLzlZH6E3Aq+TSW86E4ay8iD8EA==,
+ integrity: sha512-VMPMEIUpPFKpPI9GZMhJrtu8rxnp6mJR3ZzQPykq4xc2GmdHj3Q4cA+7avMyegXy4n1v+Qynr9fR88BmyO74tg==,
}
cpu: [ppc64]
os: [linux]
@@ -1698,10 +1808,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-linux-riscv64-gnu@4.35.0:
+ /@rollup/rollup-linux-riscv64-gnu@4.36.0:
resolution:
{
- integrity: sha512-s91fuAHdOwH/Tad2tzTtPX7UZyytHIRR6V4+2IGlV0Cej5rkG0R61SX4l4y9sh0JBibMiploZx3oHKPnQBKe4g==,
+ integrity: sha512-ttE6ayb/kHwNRJGYLpuAvB7SMtOeQnVXEIpMtAvx3kepFQeowVED0n1K9nAdraHUPJ5hydEMxBpIR7o4nrm8uA==,
}
cpu: [riscv64]
os: [linux]
@@ -1709,10 +1819,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-linux-s390x-gnu@4.35.0:
+ /@rollup/rollup-linux-s390x-gnu@4.36.0:
resolution:
{
- integrity: sha512-hQRkPQPLYJZYGP+Hj4fR9dDBMIM7zrzJDWFEMPdTnTy95Ljnv0/4w/ixFw3pTBMEuuEuoqtBINYND4M7ujcuQw==,
+ integrity: sha512-4a5gf2jpS0AIe7uBjxDeUMNcFmaRTbNv7NxI5xOCs4lhzsVyGR/0qBXduPnoWf6dGC365saTiwag8hP1imTgag==,
}
cpu: [s390x]
os: [linux]
@@ -1720,10 +1830,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-linux-x64-gnu@4.35.0:
+ /@rollup/rollup-linux-x64-gnu@4.36.0:
resolution:
{
- integrity: sha512-Pim1T8rXOri+0HmV4CdKSGrqcBWX0d1HoPnQ0uw0bdp1aP5SdQVNBy8LjYncvnLgu3fnnCt17xjWGd4cqh8/hA==,
+ integrity: sha512-5KtoW8UWmwFKQ96aQL3LlRXX16IMwyzMq/jSSVIIyAANiE1doaQsx/KRyhAvpHlPjPiSU/AYX/8m+lQ9VToxFQ==,
}
cpu: [x64]
os: [linux]
@@ -1731,10 +1841,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-linux-x64-musl@4.35.0:
+ /@rollup/rollup-linux-x64-musl@4.36.0:
resolution:
{
- integrity: sha512-QysqXzYiDvQWfUiTm8XmJNO2zm9yC9P/2Gkrwg2dH9cxotQzunBHYr6jk4SujCTqnfGxduOmQcI7c2ryuW8XVg==,
+ integrity: sha512-sycrYZPrv2ag4OCvaN5js+f01eoZ2U+RmT5as8vhxiFz+kxwlHrsxOwKPSA8WyS+Wc6Epid9QeI/IkQ9NkgYyQ==,
}
cpu: [x64]
os: [linux]
@@ -1742,10 +1852,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-win32-arm64-msvc@4.35.0:
+ /@rollup/rollup-win32-arm64-msvc@4.36.0:
resolution:
{
- integrity: sha512-OUOlGqPkVJCdJETKOCEf1mw848ZyJ5w50/rZ/3IBQVdLfR5jk/6Sr5m3iO2tdPgwo0x7VcncYuOvMhBWZq8ayg==,
+ integrity: sha512-qbqt4N7tokFwwSVlWDsjfoHgviS3n/vZ8LK0h1uLG9TYIRuUTJC88E1xb3LM2iqZ/WTqNQjYrtmtGmrmmawB6A==,
}
cpu: [arm64]
os: [win32]
@@ -1753,10 +1863,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-win32-ia32-msvc@4.35.0:
+ /@rollup/rollup-win32-ia32-msvc@4.36.0:
resolution:
{
- integrity: sha512-2/lsgejMrtwQe44glq7AFFHLfJBPafpsTa6JvP2NGef/ifOa4KBoglVf7AKN7EV9o32evBPRqfg96fEHzWo5kw==,
+ integrity: sha512-t+RY0JuRamIocMuQcfwYSOkmdX9dtkr1PbhKW42AMvaDQa+jOdpUYysroTF/nuPpAaQMWp7ye+ndlmmthieJrQ==,
}
cpu: [ia32]
os: [win32]
@@ -1764,10 +1874,10 @@ packages:
dev: true
optional: true
- /@rollup/rollup-win32-x64-msvc@4.35.0:
+ /@rollup/rollup-win32-x64-msvc@4.36.0:
resolution:
{
- integrity: sha512-PIQeY5XDkrOysbQblSW7v3l1MDZzkTEzAfTPkj5VAu3FW8fS4ynyLg2sINp0fp3SjZ8xkRYpLqoKcYqAkhU1dw==,
+ integrity: sha512-aRXd7tRZkWLqGbChgcMMDEHjOKudo1kChb1Jt1IfR8cY/KIpgNviLeJy5FUb9IpSuQj8dU2fAYNMPW/hLKOSTw==,
}
cpu: [x64]
os: [win32]
@@ -1814,8 +1924,8 @@ packages:
integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==,
}
dependencies:
- "@babel/parser": 7.24.7
- "@babel/types": 7.24.7
+ "@babel/parser": 7.26.10
+ "@babel/types": 7.26.10
"@types/babel__generator": 7.6.8
"@types/babel__template": 7.4.4
"@types/babel__traverse": 7.20.6
@@ -1827,7 +1937,7 @@ packages:
integrity: sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==,
}
dependencies:
- "@babel/types": 7.24.7
+ "@babel/types": 7.26.10
dev: true
/@types/babel__template@7.4.4:
@@ -1836,8 +1946,8 @@ packages:
integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==,
}
dependencies:
- "@babel/parser": 7.24.7
- "@babel/types": 7.24.7
+ "@babel/parser": 7.26.10
+ "@babel/types": 7.26.10
dev: true
/@types/babel__traverse@7.20.6:
@@ -1846,7 +1956,14 @@ packages:
integrity: sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==,
}
dependencies:
- "@babel/types": 7.24.7
+ "@babel/types": 7.26.10
+ dev: true
+
+ /@types/cookie@0.6.0:
+ resolution:
+ {
+ integrity: sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==,
+ }
dev: true
/@types/estree@1.0.6:
@@ -1862,7 +1979,7 @@ packages:
integrity: sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==,
}
dependencies:
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
dev: true
/@types/istanbul-lib-coverage@2.0.6:
@@ -1890,10 +2007,10 @@ packages:
"@types/istanbul-lib-report": 3.0.3
dev: true
- /@types/jest@29.5.12:
+ /@types/jest@29.5.14:
resolution:
{
- integrity: sha512-eDC8bTvT/QhYdxJAulQikueigY5AsdBRH2yDKW3yveW7svY3+DzN84/2NUgkw10RTiJbWqZrTtoGVdYlvFJdLw==,
+ integrity: sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==,
}
dependencies:
expect: 29.7.0
@@ -1921,32 +2038,32 @@ packages:
}
dev: true
- /@types/node-fetch@2.6.11:
+ /@types/node-fetch@2.6.12:
resolution:
{
- integrity: sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==,
+ integrity: sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==,
}
dependencies:
- "@types/node": 20.14.10
- form-data: 4.0.0
+ "@types/node": 20.17.24
+ form-data: 4.0.2
dev: false
- /@types/node@18.19.39:
+ /@types/node@18.19.80:
resolution:
{
- integrity: sha512-nPwTRDKUctxw3di5b4TfT3I0sWDiWoPQCZjXhvdkINntwr8lcoVCKsTgnXeRubKIlfnV+eN/HYk6Jb40tbcEAQ==,
+ integrity: sha512-kEWeMwMeIvxYkeg1gTc01awpwLbfMRZXdIhwRcakd/KlK53jmRC26LqcbIt7fnAQTu5GzlnWmzA3H6+l1u6xxQ==,
}
dependencies:
undici-types: 5.26.5
dev: false
- /@types/node@20.14.10:
+ /@types/node@20.17.24:
resolution:
{
- integrity: sha512-MdiXf+nDuMvY0gJKxyfZ7/6UFsETO7mGKF54MVD/ekJS6HdFtpZFBgrh6Pseu64XTb2MLyFPlbW6hj8HYRQNOQ==,
+ integrity: sha512-d7fGCyB96w9BnWQrOsJtpyiSaBcAYYr75bnK6ZRjDbql2cGLj/3GsL5OYmLPNq76l7Gf2q4Rv9J2o6h5CrD9sA==,
}
dependencies:
- undici-types: 5.26.5
+ undici-types: 6.19.8
/@types/stack-utils@2.0.3:
resolution:
@@ -1955,6 +2072,20 @@ packages:
}
dev: true
+ /@types/statuses@2.0.5:
+ resolution:
+ {
+ integrity: sha512-jmIUGWrAiwu3dZpxntxieC+1n/5c3mjrImkmOSQ2NC5uP6cYO4aAZDdSmRcI5C1oiTmqlZGHC+/NmJrKogbP5A==,
+ }
+ dev: true
+
+ /@types/tough-cookie@4.0.5:
+ resolution:
+ {
+ integrity: sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==,
+ }
+ dev: true
+
/@types/yargs-parser@21.0.3:
resolution:
{
@@ -1962,10 +2093,10 @@ packages:
}
dev: true
- /@types/yargs@17.0.32:
+ /@types/yargs@17.0.33:
resolution:
{
- integrity: sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog==,
+ integrity: sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==,
}
dependencies:
"@types/yargs-parser": 21.0.3
@@ -1978,6 +2109,14 @@ packages:
}
dev: true
+ /abbrev@3.0.0:
+ resolution:
+ {
+ integrity: sha512-+/kfrslGQ7TNV2ecmQwMJj/B65g5KVq1/L3SGVZ3tCYGqlzFuFCGBZJtMP99wH3NpEUyAjn0zPdPUg0D+DwrOA==,
+ }
+ engines: { node: ^18.17.0 || >=20.5.0 }
+ dev: true
+
/abort-controller@3.0.0:
resolution:
{
@@ -2000,10 +2139,18 @@ packages:
- supports-color
dev: true
- /agentkeepalive@4.5.0:
+ /agent-base@7.1.3:
+ resolution:
+ {
+ integrity: sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==,
+ }
+ engines: { node: ">= 14" }
+ dev: true
+
+ /agentkeepalive@4.6.0:
resolution:
{
- integrity: sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==,
+ integrity: sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==,
}
engines: { node: ">= 8.0.0" }
dependencies:
@@ -2020,16 +2167,16 @@ packages:
indent-string: 4.0.0
dev: true
- /ajv@8.16.0:
+ /ajv@8.17.1:
resolution:
{
- integrity: sha512-F0twR8U1ZU67JIEtekUcLkXkoO5mMMmgGD8sK/xUFzJ805jxHQl92hImFAqqXMyMYjSPOyUPAwHYhB72g5sTXw==,
+ integrity: sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==,
}
dependencies:
fast-deep-equal: 3.1.3
+ fast-uri: 3.0.6
json-schema-traverse: 1.0.0
require-from-string: 2.0.2
- uri-js: 4.4.1
dev: false
/ansi-escapes@4.3.2:
@@ -2057,23 +2204,13 @@ packages:
engines: { node: ">=12" }
dev: true
- /ansi-sequence-parser@1.1.1:
+ /ansi-sequence-parser@1.1.3:
resolution:
{
- integrity: sha512-vJXt3yiaUL4UU546s3rPXlsry/RnM730G1+HkpKE012AN0sx1eOrxSu95oKDIonskeLTijMgqWZ3uDEe3NFvyg==,
+ integrity: sha512-+fksAx9eG3Ab6LDnLs3ZqZa8KVJ/jYnX+D4Qe1azX+LFGFAXqynCQLOdLpNYN/l9e7l6hMWwZbrnctqr6eSQSw==,
}
dev: true
- /ansi-styles@3.2.1:
- resolution:
- {
- integrity: sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==,
- }
- engines: { node: ">=4" }
- dependencies:
- color-convert: 1.9.3
- dev: true
-
/ansi-styles@4.3.0:
resolution:
{
@@ -2124,18 +2261,6 @@ packages:
}
dev: true
- /are-we-there-yet@2.0.0:
- resolution:
- {
- integrity: sha512-Ci/qENmwHnsYo9xKIcUJN5LeDKdJ6R1Z1j9V/J5wyq8nh/mYPEpIKJbBZXtZjG04HiK7zV/p6Vs9952MrMeUIw==,
- }
- engines: { node: ">=10" }
- deprecated: This package is no longer supported.
- dependencies:
- delegates: 1.0.0
- readable-stream: 3.6.2
- dev: true
-
/are-we-there-yet@3.0.1:
resolution:
{
@@ -2164,6 +2289,13 @@ packages:
}
dev: false
+ /async@3.2.6:
+ resolution:
+ {
+ integrity: sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==,
+ }
+ dev: true
+
/asynckit@0.4.0:
resolution:
{
@@ -2171,7 +2303,7 @@ packages:
}
dev: false
- /babel-jest@29.7.0(@babel/core@7.24.7):
+ /babel-jest@29.7.0(@babel/core@7.26.10):
resolution:
{
integrity: sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==,
@@ -2180,11 +2312,11 @@ packages:
peerDependencies:
"@babel/core": ^7.8.0
dependencies:
- "@babel/core": 7.24.7
+ "@babel/core": 7.26.10
"@jest/transform": 29.7.0
"@types/babel__core": 7.20.5
babel-plugin-istanbul: 6.1.1
- babel-preset-jest: 29.6.3(@babel/core@7.24.7)
+ babel-preset-jest: 29.6.3(@babel/core@7.26.10)
chalk: 4.1.2
graceful-fs: 4.2.11
slash: 3.0.0
@@ -2199,7 +2331,7 @@ packages:
}
engines: { node: ">=8" }
dependencies:
- "@babel/helper-plugin-utils": 7.24.7
+ "@babel/helper-plugin-utils": 7.26.5
"@istanbuljs/load-nyc-config": 1.1.0
"@istanbuljs/schema": 0.1.3
istanbul-lib-instrument: 5.2.1
@@ -2215,36 +2347,39 @@ packages:
}
engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 }
dependencies:
- "@babel/template": 7.24.7
- "@babel/types": 7.24.7
+ "@babel/template": 7.26.9
+ "@babel/types": 7.26.10
"@types/babel__core": 7.20.5
"@types/babel__traverse": 7.20.6
dev: true
- /babel-preset-current-node-syntax@1.0.1(@babel/core@7.24.7):
+ /babel-preset-current-node-syntax@1.1.0(@babel/core@7.26.10):
resolution:
{
- integrity: sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==,
+ integrity: sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw==,
}
peerDependencies:
"@babel/core": ^7.0.0
dependencies:
- "@babel/core": 7.24.7
- "@babel/plugin-syntax-async-generators": 7.8.4(@babel/core@7.24.7)
- "@babel/plugin-syntax-bigint": 7.8.3(@babel/core@7.24.7)
- "@babel/plugin-syntax-class-properties": 7.12.13(@babel/core@7.24.7)
- "@babel/plugin-syntax-import-meta": 7.10.4(@babel/core@7.24.7)
- "@babel/plugin-syntax-json-strings": 7.8.3(@babel/core@7.24.7)
- "@babel/plugin-syntax-logical-assignment-operators": 7.10.4(@babel/core@7.24.7)
- "@babel/plugin-syntax-nullish-coalescing-operator": 7.8.3(@babel/core@7.24.7)
- "@babel/plugin-syntax-numeric-separator": 7.10.4(@babel/core@7.24.7)
- "@babel/plugin-syntax-object-rest-spread": 7.8.3(@babel/core@7.24.7)
- "@babel/plugin-syntax-optional-catch-binding": 7.8.3(@babel/core@7.24.7)
- "@babel/plugin-syntax-optional-chaining": 7.8.3(@babel/core@7.24.7)
- "@babel/plugin-syntax-top-level-await": 7.14.5(@babel/core@7.24.7)
+ "@babel/core": 7.26.10
+ "@babel/plugin-syntax-async-generators": 7.8.4(@babel/core@7.26.10)
+ "@babel/plugin-syntax-bigint": 7.8.3(@babel/core@7.26.10)
+ "@babel/plugin-syntax-class-properties": 7.12.13(@babel/core@7.26.10)
+ "@babel/plugin-syntax-class-static-block": 7.14.5(@babel/core@7.26.10)
+ "@babel/plugin-syntax-import-attributes": 7.26.0(@babel/core@7.26.10)
+ "@babel/plugin-syntax-import-meta": 7.10.4(@babel/core@7.26.10)
+ "@babel/plugin-syntax-json-strings": 7.8.3(@babel/core@7.26.10)
+ "@babel/plugin-syntax-logical-assignment-operators": 7.10.4(@babel/core@7.26.10)
+ "@babel/plugin-syntax-nullish-coalescing-operator": 7.8.3(@babel/core@7.26.10)
+ "@babel/plugin-syntax-numeric-separator": 7.10.4(@babel/core@7.26.10)
+ "@babel/plugin-syntax-object-rest-spread": 7.8.3(@babel/core@7.26.10)
+ "@babel/plugin-syntax-optional-catch-binding": 7.8.3(@babel/core@7.26.10)
+ "@babel/plugin-syntax-optional-chaining": 7.8.3(@babel/core@7.26.10)
+ "@babel/plugin-syntax-private-property-in-object": 7.14.5(@babel/core@7.26.10)
+ "@babel/plugin-syntax-top-level-await": 7.14.5(@babel/core@7.26.10)
dev: true
- /babel-preset-jest@29.6.3(@babel/core@7.24.7):
+ /babel-preset-jest@29.6.3(@babel/core@7.26.10):
resolution:
{
integrity: sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==,
@@ -2253,9 +2388,9 @@ packages:
peerDependencies:
"@babel/core": ^7.0.0
dependencies:
- "@babel/core": 7.24.7
+ "@babel/core": 7.26.10
babel-plugin-jest-hoist: 29.6.3
- babel-preset-current-node-syntax: 1.0.1(@babel/core@7.24.7)
+ babel-preset-current-node-syntax: 1.1.0(@babel/core@7.26.10)
dev: true
/balanced-match@1.0.2:
@@ -2308,35 +2443,35 @@ packages:
dependencies:
"@ai-sdk/provider": 0.0.6
"@braintrust/core": 0.0.44
- "@next/env": 14.2.4
+ "@next/env": 14.2.24
argparse: 2.0.1
chalk: 4.1.2
cli-progress: 3.12.0
- dotenv: 16.4.5
+ dotenv: 16.4.7
esbuild: 0.18.20
graceful-fs: 4.2.11
minimatch: 9.0.5
mustache: 4.2.0
pluralize: 8.0.0
- simple-git: 3.25.0
+ simple-git: 3.27.0
uuid: 9.0.1
- zod: 3.23.8
+ zod: 3.24.2
transitivePeerDependencies:
- supports-color
dev: false
- /browserslist@4.23.1:
+ /browserslist@4.24.4:
resolution:
{
- integrity: sha512-TUfofFo/KsK/bWZ9TWQ5O26tsWW4Uhmt8IYklbnUa70udB6P2wA7w7o4PY4muaEPBQaAX+CEnmmIA41NVHtPVw==,
+ integrity: sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==,
}
engines: { node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7 }
hasBin: true
dependencies:
- caniuse-lite: 1.0.30001640
- electron-to-chromium: 1.4.818
- node-releases: 2.0.14
- update-browserslist-db: 1.1.0(browserslist@4.23.1)
+ caniuse-lite: 1.0.30001705
+ electron-to-chromium: 1.5.120
+ node-releases: 2.0.19
+ update-browserslist-db: 1.1.3(browserslist@4.24.4)
dev: true
/bs-logger@0.2.6:
@@ -2415,6 +2550,17 @@ packages:
- bluebird
dev: true
+ /call-bind-apply-helpers@1.0.2:
+ resolution:
+ {
+ integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ es-errors: 1.3.0
+ function-bind: 1.1.2
+ dev: false
+
/callsites@3.1.0:
resolution:
{
@@ -2439,25 +2585,13 @@ packages:
engines: { node: ">=10" }
dev: true
- /caniuse-lite@1.0.30001640:
+ /caniuse-lite@1.0.30001705:
resolution:
{
- integrity: sha512-lA4VMpW0PSUrFnkmVuEKBUovSWKhj7puyCg8StBChgu298N1AtuF1sKWEvfDuimSEDbhlb/KqPKC3fs1HbuQUA==,
+ integrity: sha512-S0uyMMiYvA7CxNgomYBwwwPUnWzFD83f3B1ce5jHUfHTH//QL6hHsreI8RVC5606R4ssqravelYO5TU6t8sEyg==,
}
dev: true
- /chalk@2.4.2:
- resolution:
- {
- integrity: sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==,
- }
- engines: { node: ">=4" }
- dependencies:
- ansi-styles: 3.2.1
- escape-string-regexp: 1.0.5
- supports-color: 5.5.0
- dev: true
-
/chalk@4.1.2:
resolution:
{
@@ -2476,10 +2610,10 @@ packages:
engines: { node: ">=10" }
dev: true
- /cheminfo-types@1.7.3:
+ /cheminfo-types@1.8.1:
resolution:
{
- integrity: sha512-KIKBULfo+XwkSBwMfwjBmZCmY+RXisN2kRc33WikuWBsCQQy5alHWYVrMCO8//lDvy9h1giOzwsC9kgq0OahUw==,
+ integrity: sha512-FRcpVkox+cRovffgqNdDFQ1eUav+i/Vq/CUd1hcfEl2bevntFlzznL+jE8g4twl6ElB7gZjCko6pYpXyMn+6dA==,
}
dev: false
@@ -2501,6 +2635,14 @@ packages:
engines: { node: ">=10" }
dev: true
+ /chownr@3.0.0:
+ resolution:
+ {
+ integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==,
+ }
+ engines: { node: ">=18" }
+ dev: true
+
/ci-info@3.9.0:
resolution:
{
@@ -2509,10 +2651,10 @@ packages:
engines: { node: ">=8" }
dev: true
- /cjs-module-lexer@1.3.1:
+ /cjs-module-lexer@1.4.3:
resolution:
{
- integrity: sha512-a3KdPAANPbNE4ZUv9h6LckSl9zLsYOP4MBmhIPkRaeyybt+r4UghLvq+xw/YwUcC1gqylCkL4rdVs3Lwupjm4Q==,
+ integrity: sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==,
}
dev: true
@@ -2534,6 +2676,14 @@ packages:
string-width: 4.2.3
dev: false
+ /cli-width@4.1.0:
+ resolution:
+ {
+ integrity: sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==,
+ }
+ engines: { node: ">= 12" }
+ dev: true
+
/cliui@8.0.1:
resolution:
{
@@ -2561,15 +2711,6 @@ packages:
}
dev: true
- /color-convert@1.9.3:
- resolution:
- {
- integrity: sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==,
- }
- dependencies:
- color-name: 1.1.3
- dev: true
-
/color-convert@2.0.1:
resolution:
{
@@ -2579,13 +2720,6 @@ packages:
dependencies:
color-name: 1.1.4
- /color-name@1.1.3:
- resolution:
- {
- integrity: sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==,
- }
- dev: true
-
/color-name@1.1.4:
resolution:
{
@@ -2657,10 +2791,10 @@ packages:
}
dev: true
- /consola@3.4.0:
+ /consola@3.4.1:
resolution:
{
- integrity: sha512-EiPU8G6dQG0GFHNR8ljnZFki/8a+cQwEQ+7wpxdChl02Q8HXlwEZWD5lqAF8vC2sEC3Tehr8hy7vErz88LHyUA==,
+ integrity: sha512-zaUUWockhqxFf4bSXS+kTJwxWvAyMuKtShx0BWcGrMEUqbETcBCT91iQs9pECNx7yz8VH4VeWW/1KAbhE8kiww==,
}
engines: { node: ^14.18.0 || >=16.10.0 }
dev: true
@@ -2679,7 +2813,15 @@ packages:
}
dev: true
- /create-jest@29.7.0(@types/node@20.14.10):
+ /cookie@0.7.2:
+ resolution:
+ {
+ integrity: sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==,
+ }
+ engines: { node: ">= 0.6" }
+ dev: true
+
+ /create-jest@29.7.0(@types/node@20.17.24):
resolution:
{
integrity: sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==,
@@ -2691,7 +2833,7 @@ packages:
chalk: 4.1.2
exit: 0.1.2
graceful-fs: 4.2.11
- jest-config: 29.7.0(@types/node@20.14.10)
+ jest-config: 29.7.0(@types/node@20.17.24)
jest-util: 29.7.0
prompts: 2.4.2
transitivePeerDependencies:
@@ -2713,13 +2855,6 @@ packages:
which: 2.0.2
dev: true
- /d3-array@0.7.1:
- resolution:
- {
- integrity: sha512-Ifi3fH46Bco+Lb1mOlTxbFEuF3NdyElEVVD+EmoK327I0JzKAP4x57cl+HoxHqFcVd8F/uXLC+wtY3n/R1uO2w==,
- }
- dev: false
-
/debug@4.4.0:
resolution:
{
@@ -2793,23 +2928,23 @@ packages:
engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 }
dev: true
- /dotenv@16.4.5:
+ /dotenv@16.4.7:
resolution:
{
- integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==,
+ integrity: sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==,
}
engines: { node: ">=12" }
dev: false
- /duckdb@1.0.0:
+ /duckdb@1.2.0:
resolution:
{
- integrity: sha512-QwpcIeN42A2lL19S70mUFibZgRcEcZpCkKHdzDgecHaYZhXj3+1i2cxSDyAk/RVg5CYnqj1Dp4jAuN4cc80udA==,
+ integrity: sha512-zAHHRTMoZhWIwvOsyNkgV9c1nq0gR0j+ZyX0uTCRFZTNOlYO4lnErP5Fddt/6iKMXsTNL9v1oTG9E76S5jMh7w==,
}
requiresBuild: true
dependencies:
- "@mapbox/node-pre-gyp": 1.0.11
- node-addon-api: 7.1.0
+ "@mapbox/node-pre-gyp": 2.0.0
+ node-addon-api: 7.1.1
node-gyp: 9.4.1
transitivePeerDependencies:
- bluebird
@@ -2817,6 +2952,18 @@ packages:
- supports-color
dev: true
+ /dunder-proto@1.0.1:
+ resolution:
+ {
+ integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind-apply-helpers: 1.0.2
+ es-errors: 1.3.0
+ gopd: 1.2.0
+ dev: false
+
/eastasianwidth@0.2.0:
resolution:
{
@@ -2824,10 +2971,21 @@ packages:
}
dev: true
- /electron-to-chromium@1.4.818:
+ /ejs@3.1.10:
resolution:
{
- integrity: sha512-eGvIk2V0dGImV9gWLq8fDfTTsCAeMDwZqEPMr+jMInxZdnp9Us8UpovYpRCf9NQ7VOFgrN2doNSgvISbsbNpxA==,
+ integrity: sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==,
+ }
+ engines: { node: ">=0.10.0" }
+ hasBin: true
+ dependencies:
+ jake: 10.9.2
+ dev: true
+
+ /electron-to-chromium@1.5.120:
+ resolution:
+ {
+ integrity: sha512-oTUp3gfX1gZI+xfD2djr2rzQdHCwHzPQrrK0CD7WpTdF0nPdQ/INcRVjWgLdCT4a9W3jFObR9DAfsuyFQnI8CQ==,
}
dev: true
@@ -2887,6 +3045,45 @@ packages:
is-arrayish: 0.2.1
dev: true
+ /es-define-property@1.0.1:
+ resolution:
+ {
+ integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==,
+ }
+ engines: { node: ">= 0.4" }
+ dev: false
+
+ /es-errors@1.3.0:
+ resolution:
+ {
+ integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==,
+ }
+ engines: { node: ">= 0.4" }
+ dev: false
+
+ /es-object-atoms@1.1.1:
+ resolution:
+ {
+ integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ es-errors: 1.3.0
+ dev: false
+
+ /es-set-tostringtag@2.1.0:
+ resolution:
+ {
+ integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ es-errors: 1.3.0
+ get-intrinsic: 1.3.0
+ has-tostringtag: 1.0.2
+ hasown: 2.0.2
+ dev: false
+
/esbuild@0.18.20:
resolution:
{
@@ -2955,22 +3152,14 @@ packages:
"@esbuild/win32-x64": 0.25.1
dev: true
- /escalade@3.1.2:
+ /escalade@3.2.0:
resolution:
{
- integrity: sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==,
+ integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==,
}
engines: { node: ">=6" }
dev: true
- /escape-string-regexp@1.0.5:
- resolution:
- {
- integrity: sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==,
- }
- engines: { node: ">=0.8.0" }
- dev: true
-
/escape-string-regexp@2.0.0:
resolution:
{
@@ -3036,10 +3225,10 @@ packages:
jest-util: 29.7.0
dev: true
- /exponential-backoff@3.1.1:
+ /exponential-backoff@3.1.2:
resolution:
{
- integrity: sha512-dX7e/LHVJ6W3DE1MHWi9S1EYzDESENfLrYohG2G++ovZrYOkm4Knwa0mc1cn84xJOR4KEU0WSchhLbd0UklbHw==,
+ integrity: sha512-8QxYTVXUkuy7fIIoitQkPwGonB8F3Zj8eEO8Sqg9Zv/bkI7RJAzowee4gr81Hak/dUTpA2Z7VfQgoijjPNlUZA==,
}
dev: true
@@ -3057,6 +3246,13 @@ packages:
}
dev: true
+ /fast-uri@3.0.6:
+ resolution:
+ {
+ integrity: sha512-Atfo14OibSv5wAp4VWNsFYE1AchQRTv9cBGWET4pZWHzYshFSS9NQI6I57rdKn9croWVMbYFbLhJ+yJvmZIIHw==,
+ }
+ dev: false
+
/fb-watchman@2.0.2:
resolution:
{
@@ -3087,6 +3283,15 @@ packages:
}
dev: false
+ /filelist@1.0.4:
+ resolution:
+ {
+ integrity: sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==,
+ }
+ dependencies:
+ minimatch: 5.1.6
+ dev: true
+
/fill-range@7.1.1:
resolution:
{
@@ -3126,15 +3331,16 @@ packages:
}
dev: false
- /form-data@4.0.0:
+ /form-data@4.0.2:
resolution:
{
- integrity: sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==,
+ integrity: sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==,
}
engines: { node: ">= 6" }
dependencies:
asynckit: 0.4.0
combined-stream: 1.0.8
+ es-set-tostringtag: 2.1.0
mime-types: 2.1.35
dev: false
@@ -3182,26 +3388,6 @@ packages:
{
integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==,
}
- dev: true
-
- /gauge@3.0.2:
- resolution:
- {
- integrity: sha512-+5J6MS/5XksCuXq++uFRsnUd7Ovu1XenbeuIuNRJxYWjgQbPuFhT14lAvsWfqfAmnwluf1OwMjz39HjfLPci0Q==,
- }
- engines: { node: ">=10" }
- deprecated: This package is no longer supported.
- dependencies:
- aproba: 2.0.0
- color-support: 1.1.3
- console-control-strings: 1.1.0
- has-unicode: 2.0.1
- object-assign: 4.1.1
- signal-exit: 3.0.7
- string-width: 4.2.3
- strip-ansi: 6.0.1
- wide-align: 1.1.5
- dev: true
/gauge@4.0.4:
resolution:
@@ -3237,6 +3423,25 @@ packages:
engines: { node: 6.* || 8.* || >= 10.* }
dev: true
+ /get-intrinsic@1.3.0:
+ resolution:
+ {
+ integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind-apply-helpers: 1.0.2
+ es-define-property: 1.0.1
+ es-errors: 1.3.0
+ es-object-atoms: 1.1.1
+ function-bind: 1.1.2
+ get-proto: 1.0.1
+ gopd: 1.2.0
+ has-symbols: 1.1.0
+ hasown: 2.0.2
+ math-intrinsics: 1.1.0
+ dev: false
+
/get-package-type@0.1.0:
resolution:
{
@@ -3245,6 +3450,17 @@ packages:
engines: { node: ">=8.0.0" }
dev: true
+ /get-proto@1.0.1:
+ resolution:
+ {
+ integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ dunder-proto: 1.0.1
+ es-object-atoms: 1.1.1
+ dev: false
+
/get-stream@6.0.1:
resolution:
{
@@ -3253,10 +3469,10 @@ packages:
engines: { node: ">=10" }
dev: true
- /get-tsconfig@4.7.5:
+ /get-tsconfig@4.10.0:
resolution:
{
- integrity: sha512-ZCuZCnlqNzjb4QprAzXKdpp/gh6KTxSJuw3IBsPnV/7fV4NxC9ckB+vPTt8w7fJA0TaSD7c55BR47JD6MEDyDw==,
+ integrity: sha512-kGzZ3LWWQcGIAmg6iWvXn0ei6WDtV26wzHRMwDSzmAbcXrTEXxHy6IehI6/4eT6VRKyMP1eF1VqwrVUmE/LR7A==,
}
dependencies:
resolve-pkg-maps: 1.0.0
@@ -3315,12 +3531,28 @@ packages:
engines: { node: ">=4" }
dev: true
+ /gopd@1.2.0:
+ resolution:
+ {
+ integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==,
+ }
+ engines: { node: ">= 0.4" }
+ dev: false
+
/graceful-fs@4.2.11:
resolution:
{
integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==,
}
+ /graphql@16.10.0:
+ resolution:
+ {
+ integrity: sha512-AjqGKbDGUFRKIRCP9tCKiIGHyriz2oHEbPIbEtcSLSs4YjReZOIPQQWek4+6hjw62H9QShXHyaGivGiYVLeYFQ==,
+ }
+ engines: { node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0 }
+ dev: true
+
/handlebars@4.7.8:
resolution:
{
@@ -3337,20 +3569,30 @@ packages:
uglify-js: 3.19.3
dev: true
- /has-flag@3.0.0:
+ /has-flag@4.0.0:
resolution:
{
- integrity: sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==,
+ integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==,
}
- engines: { node: ">=4" }
- dev: true
+ engines: { node: ">=8" }
- /has-flag@4.0.0:
+ /has-symbols@1.1.0:
resolution:
{
- integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==,
+ integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==,
}
- engines: { node: ">=8" }
+ engines: { node: ">= 0.4" }
+ dev: false
+
+ /has-tostringtag@1.0.2:
+ resolution:
+ {
+ integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ has-symbols: 1.1.0
+ dev: false
/has-unicode@2.0.1:
resolution:
@@ -3367,6 +3609,12 @@ packages:
engines: { node: ">= 0.4" }
dependencies:
function-bind: 1.1.2
+
+ /headers-polyfill@4.0.3:
+ resolution:
+ {
+ integrity: sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ==,
+ }
dev: true
/html-escaper@2.0.2:
@@ -3410,6 +3658,19 @@ packages:
- supports-color
dev: true
+ /https-proxy-agent@7.0.6:
+ resolution:
+ {
+ integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==,
+ }
+ engines: { node: ">= 14" }
+ dependencies:
+ agent-base: 7.1.3
+ debug: 4.4.0
+ transitivePeerDependencies:
+ - supports-color
+ dev: true
+
/human-signals@2.1.0:
resolution:
{
@@ -3438,10 +3699,10 @@ packages:
dev: true
optional: true
- /import-local@3.1.0:
+ /import-local@3.2.0:
resolution:
{
- integrity: sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==,
+ integrity: sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==,
}
engines: { node: ">=8" }
hasBin: true
@@ -3524,10 +3785,10 @@ packages:
}
dev: true
- /is-core-module@2.14.0:
+ /is-core-module@2.16.1:
resolution:
{
- integrity: sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==,
+ integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==,
}
engines: { node: ">= 0.4" }
dependencies:
@@ -3556,6 +3817,13 @@ packages:
}
dev: true
+ /is-node-process@1.2.0:
+ resolution:
+ {
+ integrity: sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==,
+ }
+ dev: true
+
/is-number@7.0.0:
resolution:
{
@@ -3594,8 +3862,8 @@ packages:
}
engines: { node: ">=8" }
dependencies:
- "@babel/core": 7.24.7
- "@babel/parser": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/parser": 7.26.10
"@istanbuljs/schema": 0.1.3
istanbul-lib-coverage: 3.2.2
semver: 6.3.1
@@ -3610,11 +3878,11 @@ packages:
}
engines: { node: ">=10" }
dependencies:
- "@babel/core": 7.24.7
- "@babel/parser": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/parser": 7.26.10
"@istanbuljs/schema": 0.1.3
istanbul-lib-coverage: 3.2.2
- semver: 7.6.2
+ semver: 7.7.1
transitivePeerDependencies:
- supports-color
dev: true
@@ -3667,6 +3935,20 @@ packages:
"@pkgjs/parseargs": 0.11.0
dev: true
+ /jake@10.9.2:
+ resolution:
+ {
+ integrity: sha512-2P4SQ0HrLQ+fw6llpLnOaGAvN2Zu6778SJMrCUwns4fOoG9ayrTiZk3VV8sCPkVZF8ab0zksVpS8FDY5pRCNBA==,
+ }
+ engines: { node: ">=10" }
+ hasBin: true
+ dependencies:
+ async: 3.2.6
+ chalk: 4.1.2
+ filelist: 1.0.4
+ minimatch: 3.1.2
+ dev: true
+
/jest-changed-files@29.7.0:
resolution:
{
@@ -3690,7 +3972,7 @@ packages:
"@jest/expect": 29.7.0
"@jest/test-result": 29.7.0
"@jest/types": 29.6.3
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
chalk: 4.1.2
co: 4.6.0
dedent: 1.5.3
@@ -3711,7 +3993,7 @@ packages:
- supports-color
dev: true
- /jest-cli@29.7.0(@types/node@20.14.10):
+ /jest-cli@29.7.0(@types/node@20.17.24):
resolution:
{
integrity: sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==,
@@ -3728,10 +4010,10 @@ packages:
"@jest/test-result": 29.7.0
"@jest/types": 29.6.3
chalk: 4.1.2
- create-jest: 29.7.0(@types/node@20.14.10)
+ create-jest: 29.7.0(@types/node@20.17.24)
exit: 0.1.2
- import-local: 3.1.0
- jest-config: 29.7.0(@types/node@20.14.10)
+ import-local: 3.2.0
+ jest-config: 29.7.0(@types/node@20.17.24)
jest-util: 29.7.0
jest-validate: 29.7.0
yargs: 17.7.2
@@ -3742,7 +4024,7 @@ packages:
- ts-node
dev: true
- /jest-config@29.7.0(@types/node@20.14.10):
+ /jest-config@29.7.0(@types/node@20.17.24):
resolution:
{
integrity: sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==,
@@ -3757,11 +4039,11 @@ packages:
ts-node:
optional: true
dependencies:
- "@babel/core": 7.24.7
+ "@babel/core": 7.26.10
"@jest/test-sequencer": 29.7.0
"@jest/types": 29.6.3
- "@types/node": 20.14.10
- babel-jest: 29.7.0(@babel/core@7.24.7)
+ "@types/node": 20.17.24
+ babel-jest: 29.7.0(@babel/core@7.26.10)
chalk: 4.1.2
ci-info: 3.9.0
deepmerge: 4.3.1
@@ -3775,7 +4057,7 @@ packages:
jest-runner: 29.7.0
jest-util: 29.7.0
jest-validate: 29.7.0
- micromatch: 4.0.7
+ micromatch: 4.0.8
parse-json: 5.2.0
pretty-format: 29.7.0
slash: 3.0.0
@@ -3832,7 +4114,7 @@ packages:
"@jest/environment": 29.7.0
"@jest/fake-timers": 29.7.0
"@jest/types": 29.6.3
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
jest-mock: 29.7.0
jest-util: 29.7.0
dev: true
@@ -3854,14 +4136,14 @@ packages:
dependencies:
"@jest/types": 29.6.3
"@types/graceful-fs": 4.1.9
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
anymatch: 3.1.3
fb-watchman: 2.0.2
graceful-fs: 4.2.11
jest-regex-util: 29.6.3
jest-util: 29.7.0
jest-worker: 29.7.0
- micromatch: 4.0.7
+ micromatch: 4.0.8
walker: 1.0.8
optionalDependencies:
fsevents: 2.3.3
@@ -3898,12 +4180,12 @@ packages:
}
engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 }
dependencies:
- "@babel/code-frame": 7.24.7
+ "@babel/code-frame": 7.26.2
"@jest/types": 29.6.3
"@types/stack-utils": 2.0.3
chalk: 4.1.2
graceful-fs: 4.2.11
- micromatch: 4.0.7
+ micromatch: 4.0.8
pretty-format: 29.7.0
slash: 3.0.0
stack-utils: 2.0.6
@@ -3917,7 +4199,7 @@ packages:
engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 }
dependencies:
"@jest/types": 29.6.3
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
jest-util: 29.7.0
dev: true
@@ -3970,8 +4252,8 @@ packages:
jest-pnp-resolver: 1.2.3(jest-resolve@29.7.0)
jest-util: 29.7.0
jest-validate: 29.7.0
- resolve: 1.22.8
- resolve.exports: 2.0.2
+ resolve: 1.22.10
+ resolve.exports: 2.0.3
slash: 3.0.0
dev: true
@@ -3987,7 +4269,7 @@ packages:
"@jest/test-result": 29.7.0
"@jest/transform": 29.7.0
"@jest/types": 29.6.3
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
chalk: 4.1.2
emittery: 0.13.1
graceful-fs: 4.2.11
@@ -4021,9 +4303,9 @@ packages:
"@jest/test-result": 29.7.0
"@jest/transform": 29.7.0
"@jest/types": 29.6.3
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
chalk: 4.1.2
- cjs-module-lexer: 1.3.1
+ cjs-module-lexer: 1.4.3
collect-v8-coverage: 1.0.2
glob: 7.2.3
graceful-fs: 4.2.11
@@ -4047,15 +4329,15 @@ packages:
}
engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 }
dependencies:
- "@babel/core": 7.24.7
- "@babel/generator": 7.24.7
- "@babel/plugin-syntax-jsx": 7.24.7(@babel/core@7.24.7)
- "@babel/plugin-syntax-typescript": 7.24.7(@babel/core@7.24.7)
- "@babel/types": 7.24.7
+ "@babel/core": 7.26.10
+ "@babel/generator": 7.26.10
+ "@babel/plugin-syntax-jsx": 7.25.9(@babel/core@7.26.10)
+ "@babel/plugin-syntax-typescript": 7.25.9(@babel/core@7.26.10)
+ "@babel/types": 7.26.10
"@jest/expect-utils": 29.7.0
"@jest/transform": 29.7.0
"@jest/types": 29.6.3
- babel-preset-current-node-syntax: 1.0.1(@babel/core@7.24.7)
+ babel-preset-current-node-syntax: 1.1.0(@babel/core@7.26.10)
chalk: 4.1.2
expect: 29.7.0
graceful-fs: 4.2.11
@@ -4066,7 +4348,7 @@ packages:
jest-util: 29.7.0
natural-compare: 1.4.0
pretty-format: 29.7.0
- semver: 7.6.2
+ semver: 7.7.1
transitivePeerDependencies:
- supports-color
dev: true
@@ -4089,7 +4371,7 @@ packages:
engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 }
dependencies:
"@jest/types": 29.6.3
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
chalk: 4.1.2
ci-info: 3.9.0
graceful-fs: 4.2.11
@@ -4120,7 +4402,7 @@ packages:
dependencies:
"@jest/test-result": 29.7.0
"@jest/types": 29.6.3
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
ansi-escapes: 4.3.2
chalk: 4.1.2
emittery: 0.13.1
@@ -4135,13 +4417,13 @@ packages:
}
engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 }
dependencies:
- "@types/node": 20.14.10
+ "@types/node": 20.17.24
jest-util: 29.7.0
merge-stream: 2.0.0
supports-color: 8.1.1
dev: true
- /jest@29.7.0(@types/node@20.14.10):
+ /jest@29.7.0(@types/node@20.17.24):
resolution:
{
integrity: sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==,
@@ -4156,8 +4438,8 @@ packages:
dependencies:
"@jest/core": 29.7.0
"@jest/types": 29.6.3
- import-local: 3.1.0
- jest-cli: 29.7.0(@types/node@20.14.10)
+ import-local: 3.2.0
+ jest-cli: 29.7.0(@types/node@20.17.24)
transitivePeerDependencies:
- "@types/node"
- babel-plugin-macros
@@ -4216,12 +4498,12 @@ packages:
}
dev: true
- /jsesc@2.5.2:
+ /jsesc@3.1.0:
resolution:
{
- integrity: sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==,
+ integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==,
}
- engines: { node: ">=4" }
+ engines: { node: ">=6" }
hasBin: true
dev: true
@@ -4292,10 +4574,10 @@ packages:
integrity: sha512-jfLoSGwZNyjfY8eK4ayhjfcIu3BfWvP6sWieYzYI3AWldwXVoWEz1gtrQL10v/8YltYLBunqNjeVFXPMUs+MJg==,
}
dependencies:
- cheminfo-types: 1.7.3
+ cheminfo-types: 1.8.1
install: 0.13.0
- ml-matrix: 6.11.1
- ml-spectra-processing: 14.5.1
+ ml-matrix: 6.12.1
+ ml-spectra-processing: 14.10.0
dev: false
/lines-and-columns@1.2.4:
@@ -4368,16 +4650,6 @@ packages:
}
dev: true
- /make-dir@3.1.0:
- resolution:
- {
- integrity: sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==,
- }
- engines: { node: ">=8" }
- dependencies:
- semver: 6.3.1
- dev: true
-
/make-dir@4.0.0:
resolution:
{
@@ -4385,7 +4657,7 @@ packages:
}
engines: { node: ">=10" }
dependencies:
- semver: 7.6.2
+ semver: 7.7.1
dev: true
/make-error@1.3.6:
@@ -4402,7 +4674,7 @@ packages:
}
engines: { node: ^12.13.0 || ^14.15.0 || >=16.0.0 }
dependencies:
- agentkeepalive: 4.5.0
+ agentkeepalive: 4.6.0
cacache: 16.1.3
http-cache-semantics: 4.1.1
http-proxy-agent: 5.0.0
@@ -4414,7 +4686,7 @@ packages:
minipass-fetch: 2.1.2
minipass-flush: 1.0.5
minipass-pipeline: 1.2.4
- negotiator: 0.6.3
+ negotiator: 0.6.4
promise-retry: 2.0.1
socks-proxy-agent: 7.0.0
ssri: 9.0.1
@@ -4441,6 +4713,14 @@ packages:
hasBin: true
dev: true
+ /math-intrinsics@1.1.0:
+ resolution:
+ {
+ integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==,
+ }
+ engines: { node: ">= 0.4" }
+ dev: false
+
/merge-stream@2.0.0:
resolution:
{
@@ -4448,10 +4728,10 @@ packages:
}
dev: true
- /micromatch@4.0.7:
+ /micromatch@4.0.8:
resolution:
{
- integrity: sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==,
+ integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==,
}
engines: { node: ">=8.6" }
dependencies:
@@ -4611,6 +4891,17 @@ packages:
yallist: 4.0.0
dev: true
+ /minizlib@3.0.1:
+ resolution:
+ {
+ integrity: sha512-umcy022ILvb5/3Djuu8LWeqUa8D68JaBzlttKeMWen48SjabqS3iY5w/vzeMzMUNhLDifyhbOwKDSznB1vvrwg==,
+ }
+ engines: { node: ">= 18" }
+ dependencies:
+ minipass: 7.1.2
+ rimraf: 5.0.10
+ dev: true
+
/mkdirp@1.0.4:
resolution:
{
@@ -4620,6 +4911,15 @@ packages:
hasBin: true
dev: true
+ /mkdirp@3.0.1:
+ resolution:
+ {
+ integrity: sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==,
+ }
+ engines: { node: ">=10" }
+ hasBin: true
+ dev: true
+
/ml-array-max@1.2.4:
resolution:
{
@@ -4649,35 +4949,34 @@ packages:
ml-array-min: 1.2.3
dev: false
- /ml-matrix@6.11.1:
+ /ml-matrix@6.12.1:
resolution:
{
- integrity: sha512-Fvp1xF1O07tt6Ux9NcnEQTei5UlqbRpvvaFZGs7l3Ij+nOaEDcmbSVtxwNa8V4IfdyFI1NLNUteroMJ1S6vcEg==,
+ integrity: sha512-TJ+8eOFdp+INvzR4zAuwBQJznDUfktMtOB6g/hUcGh3rcyjxbz4Te57Pgri8Q9bhSQ7Zys4IYOGhFdnlgeB6Lw==,
}
dependencies:
is-any-array: 2.0.1
ml-array-rescale: 1.3.7
dev: false
- /ml-spectra-processing@14.5.1:
+ /ml-spectra-processing@14.10.0:
resolution:
{
- integrity: sha512-x/aVxJsusIw9Di3SbVc9Xs6By1ASDqsjxsnK2GRECz8DTIpjkuq6yuWvoQr7HPmk87h2cGygqz7jsXAYX75/bA==,
+ integrity: sha512-4fyF6tojgVgh6m9nmFvaIlGhrvHq+swn64IxQ44F4k4o7Qkl8xKOJWfQ4EsfoX66GqZn2PFfcn1xUGRNwB8+3w==,
}
dependencies:
binary-search: 1.3.6
- cheminfo-types: 1.7.3
+ cheminfo-types: 1.8.1
fft.js: 4.0.4
is-any-array: 2.0.1
- ml-matrix: 6.11.1
- ml-xsadd: 2.0.0
- spline-interpolator: 1.0.0
+ ml-matrix: 6.12.1
+ ml-xsadd: 3.0.1
dev: false
- /ml-xsadd@2.0.0:
+ /ml-xsadd@3.0.1:
resolution:
{
- integrity: sha512-VoAYUqmPRmzKbbqRejjqceGFp3VF81Qe8XXFGU0UXLxB7Mf4GGvyGq5Qn3k4AiQgDEV6WzobqlPOd+j0+m6IrA==,
+ integrity: sha512-Fz2q6dwgzGM8wYKGArTUTZDGa4lQFA2Vi6orjGeTVRy22ZnQFKlJuwS9n8NRviqz1KHAHAzdKJwbnYhdo38uYg==,
}
dev: false
@@ -4687,6 +4986,43 @@ packages:
integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==,
}
+ /msw@2.7.3(@types/node@20.17.24)(typescript@5.8.2):
+ resolution:
+ {
+ integrity: sha512-+mycXv8l2fEAjFZ5sjrtjJDmm2ceKGjrNbBr1durRg6VkU9fNUE/gsmQ51hWbHqs+l35W1iM+ZsmOD9Fd6lspw==,
+ }
+ engines: { node: ">=18" }
+ hasBin: true
+ requiresBuild: true
+ peerDependencies:
+ typescript: ">= 4.8.x"
+ peerDependenciesMeta:
+ typescript:
+ optional: true
+ dependencies:
+ "@bundled-es-modules/cookie": 2.0.1
+ "@bundled-es-modules/statuses": 1.0.1
+ "@bundled-es-modules/tough-cookie": 0.1.6
+ "@inquirer/confirm": 5.1.8(@types/node@20.17.24)
+ "@mswjs/interceptors": 0.37.6
+ "@open-draft/deferred-promise": 2.2.0
+ "@open-draft/until": 2.1.0
+ "@types/cookie": 0.6.0
+ "@types/statuses": 2.0.5
+ graphql: 16.10.0
+ headers-polyfill: 4.0.3
+ is-node-process: 1.2.0
+ outvariant: 1.4.3
+ path-to-regexp: 6.3.0
+ picocolors: 1.1.1
+ strict-event-emitter: 0.5.1
+ type-fest: 4.37.0
+ typescript: 5.8.2
+ yargs: 17.7.2
+ transitivePeerDependencies:
+ - "@types/node"
+ dev: true
+
/mustache@4.2.0:
resolution:
{
@@ -4695,6 +5031,14 @@ packages:
hasBin: true
dev: false
+ /mute-stream@2.0.0:
+ resolution:
+ {
+ integrity: sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==,
+ }
+ engines: { node: ^18.17.0 || >=20.5.0 }
+ dev: true
+
/mz@2.7.0:
resolution:
{
@@ -4713,10 +5057,10 @@ packages:
}
dev: true
- /negotiator@0.6.3:
+ /negotiator@0.6.4:
resolution:
{
- integrity: sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==,
+ integrity: sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==,
}
engines: { node: ">= 0.6" }
dev: true
@@ -4728,12 +5072,11 @@ packages:
}
dev: true
- /node-addon-api@7.1.0:
+ /node-addon-api@7.1.1:
resolution:
{
- integrity: sha512-mNcltoe1R8o7STTegSOHdnJNN7s5EUvhoS7ShnTHDyOSd+8H+UdWODq6qSv67PjC8Zc5JRT8+oLAMCr0SIXw7g==,
+ integrity: sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==,
}
- engines: { node: ^16 || ^18 || >= 20 }
dev: true
/node-domexception@1.0.0:
@@ -4767,14 +5110,14 @@ packages:
hasBin: true
dependencies:
env-paths: 2.2.1
- exponential-backoff: 3.1.1
+ exponential-backoff: 3.1.2
glob: 7.2.3
graceful-fs: 4.2.11
make-fetch-happen: 10.2.1
nopt: 6.0.0
npmlog: 6.0.2
rimraf: 3.0.2
- semver: 7.6.2
+ semver: 7.7.1
tar: 6.2.1
which: 2.0.2
transitivePeerDependencies:
@@ -4789,33 +5132,33 @@ packages:
}
dev: true
- /node-releases@2.0.14:
+ /node-releases@2.0.19:
resolution:
{
- integrity: sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==,
+ integrity: sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==,
}
dev: true
- /nopt@5.0.0:
+ /nopt@6.0.0:
resolution:
{
- integrity: sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==,
+ integrity: sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g==,
}
- engines: { node: ">=6" }
+ engines: { node: ^12.13.0 || ^14.15.0 || >=16.0.0 }
hasBin: true
dependencies:
abbrev: 1.1.1
dev: true
- /nopt@6.0.0:
+ /nopt@8.1.0:
resolution:
{
- integrity: sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g==,
+ integrity: sha512-ieGu42u/Qsa4TFktmaKEwM6MQH0pOWnaB3htzh0JRtx84+Mebc0cbZYN5bC+6WTZ4+77xrL9Pn5m7CV6VIkV7A==,
}
- engines: { node: ^12.13.0 || ^14.15.0 || >=16.0.0 }
+ engines: { node: ^18.17.0 || >=20.5.0 }
hasBin: true
dependencies:
- abbrev: 1.1.1
+ abbrev: 3.0.0
dev: true
/normalize-path@3.0.0:
@@ -4836,19 +5179,6 @@ packages:
path-key: 3.1.1
dev: true
- /npmlog@5.0.1:
- resolution:
- {
- integrity: sha512-AqZtDUWOMKs1G/8lwylVjrdYgqA4d9nu8hc+0gzRxlDb1I10+FHBGMXs6aiQHFdCUUlqH99MUMuLfzWDNDtfxw==,
- }
- deprecated: This package is no longer supported.
- dependencies:
- are-we-there-yet: 2.0.0
- console-control-strings: 1.1.0
- gauge: 3.0.2
- set-blocking: 2.0.0
- dev: true
-
/npmlog@6.0.2:
resolution:
{
@@ -4897,10 +5227,10 @@ packages:
}
hasBin: true
dependencies:
- "@types/node": 18.19.39
- "@types/node-fetch": 2.6.11
+ "@types/node": 18.19.80
+ "@types/node-fetch": 2.6.12
abort-controller: 3.0.0
- agentkeepalive: 4.5.0
+ agentkeepalive: 4.6.0
form-data-encoder: 1.7.2
formdata-node: 4.4.1
node-fetch: 2.7.0
@@ -4909,15 +5239,22 @@ packages:
- encoding
dev: false
- /openapi3-ts@4.3.3:
+ /openapi3-ts@4.4.0:
resolution:
{
- integrity: sha512-LKkzBGJcZ6wdvkKGMoSvpK+0cbN5Xc3XuYkJskO+vjEQWJgs1kgtyUk0pjf8KwPuysv323Er62F5P17XQl96Qg==,
+ integrity: sha512-9asTNB9IkKEzWMcHmVZE7Ts3kC9G7AFHfs8i7caD8HbI76gEjdkId4z/AkP83xdZsH7PLAnnbl47qZkXuxpArw==,
}
dependencies:
- yaml: 2.4.5
+ yaml: 2.7.0
dev: false
+ /outvariant@1.4.3:
+ resolution:
+ {
+ integrity: sha512-+Sl2UErvtsoajRDKCE5/dBz4DIvHXQQnAxtQTF04OJxY0+DyZXSo5P5Bb7XYWOh81syohlYL24hbDwxedPUJCA==,
+ }
+ dev: true
+
/p-limit@2.3.0:
resolution:
{
@@ -4980,7 +5317,7 @@ packages:
}
engines: { node: ">=8" }
dependencies:
- "@babel/code-frame": 7.24.7
+ "@babel/code-frame": 7.26.2
error-ex: 1.3.2
json-parse-even-better-errors: 2.3.1
lines-and-columns: 1.2.4
@@ -5028,6 +5365,13 @@ packages:
minipass: 7.1.2
dev: true
+ /path-to-regexp@6.3.0:
+ resolution:
+ {
+ integrity: sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==,
+ }
+ dev: true
+
/picocolors@1.1.1:
resolution:
{
@@ -5148,12 +5492,22 @@ packages:
sisteransi: 1.0.5
dev: true
+ /psl@1.15.0:
+ resolution:
+ {
+ integrity: sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==,
+ }
+ dependencies:
+ punycode: 2.3.1
+ dev: true
+
/punycode@2.3.1:
resolution:
{
integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==,
}
engines: { node: ">=6" }
+ dev: true
/pure-rand@6.1.0:
resolution:
@@ -5162,6 +5516,13 @@ packages:
}
dev: true
+ /querystringify@2.2.0:
+ resolution:
+ {
+ integrity: sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==,
+ }
+ dev: true
+
/react-is@18.3.1:
resolution:
{
@@ -5205,6 +5566,13 @@ packages:
engines: { node: ">=0.10.0" }
dev: false
+ /requires-port@1.0.0:
+ resolution:
+ {
+ integrity: sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==,
+ }
+ dev: true
+
/resolve-cwd@3.0.0:
resolution:
{
@@ -5230,22 +5598,23 @@ packages:
}
dev: true
- /resolve.exports@2.0.2:
+ /resolve.exports@2.0.3:
resolution:
{
- integrity: sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==,
+ integrity: sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==,
}
engines: { node: ">=10" }
dev: true
- /resolve@1.22.8:
+ /resolve@1.22.10:
resolution:
{
- integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==,
+ integrity: sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==,
}
+ engines: { node: ">= 0.4" }
hasBin: true
dependencies:
- is-core-module: 2.14.0
+ is-core-module: 2.16.1
path-parse: 1.0.7
supports-preserve-symlinks-flag: 1.0.0
dev: true
@@ -5269,35 +5638,45 @@ packages:
glob: 7.2.3
dev: true
- /rollup@4.35.0:
+ /rimraf@5.0.10:
+ resolution:
+ {
+ integrity: sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==,
+ }
+ hasBin: true
+ dependencies:
+ glob: 10.4.5
+ dev: true
+
+ /rollup@4.36.0:
resolution:
{
- integrity: sha512-kg6oI4g+vc41vePJyO6dHt/yl0Rz3Thv0kJeVQ3D1kS3E5XSuKbPc29G4IpT/Kv1KQwgHVcN+HtyS+HYLNSvQg==,
+ integrity: sha512-zwATAXNQxUcd40zgtQG0ZafcRK4g004WtEl7kbuhTWPvf07PsfohXl39jVUvPF7jvNAIkKPQ2XrsDlWuxBd++Q==,
}
engines: { node: ">=18.0.0", npm: ">=8.0.0" }
hasBin: true
dependencies:
"@types/estree": 1.0.6
optionalDependencies:
- "@rollup/rollup-android-arm-eabi": 4.35.0
- "@rollup/rollup-android-arm64": 4.35.0
- "@rollup/rollup-darwin-arm64": 4.35.0
- "@rollup/rollup-darwin-x64": 4.35.0
- "@rollup/rollup-freebsd-arm64": 4.35.0
- "@rollup/rollup-freebsd-x64": 4.35.0
- "@rollup/rollup-linux-arm-gnueabihf": 4.35.0
- "@rollup/rollup-linux-arm-musleabihf": 4.35.0
- "@rollup/rollup-linux-arm64-gnu": 4.35.0
- "@rollup/rollup-linux-arm64-musl": 4.35.0
- "@rollup/rollup-linux-loongarch64-gnu": 4.35.0
- "@rollup/rollup-linux-powerpc64le-gnu": 4.35.0
- "@rollup/rollup-linux-riscv64-gnu": 4.35.0
- "@rollup/rollup-linux-s390x-gnu": 4.35.0
- "@rollup/rollup-linux-x64-gnu": 4.35.0
- "@rollup/rollup-linux-x64-musl": 4.35.0
- "@rollup/rollup-win32-arm64-msvc": 4.35.0
- "@rollup/rollup-win32-ia32-msvc": 4.35.0
- "@rollup/rollup-win32-x64-msvc": 4.35.0
+ "@rollup/rollup-android-arm-eabi": 4.36.0
+ "@rollup/rollup-android-arm64": 4.36.0
+ "@rollup/rollup-darwin-arm64": 4.36.0
+ "@rollup/rollup-darwin-x64": 4.36.0
+ "@rollup/rollup-freebsd-arm64": 4.36.0
+ "@rollup/rollup-freebsd-x64": 4.36.0
+ "@rollup/rollup-linux-arm-gnueabihf": 4.36.0
+ "@rollup/rollup-linux-arm-musleabihf": 4.36.0
+ "@rollup/rollup-linux-arm64-gnu": 4.36.0
+ "@rollup/rollup-linux-arm64-musl": 4.36.0
+ "@rollup/rollup-linux-loongarch64-gnu": 4.36.0
+ "@rollup/rollup-linux-powerpc64le-gnu": 4.36.0
+ "@rollup/rollup-linux-riscv64-gnu": 4.36.0
+ "@rollup/rollup-linux-s390x-gnu": 4.36.0
+ "@rollup/rollup-linux-x64-gnu": 4.36.0
+ "@rollup/rollup-linux-x64-musl": 4.36.0
+ "@rollup/rollup-win32-arm64-msvc": 4.36.0
+ "@rollup/rollup-win32-ia32-msvc": 4.36.0
+ "@rollup/rollup-win32-x64-msvc": 4.36.0
fsevents: 2.3.3
dev: true
@@ -5325,10 +5704,10 @@ packages:
hasBin: true
dev: true
- /semver@7.6.2:
+ /semver@7.7.1:
resolution:
{
- integrity: sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==,
+ integrity: sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==,
}
engines: { node: ">=10" }
hasBin: true
@@ -5365,7 +5744,7 @@ packages:
integrity: sha512-dNPAPrxSc87ua2sKJ3H5dQ/6ZaY8RNnaAqK+t0eG7p0Soi2ydiqbGOTaZCqaYvA/uZYfS1LJnemt3Q+mSfcPCg==,
}
dependencies:
- ansi-sequence-parser: 1.1.1
+ ansi-sequence-parser: 1.1.3
jsonc-parser: 3.3.1
vscode-oniguruma: 1.7.0
vscode-textmate: 8.0.0
@@ -5386,10 +5765,10 @@ packages:
engines: { node: ">=14" }
dev: true
- /simple-git@3.25.0:
+ /simple-git@3.27.0:
resolution:
{
- integrity: sha512-KIY5sBnzc4yEcJXW7Tdv4viEz8KyG+nU0hay+DWZasvdFOYKeUZ6Xc25LUHHjw0tinPT7O1eY6pzX7pRT1K8rw==,
+ integrity: sha512-ivHoFS9Yi9GY49ogc6/YAi3Fl9ROnF4VyubNylgCkA+RVqLaKWnDSzXOVzya8csELIaWaYNutsEuAhZrtOjozA==,
}
dependencies:
"@kwsites/file-exists": 1.1.1
@@ -5431,15 +5810,15 @@ packages:
dependencies:
agent-base: 6.0.2
debug: 4.4.0
- socks: 2.8.3
+ socks: 2.8.4
transitivePeerDependencies:
- supports-color
dev: true
- /socks@2.8.3:
+ /socks@2.8.4:
resolution:
{
- integrity: sha512-l5x7VUUWbjVFbafGLxPWkYsHIhEvmF85tbIeFZWc8ZPtoMyybuEhL7Jye/ooC4/d48FgOjSJXgsF/AJPYCW8Zw==,
+ integrity: sha512-D3YaD0aRxR3mEcqnidIs7ReYJFVzWdd6fXJYUM8ixcQcJRGTka/b3saV0KflYhyVJXKhb947GndU35SxYNResQ==,
}
engines: { node: ">= 10.0.0", npm: ">= 3.0.0" }
dependencies:
@@ -5485,15 +5864,6 @@ packages:
whatwg-url: 7.1.0
dev: true
- /spline-interpolator@1.0.0:
- resolution:
- {
- integrity: sha512-s8lowgsWE5wjHGEsk/4VADp7xAHw+pNy3OGp96fYjVTwLSx/83+BBmTFP2wZDRM0kj45q8zSyOV5fUcGn4hLEw==,
- }
- dependencies:
- d3-array: 0.7.1
- dev: false
-
/sprintf-js@1.0.3:
resolution:
{
@@ -5528,6 +5898,21 @@ packages:
escape-string-regexp: 2.0.0
dev: true
+ /statuses@2.0.1:
+ resolution:
+ {
+ integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==,
+ }
+ engines: { node: ">= 0.8" }
+ dev: true
+
+ /strict-event-emitter@0.5.1:
+ resolution:
+ {
+ integrity: sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==,
+ }
+ dev: true
+
/string-length@4.0.2:
resolution:
{
@@ -5631,16 +6016,6 @@ packages:
ts-interface-checker: 0.1.13
dev: true
- /supports-color@5.5.0:
- resolution:
- {
- integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==,
- }
- engines: { node: ">=4" }
- dependencies:
- has-flag: 3.0.0
- dev: true
-
/supports-color@7.2.0:
resolution:
{
@@ -5683,6 +6058,21 @@ packages:
yallist: 4.0.0
dev: true
+ /tar@7.4.3:
+ resolution:
+ {
+ integrity: sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==,
+ }
+ engines: { node: ">=18" }
+ dependencies:
+ "@isaacs/fs-minipass": 4.0.1
+ chownr: 3.0.0
+ minipass: 7.1.2
+ minizlib: 3.0.1
+ mkdirp: 3.0.1
+ yallist: 5.0.0
+ dev: true
+
/test-exclude@6.0.0:
resolution:
{
@@ -5739,22 +6129,27 @@ packages:
}
dev: true
- /to-fast-properties@2.0.0:
+ /to-regex-range@5.0.1:
resolution:
{
- integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==,
+ integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==,
}
- engines: { node: ">=4" }
+ engines: { node: ">=8.0" }
+ dependencies:
+ is-number: 7.0.0
dev: true
- /to-regex-range@5.0.1:
+ /tough-cookie@4.1.4:
resolution:
{
- integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==,
+ integrity: sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==,
}
- engines: { node: ">=8.0" }
+ engines: { node: ">=6" }
dependencies:
- is-number: 7.0.0
+ psl: 1.15.0
+ punycode: 2.3.1
+ universalify: 0.2.0
+ url-parse: 1.5.10
dev: true
/tr46@0.0.3:
@@ -5787,10 +6182,10 @@ packages:
}
dev: true
- /ts-jest@29.1.5(@babel/core@7.24.7)(esbuild@0.25.1)(jest@29.7.0)(typescript@5.5.3):
+ /ts-jest@29.2.6(@babel/core@7.26.10)(esbuild@0.25.1)(jest@29.7.0)(typescript@5.8.2):
resolution:
{
- integrity: sha512-UuClSYxM7byvvYfyWdFI+/2UxMmwNyJb0NPkZPQE2hew3RurV7l7zURgOHAd/1I1ZdPpe3GUsXNXAcN8TFKSIg==,
+ integrity: sha512-yTNZVZqc8lSixm+QGVFcPe6+yj7+TWZwIesuOWvfcn4B9bz5x4NDzVCQQjOs7Hfouu36aEqfEbo9Qpo+gq8dDg==,
}
engines: { node: ^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0 }
hasBin: true
@@ -5814,21 +6209,22 @@ packages:
esbuild:
optional: true
dependencies:
- "@babel/core": 7.24.7
+ "@babel/core": 7.26.10
bs-logger: 0.2.6
+ ejs: 3.1.10
esbuild: 0.25.1
fast-json-stable-stringify: 2.1.0
- jest: 29.7.0(@types/node@20.14.10)
+ jest: 29.7.0(@types/node@20.17.24)
jest-util: 29.7.0
json5: 2.2.3
lodash.memoize: 4.1.2
make-error: 1.3.6
- semver: 7.6.2
- typescript: 5.5.3
+ semver: 7.7.1
+ typescript: 5.8.2
yargs-parser: 21.1.1
dev: true
- /tsup@8.4.0(tsx@3.14.0)(typescript@5.5.3):
+ /tsup@8.4.0(tsx@3.14.0)(typescript@5.8.2):
resolution:
{
integrity: sha512-b+eZbPCjz10fRryaAA7C8xlIHnf8VnsaRqydheLIqwG/Mcpfk8Z5zp3HayX7GaTygkigHl5cBUs+IhcySiIexQ==,
@@ -5853,20 +6249,20 @@ packages:
bundle-require: 5.1.0(esbuild@0.25.1)
cac: 6.7.14
chokidar: 4.0.3
- consola: 3.4.0
+ consola: 3.4.1
debug: 4.4.0
esbuild: 0.25.1
joycon: 3.1.1
picocolors: 1.1.1
postcss-load-config: 6.0.1(tsx@3.14.0)
resolve-from: 5.0.0
- rollup: 4.35.0
+ rollup: 4.36.0
source-map: 0.8.0-beta.0
sucrase: 3.35.0
tinyexec: 0.3.2
tinyglobby: 0.2.12
tree-kill: 1.2.2
- typescript: 5.5.3
+ typescript: 5.8.2
transitivePeerDependencies:
- jiti
- supports-color
@@ -5882,7 +6278,7 @@ packages:
hasBin: true
dependencies:
esbuild: 0.18.20
- get-tsconfig: 4.7.5
+ get-tsconfig: 4.10.0
source-map-support: 0.5.21
optionalDependencies:
fsevents: 2.3.3
@@ -5904,6 +6300,14 @@ packages:
engines: { node: ">=10" }
dev: true
+ /type-fest@4.37.0:
+ resolution:
+ {
+ integrity: sha512-S/5/0kFftkq27FPNye0XM1e2NsnoD/3FS+pBmbjmmtLT6I+i344KoOf7pvXreaFsDamWeaJX55nczA1m5PsBDg==,
+ }
+ engines: { node: ">=16" }
+ dev: true
+
/typedoc-plugin-markdown@3.17.1(typedoc@0.25.13):
resolution:
{
@@ -5913,10 +6317,10 @@ packages:
typedoc: ">=0.24.0"
dependencies:
handlebars: 4.7.8
- typedoc: 0.25.13(typescript@5.5.3)
+ typedoc: 0.25.13(typescript@5.8.2)
dev: true
- /typedoc@0.25.13(typescript@5.5.3):
+ /typedoc@0.25.13(typescript@5.8.2):
resolution:
{
integrity: sha512-pQqiwiJ+Z4pigfOnnysObszLiU3mVLWAExSPf+Mu06G/qsc3wzbuM56SZQvONhHLncLUhYzOVkjFFpFfL5AzhQ==,
@@ -5930,13 +6334,13 @@ packages:
marked: 4.3.0
minimatch: 9.0.5
shiki: 0.14.7
- typescript: 5.5.3
+ typescript: 5.8.2
dev: true
- /typescript@5.5.3:
+ /typescript@5.8.2:
resolution:
{
- integrity: sha512-/hreyEujaB0w76zKo6717l3L0o/qEUtRgdvUBvlkhoWeOVMjMuHNHk0BRBzikzuGDqNmPQbg5ifMEqsHLiIUcQ==,
+ integrity: sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==,
}
engines: { node: ">=14.17" }
hasBin: true
@@ -5958,6 +6362,13 @@ packages:
{
integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==,
}
+ dev: false
+
+ /undici-types@6.19.8:
+ resolution:
+ {
+ integrity: sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==,
+ }
/unique-filename@2.0.1:
resolution:
@@ -5979,28 +6390,37 @@ packages:
imurmurhash: 0.1.4
dev: true
- /update-browserslist-db@1.1.0(browserslist@4.23.1):
+ /universalify@0.2.0:
+ resolution:
+ {
+ integrity: sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==,
+ }
+ engines: { node: ">= 4.0.0" }
+ dev: true
+
+ /update-browserslist-db@1.1.3(browserslist@4.24.4):
resolution:
{
- integrity: sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==,
+ integrity: sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==,
}
hasBin: true
peerDependencies:
browserslist: ">= 4.21.0"
dependencies:
- browserslist: 4.23.1
- escalade: 3.1.2
+ browserslist: 4.24.4
+ escalade: 3.2.0
picocolors: 1.1.1
dev: true
- /uri-js@4.4.1:
+ /url-parse@1.5.10:
resolution:
{
- integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==,
+ integrity: sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==,
}
dependencies:
- punycode: 2.3.1
- dev: false
+ querystringify: 2.2.0
+ requires-port: 1.0.0
+ dev: true
/util-deprecate@1.0.2:
resolution:
@@ -6151,6 +6571,18 @@ packages:
}
dev: true
+ /wrap-ansi@6.2.0:
+ resolution:
+ {
+ integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ ansi-styles: 4.3.0
+ string-width: 4.2.3
+ strip-ansi: 6.0.1
+ dev: true
+
/wrap-ansi@7.0.0:
resolution:
{
@@ -6215,10 +6647,18 @@ packages:
}
dev: true
- /yaml@2.4.5:
+ /yallist@5.0.0:
+ resolution:
+ {
+ integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==,
+ }
+ engines: { node: ">=18" }
+ dev: true
+
+ /yaml@2.7.0:
resolution:
{
- integrity: sha512-aBx2bnqDzVOyNKfsysjA2ms5ZlnjSAW2eG3/L5G/CSujfjLJTJsEw1bGw8kCf04KodQWk1pxlGnZ56CRxiawmg==,
+ integrity: sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA==,
}
engines: { node: ">= 14" }
hasBin: true
@@ -6240,7 +6680,7 @@ packages:
engines: { node: ">=12" }
dependencies:
cliui: 8.0.1
- escalade: 3.1.2
+ escalade: 3.2.0
get-caller-file: 2.0.5
require-directory: 2.1.1
string-width: 4.2.3
@@ -6256,20 +6696,28 @@ packages:
engines: { node: ">=10" }
dev: true
- /zod-to-json-schema@3.23.1(zod@3.23.8):
+ /yoctocolors-cjs@2.1.2:
+ resolution:
+ {
+ integrity: sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA==,
+ }
+ engines: { node: ">=18" }
+ dev: true
+
+ /zod-to-json-schema@3.24.4(zod@3.24.2):
resolution:
{
- integrity: sha512-oT9INvydob1XV0v1d2IadrR74rLtDInLvDFfAa1CG0Pmg/vxATk7I2gSelfj271mbzeM4Da0uuDQE/Nkj3DWNw==,
+ integrity: sha512-0uNlcvgabyrni9Ag8Vghj21drk7+7tp7VTwwR7KxxXXc/3pbXz2PHlDgj3cICahgF1kHm4dExBFj7BXrZJXzig==,
}
peerDependencies:
- zod: ^3.23.3
+ zod: ^3.24.1
dependencies:
- zod: 3.23.8
+ zod: 3.24.2
dev: false
- /zod@3.23.8:
+ /zod@3.24.2:
resolution:
{
- integrity: sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==,
+ integrity: sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==,
}
dev: false
diff --git a/py/autoevals/__init__.py b/py/autoevals/__init__.py
index af69f5c..807988e 100644
--- a/py/autoevals/__init__.py
+++ b/py/autoevals/__init__.py
@@ -1,32 +1,123 @@
-"""
-AutoEvals is a tool to quickly and easily evaluate AI model outputs.
+"""Autoevals is a comprehensive toolkit for evaluating AI model outputs.
+
+This library provides a collection of specialized scorers for different types of evaluations:
+
+- `string`: Text similarity using edit distance or embeddings
+- `llm`: LLM-based evaluation for correctness, complexity, security, etc.
+- `moderation`: Content safety and policy compliance checks
+- `ragas`: Advanced NLP metrics for RAG system evaluation
+- `json`: JSON validation and structural comparison
+- `number`: Numeric similarity with relative scaling
+- `value`: Exact matching and basic comparisons
+
+**Key features**:
-### Quickstart
+- Both sync and async evaluation support
+- Configurable scoring parameters
+- Detailed feedback through metadata
+- Integration with OpenAI and other LLM providers through Braintrust AI Proxy
-```bash
-pip install autoevals
+**Client setup**:
+
+There are two ways to configure the OpenAI client:
+
+1. Global initialization (recommended):
+
+```python
+from autoevals import init
+from openai import AsyncOpenAI
+
+# Set up once at the start of your application
+client = AsyncOpenAI()
+init(client=client)
```
-### Example
+2. Per-evaluator initialization:
+
+```python
+from openai import AsyncOpenAI
+from autoevals.ragas import CloseQA
+
+# Pass client directly to evaluator
+client = AsyncOpenAI()
+evaluator = CloseQA(client=client)
+```
+
+**Multi-provider support via the Braintrust AI Proxy**:
+
+Autoevals supports multiple LLM providers (Anthropic, Azure, etc.) through the Braintrust AI Proxy.
+Configure your client to use the proxy:
```python
-from autoevals.llm import *
+import os
+from openai import AsyncOpenAI
+from autoevals.llm import Correctness
+
+# Configure client to use Braintrust AI Proxy
+client = AsyncOpenAI(
+ base_url="https://api.braintrustproxy.com/v1",
+ api_key=os.getenv("BRAINTRUST_API_KEY"),
+)
+
+# Use with any evaluator
+evaluator = Correctness(client=client)
+```
+
+**Braintrust integration**:
+
+Autoevals automatically integrates with Braintrust logging when the `braintrust` package is installed. If needed, you can manually wrap the client:
+
+```python
+from openai import AsyncOpenAI
+from braintrust import wrap_openai
+from autoevals.ragas import CloseQA
+
+# Explicitly wrap the client if needed
+client = wrap_openai(AsyncOpenAI())
+evaluator = CloseQA(client=client)
+```
+
+**Example Autoevals usage**:
+
+```python
+from autoevals.ragas import CloseQA
+import asyncio
+
+async def evaluate_qa():
+ # Create evaluator for question answering
+ evaluator = CloseQA()
+
+ # Question and context
+ question = "What was the purpose of the Apollo missions?"
+ context = '''
+ The Apollo program was a NASA space program that ran from 1961 to 1972,
+ with the goal of landing humans on the Moon and bringing them safely back
+ to Earth. The program achieved its most famous success when Apollo 11
+ astronauts Neil Armstrong and Buzz Aldrin became the first humans to walk
+ on the Moon on July 20, 1969.
+ '''
-# Create a new LLM-based evaluator
-evaluator = Factuality()
+ # Two different answers to evaluate
+ answer = "The Apollo program's main goal was to land humans on the Moon and return them safely to Earth."
+ expected = "The Apollo missions were designed to achieve human lunar landing and safe return."
-# Evaluate an example LLM completion
-input = "Which country has the highest population?"
-output = "People's Republic of China"
-expected = "China"
+ # Evaluate the answer
+ result = await evaluator.eval_async(
+ question=question,
+ context=context,
+ output=answer,
+ expected=expected
+ )
-result = evaluator(output, expected, input=input)
+ print(f"Score: {result.score}") # Semantic similarity score (0-1)
+ print(f"Rationale: {result.metadata.rationale}") # Detailed explanation
+ print(f"Faithfulness: {result.metadata.faithfulness}") # Context alignment
-# The evaluator returns a score from [0,1] and includes the raw outputs from the evaluator
-print(f"Factuality score: {result.score}")
-print(f"Factuality metadata: {result.metadata['rationale']}")
+# Run async evaluation
+asyncio.run(evaluate_qa())
```
+See individual module documentation for detailed usage and options.
"""
from braintrust_core.score import Score, Scorer
diff --git a/py/autoevals/json.py b/py/autoevals/json.py
index acd198a..6e61b98 100644
--- a/py/autoevals/json.py
+++ b/py/autoevals/json.py
@@ -1,3 +1,18 @@
+"""JSON evaluation scorers for comparing and validating JSON data.
+
+This module provides scorers for working with JSON data:
+
+- JSONDiff: Compare JSON objects for structural and content similarity
+ - Handles nested structures, strings, numbers
+ - Customizable with different scorers for string and number comparisons
+ - Can automatically parse JSON strings
+
+- ValidJSON: Validate if a string is valid JSON and matches an optional schema
+ - Validates JSON syntax
+ - Optional JSON Schema validation
+ - Works with both strings and parsed objects
+"""
+
import json
from braintrust_core.score import Score, Scorer
@@ -10,9 +25,57 @@
class JSONDiff(ScorerWithPartial):
- """
- A simple scorer that compares JSON objects, using a customizable comparison method for strings
- (defaults to Levenshtein) and numbers (defaults to NumericDiff).
+ """Compare JSON objects for structural and content similarity.
+
+ This scorer recursively compares JSON objects, handling:
+ - Nested dictionaries and lists
+ - String similarity using Levenshtein distance
+ - Numeric value comparison
+ - Automatic parsing of JSON strings
+
+ Example:
+ ```python
+ import asyncio
+ from openai import AsyncOpenAI
+ from autoevals import JSONDiff
+ from autoevals.string import EmbeddingSimilarity
+
+ async def compare_json():
+ # Initialize with async client for string comparison
+ client = AsyncOpenAI()
+ string_scorer = EmbeddingSimilarity(client=client)
+
+ diff = JSONDiff(string_scorer=string_scorer)
+
+ result = await diff.eval_async(
+ output={
+ "name": "John Smith",
+ "age": 30,
+ "skills": ["python", "javascript"]
+ },
+ expected={
+ "name": "John A. Smith",
+ "age": 31,
+ "skills": ["python", "typescript"]
+ }
+ )
+
+ print(result.score) # Similarity score between 0-1
+ print(result.metadata) # Detailed comparison breakdown
+
+ # Run the async evaluation
+ asyncio.run(compare_json())
+ ```
+
+ Args:
+ string_scorer: Optional custom scorer for string comparisons (default: Levenshtein)
+ number_scorer: Optional custom scorer for number comparisons (default: NumericDiff)
+ preserve_strings: Don't attempt to parse strings as JSON (default: False)
+
+ Returns:
+ Score object with:
+ - score: Similarity score between 0-1
+ - metadata: Detailed comparison breakdown
"""
def __init__(self, string_scorer: Scorer = None, number_scorer: Scorer = None, preserve_strings: bool = False):
@@ -59,9 +122,59 @@ def json_diff(self, o1, o2):
class ValidJSON(ScorerWithPartial):
- """
- A binary scorer that evaluates the validity of JSON output, optionally validating against a
- JSON Schema definition (see https://json-schema.org/learn/getting-started-step-by-step#create).
+ """Validate if a string is valid JSON and optionally matches a schema.
+
+ This scorer checks if:
+ - The input can be parsed as valid JSON
+ - The parsed JSON matches an optional JSON Schema
+ - Handles both string inputs and pre-parsed JSON objects
+
+ Example:
+ ```python
+ import asyncio
+ from autoevals import ValidJSON
+
+ async def validate_json():
+ # Define a schema to validate against
+ schema = {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "age": {"type": "number"},
+ "skills": {
+ "type": "array",
+ "items": {"type": "string"}
+ }
+ },
+ "required": ["name", "age"]
+ }
+
+ validator = ValidJSON(schema=schema)
+
+ result = await validator.eval_async(
+ output='''
+ {
+ "name": "John Smith",
+ "age": 30,
+ "skills": ["python", "javascript"]
+ }
+ '''
+ )
+
+ print(result.score) # 1 if valid, 0 if invalid
+ print(result.metadata) # Validation details or error messages
+
+ # Run the async validation
+ asyncio.run(validate_json())
+ ```
+
+ Args:
+ schema: Optional JSON Schema to validate against
+
+ Returns:
+ Score object with:
+ - score: 1 if valid JSON (and matches schema if provided), 0 otherwise
+ - metadata: Validation details or error messages
"""
def __init__(self, schema=None):
diff --git a/py/autoevals/llm.py b/py/autoevals/llm.py
index 78f6387..f07272f 100644
--- a/py/autoevals/llm.py
+++ b/py/autoevals/llm.py
@@ -1,3 +1,49 @@
+"""LLM-based evaluation scorers for assessing model outputs.
+
+This module provides a collection of pre-built LLM scorers for common evaluation tasks.
+
+All evaluators accept the following common arguments:
+- model: Model to use (defaults to gpt-4)
+- temperature: Controls randomness (0-1, defaults to 0)
+- client: OpenAI client (defaults to global client from init())
+
+Example:
+```python
+from openai import OpenAI
+from autoevals import Battle, Factuality, ClosedQA, init
+
+# Initialize with your OpenAI client (or pass client= to individual scorers)
+init(OpenAI())
+
+# Compare solutions
+battle = Battle()
+result = battle.eval(
+ instructions="Write a function to sort a list",
+ output="def quicksort(arr): ...",
+ expected="def bubblesort(arr): ..."
+)
+print(result.score) # 1 if better, 0 if worse
+print(result.metadata["rationale"]) # Explanation of comparison
+
+# Check factual accuracy
+factual = Factuality()
+result = factual.eval(
+ output="Paris is the largest city in France",
+ expected="Paris is the capital and largest city in France"
+)
+print(result.score) # 1 if accurate, 0 if inaccurate
+
+# Evaluate answer correctness
+qa = ClosedQA()
+result = qa.eval(
+ input="What is the capital of France?",
+ output="Paris",
+ criteria="Must be exact city name"
+)
+print(result.score) # 1 if correct, 0 if incorrect
+```
+"""
+
import json
import os
import re
@@ -11,7 +57,7 @@
from autoevals.partial import ScorerWithPartial
-from .oai import LLMClient, arun_cached_request, run_cached_request
+from .oai import Client, arun_cached_request, run_cached_request
# Disable HTML escaping in chevron.
chevron.renderer._html_escape = lambda x: x # type: ignore[attr-defined]
@@ -80,10 +126,10 @@ def build_classification_tools(useCoT, choice_strings):
class OpenAIScorer(ScorerWithPartial):
def __init__(
self,
- api_key=None,
- base_url=None,
- client: Optional[LLMClient] = None,
- ):
+ api_key: Optional[str] = None,
+ base_url: Optional[str] = None,
+ client: Optional[Client] = None,
+ ) -> None:
self.extra_args = {}
if api_key:
self.extra_args["api_key"] = api_key
@@ -96,11 +142,11 @@ def __init__(
class OpenAILLMScorer(OpenAIScorer):
def __init__(
self,
- temperature=None,
- api_key=None,
- base_url=None,
- client: Optional[LLMClient] = None,
- ):
+ temperature: Optional[float] = None,
+ api_key: Optional[str] = None,
+ base_url: Optional[str] = None,
+ client: Optional[Client] = None,
+ ) -> None:
super().__init__(
api_key=api_key,
base_url=base_url,
@@ -123,7 +169,7 @@ def __init__(
engine=None,
api_key=None,
base_url=None,
- client: Optional[LLMClient] = None,
+ client: Optional[Client] = None,
):
super().__init__(
client=client,
@@ -226,9 +272,49 @@ class ModelGradedSpec:
class LLMClassifier(OpenAILLMClassifier):
+ """High-level classifier for evaluating text using LLMs.
+
+ This is the main class for building custom classifiers. It provides:
+ - Chain of thought reasoning for better accuracy
+ - Standardized output parsing
+ - Template-based prompts
+ - YAML configuration support
+ - Flexible scoring rules
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import init
+ from autoevals.llm import LLMClassifier
+
+ # Create a classifier for toxicity evaluation
+ classifier = LLMClassifier(
+ name="toxicity", # Name for tracking
+ prompt_template="Rate if this text is toxic: {{output}}", # Template with variables
+ choice_scores={"toxic": 0, "not_toxic": 1}, # Mapping choices to scores
+ client=OpenAI() # Optional: could use init() to set a global client instead
+ )
+
+ # Evaluate some text
+ result = classifier.eval(output="some text to evaluate")
+ print(result.score) # Score between 0-1 based on choice_scores
+ print(result.metadata) # Additional evaluation details
+ ```
+
+ Args:
+ name: Classifier name for tracking
+ prompt_template: Template for generating prompts (supports `{{output}}`, `{{expected}}`, etc.)
+ choice_scores: Mapping of choices to scores (e.g. `{"good": 1, "bad": 0}`)
+ model: Model to use. Defaults to DEFAULT_MODEL.
+ use_cot: Enable chain of thought reasoning. Defaults to True.
+ max_tokens: Maximum tokens to generate. Defaults to 512.
+ temperature: Controls randomness (0-1). Defaults to 0.
+ engine: Deprecated by OpenAI. Use model instead.
+ api_key: Deprecated. Use client instead.
+ base_url: Deprecated. Use client instead.
+ client: OpenAI client. If not provided, uses global client from init().
+ **extra_render_args: Additional template variables
"""
- An LLM-based classifier that wraps `OpenAILLMClassifier` and provides a standard way to
- apply chain of thought, parse the output, and score the result."""
_SPEC_FILE_CONTENTS: Dict[str, str] = defaultdict(str)
@@ -244,7 +330,7 @@ def __init__(
engine=None,
api_key=None,
base_url=None,
- client: Optional[LLMClient] = None,
+ client: Optional[Client] = None,
**extra_render_args,
):
choice_strings = list(choice_scores.keys())
@@ -273,11 +359,11 @@ def __init__(
)
@classmethod
- def from_spec(cls, name: str, spec: ModelGradedSpec, client: Optional[LLMClient] = None, **kwargs):
+ def from_spec(cls, name: str, spec: ModelGradedSpec, client: Optional[Client] = None, **kwargs):
return cls(name, spec.prompt, spec.choice_scores, client=client, **kwargs)
@classmethod
- def from_spec_file(cls, name: str, path: str, client: Optional[LLMClient] = None, **kwargs):
+ def from_spec_file(cls, name: str, path: str, client: Optional[Client] = None, **kwargs):
if cls._SPEC_FILE_CONTENTS[name] == "":
with open(path) as f:
cls._SPEC_FILE_CONTENTS[name] = f.read()
@@ -295,7 +381,7 @@ def __new__(
temperature=None,
api_key=None,
base_url=None,
- client: Optional[LLMClient] = None,
+ client: Optional[Client] = None,
):
kwargs = {}
if model is not None:
@@ -327,66 +413,416 @@ def __new__(
class Battle(SpecFileClassifier):
+ """Compare if a solution performs better than a reference solution.
+
+ This evaluator uses LLM-based comparison to determine if a generated solution is better
+ than a reference solution, considering factors like:
+ - Code quality and readability
+ - Algorithm efficiency and complexity
+ - Implementation completeness
+ - Best practices and patterns
+ - Error handling and edge cases
+
+ Example:
+ ```python
+ import asyncio
+ from openai import AsyncOpenAI
+ from autoevals import Battle
+
+ async def evaluate_solutions():
+ # Initialize with async client
+ client = AsyncOpenAI()
+ battle = Battle(client=client)
+
+ result = await battle.eval_async(
+ instructions="Write a function to sort a list of integers in ascending order",
+ output='''
+ def quicksort(arr):
+ if len(arr) <= 1:
+ return arr
+ pivot = arr[len(arr) // 2]
+ left = [x for x in arr if x < pivot]
+ middle = [x for x in arr if x == pivot]
+ right = [x for x in arr if x > pivot]
+ return quicksort(left) + middle + quicksort(right)
+ ''',
+ expected='''
+ def bubblesort(arr):
+ n = len(arr)
+ for i in range(n):
+ for j in range(0, n - i - 1):
+ if arr[j] > arr[j + 1]:
+ arr[j], arr[j + 1] = arr[j + 1], arr[j]
+ return arr
+ '''
+ )
+
+ print(result.score) # 1 if output is better, 0 if worse
+ print(result.metadata["rationale"]) # Detailed comparison explanation
+ print(result.metadata["choice"]) # Selected choice (better/worse)
+
+ # Run the async evaluation
+ asyncio.run(evaluate_solutions())
+ ```
+
+ Args:
+ instructions: Problem description or task requirements that both solutions should address
+ output: Solution to evaluate (code, text, or other content)
+ expected: Reference solution to compare against
+
+ Returns:
+ Score object with:
+ - score: 1 if output solution is better, 0 if worse
+ - metadata.rationale: Detailed explanation of the comparison
+ - metadata.choice: Selected choice (better/worse)
"""
- Test whether an output _better_ performs the `instructions` than the original
- (`expected`) value."""
pass
class ClosedQA(SpecFileClassifier):
+ """Evaluate answer correctness using the model's knowledge.
+
+ Example:
+ ```python
+ from autoevals import ClosedQA, init
+ from openai import OpenAI
+
+ init(OpenAI())
+
+ qa = ClosedQA()
+ result = qa.eval(
+ input="What is the capital of France?",
+ output="Paris",
+ criteria="Must be exact city name"
+ )
+ print(result.score) # 1 if correct, 0 if incorrect
+ ```
+
+ Args:
+ input: Question to evaluate
+ output: Answer to assess
+ criteria: Optional evaluation criteria
"""
- Test whether an output answers the `input` using knowledge built into the model. You
- can specify `criteria` to further constrain the answer."""
pass
class Humor(SpecFileClassifier):
+ """Rate the humor level in text.
+
+ Example:
+ ```python
+ from autoevals import Humor, init
+ from openai import OpenAI
+
+ init(OpenAI())
+
+ humor = Humor()
+ result = humor.eval(
+ output="Why did the developer quit? They didn't get arrays!"
+ )
+ print(result.score) # 1 if funny, 0 if not
+ print(result.metadata["rationale"]) # Explanation
+ ```
+
+ Args:
+ output: Text to evaluate for humor
"""
- Test whether an output is funny."""
pass
class Factuality(SpecFileClassifier):
+ """Check factual accuracy against a reference.
+
+ Example:
+ ```python
+ from autoevals import Factuality, init
+ from openai import OpenAI
+
+ init(OpenAI())
+
+ factual = Factuality()
+ result = factual.eval(
+ output="Paris is the largest city in France",
+ expected="Paris is the capital and largest city in France"
+ )
+ print(result.score) # 1 if accurate, 0 if inaccurate
+ ```
+
+ Args:
+ output: Text to check
+ expected: Reference text with correct facts
"""
- Test whether an output is factual, compared to an original (`expected`) value."""
pass
class Possible(SpecFileClassifier):
+ """Evaluate if a solution is feasible and practical.
+
+ Example:
+ ```python
+ from autoevals import Possible, init
+ from openai import OpenAI
+
+ init(OpenAI())
+
+ possible = Possible()
+ result = possible.eval(
+ input="Design a system to handle 1M users",
+ output="We'll use a distributed architecture..."
+ )
+ print(result.score) # 1 if feasible, 0 if not
+ ```
+
+ Args:
+ input: Problem description
+ output: Proposed solution
"""
- Test whether an output is a possible solution to the challenge posed in the input."""
pass
class Security(SpecFileClassifier):
+ """Evaluate if a solution has security vulnerabilities.
+
+ This evaluator uses LLM-based analysis to identify potential security issues
+ in code or system designs, checking for common vulnerabilities like:
+ - Injection attacks (SQL, command, etc.)
+ - Authentication/authorization flaws
+ - Data exposure risks
+ - Input validation issues
+ - Unsafe dependencies
+ - Insecure configurations
+ - Common OWASP vulnerabilities
+
+ Example:
+ ```python
+ import asyncio
+ from openai import AsyncOpenAI
+ from autoevals import Security
+
+ async def evaluate_security():
+ # Initialize with async client
+ client = AsyncOpenAI()
+ security = Security(client=client)
+
+ result = await security.eval_async(
+ instructions="Write a function to execute a SQL query with user input",
+ output='''
+ def execute_query(user_input):
+ query = f"SELECT * FROM users WHERE name = '{user_input}'"
+ cursor.execute(query)
+ return cursor.fetchall()
+ '''
+ )
+
+ print(result.score) # 0 if vulnerable, 1 if secure
+ print(result.metadata["rationale"]) # Detailed security analysis
+ print(result.metadata["choice"]) # Selected choice (secure/vulnerable)
+
+ # Run the async evaluation
+ asyncio.run(evaluate_security())
+ ```
+
+ Args:
+ instructions: Context or requirements for the security evaluation
+ output: Code or system design to evaluate for security issues
+
+ Returns:
+ Score object with:
+ - score: 1 if secure, 0 if vulnerable
+ - metadata.rationale: Detailed security analysis
+ - metadata.choice: Selected choice (secure/vulnerable)
+ - metadata.vulnerabilities: List of identified security issues
"""
- Test whether an output is malicious."""
pass
class Sql(SpecFileClassifier):
+ """Compare if two SQL queries are equivalent.
+
+ Example:
+ ```python
+ from autoevals import Sql, init
+ from openai import OpenAI
+
+ init(OpenAI())
+
+ sql = Sql()
+ result = sql.eval(
+ output="SELECT * FROM users WHERE age >= 18",
+ expected="SELECT * FROM users WHERE age > 17"
+ )
+ print(result.score) # 1 if equivalent, 0 if different
+ ```
+
+ Args:
+ output: SQL query to check
+ expected: Reference SQL query
"""
- Test whether a SQL query is semantically the same as a reference (output) query."""
pass
class Summary(SpecFileClassifier):
+ """Evaluate text summarization quality.
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import Summary, init
+
+ init(OpenAI())
+
+ summary = Summary()
+ result = summary.eval(
+ input="Long article text...",
+ output="Brief summary...",
+ expected="Reference summary..."
+ )
+ print(result.score) # Higher is better
+ ```
+
+ Args:
+ input: Original text
+ output: Generated summary
+ expected: Reference summary
"""
- Test whether an output is a better summary of the `input` than the original (`expected`) value."""
pass
class Translation(SpecFileClassifier):
+ """Evaluate translation quality.
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import Translation
+
+ translation = Translation(client=OpenAI())
+ result = translation.eval(
+ input="Hello world!",
+ output="¡Hola mundo!",
+ expected="¡Hola mundo!",
+ language="Spanish"
+ )
+
+ print(result.score) # Higher is better
+ ```
+
+ Args:
+ input: Source text
+ output: Translation to evaluate
+ expected: Reference translation
+ language: Target language
+ """
+
+ pass
+
+
+class Correctness(SpecFileClassifier):
+ """Evaluate if a solution correctly solves a given problem.
+
+ This evaluator uses LLM-based analysis to determine if a solution correctly
+ addresses the given problem requirements, considering aspects like:
+ - Functional correctness
+ - Edge case handling
+ - Input validation
+ - Output format compliance
+ - Implementation completeness
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import Correctness
+
+ correctness = Correctness(client=OpenAI())
+ result = correctness.eval(
+ instructions='''
+ Write a function that takes a list of integers and returns their sum.
+ The function should handle empty lists by returning 0.
+ ''',
+ output='''
+ def sum_list(numbers):
+ if not numbers:
+ return 0
+ return sum(numbers)
+ '''
+ )
+
+ print(result.score) # 1 if correct, 0 if incorrect
+ print(result.metadata["rationale"]) # Detailed explanation
+ print(result.metadata["choice"]) # Selected choice (correct/incorrect)
+ ```
+
+ Args:
+ instructions: Problem description or task requirements to evaluate against
+ output: Solution to evaluate (code, text, or other content)
+
+ Returns:
+ Score object with:
+ - score: 1 if solution is correct, 0 if incorrect
+ - metadata.rationale: Detailed explanation of the evaluation
+ - metadata.choice: Selected choice (correct/incorrect)
+ """
+
+ pass
+
+
+class Complexity(SpecFileClassifier):
+ """Evaluate the complexity and efficiency of a solution.
+
+ This evaluator uses LLM-based analysis to assess various aspects of solution complexity:
+ - Time complexity (Big O notation)
+ - Space complexity
+ - Code readability and maintainability
+ - Implementation efficiency
+ - Resource utilization
+ - Algorithmic optimizations
+ - Design patterns and best practices
+
+ Example:
+ ```python
+    from openai import OpenAI
+    from autoevals import Complexity
+    complexity = Complexity(client=OpenAI())
+ result = complexity.eval(
+ instructions="Implement a function to find duplicates in a list",
+ output='''
+ def find_duplicates(arr):
+ seen = set()
+ duplicates = set()
+ for x in arr:
+ if x in seen:
+ duplicates.add(x)
+ seen.add(x)
+ return list(duplicates)
+ '''
+ )
+
+ print(result.score) # 1 if efficient, 0 if inefficient
+ print(result.metadata["rationale"]) # Detailed complexity analysis
+ print(result.metadata["choice"]) # Selected choice (efficient/inefficient)
+ print(result.metadata["time_complexity"]) # Estimated Big O notation
+ print(result.metadata["space_complexity"]) # Space usage analysis
+ ```
+
+ Args:
+ instructions: Problem description or requirements to evaluate against
+ output: Solution to analyze for complexity (code, algorithm, system design)
+
+ Returns:
+ Score object with:
+ - score: 1 if efficient, 0 if inefficient
+ - metadata.rationale: Detailed complexity analysis
+ - metadata.choice: Selected choice (efficient/inefficient)
+ - metadata.time_complexity: Time complexity analysis
+ - metadata.space_complexity: Space complexity analysis
"""
- Test whether an `output` is as good of a translation of the `input` in the specified `language`
- as an expert (`expected`) value.."""
pass
diff --git a/py/autoevals/moderation.py b/py/autoevals/moderation.py
index 4b4e542..51e8864 100644
--- a/py/autoevals/moderation.py
+++ b/py/autoevals/moderation.py
@@ -4,14 +4,43 @@
from autoevals.llm import OpenAIScorer
-from .oai import LLMClient, arun_cached_request, run_cached_request
+from .oai import Client, arun_cached_request, run_cached_request
REQUEST_TYPE = "moderation"
class Moderation(OpenAIScorer):
- """
- A scorer that uses OpenAI's moderation API to determine if AI response contains ANY flagged content.
+ """A scorer that evaluates if AI responses contain inappropriate or unsafe content.
+
+ This scorer uses OpenAI's moderation API to check for potentially harmful content in text.
+ It returns a score of 0 if content is flagged as inappropriate, and 1 if it passes moderation.
+
+ Args:
+ threshold: Optional float to set a custom threshold for content flagging.
+ If None, uses OpenAI's default flagging logic.
+ If set, content is flagged if any category score exceeds this threshold.
+ client: Optional client for making API calls. Can be:
+ - None: Uses global client from init()
+ - OpenAI v1 client: Used directly
+ - OpenAI v0 module: Wrapped in a client adapter
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import init
+ from autoevals.moderation import Moderation
+
+ # Initialize with your OpenAI client
+ init(OpenAI())
+
+ # Create evaluator with default settings
+ moderator = Moderation()
+ result = moderator.eval(
+ output="This is the text to check for inappropriate content"
+ )
+ print(result.score) # 1 if content is appropriate, 0 if flagged
+ print(result.metadata) # Detailed category scores and threshold used
+ ```
"""
threshold = None
@@ -22,15 +51,29 @@ def __init__(
threshold=None,
api_key=None,
base_url=None,
- client: Optional[LLMClient] = None,
+ client: Optional[Client] = None,
):
- """
- Create a new Moderation scorer.
-
- :param threshold: Optional. Threshold to use to determine whether content has exceeded threshold. By
- default, it uses OpenAI's default. (Using `flagged` from the response payload.)
- :param api_key: OpenAI key
- :param base_url: Base URL to be used to reach OpenAI moderation endpoint.
+ """Initialize a Moderation scorer.
+
+ Args:
+ threshold: Optional float to set a custom threshold for content flagging.
+ If None, uses OpenAI's default flagging logic.
+ If set, content is flagged if any category score exceeds this threshold.
+ client: Optional client for making API calls. Can be:
+ - None: Uses global client from init()
+ - OpenAI v1 client: Used directly
+ - OpenAI v0 module: Wrapped in a client adapter
+ api_key: Deprecated. Use client instead.
+ base_url: Deprecated. Use client instead.
+
+ Note:
+ The api_key and base_url parameters are deprecated and will be removed in a future version.
+ Instead, you can either:
+ 1. Pass a client instance directly to this constructor using the client parameter
+ 2. Set a global client using autoevals.init(client=your_client)
+
+ The global client can be configured once and will be used by all evaluators that don't have
+ a specific client passed to them.
"""
super().__init__(api_key=api_key, base_url=base_url, client=client)
self.threshold = threshold
@@ -71,5 +114,3 @@ def compute_score(moderation_result, threshold):
__all__ = ["Moderation"]
-__all__ = ["Moderation"]
-__all__ = ["Moderation"]
diff --git a/py/autoevals/number.py b/py/autoevals/number.py
index 73ff5cf..610b627 100644
--- a/py/autoevals/number.py
+++ b/py/autoevals/number.py
@@ -1,11 +1,43 @@
+"""Numeric evaluation scorers for comparing numerical values.
+
+This module provides scorers for working with numbers:
+- NumericDiff: Compare numbers using normalized difference, providing a similarity score
+ that accounts for both absolute and relative differences between values.
+
+Features:
+- Normalized scoring between 0 and 1
+- Handles special cases like comparing zeros
+- Accounts for magnitude when computing differences
+- Suitable for both small and large number comparisons
+"""
+
from braintrust_core.score import Score
from autoevals.partial import ScorerWithPartial
class NumericDiff(ScorerWithPartial):
- """
- A simple scorer that compares numbers by normalizing their difference.
+ """Numeric similarity scorer using normalized difference.
+
+ Example:
+ ```python
+ scorer = NumericDiff()
+ result = scorer.eval(
+ output=105,
+ expected=100
+ )
+ print(result.score) # 0.95 (normalized similarity)
+ ```
+
+ Args:
+ output: Number to evaluate
+ expected: Reference number to compare against
+
+ Returns:
+ Score object with normalized similarity (0-1), where:
+ - 1 means identical numbers
+ - Score decreases as difference increases relative to magnitude
+ - Special case: score=1 when both numbers are 0
"""
def _run_eval_sync(self, output, expected=None, **kwargs):
diff --git a/py/autoevals/oai.py b/py/autoevals/oai.py
index 72e9c0d..1566663 100644
--- a/py/autoevals/oai.py
+++ b/py/autoevals/oai.py
@@ -3,13 +3,111 @@
import sys
import textwrap
import time
+import warnings
from contextvars import ContextVar
from dataclasses import dataclass
-from typing import Any, Optional
+from typing import Any, Callable, Dict, Optional, Protocol, Tuple, Type, TypeVar, Union, cast, runtime_checkable
PROXY_URL = "https://api.braintrust.dev/v1/proxy"
+@runtime_checkable
+class ChatCompletions(Protocol):
+ create: Callable[..., Any]
+
+
+@runtime_checkable
+class Chat(Protocol):
+ @property
+ def completions(self) -> ChatCompletions:
+ ...
+
+
+@runtime_checkable
+class Embeddings(Protocol):
+ create: Callable[..., Any]
+
+
+@runtime_checkable
+class Moderations(Protocol):
+ create: Callable[..., Any]
+
+
+@runtime_checkable
+class OpenAIV1Module(Protocol):
+ class OpenAI(Protocol):
+ # Core API resources
+ @property
+ def chat(self) -> Chat:
+ ...
+
+ @property
+ def embeddings(self) -> Embeddings:
+ ...
+
+ @property
+ def moderations(self) -> Moderations:
+ ...
+
+ # Configuration
+ @property
+ def api_key(self) -> str:
+ ...
+
+ @property
+ def organization(self) -> Optional[str]:
+ ...
+
+ @property
+ def base_url(self) -> Union[str, Any, None]:
+ ...
+
+ class AsyncOpenAI(OpenAI):
+ ...
+
+ class RateLimitError(Exception):
+ ...
+
+
+# TODO: we're removing v0 support in the next release
+@runtime_checkable
+class OpenAIV0Module(Protocol):
+ class ChatCompletion(Protocol):
+ acreate: Callable[..., Any]
+ create: Callable[..., Any]
+
+ class Embedding(Protocol):
+ acreate: Callable[..., Any]
+ create: Callable[..., Any]
+
+ class Moderation(Protocol):
+ acreate: Callable[..., Any]
+ create: Callable[..., Any]
+
+ api_key: Optional[str]
+ api_base: Optional[str]
+ base_url: Optional[str]
+
+ class error(Protocol):
+ class RateLimitError(Exception):
+ ...
+
+
+_openai_module: Optional[Union[OpenAIV1Module, OpenAIV0Module]] = None
+
+
+def get_openai_module() -> Union[OpenAIV1Module, OpenAIV0Module]:
+ global _openai_module
+
+ if _openai_module is not None:
+ return _openai_module
+
+ import openai # type: ignore
+
+ _openai_module = cast(Union[OpenAIV1Module, OpenAIV0Module], openai)
+ return _openai_module
+
+
@dataclass
class LLMClient:
"""A client wrapper for LLM operations that supports both OpenAI SDK v0 and v1.
@@ -31,19 +129,25 @@ class LLMClient:
RateLimitError: The rate limit exception class for the SDK version.
- For v0: openai.error.RateLimitError
- For v1: openai.RateLimitError
+ is_async: Whether the client is async (only used for v0 autoconfiguration).
Note:
If using async OpenAI methods you must use the async methods in Autoevals.
+ The client will automatically configure itself if methods are not provided.
Example:
```python
# Using with OpenAI v1
import openai
- client = LLMClient(
- openai=openai,
- complete=openai.chat.completions.create,
- embed=openai.embeddings.create,
- moderation=openai.moderations.create,
+ client = openai.OpenAI() # Configure with your settings
+ llm = LLMClient(openai=client) # Methods will be auto-configured
+
+ # Or with explicit method configuration
+ llm = LLMClient(
+ openai=client,
+ complete=client.chat.completions.create,
+ embed=client.embeddings.create,
+ moderation=client.moderations.create,
RateLimitError=openai.RateLimitError
)
@@ -52,39 +156,125 @@ class LLMClient:
class CustomLLMClient(LLMClient):
def complete(self, **kwargs):
# make adjustments as needed
- return openai.chat.completions.create(**kwargs)
+ return self.openai.chat.completions.create(**kwargs)
```
-
- Note:
- This class is typically instantiated via the `prepare_openai()` function, which handles
- the SDK version detection and proper function assignment automatically.
"""
- openai: Any
- complete: Any
- embed: Any
- moderation: Any
- RateLimitError: Exception
+ openai: Union[OpenAIV0Module, OpenAIV1Module.OpenAI]
+ complete: Callable[..., Any] = None # type: ignore # Set in __post_init__
+ embed: Callable[..., Any] = None # type: ignore # Set in __post_init__
+ moderation: Callable[..., Any] = None # type: ignore # Set in __post_init__
+ RateLimitError: Type[Exception] = None # type: ignore # Set in __post_init__
+ is_async: bool = False
+ _is_wrapped: bool = False
+
+ def __post_init__(self):
+ NamedWrapper, wrap_openai = get_openai_wrappers()
+
+ has_customization = self.complete is not None or self.embed is not None or self.moderation is not None # type: ignore # Pyright doesn't understand our design choice
+
+ # avoid wrapping if we have custom methods (the user may intend not to wrap)
+ if not has_customization and not isinstance(self.openai, NamedWrapper):
+ self.openai = wrap_openai(self.openai)
+
+ self._is_wrapped = isinstance(self.openai, NamedWrapper)
+
+ openai_module = get_openai_module()
+
+ if hasattr(openai_module, "OpenAI"):
+ openai_module = cast(OpenAIV1Module, openai_module)
+ self.openai = cast(OpenAIV1Module.OpenAI, self.openai)
+
+ # v1
+ self.complete = self.openai.chat.completions.create
+ self.embed = self.openai.embeddings.create
+ self.moderation = self.openai.moderations.create
+ self.RateLimitError = openai_module.RateLimitError
+ else:
+ openai_module = cast(OpenAIV0Module, openai_module)
+ self.openai = cast(OpenAIV0Module, self.openai)
+
+ # v0
+ self.complete = self.openai.ChatCompletion.acreate if self.is_async else self.openai.ChatCompletion.create
+ self.embed = self.openai.Embedding.acreate if self.is_async else self.openai.Embedding.create
+ self.moderation = self.openai.Moderation.acreate if self.is_async else self.openai.Moderation.create
+ self.RateLimitError = openai_module.error.RateLimitError
+
+ @property
+ def is_wrapped(self) -> bool:
+ return self._is_wrapped
_client_var = ContextVar[Optional[LLMClient]]("client")
+T = TypeVar("T")
+
+_named_wrapper: Optional[Type[Any]] = None
+_wrap_openai: Optional[Callable[[Any], Any]] = None
+
+
+def get_openai_wrappers() -> Tuple[Type[Any], Callable[[Any], Any]]:
+ global _named_wrapper, _wrap_openai
+
+ if _named_wrapper is not None and _wrap_openai is not None:
+ return _named_wrapper, _wrap_openai
+
+ try:
+ from braintrust.oai import NamedWrapper as BraintrustNamedWrapper # type: ignore
+ from braintrust.oai import wrap_openai # type: ignore
+
+ _named_wrapper = cast(Type[Any], BraintrustNamedWrapper)
+ except ImportError:
+
+ class NamedWrapper:
+ pass
+
+ def wrap_openai(openai: T) -> T:
+ return openai
+
+ _named_wrapper = NamedWrapper
+
+ _wrap_openai = cast(Callable[[Any], Any], wrap_openai)
+ return _named_wrapper, _wrap_openai
+
-def init(*, client: Optional[LLMClient] = None):
+Client = Union[LLMClient, OpenAIV0Module, OpenAIV1Module.OpenAI]
+
+
+def resolve_client(client: Client, is_async: bool = False) -> LLMClient:
+ if isinstance(client, LLMClient):
+ return client
+ return LLMClient(openai=client, is_async=is_async)
+
+
+def init(client: Optional[Client] = None, is_async: bool = False):
"""Initialize Autoevals with an optional custom LLM client.
This function sets up the global client context for Autoevals to use. If no client is provided,
the default OpenAI client will be used.
Args:
- client (Optional[LLMClient]): A custom LLM client instance that implements the LLMClient interface.
- If None, the default OpenAI client will be used.\
+ client: The client to use for LLM operations. Can be one of:
+ - None: Resets the global client
+ - LLMClient: Used directly as provided
+ - OpenAIV0Module: Wrapped in a new LLMClient instance (OpenAI SDK v0)
+ - OpenAIV1: Wrapped in a new LLMClient instance (OpenAI SDK v1)
+ is_async: Whether to create a client with async operations. Defaults to False.
+ Deprecated: Use the `client` argument directly with your desired async/sync configuration.
"""
- _client_var.set(client)
+ _client_var.set(resolve_client(client, is_async=is_async) if client else None)
+
+warned_deprecated_api_key_base_url = False
-def prepare_openai(client: Optional[LLMClient] = None, is_async=False, api_key=None, base_url=None):
- """Prepares and configures an OpenAI client for use with AutoEval, if client is not provided.
+
+def prepare_openai(
+ client: Optional[Client] = None,
+ is_async: bool = False,
+ api_key: Optional[str] = None,
+ base_url: Optional[str] = None,
+):
+ """Prepares and configures an OpenAI client for use with AutoEval.
This function handles both v0 and v1 of the OpenAI SDK, configuring the client
with the appropriate authentication and base URL settings.
@@ -100,117 +290,75 @@ def prepare_openai(client: Optional[LLMClient] = None, is_async=False, api_key=N
api_key (str, optional): OpenAI API key. If not provided, will look for
OPENAI_API_KEY or BRAINTRUST_API_KEY in environment variables.
-
Deprecated: Use the `client` argument and set the `openai`.
base_url (str, optional): Base URL for API requests. If not provided, will
use OPENAI_BASE_URL from environment or fall back to PROXY_URL.
-
Deprecated: Use the `client` argument and set the `openai`.
Returns:
- Tuple[LLMClient, bool]: A tuple containing:
- - The configured LLMClient instance, or the client you've provided
- - A boolean indicating whether the client was wrapped with Braintrust tracing
+ LLMClient: The configured LLMClient instance, or the client you've provided
Raises:
ImportError: If the OpenAI package is not installed
"""
client = client or _client_var.get(None)
+ if client is not None:
+ return resolve_client(client, is_async=is_async)
- openai = getattr(client, "openai", None)
- if not openai:
- try:
- import openai
- except Exception as e:
- print(
- textwrap.dedent(
- f"""\
- Unable to import openai: {e}
-
- Please install it, e.g. with
-
- pip install 'openai'
- """
- ),
- file=sys.stderr,
- )
- raise
-
- openai_obj = openai
-
- is_v1 = False
-
- if hasattr(openai, "OpenAI"):
- # This is the new v1 API
- is_v1 = True
-
- if client is None:
- # prepare the default openai sdk, if not provided
- if api_key is None:
- api_key = os.environ.get("OPENAI_API_KEY") or os.environ.get("BRAINTRUST_API_KEY")
- if base_url is None:
- base_url = os.environ.get("OPENAI_BASE_URL", PROXY_URL)
-
- if is_v1:
- if is_async:
- openai_obj = openai.AsyncOpenAI(api_key=api_key, base_url=base_url)
- else:
- openai_obj = openai.OpenAI(api_key=api_key, base_url=base_url)
- else:
- if api_key:
- openai.api_key = api_key
- openai.api_base = base_url
-
- # optimistically wrap openai instance for tracing
- wrapped = False
try:
- from braintrust.oai import NamedWrapper, wrap_openai
+ openai_module = get_openai_module()
+ except Exception as e:
+ print(
+ textwrap.dedent(
+ f"""\
+ Unable to import openai: {e}
+
+ Please install it, e.g. with
+
+ pip install 'openai'
+ """
+ ),
+ file=sys.stderr,
+ )
+ raise
+
+ global warned_deprecated_api_key_base_url
+ if not warned_deprecated_api_key_base_url and (api_key is not None or base_url is not None):
+ warnings.warn(
+ "The api_key and base_url parameters are deprecated. Please use init() or call with client instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ warned_deprecated_api_key_base_url = True
- if not isinstance(openai_obj, NamedWrapper):
- openai_obj = wrap_openai(openai_obj)
+ # prepare the default openai sdk, if not provided
+ if api_key is None:
+ api_key = os.environ.get("OPENAI_API_KEY") or os.environ.get("BRAINTRUST_API_KEY")
+ if base_url is None:
+ base_url = os.environ.get("OPENAI_BASE_URL", PROXY_URL)
- wrapped = True
- except ImportError:
- pass
-
- if client is None:
- # prepare the default client if not provided
- complete_fn = None
- rate_limit_error = None
-
- Client = LLMClient
-
- if is_v1:
- client = Client(
- openai=openai,
- complete=openai_obj.chat.completions.create,
- embed=openai_obj.embeddings.create,
- moderation=openai_obj.moderations.create,
- RateLimitError=openai.RateLimitError,
- )
+ if hasattr(openai_module, "OpenAI"):
+ openai_module = cast(OpenAIV1Module, openai_module)
+
+ # v1 API
+ if is_async:
+ openai_obj = openai_module.AsyncOpenAI(api_key=api_key, base_url=base_url) # type: ignore
else:
- rate_limit_error = openai.error.RateLimitError
- if is_async:
- complete_fn = openai_obj.ChatCompletion.acreate
- embedding_fn = openai_obj.Embedding.acreate
- moderation_fn = openai_obj.Moderation.acreate
- else:
- complete_fn = openai_obj.ChatCompletion.create
- embedding_fn = openai_obj.Embedding.create
- moderation_fn = openai_obj.Moderation.create
- client = Client(
- openai=openai,
- complete=complete_fn,
- embed=embedding_fn,
- moderation=moderation_fn,
- RateLimitError=rate_limit_error,
- )
-
- return client, wrapped
-
-
-def post_process_response(resp):
+ openai_obj = openai_module.OpenAI(api_key=api_key, base_url=base_url) # type: ignore
+ else:
+ openai_module = cast(OpenAIV0Module, openai_module)
+
+ # v0 API
+ if api_key:
+ openai_module.api_key = api_key
+ openai_module.api_base = base_url
+ openai_obj = openai_module
+
+ return LLMClient(openai=openai_obj, is_async=is_async)
+
+
+def post_process_response(resp: Any) -> Dict[str, Any]:
# This normalizes against craziness in OpenAI v0 vs. v1
if hasattr(resp, "to_dict"):
# v0
@@ -220,19 +368,25 @@ def post_process_response(resp):
return resp.dict()
-def set_span_purpose(kwargs):
+def set_span_purpose(kwargs: Dict[str, Any]) -> None:
kwargs.setdefault("span_info", {}).setdefault("span_attributes", {})["purpose"] = "scorer"
def run_cached_request(
- *, client: Optional[LLMClient] = None, request_type="complete", api_key=None, base_url=None, **kwargs
-):
- wrapper, wrapped = prepare_openai(client=client, is_async=False, api_key=api_key, base_url=base_url)
- if wrapped:
+ *,
+ client: Optional[LLMClient] = None,
+ request_type: str = "complete",
+ api_key: Optional[str] = None,
+ base_url: Optional[str] = None,
+ **kwargs: Any,
+) -> Dict[str, Any]:
+ wrapper = prepare_openai(client=client, is_async=False, api_key=api_key, base_url=base_url)
+ if wrapper.is_wrapped:
set_span_purpose(kwargs)
retries = 0
sleep_time = 0.1
+ resp = None
while retries < 100:
try:
resp = post_process_response(getattr(wrapper, request_type)(**kwargs))
@@ -242,18 +396,26 @@ def run_cached_request(
time.sleep(sleep_time)
retries += 1
+ if resp is None:
+ raise RuntimeError("Failed to get response after maximum retries")
return resp
async def arun_cached_request(
- *, client: Optional[LLMClient] = None, request_type="complete", api_key=None, base_url=None, **kwargs
-):
- wrapper, wrapped = prepare_openai(client=client, is_async=True, api_key=api_key, base_url=base_url)
- if wrapped:
+ *,
+ client: Optional[LLMClient] = None,
+ request_type: str = "complete",
+ api_key: Optional[str] = None,
+ base_url: Optional[str] = None,
+ **kwargs: Any,
+) -> Dict[str, Any]:
+ wrapper = prepare_openai(client=client, is_async=True, api_key=api_key, base_url=base_url)
+ if wrapper.is_wrapped:
set_span_purpose(kwargs)
retries = 0
sleep_time = 0.1
+ resp = None
while retries < 100:
try:
resp = post_process_response(await getattr(wrapper, request_type)(**kwargs))
@@ -264,4 +426,7 @@ async def arun_cached_request(
await asyncio.sleep(sleep_time)
retries += 1
+ if resp is None:
+ raise RuntimeError("Failed to get response after maximum retries")
+
return resp
diff --git a/py/autoevals/ragas.py b/py/autoevals/ragas.py
index 82d094a..e11057c 100644
--- a/py/autoevals/ragas.py
+++ b/py/autoevals/ragas.py
@@ -1,3 +1,59 @@
+"""This module provides evaluators for assessing the quality of context retrieval and answer generation.
+These metrics are ported from the RAGAS project with some enhancements.
+
+**Context quality evaluators**:
+
+ - `ContextEntityRecall`: Measures how well context contains expected entities
+ - `ContextRelevancy`: Evaluates relevance of context to question
+ - `ContextRecall`: Checks if context supports expected answer
+ - `ContextPrecision`: Measures precision of context relative to question
+
+**Answer quality evaluators**:
+
+ - `Faithfulness`: Checks if answer claims are supported by context
+ - `AnswerRelevancy`: Measures answer relevance to question
+ - `AnswerSimilarity`: Compares semantic similarity to expected answer
+ - `AnswerCorrectness`: Evaluates factual correctness against ground truth
+
+**Common arguments**:
+
+ - `model`: Model to use for evaluation, defaults to DEFAULT_RAGAS_MODEL (gpt-3.5-turbo-16k)
+ - `client`: Optional Client for API calls. If not provided, uses global client from init()
+
+**Example**:
+ ```python
+ from openai import OpenAI
+ from autoevals import init
+ from autoevals.ragas import (
+ ContextRelevancy,
+ Faithfulness,
+ )
+
+ # Initialize with your OpenAI client
+ init(OpenAI())
+
+ # Evaluate context relevance
+ relevancy = ContextRelevancy()
+ result = relevancy.eval(
+ input="What is the capital of France?",
+ output="Paris is the capital of France",
+ context="Paris is the capital of France. The city is known for the Eiffel Tower."
+ )
+ print(f"Context relevance score: {result.score}") # 1.0 for highly relevant
+
+ # Check answer faithfulness to context
+ faithfulness = Faithfulness()
+ result = faithfulness.eval(
+ input="What is France's capital city?",
+ output="Paris is the capital of France and has the Eiffel Tower",
+ context="Paris is the capital of France. The city is known for the Eiffel Tower."
+ )
+ print(f"Faithfulness score: {result.score}") # 1.0 for fully supported claims
+ ```
+
+For more examples and detailed usage of each evaluator, see their individual class docstrings.
+"""
+
# These metrics are ported, with some enhancements, from the [RAGAS](https://github.com/explodinggradients/ragas) project.
import asyncio
@@ -9,7 +65,7 @@
from . import Score
from .list import ListContains
from .llm import OpenAILLMScorer
-from .oai import LLMClient, arun_cached_request, run_cached_request
+from .oai import Client, arun_cached_request, run_cached_request
from .string import EmbeddingSimilarity
@@ -77,23 +133,43 @@ def extract_entities_request(text, **extra_args):
)
-async def aextract_entities(*, text, client: Optional[LLMClient] = None, **extra_args):
+async def aextract_entities(*, text, client: Optional[Client] = None, **extra_args):
response = await arun_cached_request(client=client, **extract_entities_request(text=text, **extra_args))
return json.loads(response["choices"][0]["message"]["tool_calls"][0]["function"]["arguments"])
-def extract_entities(*, text, client: Optional[LLMClient] = None, **extra_args):
+def extract_entities(*, text, client: Optional[Client] = None, **extra_args):
response = run_cached_request(client=client, **extract_entities_request(text=text, **extra_args))
return json.loads(response["choices"][0]["message"]["tool_calls"][0]["function"]["arguments"])
class ContextEntityRecall(OpenAILLMScorer):
- """
- Estimates context recall by estimating TP and FN using annotated answer and
- retrieved context.
+ """Measures how well the context contains the entities mentioned in the expected answer.
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import init
+ from autoevals.ragas import ContextEntityRecall
+
+ # Initialize with your OpenAI client
+ init(OpenAI())
+
+ recall = ContextEntityRecall()
+ result = recall.eval(
+ expected="The capital of France is Paris and its population is 2.2 million",
+ context="Paris is a major city in France with a population of 2.2 million people. As the capital city, it is known for the Eiffel Tower."
+ )
+ print(result.score) # Score between 0-1, higher means more entities from expected answer found in context
+ print(result.metadata["entities"]) # List of entities found and their overlap
+ ```
+
+ Args:
+ expected: The expected/ground truth answer containing entities to find
+ context: The context document(s) to search for entities in
"""
- def __init__(self, pairwise_scorer=None, model=DEFAULT_RAGAS_MODEL, client: Optional[LLMClient] = None, **kwargs):
+ def __init__(self, pairwise_scorer=None, model=DEFAULT_RAGAS_MODEL, client: Optional[Client] = None, **kwargs):
super().__init__(client=client, **kwargs)
self.extraction_model = model
@@ -210,12 +286,34 @@ def extract_sentences_request(question, context, **extra_args):
class ContextRelevancy(OpenAILLMScorer):
- """
- Extracts sentences from the context that are relevant to the question with
- self-consistency checks. The number of relevant sentences and is used as the score.
+ """Evaluates how relevant the context is to the input question.
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import init
+ from autoevals.ragas import ContextRelevancy
+
+ # Initialize with your OpenAI client
+ init(OpenAI())
+
+ relevancy = ContextRelevancy()
+ result = relevancy.eval(
+ input="What is the capital of France?",
+ output="Paris is the capital of France",
+ context="Paris is the capital of France. The city is known for the Eiffel Tower."
+ )
+ print(result.score) # Score between 0-1, higher means more relevant context
+ print(result.metadata["relevant_sentences"]) # List of relevant sentences found
+ ```
+
+ Args:
+ input: The question being evaluated
+ output: The generated answer
+ context: The context document(s) to evaluate
"""
- def __init__(self, pairwise_scorer=None, model=DEFAULT_RAGAS_MODEL, client: Optional[LLMClient] = None, **kwargs):
+ def __init__(self, pairwise_scorer=None, model=DEFAULT_RAGAS_MODEL, client: Optional[Client] = None, **kwargs):
super().__init__(client=client, **kwargs)
self.model = model
@@ -346,12 +444,36 @@ def extract_context_recall_request(question, answer, context, **extra_args):
class ContextRecall(OpenAILLMScorer):
- """
- Estimates context recall by estimating TP and FN using annotated answer and
- retrieved context.
+ """Measures how well the context supports the expected answer.
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import init
+ from autoevals.ragas import ContextRecall
+
+ # Initialize with your OpenAI client
+ init(OpenAI())
+
+ recall = ContextRecall()
+ result = recall.eval(
+ input="What is the capital of France?",
+ output="Paris is the capital of France", # The generated answer
+ expected="Paris is the capital of France",
+ context="Paris is the capital of France. The city is known for the Eiffel Tower."
+ )
+ print(result.score) # Score between 0-1, higher means better context recall
+ print(result.metadata["recall"]) # Detailed recall analysis
+ ```
+
+ Args:
+ input: The question being evaluated
+ output: The generated answer
+ expected: The expected/ground truth answer
+ context: The context document(s) to evaluate
"""
- def __init__(self, pairwise_scorer=None, model=DEFAULT_RAGAS_MODEL, client: Optional[LLMClient] = None, **kwargs):
+ def __init__(self, pairwise_scorer=None, model=DEFAULT_RAGAS_MODEL, client: Optional[Client] = None, **kwargs):
super().__init__(client=client, **kwargs)
self.model = model
@@ -367,6 +489,7 @@ def _postprocess(self, response):
score=ones / total,
metadata={
"statements": statements,
+ "recall": statements,
},
)
@@ -481,12 +604,36 @@ def extract_context_precision_request(question, answer, context, **extra_args):
class ContextPrecision(OpenAILLMScorer):
- """
- Average Precision is a metric that evaluates whether all of the
- relevant items selected by the model are ranked higher or not.
+ """Measures how precise and focused the context is for answering the question.
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import init
+ from autoevals.ragas import ContextPrecision
+
+ # Initialize with your OpenAI client
+ init(OpenAI())
+
+ precision = ContextPrecision()
+ result = precision.eval(
+ input="What is the capital of France?",
+ output="Paris is the capital of France", # The generated answer
+ expected="Paris is the capital of France",
+ context="Paris is the capital of France. The city is known for the Eiffel Tower."
+ )
+ print(result.score) # Score between 0-1, higher means more precise context
+ print(result.metadata["precision"]) # Detailed precision analysis
+ ```
+
+ Args:
+ input: The question being evaluated
+ output: The generated answer
+ expected: The expected/ground truth answer
+ context: The context document(s) to evaluate
"""
- def __init__(self, pairwise_scorer=None, model=DEFAULT_RAGAS_MODEL, client: Optional[LLMClient] = None, **kwargs):
+ def __init__(self, pairwise_scorer=None, model=DEFAULT_RAGAS_MODEL, client: Optional[Client] = None, **kwargs):
super().__init__(client=client, **kwargs)
self.model = model
@@ -692,28 +839,28 @@ def extract_faithfulness_request(context, statements, **extra_args):
)
-async def aextract_statements(question, answer, client: Optional[LLMClient] = None, **extra_args):
+async def aextract_statements(question, answer, client: Optional[Client] = None, **extra_args):
response = await arun_cached_request(
client=client, **extract_statements_request(question=question, answer=answer, **extra_args)
)
return load_function_call(response)
-def extract_statements(question, answer, client: Optional[LLMClient] = None, **extra_args):
+def extract_statements(question, answer, client: Optional[Client] = None, **extra_args):
response = run_cached_request(
client=client, **extract_statements_request(question=question, answer=answer, **extra_args)
)
return load_function_call(response)
-async def aextract_faithfulness(context, statements, client: Optional[LLMClient] = None, **extra_args):
+async def aextract_faithfulness(context, statements, client: Optional[Client] = None, **extra_args):
response = await arun_cached_request(
client=client, **extract_faithfulness_request(context=context, statements=statements, **extra_args)
)
return load_function_call(response)
-def extract_faithfulness(context, statements, client: Optional[LLMClient] = None, **extra_args):
+def extract_faithfulness(context, statements, client: Optional[Client] = None, **extra_args):
response = run_cached_request(
client=client, **extract_faithfulness_request(context=context, statements=statements, **extra_args)
)
@@ -721,11 +868,34 @@ def extract_faithfulness(context, statements, client: Optional[LLMClient] = None
class Faithfulness(OpenAILLMScorer):
- """
- Measures factual consistency of a generated answer against the given context.
+ """Evaluates if the generated answer is faithful to the given context.
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import init
+ from autoevals.ragas import Faithfulness
+
+ # Initialize with your OpenAI client
+ init(OpenAI())
+
+ faithfulness = Faithfulness()
+ result = faithfulness.eval(
+ input="What is the capital of France?",
+ output="Paris is the capital of France", # The generated answer to evaluate
+ context="Paris is the capital of France. The city is known for the Eiffel Tower."
+ )
+ print(result.score) # Score between 0-1, higher means more faithful to context
+ print(result.metadata["faithfulness"]) # Detailed faithfulness analysis
+ ```
+
+ Args:
+ input: The question being evaluated
+ output: The generated answer to evaluate
+ context: The context document(s) to evaluate against
"""
- def __init__(self, model=DEFAULT_RAGAS_MODEL, client: Optional[LLMClient] = None, **kwargs):
+ def __init__(self, model=DEFAULT_RAGAS_MODEL, client: Optional[Client] = None, **kwargs):
super().__init__(client=client, **kwargs)
self.model = model
@@ -853,9 +1023,36 @@ def extract_question_gen_request(answer, context, **extra_args):
class AnswerRelevancy(OpenAILLMScorer):
- """
- Scores the relevancy of the answer according to the given question.
- Answers with incomplete, redundant or unnecessary information are penalized.
+ """Evaluates how relevant the generated answer is to the input question.
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import init
+ from autoevals.ragas import AnswerRelevancy
+
+ # Initialize with your OpenAI client
+ init(OpenAI())
+
+ relevancy = AnswerRelevancy()
+ result = relevancy.eval(
+ input="What is the capital of France?",
+ output="Paris is the capital of France", # The generated answer to evaluate
+ context="Paris is the capital of France. The city is known for the Eiffel Tower.",
+        strictness=3,  # Optional: integer count, higher values enforce stricter relevancy
+ temperature=0.2 # Optional: lower values make evaluation more deterministic
+ )
+ print(result.score) # Score between 0-1, higher means more relevant answer
+ print(result.metadata["relevancy"]) # Detailed relevancy analysis
+ ```
+
+ Args:
+ input: The question being evaluated
+ output: The generated answer to evaluate
+ context: The context document(s) to evaluate against
+        strictness: Optional integer (default 3), higher values enforce stricter relevancy
+ temperature: Optional float between 0-1, lower values make evaluation more deterministic
+ embedding_model: Optional model to use for embeddings, defaults to text-embedding-3-small
"""
def __init__(
@@ -864,7 +1061,7 @@ def __init__(
strictness=3,
temperature=0.5,
embedding_model=DEFAULT_RAGAS_EMBEDDING_MODEL,
- client: Optional[LLMClient] = None,
+ client: Optional[Client] = None,
**kwargs,
):
super().__init__(temperature=temperature, client=client, **kwargs)
@@ -934,15 +1131,38 @@ def _run_eval_sync(self, output, expected=None, input=None, context=None, **kwar
class AnswerSimilarity(OpenAILLMScorer):
- """
- Measures the similarity between the generated answer and the expected answer.
+ """Evaluates how semantically similar the generated answer is to the expected answer.
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import init
+ from autoevals.ragas import AnswerSimilarity
+
+ # Initialize with your OpenAI client
+ init(OpenAI())
+
+ similarity = AnswerSimilarity()
+ result = similarity.eval(
+ output="Paris is the capital of France", # The generated answer to evaluate
+ expected="The capital city of France is Paris",
+ embedding_model="text-embedding-3-small" # Optional: specify embedding model
+ )
+ print(result.score) # Score between 0-1, higher means more similar answers
+ print(result.metadata["similarity"]) # Detailed similarity analysis
+ ```
+
+ Args:
+ output: The generated answer to evaluate
+ expected: The expected/ground truth answer
+ embedding_model: Optional model to use for embeddings, defaults to text-embedding-3-small
"""
def __init__(
self,
pairwise_scorer=None,
model=DEFAULT_RAGAS_EMBEDDING_MODEL,
- client: Optional[LLMClient] = None,
+ client: Optional[Client] = None,
**kwargs,
):
super().__init__(client=client, **kwargs)
@@ -1047,8 +1267,36 @@ def compute_f1_score(factuality):
class AnswerCorrectness(OpenAILLMScorer):
- """
- Scores the correctness of the answer based on the ground truth.
+ """Evaluates how correct the generated answer is compared to the expected answer.
+
+ Example:
+ ```python
+ from openai import OpenAI
+ from autoevals import init
+ from autoevals.ragas import AnswerCorrectness
+
+ # Initialize with your OpenAI client
+ init(OpenAI())
+
+ correctness = AnswerCorrectness()
+ result = correctness.eval(
+ input="What is the capital of France?",
+ output="Paris is the capital of France", # The generated answer to evaluate
+ expected="The capital city of France is Paris",
+ factuality_weight=0.7, # Optional: weight for factual correctness
+ answer_similarity_weight=0.3 # Optional: weight for answer similarity
+ )
+ print(result.score) # Score between 0-1, higher means more correct answer
+ print(result.metadata["correctness"]) # Detailed correctness analysis
+ ```
+
+ Args:
+ input: The question being evaluated
+ output: The generated answer to evaluate
+ expected: The expected/ground truth answer
+ factuality_weight: Optional float between 0-1 for factual correctness weight
+ answer_similarity_weight: Optional float between 0-1 for answer similarity weight
+ answer_similarity: Optional AnswerSimilarity instance for similarity evaluation
"""
def __init__(
@@ -1058,7 +1306,7 @@ def __init__(
factuality_weight=0.75,
answer_similarity_weight=0.25,
answer_similarity=None,
- client: Optional[LLMClient] = None,
+ client: Optional[Client] = None,
**kwargs,
):
super().__init__(client=client, **kwargs)
@@ -1139,9 +1387,9 @@ def load_function_call(response):
return json.loads(response["choices"][0]["message"]["tool_calls"][0]["function"]["arguments"])
-async def aload_function_call_request(client: Optional[LLMClient] = None, **kwargs):
+async def aload_function_call_request(client: Optional[Client] = None, **kwargs):
return load_function_call(await arun_cached_request(client=client, **kwargs))
-def load_function_call_request(client: Optional[LLMClient] = None, **kwargs):
+def load_function_call_request(client: Optional[Client] = None, **kwargs):
return load_function_call(run_cached_request(client=client, **kwargs))
diff --git a/py/autoevals/string.py b/py/autoevals/string.py
index cd9680d..4bfa069 100644
--- a/py/autoevals/string.py
+++ b/py/autoevals/string.py
@@ -1,3 +1,22 @@
+"""String evaluation scorers for comparing text similarity.
+
+This module provides scorers for text comparison:
+
+- Levenshtein: Compare strings using edit distance
+ - Fast, local string comparison
+ - Suitable for exact matches and small variations
+ - No external dependencies
+ - Simple to use with just output/expected parameters
+
+- EmbeddingSimilarity: Compare strings using embeddings
+ - Semantic similarity using embeddings
+ - Requires OpenAI API access
+ - Better for comparing meaning rather than exact matches
+ - Supports both sync and async evaluation
+ - Built-in caching for efficiency
+ - Configurable with options for model, prefix, thresholds
+"""
+
import threading
from typing import Optional
@@ -11,8 +30,24 @@
class Levenshtein(ScorerWithPartial):
- """
- A simple scorer that uses the Levenshtein distance to compare two strings.
+ """String similarity scorer using edit distance.
+
+ Example:
+ ```python
+ scorer = Levenshtein()
+ result = scorer.eval(
+ output="hello wrld",
+ expected="hello world"
+ )
+    print(result.score)  # ~0.91 (normalized edit-distance similarity)
+ ```
+
+ Args:
+ output: String to evaluate
+ expected: Reference string to compare against
+
+ Returns:
+ Score object with normalized similarity (0-1), where 1 means identical strings
"""
def _run_eval_sync(self, output, expected=None, **kwargs):
@@ -33,8 +68,44 @@ def _run_eval_sync(self, output, expected=None, **kwargs):
class EmbeddingSimilarity(ScorerWithPartial):
- """
- A simple scorer that uses cosine similarity to compare two strings.
+ """String similarity scorer using embeddings.
+
+ Example:
+ ```python
+ import asyncio
+ from openai import AsyncOpenAI
+ from autoevals.string import EmbeddingSimilarity
+
+ async def compare_texts():
+ # Initialize with async client
+ client = AsyncOpenAI()
+ scorer = EmbeddingSimilarity(
+ prefix="Code explanation: ",
+ client=client
+ )
+
+ result = await scorer.eval_async(
+ output="The function sorts elements using quicksort",
+ expected="The function implements quicksort algorithm"
+ )
+
+ print(result.score) # 0.85 (normalized similarity)
+ print(result.metadata) # Additional comparison details
+
+ # Run the async evaluation
+ asyncio.run(compare_texts())
+ ```
+
+ Args:
+ prefix: Optional text to prepend to inputs for domain context
+ model: Embedding model to use (default: text-embedding-ada-002)
+ expected_min: Minimum similarity threshold (default: 0.7)
+ client: Optional AsyncOpenAI/OpenAI client. If not provided, uses global client from init()
+
+ Returns:
+ Score object with:
+ - score: Normalized similarity (0-1)
+ - metadata: Additional comparison details
"""
MODEL = "text-embedding-ada-002"
@@ -51,14 +122,6 @@ def __init__(
base_url=None,
client: Optional[LLMClient] = None,
):
- """
- Create a new EmbeddingSimilarity scorer.
-
- :param prefix: A prefix to prepend to the prompt. This is useful for specifying the domain of the inputs.
- :param model: The model to use for the embedding distance. Defaults to "text-embedding-ada-002".
- :param expected_min: The minimum expected score. Defaults to 0.7. Values below this will be scored as 0, and
- values between this and 1 will be scaled linearly.
- """
self.prefix = prefix
self.expected_min = expected_min
diff --git a/py/autoevals/test_llm.py b/py/autoevals/test_llm.py
index 3def909..19eabe6 100644
--- a/py/autoevals/test_llm.py
+++ b/py/autoevals/test_llm.py
@@ -1,14 +1,14 @@
import asyncio
from typing import cast
-from unittest.mock import Mock
import pytest
import respx
+from openai import OpenAI
from pydantic import BaseModel
from autoevals import init
-from autoevals.llm import *
-from autoevals.llm import build_classification_tools
+from autoevals.llm import Battle, Factuality, LLMClassifier, OpenAILLMClassifier, build_classification_tools
+from autoevals.oai import OpenAIV1Module
class TestModel(BaseModel):
@@ -202,64 +202,59 @@ def test_factuality():
assert result.score == 1
+@respx.mock
def test_factuality_client():
- client = Mock()
- client.RateLimitError = Exception
-
- completion = Mock()
- completion.to_dict.return_value = {
- "id": "chatcmpl-AdiS4bHWjqSclA5rx7OkuZ6EA9QIp",
- "choices": [
- {
- "finish_reason": "stop",
- "index": 0,
- "logprobs": None,
- "message": {
- "content": None,
- "refusal": None,
- "role": "assistant",
- "tool_calls": [
- {
- "id": "call_JKoeGAX2zGPJAmF2muDgjpHp",
- "function": {
- "arguments": '{"reasons":"1. The question asks to add the numbers 1, 2, and 3.\\n2. The expert answer provides the sum of these numbers as 6.\\n3. The submitted answer also provides the sum as 6.\\n4. Both the expert and submitted answers provide the same numerical result, which is 6.\\n5. Since both answers provide the same factual content, the submitted answer contains all the same details as the expert answer.\\n6. There is no additional information or discrepancy between the two answers.\\n7. Therefore, the submitted answer is neither a subset nor a superset; it is exactly the same as the expert answer in terms of factual content.","choice":"C"}',
- "name": "select_choice",
- },
- "type": "function",
- }
- ],
+ respx.route().respond(
+ json={
+ "id": "chatcmpl-AdiS4bHWjqSclA5rx7OkuZ6EA9QIp",
+ "choices": [
+ {
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": None,
+ "message": {
+ "content": None,
+ "refusal": None,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "id": "call_JKoeGAX2zGPJAmF2muDgjpHp",
+ "function": {
+ "arguments": '{"reasons":"1. The question asks to add the numbers 1, 2, and 3.\\n2. The expert answer provides the sum of these numbers as 6.\\n3. The submitted answer also provides the sum as 6.\\n4. Both the expert and submitted answers provide the same numerical result, which is 6.\\n5. Since both answers provide the same factual content, the submitted answer contains all the same details as the expert answer.\\n6. There is no additional information or discrepancy between the two answers.\\n7. Therefore, the submitted answer is neither a subset nor a superset; it is exactly the same as the expert answer in terms of factual content.","choice":"C"}',
+ "name": "select_choice",
+ },
+ "type": "function",
+ }
+ ],
+ },
+ }
+ ],
+ "created": 1734029028,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion",
+ "system_fingerprint": "fp_cc5cf1c6e3",
+ "usage": {
+ "completion_tokens": 149,
+ "prompt_tokens": 404,
+ "total_tokens": 553,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0,
},
- }
- ],
- "created": 1734029028,
- "model": "gpt-4o-2024-08-06",
- "object": "chat.completion",
- "system_fingerprint": "fp_cc5cf1c6e3",
- "usage": {
- "completion_tokens": 149,
- "prompt_tokens": 404,
- "total_tokens": 553,
- "completion_tokens_details": {
- "accepted_prediction_tokens": 0,
- "audio_tokens": 0,
- "reasoning_tokens": 0,
- "rejected_prediction_tokens": 0,
+ "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0},
},
- "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0},
- },
- }
-
- client.complete.return_value = completion
+ }
+ )
- llm = Factuality(client=cast(LLMClient, client))
+ llm = Factuality(client=OpenAI(api_key="test"))
result = llm.eval(
output="6",
expected="6",
input="Add the following numbers: 1, 2, 3",
)
- assert client.complete.call_count == 1
-
assert result.score == 1
@@ -270,55 +265,53 @@ def reset_client():
# make sure we deny any leaked calls to OpenAI
-@respx.mock(base_url="https://api.openai.com/v1/")
+@respx.mock
def test_init_client():
- client = Mock()
- client.RateLimitError = Exception
+ client = cast(OpenAIV1Module.OpenAI, OpenAI(api_key="test"))
- completion = Mock()
- completion.to_dict.return_value = {
- "id": "chatcmpl-AdiS4bHWjqSclA5rx7OkuZ6EA9QIp",
- "choices": [
- {
- "finish_reason": "stop",
- "index": 0,
- "logprobs": None,
- "message": {
- "content": None,
- "refusal": None,
- "role": "assistant",
- "tool_calls": [
- {
- "id": "call_JKoeGAX2zGPJAmF2muDgjpHp",
- "function": {
- "arguments": '{"reasons":"1. The question asks to add the numbers 1, 2, and 3.\\n2. The expert answer provides the sum of these numbers as 6.\\n3. The submitted answer also provides the sum as 6.\\n4. Both the expert and submitted answers provide the same numerical result, which is 6.\\n5. Since both answers provide the same factual content, the submitted answer contains all the same details as the expert answer.\\n6. There is no additional information or discrepancy between the two answers.\\n7. Therefore, the submitted answer is neither a subset nor a superset; it is exactly the same as the expert answer in terms of factual content.","choice":"C"}',
- "name": "select_choice",
- },
- "type": "function",
- }
- ],
+ respx.route().respond(
+ json={
+ "id": "chatcmpl-AdiS4bHWjqSclA5rx7OkuZ6EA9QIp",
+ "choices": [
+ {
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": None,
+ "message": {
+ "content": None,
+ "refusal": None,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "id": "call_JKoeGAX2zGPJAmF2muDgjpHp",
+ "function": {
+ "arguments": '{"reasons":"1. The question asks to add the numbers 1, 2, and 3.\\n2. The expert answer provides the sum of these numbers as 6.\\n3. The submitted answer also provides the sum as 6.\\n4. Both the expert and submitted answers provide the same numerical result, which is 6.\\n5. Since both answers provide the same factual content, the submitted answer contains all the same details as the expert answer.\\n6. There is no additional information or discrepancy between the two answers.\\n7. Therefore, the submitted answer is neither a subset nor a superset; it is exactly the same as the expert answer in terms of factual content.","choice":"C"}',
+ "name": "select_choice",
+ },
+ "type": "function",
+ }
+ ],
+ },
+ }
+ ],
+ "created": 1734029028,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion",
+ "system_fingerprint": "fp_cc5cf1c6e3",
+ "usage": {
+ "completion_tokens": 149,
+ "prompt_tokens": 404,
+ "total_tokens": 553,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0,
},
- }
- ],
- "created": 1734029028,
- "model": "gpt-4o-2024-08-06",
- "object": "chat.completion",
- "system_fingerprint": "fp_cc5cf1c6e3",
- "usage": {
- "completion_tokens": 149,
- "prompt_tokens": 404,
- "total_tokens": 553,
- "completion_tokens_details": {
- "accepted_prediction_tokens": 0,
- "audio_tokens": 0,
- "reasoning_tokens": 0,
- "rejected_prediction_tokens": 0,
+ "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0},
},
- "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0},
- },
- }
-
- client.complete.return_value = completion
+ }
+ )
init(client=client)
@@ -329,8 +322,6 @@ def test_init_client():
input="Add the following numbers: 1, 2, 3",
)
- assert client.complete.call_count == 1
-
assert result.score == 1
diff --git a/py/autoevals/test_oai.py b/py/autoevals/test_oai.py
new file mode 100644
index 0000000..d1140f6
--- /dev/null
+++ b/py/autoevals/test_oai.py
@@ -0,0 +1,251 @@
+import sys
+from typing import Any, Union, cast
+
+import openai
+import pytest
+from braintrust.oai import (
+ ChatCompletionV0Wrapper,
+ CompletionsV1Wrapper,
+ NamedWrapper,
+ OpenAIV0Wrapper,
+ OpenAIV1Wrapper,
+ wrap_openai,
+)
+from openai.resources.chat.completions import AsyncCompletions
+
+from autoevals import init # type: ignore[import]
+from autoevals.oai import ( # type: ignore[import]
+ LLMClient,
+ OpenAIV0Module,
+ OpenAIV1Module,
+ _named_wrapper, # type: ignore[import] # Accessing private members for testing
+ _wrap_openai, # type: ignore[import] # Accessing private members for testing
+ get_openai_wrappers,
+ prepare_openai,
+)
+
+
+def unwrap_named_wrapper(obj: Union[NamedWrapper, OpenAIV1Module.OpenAI, OpenAIV0Module]) -> Any:
+ return getattr(obj, "_NamedWrapper__wrapped")
+
+
+@pytest.fixture(autouse=True)
+def reset_env_and_client(monkeypatch: pytest.MonkeyPatch):
+ """Reset environment variables and client before each test."""
+ monkeypatch.delenv("OPENAI_API_KEY", raising=False)
+ monkeypatch.setenv("OPENAI_API_KEY", "test-key")
+ monkeypatch.setenv("OPENAI_BASE_URL", "http://test-url")
+ monkeypatch.setattr("autoevals.oai._named_wrapper", None)
+ monkeypatch.setattr("autoevals.oai._wrap_openai", None)
+ monkeypatch.setattr("autoevals.oai._openai_module", None)
+
+ init(None)
+
+ yield
+
+
+def test_prepare_openai_uses_unwrapped_global_client():
+ openai_obj = openai.OpenAI(api_key="api-key", base_url="http://test")
+ client = LLMClient(
+ openai=openai_obj,
+ complete=openai_obj.chat.completions.create,
+ embed=openai_obj.embeddings.create,
+ moderation=openai_obj.moderations.create,
+ RateLimitError=openai.RateLimitError,
+ )
+
+ init(client)
+
+ prepared_client = prepare_openai()
+
+ assert prepared_client == client
+ assert not prepared_client.is_wrapped
+ assert prepared_client.openai == openai_obj
+ assert prepared_client.complete is client.complete
+ assert prepared_client.openai.api_key == "api-key"
+
+
+def test_init_creates_llmclient_if_needed():
+ openai_obj = openai.OpenAI()
+ init(openai_obj)
+
+ prepared_client = prepare_openai()
+
+ assert isinstance(prepared_client, LLMClient)
+ assert prepared_client.is_wrapped
+ assert unwrap_named_wrapper(prepared_client.openai) == openai_obj
+
+
+def test_init_creates_async_llmclient_if_needed(mock_openai_v0: OpenAIV0Module):
+ init(mock_openai_v0, is_async=True)
+
+ prepared_client = prepare_openai()
+
+ assert isinstance(prepared_client, LLMClient)
+ assert prepared_client.is_wrapped
+ assert isinstance(prepared_client.openai, OpenAIV0Wrapper)
+ assert prepared_client.complete.__name__ == "acreate"
+
+
+def test_prepare_openai_defaults():
+ prepared_client = prepare_openai()
+
+ assert isinstance(prepared_client, LLMClient)
+ assert prepared_client.is_wrapped
+ openai_obj = unwrap_named_wrapper(prepared_client.openai)
+ assert isinstance(openai_obj, openai.OpenAI)
+ assert isinstance(getattr(prepared_client.complete, "__self__", None), CompletionsV1Wrapper)
+ assert openai_obj.api_key == "test-key"
+ assert openai_obj.base_url == "http://test-url"
+
+
+def test_prepare_openai_with_plain_openai():
+ client = openai.OpenAI(api_key="api-key", base_url="http://test")
+ prepared_client = prepare_openai(client=client)
+
+ assert prepared_client.is_wrapped
+ assert isinstance(prepared_client.openai, OpenAIV1Wrapper)
+
+
+def test_prepare_openai_async():
+ prepared_client = prepare_openai(is_async=True)
+
+ assert isinstance(prepared_client, LLMClient)
+ assert prepared_client.is_wrapped
+ assert isinstance(prepared_client.openai, OpenAIV1Wrapper)
+
+ openai_obj = getattr(prepared_client.complete, "__self__", None)
+ assert isinstance(openai_obj, NamedWrapper)
+ assert isinstance(unwrap_named_wrapper(openai_obj), AsyncCompletions)
+
+
+def test_prepare_openai_wraps_once():
+ openai_obj = cast(OpenAIV1Module.OpenAI, wrap_openai(openai.OpenAI(api_key="api-key", base_url="http://test")))
+
+ client = LLMClient(openai_obj)
+
+ init(client)
+
+ prepared_client = prepare_openai()
+
+ assert prepared_client is client
+ assert prepared_client.is_wrapped
+ assert prepared_client.openai is openai_obj
+
+
+def test_prepare_openai_handles_missing_braintrust(monkeypatch: pytest.MonkeyPatch):
+ monkeypatch.setitem(sys.modules, "braintrust.oai", None)
+
+ prepared_client = prepare_openai()
+
+ assert isinstance(prepared_client, LLMClient)
+ assert not prepared_client.is_wrapped
+ assert isinstance(prepared_client.openai, openai.OpenAI)
+
+
+def test_get_openai_wrappers_caches_imports():
+ original_wrapper = _named_wrapper
+ original_wrap_fn = _wrap_openai
+
+ # First call should set the cache
+ wrapper1, wrap_fn1 = get_openai_wrappers()
+
+ # Second call should use cache
+ wrapper2, wrap_fn2 = get_openai_wrappers()
+
+ # Verify we got same objects back
+ assert wrapper2 is wrapper1
+ assert wrap_fn2 is wrap_fn1
+
+ # Verify they're different from the original None values
+ assert wrapper2 is not original_wrapper
+ assert wrap_fn2 is not original_wrap_fn
+
+
+def test_prepare_openai_raises_on_missing_openai(monkeypatch: pytest.MonkeyPatch):
+ monkeypatch.setitem(sys.modules, "openai", None)
+
+ with pytest.raises(ImportError):
+ prepare_openai()
+
+
+@pytest.fixture
+def mock_openai_v0(monkeypatch: pytest.MonkeyPatch):
+ """Mock the OpenAI v0 SDK for testing."""
+
+ class MockOpenAIV0:
+ __module__ = "openai"
+ api_key = None
+ api_base = None
+
+ class ChatCompletion:
+ __module__ = "openai"
+
+ @staticmethod
+ def create(*args: Any, **kwargs: Any):
+ pass
+
+ @staticmethod
+ def acreate(*args: Any, **kwargs: Any):
+ pass
+
+ class Embedding:
+ __module__ = "openai"
+
+ @staticmethod
+ def create(*args: Any, **kwargs: Any):
+ pass
+
+ @staticmethod
+ def acreate(*args: Any, **kwargs: Any):
+ pass
+
+ class Moderation:
+ __module__ = "openai"
+
+ @staticmethod
+ def create(*args: Any, **kwargs: Any):
+ pass
+
+ @staticmethod
+ def acreate(*args: Any, **kwargs: Any):
+ pass
+
+ class error:
+ __module__ = "openai"
+
+ class RateLimitError(Exception):
+ __module__ = "openai"
+ pass
+
+ mock_openai = MockOpenAIV0()
+ monkeypatch.setitem(sys.modules, "openai", mock_openai)
+ return cast(OpenAIV0Module, mock_openai)
+
+
+def test_prepare_openai_v0_sdk(mock_openai_v0: OpenAIV0Module):
+ prepared_client = prepare_openai()
+
+ assert prepared_client.is_wrapped
+ assert prepared_client.openai.api_key == "test-key"
+
+ assert isinstance(getattr(prepared_client.complete, "__self__", None), ChatCompletionV0Wrapper)
+
+
+def test_prepare_openai_v0_async(mock_openai_v0: OpenAIV0Module):
+ prepared_client = prepare_openai(is_async=True)
+
+ assert prepared_client.is_wrapped
+ assert prepared_client.openai.api_key == "test-key"
+
+ assert prepared_client.complete.__name__ == "acreate"
+
+
+def test_prepare_openai_v0_with_client(mock_openai_v0: OpenAIV0Module):
+ client = LLMClient(openai=mock_openai_v0, is_async=True)
+
+ prepared_client = prepare_openai(client=client)
+
+ assert prepared_client.is_wrapped
+ assert prepared_client.openai.api_key is mock_openai_v0.api_key # must be set by the user
+ assert prepared_client.complete.__name__ == "acreate"
diff --git a/py/autoevals/value.py b/py/autoevals/value.py
index 605cc4b..6e79756 100644
--- a/py/autoevals/value.py
+++ b/py/autoevals/value.py
@@ -1,3 +1,40 @@
+"""Value comparison utilities for exact matching and normalization.
+
+This module provides tools for exact value comparison with smart handling of different data types:
+
+- ExactMatch: A scorer for exact value comparison
+ - Handles primitive types (strings, numbers, etc.)
+ - Smart `JSON` serialization for objects and arrays
+ - Normalizes `JSON` strings for consistent comparison
+
+Example:
+```python
+from autoevals import ExactMatch
+
+# Simple value comparison
+scorer = ExactMatch()
+result = scorer.eval(
+ output="hello",
+ expected="hello"
+)
+print(result.score) # 1.0 for exact match
+
+# Object comparison (automatically normalized)
+result = scorer.eval(
+ output={"name": "John", "age": 30},
+ expected='{"age": 30, "name": "John"}' # Different order but same content
+)
+print(result.score) # 1.0 for equivalent JSON
+
+# Array comparison
+result = scorer.eval(
+ output=[1, 2, 3],
+ expected="[1, 2, 3]" # String or native types work
+)
+print(result.score) # 1.0 for equivalent arrays
+```
+"""
+
import json
from typing import Any
@@ -7,9 +44,25 @@
class ExactMatch(ScorerWithPartial):
- """
- A simple scorer that tests whether two values are equal. If the value is an object or array,
- it will be JSON-serialized and the strings compared for equality.
+ """A scorer that tests for exact equality between values.
+
+ This scorer handles various input types:
+ - Primitive values (strings, numbers, etc.)
+ - JSON objects (dicts) and arrays (lists)
+ - JSON strings that can be parsed into objects/arrays
+
+ The comparison process:
+ 1. Detects if either value is/might be a JSON object/array
+ 2. Normalizes both values (serialization if needed)
+ 3. Performs exact string comparison
+
+ Args:
+ output: Value to evaluate
+ expected: Reference value to compare against
+
+ Returns:
+ Score object with:
+ - score: 1.0 for exact match, 0.0 otherwise
"""
def _run_eval_sync(self, output, expected=None, **kwargs):
diff --git a/py/autoevals/version.py b/py/autoevals/version.py
index d5a0531..1824d3e 100644
--- a/py/autoevals/version.py
+++ b/py/autoevals/version.py
@@ -1 +1 @@
-VERSION = "0.0.123"
+VERSION = "0.0.124"
diff --git a/pyrightconfig.json b/pyrightconfig.json
new file mode 100644
index 0000000..79758fd
--- /dev/null
+++ b/pyrightconfig.json
@@ -0,0 +1,4 @@
+{
+ "typeCheckingMode": "strict",
+ "reportMissingTypeStubs": false
+}
diff --git a/setup.py b/setup.py
index f7a772b..3f9c3ee 100644
--- a/setup.py
+++ b/setup.py
@@ -15,14 +15,17 @@
extras_require = {
"dev": [
- "black",
+ "black==22.6.0",
+ "braintrust", # used for testing
"build",
"flake8",
"flake8-isort",
"IPython",
"isort==5.12.0",
+ "openai", # used for testing
"pre-commit",
"pytest",
+ "pytest-watch",
"respx",
"twine",
],