3 changes: 1 addition & 2 deletions crates/lingua/src/providers/bedrock/adapter.rs
@@ -419,8 +419,7 @@ impl ProviderAdapter for BedrockAdapter {
}

// Check for usage-only chunk
-if chunk.choices.is_empty() && chunk.usage.is_some() {
-    let usage = chunk.usage.as_ref().unwrap();
+if let (true, Some(usage)) = (chunk.choices.is_empty(), &chunk.usage) {
return Ok(serde_json::json!({
"metadata": {
"usage": {
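The refactor above replaces an is_empty()/is_some() check followed by unwrap() with a single tuple if let, so the usage value is bound by pattern matching and the panicking unwrap path disappears. A minimal standalone sketch of the same pattern, with stand-in types (Chunk and Usage here are placeholders, not lingua's real definitions):

// Illustrative sketch only; Chunk/Usage are stand-ins, not the crate's types.
struct Usage {
    input_tokens: u32,
    output_tokens: u32,
}

struct Chunk {
    choices: Vec<String>,
    usage: Option<Usage>,
}

// Binds `usage` only when there are no choices AND usage is present,
// with no unwrap() left to panic.
fn usage_only(chunk: &Chunk) -> Option<&Usage> {
    if let (true, Some(usage)) = (chunk.choices.is_empty(), &chunk.usage) {
        return Some(usage);
    }
    None
}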
2 changes: 2 additions & 0 deletions payloads/cases/models.ts
@@ -1,6 +1,8 @@
// Canonical model configuration - change these to update all test cases
export const OPENAI_CHAT_COMPLETIONS_MODEL = "gpt-5-nano";
export const OPENAI_RESPONSES_MODEL = "gpt-5-nano";
+// For parameters not supported by reasoning models (temperature, top_p, logprobs)
+export const OPENAI_NON_REASONING_MODEL = "gpt-4o-mini";
export const ANTHROPIC_MODEL = "claude-sonnet-4-20250514";
export const GOOGLE_MODEL = "gemini-2.5-flash";
export const BEDROCK_MODEL = "us.anthropic.claude-haiku-4-5-20251001-v1:0";
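The new OPENAI_NON_REASONING_MODEL constant exists because reasoning models such as gpt-5-nano reject temperature, top_p, and logprobs. A hypothetical helper (not in this PR; chatModelFor is invented for illustration) showing the intended selection rule:

// Hypothetical sketch; chatModelFor does not exist in this repo.
import {
  OPENAI_CHAT_COMPLETIONS_MODEL,
  OPENAI_NON_REASONING_MODEL,
} from "./models";

const SAMPLING_PARAMS = ["temperature", "top_p", "logprobs"];

function chatModelFor(params: Record<string, unknown>): string {
  // Sampling-style parameters need the non-reasoning model.
  return SAMPLING_PARAMS.some((p) => p in params)
    ? OPENAI_NON_REASONING_MODEL
    : OPENAI_CHAT_COMPLETIONS_MODEL;
}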
284 changes: 270 additions & 14 deletions payloads/cases/params.ts
@@ -1,14 +1,22 @@
import { TestCaseCollection } from "./types";
-import { OPENAI_RESPONSES_MODEL } from "./models";
+import {
+  OPENAI_CHAT_COMPLETIONS_MODEL,
+  OPENAI_RESPONSES_MODEL,
+  OPENAI_NON_REASONING_MODEL,
+} from "./models";

-// OpenAI Responses API parameter test cases
-// Each test case exercises specific parameters from the Responses API
+// OpenAI Responses API and Chat Completions API parameter test cases
+// Each test case exercises specific parameters with bidirectional mappings where possible
+// Note: temperature, top_p, and logprobs are not supported with reasoning models (gpt-5-nano)
export const paramsCases: TestCaseCollection = {
// === Reasoning Configuration ===

reasoningSummaryParam: {
"chat-completions": null,
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
messages: [{ role: "user", content: "What is 2+2?" }],
reasoning_effort: "medium",
},
responses: {
model: OPENAI_RESPONSES_MODEL,
input: [{ role: "user", content: "2+2" }],
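This case pairs the Responses API's nested reasoning object with the flat reasoning_effort parameter in Chat Completions. A side-by-side sketch of the two shapes (illustrative only; the collapsed Responses payload above is not reproduced, and the summary value is an assumption):

// Illustrative only; follows the public OpenAI request shapes.
const chatCompletionsShape = {
  reasoning_effort: "medium", // flat, top-level parameter
};

const responsesShape = {
  reasoning: { effort: "medium", summary: "auto" }, // nested; "auto" is assumed
};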
@@ -25,7 +33,11 @@ export const paramsCases: TestCaseCollection = {
// === Text Response Configuration ===

textFormatJsonObjectParam: {
"chat-completions": null,
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
messages: [{ role: "user", content: 'Return {"status": "ok"} as JSON.' }],
response_format: { type: "json_object" },
},
responses: {
model: OPENAI_RESPONSES_MODEL,
input: [{ role: "user", content: "Return JSON with a=1" }],
@@ -41,7 +53,31 @@ export const paramsCases: TestCaseCollection = {
},

textFormatJsonSchemaParam: {
"chat-completions": null,
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
messages: [
{
role: "user",
content: "Extract: John is 25.",
},
],
response_format: {
type: "json_schema",
json_schema: {
name: "person_info",
schema: {
type: "object",
properties: {
name: { type: "string" },
age: { type: "number" },
},
required: ["name", "age"],
additionalProperties: false,
},
strict: true,
},
},
},
responses: {
model: OPENAI_RESPONSES_MODEL,
input: [{ role: "user", content: "Name: John, Age: 25" }],
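The two APIs nest structured-output schemas differently: Chat Completions wraps the fields in a response_format.json_schema object, while the Responses API flattens the same fields under text.format. A sketch of the contrast (illustrative; the collapsed Responses payload above is not reproduced):

// Illustrative only; the same schema under the two nestings.
const schema = {
  type: "object",
  properties: { name: { type: "string" }, age: { type: "number" } },
  required: ["name", "age"],
  additionalProperties: false,
};

// Chat Completions: a wrapper object named json_schema.
const chatFormat = {
  response_format: {
    type: "json_schema",
    json_schema: { name: "person_info", schema, strict: true },
  },
};

// Responses API: the same fields sit directly under text.format.
const responsesFormat = {
  text: {
    format: { type: "json_schema", name: "person_info", schema, strict: true },
  },
};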
@@ -99,7 +135,27 @@ export const paramsCases: TestCaseCollection = {
},

toolChoiceRequiredParam: {
"chat-completions": null,
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
messages: [{ role: "user", content: "Tokyo weather" }],
tools: [
{
type: "function",
function: {
name: "get_weather",
description: "Get weather",
strict: true,
parameters: {
type: "object",
properties: { location: { type: "string" } },
required: ["location"],
additionalProperties: false,
},
},
},
],
tool_choice: { type: "function", function: { name: "get_weather" } },
},
responses: {
model: OPENAI_RESPONSES_MODEL,
input: [{ role: "user", content: "Tokyo weather" }],
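Tool definitions and tool_choice follow the same split: Chat Completions nests function details under a function key, while the Responses API flattens them to the top level of the tool object. A sketch (illustrative; the collapsed Responses payload above is not reproduced):

// Illustrative only; forcing a specific function in each API.
const chatToolChoice = {
  tool_choice: { type: "function", function: { name: "get_weather" } },
};

const responsesToolChoice = {
  tool_choice: { type: "function", name: "get_weather" }, // flattened
};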
@@ -127,7 +183,27 @@ export const paramsCases: TestCaseCollection = {
},

parallelToolCallsDisabledParam: {
"chat-completions": null,
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
messages: [{ role: "user", content: "Weather in NYC and LA?" }],
tools: [
{
type: "function",
function: {
name: "get_weather",
description: "Get weather",
strict: true,
parameters: {
type: "object",
properties: { location: { type: "string" } },
required: ["location"],
additionalProperties: false,
},
},
},
],
parallel_tool_calls: false,
},
responses: {
model: OPENAI_RESPONSES_MODEL,
input: [{ role: "user", content: "NYC and LA weather" }],
@@ -157,7 +233,13 @@ export const paramsCases: TestCaseCollection = {
// === Context & State Management ===

instructionsParam: {
"chat-completions": null,
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
messages: [
{ role: "system", content: "Always say ok." },
{ role: "user", content: "Hi" },
],
},
responses: {
model: OPENAI_RESPONSES_MODEL,
input: [{ role: "user", content: "Hi" }],
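Here the Responses API's top-level instructions string maps onto a leading system message in Chat Completions. A sketch of the assumed Responses side (illustrative; the actual collapsed payload is not reproduced):

// Illustrative only.
const responsesEquivalent = {
  model: "gpt-5-nano",
  input: [{ role: "user", content: "Hi" }],
  instructions: "Always say ok.",
};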
@@ -181,7 +263,11 @@ export const paramsCases: TestCaseCollection = {
},

storeDisabledParam: {
"chat-completions": null,
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
messages: [{ role: "user", content: "Say ok." }],
store: false,
},
responses: {
model: OPENAI_RESPONSES_MODEL,
input: [{ role: "user", content: "Hi" }],
@@ -195,7 +281,11 @@ export const paramsCases: TestCaseCollection = {
// === Caching & Performance ===

serviceTierParam: {
"chat-completions": null,
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
messages: [{ role: "user", content: "Say ok." }],
service_tier: "default",
},
responses: {
model: OPENAI_RESPONSES_MODEL,
input: [{ role: "user", content: "Hi" }],
@@ -207,7 +297,11 @@ export const paramsCases: TestCaseCollection = {
},

promptCacheKeyParam: {
"chat-completions": null,
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
messages: [{ role: "user", content: "Say ok." }],
prompt_cache_key: "user-123-ml-explanation",
},
responses: {
model: OPENAI_RESPONSES_MODEL,
input: [{ role: "user", content: "Hi" }],
@@ -221,7 +315,16 @@ export const paramsCases: TestCaseCollection = {
// === Metadata & Identification ===

metadataParam: {
"chat-completions": null,
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
messages: [{ role: "user", content: "Say ok." }],
store: true,
metadata: {
request_id: "req-12345",
user_tier: "premium",
experiment: "control",
},
},
responses: {
model: OPENAI_RESPONSES_MODEL,
input: [{ role: "user", content: "Hi" }],
@@ -233,7 +336,11 @@ export const paramsCases: TestCaseCollection = {
},

safetyIdentifierParam: {
"chat-completions": null,
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
messages: [{ role: "user", content: "Say ok." }],
safety_identifier: "hashed-user-id-abc123",
},
responses: {
model: OPENAI_RESPONSES_MODEL,
input: [{ role: "user", content: "Hi" }],
@@ -243,4 +350,153 @@ export const paramsCases: TestCaseCollection = {
google: null,
bedrock: null,
},

+// === Sampling Parameters (require non-reasoning model) ===
+
+temperatureParam: {
+  "chat-completions": {
+    model: OPENAI_NON_REASONING_MODEL,
+    messages: [{ role: "user", content: "Say hi." }],
+    temperature: 0.7,
+  },
+  responses: null,
+  anthropic: null,
+  google: null,
+  bedrock: null,
+},
+
+topPParam: {
+  "chat-completions": {
+    model: OPENAI_NON_REASONING_MODEL,
+    messages: [{ role: "user", content: "Say hi." }],
+    top_p: 0.9,
+  },
+  responses: null,
+  anthropic: null,
+  google: null,
+  bedrock: null,
+},
+
+frequencyPenaltyParam: {
+  "chat-completions": {
+    model: OPENAI_NON_REASONING_MODEL,
+    messages: [{ role: "user", content: "Say ok." }],
+    frequency_penalty: 0.5,
+  },
+  responses: null,
+  anthropic: null,
+  google: null,
+  bedrock: null,
+},
+
+presencePenaltyParam: {
+  "chat-completions": {
+    model: OPENAI_NON_REASONING_MODEL,
+    messages: [{ role: "user", content: "Say ok." }],
+    presence_penalty: 0.5,
+  },
+  responses: null,
+  anthropic: null,
+  google: null,
+  bedrock: null,
+},
+
+logprobsParam: {
+  "chat-completions": {
+    model: OPENAI_NON_REASONING_MODEL,
+    messages: [{ role: "user", content: "What is 2 + 2?" }],
+    logprobs: true,
+    top_logprobs: 2,
+  },
+  responses: null,
+  anthropic: null,
+  google: null,
+  bedrock: null,
+},
+
+// === Output Control ===
+
+nMultipleCompletionsParam: {
+  "chat-completions": {
+    model: OPENAI_NON_REASONING_MODEL,
+    messages: [{ role: "user", content: "Say a word." }],
+    n: 2,
+  },
+  responses: null,
+  anthropic: null,
+  google: null,
+  bedrock: null,
+},
+
+stopSequencesParam: {
+  "chat-completions": {
+    model: OPENAI_NON_REASONING_MODEL,
+    messages: [{ role: "user", content: "Count from 1 to 20." }],
+    stop: ["10", "ten"],
+  },
+  responses: null,
+  anthropic: null,
+  google: null,
+  bedrock: null,
+},
+
+maxCompletionTokensParam: {
+  "chat-completions": {
+    model: OPENAI_CHAT_COMPLETIONS_MODEL,
+    messages: [{ role: "user", content: "Say ok." }],
+    max_completion_tokens: 500,
+  },
+  responses: null,
+  anthropic: null,
+  google: null,
+  bedrock: null,
+},
+
+// === Advanced Parameters ===
+
+predictionParam: {
+  "chat-completions": {
+    model: OPENAI_NON_REASONING_MODEL,
+    messages: [
+      {
+        role: "user",
+        content:
+          "Update this function to add error handling:\n\nfunction divide(a, b) {\n return a / b;\n}",
+      },
+    ],
+    prediction: {
+      type: "content",
+      content:
+        "function divide(a, b) {\n if (b === 0) {\n throw new Error('Cannot divide by zero');\n }\n return a / b;\n}",
+    },
+  },
+  responses: null,
+  anthropic: null,
+  google: null,
+  bedrock: null,
+},
+
+seedParam: {
+  "chat-completions": {
+    model: OPENAI_NON_REASONING_MODEL,
+    messages: [{ role: "user", content: "Pick a number." }],
+    seed: 12345,
+  },
+  responses: null,
+  anthropic: null,
+  google: null,
+  bedrock: null,
+},
+
+logitBiasParam: {
+  "chat-completions": {
+    model: OPENAI_NON_REASONING_MODEL,
+    messages: [{ role: "user", content: "Say hello." }],
+    logit_bias: { "15339": -100 },
+  },
+  responses: null,
+  anthropic: null,
+  google: null,
+  bedrock: null,
+},
};
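payloads/cases/types.ts is not part of this diff. An inferred sketch of the shape these literals conform to, where each case maps provider keys to a raw request payload, or null when the case does not apply to that provider:

// Inferred sketch; not the real definition in payloads/cases/types.ts.
type ProviderKey =
  | "chat-completions"
  | "responses"
  | "anthropic"
  | "google"
  | "bedrock";

type TestCaseCollection = Record<
  string,
  Record<ProviderKey, Record<string, unknown> | null>
>;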