Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 77 additions & 0 deletions v1/models/mistral.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
---
$schema: "https://raw.githubusercontent.com/hiddenpath/ai-protocol/main/schemas/v1.json"

# Mistral Model Instances
# Reference: https://docs.mistral.ai/api/
#
# Each entry maps a local model key to the Mistral API model_id, its declared
# context window, and the capability tags the runtime uses for routing.

models:
  mistral-small-latest:
    provider: mistral
    model_id: "mistral-small-latest"
    display_name: "Mistral Small"
    context_window: 32000
    capabilities: [chat, tools, streaming, parallel_tools]
    status: active
    tags: ["mistral", "small", "tools", "latest"]

  mistral-medium-latest:
    provider: mistral
    model_id: "mistral-medium-latest"
    display_name: "Mistral Medium"
    context_window: 32000
    capabilities: [chat, tools, streaming, parallel_tools]
    status: active
    tags: ["mistral", "medium", "tools", "latest"]

  mistral-large-latest:
    provider: mistral
    model_id: "mistral-large-latest"
    display_name: "Mistral Large"
    # NOTE(review): Mistral's docs advertise a larger window (128k) for recent
    # mistral-large revisions — confirm 32000 is intentional.
    context_window: 32000
    capabilities: [chat, tools, streaming, parallel_tools, agentic]
    status: active
    tags: ["mistral", "large", "tools", "agentic", "latest"]

  # NOTE(review): verify "pixtral-small-latest" is a real API model id —
  # Mistral's public docs list pixtral-12b / pixtral-large; confirm.
  pixtral-small-latest:
    provider: mistral
    model_id: "pixtral-small-latest"
    display_name: "Pixtral Small"
    context_window: 128000
    capabilities: [chat, vision, tools, streaming, parallel_tools]
    status: active
    tags: ["mistral", "pixtral", "vision", "small", "latest"]

  pixtral-large-latest:
    provider: mistral
    model_id: "pixtral-large-latest"
    display_name: "Pixtral Large"
    context_window: 128000
    capabilities: [chat, vision, tools, streaming, parallel_tools, agentic]
    status: active
    tags: ["mistral", "pixtral", "vision", "large", "agentic", "latest"]

  open-mistral-7b:
    provider: mistral
    model_id: "open-mistral-7b"
    display_name: "Open Mistral 7B"
    context_window: 32000
    capabilities: [chat, streaming]
    status: active
    tags: ["mistral", "open", "7b", "base"]

  open-mixtral-8x7b:
    provider: mistral
    model_id: "open-mixtral-8x7b"
    display_name: "Open Mixtral 8x7B"
    context_window: 32000
    capabilities: [chat, streaming]
    status: active
    tags: ["mistral", "open", "mixtral", "8x7b", "base"]

  mistral-tiny:
    provider: mistral
    model_id: "mistral-tiny"
    display_name: "Mistral Tiny"
    context_window: 32000
    capabilities: [chat, streaming]
    status: active
    tags: ["mistral", "tiny", "cost-effective"]
159 changes: 159 additions & 0 deletions v1/providers/mistral.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
---
$schema: "https://raw.githubusercontent.com/hiddenpath/ai-protocol/main/schemas/v1.json"

# Mistral provider definition (OpenAI-compatible chat completions API).
id: mistral
protocol_version: "1.5"

version: "v1"
base_url: "https://api.mistral.ai/v1"
auth:
  type: bearer
  token_env: "MISTRAL_API_KEY"
payload_format: "openai_style"

# OpenAI-compatible API families
api_families: ["chat_completions"]
default_api_family: "chat_completions"
endpoints:
  # Runtime uses UnifiedRequest.operation (currently "chat") to resolve endpoints.
  # Keep keys aligned with runtime expectations.
  chat:
    path: "/chat/completions"
    method: "POST"
    adapter: "openai"

# Optional service endpoints (OpenAI-compatible).
services:
  list_models:
    path: "/models"
    method: "GET"
    response_binding: "data"

# Termination reason normalization (see v1/spec.yaml: standard_schema.streaming_events.termination_reasons)
termination:
  source_field: "finish_reason"
  mapping:
    stop: "end_turn"
    length: "max_tokens"
    tool_calls: "tool_use"
    content_filter: "refusal"
  notes:
    - "OpenAI-compatible: finish_reason is reported per candidate."

# Tool invocation normalization (see v1/spec.yaml: standard_schema.content_blocks)
tooling:
  source_model: "openai_tool_calls"
  tool_use:
    id_path: "id"
    name_path: "function.name"
    input_path: "function.arguments"
    input_format: "json_string"
  notes:
    - "OpenAI-compatible tool_calls.function.arguments is typically a JSON string; runtimes SHOULD parse it into an object when normalizing."

# Rate limits (Mistral uses standard HTTP headers)
# Note: Mistral may use different header names, but we follow OpenAI-compatible pattern
rate_limit_headers:
  requests_limit: "x-ratelimit-limit-requests"
  requests_remaining: "x-ratelimit-remaining-requests"
  requests_reset: "x-ratelimit-reset-requests"
  tokens_limit: "x-ratelimit-limit-tokens"
  tokens_remaining: "x-ratelimit-remaining-tokens"
  tokens_reset: "x-ratelimit-reset-tokens"
  retry_after: "retry-after"

# Retry policy (OpenAI-compatible guidance: exponential backoff for 429; retry 5xx after brief wait)
retry_policy:
  strategy: "exponential_backoff"
  min_delay_ms: 1000
  jitter: "full"
  # NOTE(review): consider whether 502/503 should also be retryable — confirm
  # against the runtime's shared retry policy for other providers.
  retry_on_http_status: [429, 500]
  notes:
    - "OpenAI-compatible: exponential backoff is recommended for rate limit errors (429)."
    - "Retry 500 after a brief wait."

# Unified parameter name -> Mistral request field.
parameter_mappings:
  temperature: "temperature"
  max_tokens: "max_tokens"
  stream: "stream"
  stop_sequences: "stop"
  tools: "tools"
  tool_choice: "tool_choice"
  frequency_penalty: "frequency_penalty"
  presence_penalty: "presence_penalty"
  top_p: "top_p"
  random_seed: "random_seed"
  n: "n"
  parallel_tool_calls: "parallel_tool_calls"
  safe_prompt: "safe_prompt"
  response_format: "response_format"
  prompt_mode: "prompt_mode"
  prediction: "prediction"
  metadata: "metadata"

response_format: "openai_style"
response_paths:
  content: "choices[0].message.content"
  tool_calls: "choices[0].message.tool_calls"
  usage: "usage"

streaming:
  event_format: "data_lines"
  decoder:
    format: "sse"
    delimiter: "\n\n"
    prefix: "data: "
    done_signal: "[DONE]"
  content_path: "choices[0].delta.content"
  tool_call_path: "choices[0].delta.tool_calls"
  usage_path: "usage"
  frame_selector: "exists($.choices) || exists($.error)"
  event_map:
    # Text content delta
    - match: "exists($.choices[*].delta.content)"
      emit: "PartialContentDelta"
      fields:
        content: "$.choices[*].delta.content"
    # Tool call start
    - match: "exists($.choices[*].delta.tool_calls[*].function.name)"
      emit: "ToolCallStarted"
      fields:
        tool_call_id: "$.choices[*].delta.tool_calls[*].id"
        tool_name: "$.choices[*].delta.tool_calls[*].function.name"
        index: "$.choices[*].delta.tool_calls[*].index"
    # Tool call arguments streaming
    - match: "exists($.choices[*].delta.tool_calls[*].function.arguments)"
      emit: "PartialToolCall"
      fields:
        arguments: "$.choices[*].delta.tool_calls[*].function.arguments"
        index: "$.choices[*].delta.tool_calls[*].index"
    # Usage metadata
    - match: "exists($.usage)"
      emit: "Metadata"
      fields:
        usage: "$.usage"
    # Finish with reason
    - match: "exists($.choices[*].finish_reason)"
      emit: "FinalCandidate"
      fields:
        finish_reason: "$.choices[*].finish_reason"
        candidate_index: "$.choices[*].index"
  stop_condition: "$.choices[0].finish_reason != null"

features:
  multi_candidate:
    support_type: "native"
    param_name: "n"

# NOTE(review): nesting reconstructed from a flattened source — confirm against
# schemas/v1.json whether response_mapping is top-level or belongs under features.
response_mapping:
  tool_calls:
    path: "choices[0].message.tool_calls"
    fields:
      id: "id"
      name: "function.name"
      args: "function.arguments"

error:
  message_path: "error.message"
  code_path: "error.code"
  type_path: "error.type"
  param_path: "error.param"

# "vision" added: the pixtral-small-latest and pixtral-large-latest model
# entries served by this provider declare vision, so the provider-level
# capability set must include it or capability validation for those models fails.
capabilities: [chat, vision, tools, streaming, parallel_tools, agentic]
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Provider missing vision capability declared by Pixtral models

Medium Severity

The Mistral provider's capabilities list at line 159 is [chat, tools, streaming, parallel_tools, agentic] but does not include vision. However, pixtral-small-latest and pixtral-large-latest models both declare vision in their capabilities. Other providers (anthropic, openai, gemini, qwen) all include vision at the provider level when their models support it. This inconsistency could cause capability validation failures or prevent vision functionality from being properly detected for Pixtral models.

Additional Locations (2)

Fix in Cursor Fix in Web

Loading