diff --git a/schemas/v1.json b/schemas/v1.json index e1d03b7..98b46a8 100644 --- a/schemas/v1.json +++ b/schemas/v1.json @@ -1,5 +1,4 @@ { - "$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://raw.githubusercontent.com/hiddenpath/ai-protocol/main/schemas/v1.json", "title": "AI-Protocol v1.1 Specification", "description": "JSON Schema for AI-Protocol v1.1 provider and model specifications", diff --git a/scripts/validate-configs.sh b/scripts/validate-configs.sh index 842b205..be7cb3f 100644 --- a/scripts/validate-configs.sh +++ b/scripts/validate-configs.sh @@ -36,6 +36,8 @@ validate_file() { echo -n "Validating $file... " + # The schema file intentionally declares no "$schema" (the draft 2020-12 reference was removed), + # so ajv-cli validates against its default meta-schema, draft-07 (its best-supported draft) if ajv validate -s "$schema" -d "$file" >/dev/null 2>&1; then echo -e "${GREEN}✅ PASSED${NC}" ((PASSED++)) diff --git a/v1/models/mistral.yaml b/v1/models/mistral.yaml new file mode 100644 index 0000000..7f8fcc4 --- /dev/null +++ b/v1/models/mistral.yaml @@ -0,0 +1,77 @@ +$schema: "https://raw.githubusercontent.com/hiddenpath/ai-protocol/main/schemas/v1.json" + +# Mistral Model Instances +# Reference: https://docs.mistral.ai/api/ + +models: + mistral-small-latest: + provider: mistral + model_id: "mistral-small-latest" + display_name: "Mistral Small" + context_window: 32000 + capabilities: [chat, tools, streaming, parallel_tools] + status: active + tags: ["mistral", "small", "tools", "latest"] + + mistral-medium-latest: + provider: mistral + model_id: "mistral-medium-latest" + display_name: "Mistral Medium" + context_window: 32000 + capabilities: [chat, tools, streaming, parallel_tools] + status: active + tags: ["mistral", "medium", "tools", "latest"] + + mistral-large-latest: + provider: mistral + model_id: "mistral-large-latest" + display_name: "Mistral Large" + context_window: 32000 + capabilities: [chat, tools, streaming, parallel_tools, agentic] + status: active + tags: ["mistral", "large", 
"tools", "agentic", "latest"] + + pixtral-small-latest: + provider: mistral + model_id: "pixtral-small-latest" + display_name: "Pixtral Small" + context_window: 128000 + capabilities: [chat, vision, tools, streaming, parallel_tools] + status: active + tags: ["mistral", "pixtral", "vision", "small", "latest"] + + pixtral-large-latest: + provider: mistral + model_id: "pixtral-large-latest" + display_name: "Pixtral Large" + context_window: 128000 + capabilities: [chat, vision, tools, streaming, parallel_tools, agentic] + status: active + tags: ["mistral", "pixtral", "vision", "large", "agentic", "latest"] + + open-mistral-7b: + provider: mistral + model_id: "open-mistral-7b" + display_name: "Open Mistral 7B" + context_window: 32000 + capabilities: [chat, streaming] + status: active + tags: ["mistral", "open", "7b", "base"] + + open-mixtral-8x7b: + provider: mistral + model_id: "open-mixtral-8x7b" + display_name: "Open Mixtral 8x7B" + context_window: 32000 + capabilities: [chat, streaming] + status: active + tags: ["mistral", "open", "mixtral", "8x7b", "base"] + + mistral-tiny: + provider: mistral + model_id: "mistral-tiny" + display_name: "Mistral Tiny" + context_window: 32000 + capabilities: [chat, streaming] + status: active + tags: ["mistral", "tiny", "cost-effective"] diff --git a/v1/providers/mistral.yaml b/v1/providers/mistral.yaml new file mode 100644 index 0000000..be430c7 --- /dev/null +++ b/v1/providers/mistral.yaml @@ -0,0 +1,159 @@ +$schema: "https://raw.githubusercontent.com/hiddenpath/ai-protocol/main/schemas/v1.json" + +id: mistral +protocol_version: "1.5" + +version: "v1" +base_url: "https://api.mistral.ai/v1" +auth: + type: bearer + token_env: "MISTRAL_API_KEY" +payload_format: "openai_style" + +# OpenAI-compatible API families +api_families: ["chat_completions"] +default_api_family: "chat_completions" +endpoints: + # Runtime uses UnifiedRequest.operation (currently "chat") to resolve endpoints. + # Keep keys aligned with runtime expectations. 
+ chat: + path: "/chat/completions" + method: "POST" + adapter: "openai" + +# Optional service endpoints (OpenAI-compatible). +services: + list_models: + path: "/models" + method: "GET" + response_binding: "data" + +# Termination reason normalization (see v1/spec.yaml: standard_schema.streaming_events.termination_reasons) +termination: + source_field: "finish_reason" + mapping: + stop: "end_turn" + length: "max_tokens" + tool_calls: "tool_use" + content_filter: "refusal" + notes: + - "OpenAI-compatible: finish_reason is reported per candidate." + +# Tool invocation normalization (see v1/spec.yaml: standard_schema.content_blocks) +tooling: + source_model: "openai_tool_calls" + tool_use: + id_path: "id" + name_path: "function.name" + input_path: "function.arguments" + input_format: "json_string" + notes: + - "OpenAI-compatible tool_calls.function.arguments is typically a JSON string; runtimes SHOULD parse it into an object when normalizing." + +# Rate limits (Mistral uses standard HTTP headers) +# Note: Mistral may use different header names, but we follow OpenAI-compatible pattern +rate_limit_headers: + requests_limit: "x-ratelimit-limit-requests" + requests_remaining: "x-ratelimit-remaining-requests" + requests_reset: "x-ratelimit-reset-requests" + tokens_limit: "x-ratelimit-limit-tokens" + tokens_remaining: "x-ratelimit-remaining-tokens" + tokens_reset: "x-ratelimit-reset-tokens" + retry_after: "retry-after" + +# Retry policy (OpenAI-compatible guidance: exponential backoff for 429; retry 500 after a brief wait) +retry_policy: + strategy: "exponential_backoff" + min_delay_ms: 1000 + jitter: "full" + retry_on_http_status: [429, 500] + notes: + - "OpenAI-compatible: exponential backoff is recommended for rate limit errors (429)." + - "Retry 500 after a brief wait." 
+ +parameter_mappings: + temperature: "temperature" + max_tokens: "max_tokens" + stream: "stream" + stop_sequences: "stop" + tools: "tools" + tool_choice: "tool_choice" + frequency_penalty: "frequency_penalty" + presence_penalty: "presence_penalty" + top_p: "top_p" + random_seed: "random_seed" + n: "n" + parallel_tool_calls: "parallel_tool_calls" + safe_prompt: "safe_prompt" + response_format: "response_format" + prompt_mode: "prompt_mode" + prediction: "prediction" + metadata: "metadata" + +response_format: "openai_style" +response_paths: + content: "choices[0].message.content" + tool_calls: "choices[0].message.tool_calls" + usage: "usage" + +streaming: + event_format: "data_lines" + decoder: + format: "sse" + delimiter: "\n\n" + prefix: "data: " + done_signal: "[DONE]" + content_path: "choices[0].delta.content" + tool_call_path: "choices[0].delta.tool_calls" + usage_path: "usage" + frame_selector: "exists($.choices) || exists($.error)" + event_map: + # Text content delta + - match: "exists($.choices[*].delta.content)" + emit: "PartialContentDelta" + fields: + content: "$.choices[*].delta.content" + # Tool call start + - match: "exists($.choices[*].delta.tool_calls[*].function.name)" + emit: "ToolCallStarted" + fields: + tool_call_id: "$.choices[*].delta.tool_calls[*].id" + tool_name: "$.choices[*].delta.tool_calls[*].function.name" + index: "$.choices[*].delta.tool_calls[*].index" + # Tool call arguments streaming + - match: "exists($.choices[*].delta.tool_calls[*].function.arguments)" + emit: "PartialToolCall" + fields: + arguments: "$.choices[*].delta.tool_calls[*].function.arguments" + index: "$.choices[*].delta.tool_calls[*].index" + # Usage metadata + - match: "exists($.usage)" + emit: "Metadata" + fields: + usage: "$.usage" + # Finish with reason + - match: "exists($.choices[*].finish_reason)" + emit: "FinalCandidate" + fields: + finish_reason: "$.choices[*].finish_reason" + candidate_index: "$.choices[*].index" + stop_condition: "$.choices[0].finish_reason 
!= null" + +features: + multi_candidate: + support_type: "native" + param_name: "n" + response_mapping: + tool_calls: + path: "choices[0].message.tool_calls" + fields: + id: "id" + name: "function.name" + args: "function.arguments" + error: + message_path: "error.message" + code_path: "error.code" + type_path: "error.type" + param_path: "error.param" + +capabilities: [chat, tools, streaming, parallel_tools, agentic]