From bea68f16fc57a7f52f7abad52f9187798834bed1 Mon Sep 17 00:00:00 2001 From: Mouse Date: Sun, 3 May 2026 01:53:52 -0700 Subject: [PATCH] fix: add strict config bounds validation --- promptlens/models/config.py | 22 +++++++++--------- tests/test_config_validation_hardening.py | 27 +++++++++++++++++++++++ 2 files changed, 38 insertions(+), 11 deletions(-) create mode 100644 tests/test_config_validation_hardening.py diff --git a/promptlens/models/config.py b/promptlens/models/config.py index 7c85798..6992c50 100644 --- a/promptlens/models/config.py +++ b/promptlens/models/config.py @@ -22,9 +22,9 @@ class ProviderConfig(BaseModel): name: str model: str api_key: Optional[str] = None - temperature: float = 0.7 - max_tokens: int = 1024 - timeout: int = 60 + temperature: float = Field(default=0.7, ge=0.0, le=2.0) + max_tokens: int = Field(default=1024, ge=1) + timeout: int = Field(default=60, ge=1) endpoint: Optional[str] = None additional_params: Dict[str, Any] = Field(default_factory=dict) @@ -44,8 +44,8 @@ class ModelConfig(BaseModel): name: str provider: str model: str - temperature: float = 0.7 - max_tokens: int = 1024 + temperature: float = Field(default=0.7, ge=0.0, le=2.0) + max_tokens: int = Field(default=1024, ge=1) additional_params: Dict[str, Any] = Field(default_factory=dict) @@ -62,7 +62,7 @@ class JudgeConfig(BaseModel): provider: str = "anthropic" model: str = "claude-3-5-sonnet-20241022" - temperature: float = 0.3 + temperature: float = Field(default=0.3, ge=0.0, le=2.0) custom_prompt: Optional[str] = None criteria: List[str] = Field(default_factory=lambda: ["accuracy", "helpfulness"]) @@ -77,10 +77,10 @@ class ExecutionConfig(BaseModel): timeout_seconds: Request timeout """ - parallel_requests: int = 3 - retry_attempts: int = 3 - retry_delay_seconds: float = 1.0 - timeout_seconds: int = 60 + parallel_requests: int = Field(default=3, ge=1) + retry_attempts: int = Field(default=3, ge=0) + retry_delay_seconds: float = Field(default=1.0, ge=0.0)
+ timeout_seconds: int = Field(default=60, ge=1) class OutputConfig(BaseModel): @@ -93,7 +93,7 @@ class OutputConfig(BaseModel): """ directory: str = "./promptlens_results" - formats: List[str] = Field(default_factory=lambda: ["html", "json"]) + formats: List[str] = Field(default_factory=lambda: ["html", "json"], min_length=1) run_name: Optional[str] = None diff --git a/tests/test_config_validation_hardening.py b/tests/test_config_validation_hardening.py new file mode 100644 index 0000000..f26b6a0 --- /dev/null +++ b/tests/test_config_validation_hardening.py @@ -0,0 +1,27 @@ +from pydantic import ValidationError + +from promptlens.models.config import ModelConfig, OutputConfig + + +def test_model_temperature_must_be_between_0_and_2() -> None: + try: + ModelConfig(name="m", provider="openai", model="gpt-4", temperature=2.1) + assert False, "Expected ValidationError" + except ValidationError: + assert True + + +def test_model_max_tokens_must_be_positive() -> None: + try: + ModelConfig(name="m", provider="openai", model="gpt-4", max_tokens=0) + assert False, "Expected ValidationError" + except ValidationError: + assert True + + +def test_output_formats_must_not_be_empty() -> None: + try: + OutputConfig(formats=[]) + assert False, "Expected ValidationError" + except ValidationError: + assert True