diff --git a/.env.example b/.env.example
index 2e007b2b..13998eb2 100644
--- a/.env.example
+++ b/.env.example
@@ -17,6 +17,7 @@ MISTRAL_API_KEY=
 MISTRAL_ENDPOINT=https://api.mistral.ai/v1
 
 OLLAMA_ENDPOINT=http://localhost:11434
+OLLAMA_HOST=http://localhost:11434
 
 ALIBABA_ENDPOINT=https://dashscope.aliyuncs.com/compatible-mode/v1
 ALIBABA_API_KEY=
diff --git a/docker-compose.yml b/docker-compose.yml
index c7e3f182..6adc567b 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -25,6 +25,7 @@ services:
       - DEEPSEEK_ENDPOINT=${DEEPSEEK_ENDPOINT:-https://api.deepseek.com}
       - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
       - OLLAMA_ENDPOINT=${OLLAMA_ENDPOINT:-http://localhost:11434}
+      - OLLAMA_HOST=${OLLAMA_HOST:-http://localhost:11434}
      - MISTRAL_ENDPOINT=${MISTRAL_ENDPOINT:-https://api.mistral.ai/v1}
       - MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
       - ALIBABA_ENDPOINT=${ALIBABA_ENDPOINT:-https://dashscope.aliyuncs.com/compatible-mode/v1}
diff --git a/src/utils/config.py b/src/utils/config.py
index b3d55fea..7c4d609b 100644
--- a/src/utils/config.py
+++ b/src/utils/config.py
@@ -18,8 +18,7 @@
     "google": ["gemini-2.0-flash", "gemini-2.0-flash-thinking-exp", "gemini-1.5-flash-latest",
                "gemini-1.5-flash-8b-latest", "gemini-2.0-flash-thinking-exp-01-21", "gemini-2.0-pro-exp-02-05",
                "gemini-2.5-pro-preview-03-25", "gemini-2.5-flash-preview-04-17"],
-    "ollama": ["qwen2.5:7b", "qwen2.5:14b", "qwen2.5:32b", "qwen2.5-coder:14b", "qwen2.5-coder:32b", "llama2:7b",
-               "deepseek-r1:14b", "deepseek-r1:32b"],
+    "ollama": ["qwen2.5:14b", "codellama:7b-instruct-q4_0"],
     "azure_openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo"],
     "mistral": ["pixtral-large-latest", "mistral-large-latest", "mistral-small-latest", "ministral-8b-latest"],
     "alibaba": ["qwen-plus", "qwen-max", "qwen-vl-max", "qwen-vl-plus", "qwen-turbo", "qwen-long"],
diff --git a/src/utils/llm_provider.py b/src/utils/llm_provider.py
index c285e365..aa7a431c 100644
--- a/src/utils/llm_provider.py
+++ b/src/utils/llm_provider.py
@@ -233,7 +233,7 @@ def get_llm_model(provider: str, **kwargs):
         )
     elif provider == "ollama":
         if not kwargs.get("base_url", ""):
-            base_url = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
+            base_url = os.getenv("OLLAMA_ENDPOINT", "http://ollama:11434")
         else:
             base_url = kwargs.get("base_url")
 
diff --git a/src/webui/components/agent_settings_tab.py b/src/webui/components/agent_settings_tab.py
index d8ebc05d..c28f4540 100644
--- a/src/webui/components/agent_settings_tab.py
+++ b/src/webui/components/agent_settings_tab.py
@@ -64,14 +64,14 @@ def create_agent_settings_tab(webui_manager: WebuiManager):
         llm_provider = gr.Dropdown(
             choices=[provider for provider, model in config.model_names.items()],
             label="LLM Provider",
-            value="openai",
+            value="ollama",
             info="Select LLM provider for LLM",
             interactive=True
         )
         llm_model_name = gr.Dropdown(
             label="LLM Model Name",
-            choices=config.model_names['openai'],
-            value="gpt-4o",
+            choices=config.model_names['ollama'],
+            value="qwen2.5:14b",
             interactive=True,
             allow_custom_value=True,
             info="Select a model in the dropdown options or directly type a custom model name"
diff --git a/src/webui/components/browser_use_agent_tab.py b/src/webui/components/browser_use_agent_tab.py
index a488e70d..e81caba0 100644
--- a/src/webui/components/browser_use_agent_tab.py
+++ b/src/webui/components/browser_use_agent_tab.py
@@ -526,6 +526,8 @@ def done_callback_wrapper(history: AgentHistoryList):
         raise ValueError(
             "Browser or Context not initialized, cannot create agent."
         )
+    print("OLLAMA_ENDPOINT:", os.environ.get("OLLAMA_ENDPOINT"))
+    print("OLLAMA_HOST:", os.environ.get("OLLAMA_HOST"))
     webui_manager.bu_agent = BrowserUseAgent(
         task=task,
         llm=main_llm,
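A minimal sketch of the endpoint fallback this diff produces, based on the ollama branch of get_llm_model() in the llm_provider.py hunk; resolve_ollama_base_url is a hypothetical stand-in name for illustration, not a function in the codebase:

```python
import os

def resolve_ollama_base_url(base_url: str | None = None) -> str:
    # Mirrors the ollama branch of get_llm_model(): an explicit base_url wins,
    # then OLLAMA_ENDPOINT, then the new default "http://ollama:11434", a
    # Docker service name only resolvable from containers on the same
    # compose network.
    if base_url:
        return base_url
    return os.getenv("OLLAMA_ENDPOINT", "http://ollama:11434")

# Outside Docker, set OLLAMA_ENDPOINT=http://localhost:11434 (as in
# .env.example); inside the compose network the service-name default applies.
print(resolve_ollama_base_url())
```

If Ollama runs on the host rather than as a compose service, the OLLAMA_ENDPOINT value from .env.example overrides the service-name default, so both setups keep working.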