-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
72 lines (63 loc) · 3.02 KB
/
.env.example
File metadata and controls
72 lines (63 loc) · 3.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
# Rename this file to .env once you have filled in the below environment variables!
# Whether to enable file logging
USAGE_LOG_FILE=true
# Include this environment variable if you want more logging for debugging locally
LOG_LEVEL=debug
# Operating environment, different from NODE_ENV. NODE_ENV is determined at build time, while this variable is used for enabling certain features in different environments
# development | production | test
OPERATING_ENV=production
# Resource file storage location
STORAGE_DIR=/public/uploads
# Maximum upload size for attachments
MAX_UPLOAD_SIZE_MB=5
# Maximum number of response segments when the maximum token count is reached
MAX_RESPONSE_SEGMENTS=5
# Default maximum number of tokens for a single chat response
MAX_TOKENS=8000
# Example Context Values for qwen2.5-coder:32b
#
# DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM
# DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM
# DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM
# DEFAULT_NUM_CTX=6144 # Consumes 24GB of VRAM
DEFAULT_NUM_CTX=
# Enabled model providers, currently supporting Anthropic, Cohere, Deepseek, DouBao, Ernie, Google, Groq,
# HuggingFace, Hyperbolic, Kimi, Mistral, Ollama, OpenAI, OpenRouter, OpenAILike, Perplexity, Qwen, xAI,
# ZhiPu, Together, LMStudio, AmazonBedrock, Github
LLM_PROVIDER=
# BASE URL of the current model provider, some providers require this to be set, such as OpenAI, Ollama, LMStudio
# DON'T use http://localhost:11434 (it can resolve to IPv6 and fail to connect)
# Use the IPv4 address instead, e.g. http://127.0.0.1:11434
PROVIDER_BASE_URL=
# API KEY of the current provider, used to request the model API. Some providers do not require this to be set.
# Specifically, if the model provider is AmazonBedrock, this should be a JSON string, reference:
# https://console.aws.amazon.com/iam/home
# The JSON should include the following keys:
# - region: The AWS region where Bedrock is available.
# - accessKeyId: Your AWS access key ID.
# - secretAccessKey: Your AWS secret access key.
# - sessionToken (optional): Temporary session token if using an IAM role or temporary credentials.
# Example JSON:
# {"region": "us-east-1", "accessKeyId": "yourAccessKeyId", "secretAccessKey": "yourSecretAccessKey", "sessionToken": "yourSessionToken"}
PROVIDER_API_KEY=
# MODEL used for page generation (should correspond to LLM_PROVIDER)
LLM_DEFAULT_MODEL=
# MODEL used for auxiliary page generation, such as summarization and pre-analysis. (should correspond to LLM_PROVIDER)
LLM_MINOR_MODEL=
# Get your Serper API Key https://serper.dev/
SERPER_API_KEY=
# Get your Weather API Key https://www.weatherapi.com/my/
WEATHER_API_KEY=
# Environment variables required for Logto integration
# Logto endpoint
LOGTO_ENDPOINT=
# Logto application ID
LOGTO_APP_ID=
# Logto application secret
LOGTO_APP_SECRET=
# Application base URL, modify according to actual deployment environment
LOGTO_BASE_URL=http://localhost:5173
# Random 36-character string, used to encrypt Logto cookies.
LOGTO_COOKIE_SECRET=
# Whether to enable Logto authentication in development environment, set to false to not enforce authentication in development
LOGTO_ENABLE=false