---
description: "Overview of supported language models in PraisonAI, including OpenAI-compatible endpoints and LiteLLM providers"
icon: "brain"
---
# Code

## Three Ways to Set the Model

### 1. OpenAI-Compatible Endpoints

<Note>By default, the agent uses the OpenAI endpoint `https://api.openai.com/v1`; override it by setting `OPENAI_BASE_URL`.</Note>
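
With the default endpoint, only the API key needs to be exported. A minimal sketch (the key value is a placeholder):

```bash
# Default OpenAI endpoint: no OPENAI_BASE_URL override needed
export OPENAI_API_KEY=your-openai-api-key
```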

Example Groq implementation:

```bash
export OPENAI_API_KEY=your-groq-api-key
export OPENAI_BASE_URL=https://api.groq.com/openai/v1
```

```python
from praisonaiagents import Agent

agent = Agent(
    instructions="You are a helpful assistant",
    llm="llama-3.1-8b-instant",
)

agent.start("Why is the sky blue?")
```

### 2. LiteLLM-Compatible Model Names (e.g. `gemini/gemini-1.5-flash-8b`)

```bash
pip install "praisonaiagents[llm]"
```

```python
from praisonaiagents import Agent

agent = Agent(
    instructions="You are a helpful assistant",
    llm="gemini/gemini-1.5-flash-8b",
    self_reflect=True,
    verbose=True
)

agent.start("Why is the sky blue?")
```
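
The same pattern extends to any provider LiteLLM supports by swapping the prefixed model name. A sketch assuming the matching provider keys (e.g. `ANTHROPIC_API_KEY`, `GROQ_API_KEY`) are already set in the environment:

```python
from praisonaiagents import Agent

# Illustrative LiteLLM-style model names; swap in any provider/model
# pair that LiteLLM supports and that your API keys grant access to.
claude_agent = Agent(
    instructions="You are a helpful assistant",
    llm="anthropic/claude-3-haiku-20240307",
)

groq_agent = Agent(
    instructions="You are a helpful assistant",
    llm="groq/llama-3.1-8b-instant",
)
```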

### 3. LiteLLM-Compatible Configuration

```bash
pip install "praisonaiagents[llm]"
```

```python
from praisonaiagents import Agent

llm_config = {
    "model": "gemini/gemini-1.5-flash-latest",  # Model name with provider prefix

    # Core settings
    "temperature": 0.7,             # Controls randomness (0.0 = deterministic, higher = more varied)
    "timeout": 30,                  # Request timeout in seconds
    "top_p": 0.9,                   # Nucleus sampling parameter
    "max_tokens": 1000,             # Maximum tokens in the response

    # Advanced parameters
    "presence_penalty": 0.1,        # Penalize repetition of topics (-2.0 to 2.0)
    "frequency_penalty": 0.1,       # Penalize token repetition (-2.0 to 2.0)

    # API settings (optional)
    "api_key": None,                # Your API key (or use an environment variable)
    "base_url": None,               # Custom API endpoint if needed

    # Response formatting
    "response_format": {            # Force a specific response format
        "type": "text"              # Options: "text", "json_object"
    },

    # Additional controls
    "seed": 42,                     # For reproducible responses
    "stop_phrases": ["##", "END"],  # Custom stop sequences
}

agent = Agent(
    instructions="You are a helpful assistant.",
    llm=llm_config
)
agent.start("Why is the sky blue?")
```
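
To request structured JSON instead of plain text, switch `response_format` to `json_object`. A minimal sketch, assuming the chosen model actually supports that format:

```python
from praisonaiagents import Agent

# Assumption: the chosen model accepts json_object responses
llm_config = {
    "model": "gemini/gemini-1.5-flash-latest",
    "response_format": {"type": "json_object"},
}

agent = Agent(
    instructions="You are a helpful assistant. Always reply with valid JSON.",
    llm=llm_config
)
agent.start("List three reasons the sky is blue as a JSON object.")
```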

## Advanced Configuration (LiteLLM Support)

<Note>This uses LiteLLM under the hood.</Note>
<Steps>
  <Step title="Install Package">
    Install the required package:
    ```bash
    pip install "praisonaiagents[llm]"
    ```
  </Step>

  <Step title="Setup Environment">
    Configure your environment:
    ```bash
    export GOOGLE_API_KEY=your-api-key
    ```

    <Note>
    Get your API key from [Google AI Studio](https://makersuite.google.com/app/apikey)
    </Note>
  </Step>

  <Step title="Create Agent">
    Create `app.py`:

<CodeGroup>

```python Basic
# Use when json_object is supported by the model
from praisonaiagents import Agent

agent = Agent(
    instructions="You are a helpful assistant",
    llm="gemini/gemini-1.5-flash-8b",
    self_reflect=True,
    verbose=True
)

agent.start("Why is the sky blue?")
```

```python Advanced
# Use when json_object is not supported by the model
from praisonaiagents import Agent

# Detailed LLM configuration
llm_config = {
    "model": "gemini/gemini-1.5-flash-latest",  # Model name with provider prefix

    # Core settings
    "temperature": 0.7,             # Controls randomness (0.0 = deterministic, higher = more varied)
    "timeout": 30,                  # Request timeout in seconds
    "top_p": 0.9,                   # Nucleus sampling parameter
    "max_tokens": 1000,             # Maximum tokens in the response

    # Advanced parameters
    "presence_penalty": 0.1,        # Penalize repetition of topics (-2.0 to 2.0)
    "frequency_penalty": 0.1,       # Penalize token repetition (-2.0 to 2.0)

    # API settings (optional)
    "api_key": None,                # Your API key (or use an environment variable)
    "base_url": None,               # Custom API endpoint if needed

    # Response formatting
    "response_format": {            # Force a specific response format
        "type": "text"              # Options: "text", "json_object"
    },

    # Additional controls
    "seed": 42,                     # For reproducible responses
    "stop_phrases": ["##", "END"],  # Custom stop sequences
}

agent = Agent(
    instructions="You are a helpful assistant specialized in scientific explanations. "
                 "Provide clear, accurate, and engaging responses.",
    llm=llm_config,     # Pass the detailed configuration
    verbose=True,       # Enable detailed output
    markdown=True,      # Format responses in markdown
    self_reflect=True,  # Enable self-reflection
    max_reflect=3,      # Maximum reflection iterations
    min_reflect=1       # Minimum reflection iterations
)

# Test the agent
response = agent.start("Why is the sky blue? Please explain in simple terms.")
```
</CodeGroup>
  </Step>
</Steps>
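
Run the script (assuming the API key from the previous step is set):

```bash
python app.py
```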

<AccordionGroup>
<Accordion title="Ollama Integration" defaultOpen>