Skip to content

Commit 354ceaa

Browse files
Merge pull request #369 from MervinPraison/develop
Bump version to 2.0.68 and enhance Ollama model template generation
2 parents 24f25e2 + ec142c7 commit 354ceaa

File tree

7 files changed

+204
-39
lines changed

7 files changed

+204
-39
lines changed

docker/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
FROM python:3.11-slim
22
WORKDIR /app
33
COPY . .
4-
RUN pip install flask praisonai==2.0.67 gunicorn markdown
4+
RUN pip install flask praisonai==2.0.68 gunicorn markdown
55
EXPOSE 8080
66
CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]

docs/api/praisonai/deploy.html

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ <h2 id="raises">Raises</h2>
110110
file.write(&#34;FROM python:3.11-slim\n&#34;)
111111
file.write(&#34;WORKDIR /app\n&#34;)
112112
file.write(&#34;COPY . .\n&#34;)
113-
file.write(&#34;RUN pip install flask praisonai==2.0.67 gunicorn markdown\n&#34;)
113+
file.write(&#34;RUN pip install flask praisonai==2.0.68 gunicorn markdown\n&#34;)
114114
file.write(&#34;EXPOSE 8080\n&#34;)
115115
file.write(&#39;CMD [&#34;gunicorn&#34;, &#34;-b&#34;, &#34;0.0.0.0:8080&#34;, &#34;api:app&#34;]\n&#39;)
116116

praisonai.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ class Praisonai < Formula
33

44
desc "AI tools for various AI applications"
55
homepage "https://github.com/MervinPraison/PraisonAI"
6-
url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/2.0.67.tar.gz"
6+
url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/2.0.68.tar.gz"
77
sha256 "1828fb9227d10f991522c3f24f061943a254b667196b40b1a3e4a54a8d30ce32" # Replace with actual SHA256 checksum
88
license "MIT"
99

praisonai/deploy.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def create_dockerfile(self):
5656
file.write("FROM python:3.11-slim\n")
5757
file.write("WORKDIR /app\n")
5858
file.write("COPY . .\n")
59-
file.write("RUN pip install flask praisonai==2.0.67 gunicorn markdown\n")
59+
file.write("RUN pip install flask praisonai==2.0.68 gunicorn markdown\n")
6060
file.write("EXPOSE 8080\n")
6161
file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
6262

praisonai/train.py

Lines changed: 197 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -312,42 +312,207 @@ def save_model_gguf(self):
312312

313313
def prepare_modelfile_content(self):
314314
output_model = self.config["hf_model_name"]
315-
# Determine stop tokens from config or infer based on model name
316315
model_name = self.config["model_name"].lower()
317-
if "phi" in model_name:
318-
inferred_stop_tokens = ["<|end|>", "<|user|>", "<|assistant|>"]
319-
elif "llava" in model_name:
320-
inferred_stop_tokens = ["</s>", "USER:", "ASSSISTANT:"]
321-
elif "mistral" in model_name:
322-
inferred_stop_tokens = ["[INST]", "[/INST]"]
323-
elif "qwen" in model_name:
324-
inferred_stop_tokens = ["<|endoftext|>"]
325-
elif "deepseek" in model_name:
326-
inferred_stop_tokens = ["<|begin▁of▁sentence|>", "<|end▁of▁sentence|>", "<|User|>", "<|Assistant|>"]
327-
else:
328-
inferred_stop_tokens = ["<|start_header_id|>", "<|end_header_id|>", "<|eot_id|>"]
329-
# Use stop_tokens from config if provided, otherwise use inferred
330-
model_stop_tokens = self.config.get("stop_tokens", inferred_stop_tokens)
331-
332-
gguf_path = f"{output_model}/unsloth.Q4_K_M.gguf"
333-
if not os.path.exists(gguf_path):
334-
self.model, self.hf_tokenizer = self.load_model()
335-
self.save_model_gguf()
336-
stop_parameters = "\n".join([f'PARAMETER stop "{token}"' for token in model_stop_tokens])
337-
return f"""FROM {output_model}/unsloth.Q4_K_M.gguf
338-
339-
TEMPLATE \"\"\"Below are some instructions that describe some tasks. Write responses that appropriately complete each request.{{{{ if .Prompt }}}}
340-
341-
### Instruction:
342-
{{{{ .Prompt }}}}
343-
344-
{{{{ end }}}}### Response:
345-
{{{{ .Response }}}}\"\"\"
346-
347-
{stop_parameters}
316+
# Mapping from model name keywords to their default TEMPLATE and stop tokens (and optional SYSTEM/num_ctx)
317+
mapping = {
318+
"llama": {
319+
"template": """<|start_header_id|>system<|end_header_id|>
320+
Cutting Knowledge Date: December 2023
321+
{{ if .System }}{{ .System }}
322+
{{- end }}
323+
{{- if .Tools }}When you receive a tool call response, use the output to format an answer to the original user question.
324+
You are a helpful assistant with tool calling capabilities.
325+
{{- end }}<|eot_id|>
326+
{{- range $i, $_ := .Messages }}
327+
{{- $last := eq (len (slice $.Messages $i)) 1 }}
328+
{{- if eq .Role "user" }}<|start_header_id|>user<|end_header_id|>
329+
{{- if and $.Tools $last }}
330+
Given the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt.
331+
Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. Do not use variables.
332+
{{ range $.Tools }}
333+
{{- . }}
334+
{{ end }}
335+
{{ .Content }}<|eot_id|>
336+
{{- else }}
337+
{{ .Content }}<|eot_id|>
338+
{{- end }}{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
339+
{{ end }}
340+
{{- else if eq .Role "assistant" }}<|start_header_id|>assistant<|end_header_id|>
341+
{{- if .ToolCalls }}
342+
{{ range .ToolCalls }}
343+
{"name": "{{ .Function.Name }}", "parameters": {{ .Function.Arguments }}}{{ end }}
344+
{{- else }}
345+
{{ .Content }}
346+
{{- end }}{{ if not $last }}<|eot_id|>{{ end }}
347+
{{- else if eq .Role "tool" }}<|start_header_id|>ipython<|end_header_id|>
348+
{{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
349+
{{ end }}
350+
{{- end }}
351+
{{- end }}""",
352+
"stop_tokens": ["<|start_header_id|>", "<|end_header_id|>", "<|eot_id|>"]
353+
},
354+
"qwen": {
355+
"template": """{{- if .Suffix }}<|fim_prefix|>{{ .Prompt }}<|fim_suffix|>{{ .Suffix }}<|fim_middle|>
356+
{{- else if .Messages }}
357+
{{- if or .System .Tools }}<|im_start|>system
358+
{{- if .System }}
359+
{{ .System }}
360+
{{- end }}
361+
{{- if .Tools }}
362+
# Tools
363+
You may call one or more functions to assist with the user query.
364+
You are provided with function signatures within <tools></tools> XML tags:
365+
<tools>
366+
{{- range .Tools }}
367+
{"type": "function", "function": {{ .Function }}}
368+
{{- end }}
369+
</tools>
370+
For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
371+
<tool_call>
372+
{"name": <function-name>, "arguments": <args-json-object>}
373+
</tool_call>
374+
{{- end }}<|im_end|>
375+
{{ end }}
376+
{{- range $i, $_ := .Messages }}
377+
{{- $last := eq (len (slice $.Messages $i)) 1 -}}
378+
{{- if eq .Role "user" }}<|im_start|>user
379+
{{ .Content }}<|im_end|>
380+
{{ else if eq .Role "assistant" }}<|im_start|>assistant
381+
{{ if .Content }}{{ .Content }}
382+
{{- else if .ToolCalls }}<tool_call>
383+
{{ range .ToolCalls }}{"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}}
384+
{{ end }}</tool_call>
385+
{{- end }}{{ if not $last }}<|im_end|>
386+
{{ end }}
387+
{{- else if eq .Role "tool" }}<|im_start|>user
388+
<tool_response>
389+
{{ .Content }}
390+
</tool_response><|im_end|>
391+
{{ end }}
392+
{{- if and (ne .Role "assistant") $last }}<|im_start|>assistant
393+
{{ end }}
394+
{{- end }}
395+
{{- else }}
396+
{{- if .System }}<|im_start|>system
397+
{{ .System }}<|im_end|>
398+
{{ end }}{{ if .Prompt }}<|im_start|>user
399+
{{ .Prompt }}<|im_end|>
400+
{{ end }}<|im_start|>assistant
401+
{{ end }}{{ .Response }}{{ if .Response }}<|im_end|>{{ end }}""",
402+
"system": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
403+
"num_ctx": 32768,
404+
"stop_tokens": ["<|endoftext|>"]
405+
},
406+
"mistral": {
407+
"template": "[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} [/INST]",
408+
"stop_tokens": ["[INST]", "[/INST]"]
409+
},
410+
"phi": {
411+
"template": """{{- range $i, $_ := .Messages }}
412+
{{- $last := eq (len (slice $.Messages $i)) 1 -}}
413+
<|im_start|>{{ .Role }}<|im_sep|>
414+
{{ .Content }}{{ if not $last }}<|im_end|>
415+
{{ end }}
416+
{{- if and (ne .Role "assistant") $last }}<|im_end|>
417+
<|im_start|>assistant<|im_sep|>
418+
{{ end }}
419+
{{- end }}""",
420+
"stop_tokens": ["<|im_start|>", "<|im_end|>", "<|im_sep|>"]
421+
},
422+
"deepseek": {
423+
"template": """{{- if .System }}{{ .System }}{{ end }}
424+
{{- range $i, $_ := .Messages }}
425+
{{- $last := eq (len (slice $.Messages $i)) 1}}
426+
{{- if eq .Role "user" }}<|User|>{{ .Content }}
427+
{{- else if eq .Role "assistant" }}<|Assistant|>{{ .Content }}{{- if not $last }}<|end▁of▁sentence|>{{- end }}
428+
{{- end }}
429+
{{- if and $last (ne .Role "assistant") }}<|Assistant|>{{- end }}
430+
{{- end }}""",
431+
"stop_tokens": ["<|begin▁of▁sentence|>", "<|end▁of▁sentence|>", "<|User|>", "<|Assistant|>"]
432+
},
433+
"llava": {
434+
"template": """{{- if .Suffix }}<|fim_prefix|>{{ .Prompt }}<|fim_suffix|>{{ .Suffix }}<|fim_middle|>
435+
{{- else if .Messages }}
436+
{{- if or .System .Tools }}<|im_start|>system
437+
{{- if .System }}
438+
{{ .System }}
439+
{{- end }}
440+
{{- if .Tools }}
441+
# Tools
442+
You may call one or more functions to assist with the user query.
443+
You are provided with function signatures within <tools></tools> XML tags:
444+
<tools>
445+
{{- range .Tools }}
446+
{"type": "function", "function": {{ .Function }}}
447+
{{- end }}
448+
</tools>
449+
For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
450+
<tool_call>
451+
{"name": <function-name>, "arguments": <args-json-object>}
452+
</tool_call>
453+
{{- end }}<|im_end|>
454+
{{ end }}
455+
{{- range $i, $_ := .Messages }}
456+
{{- $last := eq (len (slice $.Messages $i)) 1 -}}
457+
{{- if eq .Role "user" }}<|im_start|>user
458+
{{ .Content }}<|im_end|>
459+
{{ else if eq .Role "assistant" }}<|im_start|>assistant
460+
{{ if .Content }}{{ .Content }}
461+
{{- else if .ToolCalls }}<tool_call>
462+
{{ range .ToolCalls }}{"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}}
463+
{{ end }}</tool_call>
464+
{{- end }}{{ if not $last }}<|im_end|>
465+
{{ end }}
466+
{{- else if eq .Role "tool" }}<|im_start|>user
467+
<tool_response>
468+
{{ .Content }}
469+
</tool_response><|im_end|>
470+
{{ end }}
471+
{{- if and (ne .Role "assistant") $last }}<|im_start|>assistant
472+
{{ end }}
473+
{{- end }}
474+
{{- else }}
475+
{{- if .System }}<|im_start|>system
476+
{{ .System }}<|im_end|>
477+
{{ end }}{{ if .Prompt }}<|im_start|>user
478+
{{ .Prompt }}<|im_end|>
479+
{{ end }}<|im_start|>assistant
480+
{{ end }}{{ .Response }}{{ if .Response }}<|im_end|>{{ end }}""",
481+
"stop_tokens": ["</s>", "USER:", "ASSISTANT:"]
482+
}
483+
}
484+
# Select mapping by checking if any key is in the model_name.
485+
chosen = None
486+
for key, settings in mapping.items():
487+
if key in model_name:
488+
chosen = settings
489+
break
490+
if chosen is None:
491+
# Fallback default
492+
chosen = {
493+
"template": """{{ if .System }}<|start_header_id|>system<|end_header_id|>
494+
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
495+
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
496+
{{ .Response }}<|eot_id|>""",
497+
"stop_tokens": ["<|start_header_id|>", "<|end_header_id|>", "<|eot_id|>"]
498+
}
499+
# Build the stop parameter lines.
500+
stop_params = "\n".join([f"PARAMETER stop {token}" for token in chosen["stop_tokens"]])
501+
# Optionally include a SYSTEM line and num_ctx if defined in the mapping.
502+
system_line = ""
503+
if "system" in chosen:
504+
system_line = f"SYSTEM {chosen['system']}\n"
505+
num_ctx_line = ""
506+
if "num_ctx" in chosen:
507+
num_ctx_line = f"PARAMETER num_ctx {chosen['num_ctx']}\n"
508+
# Assemble and return the modelfile content.
509+
return f"""FROM {output_model}
510+
TEMPLATE \"\"\"{chosen['template']}\"\"\"
511+
{system_line}{num_ctx_line}{stop_params}
348512
"""
349513

350514

515+
351516
def create_and_push_ollama_model(self):
352517
modelfile_content = self.prepare_modelfile_content()
353518
with open("Modelfile", "w") as file:

pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "PraisonAI"
3-
version = "2.0.67"
3+
version = "2.0.68"
44
description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
55
readme = "README.md"
66
license = ""
@@ -84,7 +84,7 @@ autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.7", "crewai"]
8484

8585
[tool.poetry]
8686
name = "PraisonAI"
87-
version = "2.0.67"
87+
version = "2.0.68"
8888
description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human-agent collaboration."
8989
authors = ["Mervin Praison"]
9090
license = ""

uv.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)