Commit 40cdf58

arthurbrenno committed Feb 8, 2025
1 parent 41653cb commit 40cdf58
Showing 5 changed files with 140 additions and 3 deletions.
14 changes: 13 additions & 1 deletion pyproject.toml
@@ -4,7 +4,19 @@ version = "v0.8.3"
description = "IntelliBricks provides a streamlined set of tools for developing AI-powered applications. It simplifies complex tasks such as interacting with LLMs, training machine learning models, and implementing Retrieval Augmented Generation (RAG). Focus on building your application logic, not wrestling with boilerplate. IntelliBricks empowers you to build intelligent applications faster and more efficiently."
readme = "README.md"
requires-python = ">=3.13.0"
dependencies = ["langfuse>=2.57.0", "groq>=0.13.1", "google-genai>=0.3.0", "msgspec", "langchain-core==0.3.28", "openai>=1.54.3", "beautifulsoup4>=4.12.3", "mypy>=1.14.1", "python-dotenv>=1.0.1", "architecture>=0.5.23"]
dependencies = [
"langfuse>=2.57.0",
"groq>=0.13.1",
"google-genai>=0.3.0",
"msgspec",
"langchain-core==0.3.28",
"openai>=1.54.3",
"beautifulsoup4>=4.12.3",
"mypy>=1.14.1",
"python-dotenv>=1.0.1",
"architecture>=0.5.23",
"ollama>=0.4.7",
]

[project.urls]
Source = "https://github.com/arthurbrenno/intellibricks"
Empty file.
105 changes: 105 additions & 0 deletions src/intellibricks/llms/integrations/ollama/ollama.py
@@ -0,0 +1,105 @@
# import timeit
# from typing import Literal, Optional, Sequence, TypeVar, overload, override

# import msgspec
# from architecture.utils.decorators import ensure_module_installed
# from langfuse.client import os
# from intellibricks.llms.util import ms_type_to_schema
# from intellibricks.llms.base import (
# LanguageModel,
# )
# from intellibricks.llms.constants import FinishReason
# from intellibricks.llms.types import (
# AudioTranscription,
# CalledFunction,
# ChatCompletion,
# CompletionTokensDetails,
# Function,
# GeneratedAssistantMessage,
# Message,
# MessageChoice,
# OpenAIModelType,
# Part,
# PromptTokensDetails,
# RawResponse,
# SentenceSegment,
# ToolCall,
# ToolCallSequence,
# ToolInputType,
# TypeAlias,
# Usage,
# )
# from ollama import AsyncClient

# S = TypeVar("S", bound=msgspec.Struct, default=RawResponse)
# DeepSeekModels = Literal[
# "deepseek-r1:1.5b",
# "deepseek-r1:7b",
# "deepseek-r1:8b",
# "deepseek-r1:14b",
# "deepseek-r1:32b",
# ]

# ChatModel = Literal[DeepSeekModels]


# class OllamaLanguageModel(LanguageModel, frozen=True):
# model_name: ChatModel
# max_retries: int = 2

# @overload
# async def chat_async(
# self,
# messages: Sequence[Message],
# *,
# response_model: None = None,
# n: Optional[int] = None,
# temperature: Optional[float] = None,
# max_completion_tokens: Optional[int] = None,
# top_p: Optional[float] = None,
# top_k: Optional[int] = None,
# stop_sequences: Optional[Sequence[str]] = None,
# tools: Optional[Sequence[ToolInputType]] = None,
# timeout: Optional[float] = None,
# ) -> ChatCompletion[RawResponse]: ...
# @overload
# async def chat_async(
# self,
# messages: Sequence[Message],
# *,
# response_model: type[S],
# n: Optional[int] = None,
# temperature: Optional[float] = None,
# max_completion_tokens: Optional[int] = None,
# top_p: Optional[float] = None,
# top_k: Optional[int] = None,
# stop_sequences: Optional[Sequence[str]] = None,
# tools: Optional[Sequence[ToolInputType]] = None,
# timeout: Optional[float] = None,
# ) -> ChatCompletion[S]: ...

# @ensure_module_installed("ollama", "ollama")
# @override
# async def chat_async(
# self,
# messages: Sequence[Message],
# *,
# response_model: Optional[type[S]] = None,
# n: Optional[int] = None,
# temperature: Optional[float] = None,
# max_completion_tokens: Optional[int] = None,
# top_p: Optional[float] = None,
# top_k: Optional[int] = None,
# stop_sequences: Optional[Sequence[str]] = None,
# tools: Optional[Sequence[ToolInputType]] = None,
# timeout: Optional[float] = None,
# ) -> ChatCompletion[S] | ChatCompletion[RawResponse]:
# now = timeit.default_timer()
# client = AsyncClient()
# completion = await client.chat(
# model=self.model_name,
# messages=[m.to_ollama_message() for m in messages],
# format=ms_type_to_schema(response_model, openai_like=True)
# if response_model is not None
# else None,
# )
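
For context, below is a minimal, hedged sketch of the ollama AsyncClient.chat call that the commented-out stub above is built around. The model name, prompt, and MovieReview struct are illustrative placeholders, and the hand-written JSON schema stands in for the schema the stub derives from a msgspec type via ms_type_to_schema; none of this is part of the commit. The stub itself still stops short of mapping the Ollama response into a ChatCompletion (usage, finish reason, message choices), which appears to be the remaining work.

import asyncio

import msgspec
from ollama import AsyncClient


class MovieReview(msgspec.Struct):
    title: str
    rating: int


async def main() -> None:
    # AsyncClient talks to a local Ollama server (http://localhost:11434 by default).
    client = AsyncClient()

    # Plain chat completion.
    response = await client.chat(
        model="deepseek-r1:8b",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    )
    print(response.message.content)

    # Structured output: pass a JSON schema through `format`, then decode the
    # JSON string the model returns into a msgspec Struct.
    structured = await client.chat(
        model="deepseek-r1:8b",
        messages=[{"role": "user", "content": "Review the movie Alien as JSON."}],
        format={
            "type": "object",
            "properties": {"title": {"type": "string"}, "rating": {"type": "integer"}},
            "required": ["title", "rating"],
        },
    )
    print(msgspec.json.decode(structured.message.content or "", type=MovieReview))


if __name__ == "__main__":
    asyncio.run(main())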
7 changes: 6 additions & 1 deletion src/intellibricks/llms/types.py
@@ -111,7 +111,7 @@

import msgspec
from architecture import dp, log
-from architecture.data.files import find_extension, ext_to_mime, bytes_to_mime
+from architecture.data.files import bytes_to_mime, ext_to_mime, find_extension
from architecture.utils.decorators import ensure_module_installed

from intellibricks.llms.util import (
@@ -155,6 +155,7 @@
from groq.types.shared_params.function_definition import (
    FunctionDefinition as GroqFunctionDefinition,
)
+from ollama._types import Message as OllamaMessage
from openai.types.chat.chat_completion_content_part_param import (
    ChatCompletionContentPartParam as OpenAIChatCompletionContentPartParam,
)
@@ -1897,6 +1898,10 @@ def to_google_format(self) -> GenaiContent: ...
def to_openai_format(self) -> ChatCompletionMessageParam:
    raise NotImplementedError

+# @abstractmethod
+def to_ollama_message(self) -> OllamaMessage:
+    raise NotImplementedError
+
# @abstractmethod
def to_groq_format(self) -> GroqChatCompletionMessageParam:
    raise NotImplementedError
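
As a hedged illustration of what this new hook is for (not part of the commit), the sketch below shows roughly how a simple text-only message type could implement to_ollama_message; the TextMessage struct and its field names are stand-ins, not the actual intellibricks Message API.

import msgspec
from ollama._types import Message as OllamaMessage


class TextMessage(msgspec.Struct, frozen=True):
    """Stand-in for an intellibricks message type; field names are assumed."""

    role: str
    text: str

    def to_ollama_message(self) -> OllamaMessage:
        # Ollama models a chat turn as a role plus a single content string;
        # images and tool calls would need extra handling.
        return OllamaMessage(role=self.role, content=self.text)


print(TextMessage(role="user", text="Hello!").to_ollama_message())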
17 changes: 16 additions & 1 deletion uv.lock

Some generated files are not rendered by default.
