diff --git a/.github/workflows/export.yml b/.github/workflows/export.yml
new file mode 100644
index 0000000..5670acc
--- /dev/null
+++ b/.github/workflows/export.yml
@@ -0,0 +1,45 @@
+name: Check Export
+
+env:
+ TELEMETRY_ENABLED: false
+on:
+ push:
+ branches: [main]
+ pull_request:
+ branches: [main]
+
+jobs:
+ find-folders:
+ runs-on: ubuntu-latest
+ outputs:
+ folders: ${{ steps.find-rxconfig.outputs.folders }}
+ steps:
+      - uses: actions/checkout@v4
+
+ - name: Find folders with rxconfig.py
+ id: find-rxconfig
+ run: |
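+        # Emit every folder that contains an rxconfig.py (depth <= 2) as a JSON array for the job matrix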
+ FOLDERS=$(find . -maxdepth 2 -type f -name "rxconfig.py" | xargs dirname | sed 's|^\./||' | jq -R -s -c 'split("\n")[:-1]')
+ echo "folders=$FOLDERS" >> $GITHUB_OUTPUT
+ echo "Found folders: $FOLDERS"
+
+ check-export:
+ needs: find-folders
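+    # Fan out one export check per app folder discovered above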
+ strategy:
+ matrix:
+ folder: ${{ fromJson(needs.find-folders.outputs.folders) }}
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: astral-sh/setup-uv@v6
+ with:
+          python-version: "3.12"
+ activate-environment: true
+ - name: Install dependencies
+ working-directory: ${{ matrix.folder }}
+ run: uv pip install -r requirements.txt
+ - name: Run export
+ working-directory: ${{ matrix.folder }}
+ run: uv run reflex export
diff --git a/agentic_rag/chat/chat.py b/agentic_rag/chat/chat.py
index b912884..a9d1432 100644
--- a/agentic_rag/chat/chat.py
+++ b/agentic_rag/chat/chat.py
@@ -1,6 +1,7 @@
import reflex as rx
from chat.components.chat import State, chat, action_bar, sidebar
+
def index() -> rx.Component:
"""The main app."""
return rx.box(
@@ -32,4 +33,4 @@ def index() -> rx.Component:
app = rx.App()
-app.add_page(index)
\ No newline at end of file
+app.add_page(index)
diff --git a/agentic_rag/chat/components/chat.py b/agentic_rag/chat/components/chat.py
index 8a91a1b..ee35aa1 100644
--- a/agentic_rag/chat/components/chat.py
+++ b/agentic_rag/chat/components/chat.py
@@ -5,14 +5,12 @@
import base64
from pathlib import Path
import asyncio
-import os
import time
from agno.agent import Agent
-from agno.document import Document
from agno.document.reader.pdf_reader import PDFReader
from agno.utils.log import logger
-from agno.agent import Agent, AgentMemory
+from agno.agent import AgentMemory
from agno.embedder.google import GeminiEmbedder
from agno.knowledge import AgentKnowledge
from agno.memory.db.postgres import PgMemoryDb
@@ -23,10 +21,11 @@
 import traceback
 from typing import Optional
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
+
def get_agentic_rag_agent(
model_id: str = "gemini-2.0-flash",
user_id: Optional[str] = None,
@@ -34,16 +32,13 @@ def get_agentic_rag_agent(
debug_mode: bool = True,
) -> Agent:
"""Get an Agentic RAG Agent with Memory optimized for Deepseek and PDFs."""
-
+
# Initialize Deepseek model
- model = Gemini(id=model_id)
+ model = Gemini(id=model_id)
# Define persistent memory for chat history
memory = AgentMemory(
- db=PgMemoryDb(
- table_name="pdf_agent_memory",
- db_url=db_url
- ),
+ db=PgMemoryDb(table_name="pdf_agent_memory", db_url=db_url),
create_user_memories=False,
create_session_summary=False,
)
@@ -57,10 +52,10 @@ def get_agentic_rag_agent(
embedder=GeminiEmbedder(),
),
num_documents=4, # Optimal for PDF chunking
- document_processor=PDFReader(chunk_size=1000
- ),
- batch_size=32,
- parallel_processing=True
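+        # Chunk PDFs once and ingest in parallel batches for faster indexing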
+ document_processor=PDFReader(chunk_size=1000),
+ batch_size=32,
+ parallel_processing=True,
)
# Create the PDF-focused Agent
@@ -69,10 +63,7 @@ def get_agentic_rag_agent(
session_id=session_id,
user_id=user_id,
model=model,
- storage=PostgresAgentStorage(
- table_name="pdf_agent_sessions",
- db_url=db_url
- ),
+ storage=PostgresAgentStorage(table_name="pdf_agent_sessions", db_url=db_url),
memory=memory,
knowledge=knowledge_base,
description="You are a helpful Agent called 'Agentic RAG' and your goal is to assist the user in the best way possible.",
@@ -123,10 +114,10 @@ def get_agentic_rag_agent(
# Styles
message_style = dict(
- display="inline-block",
- padding="1em",
+ display="inline-block",
+ padding="1em",
border_radius="8px",
- max_width=["30em", "30em", "50em", "50em", "50em", "50em"]
+ max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
)
SIDEBAR_STYLE = dict(
@@ -148,14 +139,18 @@ def get_agentic_rag_agent(
_hover={"bg": rx.color("mauve", 3)},
)
+
@dataclass
class QA:
"""A question and answer pair."""
+
question: str
answer: str
+
class LoadingIcon(rx.Component):
"""A custom loading icon component."""
+
library = "react-loading-icons"
tag = "SpinningCircles"
stroke: rx.Var[str]
@@ -169,11 +164,13 @@ class LoadingIcon(rx.Component):
def get_event_triggers(self) -> dict:
return {"on_change": lambda status: [status]}
+
loading_icon = LoadingIcon.create
class State(rx.State):
"""The app state."""
+
chats: List[List[QA]] = [[]]
base64_pdf: str = ""
uploading: bool = False
@@ -193,7 +190,7 @@ class Config:
exclude = {"_temp_dir"}
json_encoders = {
Path: lambda v: str(v),
- tempfile.TemporaryDirectory: lambda v: None
+ tempfile.TemporaryDirectory: lambda v: None,
}
def _create_agent(self) -> Agent:
@@ -202,12 +199,12 @@ def _create_agent(self) -> Agent:
# Generate a consistent session ID based on current chat
if not self._session_id:
self._session_id = f"session_{int(time.time())}"
-
+
return get_agentic_rag_agent(
model_id="gemini-2.0-flash",
session_id=self._session_id,
user_id=None,
- debug_mode=True
+ debug_mode=True,
)
except Exception as e:
logger.error(f"Agent creation error: {str(e)}")
@@ -225,7 +222,7 @@ async def handle_upload(self, files: List[rx.UploadFile]):
file = files[0]
upload_data = await file.read()
-
+
# Create persistent temp directory
if self._temp_dir is None:
self._temp_dir = Path(tempfile.mkdtemp())
@@ -255,7 +252,7 @@ async def handle_upload(self, files: List[rx.UploadFile]):
return
# Store base64 for preview
- base64_pdf = base64.b64encode(upload_data).decode('utf-8')
+ base64_pdf = base64.b64encode(upload_data).decode("utf-8")
self.base64_pdf = base64_pdf
self.knowledge_base_files.append(file.filename)
@@ -265,7 +262,7 @@ async def handle_upload(self, files: List[rx.UploadFile]):
finally:
self.uploading = False
yield
-
+
@rx.event(background=True)
async def process_question(self, form_data: dict):
"""Process a question using streaming responses"""
@@ -273,7 +270,7 @@ async def process_question(self, form_data: dict):
return
question = form_data["question"]
-
+
async with self:
self.processing = True
self.chats[self.current_chat].append(QA(question=question, answer=""))
@@ -291,7 +288,9 @@ def run_stream():
stream_response = agent.run(question, stream=True)
for chunk in stream_response:
if chunk.content:
- asyncio.run_coroutine_threadsafe(queue.put(chunk.content), loop)
+ asyncio.run_coroutine_threadsafe(
+ queue.put(chunk.content), loop
+ )
asyncio.run_coroutine_threadsafe(queue.put(None), loop)
except Exception as e:
error_msg = f"Error: {str(e)}"
@@ -308,7 +307,7 @@ def run_stream():
if isinstance(chunk, str) and chunk.startswith("Error: "):
answer_content = chunk
break
-
+
answer_content += chunk
async with self:
self.chats[self.current_chat][-1].answer = answer_content
@@ -326,7 +325,6 @@ def run_stream():
async with self:
self.processing = False
yield
-
def clear_knowledge_base(self):
"""Clear knowledge base and reset state"""
@@ -334,7 +332,7 @@ def clear_knowledge_base(self):
# Create temporary agent to clear vector store
agent = self._create_agent()
agent.knowledge.vector_db.delete()
-
+
# Reset state
self.loaded_files.clear()
self.knowledge_base_files.clear()
@@ -344,26 +342,27 @@ def clear_knowledge_base(self):
self.upload_status = "Knowledge base cleared"
except Exception as e:
self.upload_status = f"Error clearing knowledge base: {str(e)}"
-
+
def create_new_chat(self):
"""Create a new chat"""
self.chats.append([])
self.current_chat = len(self.chats) - 1
+
def pdf_preview() -> rx.Component:
return rx.box(
rx.heading("PDF Preview", size="4", margin_bottom="1em"),
rx.cond(
State.base64_pdf != "",
rx.html(
- f'''
+ f"""
- '''
+ """
),
rx.text("No PDF uploaded yet", color="red"),
),
@@ -373,6 +372,7 @@ def pdf_preview() -> rx.Component:
overflow="hidden",
)
+
def message(qa: QA) -> rx.Component:
return rx.box(
rx.box(
@@ -398,12 +398,10 @@ def message(qa: QA) -> rx.Component:
width="100%",
)
+
def chat() -> rx.Component:
return rx.vstack(
- rx.box(
- rx.foreach(State.chats[State.current_chat], message),
- width="100%"
- ),
+ rx.box(rx.foreach(State.chats[State.current_chat], message), width="100%"),
py="8",
flex="1",
width="100%",
@@ -414,6 +412,7 @@ def chat() -> rx.Component:
padding_bottom="5em",
)
+
def action_bar() -> rx.Component:
return rx.box(
rx.vstack(
@@ -461,6 +460,7 @@ def action_bar() -> rx.Component:
width="100%",
)
+
def sidebar() -> rx.Component:
return rx.box(
rx.vstack(
@@ -509,11 +509,7 @@ def sidebar() -> rx.Component:
width="100%",
),
),
- rx.text(
- State.upload_status,
- color=rx.color("mauve", 11),
- font_size="sm"
- ),
+ rx.text(State.upload_status, color=rx.color("mauve", 11), font_size="sm"),
align_items="stretch",
height="100%",
),
diff --git a/agentic_rag/requirements.txt b/agentic_rag/requirements.txt
index 400b3fe..1595c36 100644
--- a/agentic_rag/requirements.txt
+++ b/agentic_rag/requirements.txt
@@ -1,10 +1,11 @@
+reflex==0.7.11
agno
google-generativeai
-reflex
bs4
duckduckgo-search
qdrant-client
pgvector
psycopg[binary]
pypdf
-sqlalchemy
\ No newline at end of file
+sqlalchemy
+google-genai
\ No newline at end of file
diff --git a/ai_stock_analyst_agent/agent/agent.py b/ai_stock_analyst_agent/agent/agent.py
index f1c1744..c1d5cf4 100644
--- a/ai_stock_analyst_agent/agent/agent.py
+++ b/ai_stock_analyst_agent/agent/agent.py
@@ -1,7 +1,6 @@
import reflex as rx
-from typing import List, Optional
+from typing import List
from dataclasses import dataclass
-import time
import asyncio
from textwrap import dedent
@@ -9,16 +8,20 @@
from agno.models.google import Gemini
from agno.tools.yfinance import YFinanceTools
+
# Data Models
@dataclass
class QA:
"""A question and answer pair."""
+
question: str
answer: str
+
# Custom Loading Icon
class LoadingIcon(rx.Component):
"""A custom loading icon component."""
+
library = "react-loading-icons"
tag = "SpinningCircles"
stroke: rx.Var[str]
@@ -32,14 +35,15 @@ class LoadingIcon(rx.Component):
def get_event_triggers(self) -> dict:
return {"on_change": lambda status: [status]}
+
loading_icon = LoadingIcon.create
# Styles
message_style = dict(
- display="inline-block",
- padding="1em",
+ display="inline-block",
+ padding="1em",
border_radius="8px",
- max_width=["30em", "30em", "50em", "50em", "50em", "50em"]
+ max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
)
SIDEBAR_STYLE = dict(
@@ -69,9 +73,11 @@ def get_event_triggers(self) -> dict:
"margin_right": "2",
}
+
# Application State
class State(rx.State):
"""The app state."""
+
chats: List[List[QA]] = [[]]
current_chat: int = 0
processing: bool = False
@@ -136,7 +142,7 @@ async def process_question(self, form_data: dict):
return
question = form_data["question"]
-
+
async with self:
self.processing = True
self.chats[self.current_chat].append(QA(question=question, answer=""))
@@ -144,12 +150,14 @@ async def process_question(self, form_data: dict):
try:
agent = self._create_agent()
- response = agent.run(question, stream=True)
+ response = agent.run(question, stream=True)
async with self:
answer_content = ""
for chunk in response: # Process each chunk of the response
- if hasattr(chunk, "content"): # Check if the chunk has a `content` attribute
+ if hasattr(
+ chunk, "content"
+ ): # Check if the chunk has a `content` attribute
answer_content += chunk.content
else:
answer_content += str(chunk)
@@ -158,7 +166,7 @@ async def process_question(self, form_data: dict):
self.chats[self.current_chat][-1].answer = answer_content
self.chats = self.chats
yield
- asyncio.sleep(0.05)
+                    await asyncio.sleep(0.05)
except Exception as e:
answer_content = f"Error processing question: {str(e)}"
@@ -179,12 +187,13 @@ def remove_from_watchlist(self, symbol: str):
"""Remove a stock from the watchlist"""
if symbol in self.watchlist:
self.watchlist.remove(symbol)
-
+
def create_new_chat(self):
"""Create a new chat"""
self.chats.append([])
self.current_chat = len(self.chats) - 1
+
# UI Components
def message(qa: QA) -> rx.Component:
return rx.box(
@@ -211,12 +220,10 @@ def message(qa: QA) -> rx.Component:
width="100%",
)
+
def chat() -> rx.Component:
return rx.vstack(
- rx.box(
- rx.foreach(State.chats[State.current_chat], message),
- width="100%"
- ),
+ rx.box(rx.foreach(State.chats[State.current_chat], message), width="100%"),
py="8",
flex="1",
width="100%",
@@ -227,6 +234,7 @@ def chat() -> rx.Component:
padding_bottom="5em",
)
+
def action_bar() -> rx.Component:
return rx.box(
rx.vstack(
@@ -275,6 +283,7 @@ def action_bar() -> rx.Component:
width="100%",
)
+
def sidebar() -> rx.Component:
return rx.box(
rx.vstack(
@@ -288,7 +297,7 @@ def sidebar() -> rx.Component:
rx.foreach(
State.watchlist,
lambda symbol: rx.hstack(
- rx.text(
+ rx.text(
"×", # Using × symbol as remove icon
on_click=lambda: State.remove_from_watchlist(symbol),
**REMOVE_ICON_STYLE,
@@ -296,7 +305,9 @@ def sidebar() -> rx.Component:
rx.text(symbol, font_size="sm"),
rx.button(
"Analyze",
- on_click=lambda: State.process_question({"question": f"Analyze {symbol}'s performance"}),
+ on_click=lambda: State.process_question(
+ {"question": f"Analyze {symbol}'s performance"}
+ ),
size="2",
**STOCK_BUTTON_STYLE,
),
@@ -360,4 +371,4 @@ def index() -> rx.Component:
app = rx.App()
-app.add_page(index)
\ No newline at end of file
+app.add_page(index)
diff --git a/ai_stock_analyst_agent/requirements.txt b/ai_stock_analyst_agent/requirements.txt
index 4af33e3..48a2d59 100644
--- a/ai_stock_analyst_agent/requirements.txt
+++ b/ai_stock_analyst_agent/requirements.txt
@@ -1,5 +1,6 @@
+reflex==0.7.11
agno
google-generativeai
-reflex
duckduckgo-search
-yfinance
\ No newline at end of file
+yfinance
+google-genai
\ No newline at end of file
diff --git a/browser_use_locally/browser_agent/browser_agent.py b/browser_use_locally/browser_agent/browser_agent.py
index 868edf9..9a8bceb 100644
--- a/browser_use_locally/browser_agent/browser_agent.py
+++ b/browser_use_locally/browser_agent/browser_agent.py
@@ -1,6 +1,5 @@
import reflex as rx
from langchain_ollama import ChatOllama
-import browser_use
from browser_use import Agent
import asyncio
@@ -19,7 +18,7 @@ async def execute_task(self):
self.output = ""
yield
await asyncio.sleep(1)
-
+
result = await self.run_search()
async with self:
self.output = result.final_result()
@@ -88,4 +87,4 @@ def index():
# Run the Reflex App
app = rx.App()
-app.add_page(index)
\ No newline at end of file
+app.add_page(index)
diff --git a/browser_use_locally/requirements.txt b/browser_use_locally/requirements.txt
index 5161a5b..a6d3fa8 100644
--- a/browser_use_locally/requirements.txt
+++ b/browser_use_locally/requirements.txt
@@ -1,5 +1,5 @@
-browser-use==0.1.27
+reflex==0.7.11
+browser-use
langchain-ollama==0.2.2
-ollama==0.4.7
-playwright==1.49.1
-reflex==0.6.8
\ No newline at end of file
+ollama
+playwright
\ No newline at end of file
diff --git a/browser_use_locally/rxconfig.py b/browser_use_locally/rxconfig.py
index 18aab41..38b2187 100644
--- a/browser_use_locally/rxconfig.py
+++ b/browser_use_locally/rxconfig.py
@@ -1,6 +1,5 @@
import reflex as rx
config = rx.Config(
- app_name="browser_agent",
- state_manager_mode = rx.constants.StateManagerMode.MEMORY
+ app_name="browser_agent", state_manager_mode=rx.constants.StateManagerMode.MEMORY
)
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/__init__.py b/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/__init__.py
deleted file mode 100644
index b153f28..0000000
--- a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .loading_icon import loading_icon
-from .navbar import navbar
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/requirements.txt b/chat_with_deepseek_r1_locally/deepseek_r1_chatui/requirements.txt
deleted file mode 100644
index 488eefe..0000000
--- a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-reflex>=0.6.7
-langchain
-ollama==0.4.5
\ No newline at end of file
diff --git a/chat_with_github/chat/chat.py b/chat_with_github/chat/chat.py
index d48ae0c..06a6cb3 100644
--- a/chat_with_github/chat/chat.py
+++ b/chat_with_github/chat/chat.py
@@ -6,7 +6,6 @@
import os
from embedchain import App
from embedchain.loaders.github import GithubLoader
-import asyncio
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")
@@ -365,4 +364,4 @@ def index():
title="GitHub Repository Chat",
description="Chat with GitHub repositories using AI",
route="/",
-)
\ No newline at end of file
+)
diff --git a/chat_with_github/requirements.txt b/chat_with_github/requirements.txt
index 59dd67c..faa206c 100644
--- a/chat_with_github/requirements.txt
+++ b/chat_with_github/requirements.txt
@@ -1,3 +1,3 @@
-embedchain==0.1.126
-reflex==0.6.8
-ollama==0.4.5
\ No newline at end of file
+reflex==0.7.11
+embedchain
+ollama
\ No newline at end of file
diff --git a/chat_with_pdf_locally/chat/chat.py b/chat_with_pdf_locally/chat/chat.py
index 43bef02..6aea59c 100644
--- a/chat_with_pdf_locally/chat/chat.py
+++ b/chat_with_pdf_locally/chat/chat.py
@@ -1,6 +1,7 @@
import reflex as rx
from chat.components.chat import State, chat, action_bar, sidebar
+
def index() -> rx.Component:
"""The main app."""
return rx.box(
@@ -30,5 +31,6 @@ def index() -> rx.Component:
background_color=rx.color("mauve", 1),
)
+
app = rx.App()
-app.add_page(index)
\ No newline at end of file
+app.add_page(index)
diff --git a/chat_with_pdf_locally/chat/components/chat.py b/chat_with_pdf_locally/chat/components/chat.py
index af293bb..d4618f3 100644
--- a/chat_with_pdf_locally/chat/components/chat.py
+++ b/chat_with_pdf_locally/chat/components/chat.py
@@ -3,17 +3,16 @@
from dataclasses import dataclass
import tempfile
import base64
-from pathlib import Path
import asyncio
from embedchain import App
# Styles
message_style = dict(
- display="inline-block",
- padding="1em",
+ display="inline-block",
+ padding="1em",
border_radius="8px",
- max_width=["30em", "30em", "50em", "50em", "50em", "50em"]
+ max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
)
SIDEBAR_STYLE = dict(
@@ -35,12 +34,15 @@
_hover={"bg": rx.color("mauve", 3)},
)
+
@dataclass
class QA:
"""A question and answer pair."""
+
question: str
answer: str
+
class LoadingIcon(rx.Component):
"""A custom loading icon component."""
@@ -60,6 +62,7 @@ def get_event_triggers(self) -> dict:
loading_icon = LoadingIcon.create
+
class State(rx.State):
"""The app state."""
@@ -83,16 +86,17 @@ def get_app(self):
"max_tokens": 250,
"temperature": 0.5,
"stream": True,
- "base_url": 'http://localhost:11434'
- }
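+                    # Default local Ollama endpoint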
+ "base_url": "http://localhost:11434",
+ },
},
"vectordb": {"provider": "chroma", "config": {"dir": self.db_path}},
"embedder": {
"provider": "ollama",
"config": {
"model": "llama3.2:latest",
- "base_url": 'http://localhost:11434'
- }
+ "base_url": "http://localhost:11434",
+ },
},
}
)
@@ -104,7 +107,7 @@ async def process_question(self, form_data: dict):
return
question = form_data["question"]
-
+
async with self:
self.processing = True
self.chats[self.current_chat].append(QA(question=question, answer=""))
@@ -140,7 +143,7 @@ async def handle_upload(self, files: List[rx.UploadFile]):
file_object.write(upload_data)
# Base64 encode the PDF content
- base64_pdf = base64.b64encode(upload_data).decode('utf-8')
+ base64_pdf = base64.b64encode(upload_data).decode("utf-8")
self.base64_pdf = base64_pdf
@@ -157,6 +160,7 @@ def create_new_chat(self):
self.chats.append([])
self.current_chat = len(self.chats) - 1
+
def pdf_preview() -> rx.Component:
"""PDF preview component."""
return rx.box(
@@ -164,14 +168,14 @@ def pdf_preview() -> rx.Component:
rx.cond(
State.base64_pdf != "",
rx.html(
- f'''
+ f"""
- '''
+ """
),
rx.text("No PDF uploaded yet", color="red"),
),
@@ -182,7 +186,6 @@ def pdf_preview() -> rx.Component:
)
-
def message(qa: QA) -> rx.Component:
"""A single question/answer message."""
return rx.box(
@@ -209,13 +212,11 @@ def message(qa: QA) -> rx.Component:
width="100%",
)
+
def chat() -> rx.Component:
"""List all the messages in a conversation."""
return rx.vstack(
- rx.box(
- rx.foreach(State.chats[State.current_chat], message),
- width="100%"
- ),
+ rx.box(rx.foreach(State.chats[State.current_chat], message), width="100%"),
py="8",
flex="1",
width="100%",
@@ -226,6 +227,7 @@ def chat() -> rx.Component:
padding_bottom="5em",
)
+
def action_bar() -> rx.Component:
"""The action bar to send a new message."""
return rx.box(
@@ -274,6 +276,7 @@ def action_bar() -> rx.Component:
width="100%",
)
+
def sidebar() -> rx.Component:
"""The sidebar component."""
return rx.box(
@@ -317,13 +320,9 @@ def sidebar() -> rx.Component:
width="100%",
),
),
- rx.text(
- State.upload_status,
- color=rx.color("mauve", 11),
- font_size="sm"
- ),
+ rx.text(State.upload_status, color=rx.color("mauve", 11), font_size="sm"),
align_items="stretch",
height="100%",
),
**SIDEBAR_STYLE,
- )
\ No newline at end of file
+ )
diff --git a/chat_with_pdf_locally/requirements.txt b/chat_with_pdf_locally/requirements.txt
index f5c2e1e..faa206c 100644
--- a/chat_with_pdf_locally/requirements.txt
+++ b/chat_with_pdf_locally/requirements.txt
@@ -1,3 +1,3 @@
-embedchain==0.1.126
-reflex>=0.6.7
-ollama==0.4.5
\ No newline at end of file
+reflex==0.7.11
+embedchain
+ollama
\ No newline at end of file
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/.github/workflows/repository_dispatch.yml b/deepseek_r1_chatui/.github/workflows/repository_dispatch.yml
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/.github/workflows/repository_dispatch.yml
rename to deepseek_r1_chatui/.github/workflows/repository_dispatch.yml
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/.gitignore b/deepseek_r1_chatui/.gitignore
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/.gitignore
rename to deepseek_r1_chatui/.gitignore
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/LICENSE b/deepseek_r1_chatui/LICENSE
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/LICENSE
rename to deepseek_r1_chatui/LICENSE
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/README.md b/deepseek_r1_chatui/README.md
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/README.md
rename to deepseek_r1_chatui/README.md
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/assets/deepseek_logo.png b/deepseek_r1_chatui/assets/deepseek_logo.png
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/assets/deepseek_logo.png
rename to deepseek_r1_chatui/assets/deepseek_logo.png
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/assets/favicon.ico b/deepseek_r1_chatui/assets/favicon.ico
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/assets/favicon.ico
rename to deepseek_r1_chatui/assets/favicon.ico
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/__init__.py b/deepseek_r1_chatui/chat/__init__.py
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/__init__.py
rename to deepseek_r1_chatui/chat/__init__.py
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/chat.py b/deepseek_r1_chatui/chat/chat.py
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/chat.py
rename to deepseek_r1_chatui/chat/chat.py
diff --git a/deepseek_r1_chatui/chat/components/__init__.py b/deepseek_r1_chatui/chat/components/__init__.py
new file mode 100644
index 0000000..58f5e5b
--- /dev/null
+++ b/deepseek_r1_chatui/chat/components/__init__.py
@@ -0,0 +1,2 @@
+from .loading_icon import loading_icon as loading_icon
+from .navbar import navbar as navbar
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/chat.py b/deepseek_r1_chatui/chat/components/chat.py
similarity index 95%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/chat.py
rename to deepseek_r1_chatui/chat/components/chat.py
index 3407ad5..6ab3860 100644
--- a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/chat.py
+++ b/deepseek_r1_chatui/chat/components/chat.py
@@ -4,7 +4,12 @@
from chat.state import QA, State
-message_style = dict(display="inline-block", padding="1em", border_radius="8px", max_width=["30em", "30em", "50em", "50em", "50em", "50em"])
+message_style = dict(
+ display="inline-block",
+ padding="1em",
+ border_radius="8px",
+ max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
+)
def message(qa: QA) -> rx.Component:
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/loading_icon.py b/deepseek_r1_chatui/chat/components/loading_icon.py
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/loading_icon.py
rename to deepseek_r1_chatui/chat/components/loading_icon.py
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/modal.py b/deepseek_r1_chatui/chat/components/modal.py
similarity index 99%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/modal.py
rename to deepseek_r1_chatui/chat/components/modal.py
index 119e7d4..0979f9c 100644
--- a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/modal.py
+++ b/deepseek_r1_chatui/chat/components/modal.py
@@ -1,6 +1,7 @@
import reflex as rx
from chat.state import State
+
def modal() -> rx.Component:
"""A modal to create a new chat."""
return rx.cond(
@@ -88,4 +89,4 @@ def modal() -> rx.Component:
left="0",
z_index="1000",
),
- )
\ No newline at end of file
+ )
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/navbar.py b/deepseek_r1_chatui/chat/components/navbar.py
similarity index 78%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/navbar.py
rename to deepseek_r1_chatui/chat/components/navbar.py
index 80ec5f0..e6ad497 100644
--- a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/components/navbar.py
+++ b/deepseek_r1_chatui/chat/components/navbar.py
@@ -1,28 +1,34 @@
import reflex as rx
from chat.state import State
+
def sidebar_chat(chat: str) -> rx.Component:
"""A sidebar chat item.
Args:
chat: The chat item.
"""
- return rx.drawer.close(rx.hstack(
- rx.button(
- chat, on_click=lambda: State.set_chat(chat), width="80%", variant="surface"
- ),
- rx.button(
- rx.icon(
- tag="trash",
- on_click=State.delete_chat,
- stroke_width=1,
+ return rx.drawer.close(
+ rx.hstack(
+ rx.button(
+ chat,
+ on_click=lambda: State.set_chat(chat),
+ width="80%",
+ variant="surface",
),
- width="20%",
- variant="surface",
- color_scheme="red",
- ),
- width="100%",
- ))
+ rx.button(
+ rx.icon(
+ tag="trash",
+ on_click=State.delete_chat,
+ stroke_width=1,
+ ),
+ width="20%",
+ variant="surface",
+ color_scheme="red",
+ ),
+ width="100%",
+ )
+ )
def sidebar(trigger) -> rx.Component:
@@ -85,9 +91,12 @@ def navbar():
rx.heading("Chat with DeepSeek-r1 Locally"),
rx.desktop_only(
rx.badge(
- State.current_chat,
- rx.tooltip(rx.icon("info", size=14), content="The current selected chat."),
- variant="soft"
+ State.current_chat,
+ rx.tooltip(
+ rx.icon("info", size=14),
+ content="The current selected chat.",
+ ),
+ variant="soft",
)
),
align_items="center",
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/state.py b/deepseek_r1_chatui/chat/state.py
similarity index 85%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/state.py
rename to deepseek_r1_chatui/chat/state.py
index 3690e8b..550a85d 100644
--- a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/chat/state.py
+++ b/deepseek_r1_chatui/chat/state.py
@@ -1,23 +1,27 @@
-import os
import asyncio
-from typing import AsyncGenerator, Optional
+from typing import AsyncGenerator
import reflex as rx
from ollama import AsyncClient
from langchain.prompts import PromptTemplate
ollama_client = AsyncClient()
+
class QA(rx.Base):
"""A question and answer pair."""
+
question: str
answer: str
+
DEFAULT_CHATS = {
"Intros": [],
}
+
class State(rx.State):
"""The app state."""
+
chats: dict[str, list[QA]] = DEFAULT_CHATS
current_chat: str = "Intros"
question: str = ""
@@ -51,10 +55,7 @@ def _get_chat_history(self) -> str:
"""Get formatted chat history for the current chat."""
history = []
for qa in self.chats[self.current_chat][:-1]: # Exclude the current question
- history.extend([
- f"Human: {qa.question}",
- f"Assistant: {qa.answer}"
- ])
+ history.extend([f"Human: {qa.question}", f"Assistant: {qa.answer}"])
return "\n".join(history)
@rx.event(background=True)
@@ -84,27 +85,28 @@ async def process_question(self, form_data: dict[str, str]) -> AsyncGenerator:
Current Question: {question}
- Please provide a detailed and helpful response."""
+ Please provide a detailed and helpful response.""",
)
# Generate prompt with chat history
prompt = prompt_template.format(
- chat_history=self._get_chat_history(),
- question=question
+ chat_history=self._get_chat_history(), question=question
)
# Stream response from Ollama
async for chunk in await ollama_client.chat(
- model='deepseek-r1:1.5b',
- messages=[{'role': 'user', 'content': prompt}],
+ model="deepseek-r1:1.5b",
+ messages=[{"role": "user", "content": prompt}],
stream=True,
):
async with self:
- if 'message' in chunk and 'content' in chunk['message']:
- self.chats[self.current_chat][-1].answer += chunk['message']['content']
+ if "message" in chunk and "content" in chunk["message"]:
+ self.chats[self.current_chat][-1].answer += chunk["message"][
+ "content"
+ ]
self.chats = self.chats
yield
- await asyncio.sleep(0.05)
+ await asyncio.sleep(0.05)
except Exception as e:
async with self:
@@ -114,4 +116,4 @@ async def process_question(self, form_data: dict[str, str]) -> AsyncGenerator:
finally:
async with self:
self.processing = False
- yield
\ No newline at end of file
+ yield
diff --git a/deepseek_r1_chatui/requirements.txt b/deepseek_r1_chatui/requirements.txt
new file mode 100644
index 0000000..f9d6420
--- /dev/null
+++ b/deepseek_r1_chatui/requirements.txt
@@ -0,0 +1,3 @@
+reflex>=0.7.11
+langchain
+ollama
\ No newline at end of file
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_chatui/rxconfig.py b/deepseek_r1_chatui/rxconfig.py
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_chatui/rxconfig.py
rename to deepseek_r1_chatui/rxconfig.py
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_rag/.gitignore b/deepseek_r1_rag/.gitignore
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_rag/.gitignore
rename to deepseek_r1_rag/.gitignore
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_rag/LICENSE b/deepseek_r1_rag/LICENSE
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_rag/LICENSE
rename to deepseek_r1_rag/LICENSE
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_rag/README.md b/deepseek_r1_rag/README.md
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_rag/README.md
rename to deepseek_r1_rag/README.md
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_rag/assets/chakra_color_mode_provider.js b/deepseek_r1_rag/assets/chakra_color_mode_provider.js
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_rag/assets/chakra_color_mode_provider.js
rename to deepseek_r1_rag/assets/chakra_color_mode_provider.js
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_rag/chat/__init__.py b/deepseek_r1_rag/chat/__init__.py
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_rag/chat/__init__.py
rename to deepseek_r1_rag/chat/__init__.py
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_rag/chat/chat.py b/deepseek_r1_rag/chat/chat.py
similarity index 97%
rename from chat_with_deepseek_r1_locally/deepseek_r1_rag/chat/chat.py
rename to deepseek_r1_rag/chat/chat.py
index 4c09d7b..456b7b0 100644
--- a/chat_with_deepseek_r1_locally/deepseek_r1_rag/chat/chat.py
+++ b/deepseek_r1_rag/chat/chat.py
@@ -1,6 +1,7 @@
import reflex as rx
from chat.components.chat import State, chat, action_bar, sidebar
+
def index() -> rx.Component:
"""The main app."""
return rx.box(
@@ -32,4 +33,4 @@ def index() -> rx.Component:
app = rx.App()
-app.add_page(index)
\ No newline at end of file
+app.add_page(index)
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_rag/chat/components/chat.py b/deepseek_r1_rag/chat/components/chat.py
similarity index 92%
rename from chat_with_deepseek_r1_locally/deepseek_r1_rag/chat/components/chat.py
rename to deepseek_r1_rag/chat/components/chat.py
index 2a4cef3..aa24fa3 100644
--- a/chat_with_deepseek_r1_locally/deepseek_r1_rag/chat/components/chat.py
+++ b/deepseek_r1_rag/chat/components/chat.py
@@ -5,7 +5,6 @@
import base64
from pathlib import Path
import asyncio
-import os
from llama_index.core import VectorStoreIndex, Settings, SimpleDirectoryReader
from llama_index.llms.ollama import Ollama
@@ -14,10 +13,10 @@
# Styles remain the same
message_style = dict(
- display="inline-block",
- padding="1em",
+ display="inline-block",
+ padding="1em",
border_radius="8px",
- max_width=["30em", "30em", "50em", "50em", "50em", "50em"]
+ max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
)
SIDEBAR_STYLE = dict(
@@ -39,14 +38,18 @@
_hover={"bg": rx.color("mauve", 3)},
)
+
@dataclass
class QA:
"""A question and answer pair."""
+
question: str
answer: str
+
class LoadingIcon(rx.Component):
"""A custom loading icon component."""
+
library = "react-loading-icons"
tag = "SpinningCircles"
stroke: rx.Var[str]
@@ -60,10 +63,13 @@ class LoadingIcon(rx.Component):
def get_event_triggers(self) -> dict:
return {"on_change": lambda status: [status]}
+
loading_icon = LoadingIcon.create
+
class State(rx.State):
"""The app state."""
+
chats: List[List[QA]] = [[]]
base64_pdf: str = ""
uploading: bool = False
@@ -82,28 +88,25 @@ def setup_llamaindex(self):
if self._query_engine is None and self._temp_dir:
# Setup LLM
llm = Ollama(model="deepseek-r1:1.5b", request_timeout=120.0)
-
+
# Setup embedding model
embed_model = HuggingFaceEmbedding(
- model_name="BAAI/bge-large-en-v1.5",
- trust_remote_code=True
+ model_name="BAAI/bge-large-en-v1.5", trust_remote_code=True
)
-
+
# Configure settings
Settings.embed_model = embed_model
Settings.llm = llm
# Load documents
loader = SimpleDirectoryReader(
- input_dir=self._temp_dir,
- required_exts=[".pdf"],
- recursive=True
+ input_dir=self._temp_dir, required_exts=[".pdf"], recursive=True
)
docs = loader.load_data()
# Create index and query engine
index = VectorStoreIndex.from_documents(docs, show_progress=True)
-
+
# Setup streaming query engine with custom prompt
qa_prompt_tmpl_str = (
"Context information is below.\n"
@@ -115,7 +118,7 @@ def setup_llamaindex(self):
"Answer: "
)
qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)
-
+
self._query_engine = index.as_query_engine(streaming=True)
self._query_engine.update_prompts(
{"response_synthesizer:text_qa_template": qa_prompt_tmpl}
@@ -128,7 +131,7 @@ async def process_question(self, form_data: dict):
return
question = form_data["question"]
-
+
async with self:
self.processing = True
self.chats[self.current_chat].append(QA(question=question, answer=""))
@@ -163,11 +166,11 @@ async def handle_upload(self, files: List[rx.UploadFile]):
file = files[0]
upload_data = await file.read()
-
+
# Create temporary directory if not exists
if self._temp_dir is None:
self._temp_dir = tempfile.mkdtemp()
-
+
outfile = Path(self._temp_dir) / file.filename
self.pdf_filename = file.filename
@@ -175,12 +178,12 @@ async def handle_upload(self, files: List[rx.UploadFile]):
file_object.write(upload_data)
# Base64 encode the PDF content
- base64_pdf = base64.b64encode(upload_data).decode('utf-8')
+ base64_pdf = base64.b64encode(upload_data).decode("utf-8")
self.base64_pdf = base64_pdf
# Setup LlamaIndex
self.setup_llamaindex()
-
+
self.knowledge_base_files.append(self.pdf_filename)
self.upload_status = f"Added {self.pdf_filename} to knowledge base"
@@ -192,6 +195,7 @@ def create_new_chat(self):
self.chats.append([])
self.current_chat = len(self.chats) - 1
+
def pdf_preview() -> rx.Component:
"""PDF preview component."""
return rx.box(
@@ -199,14 +203,14 @@ def pdf_preview() -> rx.Component:
rx.cond(
State.base64_pdf != "",
rx.html(
- f'''
+ f"""
- '''
+ """
),
rx.text("No PDF uploaded yet", color="red"),
),
@@ -216,6 +220,7 @@ def pdf_preview() -> rx.Component:
overflow="hidden",
)
+
def message(qa: QA) -> rx.Component:
"""A single question/answer message."""
return rx.box(
@@ -242,13 +247,11 @@ def message(qa: QA) -> rx.Component:
width="100%",
)
+
def chat() -> rx.Component:
"""List all the messages in a conversation."""
return rx.vstack(
- rx.box(
- rx.foreach(State.chats[State.current_chat], message),
- width="100%"
- ),
+ rx.box(rx.foreach(State.chats[State.current_chat], message), width="100%"),
py="8",
flex="1",
width="100%",
@@ -259,6 +262,7 @@ def chat() -> rx.Component:
padding_bottom="5em",
)
+
def action_bar() -> rx.Component:
"""The action bar to send a new message."""
return rx.box(
@@ -307,6 +311,7 @@ def action_bar() -> rx.Component:
width="100%",
)
+
def sidebar() -> rx.Component:
"""The sidebar component."""
return rx.box(
@@ -350,13 +355,9 @@ def sidebar() -> rx.Component:
width="100%",
),
),
- rx.text(
- State.upload_status,
- color=rx.color("mauve", 11),
- font_size="sm"
- ),
+ rx.text(State.upload_status, color=rx.color("mauve", 11), font_size="sm"),
align_items="stretch",
height="100%",
),
**SIDEBAR_STYLE,
- )
\ No newline at end of file
+ )
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_rag/requirements.txt b/deepseek_r1_rag/requirements.txt
similarity index 71%
rename from chat_with_deepseek_r1_locally/deepseek_r1_rag/requirements.txt
rename to deepseek_r1_rag/requirements.txt
index 91700cb..49c15da 100644
--- a/chat_with_deepseek_r1_locally/deepseek_r1_rag/requirements.txt
+++ b/deepseek_r1_rag/requirements.txt
@@ -1,5 +1,5 @@
-reflex>=0.6.7
-ollama==0.4.5
+reflex>=0.7.11
+ollama
llama_index
llama-index-embeddings-huggingface
llama-index-llms-ollama
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_rag/rxconfig.py b/deepseek_r1_rag/rxconfig.py
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_rag/rxconfig.py
rename to deepseek_r1_rag/rxconfig.py
diff --git a/chat_with_deepseek_r1_locally/deepseek_r1_rag/uploaded_files/Attention is all you need.pdf b/deepseek_r1_rag/uploaded_files/Attention is all you need.pdf
similarity index 100%
rename from chat_with_deepseek_r1_locally/deepseek_r1_rag/uploaded_files/Attention is all you need.pdf
rename to deepseek_r1_rag/uploaded_files/Attention is all you need.pdf
diff --git a/multi_modal_ai_agent/multi_modal_agent/multi_modal_agent.py b/multi_modal_ai_agent/multi_modal_agent/multi_modal_agent.py
index f8dd591..a4fbc11 100644
--- a/multi_modal_ai_agent/multi_modal_agent/multi_modal_agent.py
+++ b/multi_modal_ai_agent/multi_modal_agent/multi_modal_agent.py
@@ -1,13 +1,11 @@
import reflex as rx
 from google import genai
-import time
import asyncio
-from typing import List
-import traceback
class State(rx.State):
"""State for the multimodal AI agent application."""
+
processing: bool = False
upload_status: str = ""
result: str = ""
@@ -21,14 +19,14 @@ async def handle_upload(self, files: list[rx.UploadFile]):
if not files:
self.upload_status = "Please select a video file."
return
-
+
try:
file = files[0]
upload_data = await file.read()
-
+
filename = file.filename
outfile = rx.get_upload_dir() / filename
-
+
# Save the file
with outfile.open("wb") as file_object:
file_object.write(upload_data)
@@ -36,11 +34,11 @@ async def handle_upload(self, files: list[rx.UploadFile]):
self.video_filename = filename
self.video = outfile
self.upload_status = "Video uploaded successfully!"
-
+
except Exception as e:
self.upload_status = f"Error uploading video: {str(e)}"
- @rx.event(background=True)
+ @rx.event(background=True)
async def analyze_video(self):
"""Process video and answer question using AI."""
if not self.question:
@@ -58,31 +56,30 @@ async def analyze_video(self):
self.result = "Analyzing Video..."
yield
await asyncio.sleep(1)
-
+
try:
client = genai.Client()
-
+
video_file = client.files.upload(file=str(self.video))
while video_file.state == "PROCESSING":
await asyncio.sleep(2)
# time.sleep(2)
video_file = client.files.get(name=video_file.name)
-
+
response = client.models.generate_content(
model="gemini-2.0-flash",
contents=[
video_file,
"Describe this video.",
- ])
-
+ ],
+ )
async with self:
self.result = response.text
self.processing = False
-
+
except Exception as e:
async with self:
- full_error = traceback.format_exc()
self.processing = False
self.result = f"An error occurred: {str(e)}"
@@ -94,50 +91,44 @@ def index() -> rx.Component:
rx.el.div(
rx.el.h1(
"Multimodal AI Agent 🕵️♀️ 💬",
- class_name="text-5xl font-bold text-white mb-4"
+ class_name="text-5xl font-bold text-white mb-4",
),
- class_name="w-full p-12 bg-gradient-to-r from-blue-600 to-blue-800 rounded-lg shadow-lg mb-8 text-center"
+ class_name="w-full p-12 bg-gradient-to-r from-blue-600 to-blue-800 rounded-lg shadow-lg mb-8 text-center",
),
-
# Upload section
rx.el.div(
rx.upload(
rx.el.div(
rx.el.button(
"Select a Video File",
- class_name="bg-white text-blue-600 px-6 py-3 rounded-lg font-semibold border-2 border-blue-600 hover:bg-blue-50 transition-colors"
+ class_name="bg-white text-blue-600 px-6 py-3 rounded-lg font-semibold border-2 border-blue-600 hover:bg-blue-50 transition-colors",
),
rx.el.p(
"Drag and drop or click to select",
- class_name="text-gray-500 mt-2"
+ class_name="text-gray-500 mt-2",
),
- class_name="text-center"
+ class_name="text-center",
),
accept={".mp4", ".mov", ".avi"},
max_files=1,
class_name="border-2 border-dashed border-gray-300 rounded-lg p-8 bg-gray-50 hover:bg-gray-100 transition-colors",
- id="upload1"
+ id="upload1",
),
rx.cond(
rx.selected_files("upload1"),
rx.el.p(
- rx.selected_files("upload1")[0],
- class_name="text-gray-600 mt-2"
+ rx.selected_files("upload1")[0], class_name="text-gray-600 mt-2"
),
rx.el.p("", class_name="mt-2"),
),
rx.el.button(
"Upload",
on_click=State.handle_upload(rx.upload_files(upload_id="upload1")),
- class_name="w-full bg-blue-600 text-white px-6 py-3 rounded-lg font-semibold hover:bg-blue-700 transition-colors mt-4"
- ),
- rx.el.p(
- State.upload_status,
- class_name="text-gray-600 mt-2"
+ class_name="w-full bg-blue-600 text-white px-6 py-3 rounded-lg font-semibold hover:bg-blue-700 transition-colors mt-4",
),
- class_name="mb-8 p-6 bg-white rounded-lg shadow-lg"
+ rx.el.p(State.upload_status, class_name="text-gray-600 mt-2"),
+ class_name="mb-8 p-6 bg-white rounded-lg shadow-lg",
),
-
# Video and Analysis section
rx.cond(
State.video_filename != "",
@@ -146,44 +137,43 @@ def index() -> rx.Component:
rx.video(
url=rx.get_upload_url(State.video_filename),
controls=True,
- class_name="w-full rounded-lg shadow-lg"
+ class_name="w-full rounded-lg shadow-lg",
),
- class_name="mb-6"
+ class_name="mb-6",
),
rx.el.textarea(
placeholder="Ask any question related to the video - the AI Agent will analyze it",
value=State.question,
on_change=State.set_question,
- class_name="w-full p-4 border-2 border-gray-300 rounded-lg focus:border-blue-600 focus:ring-1 focus:ring-blue-600 h-32 resize-none"
+ class_name="w-full p-4 border-2 border-gray-300 rounded-lg focus:border-blue-600 focus:ring-1 focus:ring-blue-600 h-32 resize-none",
),
rx.el.button(
"Analyze & Research",
on_click=State.analyze_video,
loading=State.processing,
- class_name="w-full bg-blue-600 text-white px-6 py-3 rounded-lg font-semibold hover:bg-blue-700 transition-colors mt-4"
+ class_name="w-full bg-blue-600 text-white px-6 py-3 rounded-lg font-semibold hover:bg-blue-700 transition-colors mt-4",
),
rx.cond(
State.result != "",
rx.el.div(
rx.el.h2(
"🤖 Agent Response",
- class_name="text-2xl font-bold text-gray-800 mb-4"
+ class_name="text-2xl font-bold text-gray-800 mb-4",
),
rx.markdown(
- State.result,
- class_name="prose prose-blue max-w-none"
+ State.result, class_name="prose prose-blue max-w-none"
),
- class_name="mt-8 p-6 bg-white rounded-lg shadow-lg"
+ class_name="mt-8 p-6 bg-white rounded-lg shadow-lg",
),
),
- class_name="space-y-6"
+ class_name="space-y-6",
),
),
- class_name="max-w-3xl mx-auto px-4"
+ class_name="max-w-3xl mx-auto px-4",
),
- class_name="min-h-screen bg-gray-50 py-12"
+ class_name="min-h-screen bg-gray-50 py-12",
)
app = rx.App()
-app.add_page(index)
\ No newline at end of file
+app.add_page(index)
diff --git a/multi_modal_ai_agent/multi_modal_agent/multi_modal_agent_agno.py b/multi_modal_ai_agent/multi_modal_agent/multi_modal_agent_agno.py
index 7a02f1b..59ed60e 100644
--- a/multi_modal_ai_agent/multi_modal_agent/multi_modal_agent_agno.py
+++ b/multi_modal_ai_agent/multi_modal_agent/multi_modal_agent_agno.py
@@ -3,12 +3,12 @@
from agno.agent import Agent
from agno.models.google import Gemini
from agno.tools.duckduckgo import DuckDuckGoTools
-import time
import asyncio
class State(rx.State):
"""State for the multimodal AI agent application."""
+
processing: bool = False
upload_status: str = ""
result: str = ""
@@ -20,14 +20,14 @@ async def handle_upload(self, files: list[rx.UploadFile]):
"""Handle video file upload."""
if not files:
return
-
+
try:
file = files[0]
upload_data = await file.read()
-
+
filename = file.filename
outfile = rx.get_upload_dir() / filename
-
+
# Save the file
with outfile.open("wb") as file_object:
file_object.write(upload_data)
@@ -35,11 +35,11 @@ async def handle_upload(self, files: list[rx.UploadFile]):
self.video_filename = filename
self.video = outfile
self.upload_status = "Video uploaded successfully!"
-
+
except Exception as e:
self.upload_status = f"Error uploading video: {str(e)}"
- @rx.event(background=True)
+ @rx.event(background=True)
async def analyze_video(self):
"""Process video and answer question using AI agent."""
if not self.question:
@@ -50,7 +50,7 @@ async def analyze_video(self):
self.processing = True
yield
await asyncio.sleep(1)
-
+
try:
agent = Agent(
name="Multimodal Video Analyst",
@@ -58,39 +58,39 @@ async def analyze_video(self):
tools=[DuckDuckGoTools()],
markdown=True,
)
-
+
video_file = genai.upload_file(str(self.video))
while video_file.state.name == "PROCESSING":
await asyncio.sleep(2)
# time.sleep(2)
video_file = genai.get_file(video_file.name)
-
+
prompt = f"""
First analyze this video and then answer the following question using both
the video analysis and web research: {self.question}
Provide a comprehensive response focusing on practical, actionable information.
"""
-
+
result = agent.run(prompt, videos=[video_file])
async with self:
self.result = result.content
self.processing = False
-
+
except Exception as e:
async with self:
self.processing = False
self.result = f"An error occurred: {str(e)}"
-
+
color = "rgb(107,99,246)"
+
def index():
return rx.container(
rx.vstack(
# Header section
rx.heading("Multimodal AI Agent 🕵️♀️ 💬", size="8", mb="6"),
-
# Upload section
rx.vstack(
rx.upload(
@@ -99,7 +99,7 @@ def index():
"Select a Video File",
color=color,
bg="white",
- border=f"1px solid {color}"
+ border=f"1px solid {color}",
),
rx.text("Drag and drop or click to select"),
),
@@ -107,21 +107,20 @@ def index():
max_files=1,
border="1px dashed",
padding="20px",
- id="upload1"
+ id="upload1",
),
rx.cond(
- rx.selected_files("upload1"),
- rx.text(rx.selected_files("upload1")[0]),
- rx.text(""),
- ),
+ rx.selected_files("upload1"),
+ rx.text(rx.selected_files("upload1")[0]),
+ rx.text(""),
+ ),
rx.button(
"Upload",
- on_click=State.handle_upload(rx.upload_files(upload_id="upload1"))
+ on_click=State.handle_upload(rx.upload_files(upload_id="upload1")),
),
rx.text(State.upload_status),
spacing="4",
),
-
# Video and Analysis section
rx.cond(
State.video_filename != "",
@@ -161,9 +160,9 @@ def index():
),
max_width="600px",
margin="auto",
- padding="40px"
+ padding="40px",
)
app = rx.App()
-app.add_page(index)
\ No newline at end of file
+app.add_page(index)
diff --git a/multi_modal_ai_agent/requirements.txt b/multi_modal_ai_agent/requirements.txt
index 46cfbcd..e7ccea8 100644
--- a/multi_modal_ai_agent/requirements.txt
+++ b/multi_modal_ai_agent/requirements.txt
@@ -1,4 +1,5 @@
-reflex
-phidata==2.7.2
-google-generativeai==0.8.3
+reflex==0.7.11
+phidata
+google-generativeai
+google-genai
duckduckgo-search
\ No newline at end of file
diff --git a/multi_modal_medical_agent/agent/agent.py b/multi_modal_medical_agent/agent/agent.py
index 67a7adf..d60dd97 100644
--- a/multi_modal_medical_agent/agent/agent.py
+++ b/multi_modal_medical_agent/agent/agent.py
@@ -1,14 +1,10 @@
-
import reflex as rx
-from typing import Optional
import asyncio
from phi.agent import Agent
from phi.model.google import Gemini
from phi.tools.duckduckgo import DuckDuckGo
import os
from PIL import Image
-import time
-import asyncio
# Set Google API Key from environment
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
@@ -16,6 +12,7 @@
class MedicalState(rx.State):
"""State for the medical imaging analysis application."""
+
processing: bool = False
upload_status: str = ""
analysis_result: str = ""
@@ -65,14 +62,14 @@ async def handle_upload(self, files: list[rx.UploadFile]):
"""Handle medical image upload."""
if not files:
return
-
+
try:
file = files[0]
upload_data = await file.read()
-
+
filename = file.filename
outfile = rx.get_upload_dir() / filename
-
+
# Save the file
with outfile.open("wb") as file_object:
file_object.write(upload_data)
@@ -80,37 +77,34 @@ async def handle_upload(self, files: list[rx.UploadFile]):
self.image_filename = filename
self._temp_image_path = str(outfile)
self.upload_status = "Image uploaded successfully!"
-
+
except Exception as e:
self.upload_status = f"Error uploading image: {str(e)}"
@rx.var
- def medical_agent(self):
+ def medical_agent(self) -> Agent | None:
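+        # Lazily build the agent; returns None when GOOGLE_API_KEY is unset so the UI can report it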
if GOOGLE_API_KEY:
return Agent(
- model=Gemini(
- api_key=GOOGLE_API_KEY,
- id="gemini-2.0-flash-exp"
- ),
+ model=Gemini(api_key=GOOGLE_API_KEY, id="gemini-2.0-flash-exp"),
tools=[DuckDuckGo()],
- markdown=True
+ markdown=True,
)
return None
-
- @rx.event(background=True)
+ @rx.event(background=True)
async def analyze_image(self):
"""Process image using medical AI agent."""
if not self.medical_agent:
self.analysis_result = "API Key not configured in environment"
return
-
+
async with self:
self.processing = True
self.analysis_result = ""
yield
await asyncio.sleep(1)
-
+
try:
# Process image
with Image.open(self._temp_image_path) as img:
@@ -123,11 +116,11 @@ async def analyze_image(self):
# Run analysis
result = self.medical_agent.run(self.query, images=[self._temp_image_path])
-
+
async with self:
self.analysis_result = result.content
self.processing = False
-
+
except Exception as e:
async with self:
self.processing = False
@@ -163,15 +156,14 @@ def upload_section() -> rx.Component:
rx.el.i(class_name="fas fa-upload text-3xl text-blue-500 mb-4"),
rx.el.p(
"Drop your medical image here",
- class_name="text-lg font-semibold text-gray-700 mb-2"
+ class_name="text-lg font-semibold text-gray-700 mb-2",
),
rx.el.p(
- "or click to browse",
- class_name="text-sm text-gray-500"
+ "or click to browse", class_name="text-sm text-gray-500"
),
rx.el.p(
"Supported formats: JPG, PNG",
- class_name="text-xs text-gray-400 mt-2"
+ class_name="text-xs text-gray-400 mt-2",
),
class_name="text-center",
),
@@ -191,7 +183,9 @@ def upload_section() -> rx.Component:
),
rx.el.button(
"Upload Image",
- on_click=lambda: MedicalState.handle_upload(rx.upload_files(upload_id="medical_upload")),
+ on_click=lambda: MedicalState.handle_upload(
+ rx.upload_files(upload_id="medical_upload")
+ ),
class_name="mt-4 w-full py-2 px-4 bg-gradient-to-r from-blue-500 to-cyan-500 text-white rounded-lg hover:from-blue-600 hover:to-cyan-600 transition-all duration-300 shadow-md hover:shadow-lg",
),
class_name="w-full max-w-md mx-auto",
@@ -221,7 +215,7 @@ def analysis_section() -> rx.Component:
),
rx.el.p(
"Analyzing image...",
- class_name="mt-2 text-sm text-gray-600"
+ class_name="mt-2 text-sm text-gray-600",
),
class_name="flex flex-col items-center justify-center p-4",
),
@@ -256,13 +250,13 @@ def index() -> rx.Component:
rx.el.div(
upload_section(),
analysis_section(),
- class_name="max-w-4xl mx-auto px-4 space-y-6"
+ class_name="max-w-4xl mx-auto px-4 space-y-6",
),
- class_name="py-8 bg-gray-50 min-h-screen"
+ class_name="py-8 bg-gray-50 min-h-screen",
),
- class_name="min-h-screen bg-gray-50"
+ class_name="min-h-screen bg-gray-50",
)
app = rx.App()
-app.add_page(index)
\ No newline at end of file
+app.add_page(index)
diff --git a/multi_modal_medical_agent/requirements.txt b/multi_modal_medical_agent/requirements.txt
index f84a5d5..bc70e51 100644
--- a/multi_modal_medical_agent/requirements.txt
+++ b/multi_modal_medical_agent/requirements.txt
@@ -1,4 +1,6 @@
+reflex==0.7.11
agno
google-generativeai
-reflex
-duckduckgo-search
\ No newline at end of file
+duckduckgo-search
+phidata
+pillow
\ No newline at end of file
diff --git a/news_agent/news_agent/news_agent.py b/news_agent/news_agent/news_agent.py
index c203ec9..c75d115 100644
--- a/news_agent/news_agent/news_agent.py
+++ b/news_agent/news_agent/news_agent.py
@@ -3,7 +3,6 @@
from swarm import Swarm, Agent
from datetime import datetime
from dotenv import load_dotenv
-import os
import asyncio
# Load environment variables
@@ -14,23 +13,25 @@
MODEL = "llama3.2"
client = Swarm()
+
def fetch_latest_news(topic):
"""Retrieve the latest news articles related to a given topic using DuckDuckGo."""
query = f"{topic} news {datetime.now().strftime('%Y-%m')}"
-
+
with DDGS() as search_engine:
articles = search_engine.text(query, max_results=3)
-
+
if articles:
formatted_results = "\n\n".join(
- f"Title: {article['title']}\nURL: {article['href']}\nSummary: {article['body']}"
+ f"Title: {article['title']}\nURL: {article['href']}\nSummary: {article['body']}"
for article in articles
)
return formatted_results
-
+
return f"No news articles found on the topic: {topic}."
+
# Create specialized agents
search_agent = Agent(
name="News Searcher",
@@ -41,7 +42,7 @@ def fetch_latest_news(topic):
3. Presenting the raw search results in a clear and organized manner.
""",
functions=[fetch_latest_news],
- model=MODEL
+ model=MODEL,
)
summary_agent = Agent(
@@ -71,13 +72,13 @@ def fetch_latest_news(topic):
**IMPORTANT NOTE:** Deliver the content as polished news analysis only. Avoid labels, introductions, or meta-comments. Begin directly with the story, ensuring neutrality and factual accuracy throughout.
""",
- model=MODEL
+ model=MODEL,
)
-
class State(rx.State):
"""Manage the application state."""
+
topic: str = "AI Agents"
raw_news: str = ""
final_summary: str = ""
@@ -89,12 +90,11 @@ async def process_news(self):
"""Asynchronous news processing workflow using Swarm agents"""
# Reset previous state
async with self:
-
self.is_loading = True
self.error_message = ""
self.raw_news = ""
self.final_summary = ""
-
+
yield
await asyncio.sleep(1)
@@ -102,15 +102,22 @@ async def process_news(self):
# Search news using search agent
search_response = client.run(
agent=search_agent,
- messages=[{"role": "user", "content": f"Find recent news about {self.topic}"}]
+ messages=[
+ {"role": "user", "content": f"Find recent news about {self.topic}"}
+ ],
)
async with self:
self.raw_news = search_response.messages[-1]["content"]
-
+
# Synthesize and Generate summary using summary agent
summary_response = client.run(
agent=summary_agent,
- messages=[{"role": "user", "content": f"Synthesize these news articles and summarize the synthesis:\n{self.raw_news}"}]
+ messages=[
+ {
+ "role": "user",
+ "content": f"Synthesize these news articles and summarize the synthesis:\n{self.raw_news}",
+ }
+ ],
)
async with self:
@@ -118,7 +125,6 @@ async def process_news(self):
self.is_loading = False
except Exception as e:
-
async with self:
self.error_message = f"An error occurred: {str(e)}"
self.is_loading = False
@@ -127,6 +133,7 @@ def update_topic(self, topic: str):
"""Update the search topic"""
self.topic = topic
+
def news_page() -> rx.Component:
"""Render the main news processing page"""
return rx.box(
@@ -136,10 +143,10 @@ def news_page() -> rx.Component:
placeholder="Enter the news topic",
value=State.topic,
on_change=State.update_topic,
- width="300px"
+ width="300px",
),
rx.button(
- "Process News",
+ "Process News",
on_click=State.process_news,
color_scheme="blue",
loading=State.is_loading,
@@ -149,29 +156,29 @@ def news_page() -> rx.Component:
flex_direction="column",
gap="1rem",
),
-
# Results Section
rx.cond(
State.final_summary != "",
rx.vstack(
rx.heading("📝 News Summary", size="4"),
rx.markdown(State.final_summary),
- rx.button("Copy the Summary", on_click=[rx.set_clipboard(State.final_summary), rx.toast.info("Summary copied")]),
+ rx.button(
+ "Copy the Summary",
+ on_click=[
+ rx.set_clipboard(State.final_summary),
+ rx.toast.info("Summary copied"),
+ ],
+ ),
spacing="4",
- width="100%"
- )
+ width="100%",
+ ),
),
-
spacing="4",
max_width="800px",
margin="auto",
- padding="20px"
+ padding="20px",
)
-app = rx.App(
- theme=rx.theme(
- appearance="light",
- accent_color="blue"
- )
-)
-app.add_page(news_page, route="/")
\ No newline at end of file
+
+app = rx.App(theme=rx.theme(appearance="light", accent_color="blue"))
+app.add_page(news_page, route="/")
diff --git a/news_agent/requirements.txt b/news_agent/requirements.txt
index 684c45a..afa3e7e 100644
--- a/news_agent/requirements.txt
+++ b/news_agent/requirements.txt
@@ -1,3 +1,4 @@
-reflex==0.6.6.post2
+reflex==0.7.11
git+https://github.com/openai/swarm.git
-duckduckgo-search
\ No newline at end of file
+duckduckgo-search
+python-dotenv
\ No newline at end of file
diff --git a/open_deep_researcher/requirements.txt b/open_deep_researcher/requirements.txt
index 6d6fdd5..27736a0 100644
--- a/open_deep_researcher/requirements.txt
+++ b/open_deep_researcher/requirements.txt
@@ -1,2 +1,3 @@
-reflex
-google-generativeai
\ No newline at end of file
+reflex==0.7.11
+google-generativeai
+aiohttp
\ No newline at end of file
diff --git a/open_deep_researcher/researcher/researcher.py b/open_deep_researcher/researcher/researcher.py
index 4d4dac5..c073408 100644
--- a/open_deep_researcher/researcher/researcher.py
+++ b/open_deep_researcher/researcher/researcher.py
@@ -1,9 +1,7 @@
import reflex as rx
import aiohttp
-import json
import asyncio
-import ast
-from typing import List, Dict, Optional, Tuple
+from typing import List, Dict, Optional
import google.generativeai as genai
import time
@@ -19,27 +17,37 @@
genai.configure(api_key=GEMINI_API_KEY)
+
async def call_google_gemini(messages: List[Dict]) -> Optional[str]:
"""Call Google Gemini asynchronously."""
try:
prompt = "\n".join([msg["content"] for msg in messages])
- model = genai.GenerativeModel('gemini-1.5-flash')
+ model = genai.GenerativeModel("gemini-1.5-flash")
loop = asyncio.get_event_loop()
- response = await loop.run_in_executor(None, lambda: model.generate_content(prompt))
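+        # generate_content is blocking; offload it to the default executor so the event loop stays responsive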
+ response = await loop.run_in_executor(
+ None, lambda: model.generate_content(prompt)
+ )
return response.text
- except Exception as e:
+ except Exception:
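+        # API failures are swallowed; callers treat a None return as "no response"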
return None
-async def generate_search_queries_async(session: aiohttp.ClientSession, user_query: str) -> List[str]:
+
+async def generate_search_queries_async(
+ session: aiohttp.ClientSession, user_query: str
+) -> List[str]:
"""Generate search queries based on user query."""
prompt = (
"You are an expert research assistant. Given the user's query, generate up to four distinct, "
"precise search queries that would help gather complete information on the topic. "
"Return only a valid list of plain strings. Do not include markdown, code blocks, backticks, or explanations. "
- "Just return the list itself, for example: ['query1', 'query2', 'query3'].")
+ "Just return the list itself, for example: ['query1', 'query2', 'query3']."
+ )
messages = [
- {"role": "system", "content": "You are a helpful and precise research assistant."},
- {"role": "user", "content": f"User Query: {user_query}\n\n{prompt}"}
+ {
+ "role": "system",
+ "content": "You are a helpful and precise research assistant.",
+ },
+ {"role": "user", "content": f"User Query: {user_query}\n\n{prompt}"},
]
response = await call_google_gemini(messages)
if response:
@@ -50,22 +58,24 @@ async def generate_search_queries_async(session: aiohttp.ClientSession, user_que
return []
return []
+
async def perform_search_async(session: aiohttp.ClientSession, query: str) -> List[str]:
"""Perform search using SERPAPI."""
- params = {
- "q": query,
- "api_key": SERPAPI_API_KEY,
- "engine": "google"
- }
+ params = {"q": query, "api_key": SERPAPI_API_KEY, "engine": "google"}
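+    # Query SerpAPI's Google engine; links are pulled from "organic_results" in the JSON below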
try:
async with session.get(SERPAPI_URL, params=params) as resp:
if resp.status == 200:
results = await resp.json()
- return [item.get("link") for item in results.get("organic_results", []) if "link" in item]
+ return [
+ item.get("link")
+ for item in results.get("organic_results", [])
+ if "link" in item
+ ]
return []
except Exception:
return []
+
async def fetch_webpage_text_async(session: aiohttp.ClientSession, url: str) -> str:
"""Fetch webpage text using Jina API."""
full_url = f"{JINA_BASE_URL}{url}"
@@ -76,7 +86,10 @@ async def fetch_webpage_text_async(session: aiohttp.ClientSession, url: str) ->
except Exception:
return ""
-async def is_page_useful_async(session: aiohttp.ClientSession, user_query: str, page_text: str) -> bool:
+
+async def is_page_useful_async(
+ session: aiohttp.ClientSession, user_query: str, page_text: str
+) -> bool:
"""Determine if the page content is useful for the query."""
prompt = (
"You are a critical research evaluator. Given the user's query and the content of a webpage, "
@@ -84,17 +97,21 @@ async def is_page_useful_async(session: aiohttp.ClientSession, user_query: str,
"Respond with exactly one word: 'Yes' if the page is useful, or 'No' if it is not."
)
messages = [
- {"role": "system", "content": "You are a strict and concise evaluator of research relevance."},
- {"role": "user", "content": f"User Query: {user_query}\n\nWebpage Content:\n{page_text[:20000]}\n\n{prompt}"}
+ {
+ "role": "system",
+ "content": "You are a strict and concise evaluator of research relevance.",
+ },
+ {
+ "role": "user",
+ "content": f"User Query: {user_query}\n\nWebpage Content:\n{page_text[:20000]}\n\n{prompt}",
+ },
]
response = await call_google_gemini(messages)
return response and response.strip().lower() == "yes"
+
async def extract_relevant_context_async(
- session: aiohttp.ClientSession,
- user_query: str,
- search_query: str,
- page_text: str
+ session: aiohttp.ClientSession, user_query: str, search_query: str, page_text: str
) -> str:
"""Extract relevant information from page content."""
prompt = (
@@ -102,17 +119,24 @@ async def extract_relevant_context_async(
"Return only the relevant context as plain text."
)
messages = [
- {"role": "system", "content": "You are an expert in extracting relevant information."},
- {"role": "user", "content": f"Query: {user_query}\nSearch Query: {search_query}\n\nContent:\n{page_text[:20000]}\n\n{prompt}"}
+ {
+ "role": "system",
+ "content": "You are an expert in extracting relevant information.",
+ },
+ {
+ "role": "user",
+ "content": f"Query: {user_query}\nSearch Query: {search_query}\n\nContent:\n{page_text[:20000]}\n\n{prompt}",
+ },
]
response = await call_google_gemini(messages)
return response.strip() if response else ""
+
async def get_new_search_queries_async(
session: aiohttp.ClientSession,
user_query: str,
previous_queries: List[str],
- contexts: List[str]
+ contexts: List[str],
) -> List[str]:
"""Generate new search queries based on current findings."""
prompt = (
@@ -121,7 +145,10 @@ async def get_new_search_queries_async(
)
messages = [
{"role": "system", "content": "You are a systematic research planner."},
- {"role": "user", "content": f"Query: {user_query}\nPrevious: {previous_queries}\nContexts:\n{''.join(contexts)}\n\n{prompt}"}
+ {
+ "role": "user",
+ "content": f"Query: {user_query}\nPrevious: {previous_queries}\nContexts:\n{''.join(contexts)}\n\n{prompt}",
+ },
]
response = await call_google_gemini(messages)
if response:
@@ -132,10 +159,9 @@ async def get_new_search_queries_async(
return []
return []
+
async def generate_final_report_async(
- session: aiohttp.ClientSession,
- user_query: str,
- contexts: List[str]
+ session: aiohttp.ClientSession, user_query: str, contexts: List[str]
) -> str:
"""Generate final research report."""
prompt = (
@@ -144,13 +170,18 @@ async def generate_final_report_async(
)
messages = [
{"role": "system", "content": "You are a skilled report writer."},
- {"role": "user", "content": f"Query: {user_query}\nContexts:\n{''.join(contexts)}\n\n{prompt}"}
+ {
+ "role": "user",
+ "content": f"Query: {user_query}\nContexts:\n{''.join(contexts)}\n\n{prompt}",
+ },
]
response = await call_google_gemini(messages)
return response if response else "Unable to generate report."
+
class ResearchState(rx.State):
"""State management for the research assistant."""
+
user_query: str = ""
iteration_limit: int = 2
final_report: str = ""
@@ -165,15 +196,18 @@ def update_logs(self, message: str):
else:
self.process_logs = f"[{timestamp}] {message}"
-
- async def process_link(self, session: aiohttp.ClientSession, link: str, search_query: str) -> Optional[str]:
+ async def process_link(
+ self, session: aiohttp.ClientSession, link: str, search_query: str
+ ) -> Optional[str]:
"""Process a single link and extract relevant information."""
page_text = await fetch_webpage_text_async(session, link)
if not page_text:
return None
if await is_page_useful_async(session, self.user_query, page_text):
- context = await extract_relevant_context_async(session, self.user_query, search_query, page_text)
+ context = await extract_relevant_context_async(
+ session, self.user_query, search_query, page_text
+ )
return context
return None
@@ -190,19 +224,21 @@ async def handle_submit(self):
async with aiohttp.ClientSession() as session:
self.update_logs("Generating initial search queries...")
yield
-
+
queries = await generate_search_queries_async(session, self.user_query)
if not queries:
self.update_logs("No initial queries could be generated")
yield
return
- self.update_logs(f"Generated {len(queries)} initial queries: {', '.join(queries)}")
+ self.update_logs(
+ f"Generated {len(queries)} initial queries: {', '.join(queries)}"
+ )
yield
contexts = []
iteration = 0
-
+
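+            # Each iteration: search, judge page usefulness, extract context, then plan follow-up queries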
while iteration < self.iteration_limit:
self.update_logs(f"Starting research iteration {iteration + 1}")
yield
@@ -220,7 +256,7 @@ async def handle_submit(self):
if len(all_links) >= 10:
break
all_links.extend(links)
-
+
self.update_logs(f"Found {len(all_links)} links to process")
yield
@@ -228,34 +264,44 @@ async def handle_submit(self):
for link in all_links:
self.update_logs(f"Processing link: {link}")
yield # Update UI after log entry
-
+
context = await self.process_link(session, link, query)
if context:
- self.update_logs("Successfully extracted relevant information")
+ self.update_logs(
+ "Successfully extracted relevant information"
+ )
iteration_contexts.append(context)
yield # Update UI after successful extraction
else:
self.update_logs("No useful information found in link")
yield
-
- self.update_logs(f"Extracted information from {len(iteration_contexts)} sources")
+
+ self.update_logs(
+ f"Extracted information from {len(iteration_contexts)} sources"
+ )
yield
-
+
contexts.extend(iteration_contexts)
- queries = await get_new_search_queries_async(session, self.user_query, queries, contexts)
-
+ queries = await get_new_search_queries_async(
+ session, self.user_query, queries, contexts
+ )
+
if not queries:
self.update_logs("No more queries needed, research complete")
yield
break
-
- self.update_logs(f"Generated {len(queries)} new queries for next iteration")
+
+ self.update_logs(
+ f"Generated {len(queries)} new queries for next iteration"
+ )
yield
iteration += 1
self.update_logs("Generating final research report...")
yield
- self.final_report = await generate_final_report_async(session, self.user_query, contexts)
+ self.final_report = await generate_final_report_async(
+ session, self.user_query, contexts
+ )
self.update_logs("Research process completed successfully")
except Exception as e:
@@ -263,13 +309,18 @@ async def handle_submit(self):
finally:
self.is_processing = False
yield
-
+
+
def index() -> rx.Component:
return rx.container(
rx.vstack(
- rx.heading("Open Deep Researcher 🔬", size="8", margin_bottom="1rem", margin_left="16rem"),
+ rx.heading(
+ "Open Deep Researcher 🔬",
+ size="8",
+ margin_bottom="1rem",
+ margin_left="16rem",
+ ),
rx.text("Enter your research query to generate a report."),
-
# Input Section
rx.box(
rx.vstack(
@@ -294,7 +345,6 @@ def index() -> rx.Component:
border="1px solid #e0e0e0",
border_radius="lg",
),
-
# Results Section
rx.cond(
ResearchState.final_report,
@@ -311,7 +361,6 @@ def index() -> rx.Component:
margin_top="1rem",
),
),
-
# Logs Section
rx.cond(
ResearchState.process_logs,
@@ -337,6 +386,7 @@ def index() -> rx.Component:
padding="2rem",
)
+
# Create app
app = rx.App()
-app.add_page(index, title="Research Assistant")
\ No newline at end of file
+app.add_page(index, title="Research Assistant")
diff --git a/rag_app/rag_app/rag/shared/profile_components.py b/rag_app/rag_app/rag/shared/profile_components.py
index e98c01c..a28b3a7 100644
--- a/rag_app/rag_app/rag/shared/profile_components.py
+++ b/rag_app/rag_app/rag/shared/profile_components.py
@@ -8,7 +8,6 @@
def profile_item_unit():
-
return rx.radio(
["metric", "imperial"],
default_value="metric",
@@ -18,7 +17,6 @@ def profile_item_unit():
def profile_item_physical_stats(value: str, unit: str, fn: Callable):
-
return rx.hstack(
rx.input(
value=value,
@@ -38,7 +36,6 @@ def profile_item_physical_stats(value: str, unit: str, fn: Callable):
def profile_item_activity_stats(title: str, options: list[str]):
-
return rx.vstack(
rx.text(title, size="1", weight="bold", **Typography.passive),
rx.select(
diff --git a/rag_app/rag_app/rag/state.py b/rag_app/rag_app/rag/state.py
index 24f5257..19448f8 100644
--- a/rag_app/rag_app/rag/state.py
+++ b/rag_app/rag_app/rag/state.py
@@ -77,7 +77,6 @@ def track_profil_stat_changes(self) -> dict[str, str]:
async def send_prompt(self):
if self.prompt:
-
self.is_generating = True
yield
diff --git a/rag_app/rag_app/rag_app.py b/rag_app/rag_app/rag_app.py
index c9466ab..f55f156 100644
--- a/rag_app/rag_app/rag_app.py
+++ b/rag_app/rag_app/rag_app.py
@@ -2,6 +2,7 @@
from .rag.main import rag_ai_app
+
# !update UI for easier demoing
def index():
return rag_ai_app()
diff --git a/rag_app/requirements.txt b/rag_app/requirements.txt
index 2f92727..4b6af2d 100644
--- a/rag_app/requirements.txt
+++ b/rag_app/requirements.txt
@@ -1 +1,2 @@
-reflex==0.6.6.post3
+reflex==0.7.11
+google-generativeai
\ No newline at end of file
diff --git a/rag_app/rxconfig.py b/rag_app/rxconfig.py
index 9840e6e..bcccd50 100644
--- a/rag_app/rxconfig.py
+++ b/rag_app/rxconfig.py
@@ -2,4 +2,4 @@
config = rx.Config(
app_name="rag_app",
-)
\ No newline at end of file
+)
diff --git a/rag_with_docling/chat/chat.py b/rag_with_docling/chat/chat.py
index 7d7fa36..51aeaee 100644
--- a/rag_with_docling/chat/chat.py
+++ b/rag_with_docling/chat/chat.py
@@ -4,27 +4,36 @@
import gc
import pandas as pd
from dataclasses import dataclass
-from typing import Any, Optional
+from typing import Optional
import asyncio
-from llama_index.core import Settings, VectorStoreIndex, SimpleDirectoryReader, PromptTemplate
+from llama_index.core import (
+ Settings,
+ VectorStoreIndex,
+ SimpleDirectoryReader,
+ PromptTemplate,
+)
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.readers.docling import DoclingReader
from llama_index.core.node_parser import MarkdownNodeParser
import reflex as rx
+
# Data Models
@dataclass
class QA:
"""A question and answer pair."""
+
question: str
answer: str
+
# Custom Loading Icon
class LoadingIcon(rx.Component):
"""A custom loading icon component."""
+
library = "react-loading-icons"
tag = "SpinningCircles"
stroke: rx.Var[str]
@@ -38,14 +47,15 @@ class LoadingIcon(rx.Component):
def get_event_triggers(self) -> dict:
return {"on_change": lambda status: [status]}
+
loading_icon = LoadingIcon.create
# Styles
message_style = dict(
- display="inline-block",
- padding="1em",
+ display="inline-block",
+ padding="1em",
border_radius="8px",
- max_width=["30em", "30em", "50em", "50em", "50em", "50em"]
+ max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
)
SIDEBAR_STYLE = dict(
@@ -67,6 +77,7 @@ def get_event_triggers(self) -> dict:
_hover={"bg": rx.color("mauve", 3)},
)
+
# Application State
class State(rx.State):
chats: list[list[QA]] = [[]]
@@ -90,49 +101,47 @@ async def handle_upload(self, files: list[rx.UploadFile]):
self.upload_status = "No file selected, Please select a file to continue"
return
yield
-
+
self.uploading = True
yield
-
+
try:
file = files[0]
upload_data = await file.read()
file_name = file.filename
-
+
with tempfile.TemporaryDirectory() as temp_dir:
file_path = os.path.join(temp_dir, file_name)
with open(file_path, "wb") as f:
f.write(upload_data)
-
+
file_key = f"{self.session_id}-{file_name}"
-
+
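+                # Re-use the cached query engine for this session/file so repeat uploads skip re-indexing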
if file_key not in self.file_cache:
-
reader = DoclingReader()
loader = SimpleDirectoryReader(
input_dir=temp_dir,
file_extractor={".xlsx": reader},
)
docs = loader.load_data()
-
+
llm = self.load_llm()
embed_model = HuggingFaceEmbedding(
- model_name="BAAI/bge-large-en-v1.5",
- trust_remote_code=True
+ model_name="BAAI/bge-large-en-v1.5", trust_remote_code=True
)
-
+
Settings.embed_model = embed_model
node_parser = MarkdownNodeParser()
index = VectorStoreIndex.from_documents(
- documents=docs,
- transformations=[node_parser],
- show_progress=True
+ documents=docs,
+ transformations=[node_parser],
+ show_progress=True,
)
-
+
Settings.llm = llm
query_engine = index.as_query_engine(streaming=True)
-
- qa_prompt_tmpl_str = ("""
+
+ qa_prompt_tmpl_str = """
Context information is below.
---------------------
{context_str}
@@ -142,16 +151,18 @@ async def handle_upload(self, files: list[rx.UploadFile]):
in case you don't know the answer, say 'I don't know!'.
Query: {query_str}
Answer:
- """)
+ """
qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)
query_engine.update_prompts(
{"response_synthesizer:text_qa_template": qa_prompt_tmpl}
)
-
+
self.file_cache[file_key] = query_engine
self._query_engine = query_engine
df = pd.read_excel(file_path)
- self.preview_columns = [{"field": col, "header": col} for col in df.columns]
+ self.preview_columns = [
+ {"field": col, "header": col} for col in df.columns
+ ]
self.preview_df = df.to_dict(orient="records")
self.upload_status = f"Uploaded {file_name} successfully"
self.uploading = False
@@ -159,7 +170,7 @@ async def handle_upload(self, files: list[rx.UploadFile]):
else:
self._query_engine = self.file_cache[file_key]
-
+
yield
except Exception as e:
self.uploading = False
@@ -179,21 +190,21 @@ def reset_chat(self):
async def process_query(self, form_data: dict):
if self.processing or not form_data.get("question") or not self._query_engine:
return
-
+
question = form_data.get("question")
if not question:
return
-
+
async with self:
self.processing = True
self.chats[self.current_chat].append(QA(question=question, answer=""))
yield
await asyncio.sleep(0.1)
-
+
try:
streaming_response = self._query_engine.query(question)
answer = ""
-
+
async with self:
for chunk in streaming_response.response_gen:
answer += chunk
@@ -201,20 +212,23 @@ async def process_query(self, form_data: dict):
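+                    # Reassigning chats to itself marks the state dirty so Reflex streams each chunk to the UI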
self.chats = self.chats
yield
await asyncio.sleep(0.05)
-
+
self.processing = False
yield
except Exception as e:
async with self:
- self.chats[self.current_chat][-1].answer = f"Error processing query: {str(e)}"
+ self.chats[self.current_chat][
+ -1
+ ].answer = f"Error processing query: {str(e)}"
self.processing = False
yield
+
def excel_preview() -> rx.Component:
if State.preview_df is None:
return rx.box()
-
+
return rx.box(
rx.heading("Excel Preview", size="4"),
rx.data_table(
@@ -231,6 +245,7 @@ def excel_preview() -> rx.Component:
margin_bottom="2em",
)
+
def message(qa: QA) -> rx.Component:
return rx.box(
rx.box(
@@ -256,6 +271,7 @@ def message(qa: QA) -> rx.Component:
width="100%",
)
+
def action_bar() -> rx.Component:
return rx.box(
rx.vstack(
@@ -325,7 +341,10 @@ def sidebar() -> rx.Component:
border=f"1px dashed {rx.color('mauve', 6)}",
padding="2em",
border_radius="md",
- accept={".xls": "application/vnd.ms-excel", ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"},
+ accept={
+ ".xls": "application/vnd.ms-excel",
+ ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+ },
max_files=1,
multiple=False,
),
@@ -335,23 +354,17 @@ def sidebar() -> rx.Component:
loading=State.uploading,
**UPLOAD_BUTTON_STYLE,
),
- rx.text(
- State.upload_status,
- color=rx.color("mauve", 11),
- font_size="sm"
- ),
+ rx.text(State.upload_status, color=rx.color("mauve", 11), font_size="sm"),
align_items="stretch",
height="100%",
),
**SIDEBAR_STYLE,
)
+
def chat() -> rx.Component:
return rx.vstack(
- rx.box(
- rx.foreach(State.chats[State.current_chat], message),
- width="100%"
- ),
+ rx.box(rx.foreach(State.chats[State.current_chat], message), width="100%"),
py="8",
flex="1",
width="100%",
@@ -362,6 +375,7 @@ def chat() -> rx.Component:
padding_bottom="5em",
)
+
def index() -> rx.Component:
"""The main app."""
return rx.box(
@@ -369,7 +383,9 @@ def index() -> rx.Component:
rx.box(
rx.vstack(
rx.hstack(
- rx.heading("Chat with Excel using DeepSeek-R1 💬", margin_right="4em"),
+ rx.heading(
+ "Chat with Excel using DeepSeek-R1 💬", margin_right="4em"
+ ),
rx.button(
"New Chat",
on_click=State.create_new_chat,
@@ -391,5 +407,6 @@ def index() -> rx.Component:
background_color=rx.color("mauve", 1),
)
+
app = rx.App()
-app.add_page(index)
\ No newline at end of file
+app.add_page(index)
diff --git a/rag_with_docling/requirements.txt b/rag_with_docling/requirements.txt
index 1c6cc1d..82e3313 100644
--- a/rag_with_docling/requirements.txt
+++ b/rag_with_docling/requirements.txt
@@ -1,5 +1,5 @@
+reflex==0.7.11
agno
-reflex
llama_index
llama-index-llms-ollama
llama-index-embeddings-huggingface