43 changes: 43 additions & 0 deletions .github/workflows/export.yml
@@ -0,0 +1,43 @@
name: Check Export

env:
  TELEMETRY_ENABLED: false
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  find-folders:
    runs-on: ubuntu-latest
    outputs:
      folders: ${{ steps.find-rxconfig.outputs.folders }}
    steps:
      - uses: actions/checkout@v3

      - name: Find folders with rxconfig.py
        id: find-rxconfig
        run: |
          FOLDERS=$(find . -maxdepth 2 -type f -name "rxconfig.py" | xargs dirname | sed 's|^\./||' | jq -R -s -c 'split("\n")[:-1]')
          echo "folders=$FOLDERS" >> $GITHUB_OUTPUT
          echo "Found folders: $FOLDERS"

  check-export:
    needs: find-folders
    strategy:
      matrix:
        folder: ${{ fromJson(needs.find-folders.outputs.folders) }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: astral-sh/setup-uv@v6
        with:
          python-version: 3.12
          activate-environment: true
      - name: Install dependencies
        working-directory: ${{ matrix.folder }}
        run: uv pip install -r requirements.txt
      - name: Run export
        working-directory: ${{ matrix.folder }}
        run: uv run reflex export
3 changes: 2 additions & 1 deletion agentic_rag/chat/chat.py
@@ -1,6 +1,7 @@
import reflex as rx
from chat.components.chat import State, chat, action_bar, sidebar


def index() -> rx.Component:
"""The main app."""
return rx.box(
@@ -32,4 +33,4 @@ def index() -> rx.Component:


app = rx.App()
app.add_page(index)
app.add_page(index)
84 changes: 40 additions & 44 deletions agentic_rag/chat/components/chat.py
@@ -5,14 +5,12 @@
import base64
from pathlib import Path
import asyncio
import os
import time

from agno.agent import Agent
from agno.document import Document
from agno.document.reader.pdf_reader import PDFReader
from agno.utils.log import logger
from agno.agent import Agent, AgentMemory
from agno.agent import AgentMemory
from agno.embedder.google import GeminiEmbedder
from agno.knowledge import AgentKnowledge
from agno.memory.db.postgres import PgMemoryDb
@@ -23,27 +21,24 @@

import traceback

from typing import Optional

db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"


def get_agentic_rag_agent(
model_id: str = "gemini-2.0-flash",
user_id: Optional[str] = None,
session_id: Optional[str] = None,
debug_mode: bool = True,
) -> Agent:
"""Get an Agentic RAG Agent with Memory optimized for Deepseek and PDFs."""

# Initialize Deepseek model
model = Gemini(id=model_id)
model = Gemini(id=model_id)

# Define persistent memory for chat history
memory = AgentMemory(
db=PgMemoryDb(
table_name="pdf_agent_memory",
db_url=db_url
),
db=PgMemoryDb(table_name="pdf_agent_memory", db_url=db_url),
create_user_memories=False,
create_session_summary=False,
)
@@ -57,10 +52,9 @@ def get_agentic_rag_agent(
embedder=GeminiEmbedder(),
),
num_documents=4, # Optimal for PDF chunking
document_processor=PDFReader(chunk_size=1000
),
batch_size=32,
parallel_processing=True
document_processor=PDFReader(chunk_size=1000),
batch_size=32,
parallel_processing=True,
)

# Create the PDF-focused Agent
@@ -69,10 +63,7 @@ def get_agentic_rag_agent(
session_id=session_id,
user_id=user_id,
model=model,
storage=PostgresAgentStorage(
table_name="pdf_agent_sessions",
db_url=db_url
),
storage=PostgresAgentStorage(table_name="pdf_agent_sessions", db_url=db_url),
memory=memory,
knowledge=knowledge_base,
description="You are a helpful Agent called 'Agentic RAG' and your goal is to assist the user in the best way possible.",
@@ -123,10 +114,10 @@ def get_agentic_rag_agent(

# Styles
message_style = dict(
display="inline-block",
padding="1em",
display="inline-block",
padding="1em",
border_radius="8px",
max_width=["30em", "30em", "50em", "50em", "50em", "50em"]
max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
)

SIDEBAR_STYLE = dict(
@@ -148,14 +139,18 @@ def get_agentic_rag_agent(
_hover={"bg": rx.color("mauve", 3)},
)


@dataclass
class QA:
"""A question and answer pair."""

question: str
answer: str


class LoadingIcon(rx.Component):
"""A custom loading icon component."""

library = "react-loading-icons"
tag = "SpinningCircles"
stroke: rx.Var[str]
@@ -169,11 +164,13 @@ class LoadingIcon(rx.Component):
def get_event_triggers(self) -> dict:
return {"on_change": lambda status: [status]}


loading_icon = LoadingIcon.create


class State(rx.State):
"""The app state."""

chats: List[List[QA]] = [[]]
base64_pdf: str = ""
uploading: bool = False
@@ -193,7 +190,7 @@ class Config:
exclude = {"_temp_dir"}
json_encoders = {
Path: lambda v: str(v),
tempfile.TemporaryDirectory: lambda v: None
tempfile.TemporaryDirectory: lambda v: None,
}

def _create_agent(self) -> Agent:
@@ -202,12 +199,12 @@ def _create_agent(self) -> Agent:
# Generate a consistent session ID based on current chat
if not self._session_id:
self._session_id = f"session_{int(time.time())}"

return get_agentic_rag_agent(
model_id="gemini-2.0-flash",
session_id=self._session_id,
user_id=None,
debug_mode=True
debug_mode=True,
)
except Exception as e:
logger.error(f"Agent creation error: {str(e)}")
@@ -225,7 +222,7 @@ async def handle_upload(self, files: List[rx.UploadFile]):

file = files[0]
upload_data = await file.read()

# Create persistent temp directory
if self._temp_dir is None:
self._temp_dir = Path(tempfile.mkdtemp())
@@ -255,7 +252,7 @@ async def handle_upload(self, files: List[rx.UploadFile]):
return

# Store base64 for preview
base64_pdf = base64.b64encode(upload_data).decode('utf-8')
base64_pdf = base64.b64encode(upload_data).decode("utf-8")
self.base64_pdf = base64_pdf
self.knowledge_base_files.append(file.filename)

@@ -265,15 +262,15 @@ async def handle_upload(self, files: List[rx.UploadFile]):
finally:
self.uploading = False
yield

@rx.event(background=True)
async def process_question(self, form_data: dict):
"""Process a question using streaming responses"""
if self.processing or not form_data.get("question"):
return

question = form_data["question"]

async with self:
self.processing = True
self.chats[self.current_chat].append(QA(question=question, answer=""))
@@ -291,7 +288,9 @@ def run_stream():
stream_response = agent.run(question, stream=True)
for chunk in stream_response:
if chunk.content:
asyncio.run_coroutine_threadsafe(queue.put(chunk.content), loop)
asyncio.run_coroutine_threadsafe(
queue.put(chunk.content), loop
)
asyncio.run_coroutine_threadsafe(queue.put(None), loop)
except Exception as e:
error_msg = f"Error: {str(e)}"
@@ -308,7 +307,7 @@ def run_stream():
if isinstance(chunk, str) and chunk.startswith("Error: "):
answer_content = chunk
break

answer_content += chunk
async with self:
self.chats[self.current_chat][-1].answer = answer_content
@@ -326,15 +325,14 @@ def run_stream():
async with self:
self.processing = False
yield


def clear_knowledge_base(self):
"""Clear knowledge base and reset state"""
try:
# Create temporary agent to clear vector store
agent = self._create_agent()
agent.knowledge.vector_db.delete()

# Reset state
self.loaded_files.clear()
self.knowledge_base_files.clear()
@@ -344,26 +342,27 @@ def clear_knowledge_base(self):
self.upload_status = "Knowledge base cleared"
except Exception as e:
self.upload_status = f"Error clearing knowledge base: {str(e)}"

def create_new_chat(self):
"""Create a new chat"""
self.chats.append([])
self.current_chat = len(self.chats) - 1


def pdf_preview() -> rx.Component:
return rx.box(
rx.heading("PDF Preview", size="4", margin_bottom="1em"),
rx.cond(
State.base64_pdf != "",
rx.html(
f'''
f"""
<iframe
src="data:application/pdf;base64,{State.base64_pdf}"
width="100%"
height="600px"
style="border: none; border-radius: 8px;">
</iframe>
'''
"""
),
rx.text("No PDF uploaded yet", color="red"),
),
@@ -373,6 +372,7 @@ def pdf_preview() -> rx.Component:
overflow="hidden",
)


def message(qa: QA) -> rx.Component:
return rx.box(
rx.box(
Expand All @@ -398,12 +398,10 @@ def message(qa: QA) -> rx.Component:
width="100%",
)


def chat() -> rx.Component:
return rx.vstack(
rx.box(
rx.foreach(State.chats[State.current_chat], message),
width="100%"
),
rx.box(rx.foreach(State.chats[State.current_chat], message), width="100%"),
py="8",
flex="1",
width="100%",
@@ -414,6 +412,7 @@ def chat() -> rx.Component:
padding_bottom="5em",
)


def action_bar() -> rx.Component:
return rx.box(
rx.vstack(
@@ -461,6 +460,7 @@ def action_bar() -> rx.Component:
width="100%",
)


def sidebar() -> rx.Component:
return rx.box(
rx.vstack(
Expand Down Expand Up @@ -509,11 +509,7 @@ def sidebar() -> rx.Component:
width="100%",
),
),
rx.text(
State.upload_status,
color=rx.color("mauve", 11),
font_size="sm"
),
rx.text(State.upload_status, color=rx.color("mauve", 11), font_size="sm"),
align_items="stretch",
height="100%",
),
5 changes: 3 additions & 2 deletions agentic_rag/requirements.txt
@@ -1,10 +1,11 @@
reflex==0.7.11
agno
google-generativeai
reflex
bs4
duckduckgo-search
qdrant-client
pgvector
psycopg[binary]
pypdf
sqlalchemy
sqlalchemy
google-genai