From 6152ae4629100cd157ab42a7f91f4f83ba6f8c6e Mon Sep 17 00:00:00 2001
From: Mahmoudkhorshed-Queens <21mamm2@queensu.ca>
Date: Sun, 19 May 2024 15:20:14 +0300
Subject: [PATCH 01/10] Pin node:18-alpine in the UI Dockerfile and relax the engines range to >=18
---
ui/Dockerfile | 2 +-
ui/package.json | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/ui/Dockerfile b/ui/Dockerfile
index e41e721..279d3c3 100644
--- a/ui/Dockerfile
+++ b/ui/Dockerfile
@@ -1,4 +1,4 @@
-FROM node:lts-alpine
+FROM node:18-alpine
ARG FOLDER=/ui
diff --git a/ui/package.json b/ui/package.json
index 1a74502..dd86c62 100644
--- a/ui/package.json
+++ b/ui/package.json
@@ -58,6 +58,6 @@
"vite": "^4.3.8"
},
"engines": {
- "node": "^18.0.0"
+ "node": ">=18.0.0"
}
}
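
The substantive change here is the engines range: "^18.0.0" pins the UI to Node 18.x, while ">=18.0.0" also admits Node 20 and later. A minimal, dependency-free sketch of the difference (helper names are illustrative, not from the repo):

# Sketch of the semver semantics behind the engines change.
# "^18.0.0" means >=18.0.0 and <19.0.0; ">=18.0.0" has no upper bound.
def satisfies_caret_18(version):          # the old "^18.0.0" range
    major, minor, patch = version
    return (major, minor, patch) >= (18, 0, 0) and major < 19

def satisfies_gte_18(version):            # the new ">=18.0.0" range
    return version >= (18, 0, 0)

print(satisfies_caret_18((20, 11, 0)))    # False: Node 20 violates the caret range
print(satisfies_gte_18((20, 11, 0)))      # True: Node 20 passes the new range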
From deaec6586381331a6918090bc079bcb36e465b3d Mon Sep 17 00:00:00 2001
From: Mahmoudkhorshed-Queens <21mamm2@queensu.ca>
Date: Sun, 19 May 2024 15:29:55 +0300
Subject: [PATCH 02/10] Disable npm strict-ssl; leave custom CA certificate steps commented out
---
ui/Dockerfile | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/ui/Dockerfile b/ui/Dockerfile
index 279d3c3..aa0cf7c 100644
--- a/ui/Dockerfile
+++ b/ui/Dockerfile
@@ -1,4 +1,4 @@
-FROM node:18-alpine
+FROM node:lts-alpine
ARG FOLDER=/ui
@@ -10,6 +10,19 @@ COPY . /ui/.
WORKDIR $FOLDER
+# Copy CA certificate
+# COPY your-ca-certificate.crt /usr/local/share/ca-certificates/your-ca-certificate.crt
+
+# # Update CA certificates
+# RUN update-ca-certificates
+
+# # Configure npm to use the custom CA
+# RUN npm config set cafile /usr/local/share/ca-certificates/your-ca-certificate.crt
+
+
+# Disable strict SSL
+RUN npm config set strict-ssl false
+
RUN npm install
EXPOSE 4173
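
Note that "npm config set strict-ssl false" turns off TLS verification for every registry request; the commented-out CA steps are the safer route when a corporate proxy re-signs traffic. A small sketch, assuming the placeholder certificate path from the commented Dockerfile lines, to check whether a custom CA actually validates the registry before strict SSL is disabled:

# Sketch: confirm a corporate CA bundle validates registry.npmjs.org before
# falling back to "npm config set strict-ssl false". The cafile path below is
# the placeholder from the commented-out Dockerfile step, not a real cert.
import socket
import ssl

CA_FILE = "/usr/local/share/ca-certificates/your-ca-certificate.crt"
HOST = "registry.npmjs.org"

context = ssl.create_default_context(cafile=CA_FILE)
with socket.create_connection((HOST, 443), timeout=10) as sock:
    with context.wrap_socket(sock, server_hostname=HOST) as tls:
        # Raises ssl.SSLCertVerificationError if the chain is not trusted.
        print("TLS OK:", tls.version())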
From 9ebd28f32db6479986ebd27abb0dc6c4225da1df Mon Sep 17 00:00:00 2001
From: Mahmoudkhorshed-Queens <21mamm2@queensu.ca>
Date: Sun, 19 May 2024 17:31:10 +0300
Subject: [PATCH 03/10] Add react-use-websocket and Vite env type declarations; comment out App JSX
---
ui/Dockerfile | 3 +
ui/import.meta.env | 8 +++
ui/src/chat-with-kg/App.tsx | 99 +++++++++++++++++----------------
ui/src/chat-with-kg/main.tsx | 5 ++
ui/src/react-use-websocket.d.ts | 53 ++++++++++++++++++
ui/tsconfig.json | 11 +++-
ui/vite-env.d.ts | 9 +++
7 files changed, 137 insertions(+), 51 deletions(-)
create mode 100644 ui/import.meta.env
create mode 100644 ui/src/react-use-websocket.d.ts
create mode 100644 ui/vite-env.d.ts
diff --git a/ui/Dockerfile b/ui/Dockerfile
index aa0cf7c..5a42a02 100644
--- a/ui/Dockerfile
+++ b/ui/Dockerfile
@@ -23,6 +23,9 @@ WORKDIR $FOLDER
# Disable strict SSL
RUN npm config set strict-ssl false
+RUN npm install react react-dom
+RUN npm install --save-dev @types/react @types/react-dom
+RUN npm install react-use-websocket
RUN npm install
EXPOSE 4173
diff --git a/ui/import.meta.env b/ui/import.meta.env
new file mode 100644
index 0000000..9c252e4
--- /dev/null
+++ b/ui/import.meta.env
@@ -0,0 +1,8 @@
+const apiUrl = import.meta.env.VITE_API_URL;
+const API_KEY_ENDPOINT = import.meta.env.VITE_HAS_API_KEY_ENDPOINT;
+const KG_CHAT_SAMPLE_QUESTIONS_ENDPOINT = import.meta.env.VITE_KG_CHAT_SAMPLE_QUESTIONS_ENDPOINT;
+
+console.log(apiUrl);
+console.log(API_KEY_ENDPOINT);
+console.log(KG_CHAT_SAMPLE_QUESTIONS_ENDPOINT);
+
diff --git a/ui/src/chat-with-kg/App.tsx b/ui/src/chat-with-kg/App.tsx
index 12a39c4..196808e 100644
--- a/ui/src/chat-with-kg/App.tsx
+++ b/ui/src/chat-with-kg/App.tsx
@@ -261,55 +261,56 @@ function App() {
setText2cypherModel(e.target.value)
}
-  return (
-    /* [JSX elided: KeyModal when needsApiKey; page header;
-       "Server is unavailable, please reload the page to try again." when
-       !serverAvailable; "Initializing..." while needsApiKeyLoading;
-       ChatContainer, ChatInput and {errorMessage} when readyState ===
-       ReadyState.OPEN; "Connecting..." when ReadyState.CONNECTING;
-       "Could not connect to server, reconnecting..." when ReadyState.CLOSED] */
-  );
+  // return ( /* original JSX block, commented out; markup elided */ )
+  ;
}
export default App;
diff --git a/ui/src/chat-with-kg/main.tsx b/ui/src/chat-with-kg/main.tsx
index 3fb9bd4..a074fb2 100644
--- a/ui/src/chat-with-kg/main.tsx
+++ b/ui/src/chat-with-kg/main.tsx
@@ -3,6 +3,11 @@ import { createRoot } from "react-dom/client";
import App from "./App.js";
import Modal from "react-modal";
+import ReactDOM from 'react-dom';
+
+// import * as React from 'react';
+// import * as ReactDOM from 'react-dom';
+
import "@neo4j-ndl/base/lib/neo4j-ds-styles.css";
import "./index.css";
diff --git a/ui/src/react-use-websocket.d.ts b/ui/src/react-use-websocket.d.ts
new file mode 100644
index 0000000..789cd64
--- /dev/null
+++ b/ui/src/react-use-websocket.d.ts
@@ -0,0 +1,53 @@
+declare module 'react-use-websocket' {
+ import { ComponentType } from 'react';
+
+ export type ReadyState = number;
+ export type SendMessage = (message: string) => void;
+ export type Options = {
+ fromSocketIO?: boolean;
+ share?: boolean;
+ onOpen?: () => void;
+ onClose?: () => void;
+ onMessage?: (message: WebSocketEventMap['message']) => void;
+ onError?: (error: WebSocketEventMap['error']) => void;
+ filter?: () => boolean;
+ };
+
+ export function useWebSocket(
+ url: string,
+ options?: Options
+ ): {
+ sendMessage: SendMessage;
+ lastMessage: WebSocketEventMap['message'] | null;
+ readyState: ReadyState;
+ };
+
+
+ export const ReadyState: {
+ CONNECTING: number;
+ OPEN: number;
+ CLOSING: number;
+ CLOSED: number;
+ };
+
+ const WebSocketComponent: ComponentType<{ url: string; options?: Options }>;
+ export default WebSocketComponent;
+
+
+}
+
+// import { useWebSocket, ReadyState } from 'react-use-websocket';
+
+// const MyComponent: React.FC = () => {
+// const { sendMessage, lastMessage, readyState } = useWebSocket('ws://example.com');
+
+//   return (
+//     <div>
+//       <p>Last message: {lastMessage ? lastMessage.data : 'No message received yet'}</p>
+//       <p>Ready state: {readyState}</p>
+//     </div>
+//   );
+// };
+
+
\ No newline at end of file
diff --git a/ui/tsconfig.json b/ui/tsconfig.json
index 1cb4f27..2ab02cf 100644
--- a/ui/tsconfig.json
+++ b/ui/tsconfig.json
@@ -17,5 +17,12 @@
"types": ["vite/client"],
"jsx": "react-jsx"
},
- "include": ["src"],
- }
\ No newline at end of file
+ // "include": ["src"],
+
+ "include": [
+ "src",
+ "vite-env.d.ts", // Adjust the path according to your setup
+ "types",
+ "react-use-websocket.d.ts" // Adjust the path as needed
+ ]
+ }
diff --git a/ui/vite-env.d.ts b/ui/vite-env.d.ts
new file mode 100644
index 0000000..f89f7fc
--- /dev/null
+++ b/ui/vite-env.d.ts
@@ -0,0 +1,9 @@
+interface ImportMetaEnv {
+ readonly VITE_API_URL: string;
+ readonly VITE_ANOTHER_ENV_VAR: string;
+ // Add other environment variables here as needed
+ }
+
+ interface ImportMeta {
+ readonly env: ImportMetaEnv;
+ }
\ No newline at end of file
From dce78a702f9d0cb26847193de2fa4253da4e1923 Mon Sep 17 00:00:00 2001
From: Mahmoudkhorshed-Queens <21mamm2@queensu.ca>
Date: Sun, 19 May 2024 17:47:26 +0300
Subject: [PATCH 04/10] Import useWebSocket explicitly and render the app root with ReactDOM
---
ui/src/chat-with-kg/App.tsx | 5 ++++-
ui/src/chat-with-kg/main.tsx | 4 ++--
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/ui/src/chat-with-kg/App.tsx b/ui/src/chat-with-kg/App.tsx
index 196808e..0b5d444 100644
--- a/ui/src/chat-with-kg/App.tsx
+++ b/ui/src/chat-with-kg/App.tsx
@@ -1,8 +1,11 @@
import { useCallback, useEffect, useState, ChangeEvent } from "react";
+import React from 'react';
+import { useWebSocket, ReadyState } from 'react-use-websocket';
+
import ChatContainer from "./ChatContainer";
import type { ChatMessageObject } from "./ChatMessage";
import ChatInput from "./ChatInput";
-import useWebSocket, { ReadyState } from "react-use-websocket";
+// import useWebSocket, { ReadyState } from "react-use-websocket";
import KeyModal from "../components/keymodal";
import type {
ConversationState,
diff --git a/ui/src/chat-with-kg/main.tsx b/ui/src/chat-with-kg/main.tsx
index a074fb2..a427d7d 100644
--- a/ui/src/chat-with-kg/main.tsx
+++ b/ui/src/chat-with-kg/main.tsx
@@ -1,13 +1,13 @@
import React from "react";
import { createRoot } from "react-dom/client";
-import App from "./App.js";
+import App from "./App";
import Modal from "react-modal";
import ReactDOM from 'react-dom';
// import * as React from 'react';
// import * as ReactDOM from 'react-dom';
-
+ReactDOM.render(<App />, document.getElementById('root'));
import "@neo4j-ndl/base/lib/neo4j-ds-styles.css";
import "./index.css";
From 1ac050a260c2bce054d1293bedede62322396d0c Mon Sep 17 00:00:00 2001
From: Mahmoudkhorshed-Queens <21mamm2@queensu.ca>
Date: Sun, 19 May 2024 23:31:49 +0300
Subject: [PATCH 05/10] Run the UI in dev mode, mount sources in compose, and move shared type declarations
---
docker-compose.yml | 7 ++++++-
ui/Dockerfile | 11 +++++++----
ui/src/chat-with-kg/App.tsx | 2 +-
ui/src/vite-env.d.ts | 14 ++++++++++++++
ui/tsconfig.json | 19 +++++++++----------
ui/{ => types}/vite-env.d.ts | 7 ++++++-
.../chat-with-kg => }/types/websocketTypes.ts | 0
ui/vite.config.js | 6 +++---
8 files changed, 46 insertions(+), 20 deletions(-)
create mode 100644 ui/src/vite-env.d.ts
rename ui/{ => types}/vite-env.d.ts (78%)
rename ui/{src/chat-with-kg => }/types/websocketTypes.ts (100%)
diff --git a/docker-compose.yml b/docker-compose.yml
index bf3039b..7bab8c7 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
services:
backend:
build:
@@ -20,3 +20,8 @@ services:
container_name: ui
ports:
- 4173:4173
+ volumes:
+ - .:/app
+ - /app/node_modules
+ environment:
+ - NODE_ENV=development
diff --git a/ui/Dockerfile b/ui/Dockerfile
index 5a42a02..18ef6ad 100644
--- a/ui/Dockerfile
+++ b/ui/Dockerfile
@@ -19,17 +19,20 @@ WORKDIR $FOLDER
# # Configure npm to use the custom CA
# RUN npm config set cafile /usr/local/share/ca-certificates/your-ca-certificate.crt
+RUN rm -rf node_modules
+RUN rm -rf dist
# Disable strict SSL
RUN npm config set strict-ssl false
-
+RUN npm install vite
RUN npm install react react-dom
-RUN npm install --save-dev @types/react @types/react-dom
+RUN npm install --save-dev @types/react @types/react-dom
+# RUN npm install --save-dev @types/vite
RUN npm install react-use-websocket
RUN npm install
+# RUN npm run build
EXPOSE 4173
-RUN npm run build
-CMD ["npm", "run", "preview"]
+CMD ["npm", "run", "start"]
diff --git a/ui/src/chat-with-kg/App.tsx b/ui/src/chat-with-kg/App.tsx
index 0b5d444..9e28e75 100644
--- a/ui/src/chat-with-kg/App.tsx
+++ b/ui/src/chat-with-kg/App.tsx
@@ -11,7 +11,7 @@ import type {
ConversationState,
WebSocketRequest,
WebSocketResponse,
-} from "./types/websocketTypes";
+} from "../../types/websocketTypes";
const SEND_REQUESTS = true;
diff --git a/ui/src/vite-env.d.ts b/ui/src/vite-env.d.ts
new file mode 100644
index 0000000..f296ca2
--- /dev/null
+++ b/ui/src/vite-env.d.ts
@@ -0,0 +1,14 @@
+// src/vite-env.d.ts
+
+interface ImportMetaEnv {
+ readonly VITE_API_URL: string;
+ readonly VITE_ANOTHER_ENV_VAR: string;
+ // Add other environment variables here as needed
+ }
+
+ interface ImportMeta {
+ readonly env: ImportMetaEnv;
+ }
+
+
+/// <reference types="vite/client" />
\ No newline at end of file
diff --git a/ui/tsconfig.json b/ui/tsconfig.json
index 2ab02cf..5c24da4 100644
--- a/ui/tsconfig.json
+++ b/ui/tsconfig.json
@@ -7,8 +7,9 @@
"skipLibCheck": true,
"esModuleInterop": false,
"allowSyntheticDefaultImports": true,
- "strict": true,
- "forceConsistentCasingInFileNames": true,
+ "noFallthroughCasesInSwitch": true,
+ "strict": true, //
+ "forceConsistentCasingInFileNames": true, //
"module": "ESNext",
"moduleResolution": "Node",
"resolveJsonModule": true,
@@ -16,13 +17,11 @@
"noEmit": true,
"types": ["vite/client"],
"jsx": "react-jsx"
- },
- // "include": ["src"],
-
+ },
"include": [
"src",
- "vite-env.d.ts", // Adjust the path according to your setup
- "types",
- "react-use-websocket.d.ts" // Adjust the path as needed
- ]
- }
+ // "src/vite-env.d.ts", // Adjust the path according to your setup
+ "types",
+ // "src/react-use-websocket.d.ts" // Adjust the path as needed
+ ]
+}
diff --git a/ui/vite-env.d.ts b/ui/types/vite-env.d.ts
similarity index 78%
rename from ui/vite-env.d.ts
rename to ui/types/vite-env.d.ts
index f89f7fc..b03d52e 100644
--- a/ui/vite-env.d.ts
+++ b/ui/types/vite-env.d.ts
@@ -1,3 +1,5 @@
+// vite-env.d.ts
+
interface ImportMetaEnv {
readonly VITE_API_URL: string;
readonly VITE_ANOTHER_ENV_VAR: string;
@@ -6,4 +8,7 @@ interface ImportMetaEnv {
interface ImportMeta {
readonly env: ImportMetaEnv;
- }
\ No newline at end of file
+ }
+
+
+/// <reference types="vite/client" />
\ No newline at end of file
diff --git a/ui/src/chat-with-kg/types/websocketTypes.ts b/ui/types/websocketTypes.ts
similarity index 100%
rename from ui/src/chat-with-kg/types/websocketTypes.ts
rename to ui/types/websocketTypes.ts
diff --git a/ui/vite.config.js b/ui/vite.config.js
index a28ef37..70d4c52 100644
--- a/ui/vite.config.js
+++ b/ui/vite.config.js
@@ -1,6 +1,6 @@
-import { resolve } from 'path'
-import { defineConfig } from 'vite'
-import react from '@vitejs/plugin-react'
+import { resolve } from 'path';
+import { defineConfig } from 'vite';
+import react from '@vitejs/plugin-react';
// https://vitejs.dev/config/
export default defineConfig({
From 06730119f0f510d9754cc7b49791137d14017f37 Mon Sep 17 00:00:00 2001
From: Mahmoudkhorshed-Queens <21mamm2@queensu.ca>
Date: Sun, 19 May 2024 23:49:03 +0300
Subject: [PATCH 06/10] Declare backend endpoint env vars and wire useWebSocket to them
---
ui/src/react-use-websocket.d.ts | 18 +++++++-----------
ui/src/vite-env.d.ts | 14 +++++++++++++-
2 files changed, 20 insertions(+), 12 deletions(-)
diff --git a/ui/src/react-use-websocket.d.ts b/ui/src/react-use-websocket.d.ts
index 789cd64..1570cc5 100644
--- a/ui/src/react-use-websocket.d.ts
+++ b/ui/src/react-use-websocket.d.ts
@@ -36,18 +36,14 @@ declare module 'react-use-websocket' {
}
-// import { useWebSocket, ReadyState } from 'react-use-websocket';
+import { useWebSocket, ReadyState } from 'react-use-websocket';
+const URI = import.meta.env.VITE_KG_CHAT_BACKEND_ENDPOINT ?? "ws://localhost:7860/text2text";
+
+const { sendJsonMessage, lastMessage, readyState } = useWebSocket(URI, {
+ shouldReconnect: () => true,
+ reconnectInterval: 5000,
+});
-// const MyComponent: React.FC = () => {
-// const { sendMessage, lastMessage, readyState } = useWebSocket('ws://example.com');
-//   return (
-//     <div>
-//       <p>Last message: {lastMessage ? lastMessage.data : 'No message received yet'}</p>
-//       <p>Ready state: {readyState}</p>
-//     </div>
-//   );
-// };
\ No newline at end of file
diff --git a/ui/src/vite-env.d.ts b/ui/src/vite-env.d.ts
index f296ca2..55659a4 100644
--- a/ui/src/vite-env.d.ts
+++ b/ui/src/vite-env.d.ts
@@ -11,4 +11,16 @@ interface ImportMetaEnv {
}
-/// <reference types="vite/client" />
\ No newline at end of file
+/// <reference types="vite/client" />
+
+
+interface ImportMetaEnv {
+ readonly VITE_KG_CHAT_BACKEND_ENDPOINT: string;
+ readonly VITE_HAS_API_KEY_ENDPOINT: string;
+ readonly VITE_KG_CHAT_SAMPLE_QUESTIONS_ENDPOINT: string;
+ // Add other environment variables here...
+}
+
+interface ImportMeta {
+ readonly env: ImportMetaEnv;
+}
\ No newline at end of file
From 3739dbe43259d98903bcbf089e5ae9ec881c9965 Mon Sep 17 00:00:00 2001
From: Mahmoudkhorshed-Queens <21mamm2@queensu.ca>
Date: Mon, 20 May 2024 12:54:03 +0300
Subject: [PATCH 07/10] Replace OpenAI chat/embedding backends with Llama 2 and sentence-transformers; rewrite chat UI
---
api/requirements.txt | 5 +-
api/src/embedding/openai.py | 39 +++--
api/src/llm/openai.py | 157 ++++++++++++-----
api/src/main.py | 76 ++++++---
ui/Dockerfile | 6 +-
ui/src/chat-with-kg/App.tsx | 291 +++++---------------------------
ui/src/react-use-websocket.d.ts | 77 ++++-----
ui/tsconfig.json | 4 +-
ui/types/websocketTypes.ts | 45 +++--
9 files changed, 304 insertions(+), 396 deletions(-)
diff --git a/api/requirements.txt b/api/requirements.txt
index a390d8d..dd3feda 100644
--- a/api/requirements.txt
+++ b/api/requirements.txt
@@ -7,4 +7,7 @@ retry==0.9.2
tiktoken==0.4.0
python-dotenv==1.0.0
websockets===11.0.3
-gunicorn===20.1.0
\ No newline at end of file
+gunicorn===20.1.0
+transformers
+torch
+sentence_transformers
\ No newline at end of file
diff --git a/api/src/embedding/openai.py b/api/src/embedding/openai.py
index 7663332..cd54807 100644
--- a/api/src/embedding/openai.py
+++ b/api/src/embedding/openai.py
@@ -1,19 +1,38 @@
-import openai
-from embedding.base_embedding import BaseEmbedding
+# import openai
+# from embedding.base_embedding import BaseEmbedding
-class OpenAIEmbedding(BaseEmbedding):
- """Wrapper around OpenAI embedding models."""
+# class OpenAIEmbedding(BaseEmbedding):
+# """Wrapper around OpenAI embedding models."""
+
+# def __init__(
+# self, openai_api_key: str, model_name: str = "text-embedding-ada-002"
+# ) -> None:
+# openai.api_key = openai_api_key
+# self.model = model_name
+
+# def generate(
+# self,
+# input: str,
+# ) -> str:
+# embedding = openai.Embedding.create(input=input, model=self.model)
+# return embedding["data"][0]["embedding"]
+
+from sentence_transformers import SentenceTransformer
+from base_embedding import BaseEmbedding
+
+
+class LlamaEmbedding(BaseEmbedding):
+ """Wrapper around HuggingFace embedding models."""
def __init__(
- self, openai_api_key: str, model_name: str = "text-embedding-ada-002"
+ self, model_name: str = "sentence-transformers/all-MiniLM-L6-v2"
) -> None:
- openai.api_key = openai_api_key
- self.model = model_name
+ self.model = SentenceTransformer(model_name)
def generate(
self,
input: str,
- ) -> str:
- embedding = openai.Embedding.create(input=input, model=self.model)
- return embedding["data"][0]["embedding"]
+ ) -> list:
+ embedding = self.model.encode(input)
+ return embedding
diff --git a/api/src/llm/openai.py b/api/src/llm/openai.py
index b09e311..d21c725 100644
--- a/api/src/llm/openai.py
+++ b/api/src/llm/openai.py
@@ -1,26 +1,104 @@
+# from typing import (
+# Callable,
+# List,
+# )
+
+# import openai
+# import tiktoken
+# from llm.basellm import BaseLLM
+# from retry import retry
+
+
+# class OpenAIChat(BaseLLM):
+# """Wrapper around OpenAI Chat large language models."""
+
+# def __init__(
+# self,
+# openai_api_key: str,
+# model_name: str = "gpt-3.5-turbo",
+# max_tokens: int = 1000,
+# temperature: float = 0.0,
+# ) -> None:
+# openai.api_key = openai_api_key
+# self.model = model_name
+# self.max_tokens = max_tokens
+# self.temperature = temperature
+
+# @retry(tries=3, delay=1)
+# def generate(
+# self,
+# messages: List[str],
+# ) -> str:
+# try:
+# completions = openai.ChatCompletion.create(
+# model=self.model,
+# temperature=self.temperature,
+# max_tokens=self.max_tokens,
+# messages=messages,
+# )
+# return completions.choices[0].message.content
+# # catch context length / do not retry
+# except openai.error.InvalidRequestError as e:
+# return str(f"Error: {e}")
+# # catch authorization errors / do not retry
+# except openai.error.AuthenticationError as e:
+# return "Error: The provided OpenAI API key is invalid"
+# except Exception as e:
+# print(f"Retrying LLM call {e}")
+# raise Exception()
+
+# async def generateStreaming(
+# self,
+# messages: List[str],
+# onTokenCallback=Callable[[str], None],
+# ) -> str:
+# result = []
+# completions = openai.ChatCompletion.create(
+# model=self.model,
+# temperature=self.temperature,
+# max_tokens=self.max_tokens,
+# messages=messages,
+# stream=True,
+# )
+# result = []
+# for message in completions:
+# # Process the streamed messages or perform any other desired action
+# delta = message["choices"][0]["delta"]
+# if "content" in delta:
+# result.append(delta["content"])
+# await onTokenCallback(message)
+# return result
+
+# def num_tokens_from_string(self, string: str) -> int:
+# encoding = tiktoken.encoding_for_model(self.model)
+# num_tokens = len(encoding.encode(string))
+# return num_tokens
+
+# def max_allowed_token_length(self) -> int:
+# # TODO: list all models and their max tokens from api
+# return 2049
from typing import (
Callable,
List,
)
-import openai
-import tiktoken
-from llm.basellm import BaseLLM
+from transformers import LlamaForCausalLM, LlamaTokenizer
+import torch
+from basellm import BaseLLM
from retry import retry
-class OpenAIChat(BaseLLM):
- """Wrapper around OpenAI Chat large language models."""
+class Llama2Chat(BaseLLM):
+ """Wrapper around HuggingFace Llama2 large language models."""
def __init__(
self,
- openai_api_key: str,
- model_name: str = "gpt-3.5-turbo",
- max_tokens: int = 1000,
+ model_name: str = "TheBloke/Llama-2-7B-32K-Instruct-GPTQ",
+ max_tokens: int = 2056,
temperature: float = 0.0,
) -> None:
- openai.api_key = openai_api_key
- self.model = model_name
+ self.tokenizer = LlamaTokenizer.from_pretrained(model_name)
+ self.model = LlamaForCausalLM.from_pretrained(model_name)
self.max_tokens = max_tokens
self.temperature = temperature
@@ -30,50 +108,39 @@ def generate(
messages: List[str],
) -> str:
try:
- completions = openai.ChatCompletion.create(
- model=self.model,
- temperature=self.temperature,
- max_tokens=self.max_tokens,
- messages=messages,
- )
- return completions.choices[0].message.content
- # catch context length / do not retry
- except openai.error.InvalidRequestError as e:
- return str(f"Error: {e}")
- # catch authorization errors / do not retry
- except openai.error.AuthenticationError as e:
- return "Error: The provided OpenAI API key is invalid"
+ # Concatenate the messages into a single string
+ input_text = " ".join(messages)
+ inputs = self.tokenizer(input_text, return_tensors="pt", max_length=self.max_tokens, truncation=True)
+ outputs = self.model.generate(**inputs, max_length=self.max_tokens, temperature=self.temperature)
+ return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
except Exception as e:
print(f"Retrying LLM call {e}")
- raise Exception()
+ raise Exception(f"Error: {e}")
async def generateStreaming(
self,
messages: List[str],
onTokenCallback=Callable[[str], None],
) -> str:
- result = []
- completions = openai.ChatCompletion.create(
- model=self.model,
- temperature=self.temperature,
- max_tokens=self.max_tokens,
- messages=messages,
- stream=True,
- )
- result = []
- for message in completions:
- # Process the streamed messages or perform any other desired action
- delta = message["choices"][0]["delta"]
- if "content" in delta:
- result.append(delta["content"])
- await onTokenCallback(message)
- return result
+ try:
+ input_text = " ".join(messages)
+ inputs = self.tokenizer(input_text, return_tensors="pt", max_length=self.max_tokens, truncation=True)
+ outputs = self.model.generate(**inputs, max_length=self.max_tokens, temperature=self.temperature)
+
+ result = []
+ for token_id in outputs[0]:
+ token = self.tokenizer.decode(token_id, skip_special_tokens=True)
+ result.append(token)
+ await onTokenCallback(token)
+ return result
+ except Exception as e:
+ print(f"Error during streaming generation: {e}")
+ raise Exception(f"Error: {e}")
def num_tokens_from_string(self, string: str) -> int:
- encoding = tiktoken.encoding_for_model(self.model)
- num_tokens = len(encoding.encode(string))
- return num_tokens
+ inputs = self.tokenizer(string, return_tensors="pt")
+ return inputs.input_ids.shape[1]
def max_allowed_token_length(self) -> int:
- # TODO: list all models and their max tokens from api
- return 2049
+ return self.tokenizer.model_max_length
+
diff --git a/api/src/main.py b/api/src/main.py
index cbb35b2..983d5b3 100644
--- a/api/src/main.py
+++ b/api/src/main.py
@@ -17,7 +17,7 @@
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fewshot_examples import get_fewshot_examples
-from llm.openai import OpenAIChat
+from llm.openai import OpenAIChat,Llama2Chat
from pydantic import BaseModel
@@ -79,11 +79,16 @@ async def questionProposalsForCurrentDb(payload: questionProposalPayload):
questionProposalGenerator = QuestionProposalGenerator(
database=neo4j_connection,
- llm=OpenAIChat(
- openai_api_key=api_key,
- model_name="gpt-3.5-turbo-0613",
+ # llm=OpenAIChat(
+ # openai_api_key=api_key,
+ # model_name="gpt-3.5-turbo-0613",
+ # max_tokens=512,
+ # temperature=0.8,
+ llm=Llama2Chat(
+ # openai_api_key=api_key,
+ model_name="TheBloke/Llama-2-7B-32K-Instruct-GPTQ",
max_tokens=512,
- temperature=0.8,
+ temperature=0,
),
)
@@ -128,17 +133,33 @@ async def onToken(token):
)
api_key = openai_api_key if openai_api_key else data.get("api_key")
- default_llm = OpenAIChat(
- openai_api_key=api_key,
- model_name=data.get("model_name", "gpt-3.5-turbo-0613"),
+ # default_llm = OpenAIChat(
+ # openai_api_key=api_key,
+ # model_name=data.get("model_name", "gpt-3.5-turbo-0613"),
+ # )
+
+ default_llm = Llama2Chat(
+ # openai_api_key=api_key,
+ model_name=data.get("model_name", "TheBloke/Llama-2-7B-32K-Instruct-GPTQ"),
+ max_tokens=512,
+ temperature=0,
)
+
+ # summarize_results = SummarizeCypherResult(
+ # llm=OpenAIChat(
+ # openai_api_key=api_key,
+ # model_name="gpt-3.5-turbo-0613",
+ # max_tokens=128,
+ # )
+ # )
summarize_results = SummarizeCypherResult(
- llm=OpenAIChat(
- openai_api_key=api_key,
- model_name="gpt-3.5-turbo-0613",
- max_tokens=128,
- )
+ llm=Llama2Chat(
+ # openai_api_key=api_key,
+ model_name=data.get("model_name", "TheBloke/Llama-2-7B-32K-Instruct-GPTQ"),
+ max_tokens=128,
+ temperature=0,
)
+ )
text2cypher = Text2Cypher(
database=neo4j_connection,
@@ -205,9 +226,16 @@ async def root(payload: ImportPayload):
try:
result = ""
- llm = OpenAIChat(
- openai_api_key=api_key, model_name="gpt-3.5-turbo-16k", max_tokens=4000
- )
+ # llm = OpenAIChat(
+ # openai_api_key=api_key, model_name="gpt-3.5-turbo-16k", max_tokens=4000
+ # )
+ llm=Llama2Chat(
+ # openai_api_key=api_key,
+ model_name="TheBloke/Llama-2-7B-32K-Instruct-GPTQ",
+ max_tokens=512,
+ temperature=0,
+ )
+
if not payload.neo4j_schema:
extractor = DataExtractor(llm=llm)
@@ -246,11 +274,17 @@ async def companyInformation(payload: companyReportPayload):
)
api_key = openai_api_key if openai_api_key else payload.api_key
- llm = OpenAIChat(
- openai_api_key=api_key,
- model_name="gpt-3.5-turbo-16k-0613",
- max_tokens=512,
- )
+ # llm = OpenAIChat(
+ # openai_api_key=api_key,
+ # model_name="gpt-3.5-turbo-16k-0613",
+ # max_tokens=512,
+ # )
+ llm=Llama2Chat(
+ model_name="TheBloke/Llama-2-7B-32K-Instruct-GPTQ",
+ max_tokens=512,
+ temperature=0,
+ )
+
print("Running company report for " + payload.company)
company_report = CompanyReport(neo4j_connection, payload.company, llm)
result = company_report.run()
diff --git a/ui/Dockerfile b/ui/Dockerfile
index 18ef6ad..17c3a4b 100644
--- a/ui/Dockerfile
+++ b/ui/Dockerfile
@@ -24,9 +24,9 @@ RUN rm -rf dist
# Disable strict SSL
RUN npm config set strict-ssl false
-RUN npm install vite
-RUN npm install react react-dom
-RUN npm install --save-dev @types/react @types/react-dom
+RUN npm install vite
+RUN npm install --save react react-dom
+RUN npm install --save-dev @types/react @types/react-dom typescript
# RUN npm install --save-dev @types/vite
RUN npm install react-use-websocket
RUN npm install
diff --git a/ui/src/chat-with-kg/App.tsx b/ui/src/chat-with-kg/App.tsx
index 9e28e75..561beef 100644
--- a/ui/src/chat-with-kg/App.tsx
+++ b/ui/src/chat-with-kg/App.tsx
@@ -1,17 +1,11 @@
-import { useCallback, useEffect, useState, ChangeEvent } from "react";
-import React from 'react';
-import { useWebSocket, ReadyState } from 'react-use-websocket';
+import React, { useCallback, useEffect, useState, ChangeEvent } from "react";
+import { useWebSocket, ReadyState } from "react-use-websocket";
import ChatContainer from "./ChatContainer";
import type { ChatMessageObject } from "./ChatMessage";
import ChatInput from "./ChatInput";
-// import useWebSocket, { ReadyState } from "react-use-websocket";
import KeyModal from "../components/keymodal";
-import type {
- ConversationState,
- WebSocketRequest,
- WebSocketResponse,
-} from "../../types/websocketTypes";
+import type { ConversationState, WebSocketRequest, WebSocketResponse } from "../../types/websocketTypes";
const SEND_REQUESTS = true;
@@ -52,7 +46,7 @@ function loadKeyFromStorage() {
return localStorage.getItem("api_key");
}
-const QUESTION_PREFIX_REGEXP = /^[0-9]{1,2}[\w]*[\.\)\-]*[\w]*/;
+const QUESTION_PREFIX_REGEXP = /^[0-9]{1,2}[\\w]*[\\.\\)\\-]*[\\w]*/;
function stripQuestionPrefix(question: string): string {
if (question.match(QUESTION_PREFIX_REGEXP)) {
@@ -62,258 +56,59 @@ function stripQuestionPrefix(question: string): string {
}
function App() {
- const [serverAvailable, setServerAvailable] = useState(true);
- const [needsApiKeyLoading, setNeedsApiKeyLoading] = useState(true);
- const [needsApiKey, setNeedsApiKey] = useState(true);
- const [chatMessages, setChatMessages] = useState(chatMessageObjects);
- const [conversationState, setConversationState] =
- useState("ready");
- const { sendJsonMessage, lastMessage, readyState } = useWebSocket(URI, {
+ const [messages, setMessages] = useState(chatMessageObjects);
+ const [apiKey, setApiKey] = useState(loadKeyFromStorage());
+ const [isKeyModalOpen, setIsKeyModalOpen] = useState(!apiKey);
+
+ const { sendMessage, lastMessage, readyState } = useWebSocket(URI, {
+ queryParams: { apiKey: apiKey ?? "" },
+ onOpen: () => console.log("Connected to WebSocket"),
+ onClose: () => console.log("Disconnected from WebSocket"),
shouldReconnect: () => true,
- reconnectInterval: 5000,
});
- const [errorMessage, setErrorMessage] = useState(null);
- const [modalIsOpen, setModalIsOpen] = useState(false);
- const [apiKey, setApiKey] = useState(loadKeyFromStorage() || "");
- const [sampleQuestions, setSampleQuestions] = useState([]);
- const [text2cypherModel, setText2cypherModel] = useState("gpt-3.5-turbo-0613");
-
- const showContent = serverAvailable && !needsApiKeyLoading;
-
- function loadSampleQuestions() {
- const body = {
- api_key: apiKey,
- };
- const options = {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- },
- body: JSON.stringify(body),
- };
- fetch(QUESTIONS_URI, options).then(
- (response) => {
- response.json().then(
- (result) => {
- if (result.output && result.output.length > 0) {
- setSampleQuestions(result.output.map(stripQuestionPrefix));
- } else {
- setSampleQuestions([]);
- }
- },
- (error) => {
- setSampleQuestions([]);
- }
- );
- },
- (error) => {
- setSampleQuestions([]);
- }
- );
- }
useEffect(() => {
- fetch(HAS_API_KEY_URI).then(
- (response) => {
- response.json().then(
- (result) => {
- // const needsKey = result.output;
- const needsKey = !result.output;
- setNeedsApiKey(needsKey);
- setNeedsApiKeyLoading(false);
- if (needsKey) {
- const api_key = loadKeyFromStorage();
- if (api_key) {
- setApiKey(api_key);
- loadSampleQuestions();
- } else {
- setModalIsOpen(true);
- }
- } else {
- loadSampleQuestions();
- }
- },
- (error) => {
- setNeedsApiKeyLoading(false);
- setServerAvailable(false);
- }
- );
- },
- (error) => {
- setNeedsApiKeyLoading(false);
- setServerAvailable(false);
- }
- );
- }, []);
-
- useEffect(() => {
- if (!lastMessage || !serverAvailable) {
- return;
- }
-
- const websocketResponse = JSON.parse(lastMessage.data) as WebSocketResponse;
-
- if (websocketResponse.type === "debug") {
- console.log(websocketResponse.detail);
- } else if (websocketResponse.type === "error") {
- setConversationState("error");
- setErrorMessage(websocketResponse.detail);
- console.error(websocketResponse.detail);
- } else if (websocketResponse.type === "start") {
- setConversationState("streaming");
-
- setChatMessages((chatMessages) => [
- ...chatMessages,
- {
- id: chatMessages.length,
- type: "text",
- sender: "bot",
- message: "",
- complete: false,
- },
+ if (lastMessage !== null) {
+ const response: WebSocketResponse = JSON.parse(lastMessage.data);
+ setMessages((prevMessages) => [
+ ...prevMessages,
+ { id: prevMessages.length, type: "text", sender: "bot", message: response.data, complete: true },
]);
- } else if (websocketResponse.type === "stream") {
- setChatMessages((chatMessages) => {
- const lastChatMessage = chatMessages[chatMessages.length - 1];
- const rest = chatMessages.slice(0, -1);
-
- return [
- ...rest,
- {
- ...lastChatMessage,
- message: lastChatMessage.message + websocketResponse.output,
- },
- ];
- });
- } else if (websocketResponse.type === "end") {
- setChatMessages((chatMessages) => {
- const lastChatMessage = chatMessages[chatMessages.length - 1];
- const rest = chatMessages.slice(0, -1);
- return [
- ...rest,
- {
- ...lastChatMessage,
- complete: true,
- cypher: websocketResponse.generated_cypher,
- },
- ];
- });
- setConversationState("ready");
}
}, [lastMessage]);
- useEffect(() => {
- if (conversationState === "error") {
- const timeout = setTimeout(() => {
- setConversationState("ready");
- }, 1000);
- return () => clearTimeout(timeout);
- }
- }, [conversationState]);
-
- const sendQuestion = (question: string) => {
- const webSocketRequest: WebSocketRequest = {
- type: "question",
- question: question,
- };
- if (serverAvailable && !needsApiKeyLoading && needsApiKey && apiKey) {
- webSocketRequest.api_key = apiKey;
- }
- webSocketRequest.model_name = text2cypherModel;
- sendJsonMessage(webSocketRequest);
- };
-
- const onChatInput = (message: string) => {
- if (conversationState === "ready") {
- setChatMessages((chatMessages) =>
- chatMessages.concat([
- {
- id: chatMessages.length,
- type: "input",
- sender: "self",
- message: message,
- complete: true,
- },
- ])
- );
- if (SEND_REQUESTS) {
- setConversationState("waiting");
- sendQuestion(message);
- }
- setErrorMessage(null);
- }
- };
-
- const openModal = () => {
- setModalIsOpen(true);
+ const handleSendMessage = (message: string) => {
+ const newMessage: ChatMessageObject = { id: messages.length, type: "input", sender: "self", message, complete: true };
+ setMessages((prevMessages) => [...prevMessages, newMessage]);
+ const request: WebSocketRequest = { message };
+ sendMessage(JSON.stringify(request));
};
- const onCloseModal = () => {
- setModalIsOpen(false);
- if (apiKey && sampleQuestions.length === 0) {
- loadSampleQuestions();
- }
+ const handleApiKeyChange = (key: string) => {
+ setApiKey(key);
+ localStorage.setItem("api_key", key);
};
- const onApiKeyChange = (newApiKey: string) => {
- setApiKey(newApiKey);
- localStorage.setItem("api_key", newApiKey);
+ const handleCloseModal = () => {
+ setIsKeyModalOpen(false);
};
- const handleModelChange = (e: ChangeEvent) => {
- setText2cypherModel(e.target.value)
- }
-
-  // return ( /* original JSX block, commented out; markup elided */ )
-  ;
+  return (
+    /* [JSX elided: "Chat with Knowledge Graph" heading; ChatContainer over
+       messages; ChatInput wired to handleSendMessage; KeyModal wired to
+       isKeyModalOpen, handleCloseModal and handleApiKeyChange] */
+  );
}
export default App;
diff --git a/ui/src/react-use-websocket.d.ts b/ui/src/react-use-websocket.d.ts
index 1570cc5..63d9a98 100644
--- a/ui/src/react-use-websocket.d.ts
+++ b/ui/src/react-use-websocket.d.ts
@@ -1,49 +1,38 @@
+// react-use-websocket.d.ts
declare module 'react-use-websocket' {
- import { ComponentType } from 'react';
-
- export type ReadyState = number;
- export type SendMessage = (message: string) => void;
- export type Options = {
- fromSocketIO?: boolean;
- share?: boolean;
- onOpen?: () => void;
- onClose?: () => void;
- onMessage?: (message: WebSocketEventMap['message']) => void;
- onError?: (error: WebSocketEventMap['error']) => void;
- filter?: () => boolean;
- };
-
- export function useWebSocket(
- url: string,
- options?: Options
- ): {
- sendMessage: SendMessage;
- lastMessage: WebSocketEventMap['message'] | null;
- readyState: ReadyState;
- };
-
+ import { ComponentType } from 'react';
- export const ReadyState: {
- CONNECTING: number;
- OPEN: number;
- CLOSING: number;
- CLOSED: number;
- };
-
- const WebSocketComponent: ComponentType<{ url: string; options?: Options }>;
- export default WebSocketComponent;
-
+ export type ReadyState = number;
+ export type SendMessage = (message: string) => void;
+ export type Options = {
+ retryOnError?: boolean;
+ reconnectAttempts?: number;
+ reconnectInterval?: number;
+ share?: boolean;
+ onOpen?: () => void;
+ onClose?: () => void;
+ onMessage?: (message: WebSocketEventMap['message']) => void;
+ onError?: (error: WebSocketEventMap['error']) => void;
+ filter?: () => boolean;
+ };
-}
-
-import { useWebSocket, ReadyState } from 'react-use-websocket';
-const URI = import.meta.env.VITE_KG_CHAT_BACKEND_ENDPOINT ?? "ws://localhost:7860/text2text";
+ export function useWebSocket(
+ url: string,
+ options?: Options
+ ): {
+ sendMessage: SendMessage;
+ sendJsonMessage: (message: any) => void;
+ lastMessage: WebSocketEventMap['message'] | null;
+ readyState: ReadyState;
+ };
-const { sendJsonMessage, lastMessage, readyState } = useWebSocket(URI, {
- shouldReconnect: () => true,
- reconnectInterval: 5000,
-});
+ export const ReadyState: {
+ CONNECTING: number;
+ OPEN: number;
+ CLOSING: number;
+ CLOSED: number;
+ };
-
-
-
\ No newline at end of file
+ const WebSocketComponent: ComponentType<{ url: string; options?: Options }>;
+ export default WebSocketComponent;
+}
diff --git a/ui/tsconfig.json b/ui/tsconfig.json
index 5c24da4..e24b39d 100644
--- a/ui/tsconfig.json
+++ b/ui/tsconfig.json
@@ -16,7 +16,8 @@
"isolatedModules": true,
"noEmit": true,
"types": ["vite/client"],
- "jsx": "react-jsx"
+ "jsx": "react-jsx",
+ "baseUrl": "./src"
},
"include": [
"src",
@@ -25,3 +26,4 @@
// "src/react-use-websocket.d.ts" // Adjust the path as needed
]
}
+
diff --git a/ui/types/websocketTypes.ts b/ui/types/websocketTypes.ts
index f88cfa6..8e1d672 100644
--- a/ui/types/websocketTypes.ts
+++ b/ui/types/websocketTypes.ts
@@ -1,28 +1,27 @@
-export type WebSocketRequest = {
- type: "question";
+// types/websocketTypes.ts
+
+export type ConversationState = "ready" | "waiting" | "streaming" | "error";
+
+export interface WebSocketRequest {
+ type: string;
+ message: string;
question: string;
api_key?: string;
model_name?: string;
-};
+}
+
+export interface WebSocketResponse {
+ type: string;
+ detail: string;
+ output?: string;
+ generated_cypher?: string;
+}
-export type WebSocketResponse =
- | { type: "start" }
- | {
- type: "stream";
- output: string;
- }
- | {
- type: "end";
- output: string;
- generated_cypher: string | null;
- }
- | {
- type: "error";
- detail: string;
- }
- | {
- type: "debug";
- detail: string;
- };
+export interface ChatMessageObject {
+ id: number;
+ type: "input" | "text";
+ sender: "self" | "bot";
+ message: string;
+ complete: boolean;
+}
-export type ConversationState = "waiting" | "streaming" | "ready" | "error";
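
A sketch of how the wrappers introduced by this patch would be exercised, mirroring the signatures defined above. The module paths assume api/src is on PYTHONPATH (and, for Llama2Chat, the import fix that lands in PATCH 09); the GPTQ checkpoint is large, and quantized weights normally need extra packages such as auto-gptq, which this series never adds:

from llm.openai import Llama2Chat            # class added in this patch
from embedding.openai import LlamaEmbedding  # module path assumed from the file path above

llm = Llama2Chat(max_tokens=512, temperature=0.0)
print(llm.num_tokens_from_string("MATCH (n) RETURN n LIMIT 1"))  # token count via the tokenizer

answer = llm.generate(["You are a Cypher expert.", "List five node labels."])
print(answer)

embedder = LlamaEmbedding()                  # defaults to all-MiniLM-L6-v2
vector = embedder.generate("knowledge graph")
print(len(vector))                           # 384 dimensions for MiniLM-L6-v2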
From e7009bbb06868f166048fdf9a2446af0045612e5 Mon Sep 17 00:00:00 2001
From: Mahmoudkhorshed-Queens <21mamm2@queensu.ca>
Date: Wed, 22 May 2024 04:24:29 +0300
Subject: [PATCH 08/10] Load Llama 2 through AutoTokenizer/AutoModelForCausalLM with trust_remote_code
---
api/requirements.txt | 3 +--
api/src/llm/openai.py | 11 ++++++++---
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/api/requirements.txt b/api/requirements.txt
index dd3feda..59cf4b8 100644
--- a/api/requirements.txt
+++ b/api/requirements.txt
@@ -9,5 +9,4 @@ python-dotenv==1.0.0
websockets===11.0.3
gunicorn===20.1.0
transformers
-torch
-sentence_transformers
\ No newline at end of file
+
diff --git a/api/src/llm/openai.py b/api/src/llm/openai.py
index d21c725..568b315 100644
--- a/api/src/llm/openai.py
+++ b/api/src/llm/openai.py
@@ -82,11 +82,16 @@
List,
)
-from transformers import LlamaForCausalLM, LlamaTokenizer
+# from transformers import LlamaForCausalLM, LlamaTokenizer
import torch
from basellm import BaseLLM
from retry import retry
+# Load model directly
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+# tokenizer = AutoTokenizer.from_pretrained("TheBloke/Llama-2-7B-32K-Instruct-GPTQ", trust_remote_code=True)
+# model = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7B-32K-Instruct-GPTQ", trust_remote_code=True)
class Llama2Chat(BaseLLM):
"""Wrapper around HuggingFace Llama2 large language models."""
@@ -97,8 +102,8 @@ def __init__(
max_tokens: int = 2056,
temperature: float = 0.0,
) -> None:
- self.tokenizer = LlamaTokenizer.from_pretrained(model_name)
- self.model = LlamaForCausalLM.from_pretrained(model_name)
+ self.tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+ self.model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
self.max_tokens = max_tokens
self.temperature = temperature
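
A standalone check of the Auto* loading path this patch switches to. A tiny public model is substituted here so the snippet runs quickly; the GPTQ checkpoint named in the patch additionally requires GPTQ support (for example via optimum/auto-gptq), which plain transformers does not ship:

# Minimal sketch of the AutoTokenizer/AutoModelForCausalLM path, using a
# small stand-in model; the patch itself targets
# TheBloke/Llama-2-7B-32K-Instruct-GPTQ.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "sshleifer/tiny-gpt2"  # stand-in so the check runs in seconds
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)

inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_length=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))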
From 8648dfb0333bd41db232341b6655b0f26083de37 Mon Sep 17 00:00:00 2001
From: Mahmoudkhorshed-Queens <21mamm2@queensu.ca>
Date: Fri, 24 May 2024 22:09:08 +0300
Subject: [PATCH 09/10] Pin torch==2.3.0 and fix the BaseLLM import path
---
api/requirements.txt | 2 +-
api/src/llm/openai.py | 4 ++--
api/src/main.py | 4 +++-
3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/api/requirements.txt b/api/requirements.txt
index 59cf4b8..9673330 100644
--- a/api/requirements.txt
+++ b/api/requirements.txt
@@ -9,4 +9,4 @@ python-dotenv==1.0.0
websockets===11.0.3
gunicorn===20.1.0
transformers
-
+torch==2.3.0
diff --git a/api/src/llm/openai.py b/api/src/llm/openai.py
index 568b315..68edbac 100644
--- a/api/src/llm/openai.py
+++ b/api/src/llm/openai.py
@@ -5,7 +5,7 @@
# import openai
# import tiktoken
-# from llm.basellm import BaseLLM
+from llm.basellm import BaseLLM
# from retry import retry
@@ -84,7 +84,7 @@
# from transformers import LlamaForCausalLM, LlamaTokenizer
import torch
-from basellm import BaseLLM
+# from basellm import BaseLLM
from retry import retry
# Load model directly
diff --git a/api/src/main.py b/api/src/main.py
index 983d5b3..c9ec7d6 100644
--- a/api/src/main.py
+++ b/api/src/main.py
@@ -17,7 +17,9 @@
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fewshot_examples import get_fewshot_examples
-from llm.openai import OpenAIChat,Llama2Chat
+from llm.openai import Llama2Chat
+# from llm.openai import OpenAIChat
+
from pydantic import BaseModel
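
After pinning torch==2.3.0, a quick import-time sanity check confirms which wheel pip actually resolved inside the image:

# Sanity check for the torch pin: confirm the installed version and whether
# CUDA is usable in the container (False in a CPU-only image).
import torch

print(torch.__version__)             # expect "2.3.0", possibly with a +cpu/+cuXXX suffix
print(torch.cuda.is_available())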
From b7d36585e85a679a84f6326d724ef4a037ddcd52 Mon Sep 17 00:00:00 2001
From: Mahmoudkhorshed-Queens <21mamm2@queensu.ca>
Date: Fri, 24 May 2024 22:37:55 +0300
Subject: [PATCH 10/10] Install API requirements with pip --no-cache-dir
---
api/Dockerfile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/api/Dockerfile b/api/Dockerfile
index 21dd924..d0e7e7a 100644
--- a/api/Dockerfile
+++ b/api/Dockerfile
@@ -12,7 +12,7 @@ RUN mkdir -p $FOLDER
# Install packages
COPY ./requirements.txt $FOLDER/requirements.txt
-RUN pip install -r $FOLDER/requirements.txt
+RUN pip install --no-cache-dir -r $FOLDER/requirements.txt
# Copy the project files into the container
COPY ./src $FOLDER/src