Skip to content

Commit 078d3df

Browse files
committed
Update config and code for max_tokens changes; remove .env and clean up
1 parent 705c32d commit 078d3df

File tree

8 files changed

+69
-283
lines changed

8 files changed

+69
-283
lines changed

.env

-3
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,3 @@
1-
DOCKER_BUILDKIT=1
2-
MODEL="gpt-4-preview"
3-
MAX_DIFF_TOKENS=3500
41
MAX_LENGTH=72
52
LANGUAGE=en
63
TIMEOUT=30

README.md

+4-3
Original file line numberDiff line numberDiff line change
@@ -59,9 +59,10 @@ cargo install --path .
5959
### Advanced Configuration
6060

6161
Customize Git AI's behavior with these commands:
62-
- `git-ai config set max-tokens <max-tokens>`: Adjust the git diff character limit.
63-
- `git-ai config set timeout <timeout>`: Set OpenAI response timeout.
64-
- `git-ai config set language <language>`: Select the model language (default: `en`).
62+
- `git-ai config set max-commit-length <length>` (default: 72): Set the maximum length of commit messages.
63+
- `git-ai config set max-tokens <tokens>` (default: 512): Set the maximum number of tokens in the model's generated response.
64+
- `git-ai config set model <model>` (default: "gpt-3.5-turbo"): Set the OpenAI model to use.
65+
- `git-ai config set openai-api-key <api-key>`: Set your OpenAI API key.
6566

6667
## Contributing
6768

resources/prompt.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ You are an AI assistant that generates concise and meaningful git commit message
55
- Consistency: Maintain uniformity in tense, punctuation, and capitalization.
66
- Accuracy: Ensure the message accurately reflects the changes and their purpose.
77
- Present tense, imperative mood. (e.g., "Add x to y" instead of "Added x to y")
8-
- Max 72 chars in the output
8+
- Max {{max_commit_length}} chars in the output
99

1010
## Output:
1111

src/bin/hook.rs

+3-9
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@ use indicatif::{ProgressBar, ProgressStyle};
66
use anyhow::{Context, Result};
77
use git2::{Oid, Repository};
88
use ai::{commit, config};
9-
use ai::commit::Session;
109
use clap::Parser;
1110
use ai::hook::*;
1211

@@ -15,7 +14,7 @@ async fn main() -> Result<()> {
1514
env_logger::init();
1615

1716
let args = Args::parse();
18-
let max_tokens = config::APP.max_diff_tokens;
17+
let max_tokens = config::APP.max_tokens;
1918
let pb = ProgressBar::new_spinner();
2019
let repo = Repository::open_from_env().context("Failed to open repository")?;
2120

@@ -63,21 +62,16 @@ async fn main() -> Result<()> {
6362
std::process::exit(1);
6463
})?;
6564

66-
// Create a new session from the client
67-
let session = Session::load_from_repo(&repo).await.unwrap();
65+
pb.set_message("Generating commit message...");
6866

69-
// If the user has a session, then we can use it to generate the commit message
70-
let response = commit::generate(patch.to_string(), session.into(), pb.clone().into()).await?;
67+
let response = commit::generate(patch.to_string()).await?;
7168

7269
// Write the response to the commit message file
7370
args
7471
.commit_msg_file
7572
.write(response.response.trim().to_string())
7673
.unwrap();
7774

78-
// Save the session to the repository
79-
response.session.save_to_repo(&repo).await.unwrap();
80-
8175
pb.finish_and_clear();
8276

8377
Ok(())

src/commit.rs

+39-235
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,11 @@
1-
use std::time::Duration;
21
use std::io;
32

43
use async_openai::types::{
5-
AssistantObject, AssistantTools, AssistantToolsCode, CreateAssistantRequestArgs, CreateMessageRequestArgs, CreateRunRequestArgs, CreateThreadRequestArgs, MessageContent, RunStatus
4+
ChatCompletionRequestSystemMessageArgs, ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs
65
};
76
use async_openai::config::OpenAIConfig;
87
use async_openai::error::OpenAIError;
9-
use indicatif::ProgressBar;
108
use async_openai::Client;
11-
use tokio::time::sleep;
12-
use git2::Repository;
139
use thiserror::Error;
1410
use anyhow::Context;
1511

@@ -36,249 +32,57 @@ pub enum ChatError {
3632
}
3733

3834
fn instruction() -> String {
39-
include_str!("../resources/prompt.md").to_string()
40-
}
41-
42-
#[derive(Debug, Clone, PartialEq)]
43-
pub struct Session {
44-
pub thread_id: String,
45-
pub assistant_id: String
46-
}
35+
format!("You are an AI assistant that generates concise and meaningful git commit messages based on provided diffs. Please adhere to the following guidelines:
4736
48-
impl Session {
49-
pub async fn new_from_client(client: &Client<OpenAIConfig>) -> Result<Self, ChatError> {
50-
log::debug!("Creating new session from client");
51-
let assistant = create_assistant(client).await?;
52-
let thread_request = CreateThreadRequestArgs::default().build()?;
53-
let thread = client.threads().create(thread_request).await?;
37+
- Structure: Begin with a clear, present-tense summary.
38+
- Content: Emphasize the changes and their rationale, excluding irrelevant details.
39+
- Consistency: Maintain uniformity in tense, punctuation, and capitalization.
40+
- Accuracy: Ensure the message accurately reflects the changes and their purpose.
41+
- Present tense, imperative mood. (e.g., 'Add x to y' instead of 'Added x to y')
42+
- Max {} chars in the output
5443
55-
Ok(Session {
56-
thread_id: thread.id,
57-
assistant_id: assistant.id
58-
})
59-
}
44+
## Output:
6045
61-
// Load the session from the repository
62-
pub async fn load_from_repo(repo: &Repository) -> anyhow::Result<Option<Self>> {
63-
log::debug!("Loading session from repo");
64-
let mut config = repo.config().context("Failed to load config")?;
65-
let thread_id = config.get_string("ai.thread-id").ok();
46+
Your output should be a commit message generated from the input diff and nothing else.
6647
67-
let global_config = config
68-
.open_global()
69-
.context("Failed to open global config")?;
70-
let assistant_id = global_config.get_string("ai.assistant-id").ok();
71-
log::debug!("Loaded session from repo: thread_id: {:?}, assistant_id: {:?}", thread_id, assistant_id);
48+
## Input:
7249
73-
match (thread_id, assistant_id) {
74-
(Some(thread_id), Some(assistant_id)) => {
75-
Ok(Some(Session {
76-
thread_id,
77-
assistant_id
78-
}))
79-
}
80-
_ => Ok(None)
81-
}
82-
}
83-
84-
// Save the session to the repository
85-
pub async fn save_to_repo(&self, repo: &Repository) -> anyhow::Result<()> {
86-
log::debug!("Saving session to repo");
87-
let mut config = repo.config().context("Failed to load config")?;
88-
config.set_str("ai.thread-id", self.thread_id.as_str())?;
89-
config.snapshot().context("Failed to save config")?;
90-
91-
let mut global_config = config
92-
.open_global()
93-
.context("Failed to open global config")?;
94-
global_config.set_str("ai.assistant-id", self.assistant_id.as_str())?;
95-
global_config
96-
.snapshot()
97-
.context("Failed to save global config")?;
98-
Ok(())
99-
}
50+
INPUT:", config::APP.max_commit_length)
10051
}
10152

10253
#[derive(Debug, Clone, PartialEq)]
10354
pub struct OpenAIResponse {
104-
pub session: Session,
10555
pub response: String
10656
}
10757

108-
// Create a new assistant
109-
async fn create_assistant(client: &Client<OpenAIConfig>) -> Result<AssistantObject, ChatError> {
110-
let model = config::APP.model.clone();
111-
let instruction = instruction();
112-
// let example_jsonl_id = "file-a8ghhy1FbWtBKEadAj5OHJWz";
113-
114-
let tools = vec![AssistantTools::Code(AssistantToolsCode {
115-
r#type: "code_interpreter".to_string()
116-
})];
117-
118-
let assistant_request = CreateAssistantRequestArgs::default()
119-
.name("Git Commit Assistant")
120-
.instructions(&instruction)
121-
.tools(tools)
122-
.model(model)
123-
.build()?;
124-
125-
Ok(client.assistants().create(assistant_request).await?)
126-
}
127-
128-
#[derive(Debug, Clone)]
129-
struct Connection {
130-
client: Client<OpenAIConfig>,
131-
session: Session
132-
}
133-
134-
impl Connection {
135-
pub async fn new(session: Option<Session>) -> Result<Self, ChatError> {
136-
let api_key = config::APP
137-
.openai_api_key
138-
.clone()
139-
.context("Failed to get OpenAI API key, please run `git-ai config set openai-api")?;
140-
let config = OpenAIConfig::new().with_api_key(api_key);
141-
let client = Client::with_config(config);
142-
143-
let session = match session {
144-
Some(session) => session,
145-
None => Session::new_from_client(&client).await?
146-
};
147-
148-
Ok(Connection {
149-
client,
150-
session
151-
})
152-
}
153-
154-
// Create a new run
155-
async fn create_run(&self) -> Result<Run, ChatError> {
156-
let request = CreateRunRequestArgs::default()
157-
.assistant_id(self.session.clone().assistant_id)
158-
.build()?;
159-
let run = self
160-
.client
161-
.threads()
162-
.runs(&self.session.thread_id)
163-
.create(request)
164-
.await?;
165-
Ok(Run {
166-
id: run.id,
167-
connection: self.clone()
168-
})
169-
}
170-
171-
// Get the last message from the thread
172-
async fn last_message(&self) -> Result<String, ChatError> {
173-
let query = [("limit", "1")];
174-
let response = self
175-
.client
176-
.threads()
177-
.messages(&self.session.thread_id)
178-
.list(&query)
179-
.await?;
180-
let message_id = response.data.get(0).unwrap().id.clone();
181-
let message = self
182-
.client
183-
.threads()
184-
.messages(&self.session.thread_id)
185-
.retrieve(&message_id)
186-
.await?;
187-
let content = message.content.get(0).unwrap();
188-
let MessageContent::Text(text) = &content else {
189-
return Err(ChatError::OpenAIError("Message content is not text".to_string()));
190-
};
191-
192-
Ok(text.text.value.clone())
193-
}
194-
195-
async fn create_message(&self, message: &str) -> Result<(), ChatError> {
196-
let message = CreateMessageRequestArgs::default()
197-
.role("user")
198-
.content(message)
199-
.build()?;
200-
self
201-
.client
202-
.threads()
203-
.messages(&self.session.thread_id)
204-
.create(message)
205-
.await?;
206-
Ok(())
207-
}
208-
209-
async fn into_response(&self) -> Result<OpenAIResponse, ChatError> {
210-
let message = self.last_message().await?;
211-
let response = OpenAIResponse {
212-
response: message,
213-
session: self.session.clone()
214-
};
215-
Ok(response)
216-
}
217-
}
218-
219-
#[derive(Debug, Clone)]
220-
struct Run {
221-
id: String,
222-
connection: Connection
223-
}
224-
225-
impl Run {
226-
pub async fn pull_status(&self) -> Result<RunStatus, ChatError> {
227-
Ok(
228-
self
229-
.connection
230-
.client
231-
.threads()
232-
.runs(&self.connection.session.thread_id)
233-
.retrieve(self.id.as_str())
234-
.await?
235-
.status
236-
)
237-
}
238-
}
239-
240-
pub async fn generate(
241-
diff: String, session: Option<Session>, progressbar: Option<ProgressBar>
242-
) -> Result<OpenAIResponse, ChatError> {
243-
progressbar
58+
pub async fn generate(diff: String) -> Result<OpenAIResponse, ChatError> {
59+
let api_key = config::APP
60+
.openai_api_key
24461
.clone()
245-
.map(|pb| pb.set_message("Generating commit message..."));
246-
247-
let connection = Connection::new(session).await?;
248-
connection.create_message(&diff).await?;
249-
let run = connection.create_run().await?;
62+
.context("Failed to get OpenAI API key, please run `git-ai config set openai-api")?;
63+
64+
let config = OpenAIConfig::new().with_api_key(api_key);
65+
let client = Client::with_config(config);
66+
let request = CreateChatCompletionRequestArgs::default()
67+
.max_tokens(config::APP.max_tokens as u16)
68+
.model(config::APP.model.clone())
69+
.messages([
70+
ChatCompletionRequestSystemMessageArgs::default()
71+
.content(instruction())
72+
.build()?
73+
.into(),
74+
ChatCompletionRequestUserMessageArgs::default()
75+
.content(diff)
76+
.build()?
77+
.into()
78+
])
79+
.build()?;
25080

251-
return loop {
252-
match run.pull_status().await? {
253-
RunStatus::Completed => {
254-
break connection.into_response().await;
255-
}
256-
RunStatus::Failed => {
257-
break Err(ChatError::OpenAIError("Run failed".to_string()));
258-
}
259-
RunStatus::Cancelled => {
260-
break Err(ChatError::OpenAIError("Run cancelled".to_string()));
261-
}
262-
RunStatus::Expired => {
263-
break Err(ChatError::OpenAIError("Run expired".to_string()));
264-
}
265-
RunStatus::RequiresAction => {
266-
break Err(ChatError::OpenAIError("Run requires action".to_string()));
267-
}
268-
RunStatus::InProgress => {
269-
log::debug!("Run is in progress");
270-
// progressbar.clone().map(|pb| pb.set_message("In progress..."));
271-
}
272-
RunStatus::Queued => {
273-
log::debug!("Run is queued");
274-
// progressbar.clone().map(|pb| pb.set_message("Queued..."));
275-
}
276-
RunStatus::Cancelling => {
277-
log::debug!("Run is cancelling");
278-
// progressbar.clone().map(|pb| pb.set_message("Cancelling..."));
279-
}
280-
}
81+
let response = client.chat().create(request).await?;
82+
let choise = response.choices.get(0).unwrap();
83+
let text = choise.message.content.clone();
28184

282-
sleep(Duration::from_millis(300)).await;
283-
};
85+
Ok(OpenAIResponse {
86+
response: text.unwrap()
87+
})
28488
}

0 commit comments

Comments
 (0)