```diff
@@ -1,35 +1,15 @@
+from ai_commit_msg.core.llm_chat_completion import llm_chat_completion
 from ai_commit_msg.core.prompt import get_prompt
-from ai_commit_msg.services.anthropic_service import AnthropicService
 from ai_commit_msg.services.config_service import ConfigService
-from ai_commit_msg.services.git_service import GitService
-from ai_commit_msg.services.ollama_service import OLlamaService
-from ai_commit_msg.services.openai_service import OpenAiService
-from ai_commit_msg.utils.logger import Logger
-from ai_commit_msg.utils.models import ANTHROPIC_MODEL_LIST, OPEN_AI_MODEL_LIST
 
 
 def generate_commit_message(diff: str = None) -> str:
 
     if diff is None:
         raise ValueError("Diff is required to generate a commit message")
 
-    select_model = ConfigService.get_model()
-
     prompt = get_prompt(diff)
-
-    # TODO - create a factory with a shared interface for calling the LLM models, this will make it easier to add new models
-    ai_gen_commit_msg = None
-    if str(select_model) in OPEN_AI_MODEL_LIST:
-        ai_gen_commit_msg = OpenAiService().chat_with_openai(prompt)
-    elif select_model.startswith("ollama"):
-        ai_gen_commit_msg = OLlamaService().chat_completion(prompt)
-    elif select_model in ANTHROPIC_MODEL_LIST:
-        ai_gen_commit_msg = AnthropicService().chat_completion(prompt)
-
-    if ai_gen_commit_msg is None:
-        Logger().log("Unsupported model: " + select_model)
-        return ""
+    ai_gen_commit_msg = llm_chat_completion(prompt)
 
     prefix = ConfigService().prefix
-
     return prefix + ai_gen_commit_msg
```
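The removed branch logic shows what the new `llm_chat_completion` helper now has to cover, and the change resolves the deleted TODO about a shared interface for calling the LLM models. Here is a minimal sketch of what `ai_commit_msg/core/llm_chat_completion.py` might contain, assuming it simply centralizes the dispatch this diff deletes; the real module may differ, and raising on an unsupported model (where the old inline code logged and returned an empty string) is also an assumption:

```python
# Hypothetical sketch of ai_commit_msg/core/llm_chat_completion.py, assuming it
# centralizes the provider dispatch removed from generate_commit_message above.
# The service classes and model lists are exactly the ones the old code imported.
from ai_commit_msg.services.anthropic_service import AnthropicService
from ai_commit_msg.services.config_service import ConfigService
from ai_commit_msg.services.ollama_service import OLlamaService
from ai_commit_msg.services.openai_service import OpenAiService
from ai_commit_msg.utils.models import ANTHROPIC_MODEL_LIST, OPEN_AI_MODEL_LIST


def llm_chat_completion(prompt) -> str:
    select_model = ConfigService.get_model()

    # Route the prompt to whichever provider the configured model belongs to,
    # mirroring the if/elif chain that used to live in generate_commit_message.
    if str(select_model) in OPEN_AI_MODEL_LIST:
        return OpenAiService().chat_with_openai(prompt)
    if select_model.startswith("ollama"):
        return OLlamaService().chat_completion(prompt)
    if select_model in ANTHROPIC_MODEL_LIST:
        return AnthropicService().chat_completion(prompt)

    # Assumed behavior: the old inline code logged "Unsupported model" and
    # returned ""; failing loudly here is a guess at the new helper's choice.
    raise ValueError(f"Unsupported model: {select_model}")
```

With the dispatch behind a single function, supporting a new provider means editing only `llm_chat_completion`; callers such as `generate_commit_message` stay unchanged.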