main.py
import torch
import uvicorn
from contextlib import asynccontextmanager
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from param_model import (
    ModelList,
    BaseResponse,
    CreateEmbeddingResponse,
    ChatCompletionResponse,
)
from llms import load_custom_models

# Load the chat, embedding, and reranker models at import time
load_custom_models("qwen", "embedding", "reranker")


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Code before `yield` runs at startup, code after it at shutdown:
    # release cached GPU memory once the server stops.
    yield
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()


app = FastAPI(lifespan=lifespan)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
'''The following interfaces are not public.'''
# from posts_api import (
#     copy_writing,
#     create_chat,
#     create_gender,
#     create_comment,
#     acquire_prompt,
#     create_translate,
# )
# app.post(
#     '/api/gpt/copywriting',
#     response_model=BaseResponse)(copy_writing)
# app.post(
#     '/api/gpt/gender',
#     response_model=BaseResponse)(create_gender)
# app.post(
#     '/api/gpt/comments',
#     summary="Post comments",
#     operation_id="create_comment",
#     response_model=BaseResponse)(create_comment)
# app.get(
#     "/api/gpt/prompts",
#     summary="Get preset prompts",
#     operation_id="acquire_prompt",
#     response_model=BaseResponse)(acquire_prompt)
# app.post(
#     '/api/gpt/trans',
#     summary="Text translation",
#     operation_id="create_translate",
#     response_model=BaseResponse)(create_translate)
# app.post(
#     '/api/gpt/chat/completions',
#     summary="Chat with GPT",
#     operation_id="create_chat",
#     response_model=ChatCompletionResponse)(create_chat)
'''OpenAI-compatible API'''
from openai_api import create_chat_completion, list_models, create_embeddings

# Registering handlers via `app.get(path, ...)(handler)` is equivalent to
# applying the route decorator to each imported function.
app.get(
    '/v1/models',
    response_model=ModelList)(list_models)
app.post(
    '/v1/embeddings',
    response_model=CreateEmbeddingResponse)(create_embeddings)
app.post(
    '/v1/chat/completions',
    response_model=ChatCompletionResponse)(create_chat_completion)
if __name__ == '__main__':
    # Pass the app as an import string so that `reload=True` takes effect;
    # with an app object, uvicorn ignores auto-reload and emits a warning.
    uvicorn.run("main:app", host="0.0.0.0", port=5000, reload=True)
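
# Example client call (illustrative sketch, not part of this service): since the
# routes above follow the OpenAI API shape, the official `openai` client can be
# pointed at this server. The base URL, the "EMPTY" api_key placeholder, and the
# "qwen" model id are assumptions; check /v1/models for the actual model id.
#
# from openai import OpenAI
#
# client = OpenAI(base_url="http://localhost:5000/v1", api_key="EMPTY")
# reply = client.chat.completions.create(
#     model="qwen",
#     messages=[{"role": "user", "content": "Hello"}],
# )
# print(reply.choices[0].message.content)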