from langchain_text_splitters import CharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import WebBaseLoader
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langsmith import Client, traceable
from typing_extensions import Annotated, TypedDict
from dotenv import load_dotenv
load_dotenv()
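# Assumed setup: credentials come from .env. OPENAI_API_KEY is needed for the
# embedding and chat calls, and the LangSmith variables (e.g. LANGSMITH_API_KEY
# and LANGSMITH_TRACING=true) so that traced runs and experiments are recorded.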
loader = WebBaseLoader(
    web_path="https://www.clue-tec.com",
)
pages = loader.load()
text_splitter = CharacterTextSplitter(
    chunk_size=200,
    separator="\n",
    chunk_overlap=50,
)
documents = text_splitter.split_documents(pages)
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
faiss_index = FAISS.from_documents(documents, embeddings)
retriever = faiss_index.as_retriever(search_kwargs={"k": 3})
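# Optional sanity check of the index before wiring up the bot. The query string
# is just a hypothetical example; uncomment to verify retrieval returns results:
# for doc in retriever.invoke("会社概要"):
#     print(doc.page_content[:80])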
llm = ChatOpenAI(model="gpt-4o-mini")
# Add the decorator so this function is traced in LangSmith
@traceable()
def rag_bot(question: str) -> dict:
    # The LangChain retriever is traced automatically
    docs = retriever.invoke(question)
    context = "\n\n".join(doc.page_content for doc in docs)
    # System prompt (in Japanese): "You are a question-answering assistant.
    # Answer politely using the retrieved context below and part of the
    # conversation history. If you don't know the answer, say so. Use at most
    # three sentences and keep the answer concise. Respond in Japanese."
    instructions = f"""
あなたは質問応答タスクのアシスタントです。
検索された以下の文脈と会話履歴の一部を使って質問に丁寧に答えてください。
答えがわからなければ、わからないと答えてください。
最大で3つの文章を使い、簡潔な回答を心がけてください。
日本語で回答してください。
文脈:
====
{context}
====
"""
    # The LangChain chat model is traced automatically
    ai_msg = llm.invoke(
        [
            {"role": "system", "content": instructions},
            {"role": "user", "content": question},
        ],
    )
    return {"answer": ai_msg.content, "documents": docs}
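# Optional smoke test of the bot itself (makes real OpenAI calls). The question
# is one of the dataset examples defined below; uncomment to try it:
# print(rag_bot("須賀秀和はどのようなサービスを導入しましたか?")["answer"])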
client = Client()
# Define the examples for the dataset (question / reference answer, in Japanese)
examples = [
    (
        # Q: "What services did Hidekazu Suga introduce?"
        "須賀秀和はどのようなサービスを導入しましたか?",
        # A: "He introduced a data analytics platform built on a modern data
        # stack of Databricks, Fivetran, Snowflake, and dbt."
        "DatabricksやFivetranとSnowflake、dbtのモダンデータスタックによるデータ分析基盤を導入しました",
    ),
    (
        # Q: "What work does Hidekazu Suga support?"
        "須賀秀和が支援している業務は何ですか?",
        # A: "He provides technical support for distributed systems development
        # and the adoption of data analytics platforms."
        "分散システムの開発やデータ分析基盤の導入を技術面から支援しています",
    ),
]
# Create the dataset and examples in LangSmith
dataset_name = "example datasets"
if not client.has_dataset(dataset_name=dataset_name):
    dataset = client.create_dataset(dataset_name=dataset_name)
    client.create_examples(
        inputs=[{"question": q} for q, _ in examples],
        outputs=[{"answer": a} for _, a in examples],
        dataset_id=dataset.id,
    )
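# Note: because of the has_dataset() guard above, edits to `examples` are not
# picked up on re-runs. One way to re-seed (a sketch; this deletes remote data):
# client.delete_dataset(dataset_name=dataset_name)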
# Grade output schema
class CorrectnessGrade(TypedDict):
    # Note that the order in which the fields are defined is the order in which
    # the model will generate them. It is useful to put the explanation before
    # the verdict because it forces the model to think through its reasoning
    # before committing to a final answer:
    explanation: Annotated[str, ..., "Explain your reasoning for the score"]
    correct: Annotated[bool, ..., "True if the answer is correct, False otherwise."]
# Grade prompt
correctness_instructions = """You are a teacher grading a quiz.
You will be given a QUESTION, the GROUND TRUTH (correct) ANSWER, and the STUDENT ANSWER.
Here are the grading criteria to follow:
(1) Grade the student answer based ONLY on its factual accuracy relative to the ground truth answer.
(2) Ensure that the student answer does not contain any conflicting statements.
(3) It is OK if the student answer contains more information than the ground truth answer, as long as it is factually accurate relative to the ground truth answer.
Correctness:
A correctness value of True means that the student's answer meets all of the criteria.
A correctness value of False means that the student's answer does not meet all of the criteria.
Explain your reasoning in a step-by-step manner to ensure your reasoning and conclusion are correct.
Avoid simply stating the correct answer at the outset."""
# Grader LLM
grader_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0).with_structured_output(
    CorrectnessGrade, method="json_schema", strict=True
)
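# with_structured_output constrains the grader to emit JSON matching the
# CorrectnessGrade schema; method="json_schema" with strict=True uses OpenAI's
# structured-output mode, so the `grade` returned below is a plain dict with
# `explanation` and `correct` keys.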
def correctness(inputs: dict, outputs: dict, reference_outputs: dict) -> bool:
    """An evaluator for RAG answer accuracy."""
    answers = f"""QUESTION: {inputs['question']}
GROUND TRUTH ANSWER: {reference_outputs['answer']}
STUDENT ANSWER: {outputs['answer']}"""
    # Run evaluator
    grade = grader_llm.invoke(
        [
            {"role": "system", "content": correctness_instructions},
            {"role": "user", "content": answers},
        ]
    )
    return grade["correct"]
# Grade output schema
class RelevanceGrade(TypedDict):
    explanation: Annotated[str, ..., "Explain your reasoning for the score"]
    relevant: Annotated[
        bool, ..., "True if the answer addresses the question, False otherwise"
    ]
# Grade prompt
relevance_instructions = """You are a teacher grading a quiz.
You will be given a QUESTION and a STUDENT ANSWER.
Here are the grading criteria to follow:
(1) Ensure the STUDENT ANSWER is concise and relevant to the QUESTION
(2) Ensure the STUDENT ANSWER helps to answer the QUESTION
Relevance:
A relevance value of True means that the student's answer meets all of the criteria.
A relevance value of False means that the student's answer does not meet all of the criteria.
Explain your reasoning in a step-by-step manner to ensure your reasoning and conclusion are correct.
Avoid simply stating the correct answer at the outset."""
# Grader LLM
relevance_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0).with_structured_output(
    RelevanceGrade, method="json_schema", strict=True
)
# Evaluator
def relevance(inputs: dict, outputs: dict) -> bool:
    """A simple evaluator for RAG answer helpfulness."""
    answer = f"""QUESTION: {inputs['question']}
STUDENT ANSWER: {outputs['answer']}"""
    grade = relevance_llm.invoke(
        [
            {"role": "system", "content": relevance_instructions},
            {"role": "user", "content": answer},
        ]
    )
    return grade["relevant"]
# Grade output schema
class GroundedGrade(TypedDict):
    explanation: Annotated[str, ..., "Explain your reasoning for the score"]
    grounded: Annotated[
        bool, ..., "True if the answer is grounded in the documents, False if it hallucinates"
    ]
# Grade prompt
grounded_instructions = """You are a teacher grading a quiz.
You will be given FACTS and a STUDENT ANSWER.
Here are the grading criteria to follow:
(1) Ensure the STUDENT ANSWER is grounded in the FACTS.
(2) Ensure the STUDENT ANSWER does not contain "hallucinated" information outside the scope of the FACTS.
Grounded:
A grounded value of True means that the student's answer meets all of the criteria.
A grounded value of False means that the student's answer does not meet all of the criteria.
Explain your reasoning in a step-by-step manner to ensure your reasoning and conclusion are correct.
Avoid simply stating the correct answer at the outset."""
# Grader LLM
grounded_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0).with_structured_output(
    GroundedGrade, method="json_schema", strict=True
)
# Evaluator
def groundedness(inputs: dict, outputs: dict) -> bool:
    """A simple evaluator for RAG answer groundedness."""
    doc_string = "\n\n".join(doc.page_content for doc in outputs["documents"])
    answer = f"""FACTS: {doc_string}
STUDENT ANSWER: {outputs['answer']}"""
    grade = grounded_llm.invoke(
        [
            {"role": "system", "content": grounded_instructions},
            {"role": "user", "content": answer},
        ]
    )
    return grade["grounded"]
# Grade output schema
class RetrievalRelevanceGrade(TypedDict):
    explanation: Annotated[str, ..., "Explain your reasoning for the score"]
    relevant: Annotated[
        bool,
        ...,
        "True if the retrieved documents are relevant to the question, False otherwise",
    ]
# Grade prompt
retrieval_relevance_instructions = """You are a teacher grading a quiz.
You will be given a QUESTION and a set of FACTS provided by the student.
Here are the grading criteria to follow:
(1) Your goal is to identify FACTS that are completely unrelated to the QUESTION
(2) If the facts contain ANY keywords or semantic meaning related to the question, consider them relevant
(3) It is OK if the facts have SOME information that is unrelated to the question as long as (2) is met
Relevance:
A relevance value of True means that the FACTS contain ANY keywords or semantic meaning related to the QUESTION and are therefore relevant.
A relevance value of False means that the FACTS are completely unrelated to the QUESTION.
Explain your reasoning in a step-by-step manner to ensure your reasoning and conclusion are correct.
Avoid simply stating the correct answer at the outset."""
# Grader LLM
retrieval_relevance_llm = ChatOpenAI(
    model="gpt-4o-mini", temperature=0
).with_structured_output(RetrievalRelevanceGrade, method="json_schema", strict=True)
def retrieval_relevance(inputs: dict, outputs: dict) -> bool:
    """An evaluator for document relevance."""
    doc_string = "\n\n".join(doc.page_content for doc in outputs["documents"])
    answer = f"""FACTS: {doc_string}
QUESTION: {inputs['question']}"""
    # Run evaluator
    grade = retrieval_relevance_llm.invoke(
        [
            {"role": "system", "content": retrieval_relevance_instructions},
            {"role": "user", "content": answer},
        ]
    )
    return grade["relevant"]
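# Each evaluator returns a bare bool; client.evaluate should record it in
# LangSmith as feedback keyed by the evaluator's function name (correctness,
# relevance, groundedness, retrieval_relevance) on each run.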
# Adapter: client.evaluate passes each example's inputs dict to this function
def target(inputs: dict) -> dict:
    return rag_bot(inputs["question"])
experiment_results = client.evaluate(
    target,
    data=dataset_name,
    evaluators=[correctness, groundedness, relevance, retrieval_relevance],
    experiment_prefix="clue-tec",
    metadata={"version": "LCEL context, gpt-4o-mini"},
)
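# A minimal sketch for inspecting the results locally: to_pandas() is provided
# by the langsmith SDK's ExperimentResults, but it assumes pandas is installed.
df = experiment_results.to_pandas()
print(df.head())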