17
17
class OpenlayerHandler (BaseCallbackHandler ):
18
18
"""LangChain callback handler that logs to Openlayer."""
19
19
20
- def __init__ (
21
- self ,
22
- ** kwargs : Any ,
23
- ) -> None :
20
+ def __init__ (self , ** kwargs : Any ) -> None :
24
21
super ().__init__ ()
25
22
26
23
self .start_time : float = None
@@ -37,14 +34,14 @@ def __init__(
37
34
self .output : str = None
38
35
self .metatada : Dict [str , Any ] = kwargs or {}
39
36
40
def on_llm_start(
    self,
    serialized: Dict[str, Any],  # noqa: ARG002
    prompts: List[str],  # noqa: ARG002
    **kwargs: Any,  # noqa: ARG002
) -> Any:
    """Run when LLM starts running.

    Intentionally a no-op: this handler collects its data in the
    chat-model-start and LLM-end callbacks instead.
    """
def on_chat_model_start (
46
43
self ,
47
- serialized : Dict [str , Any ],
44
+ serialized : Dict [str , Any ], # noqa: ARG002
48
45
messages : List [List [langchain_schema .BaseMessage ]],
49
46
** kwargs : Any ,
50
47
) -> Any :
@@ -80,44 +77,34 @@ def _langchain_messages_to_prompt(
80
77
81
78
def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
    """Run on new LLM token. Only available when streaming is enabled."""
    # Streaming tokens are not logged individually; the full output is
    # assembled from the final LLMResult in on_llm_end.
def on_llm_end(self, response: langchain_schema.LLMResult, **kwargs: Any) -> Any:  # noqa: ARG002
    """Run when LLM ends running.

    Records timing, token usage, and cost for the completed call, appends
    the generated text to ``self.output``, then sends the row to the trace.
    """
    self.end_time = time.time()
    # Latency is stored in milliseconds.
    self.latency = (self.end_time - self.start_time) * 1000

    llm_output = response.llm_output
    if llm_output and "token_usage" in llm_output:
        token_usage = llm_output["token_usage"]
        self.prompt_tokens = token_usage.get("prompt_tokens", 0)
        self.completion_tokens = token_usage.get("completion_tokens", 0)
        self.cost = self._get_cost_estimate(
            num_input_tokens=self.prompt_tokens,
            num_output_tokens=self.completion_tokens,
        )
        self.total_tokens = token_usage.get("total_tokens", 0)

    # Flatten every generation's text into a single-line output string.
    for generation_list in response.generations:
        for generation in generation_list:
            self.output += generation.text.replace("\n", " ")

    self._add_to_trace()
def _get_cost_estimate(self, num_input_tokens: int, num_output_tokens: int) -> Union[float, None]:
    """Return the cost estimate for the current model and token counts.

    Returns ``None`` when ``self.model`` is not present in the OpenAI cost
    table (cost cannot be estimated for unknown models).
    """
    if self.model not in constants.OPENAI_COST_PER_TOKEN:
        return None
    cost_per_token = constants.OPENAI_COST_PER_TOKEN[self.model]
    return (
        cost_per_token["input"] * num_input_tokens
        + cost_per_token["output"] * num_output_tokens
    )
def _add_to_trace (self ) -> None :
123
110
"""Adds to the trace."""
@@ -139,46 +126,42 @@ def _add_to_trace(self) -> None:
139
126
metadata = self .metatada ,
140
127
)
141
128
142
def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
    """Run when LLM errors."""
    # No-op: errors are not currently forwarded to Openlayer.
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> Any:
    """Run when chain starts running."""
    # No-op: only LLM-level events are traced by this handler.
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
    """Run when chain ends running."""
    # No-op: only LLM-level events are traced by this handler.
def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
    """Run when chain errors."""
    # No-op: errors are not currently forwarded to Openlayer.
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> Any:
    """Run when tool starts running."""
    # No-op: tool invocations are not traced by this handler.
def on_tool_end(self, output: str, **kwargs: Any) -> Any:
    """Run when tool ends running."""
    # No-op: tool invocations are not traced by this handler.
def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
    """Run when tool errors."""
    # No-op: errors are not currently forwarded to Openlayer.
def on_text(self, text: str, **kwargs: Any) -> Any:
    """Run on arbitrary text."""
    # No-op: free-form text events are ignored.
def on_agent_action(self, action: langchain_schema.AgentAction, **kwargs: Any) -> Any:
    """Run on agent action."""
    # No-op: agent steps are not traced by this handler.
def on_agent_finish(self, finish: langchain_schema.AgentFinish, **kwargs: Any) -> Any:
    """Run on agent end."""
    # No-op: agent steps are not traced by this handler.
0 commit comments