Skip to content

Commit 40aa598

Browse files
committed
ci: ignore rye linting errors for custom code
1 parent 9cb9cc1 commit 40aa598

File tree

3 files changed

+47
-98
lines changed

3 files changed

+47
-98
lines changed

src/openlayer/lib/core/base_model.py

Lines changed: 5 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -42,9 +42,7 @@ class OpenlayerModel(abc.ABC):
4242
def run_from_cli(self) -> None:
4343
"""Run the model from the command line."""
4444
parser = argparse.ArgumentParser(description="Run data through a model.")
45-
parser.add_argument(
46-
"--dataset-path", type=str, required=True, help="Path to the dataset"
47-
)
45+
parser.add_argument("--dataset-path", type=str, required=True, help="Path to the dataset")
4846
parser.add_argument(
4947
"--output-dir",
5048
type=str,
@@ -85,9 +83,7 @@ def run_batch_from_df(self, df: pd.DataFrame) -> Tuple[pd.DataFrame, dict]:
8583
# Filter row_dict to only include keys that are valid parameters
8684
# for the 'run' method
8785
row_dict = row.to_dict()
88-
filtered_kwargs = {
89-
k: v for k, v in row_dict.items() if k in run_signature.parameters
90-
}
86+
filtered_kwargs = {k: v for k, v in row_dict.items() if k in run_signature.parameters}
9187

9288
# Call the run method with filtered kwargs
9389
output = self.run(**filtered_kwargs)
@@ -137,9 +133,8 @@ def write_output_to_directory(
137133
"""Writes the output DataFrame to a file in the specified directory based on the
138134
given format.
139135
"""
140-
os.makedirs(
141-
output_dir, exist_ok=True
142-
) # Create the directory if it doesn't exist
136+
# Create the directory if it doesn't exist
137+
os.makedirs(output_dir, exist_ok=True)
143138

144139
# Determine the filename based on the dataset name and format
145140
filename = f"dataset.{fmt}"
@@ -158,7 +153,7 @@ def write_output_to_directory(
158153
else:
159154
raise ValueError("Unsupported format. Please choose 'csv' or 'json'.")
160155

161-
print(f"Output written to {output_path}")
156+
print(f"Output written to {output_path}") # noqa: T201
162157

163158
@abc.abstractmethod
164159
def run(self, **kwargs) -> RunReturn:

src/openlayer/lib/integrations/langchain_callback.py

Lines changed: 29 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,7 @@
1717
class OpenlayerHandler(BaseCallbackHandler):
1818
"""LangChain callback handler that logs to Openlayer."""
1919

20-
def __init__(
21-
self,
22-
**kwargs: Any,
23-
) -> None:
20+
def __init__(self, **kwargs: Any) -> None:
2421
super().__init__()
2522

2623
self.start_time: float = None
@@ -37,14 +34,14 @@ def __init__(
3734
self.output: str = None
3835
self.metatada: Dict[str, Any] = kwargs or {}
3936

40-
def on_llm_start(
41-
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
42-
) -> Any:
37+
# noqa arg002
38+
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> Any:
4339
"""Run when LLM starts running."""
40+
pass
4441

4542
def on_chat_model_start(
4643
self,
47-
serialized: Dict[str, Any],
44+
serialized: Dict[str, Any], # noqa: ARG002
4845
messages: List[List[langchain_schema.BaseMessage]],
4946
**kwargs: Any,
5047
) -> Any:
@@ -80,44 +77,34 @@ def _langchain_messages_to_prompt(
8077

8178
def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
8279
"""Run on new LLM token. Only available when streaming is enabled."""
80+
pass
8381

84-
def on_llm_end(self, response: langchain_schema.LLMResult, **kwargs: Any) -> Any:
82+
def on_llm_end(self, response: langchain_schema.LLMResult, **kwargs: Any) -> Any: # noqa: ARG002, E501
8583
"""Run when LLM ends running."""
8684
self.end_time = time.time()
8785
self.latency = (self.end_time - self.start_time) * 1000
8886

8987
if response.llm_output and "token_usage" in response.llm_output:
90-
self.prompt_tokens = response.llm_output["token_usage"].get(
91-
"prompt_tokens", 0
92-
)
93-
self.completion_tokens = response.llm_output["token_usage"].get(
94-
"completion_tokens", 0
95-
)
88+
self.prompt_tokens = response.llm_output["token_usage"].get("prompt_tokens", 0)
89+
self.completion_tokens = response.llm_output["token_usage"].get("completion_tokens", 0)
9690
self.cost = self._get_cost_estimate(
9791
num_input_tokens=self.prompt_tokens,
9892
num_output_tokens=self.completion_tokens,
9993
)
100-
self.total_tokens = response.llm_output["token_usage"].get(
101-
"total_tokens", 0
102-
)
94+
self.total_tokens = response.llm_output["token_usage"].get("total_tokens", 0)
10395

10496
for generations in response.generations:
10597
for generation in generations:
10698
self.output += generation.text.replace("\n", " ")
10799

108100
self._add_to_trace()
109101

110-
def _get_cost_estimate(
111-
self, num_input_tokens: int, num_output_tokens: int
112-
) -> float:
102+
def _get_cost_estimate(self, num_input_tokens: int, num_output_tokens: int) -> float:
113103
"""Returns the cost estimate for a given model and number of tokens."""
114104
if self.model not in constants.OPENAI_COST_PER_TOKEN:
115105
return None
116106
cost_per_token = constants.OPENAI_COST_PER_TOKEN[self.model]
117-
return (
118-
cost_per_token["input"] * num_input_tokens
119-
+ cost_per_token["output"] * num_output_tokens
120-
)
107+
return cost_per_token["input"] * num_input_tokens + cost_per_token["output"] * num_output_tokens
121108

122109
def _add_to_trace(self) -> None:
123110
"""Adds to the trace."""
@@ -139,46 +126,42 @@ def _add_to_trace(self) -> None:
139126
metadata=self.metatada,
140127
)
141128

142-
def on_llm_error(
143-
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
144-
) -> Any:
129+
def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
145130
"""Run when LLM errors."""
131+
pass
146132

147-
def on_chain_start(
148-
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
149-
) -> Any:
133+
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> Any:
150134
"""Run when chain starts running."""
135+
pass
151136

152137
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
153138
"""Run when chain ends running."""
139+
pass
154140

155-
def on_chain_error(
156-
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
157-
) -> Any:
141+
def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
158142
"""Run when chain errors."""
143+
pass
159144

160-
def on_tool_start(
161-
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
162-
) -> Any:
145+
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> Any:
163146
"""Run when tool starts running."""
147+
pass
164148

165149
def on_tool_end(self, output: str, **kwargs: Any) -> Any:
166150
"""Run when tool ends running."""
151+
pass
167152

168-
def on_tool_error(
169-
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
170-
) -> Any:
153+
def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
171154
"""Run when tool errors."""
155+
pass
172156

173157
def on_text(self, text: str, **kwargs: Any) -> Any:
174158
"""Run on arbitrary text."""
159+
pass
175160

176-
def on_agent_action(
177-
self, action: langchain_schema.AgentAction, **kwargs: Any
178-
) -> Any:
161+
def on_agent_action(self, action: langchain_schema.AgentAction, **kwargs: Any) -> Any:
179162
"""Run on agent action."""
163+
pass
180164

181-
def on_agent_finish(
182-
self, finish: langchain_schema.AgentFinish, **kwargs: Any
183-
) -> Any:
165+
def on_agent_finish(self, finish: langchain_schema.AgentFinish, **kwargs: Any) -> Any:
184166
"""Run on agent end."""
167+
pass

src/openlayer/lib/integrations/openai_tracer.py

Lines changed: 13 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -139,16 +139,12 @@ def stream_chunks(
139139
if delta.function_call.name:
140140
collected_function_call["name"] += delta.function_call.name
141141
if delta.function_call.arguments:
142-
collected_function_call[
143-
"arguments"
144-
] += delta.function_call.arguments
142+
collected_function_call["arguments"] += delta.function_call.arguments
145143
elif delta.tool_calls:
146144
if delta.tool_calls[0].function.name:
147145
collected_function_call["name"] += delta.tool_calls[0].function.name
148146
if delta.tool_calls[0].function.arguments:
149-
collected_function_call["arguments"] += delta.tool_calls[
150-
0
151-
].function.arguments
147+
collected_function_call["arguments"] += delta.tool_calls[0].function.arguments
152148

153149
yield chunk
154150
end_time = time.time()
@@ -159,22 +155,16 @@ def stream_chunks(
159155
finally:
160156
# Try to add step to the trace
161157
try:
162-
collected_output_data = [
163-
message for message in collected_output_data if message is not None
164-
]
158+
collected_output_data = [message for message in collected_output_data if message is not None]
165159
if collected_output_data:
166160
output_data = "".join(collected_output_data)
167161
else:
168-
collected_function_call["arguments"] = json.loads(
169-
collected_function_call["arguments"]
170-
)
162+
collected_function_call["arguments"] = json.loads(collected_function_call["arguments"])
171163
output_data = collected_function_call
172164
completion_cost = estimate_cost(
173165
model=kwargs.get("model"),
174166
prompt_tokens=0,
175-
completion_tokens=(
176-
num_of_completion_tokens if num_of_completion_tokens else 0
177-
),
167+
completion_tokens=(num_of_completion_tokens if num_of_completion_tokens else 0),
178168
is_azure_openai=is_azure_openai,
179169
)
180170

@@ -191,13 +181,7 @@ def stream_chunks(
191181
model_parameters=get_model_parameters(kwargs),
192182
raw_output=raw_outputs,
193183
id=inference_id,
194-
metadata={
195-
"timeToFirstToken": (
196-
(first_token_time - start_time) * 1000
197-
if first_token_time
198-
else None
199-
)
200-
},
184+
metadata={"timeToFirstToken": ((first_token_time - start_time) * 1000 if first_token_time else None)},
201185
)
202186
add_to_trace(
203187
**trace_args,
@@ -223,10 +207,7 @@ def estimate_cost(
223207
cost_per_token = constants.AZURE_OPENAI_COST_PER_TOKEN[model]
224208
elif model in constants.OPENAI_COST_PER_TOKEN:
225209
cost_per_token = constants.OPENAI_COST_PER_TOKEN[model]
226-
return (
227-
cost_per_token["input"] * prompt_tokens
228-
+ cost_per_token["output"] * completion_tokens
229-
)
210+
return cost_per_token["input"] * prompt_tokens + cost_per_token["output"] * completion_tokens
230211
return None
231212

232213

@@ -285,12 +266,8 @@ def create_trace_args(
285266
def add_to_trace(is_azure_openai: bool = False, **kwargs) -> None:
286267
"""Add a chat completion step to the trace."""
287268
if is_azure_openai:
288-
tracer.add_chat_completion_step_to_trace(
289-
**kwargs, name="Azure OpenAI Chat Completion", provider="Azure"
290-
)
291-
tracer.add_chat_completion_step_to_trace(
292-
**kwargs, name="OpenAI Chat Completion", provider="OpenAI"
293-
)
269+
tracer.add_chat_completion_step_to_trace(**kwargs, name="Azure OpenAI Chat Completion", provider="Azure")
270+
tracer.add_chat_completion_step_to_trace(**kwargs, name="OpenAI Chat Completion", provider="OpenAI")
294271

295272

296273
def handle_non_streaming_create(
@@ -350,9 +327,7 @@ def handle_non_streaming_create(
350327
)
351328
# pylint: disable=broad-except
352329
except Exception as e:
353-
logger.error(
354-
"Failed to trace the create chat completion request with Openlayer. %s", e
355-
)
330+
logger.error("Failed to trace the create chat completion request with Openlayer. %s", e)
356331

357332
return response
358333

@@ -394,9 +369,7 @@ def parse_non_streaming_output_data(
394369

395370

396371
# --------------------------- OpenAI Assistants API -------------------------- #
397-
def trace_openai_assistant_thread_run(
398-
client: openai.OpenAI, run: "openai.types.beta.threads.run.Run"
399-
) -> None:
372+
def trace_openai_assistant_thread_run(client: openai.OpenAI, run: "openai.types.beta.threads.run.Run") -> None:
400373
"""Trace a run from an OpenAI assistant.
401374
402375
Once the run is completed, the thread data is published to Openlayer,
@@ -413,9 +386,7 @@ def trace_openai_assistant_thread_run(
413386
metadata = _extract_run_metadata(run)
414387

415388
# Convert thread to prompt
416-
messages = client.beta.threads.messages.list(
417-
thread_id=run.thread_id, order="asc"
418-
)
389+
messages = client.beta.threads.messages.list(thread_id=run.thread_id, order="asc")
419390
prompt = _thread_messages_to_prompt(messages)
420391

421392
# Add step to the trace
@@ -430,7 +401,7 @@ def trace_openai_assistant_thread_run(
430401

431402
# pylint: disable=broad-except
432403
except Exception as e:
433-
print(f"Failed to monitor run. {e}")
404+
print(f"Failed to monitor run. {e}") # noqa: T201
434405

435406

436407
def _type_check_run(run: "openai.types.beta.threads.run.Run") -> None:

0 commit comments

Comments (0)