@@ -32,11 +32,26 @@ client = Openlayer(
    api_key=os.environ.get("OPENLAYER_API_KEY"),
)

-project_create_response = client.projects.create(
-    name="My Project",
-    task_type="llm-base",
+data_stream_response = client.inference_pipelines.data.stream(
+    "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+    config={
+        "input_variable_names": ["user_query"],
+        "output_column_name": "output",
+        "num_of_token_column_name": "tokens",
+        "cost_column_name": "cost",
+        "timestamp_column_name": "timestamp",
+    },
+    rows=[
+        {
+            "user_query": "what's the meaning of life?",
+            "output": "42",
+            "tokens": 7,
+            "cost": 0.02,
+            "timestamp": 1620000000,
+        }
+    ],
)
-print(project_create_response.id)
+print(data_stream_response.success)
```

While you can provide an `api_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) to add `OPENLAYER_API_KEY="My API Key"` to your `.env` file so that your API Key is not stored in source control.
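A minimal sketch of that recommendation (assuming `python-dotenv` is installed and the key lives in a local `.env` file):

```python
from dotenv import load_dotenv  # provided by python-dotenv
from openlayer import Openlayer

load_dotenv()  # copies the entries from .env into os.environ

# The client falls back to the OPENLAYER_API_KEY environment variable
client = Openlayer()
```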
@@ -60,11 +75,26 @@ client = AsyncOpenlayer(


async def main() -> None:
-    project_create_response = await client.projects.create(
-        name="My Project",
-        task_type="llm-base",
+    data_stream_response = await client.inference_pipelines.data.stream(
+        "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+        config={
+            "input_variable_names": ["user_query"],
+            "output_column_name": "output",
+            "num_of_token_column_name": "tokens",
+            "cost_column_name": "cost",
+            "timestamp_column_name": "timestamp",
+        },
+        rows=[
+            {
+                "user_query": "what's the meaning of life?",
+                "output": "42",
+                "tokens": 7,
+                "cost": 0.02,
+                "timestamp": 1620000000,
+            }
+        ],
    )
-    print(project_create_response.id)
+    print(data_stream_response.success)


asyncio.run(main())
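Since the async client returns plain awaitables, independent calls can also be fanned out with `asyncio.gather`; a sketch under the same assumptions (the pipeline ids and payloads here are placeholders, and the config is trimmed to the fields used):

```python
import asyncio

from openlayer import AsyncOpenlayer

client = AsyncOpenlayer()

# Column mapping reused for both calls below
CONFIG = {
    "input_variable_names": ["user_query"],
    "output_column_name": "output",
}


async def main() -> None:
    # Dispatch both stream calls concurrently and wait for both results
    responses = await asyncio.gather(
        client.inference_pipelines.data.stream(
            "pipeline-id-1",
            config=CONFIG,
            rows=[{"user_query": "hi", "output": "hello"}],
        ),
        client.inference_pipelines.data.stream(
            "pipeline-id-2",
            config=CONFIG,
            rows=[{"user_query": "bye", "output": "goodbye"}],
        ),
    )
    print([r.success for r in responses])


asyncio.run(main())
```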
@@ -97,9 +127,24 @@ from openlayer import Openlayer
client = Openlayer()

try:
-    client.projects.create(
-        name="My Project",
-        task_type="llm-base",
+    client.inference_pipelines.data.stream(
+        "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+        config={
+            "input_variable_names": ["user_query"],
+            "output_column_name": "output",
+            "num_of_token_column_name": "tokens",
+            "cost_column_name": "cost",
+            "timestamp_column_name": "timestamp",
+        },
+        rows=[
+            {
+                "user_query": "what's the meaning of life?",
+                "output": "42",
+                "tokens": 7,
+                "cost": 0.02,
+                "timestamp": 1620000000,
+            }
+        ],
    )
except openlayer.APIConnectionError as e:
    print("The server could not be reached")
@@ -143,9 +188,24 @@ client = Openlayer(
)

# Or, configure per-request:
-client.with_options(max_retries=5).projects.create(
-    name="My Project",
-    task_type="llm-base",
+client.with_options(max_retries=5).inference_pipelines.data.stream(
+    "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+    config={
+        "input_variable_names": ["user_query"],
+        "output_column_name": "output",
+        "num_of_token_column_name": "tokens",
+        "cost_column_name": "cost",
+        "timestamp_column_name": "timestamp",
+    },
+    rows=[
+        {
+            "user_query": "what's the meaning of life?",
+            "output": "42",
+            "tokens": 7,
+            "cost": 0.02,
+            "timestamp": 1620000000,
+        }
+    ],
)
```

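The same knob can be set once at construction to change the default for every request (a sketch; `max_retries` here mirrors the per-request option):

```python
from openlayer import Openlayer

# Sketch: turn retries off for every request made with this client
client = Openlayer(max_retries=0)  # the library default is 2
```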
@@ -169,9 +229,24 @@ client = Openlayer(
)

# Override per-request:
-client.with_options(timeout=5.0).projects.create(
-    name="My Project",
-    task_type="llm-base",
+client.with_options(timeout=5.0).inference_pipelines.data.stream(
+    "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+    config={
+        "input_variable_names": ["user_query"],
+        "output_column_name": "output",
+        "num_of_token_column_name": "tokens",
+        "cost_column_name": "cost",
+        "timestamp_column_name": "timestamp",
+    },
+    rows=[
+        {
+            "user_query": "what's the meaning of life?",
+            "output": "42",
+            "tokens": 7,
+            "cost": 0.02,
+            "timestamp": 1620000000,
+        }
+    ],
)
```

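Likewise, granular client-wide timeouts can be expressed with an `httpx.Timeout` object (a sketch; the specific numbers are illustrative):

```python
import httpx
from openlayer import Openlayer

# Sketch: 60s default, with tighter per-phase limits
client = Openlayer(
    timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
)
```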
@@ -211,14 +286,27 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
from openlayer import Openlayer

client = Openlayer()
-response = client.projects.with_raw_response.create(
-    name="My Project",
-    task_type="llm-base",
+response = client.inference_pipelines.data.with_raw_response.stream(
+    "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+    config={
+        "input_variable_names": ["user_query"],
+        "output_column_name": "output",
+        "num_of_token_column_name": "tokens",
+        "cost_column_name": "cost",
+        "timestamp_column_name": "timestamp",
+    },
+    rows=[{
+        "user_query": "what's the meaning of life?",
+        "output": "42",
+        "tokens": 7,
+        "cost": 0.02,
+        "timestamp": 1620000000,
+    }],
)
print(response.headers.get('X-My-Header'))

-project = response.parse()  # get the object that `projects.create()` would have returned
-print(project.id)
+data = response.parse()  # get the object that `inference_pipelines.data.stream()` would have returned
+print(data.success)
```

These methods return an [`APIResponse`](https://github.com/openlayer-ai/openlayer-python/tree/main/src/openlayer/_response.py) object.
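Beyond `headers` and `parse()`, it exposes the usual response metadata; a quick sketch (the attribute names are assumptions based on that generated `_response.py`, not documented guarantees):

```python
# Sketch: other fields typically available on the APIResponse object
print(response.status_code)  # e.g. 200
print(response.url)          # the request URL
print(response.text)         # raw body as text
```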
@@ -232,9 +320,24 @@ The above interface eagerly reads the full response body when you make the reque
To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.

```python
-with client.projects.with_streaming_response.create(
-    name="My Project",
-    task_type="llm-base",
+with client.inference_pipelines.data.with_streaming_response.stream(
+    "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+    config={
+        "input_variable_names": ["user_query"],
+        "output_column_name": "output",
+        "num_of_token_column_name": "tokens",
+        "cost_column_name": "cost",
+        "timestamp_column_name": "timestamp",
+    },
+    rows=[
+        {
+            "user_query": "what's the meaning of life?",
+            "output": "42",
+            "tokens": 7,
+            "cost": 0.02,
+            "timestamp": 1620000000,
+        }
+    ],
) as response:
    print(response.headers.get("X-My-Header"))
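    # Sketch of an assumed continuation: the body is only read once one of
    # the listed methods is called, e.g. lazily, line by line:
    for line in response.iter_lines():
        print(line)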