
Commit 51f189e

no-jira: remove output
1 parent be5b81a commit 51f189e

File tree

1 file changed: +4 -42 lines changed

demo-notebooks/additional-demos/batch-inference/remote_offline_bi.ipynb

Lines changed: 4 additions & 42 deletions
@@ -132,23 +132,7 @@
  "cell_type": "code",
  "execution_count": null,
  "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "2025-06-23 16:56:53,008\tINFO dashboard_sdk.py:338 -- Uploading package gcs://_ray_pkg_d3badb03645503e8.zip.\n",
- "2025-06-23 16:56:53,010\tINFO packaging.py:576 -- Creating a file package for local module './'.\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "raysubmit_AJhmqzWsvHu6SqZD successfully submitted\n"
- ]
- }
- ],
+ "outputs": [],
  "source": [
  "entrypoint_command = \"python simple_batch_inf.py\"\n",
  "\n",
@@ -162,20 +146,9 @@
  },
  {
  "cell_type": "code",
- "execution_count": 12,
+ "execution_count": null,
  "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "<JobStatus.PENDING: 'PENDING'>"
- ]
- },
- "execution_count": 12,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
+ "outputs": [],
  "source": [
  "# Get the job's status\n",
  "client.get_job_status(submission_id)"
@@ -185,18 +158,7 @@
  "cell_type": "code",
  "execution_count": null,
  "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "'2025-06-23 15:47:22,272\\tINFO job_manager.py:531 -- Runtime env is setting up.\\nINFO 06-23 15:53:36 [__init__.py:244] Automatically detected platform cuda.\\n2025-06-23 15:53:54,307\\tINFO worker.py:1554 -- Using address 10.128.2.45:6379 set in the environment variable RAY_ADDRESS\\n2025-06-23 15:53:54,308\\tINFO worker.py:1694 -- Connecting to existing Ray cluster at address: 10.128.2.45:6379...\\n2025-06-23 15:53:54,406\\tINFO worker.py:1879 -- Connected to Ray cluster. View the dashboard at \\x1b[1m\\x1b[32mhttp://10.128.2.45:8265 \\x1b[39m\\x1b[22m\\nNo cloud storage mirror configured\\n2025-06-23 15:53:57,501\\tWARNING util.py:589 -- The argument ``compute`` is deprecated in Ray 2.9. Please specify argument ``concurrency`` instead. For more information, see https://docs.ray.io/en/master/data/transforming-data.html#stateful-transforms.\\n2025-06-23 15:53:58,095\\tINFO logging.py:290 -- Registered dataset logger for dataset dataset_33_0\\n2025-06-23 15:53:59,702\\tINFO streaming_executor.py:117 -- Starting execution of Dataset dataset_33_0. Full logs are in /tmp/ray/session_2025-06-23_10-53-41_019757_1/logs/ray-data\\n2025-06-23 15:53:59,702\\tINFO streaming_executor.py:118 -- Execution plan of Dataset dataset_33_0: InputDataBuffer[Input] -> TaskPoolMapOperator[ReadRange->Map(_preprocess)] -> ActorPoolMapOperator[MapBatches(ChatTemplateUDF)] -> ActorPoolMapOperator[MapBatches(TokenizeUDF)] -> ActorPoolMapOperator[MapBatches(vLLMEngineStageUDF)] -> ActorPoolMapOperator[MapBatches(DetokenizeUDF)] -> TaskPoolMapOperator[Map(_postprocess)]\\n\\nRunning 0: 0.00 row [00:00, ? row/s]\\n \\n\\x1b[33m(raylet)\\x1b[0m [2025-06-23 15:54:00,800 E 829 829] (raylet) node_manager.cc:3287: 2 Workers (tasks / actors) killed due to memory pressure (OOM), 0 Workers crashed due to other reasons at node (ID: b72a45799ac9496bf52347fb9f9ef218722683d7bd8dd14702e821f0, IP: 10.128.2.45) over the last time period. To see more information about the Workers killed on this node, use `ray logs raylet.out -ip 10.128.2.45`\\n\\nRunning 0: 0.00 row [00:01, ? row/s]\\n \\n\\x1b[33m(raylet)\\x1b[0m \\n\\nRunning 0: 0.00 row [00:01, ? row/s]\\n \\n\\x1b[33m(raylet)\\x1b[0m Refer to the documentation on how to address the out of memory issue: https://docs.ray.io/en/latest/ray-core/scheduling/ray-oom-prevention.html. Consider provisioning more memory on this node or reducing task parallelism by requesting more CPUs per task. To adjust the kill threshold, set the environment variable `RAY_memory_usage_threshold` when starting Ray. To disable worker killing, set the environment variable `RAY_memory_monitor_refresh_ms` to zero.\\n\\nRunning 0: 0.00 row [00:01, ? row/s]\\n \\n\\x1b[33m(raylet)\\x1b[0m \\n\\nRunning 0: 0.00 row [01:01, ? row/s]\\n \\n\\x1b[33m(raylet)\\x1b[0m [2025-06-23 15:55:00,824 E 829 829] (raylet) node_manager.cc:3287: 1 Workers (tasks / actors) killed due to memory pressure (OOM), 0 Workers crashed due to other reasons at node (ID: b72a45799ac9496bf52347fb9f9ef218722683d7bd8dd14702e821f0, IP: 10.128.2.45) over the last time period. To see more information about the Workers killed on this node, use `ray logs raylet.out -ip 10.128.2.45`\\n\\nRunning 0: 0.00 row [01:01, ? row/s]\\n \\n\\x1b[33m(raylet)\\x1b[0m Refer to the documentation on how to address the out of memory issue: https://docs.ray.io/en/latest/ray-core/scheduling/ray-oom-prevention.html. Consider provisioning more memory on this node or reducing task parallelism by requesting more CPUs per task. To adjust the kill threshold, set the environment variable `RAY_memory_usage_threshold` when starting Ray. To disable worker killing, set the environment variable `RAY_memory_monitor_refresh_ms` to zero.\\n\\nRunning 0: 0.00 row [01:01, ? row/s]'"
- ]
- },
- "execution_count": 15,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
+ "outputs": [],
  "source": [
  "# Get the job's logs\n",
  "client.get_job_logs(submission_id)"
