@@ -4,9 +4,11 @@
 import sys
 from datetime import timedelta
 from functools import partial
+from typing import Any
 
 import django_rq
 import redis
+from core.current_request import CurrentContext
 from django.conf import settings
 from django_rq import get_connection
 from rq.command import send_stop_job_command
@@ -80,6 +82,40 @@ def redis_connected():
     return redis_healthcheck()
 
 
+def _is_serializable(value: Any) -> bool:
+    """Check if a value can be serialized for job context."""
+    return isinstance(value, (str, int, float, bool, list, dict, type(None)))
+
+
+def _capture_context() -> dict:
+    """
+    Capture the current context for passing to a job.
+    Returns a dictionary of context data that can be serialized.
+    """
+    context_data = {}
+
+    # Get user information
+    if user := CurrentContext.get_user():
+        context_data['user_id'] = user.id
+
+    # Get organization if set separately
+    if org_id := CurrentContext.get_organization_id():
+        context_data['organization_id'] = org_id
+
+    # If organization_id is not set, fall back to the requesting user's active organization so
+    # the job always has one. Capturing it here preserves the original user's organization even
+    # if their active organization changes while the async job is queued or running.
+    if not org_id and user and hasattr(user, 'active_organization_id') and user.active_organization_id:
+        context_data['organization_id'] = user.active_organization_id
+
+    # Get any custom context values (exclude non-serializable objects)
+    job_data = CurrentContext.get_job_data()
+    for key, value in job_data.items():
+        if key not in ['user', 'request'] and _is_serializable(value):
+            context_data[key] = value
+
+    return context_data
+
+
 def redis_get(key):
     if not redis_healthcheck():
         return
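
As a quick illustration of the serialization guard above (a sketch that just exercises `_is_serializable` as defined in this diff): only JSON-like primitives pass the filter, so rich objects such as model instances are silently dropped from the captured context.

    assert _is_serializable({'ids': [1, 2, 3]}) is True
    assert _is_serializable(None) is True          # NoneType is explicitly allowed
    assert _is_serializable((1, 2)) is False       # tuples and sets are excluded
    assert _is_serializable(object()) is False     # arbitrary objects are excluded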
@@ -112,7 +148,9 @@ def redis_delete(key):
 
 def start_job_async_or_sync(job, *args, in_seconds=0, **kwargs):
     """
-    Start job async with redis or sync if redis is not connected
+    Start the job asynchronously via Redis, or synchronously if Redis is not connected.
+    Automatically preserves the current context for async jobs and clears it after completion.
+
     :param job: Job function
     :param args: Function arguments
     :param in_seconds: Job will be delayed for in_seconds
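
For reference, a typical call site might look like the sketch below; `export_project` and its arguments are hypothetical, while `in_seconds`, `queue_name`, and `job_timeout` are the real knobs handled a few lines down.

    start_job_async_or_sync(
        export_project,      # hypothetical job function
        project_id,          # hypothetical positional argument
        in_seconds=10,       # delay enqueueing via queue.enqueue_in
        queue_name='low',    # target RQ queue, defaults to 'default'
        job_timeout=300,     # forwarded to the enqueue call
    )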
@@ -122,28 +160,29 @@ def start_job_async_or_sync(job, *args, in_seconds=0, **kwargs):
 
     redis = redis_connected() and kwargs.get('redis', True)
     queue_name = kwargs.get('queue_name', 'default')
+
     if 'queue_name' in kwargs:
         del kwargs['queue_name']
     if 'redis' in kwargs:
         del kwargs['redis']
+
     job_timeout = None
     if 'job_timeout' in kwargs:
         job_timeout = kwargs['job_timeout']
         del kwargs['job_timeout']
+
     if redis:
-        # Auto-capture request_id from thread local and pass it via job meta
+        # Async execution with Redis: capture the current context and pass it via job meta
         try:
-            from label_studio.core.current_request import _thread_locals
+            context_data = _capture_context()
 
-            request_id = getattr(_thread_locals, 'request_id', None)
-            if request_id:
-                # Store in job meta for worker access
+            if context_data:
                 meta = kwargs.get('meta', {})
-                meta['request_id'] = request_id
+                # Store context data in job meta for worker access
+                meta.update(context_data)
                 kwargs['meta'] = meta
         except Exception:
-            # Fail silently if no request context
-            pass
+            logger.info(f'Failed to capture context for job {job.__name__} on queue {queue_name}')
 
         try:
             args_info = _truncate_args_for_logging(args, kwargs)
@@ -154,6 +193,7 @@ def start_job_async_or_sync(job, *args, in_seconds=0, **kwargs):
             enqueue_method = queue.enqueue
             if in_seconds > 0:
                 enqueue_method = partial(queue.enqueue_in, timedelta(seconds=in_seconds))
+
             job = enqueue_method(
                 job,
                 *args,
@@ -164,8 +204,10 @@ def start_job_async_or_sync(job, *args, in_seconds=0, **kwargs):
             return job
     else:
         on_failure = kwargs.pop('on_failure', None)
+
         try:
-            return job(*args, **kwargs)
+            result = job(*args, **kwargs)
+            return result
         except Exception:
             exc_info = sys.exc_info()
             if on_failure:
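
On the worker side, the captured values travel in the job's meta dict; a minimal sketch of how a job could read them back (assuming standard RQ — `get_current_job()` and `job.meta` are stock RQ APIs, the job body itself is hypothetical):

    from rq import get_current_job

    def my_job(project_id):
        # Inside an RQ worker, the meta populated by start_job_async_or_sync
        # is reachable through the current job object.
        job = get_current_job()
        user_id = job.meta.get('user_id') if job else None
        org_id = job.meta.get('organization_id') if job else None
        ...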