From 6c4e53f84d9efc9fa08870b58f2b67276b0d3e95 Mon Sep 17 00:00:00 2001
From: Davis Vann Bennett
Date: Fri, 24 Oct 2025 21:31:05 +0200
Subject: [PATCH 1/4] add global concurrency limit instead of per-routine concurrency limits

---
 src/zarr/abc/codec.py           |   4 --
 src/zarr/abc/store.py           |   4 +-
 src/zarr/core/array.py          |   4 --
 src/zarr/core/codec_pipeline.py |   5 --
 src/zarr/core/common.py         | 115 +++++++++++++++++++++++++++++++-
 src/zarr/core/group.py          |  10 +--
 src/zarr/storage/_local.py      |   3 +-
 src/zarr/storage/_memory.py     |   3 +-
 src/zarr/storage/_obstore.py    |   8 +--
 9 files changed, 128 insertions(+), 28 deletions(-)

diff --git a/src/zarr/abc/codec.py b/src/zarr/abc/codec.py
index d41c457b4e..b5f7819a91 100644
--- a/src/zarr/abc/codec.py
+++ b/src/zarr/abc/codec.py
@@ -9,7 +9,6 @@
 from zarr.abc.metadata import Metadata
 from zarr.core.buffer import Buffer, NDBuffer
 from zarr.core.common import NamedConfig, concurrent_map
-from zarr.core.config import config

 if TYPE_CHECKING:
     from collections.abc import Awaitable, Callable, Iterable
@@ -228,7 +227,6 @@ async def decode_partial(
         return await concurrent_map(
             list(batch_info),
             self._decode_partial_single,
-            config.get("async.concurrency"),
         )

@@ -265,7 +263,6 @@ async def encode_partial(
         await concurrent_map(
             list(batch_info),
             self._encode_partial_single,
-            config.get("async.concurrency"),
         )

@@ -467,7 +464,6 @@ async def _batching_helper(
     return await concurrent_map(
         list(batch_info),
         _noop_for_none(func),
-        config.get("async.concurrency"),
     )

diff --git a/src/zarr/abc/store.py b/src/zarr/abc/store.py
index 4b3edf78d1..30602edf34 100644
--- a/src/zarr/abc/store.py
+++ b/src/zarr/abc/store.py
@@ -464,11 +464,9 @@ async def getsize_prefix(self, prefix: str) -> int:
         # avoid circular import
         from zarr.core.common import concurrent_map
-        from zarr.core.config import config

         keys = [(x,) async for x in self.list_prefix(prefix)]
-        limit = config.get("async.concurrency")
-        sizes = await concurrent_map(keys, self.getsize, limit=limit)
+        sizes = await concurrent_map(keys, self.getsize)
         return sum(sizes)

diff --git a/src/zarr/core/array.py b/src/zarr/core/array.py
index 8bd8be40b2..2f42836fc2 100644
--- a/src/zarr/core/array.py
+++ b/src/zarr/core/array.py
@@ -22,7 +22,6 @@
 import numpy as np
 from typing_extensions import deprecated

-import zarr
 from zarr.abc.codec import ArrayArrayCodec, ArrayBytesCodec, BytesBytesCodec, Codec
 from zarr.abc.numcodec import Numcodec, _is_numcodec
 from zarr.codecs._v2 import V2Codec
@@ -1853,7 +1852,6 @@ async def _delete_key(key: str) -> None:
                 for chunk_coords in old_chunk_coords.difference(new_chunk_coords)
             ],
             _delete_key,
-            zarr_config.get("async.concurrency"),
         )

         # Write new metadata
@@ -4530,7 +4528,6 @@ async def _copy_array_region(
         await concurrent_map(
             [(region, data) for region in result._iter_shard_regions()],
             _copy_array_region,
-            zarr.core.config.config.get("async.concurrency"),
         )

     else:
@@ -4541,7 +4538,6 @@ async def _copy_arraylike_region(chunk_coords: slice, _data: NDArrayLike) -> None:
         await concurrent_map(
             [(region, data) for region in result._iter_shard_regions()],
             _copy_arraylike_region,
-            zarr.core.config.config.get("async.concurrency"),
         )

     return result

diff --git a/src/zarr/core/codec_pipeline.py b/src/zarr/core/codec_pipeline.py
index 63fcda7065..e6864c607e 100644
--- a/src/zarr/core/codec_pipeline.py
+++ b/src/zarr/core/codec_pipeline.py
@@ -270,7 +270,6 @@ async def read_batch(
             chunk_bytes_batch = await concurrent_map(
                 [(byte_getter, array_spec.prototype) for byte_getter, array_spec, *_ in batch_info],
                 lambda byte_getter, prototype: byte_getter.get(prototype),
-                config.get("async.concurrency"),
             )
             chunk_array_batch = await self.decode_batch(
                 [
@@ -375,7 +374,6 @@ async def _read_key(
                     for byte_setter, chunk_spec, chunk_selection, _, is_complete_chunk in batch_info
                 ],
                 _read_key,
-                config.get("async.concurrency"),
             )
             chunk_array_decoded = await self.decode_batch(
                 [
@@ -441,7 +439,6 @@ async def _write_key(byte_setter: ByteSetter, chunk_bytes: Buffer | None) -> None:
                     )
                 ],
                 _write_key,
-                config.get("async.concurrency"),
             )

     async def decode(
@@ -474,7 +471,6 @@ async def read(
                 for single_batch_info in batched(batch_info, self.batch_size)
             ],
             self.read_batch,
-            config.get("async.concurrency"),
         )

     async def write(
@@ -489,7 +485,6 @@ async def write(
                 for single_batch_info in batched(batch_info, self.batch_size)
             ],
             self.write_batch,
-            config.get("async.concurrency"),
         )

diff --git a/src/zarr/core/common.py b/src/zarr/core/common.py
index 651ebd72f3..8f6f899cf8 100644
--- a/src/zarr/core/common.py
+++ b/src/zarr/core/common.py
@@ -4,7 +4,9 @@
 import functools
 import math
 import operator
+import threading
 import warnings
+import weakref
 from collections.abc import Iterable, Mapping, Sequence
 from enum import Enum
 from itertools import starmap
@@ -82,15 +84,126 @@ def ceildiv(a: float, b: float) -> int:
 V = TypeVar("V")

+# Global semaphore management for per-process concurrency limiting
+# Use WeakKeyDictionary to automatically clean up semaphores when event loops are garbage collected
+_global_semaphores: weakref.WeakKeyDictionary[asyncio.AbstractEventLoop, asyncio.Semaphore] = (
+    weakref.WeakKeyDictionary()
+)
+# Use threading.Lock instead of asyncio.Lock to coordinate across event loops
+_global_semaphore_lock = threading.Lock()
+
+
+def get_global_semaphore() -> asyncio.Semaphore:
+    """
+    Get the global semaphore for the current event loop.
+
+    This ensures that all concurrent operations across the process share the same
+    concurrency limit, preventing excessive concurrent task creation when multiple
+    arrays or operations are running simultaneously.
+
+    The semaphore is lazily created per event loop and uses the configured
+    `async.concurrency` value from zarr config. The semaphore is cached per event
+    loop, so subsequent calls return the same semaphore instance.
+
+    Note: Config changes after the first call will not affect the semaphore limit.
+    To apply new config values, use :func:`reset_global_semaphores` to clear the cache.
+
+    Returns
+    -------
+    asyncio.Semaphore
+        The global semaphore for this event loop.
+
+    Raises
+    ------
+    RuntimeError
+        If called outside of an async context (no running event loop).
+
+    See Also
+    --------
+    reset_global_semaphores : Clear the global semaphore cache
+    """
+    loop = asyncio.get_running_loop()
+
+    # Acquire lock FIRST to prevent TOCTOU race condition
+    with _global_semaphore_lock:
+        if loop not in _global_semaphores:
+            limit = zarr_config.get("async.concurrency")
+            _global_semaphores[loop] = asyncio.Semaphore(limit)
+        return _global_semaphores[loop]
+
+
+def reset_global_semaphores() -> None:
+    """
+    Clear all cached global semaphores.
+
+    This is useful when you want config changes to take effect, or for testing.
+    The next call to :func:`get_global_semaphore` will create a new semaphore
+    using the current configuration.
+
+    Warning: This should only be called when no async operations are in progress,
+    as it will invalidate all existing semaphore references.
+
+    Examples
+    --------
+    >>> import zarr
+    >>> zarr.config.set({"async.concurrency": 50})
+    >>> reset_global_semaphores()  # Apply new config
+    """
+    with _global_semaphore_lock:
+        _global_semaphores.clear()
+
+
 async def concurrent_map(
     items: Iterable[T],
     func: Callable[..., Awaitable[V]],
     limit: int | None = None,
+    *,
+    use_global_semaphore: bool = True,
 ) -> list[V]:
-    if limit is None:
+    """
+    Execute an async function concurrently over multiple items with concurrency limiting.
+
+    Parameters
+    ----------
+    items : Iterable[T]
+        Items to process, where each item is a tuple of arguments to pass to func.
+    func : Callable[..., Awaitable[V]]
+        Async function to execute for each item.
+    limit : int | None, optional
+        If provided and use_global_semaphore is False, creates a local semaphore
+        with this limit. If None, no concurrency limiting is applied.
+    use_global_semaphore : bool, default True
+        If True, uses the global per-process semaphore for concurrency limiting,
+        ensuring all concurrent operations share the same limit. If False, uses
+        the `limit` parameter for local limiting (legacy behavior).
+
+    Returns
+    -------
+    list[V]
+        Results from executing func on all items.
+    """
+    if use_global_semaphore:
+        if limit is not None:
+            raise ValueError(
+                "Cannot specify both use_global_semaphore=True and a limit value. "
+                "Either use the global semaphore (use_global_semaphore=True, limit=None) "
+                "or specify a local limit (use_global_semaphore=False, limit=<value>)."
+            )
+        # Use the global semaphore for process-wide concurrency limiting
+        sem = get_global_semaphore()
+
+        async def run(item: tuple[Any]) -> V:
+            async with sem:
+                return await func(*item)
+
+        return await asyncio.gather(*[asyncio.ensure_future(run(item)) for item in items])
+
+    elif limit is None:
+        # No concurrency limiting
         return await asyncio.gather(*list(starmap(func, items)))
     else:
+        # Legacy mode: create local semaphore with specified limit
         sem = asyncio.Semaphore(limit)

         async def run(item: tuple[Any]) -> V:
diff --git a/src/zarr/core/group.py b/src/zarr/core/group.py
index 26aed4fd60..2f381431a3 100644
--- a/src/zarr/core/group.py
+++ b/src/zarr/core/group.py
@@ -44,6 +44,7 @@
     NodeType,
     ShapeLike,
     ZarrFormat,
+    get_global_semaphore,
     parse_shapelike,
 )
 from zarr.core.config import config
@@ -1441,8 +1442,8 @@ async def _members(
             )
             raise ValueError(msg)

-        # enforce a concurrency limit by passing a semaphore to all the recursive functions
-        semaphore = asyncio.Semaphore(config.get("async.concurrency"))
+        # Use global semaphore for process-wide concurrency limiting
+        semaphore = get_global_semaphore()
         async for member in _iter_members_deep(
             self,
             max_depth=max_depth,
@@ -3338,9 +3339,8 @@ async def create_nodes(
         The created nodes in the order they are created.
     """

-    # Note: the only way to alter this value is via the config. If that's undesirable for some reason,
-    # then we should consider adding a keyword argument this this function
-    semaphore = asyncio.Semaphore(config.get("async.concurrency"))
+    # Use global semaphore for process-wide concurrency limiting
+    semaphore = get_global_semaphore()

     create_tasks: list[Coroutine[None, None, str]] = []
     for key, value in nodes.items():
diff --git a/src/zarr/storage/_local.py b/src/zarr/storage/_local.py
index f64da71bb4..d6f10be862 100644
--- a/src/zarr/storage/_local.py
+++ b/src/zarr/storage/_local.py
@@ -217,7 +217,8 @@ async def get_partial_values(
             assert isinstance(key, str)
             path = self.root / key
             args.append((_get, path, prototype, byte_range))
-        return await concurrent_map(args, asyncio.to_thread, limit=None)  # TODO: fix limit
+        # Use global semaphore to limit concurrent thread spawning
+        return await concurrent_map(args, asyncio.to_thread)

     async def set(self, key: str, value: Buffer) -> None:
         # docstring inherited
diff --git a/src/zarr/storage/_memory.py b/src/zarr/storage/_memory.py
index a3fd058680..12d7424185 100644
--- a/src/zarr/storage/_memory.py
+++ b/src/zarr/storage/_memory.py
@@ -104,7 +104,8 @@ async def get_partial_values(
         async def _get(key: str, byte_range: ByteRequest | None) -> Buffer | None:
             return await self.get(key, prototype=prototype, byte_range=byte_range)

-        return await concurrent_map(key_ranges, _get, limit=None)
+        # In-memory operations are fast and don't benefit from concurrency limiting
+        return await concurrent_map(key_ranges, _get, use_global_semaphore=False)

     async def exists(self, key: str) -> bool:
         # docstring inherited
diff --git a/src/zarr/storage/_obstore.py b/src/zarr/storage/_obstore.py
index 5c2197ecf6..e1d1bde672 100644
--- a/src/zarr/storage/_obstore.py
+++ b/src/zarr/storage/_obstore.py
@@ -13,8 +13,7 @@
     Store,
     SuffixByteRequest,
 )
-from zarr.core.common import concurrent_map
-from zarr.core.config import config
+from zarr.core.common import concurrent_map, get_global_semaphore

 if TYPE_CHECKING:
     from collections.abc import AsyncGenerator, Coroutine, Iterable, Sequence
@@ -209,7 +208,7 @@ async def delete_dir(self, prefix: str) -> None:
         metas = await obs.list(self.store, prefix).collect_async()
         keys = [(m["path"],) for m in metas]
-        await concurrent_map(keys, self.delete, limit=config.get("async.concurrency"))
+        await concurrent_map(keys, self.delete)

     @property
     def supports_listing(self) -> bool:
@@ -485,7 +484,8 @@ async def _get_partial_values(
         else:
             raise ValueError(f"Unsupported range input: {byte_range}")

-    semaphore = asyncio.Semaphore(config.get("async.concurrency"))
+    # Use global semaphore for process-wide concurrency limiting
+    semaphore = get_global_semaphore()
     futs: list[Coroutine[Any, Any, list[_Response]]] = []
     for path, bounded_ranges in per_file_bounded_requests.items():
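The semantics of the new helpers are easiest to see in isolation. The sketch below is illustrative only: it assumes nothing beyond the `get_global_semaphore` and `reset_global_semaphores` functions added in the `common.py` hunk above, plus zarr's existing `zarr.config.set`.

```python
# Illustrative sketch (not part of the patch): exercising the helpers added
# to src/zarr/core/common.py above, under the assumption the series is applied.
import asyncio

import zarr
from zarr.core.common import get_global_semaphore, reset_global_semaphores


async def demo() -> None:
    sem = get_global_semaphore()  # lazily created for this event loop
    assert get_global_semaphore() is sem  # cached: same instance on every call

    async with sem:  # every zarr I/O task on this loop contends for these slots
        pass


zarr.config.set({"async.concurrency": 16})
reset_global_semaphores()  # required for the new limit to take effect
asyncio.run(demo())
```

Keying the cache weakly by event loop lets a finished loop's semaphore be garbage collected, while all coroutines on a live loop share one pool of slots.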
+""" + +import asyncio +from typing import TYPE_CHECKING, Any + +import numpy as np +import pytest + +import zarr +from zarr.core.common import get_global_semaphore, reset_global_semaphores +from zarr.core.config import config + +if TYPE_CHECKING: + from numpy.typing import NDArray + + +class TestGlobalSemaphore: + """Tests for the global semaphore management.""" + + async def test_get_global_semaphore_creates_per_loop(self) -> None: + """Test that each event loop gets its own semaphore.""" + sem1 = get_global_semaphore() + assert sem1 is not None + assert isinstance(sem1, asyncio.Semaphore) + + # Getting it again should return the same instance + sem2 = get_global_semaphore() + assert sem1 is sem2 + + async def test_global_semaphore_uses_config_limit(self) -> None: + """Test that the global semaphore respects the configured limit.""" + # Set a custom concurrency limit + original_limit: Any = config.get("async.concurrency") + try: + config.set({"async.concurrency": 5}) + + # Clear existing semaphores to force recreation + reset_global_semaphores() + + sem = get_global_semaphore() + + # The semaphore should have the configured limit + # We can verify this by acquiring all tokens and checking the semaphore is locked + for i in range(5): + await sem.acquire() + if i < 4: + assert not sem.locked() # Should still have capacity + else: + assert sem.locked() # All tokens acquired, semaphore is now locked + + # Release all tokens + for _ in range(5): + sem.release() + + finally: + # Restore original config + config.set({"async.concurrency": original_limit}) + # Clear semaphores again to reset state + reset_global_semaphores() + + async def test_global_semaphore_shared_across_operations(self) -> None: + """Test that multiple concurrent operations share the same semaphore.""" + # Track the maximum number of concurrent tasks + max_concurrent = 0 + current_concurrent = 0 + lock = asyncio.Lock() + + async def tracked_operation() -> None: + """An operation that tracks concurrency.""" + nonlocal max_concurrent, current_concurrent + + async with lock: + current_concurrent += 1 + max_concurrent = max(max_concurrent, current_concurrent) + + # Small delay to ensure overlap + await asyncio.sleep(0.01) + + async with lock: + current_concurrent -= 1 + + # Set a low concurrency limit to make the test observable + original_limit: Any = config.get("async.concurrency") + try: + config.set({"async.concurrency": 5}) + + # Clear existing semaphores + reset_global_semaphores() + + # Get the global semaphore + sem = get_global_semaphore() + + # Create many tasks that use the semaphore + async def task_with_semaphore() -> None: + async with sem: + await tracked_operation() + + # Launch 20 tasks (4x the limit) + tasks = [task_with_semaphore() for _ in range(20)] + await asyncio.gather(*tasks) + + # Maximum concurrent should respect the limit + assert max_concurrent <= 5, f"Max concurrent was {max_concurrent}, expected <= 5" + assert max_concurrent >= 3, ( + f"Max concurrent was {max_concurrent}, expected some concurrency" + ) + + finally: + config.set({"async.concurrency": original_limit}) + reset_global_semaphores() + + async def test_semaphore_reuse_across_calls(self) -> None: + """Test that repeated calls to get_global_semaphore return the same instance.""" + reset_global_semaphores() + + # Call multiple times and verify we get the same instance + sem1 = get_global_semaphore() + sem2 = get_global_semaphore() + sem3 = get_global_semaphore() + + assert sem1 is sem2 is sem3, "Should return same semaphore instance on 
repeated calls" + + # Verify it's still the same after using it + async with sem1: + sem4 = get_global_semaphore() + assert sem1 is sem4 + + def test_config_change_after_creation(self) -> None: + """Test and document that config changes don't affect existing semaphores.""" + original_limit: Any = config.get("async.concurrency") + try: + # Set initial config + config.set({"async.concurrency": 5}) + + async def check_limit() -> None: + reset_global_semaphores() + + # Create semaphore with limit=5 + sem1 = get_global_semaphore() + initial_capacity: int = sem1._value + + # Change config + config.set({"async.concurrency": 50}) + + # Get semaphore again - should be same instance with old limit + sem2 = get_global_semaphore() + assert sem1 is sem2, "Should return same semaphore instance" + assert sem2._value == initial_capacity, ( + f"Semaphore limit changed from {initial_capacity} to {sem2._value}. " + "Config changes should not affect existing semaphores." + ) + + # Clean up + reset_global_semaphores() + + asyncio.run(check_limit()) + + finally: + config.set({"async.concurrency": original_limit}) + + +class TestArrayConcurrency: + """Tests that array operations use global concurrency limiting.""" + + @pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning") + async def test_multiple_arrays_share_concurrency_limit(self) -> None: + """Test that reading from multiple arrays shares the global concurrency limit.""" + from zarr.core.common import concurrent_map + + # Track concurrent task executions + max_concurrent_tasks = 0 + current_concurrent_tasks = 0 + task_lock = asyncio.Lock() + + async def tracked_chunk_operation(chunk_id: int) -> int: + """Simulate a chunk operation with tracking.""" + nonlocal max_concurrent_tasks, current_concurrent_tasks + + async with task_lock: + current_concurrent_tasks += 1 + max_concurrent_tasks = max(max_concurrent_tasks, current_concurrent_tasks) + + # Small delay to simulate I/O + await asyncio.sleep(0.001) + + async with task_lock: + current_concurrent_tasks -= 1 + + return chunk_id + + # Set a low concurrency limit + original_limit: Any = config.get("async.concurrency") + try: + config.set({"async.concurrency": 10}) + + # Clear existing semaphores + reset_global_semaphores() + + # Simulate reading many chunks using concurrent_map (which uses the global semaphore) + # This simulates what happens when reading from multiple arrays + chunk_ids = [(i,) for i in range(100)] + await concurrent_map(chunk_ids, tracked_chunk_operation) + + # The maximum concurrent tasks should respect the global limit + assert max_concurrent_tasks <= 10, ( + f"Max concurrent tasks was {max_concurrent_tasks}, expected <= 10" + ) + + assert max_concurrent_tasks >= 5, ( + f"Max concurrent tasks was {max_concurrent_tasks}, " + f"expected at least some concurrency" + ) + + finally: + config.set({"async.concurrency": original_limit}) + # Note: We don't reset_global_semaphores() here because doing so while + # many tasks are still cleaning up can trigger ResourceWarnings from + # asyncio internals. The semaphore will be reused by subsequent tests. 
+
+    def test_sync_api_uses_global_concurrency(self) -> None:
+        """Test that synchronous API also benefits from global concurrency limiting."""
+        # This test verifies that the sync API (which wraps async) uses global limiting
+
+        # Set a low concurrency limit
+        original_limit: Any = config.get("async.concurrency")
+        try:
+            config.set({"async.concurrency": 8})
+
+            # Create a small array - the key is that zarr internally uses
+            # concurrent_map which now uses the global semaphore
+            store = zarr.storage.MemoryStore()
+            arr = zarr.create(
+                shape=(20, 20),
+                chunks=(10, 10),
+                dtype="i4",
+                store=store,
+                zarr_format=3,
+            )
+            arr[:] = 42
+
+            # Read data (synchronously)
+            data: NDArray[Any] = arr[:]
+
+            # Verify we got the right data
+            assert np.all(data == 42)
+
+            # The test passes if no errors occurred
+            # The concurrency limiting is happening under the hood
+
+        finally:
+            config.set({"async.concurrency": original_limit})
+
+
+class TestConcurrentMapGlobal:
+    """Tests for concurrent_map using global semaphore."""
+
+    async def test_concurrent_map_uses_global_by_default(self) -> None:
+        """Test that concurrent_map uses global semaphore by default."""
+        from zarr.core.common import concurrent_map
+
+        # Track concurrent executions
+        max_concurrent = 0
+        current_concurrent = 0
+        lock = asyncio.Lock()
+
+        async def tracked_task(x: int) -> int:
+            nonlocal max_concurrent, current_concurrent
+
+            async with lock:
+                current_concurrent += 1
+                max_concurrent = max(max_concurrent, current_concurrent)
+
+            await asyncio.sleep(0.01)
+
+            async with lock:
+                current_concurrent -= 1
+
+            return x * 2
+
+        # Set a low limit
+        original_limit: Any = config.get("async.concurrency")
+        try:
+            config.set({"async.concurrency": 5})
+
+            # Clear existing semaphores
+            reset_global_semaphores()
+
+            # Use concurrent_map with default settings (use_global_semaphore=True)
+            items = [(i,) for i in range(20)]
+            results = await concurrent_map(items, tracked_task)
+
+            assert len(results) == 20
+            assert max_concurrent <= 5
+            assert max_concurrent >= 3  # Should have some concurrency
+
+        finally:
+            config.set({"async.concurrency": original_limit})
+            reset_global_semaphores()
+
+    async def test_concurrent_map_legacy_mode(self) -> None:
+        """Test that concurrent_map legacy mode still works."""
+        from zarr.core.common import concurrent_map
+
+        async def simple_task(x: int) -> int:
+            await asyncio.sleep(0.001)
+            return x * 2
+
+        # Use legacy mode with local limit
+        items = [(i,) for i in range(10)]
+        results = await concurrent_map(items, simple_task, limit=3, use_global_semaphore=False)
+
+        assert len(results) == 10
+        assert results == [i * 2 for i in range(10)]
+
+    async def test_concurrent_map_parameter_validation(self) -> None:
+        """Test that concurrent_map validates conflicting parameters."""
+        from zarr.core.common import concurrent_map
+
+        async def simple_task(x: int) -> int:
+            return x * 2
+
+        items = [(i,) for i in range(10)]
+
+        # Should raise ValueError when both limit and use_global_semaphore=True
+        with pytest.raises(
+            ValueError, match="Cannot specify both use_global_semaphore=True and a limit"
+        ):
+            await concurrent_map(items, simple_task, limit=5, use_global_semaphore=True)

From 735ee8e68232da2ddeeb74c89ea902926869ae3c Mon Sep 17 00:00:00 2001
From: Davis Vann Bennett
Date: Fri, 24 Oct 2025 21:51:14 +0200
Subject: [PATCH 3/4] lint

---
 tests/test_global_concurrency.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/tests/test_global_concurrency.py b/tests/test_global_concurrency.py
index 5df1d68a39..f6366e3c53 100644
--- a/tests/test_global_concurrency.py
+++ b/tests/test_global_concurrency.py
@@ -3,7 +3,7 @@
 """

 import asyncio
-from typing import TYPE_CHECKING, Any
+from typing import Any

 import numpy as np
 import pytest
@@ -12,9 +12,6 @@
 from zarr.core.common import get_global_semaphore, reset_global_semaphores
 from zarr.core.config import config

-if TYPE_CHECKING:
-    from numpy.typing import NDArray
-

 class TestGlobalSemaphore:
     """Tests for the global semaphore management."""
@@ -241,7 +238,7 @@ def test_sync_api_uses_global_concurrency(self) -> None:
             arr[:] = 42

             # Read data (synchronously)
-            data: NDArray[Any] = arr[:]
+            data = arr[:]

             # Verify we got the right data
             assert np.all(data == 42)

From 3ef6cfba55f2e7203551424792e55005230cee90 Mon Sep 17 00:00:00 2001
From: Davis Vann Bennett
Date: Mon, 27 Oct 2025 16:31:44 +0100
Subject: [PATCH 4/4] changelog

---
 changes/3547.misc.md | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changes/3547.misc.md

diff --git a/changes/3547.misc.md b/changes/3547.misc.md
new file mode 100644
index 0000000000..771bfe8861
--- /dev/null
+++ b/changes/3547.misc.md
@@ -0,0 +1 @@
+Moved concurrency limits to a global per-event-loop setting instead of a separate limit per call.
\ No newline at end of file
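
Taken together, the series leaves `concurrent_map` with three modes: the shared per-event-loop semaphore (the new default), a caller-supplied local limit, and no limit at all. The sketch below is illustrative rather than definitive; it mirrors the behavior the tests above assert and assumes the patched `zarr.core.common` is importable.

```python
# Illustrative sketch (not part of the patch): the three concurrent_map modes
# after this series, assuming the patched zarr.core.common is importable.
import asyncio

from zarr.core.common import concurrent_map


async def fetch(i: int) -> int:
    await asyncio.sleep(0.001)  # stand-in for a chunk read
    return i * 2


async def main() -> None:
    items = [(i,) for i in range(10)]

    # Default: throttled by the shared per-event-loop semaphore.
    assert await concurrent_map(items, fetch) == [i * 2 for i in range(10)]

    # Legacy: a local semaphore of 3, independent of the global limit.
    await concurrent_map(items, fetch, limit=3, use_global_semaphore=False)

    # Unlimited: opt out of the global semaphore without supplying a limit.
    await concurrent_map(items, fetch, use_global_semaphore=False)

    # Mixing a local limit with the global semaphore is rejected.
    try:
        await concurrent_map(items, fetch, limit=3)
    except ValueError:
        pass


asyncio.run(main())
```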