|
| 1 | +//===----------------------------------------------------------------------===// |
| 2 | +// |
| 3 | +// This source file is part of the SwiftNIO open source project |
| 4 | +// |
| 5 | +// Copyright (c) 2017-2022 Apple Inc. and the SwiftNIO project authors |
| 6 | +// Licensed under Apache License v2.0 |
| 7 | +// |
| 8 | +// See LICENSE.txt for license information |
| 9 | +// See CONTRIBUTORS.txt for the list of SwiftNIO project authors |
| 10 | +// |
| 11 | +// SPDX-License-Identifier: Apache-2.0 |
| 12 | +// |
| 13 | +//===----------------------------------------------------------------------===// |
| 14 | + |
| 15 | +#if canImport(Darwin) |
| 16 | +import Darwin |
| 17 | +#elseif os(Windows) |
| 18 | +import ucrt |
| 19 | +import WinSDK |
| 20 | +#elseif canImport(Glibc) |
| 21 | +@preconcurrency import Glibc |
| 22 | +#elseif canImport(Musl) |
| 23 | +@preconcurrency import Musl |
| 24 | +#elseif canImport(Bionic) |
| 25 | +@preconcurrency import Bionic |
| 26 | +#elseif canImport(WASILibc) |
| 27 | +@preconcurrency import WASILibc |
| 28 | +#if canImport(wasi_pthread) |
| 29 | +import wasi_pthread |
| 30 | +#endif |
| 31 | +#else |
| 32 | +#error("The concurrency NIOLock module was unable to identify your C library.") |
| 33 | +#endif |
| 34 | + |
#if os(Windows)
// On Windows the underlying primitive is a slim reader/writer lock; this file
// only ever acquires it exclusively, so it behaves like a plain mutex.
@usableFromInline
typealias LockPrimitive = SRWLOCK
#else
// Everywhere else the underlying primitive is a POSIX mutex.
@usableFromInline
typealias LockPrimitive = pthread_mutex_t
#endif
| 42 | + |
/// Runs `body` in debug builds only, without emitting compiler warnings.
///
/// The call is smuggled into an `assert` condition: the condition closure is
/// only evaluated when assertions are enabled (debug configurations), and it
/// always returns `true` so the assertion itself can never fire.
///
/// This is currently the only way to do this in Swift: see
/// https://forums.swift.org/t/support-debug-only-code/11037 for a discussion.
@inlinable
internal func debugOnly(_ body: () -> Void) {
    // Local helper: execute `body` and report success so `assert` never traps.
    func runBodyAndSucceed() -> Bool {
        body()
        return true
    }
    assert(runBodyAndSucceed())
}
| 57 | + |
/// A case-less namespace for the free functions that operate directly on a
/// raw `LockPrimitive` pointer. Being case-less, it can never be instantiated.
@usableFromInline
enum LockOperations {}

extension LockOperations {
    /// Initializes the lock primitive stored at `mutex`.
    ///
    /// On Windows this initializes an `SRWLOCK`; on POSIX platforms it
    /// initializes a `pthread_mutex_t`. In debug builds the mutex is
    /// configured as `PTHREAD_MUTEX_ERRORCHECK` so that misuse (e.g.
    /// unlocking from the wrong thread) is reported rather than undefined.
    /// On single-threaded builds (e.g. WASI without threads) this is a no-op.
    @inlinable
    static func create(_ mutex: UnsafeMutablePointer<LockPrimitive>) {
        mutex.assertValidAlignment()

#if os(Windows)
        InitializeSRWLock(mutex)
#elseif (compiler(<6.1) && !os(WASI)) || (compiler(>=6.1) && _runtime(_multithreaded))
        var attr = pthread_mutexattr_t()
        pthread_mutexattr_init(&attr)
        // Error-checking mutexes carry a small cost, so only use them in
        // debug builds.
        debugOnly {
            pthread_mutexattr_settype(&attr, .init(PTHREAD_MUTEX_ERRORCHECK))
        }

        let err = pthread_mutex_init(mutex, &attr)
        precondition(err == 0, "\(#function) failed in pthread_mutex with error \(err)")
#endif
    }

    /// Destroys the lock primitive stored at `mutex`.
    ///
    /// Must only be called once, after all users of the lock are done with it.
    @inlinable
    static func destroy(_ mutex: UnsafeMutablePointer<LockPrimitive>) {
        mutex.assertValidAlignment()

#if os(Windows)
        // SRWLOCK does not need to be freed.
#elseif (compiler(<6.1) && !os(WASI)) || (compiler(>=6.1) && _runtime(_multithreaded))
        let err = pthread_mutex_destroy(mutex)
        precondition(err == 0, "\(#function) failed in pthread_mutex with error \(err)")
#endif
    }

    /// Acquires the lock at `mutex`, blocking until it is available.
    @inlinable
    static func lock(_ mutex: UnsafeMutablePointer<LockPrimitive>) {
        mutex.assertValidAlignment()

#if os(Windows)
        AcquireSRWLockExclusive(mutex)
#elseif (compiler(<6.1) && !os(WASI)) || (compiler(>=6.1) && _runtime(_multithreaded))
        let err = pthread_mutex_lock(mutex)
        precondition(err == 0, "\(#function) failed in pthread_mutex with error \(err)")
#endif
    }

    /// Releases the lock at `mutex`. The lock must currently be held.
    @inlinable
    static func unlock(_ mutex: UnsafeMutablePointer<LockPrimitive>) {
        mutex.assertValidAlignment()

#if os(Windows)
        ReleaseSRWLockExclusive(mutex)
#elseif (compiler(<6.1) && !os(WASI)) || (compiler(>=6.1) && _runtime(_multithreaded))
        let err = pthread_mutex_unlock(mutex)
        precondition(err == 0, "\(#function) failed in pthread_mutex with error \(err)")
#endif
    }
}
| 116 | + |
| 117 | +// Tail allocate both the mutex and a generic value using ManagedBuffer. |
| 118 | +// Both the header pointer and the elements pointer are stable for |
| 119 | +// the class's entire lifetime. |
| 120 | +// |
| 121 | +// However, for safety reasons, we elect to place the lock in the "elements" |
| 122 | +// section of the buffer instead of the head. The reasoning here is subtle, |
| 123 | +// so buckle in. |
| 124 | +// |
| 125 | +// _As a practical matter_, the implementation of ManagedBuffer ensures that |
| 126 | +// the pointer to the header is stable across the lifetime of the class, and so |
| 127 | +// each time you call `withUnsafeMutablePointers` or `withUnsafeMutablePointerToHeader` |
| 128 | +// the value of the header pointer will be the same. This is because ManagedBuffer uses |
| 129 | +// `Builtin.addressOf` to load the value of the header, and that does ~magic~ to ensure |
| 130 | +// that it does not invoke any weird Swift accessors that might copy the value. |
| 131 | +// |
| 132 | +// _However_, the header is also available via the `.header` field on the ManagedBuffer. |
| 133 | +// This presents a problem! The reason there's an issue is that `Builtin.addressOf` and friends |
| 134 | +// do not interact with Swift's exclusivity model. That is, the various `with` functions do not |
| 135 | +// conceptually trigger a mutating access to `.header`. For elements this isn't a concern because |
| 136 | +// there's literally no other way to perform the access, but for `.header` it's entirely possible |
| 137 | +// to accidentally recursively read it. |
| 138 | +// |
| 139 | +// Our implementation is free from these issues, so we don't _really_ need to worry about it. |
| 140 | +// However, out of an abundance of caution, we store the Value in the header, and the LockPrimitive |
| 141 | +// in the trailing elements. We still don't use `.header`, but it's better to be safe than sorry, |
| 142 | +// and future maintainers will be happier that we were cautious. |
| 143 | +// |
| 144 | +// See also: https://github.com/apple/swift/pull/40000 |
/// Heap storage that tail-allocates the lock primitive next to a protected
/// `Value`. The `Value` lives in the `ManagedBuffer` header and the
/// `LockPrimitive` in the trailing elements; see the comment above for why.
@usableFromInline
final class LockStorage<Value>: ManagedBuffer<Value, LockPrimitive> {

    /// Allocates storage holding `value` and an initialized lock primitive.
    @inlinable
    static func create(value: Value) -> Self {
        let buffer = Self.create(minimumCapacity: 1) { _ in
            value
        }
        // Intentionally using a force cast here to avoid a miscompilation in 5.10.
        // This is as fast as an unsafeDownCast since ManagedBuffer is inlined and the optimizer
        // can eliminate the upcast/downcast pair
        let storage = buffer as! Self

        storage.withUnsafeMutablePointers { _, lockPtr in
            LockOperations.create(lockPtr)
        }

        return storage
    }

    /// Acquires the lock, blocking until it is available.
    @inlinable
    func lock() {
        self.withUnsafeMutablePointerToElements { lockPtr in
            LockOperations.lock(lockPtr)
        }
    }

    /// Releases the lock. The lock must currently be held.
    @inlinable
    func unlock() {
        self.withUnsafeMutablePointerToElements { lockPtr in
            LockOperations.unlock(lockPtr)
        }
    }

    // Tears down the lock primitive when the storage is deallocated.
    @inlinable
    deinit {
        self.withUnsafeMutablePointerToElements { lockPtr in
            LockOperations.destroy(lockPtr)
        }
    }

    /// Runs `body` with a pointer to the raw lock primitive. Does NOT acquire
    /// the lock; callers manage locking themselves.
    @inlinable
    func withLockPrimitive<T>(_ body: (UnsafeMutablePointer<LockPrimitive>) throws -> T) rethrows -> T {
        try self.withUnsafeMutablePointerToElements { lockPtr in
            try body(lockPtr)
        }
    }

    /// Runs `mutate` on the stored value while holding the lock; the lock is
    /// released on every exit path, including thrown errors.
    @inlinable
    func withLockedValue<T>(_ mutate: (inout Value) throws -> T) rethrows -> T {
        try self.withUnsafeMutablePointers { valuePtr, lockPtr in
            LockOperations.lock(lockPtr)
            defer { LockOperations.unlock(lockPtr) }
            return try mutate(&valuePtr.pointee)
        }
    }
}
| 202 | + |
/// A threading lock based on `libpthread` instead of `libdispatch`.
///
/// - Note: ``NIOLock`` has reference semantics.
///
/// This object provides a lock on top of a single `pthread_mutex_t`. This kind
/// of lock is safe to use with `libpthread`-based threading models, such as the
/// one used by NIO. On Windows, the lock is based on the substantially similar
/// `SRWLOCK` type.
package struct NIOLock {
    // The heap allocation that owns the underlying lock primitive; copying
    // the struct shares this allocation, which is what gives the lock its
    // reference semantics.
    @usableFromInline
    internal let _storage: LockStorage<Void>

    /// Create a new lock.
    @inlinable
    package init() {
        _storage = .create(value: ())
    }

    /// Acquire the lock, blocking until it becomes available.
    ///
    /// Whenever possible, consider using `withLock` instead of this method and
    /// `unlock`, to simplify lock handling.
    @inlinable
    package func lock() {
        _storage.lock()
    }

    /// Release the lock. The lock must currently be held.
    ///
    /// Whenever possible, consider using `withLock` instead of this method and
    /// `lock`, to simplify lock handling.
    @inlinable
    package func unlock() {
        _storage.unlock()
    }

    // Expose the raw primitive (e.g. for condition-variable style APIs).
    // Does NOT acquire the lock on the caller's behalf.
    @inlinable
    internal func withLockPrimitive<T>(_ body: (UnsafeMutablePointer<LockPrimitive>) throws -> T) rethrows -> T {
        try _storage.withLockPrimitive(body)
    }
}
| 244 | + |
extension NIOLock {
    /// Acquire the lock, run `body`, then release the lock.
    ///
    /// Prefer this over manual `lock`/`unlock` pairs in most situations: the
    /// lock is released on every exit path, including thrown errors.
    ///
    /// - Parameter body: The block to execute while holding the lock.
    /// - Returns: The value returned by the block.
    @inlinable
    package func withLock<T>(_ body: () throws -> T) rethrows -> T {
        lock()
        defer { unlock() }
        return try body()
    }

    /// Variant of ``withLock(_:)`` for bodies that return nothing.
    @inlinable
    package func withLockVoid(_ body: () throws -> Void) rethrows {
        try withLock { try body() }
    }
}
| 268 | + |
// The only stored property is an immutable reference to LockStorage, and a
// mutex exists precisely to be shared across threads — something the compiler
// cannot prove on its own, hence `@unchecked`.
extension NIOLock: @unchecked Sendable {}
| 270 | + |
extension UnsafeMutablePointer {
    /// Debug-only check that this pointer is suitably aligned for `Pointee`.
    /// Compiles away entirely in release builds.
    @inlinable
    func assertValidAlignment() {
        let address = UInt(bitPattern: self)
        let requiredAlignment = UInt(MemoryLayout<Pointee>.alignment)
        assert(address % requiredAlignment == 0)
    }
}
| 277 | + |
0 commit comments