Skip to content

Commit 5ddd661

Browse files
authored
Merge branch 'main' into jahnvi/native-uuid
2 parents 729b5c9 + 5a0bc59 commit 5ddd661

File tree

8 files changed

+485
-276
lines changed

8 files changed

+485
-276
lines changed

PyPI_Description.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -39,12 +39,12 @@ PyBind11 provides:
3939

4040
We are currently in **Public Preview**.
4141

42-
## What's new in v0.13.0
42+
## What's new in v0.13.1
4343

44-
- **Enhanced Batch Operations:** Complete support for UNIQUEIDENTIFIER and DATETIMEOFFSET in `executemany()` operations with automatic type inference, enabling efficient bulk inserts of complex data types including UUIDs and timezone-aware datetimes.
45-
- **Streaming Large Values:** Robust handling of large objects (NVARCHAR/VARCHAR/VARBINARY(MAX)) in `executemany()` with automatic Data-At-Execution detection and fallback, supporting streaming inserts and fetches for massive datasets.
46-
- **Improved Cursor Reliability:** Enhanced `cursor.rowcount` accuracy across all fetch operations, including proper handling of empty result sets and consistent behavior for SELECT, INSERT, and UPDATE operations.
47-
- **Critical Stability Fixes:** Resolved memory leaks with secure token buffer handling, fixed resource cleanup to prevent segmentation faults during Python shutdown, and corrected type inference bugs in batch operations.
44+
- **Authentication Reliability:** Fixed token handling for Microsoft Entra ID authentication to ensure stable and reliable connections.
45+
- **Timezone Preservation:** Removed forced UTC conversion for `datetimeoffset` values, preserving original timezone information in Python `datetime` objects for accurate cross-timezone data handling.
46+
- **Connection Pooling Stability:** Enhanced pool shutdown mechanism with proper tracking to prevent resource leaks and ensure cleanup operations execute reliably.
47+
- **Predictable Type Handling:** Refined UUID string parameter handling to prevent automatic type coercion, ensuring strings are processed as intended.
4848

4949
For more information, please visit the project link on GitHub: https://github.com/microsoft/mssql-python
5050

mssql_python/pooling.py

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
class PoolingManager:
77
_enabled = False
88
_initialized = False
9+
_pools_closed = False # Track if pools have been closed
910
_lock = threading.Lock()
1011
_config = {
1112
"max_size": 100,
@@ -30,6 +31,9 @@ def enable(cls, max_size=100, idle_timeout=600):
3031
@classmethod
3132
def disable(cls):
3233
with cls._lock:
34+
if cls._enabled and not cls._pools_closed: # Only cleanup if enabled and not already closed
35+
ddbc_bindings.close_pooling()
36+
cls._pools_closed = True
3337
cls._enabled = False
3438
cls._initialized = True
3539

@@ -40,8 +44,18 @@ def is_enabled(cls):
4044
@classmethod
4145
def is_initialized(cls):
4246
return cls._initialized
47+
48+
@classmethod
49+
def _reset_for_testing(cls):
50+
"""Reset pooling state - for testing purposes only"""
51+
with cls._lock:
52+
cls._enabled = False
53+
cls._initialized = False
54+
cls._pools_closed = False
4355

4456
@atexit.register
4557
def shutdown_pooling():
46-
if PoolingManager.is_enabled():
47-
ddbc_bindings.close_pooling()
58+
with PoolingManager._lock:
59+
if PoolingManager._enabled and not PoolingManager._pools_closed:
60+
ddbc_bindings.close_pooling()
61+
PoolingManager._pools_closed = True

mssql_python/pybind/connection/connection.cpp

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -173,7 +173,7 @@ SQLRETURN Connection::setAttribute(SQLINTEGER attribute, py::object value) {
173173
LOG("Setting SQL attribute");
174174
SQLPOINTER ptr = nullptr;
175175
SQLINTEGER length = 0;
176-
std::string buffer; // to hold sensitive data temporarily
176+
static std::string buffer; // to hold sensitive data temporarily
177177

178178
if (py::isinstance<py::int_>(value)) {
179179
int intValue = value.cast<int>();
@@ -196,10 +196,6 @@ SQLRETURN Connection::setAttribute(SQLINTEGER attribute, py::object value) {
196196
LOG("Set attribute successfully");
197197
}
198198

199-
// Zero out sensitive data if used
200-
if (!buffer.empty()) {
201-
std::fill(buffer.begin(), buffer.end(), static_cast<char>(0));
202-
}
203199
return ret;
204200
}
205201

@@ -383,4 +379,4 @@ py::object ConnectionHandle::getInfo(SQLUSMALLINT infoType) const {
383379
ThrowStdException("Connection object is not initialized");
384380
}
385381
return _conn->getInfo(infoType);
386-
}
382+
}

mssql_python/pybind/ddbc_bindings.cpp

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2805,7 +2805,6 @@ SQLRETURN SQLGetData_wrap(SqlHandlePtr StatementHandle, SQLUSMALLINT colCount, p
28052805
microseconds,
28062806
tzinfo
28072807
);
2808-
py_dt = py_dt.attr("astimezone")(datetime.attr("timezone").attr("utc"));
28092808
row.append(py_dt);
28102809
} else {
28112810
LOG("Error fetching DATETIMEOFFSET for column {}, ret={}", i, ret);
@@ -3318,7 +3317,6 @@ SQLRETURN FetchBatchData(SQLHSTMT hStmt, ColumnBuffers& buffers, py::list& colum
33183317
dtoValue.fraction / 1000, // ns → µs
33193318
tzinfo
33203319
);
3321-
py_dt = py_dt.attr("astimezone")(datetime.attr("timezone").attr("utc"));
33223320
row.append(py_dt);
33233321
} else {
33243322
row.append(py::none());

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ def finalize_options(self):
8383

8484
setup(
8585
name='mssql-python',
86-
version='0.13.0',
86+
version='0.13.1',
8787
description='A Python library for interacting with Microsoft SQL Server',
8888
long_description=open('PyPI_Description.md', encoding='utf-8').read(),
8989
long_description_content_type='text/markdown',

tests/test_003_connection.py

Lines changed: 1 addition & 227 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,6 @@
77
- test_commit: Make a transaction and commit.
88
- test_rollback: Make a transaction and rollback.
99
- test_invalid_connection_string: Check if initializing with an invalid connection string raises an exception.
10-
- test_connection_pooling_speed: Test connection pooling speed.
11-
- test_connection_pooling_basic: Test basic connection pooling functionality.
1210
- test_autocommit_default: Check if autocommit is False by default.
1311
- test_autocommit_setter: Test setting autocommit mode and its effect on transactions.
1412
- test_set_autocommit: Test the setautocommit method.
@@ -288,131 +286,6 @@ def test_connection_close(conn_str):
288286
# Check if the database connection can be closed
289287
temp_conn.close()
290288

291-
def test_connection_pooling_speed(conn_str):
292-
"""Test that connection pooling provides performance benefits over multiple iterations."""
293-
import statistics
294-
295-
# Warm up to eliminate cold start effects
296-
for _ in range(3):
297-
conn = connect(conn_str)
298-
conn.close()
299-
300-
# Disable pooling first
301-
pooling(enabled=False)
302-
303-
# Test without pooling (multiple times)
304-
no_pool_times = []
305-
for _ in range(10):
306-
start = time.perf_counter()
307-
conn = connect(conn_str)
308-
conn.close()
309-
end = time.perf_counter()
310-
no_pool_times.append(end - start)
311-
312-
# Enable pooling
313-
pooling(max_size=5, idle_timeout=30)
314-
315-
# Test with pooling (multiple times)
316-
pool_times = []
317-
for _ in range(10):
318-
start = time.perf_counter()
319-
conn = connect(conn_str)
320-
conn.close()
321-
end = time.perf_counter()
322-
pool_times.append(end - start)
323-
324-
# Use median times to reduce impact of outliers
325-
median_no_pool = statistics.median(no_pool_times)
326-
median_pool = statistics.median(pool_times)
327-
328-
# Allow for some variance - pooling should be at least 30% faster on average
329-
improvement_threshold = 0.7 # Pool should be <= 70% of no-pool time
330-
331-
print(f"No pool median: {median_no_pool:.6f}s")
332-
print(f"Pool median: {median_pool:.6f}s")
333-
print(f"Improvement ratio: {median_pool/median_no_pool:.2f}")
334-
335-
# Clean up - disable pooling for other tests
336-
pooling(enabled=False)
337-
338-
assert median_pool <= median_no_pool * improvement_threshold, \
339-
f"Expected pooling to be at least 30% faster. No-pool: {median_no_pool:.6f}s, Pool: {median_pool:.6f}s"
340-
341-
def test_connection_pooling_reuse_spid(conn_str):
342-
"""Test that connections are actually reused from the pool"""
343-
# Enable pooling
344-
pooling(max_size=1, idle_timeout=30)
345-
346-
# Create and close a connection
347-
conn1 = connect(conn_str)
348-
cursor1 = conn1.cursor()
349-
cursor1.execute("SELECT @@SPID") # Get SQL Server process ID
350-
spid1 = cursor1.fetchone()[0]
351-
conn1.close()
352-
353-
# Get another connection - should be the same one from pool
354-
conn2 = connect(conn_str)
355-
cursor2 = conn2.cursor()
356-
cursor2.execute("SELECT @@SPID")
357-
spid2 = cursor2.fetchone()[0]
358-
conn2.close()
359-
360-
# The SPID should be the same, indicating connection reuse
361-
assert spid1 == spid2, "Connections not reused - different SPIDs"
362-
363-
# Clean up
364-
365-
def test_pool_exhaustion_max_size_1(conn_str):
366-
"""Test pool exhaustion when max_size=1 and multiple concurrent connections are requested."""
367-
pooling(max_size=1, idle_timeout=30)
368-
conn1 = connect(conn_str)
369-
results = []
370-
371-
def try_connect():
372-
try:
373-
conn2 = connect(conn_str)
374-
results.append("success")
375-
conn2.close()
376-
except Exception as e:
377-
results.append(str(e))
378-
379-
# Start a thread that will attempt to get a second connection while the first is open
380-
t = threading.Thread(target=try_connect)
381-
t.start()
382-
t.join(timeout=2)
383-
conn1.close()
384-
385-
# Depending on implementation, either blocks, raises, or times out
386-
assert results, "Second connection attempt did not complete"
387-
# If pool blocks, the thread may not finish until conn1 is closed, so allow both outcomes
388-
assert results[0] == "success" or "pool" in results[0].lower() or "timeout" in results[0].lower(), \
389-
f"Unexpected pool exhaustion result: {results[0]}"
390-
pooling(enabled=False)
391-
392-
def test_pool_idle_timeout_removes_connections(conn_str):
393-
"""Test that idle_timeout removes connections from the pool after the timeout."""
394-
pooling(max_size=2, idle_timeout=2)
395-
conn1 = connect(conn_str)
396-
spid_list = []
397-
cursor1 = conn1.cursor()
398-
cursor1.execute("SELECT @@SPID")
399-
spid1 = cursor1.fetchone()[0]
400-
spid_list.append(spid1)
401-
conn1.close()
402-
403-
# Wait for longer than idle_timeout
404-
time.sleep(3)
405-
406-
# Get a new connection, which should not reuse the previous SPID
407-
conn2 = connect(conn_str)
408-
cursor2 = conn2.cursor()
409-
cursor2.execute("SELECT @@SPID")
410-
spid2 = cursor2.fetchone()[0]
411-
spid_list.append(spid2)
412-
conn2.close()
413-
414-
assert spid1 != spid2, "Idle timeout did not remove connection from pool"
415-
416289
def test_connection_timeout_invalid_password(conn_str):
417290
"""Test that connecting with an invalid password raises an exception quickly (timeout)."""
418291
# Modify the connection string to use an invalid password
@@ -429,6 +302,7 @@ def test_connection_timeout_invalid_password(conn_str):
429302
# Should fail quickly (within 10 seconds)
430303
assert elapsed < 10, f"Connection with invalid password took too long: {elapsed:.2f}s"
431304

305+
432306
def test_connection_timeout_invalid_host(conn_str):
433307
"""Test that connecting to an invalid host fails with a timeout."""
434308
# Replace server/host with an invalid one
@@ -449,106 +323,6 @@ def test_connection_timeout_invalid_host(conn_str):
449323
# If it takes too long, it may indicate a misconfiguration or network issue.
450324
assert elapsed < 30, f"Connection to invalid host took too long: {elapsed:.2f}s"
451325

452-
def test_pool_removes_invalid_connections(conn_str):
453-
"""Test that the pool removes connections that become invalid (simulate by closing underlying connection)."""
454-
pooling(max_size=1, idle_timeout=30)
455-
conn = connect(conn_str)
456-
cursor = conn.cursor()
457-
cursor.execute("SELECT 1")
458-
# Simulate invalidation by forcibly closing the connection at the driver level
459-
try:
460-
# Try to access a private attribute or method to forcibly close the underlying connection
461-
# This is implementation-specific; if not possible, skip
462-
if hasattr(conn, "_conn") and hasattr(conn._conn, "close"):
463-
conn._conn.close()
464-
else:
465-
pytest.skip("Cannot forcibly close underlying connection for this driver")
466-
except Exception:
467-
pass
468-
# Safely close the connection, ignoring errors due to forced invalidation
469-
try:
470-
conn.close()
471-
except RuntimeError as e:
472-
if "not initialized" not in str(e):
473-
raise
474-
# Now, get a new connection from the pool and ensure it works
475-
new_conn = connect(conn_str)
476-
new_cursor = new_conn.cursor()
477-
try:
478-
new_cursor.execute("SELECT 1")
479-
result = new_cursor.fetchone()
480-
assert result is not None and result[0] == 1, "Pool did not remove invalid connection"
481-
finally:
482-
new_conn.close()
483-
pooling(enabled=False)
484-
485-
def test_pool_recovery_after_failed_connection(conn_str):
486-
"""Test that the pool recovers after a failed connection attempt."""
487-
pooling(max_size=1, idle_timeout=30)
488-
# First, try to connect with a bad password (should fail)
489-
if "Pwd=" in conn_str:
490-
bad_conn_str = conn_str.replace("Pwd=", "Pwd=wrongpassword")
491-
elif "Password=" in conn_str:
492-
bad_conn_str = conn_str.replace("Password=", "Password=wrongpassword")
493-
else:
494-
pytest.skip("No password found in connection string to modify")
495-
with pytest.raises(Exception):
496-
connect(bad_conn_str)
497-
# Now, connect with the correct string and ensure it works
498-
conn = connect(conn_str)
499-
cursor = conn.cursor()
500-
cursor.execute("SELECT 1")
501-
result = cursor.fetchone()
502-
assert result is not None and result[0] == 1, "Pool did not recover after failed connection"
503-
conn.close()
504-
pooling(enabled=False)
505-
506-
def test_pool_capacity_limit_and_overflow(conn_str):
507-
"""Test that pool does not grow beyond max_size and handles overflow gracefully."""
508-
pooling(max_size=2, idle_timeout=30)
509-
conns = []
510-
try:
511-
# Open up to max_size connections
512-
conns.append(connect(conn_str))
513-
conns.append(connect(conn_str))
514-
# Try to open a third connection, which should fail or block
515-
overflow_result = []
516-
def try_overflow():
517-
try:
518-
c = connect(conn_str)
519-
overflow_result.append("success")
520-
c.close()
521-
except Exception as e:
522-
overflow_result.append(str(e))
523-
t = threading.Thread(target=try_overflow)
524-
t.start()
525-
t.join(timeout=2)
526-
assert overflow_result, "Overflow connection attempt did not complete"
527-
# Accept either block, error, or success if pool implementation allows overflow
528-
assert overflow_result[0] == "success" or "pool" in overflow_result[0].lower() or "timeout" in overflow_result[0].lower(), \
529-
f"Unexpected pool overflow result: {overflow_result[0]}"
530-
finally:
531-
for c in conns:
532-
c.close()
533-
pooling(enabled=False)
534-
535-
def test_connection_pooling_basic(conn_str):
536-
# Enable pooling with small pool size
537-
pooling(max_size=2, idle_timeout=5)
538-
conn1 = connect(conn_str)
539-
conn2 = connect(conn_str)
540-
assert conn1 is not None
541-
assert conn2 is not None
542-
try:
543-
conn3 = connect(conn_str)
544-
assert conn3 is not None, "Third connection failed — pooling is not working or limit is too strict"
545-
conn3.close()
546-
except Exception as e:
547-
print(f"Expected: Could not open third connection due to max_size=2: {e}")
548-
549-
conn1.close()
550-
conn2.close()
551-
552326
def test_context_manager_commit(conn_str):
553327
"""Test that context manager closes connection on normal exit"""
554328
# Create a permanent table for testing across connections

0 commit comments

Comments
 (0)