Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -310,9 +310,24 @@ def psf_precision_value_from(

for k0_y in range(kernel_native.shape[0]):
for k0_x in range(kernel_native.shape[1]):
value = value_native[
ip0_y + k0_y + kernel_shift_y, ip0_x + k0_x + kernel_shift_x
]
iy = ip0_y + k0_y + kernel_shift_y
ix = ip0_x + k0_x + kernel_shift_x

# numba @jit() does not bounds-check array reads. Without this
# guard, a kernel position that lands off the noise-map array
# (e.g. a mask pixel within `kernel_shift` of the array edge)
# silently reads uninitialized memory, producing astronomical
# or non-finite contributions that poison the entire
# psf_precision_operator and downstream curvature matrix.
if (
iy < 0
or iy >= value_native.shape[0]
or ix < 0
or ix >= value_native.shape[1]
):
continue

value = value_native[iy, ix]

if value > 0.0:
k1_y = k0_y + ip_y_offset
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,92 @@ def test__psf_precision_operator_sparse_from():
assert psf_weighted_noise_lengths == pytest.approx(np.array([4, 3, 2, 1]), 1.0e-4)


def test__psf_precision_operator_sparse_from__edge_pixels():
    """
    Regression test: every slim pixel sits at a corner of the 4x4 noise map,
    so the kernel walk in psf_precision_value_from indexes off the array.
    numba.jit() does not bounds-check, so without the explicit guard added
    in the function those reads return uninitialized memory and produce
    astronomically large or non-finite operator entries.

    The numba result is compared against a pure-numpy, bounds-checked
    re-implementation (`_reference_value`) which must agree to 1e-4.
    """
    noise_map = np.array(
        [
            [1.0, 1.0, 1.0, 1.0],
            [1.0, 2.0, 2.0, 1.0],
            [1.0, 2.0, 2.0, 1.0],
            [1.0, 1.0, 1.0, 1.0],
        ]
    )
    kernel = np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 1.0]])
    native_index_for_slim_index = np.array([[0, 0], [0, 3], [3, 0], [3, 3]])

    (
        op,
        indexes,
        lengths,
    ) = aa.util.inversion_imaging_numba.psf_precision_operator_sparse_from(
        noise_map_native=noise_map,
        kernel_native=kernel,
        native_index_for_slim_index=native_index_for_slim_index,
    )

    # Sanity: no inf/nan in the operator, and the per-row lengths account
    # for every stored entry.
    assert np.isfinite(op).all()
    assert int(lengths.sum()) == op.shape[0]

    # Independent reference: a pure-numpy bounds-checked re-implementation of
    # psf_precision_value_from. The numba version with the fix applied must
    # match this byte-for-byte.
    def _reference_value(ip0_y, ip0_x, ip1_y, ip1_x):
        h, w = noise_map.shape
        kh, kw = kernel.shape
        # The y-shift derives from the kernel *height* (kh, axis 0) and the
        # x-shift from the *width* (kw, axis 1). The original version had
        # these swapped, which was masked by the square 3x3 test kernel but
        # would silently diverge from the production code for any
        # non-square kernel.
        kernel_shift_y = -(kh // 2)
        kernel_shift_x = -(kw // 2)
        ip_y_offset = ip0_y - ip1_y
        ip_x_offset = ip0_x - ip1_x
        # Pixel pairs further apart than the kernel overlap contribute zero.
        if (
            ip_y_offset < 2 * kernel_shift_y
            or ip_y_offset > -2 * kernel_shift_y
            or ip_x_offset < 2 * kernel_shift_x
            or ip_x_offset > -2 * kernel_shift_x
        ):
            return 0.0
        total = 0.0
        for k0_y in range(kh):
            for k0_x in range(kw):
                iy = ip0_y + k0_y + kernel_shift_y
                ix = ip0_x + k0_x + kernel_shift_x
                # Skip kernel positions that fall off the noise map; this
                # mirrors the bounds guard under test in the numba code.
                if iy < 0 or iy >= h or ix < 0 or ix >= w:
                    continue
                v = noise_map[iy, ix]
                if v > 0.0:
                    k1_y = k0_y + ip_y_offset
                    k1_x = k0_x + ip_x_offset
                    if 0 <= k1_y < kh and 0 <= k1_x < kw:
                        total += kernel[k0_y, k0_x] * kernel[k1_y, k1_x] / v ** 2
        return total

    # Rebuild the sparse upper-triangular operator with the reference and
    # compare entry-for-entry, including the index and length bookkeeping.
    n_pix = native_index_for_slim_index.shape[0]
    expected = []
    expected_indexes = []
    expected_lengths = []
    for ip0 in range(n_pix):
        ip0_y, ip0_x = native_index_for_slim_index[ip0]
        count = 0
        for ip1 in range(ip0, n_pix):
            ip1_y, ip1_x = native_index_for_slim_index[ip1]
            v = _reference_value(ip0_y, ip0_x, ip1_y, ip1_x)
            # Diagonal entries are halved by convention in the sparse
            # operator (so that symmetrizing doubles them back).
            if ip0 == ip1:
                v /= 2.0
            if v > 0.0:
                expected.append(v)
                expected_indexes.append(ip1)
                count += 1
        expected_lengths.append(count)

    assert op == pytest.approx(np.array(expected), 1.0e-4)
    assert indexes == pytest.approx(np.array(expected_indexes), 1.0e-4)
    assert lengths == pytest.approx(np.array(expected_lengths), 1.0e-4)


def test__data_vector_via_blurred_mapping_matrix_from():
blurred_mapping_matrix = np.array(
[
Expand Down
Loading