Changes from all commits (48 commits)
71d0d3a
Allow for mu rescaling when simulating from likelihood.
robertsjames Apr 9, 2024
f4c6726
Towards passing in likelihood directly in non-asymptotic inference ro…
robertsjames Apr 9, 2024
41a8f06
change set_data to handle new method
Apr 10, 2024
632845c
Remove sources not used in a given fit from likelihood. NOTE: non_asy…
robertsjames Apr 10, 2024
46d2d99
Hessian back on.
robertsjames Apr 11, 2024
179d0ee
Handle setting data in batch routines for combined likelihood.
robertsjames Apr 11, 2024
9de483b
Add asymptotic p-value calculation.
robertsjames Apr 17, 2024
32247b6
Fix constraint batching issue.
robertsjames Apr 18, 2024
0ebe58d
Extract discovery significance.
robertsjames May 9, 2024
73f83ab
Extra TS evaluation for expected discovery significance plot.
robertsjames May 14, 2024
b41c382
Add temporary interpolator fix...
robertsjames May 15, 2024
c0dca87
Asymptotic calculation of median discovery potential curve.
robertsjames May 20, 2024
ba5d9f3
Fix, I think, for memory leak...
robertsjames May 24, 2024
e5039f8
Bug fix.
robertsjames May 24, 2024
196ac25
Old (linear) version of 1D template morphing.
robertsjames May 27, 2024
a7e18cb
Fixes to morphing implementation (simple).
robertsjames May 28, 2024
2c963e0
Fix batched_differential_rate with morphing source.
robertsjames May 29, 2024
e01fe36
Fix memory leak fix... Data tensor was not updating before.
robertsjames May 31, 2024
7a4b108
Add in normalisation variation with morphing (Josh).
robertsjames May 31, 2024
bcb9556
Cubic spline template morphing: very hacky, for now (Josh).
robertsjames May 31, 2024
d03376e
Fix for batch size > 1 in cubic spline morphing.
robertsjames Jun 3, 2024
0676116
Undo temporary change from before.
robertsjames Jun 3, 2024
793d26a
Fix to not setting non-rate NPs to conditional BF values in toys.
robertsjames Jun 5, 2024
64896d7
Fix for disco sig < 0.
robertsjames Jun 9, 2024
eeb68cd
Fix to earlier commit (non-rate NPs) not working when in sensitivity …
robertsjames Jun 17, 2024
94d66ab
Update band calculation routine: handle failed toys.
robertsjames Jun 18, 2024
9d35ec6
Return UL without PCL, also.
robertsjames Jul 22, 2024
72df5b7
Fix long-standing bug with indexing constraint centers in toys.
robertsjames Oct 21, 2024
0ed3bb1
Bug fix with template morphing: tracking of mu variation.
robertsjames Mar 17, 2025
4017cc9
allow_failure on by default for fits.
robertsjames May 7, 2025
27b5e9a
Handle case where toys end up with empty dataset(s): skip that toy.
robertsjames May 8, 2025
583e821
Fix to anchor padding for B-spline interpolation (template morphing):…
robertsjames May 30, 2025
fd8d326
Whoops, that last commit wasn't catching every case.
robertsjames Jun 2, 2025
044cb16
Generalise morphing when the ptensor has other elements.
robertsjames Jul 8, 2025
c92b18e
Adding in sampling of non-rate parameter constraint centers (Wei).
robertsjames Jul 11, 2025
d5ba074
Non-asymptotic inference updates: better handling of empty dataset; r…
robertsjames Jul 13, 2025
4af5d3b
Fixes: wasn't correctly sampling constraint centres for non-rate para…
robertsjames Jul 16, 2025
3881977
For now, handle combined_rate_scaling differently in toys (throw when…
robertsjames Jul 16, 2025
2895108
Okay, undo the earlier change to the guesses.
robertsjames Jul 16, 2025
3ece905
Flag to control when to handle combined_rate_scaling differently.
robertsjames Jul 31, 2025
954e58b
Don't save fit results as tensors.
robertsjames Oct 3, 2025
2e74990
Option to return the toys (toy indices) for the toys setting ULs clos…
robertsjames Oct 6, 2025
f184931
Was saving the wrong thing when saving toy fit results, for the backg…
robertsjames Oct 8, 2025
89737c8
Feat: allows evaluation of expected median discovery significance
josh0-jrg Nov 20, 2025
e4cc688
fix: get_median_disc_asymptotic to pass sigma_level
seriksen Jul 1, 2025
1db1cc3
debug: adding print statements whilst debugging
seriksen Jul 1, 2025
40bf3d7
chore: remove debug prints from non_asymptotic_inference.py
seriksen Nov 25, 2025
8e46e7d
Fix: return zero gradients instead of none on rate multipliers
josh0-jrg Dec 4, 2025
3 changes: 3 additions & 0 deletions flamedisx/__init__.py
```diff
@@ -41,3 +41,6 @@
 # Custom TFP files
 # Access through fd.tfp_files.xxx
 from . import tfp_files
+
+# TEMPORARY,I HOPE
+from . import tfbspline
```
7 changes: 5 additions & 2 deletions flamedisx/inference.py
```diff
@@ -176,10 +176,13 @@ def _dict_to_array(self, x: dict) -> np.array:
 
     def _array_to_dict(self, x: ty.Union[np.ndarray, tf.Tensor]) -> dict:
         """Convert from array/tensor to {parameter: value} dictionary"""
+        x = tf.cast(x, fd.float_type())
         assert isinstance(x, (np.ndarray, tf.Tensor))
         assert len(x) == len(self.arg_names)
-        return {k: x[i]
-                for i, k in enumerate(self.arg_names)}
+        param_dict = dict()
+        for i, k in enumerate(self.arg_names):
+            param_dict[k] = tf.gather(x, i)
+        return param_dict
 
     def normalize(self,
                   x: ty.Union[dict, np.ndarray],
```
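The `tf.gather`-based rewrite keeps every entry of the parameter dictionary a TensorFlow scalar rather than a plain array element, so downstream code can still differentiate through it. A minimal sketch of the pattern (not the flamedisx code itself; the parameter names are invented):

```python
# Sketch: turn a flat parameter tensor into a {name: scalar tensor} dict
# with tf.gather, so each value remains a TensorFlow tensor.
import tensorflow as tf

arg_names = ['er_rate_multiplier', 'nr_rate_multiplier', 'elife']  # hypothetical names

def array_to_dict(x, arg_names):
    x = tf.cast(x, tf.float32)
    return {name: tf.gather(x, i) for i, name in enumerate(arg_names)}

params = array_to_dict(tf.constant([1.0, 0.5, 600.0]), arg_names)
print(params['elife'])  # tf.Tensor(600.0, shape=(), dtype=float32)
```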
51 changes: 30 additions & 21 deletions flamedisx/likelihood.py
```diff
@@ -271,6 +271,8 @@ def set_data(self,
                           UserWarning)
             for s in self.sources.values():
                 s.set_data(None)
+                s.n_batches = 0
+            self.batch_info = None
             return
 
         batch_info = np.zeros((len(self.dsetnames), 3), dtype=int)
```
```diff
@@ -340,7 +342,8 @@
             np.concatenate([[0], stop_idx[:-1]]),
             stop_idx])
 
-    def simulate(self, fix_truth=None, **params):
+    def simulate(self, fix_truth=None, alter_source_mus=False,
+                 **params):
         """Simulate events from sources.
         """
         params = self.prepare_params(params, free_all_rates=True)
```
```diff
@@ -352,6 +355,8 @@
             rm = self._get_rate_mult(sname, params)
             mu = rm * s.mu_before_efficiencies(
                 **self._filter_source_kwargs(params, sname))
+            if alter_source_mus:
+                mu *= self.mu_estimators[sname](**self._filter_source_kwargs(params, sname))
             # Simulate this many events from source
             n_to_sim = np.random.poisson(mu)
             if n_to_sim == 0:
```
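With `alter_source_mus=True` the expected count for each source is rescaled before the Poisson draw. A rough sketch of that logic, with made-up numbers standing in for the rate multiplier, the pre-efficiency mu and the mu-estimator correction:

```python
# Sketch of the simulation step with an optional mu rescaling.
# All numbers below are invented for illustration.
import numpy as np

rate_multiplier = 2.0
mu_before_efficiencies = 150.0   # expected events before cuts (made up)
mu_correction = 0.85             # stand-in for the per-source mu estimator value

mu = rate_multiplier * mu_before_efficiencies
alter_source_mus = True
if alter_source_mus:
    mu *= mu_correction          # rescale the Poisson mean, as the new flag allows

n_to_sim = np.random.poisson(mu)  # number of events to draw for this source
print(n_to_sim)
```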
```diff
@@ -380,9 +385,6 @@ def log_likelihood(self, second_order=False,
                        omit_grads=tuple(), **kwargs):
         params = self.prepare_params(kwargs)
         n_grads = len(self.param_defaults) - len(omit_grads)
-        ll = 0.
-        llgrad = np.zeros(n_grads, dtype=np.float64)
-        llgrad2 = np.zeros((n_grads, n_grads), dtype=np.float64)
 
         for dsetname in self.dsetnames:
             # Getting this from the batch_info tensor is much slower
```
```diff
@@ -395,14 +397,18 @@
             else:
                 empty_batch = False
 
+            ll = {i_batch: 0. for i_batch in range(n_batches)}
+            llgrad = np.zeros(n_grads, dtype=np.float64)
+            llgrad2 = np.zeros((n_grads, n_grads), dtype=np.float64)
+
             for i_batch in range(n_batches):
                 # Iterating over tf.range seems much slower!
                 if empty_batch:
                     batch_data_tensor = None
                 else:
-                    batch_data_tensor = self.data_tensors[dsetname][i_batch]
+                    batch_data_tensor = tf.gather(self.data_tensors[dsetname], i_batch)
                 results = self._log_likelihood(
-                    tf.constant(i_batch, dtype=fd.int_type()),
+                    i_batch,
                     dsetname=dsetname,
                     data_tensor=batch_data_tensor,
                     batch_info=self.batch_info,
```
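Selecting the batch slice with `tf.gather` instead of Python indexing keeps the lookup as a TensorFlow op on the stacked data tensor. A self-contained sketch with invented shapes:

```python
# Sketch: pick one batch out of a stacked (n_batches, batch_size, n_cols)
# data tensor with tf.gather. Shapes and values are made up.
import tensorflow as tf

n_batches, batch_size, n_cols = 3, 4, 2
data_tensors = tf.reshape(
    tf.range(n_batches * batch_size * n_cols, dtype=tf.float32),
    (n_batches, batch_size, n_cols))

i_batch = tf.constant(1)
batch_data = tf.gather(data_tensors, i_batch)  # shape (batch_size, n_cols)
print(batch_data.shape)  # (4, 2)
```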
```diff
@@ -411,18 +417,18 @@
                     empty_batch=empty_batch,
                     constraint_extra_args=self.constraint_extra_args,
                     **params)
-                ll += results[0].numpy().astype(np.float64)
+                ll[i_batch] = results[0]
 
                 if self.param_names:
                     if results[1] is None:
                         raise ValueError("TensorFlow returned None as gradient!")
-                    llgrad += results[1].numpy().astype(np.float64)
+                    llgrad += results[1]
                     if second_order:
-                        llgrad2 += results[2].numpy().astype(np.float64)
+                        llgrad2 += results[2]
 
         if second_order:
-            return ll, llgrad, llgrad2
-        return ll, llgrad, None
+            return np.sum(list(ll.values())), llgrad, llgrad2
+        return np.sum(list(ll.values())), llgrad, None
 
     def minus2_ll(self, *, omit_grads=tuple(), **kwargs):
         result = self.log_likelihood(omit_grads=omit_grads, **kwargs)
```
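The per-batch log-likelihood terms are now collected in a dictionary and only summed at the end, while gradient (and optional Hessian) contributions are still accumulated directly. A simplified sketch of that accumulation pattern, with `fake_batch_ll` standing in for the real per-batch `_log_likelihood` results:

```python
# Sketch of the per-batch accumulation pattern (invented values).
import numpy as np

n_batches, n_grads = 3, 2
fake_batch_ll = [(-12.3, np.array([0.1, -0.4])),
                 (-10.8, np.array([0.2, 0.3])),
                 (-11.5, np.array([-0.1, 0.1]))]

ll = {i_batch: 0. for i_batch in range(n_batches)}
llgrad = np.zeros(n_grads, dtype=np.float64)

for i_batch in range(n_batches):
    batch_ll, batch_grad = fake_batch_ll[i_batch]
    ll[i_batch] = batch_ll   # keep per-batch terms separate
    llgrad += batch_grad     # gradients are summed as before

total_ll = np.sum(list(ll.values()))
print(total_ll, llgrad)
```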
```diff
@@ -431,11 +437,6 @@
         return -2 * ll, -2 * grad, hess
 
     def prepare_params(self, kwargs, free_all_rates=False):
-        for k in kwargs:
-            if k not in self.param_defaults:
-                if k.endswith('_rate_multiplier') and free_all_rates:
-                    continue
-                raise ValueError(f"Unknown parameter {k}")
         return {**self.param_defaults, **fd.values_to_constants(kwargs)}
 
     def _get_rate_mult(self, sname, kwargs):
```
```diff
@@ -468,7 +469,9 @@ def mu(self, *,
         :param dataset_name: ... for just this dataset
         :param source_name: ... for just this source.
         You must provide either dsetname or source, since it makes no sense to
-        add events from multiple datasets
+        add events from multiple datasets.
+        For rate multipliers (always linear) add a 0 x r.m**2 term to give a 0
+        hessian instead of None.
         """
         kwargs = {**self.param_defaults, **kwargs}
         if dataset_name is None and source_name is None:
```
```diff
@@ -481,8 +484,10 @@
             if source_name is not None and sname != source_name:
                 continue
             filtered_params = self._filter_source_kwargs(kwargs, sname)
-            mu += (self._get_rate_mult(sname, kwargs)
-                   * self.mu_estimators[sname](**filtered_params))
+            _rate_multiplier = self._get_rate_mult(sname, kwargs)
+            mu += (_rate_multiplier
+                   * self.mu_estimators[sname](**filtered_params)
+                   + tf.constant(0.,fd.float_type())*_rate_multiplier**2)
         return mu
 
     @tf.function
```
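The extra `0 * rate_multiplier**2` term is numerically a no-op, but it keeps the rate multiplier in the graph of the first derivative, so the second derivative comes back as a zero tensor instead of `None` (matching the commit "Fix: return zero gradients instead of none on rate multipliers"). A minimal sketch of the effect using nested gradient tapes, with `r` and `m` standing in for a rate multiplier and its mu estimate:

```python
# Sketch: why adding 0 * r**2 turns a None Hessian entry into a zero.
import tensorflow as tf

r = tf.Variable(2.0)   # stands in for a rate multiplier
m = tf.constant(5.0)   # stands in for the per-source mu estimate

def second_derivative(use_trick):
    with tf.GradientTape() as t2:
        with tf.GradientTape() as t1:
            mu = r * m
            if use_trick:
                mu += 0.0 * r ** 2  # adds nothing, but keeps r in the graph of d(mu)/dr
        g = t1.gradient(mu, r)      # first derivative
    return t2.gradient(g, r)        # second derivative

print(second_derivative(False))  # None: d(mu)/dr is a constant w.r.t. r
print(second_derivative(True))   # tf.Tensor(0.0, ...): a well-defined zero
```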
```diff
@@ -521,10 +526,14 @@ def _log_likelihood(self,
                 0.)
             if dsetname == self.dsetnames[0]:
                 if constraint_extra_args is None:
-                    ll += self.log_constraint(**params_unstacked)
+                    ll += tf.where(tf.equal(i_batch, tf.constant(0, dtype=fd.int_type())),
+                                   self.log_constraint(**params_unstacked),
+                                   0.)
                 else:
                     kwargs = {**params_unstacked, **constraint_extra_args}
-                    ll += self.log_constraint(**kwargs)
+                    ll += tf.where(tf.equal(i_batch, tf.constant(0, dtype=fd.int_type())),
+                                   self.log_constraint(**kwargs),
+                                   0.)
 
         # Autodifferentiation. This is why we use tensorflow:
         grad = tf.gradients(ll, grad_par_stack)[0]
```
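Wrapping the constraint in `tf.where(tf.equal(i_batch, 0), ..., 0.)` means it is added exactly once, for the first batch, while the branch stays inside the TensorFlow graph. A small sketch of the pattern, with `log_constraint_value` standing in for `self.log_constraint(...)`:

```python
# Sketch: add a constraint term only for batch 0, inside a tf.function.
import tensorflow as tf

@tf.function
def batch_ll(i_batch, base_ll, log_constraint_value):
    return base_ll + tf.where(tf.equal(i_batch, tf.constant(0)),
                              log_constraint_value,
                              0.)

print(batch_ll(tf.constant(0), tf.constant(-10.0), tf.constant(-1.5)))  # -11.5
print(batch_ll(tf.constant(1), tf.constant(-10.0), tf.constant(-1.5)))  # -10.0
```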