% This file was created with JabRef 2.8.1.
% Encoding: ISO8859_1
@PHDTHESIS{Beal2003,
author = {Matthew J. Beal},
title = {Variational algorithms for approximate {B}ayesian inference},
school = {University of London},
year = {2003},
file = {Beal2003.pdf:Beal2003.pdf:PDF},
owner = {sbitzer},
timestamp = {2012.11.18}
}
@INCOLLECTION{Doucet2011,
author = {Doucet, Arnaud and Johansen, Adam M.},
title = {A Tutorial on Particle Filtering and Smoothing: Fifteen years later},
booktitle = {Oxford Handbook of Nonlinear Filtering},
publisher = {Oxford University Press},
year = {2011},
abstract = {Optimal estimation problems for non-linear non-Gaussian state-space
models do not typically admit analytic solutions. Since their introduction
in 1993, particle filtering methods have become a very popular class
of algorithms to solve these estimation problems numerically in an
online manner, i.e. recursively as observations become available,
and are now routinely used in fields as diverse as computer vision,
econometrics, robotics and navigation. The objective of this tutorial
is to provide a complete, up-to-date survey of this field as of 2008.
Basic and advanced particle methods for filtering as well as smoothing
are presented.},
file = {Doucet2011.pdf:Doucet2011.pdf:PDF},
owner = {bitzer},
timestamp = {2012.07.03}
}
@ARTICLE{Friston2008,
author = {Karl Friston},
  title = {Hierarchical models in the brain},
journal = {PLoS Comput Biol},
year = {2008},
volume = {4},
pages = {e1000211},
number = {11},
month = {Nov},
abstract = {This paper describes a general model that subsumes many parametric
models for continuous data. The model comprises hidden layers of
state-space or dynamic causal models, arranged so that the output
of one provides input to another. The ensuing hierarchy furnishes
a model for many types of data, of arbitrary complexity. Special
cases range from the general linear model for static data to generalised
convolution models, with system noise, for nonlinear time-series
analysis. Crucially, all of these models can be inverted using exactly
the same scheme, namely, dynamic expectation maximization. This means
that a single model and optimisation scheme can be used to invert
a wide range of models. We present the model and a brief review of
its inversion to disclose the relationships among, apparently, diverse
generative models of empirical data. We then show that this inversion
can be formulated as a simple neural network and may provide a useful
metaphor for inference and learning in the brain.},
doi = {10.1371/journal.pcbi.1000211},
file = {Friston2008.pdf:Friston2008.pdf:PDF},
institution = {The Wellcome Trust Centre of Neuroimaging, University College London,
London, United Kingdom. [email protected]},
keywords = {Algorithms; Animals; Brain, anatomy /&/ histology/physiology; Humans;
Linear Models; Mental Processes, physiology; Models, Neurological;
Nerve Net, anatomy /&/ histology/physiology; Neural Networks (Computer);
Nonlinear Dynamics; Probability},
language = {eng},
medline-pst = {ppublish},
owner = {bitzer},
pmid = {18989391},
timestamp = {2010.04.12},
url = {http://dx.doi.org/10.1371/journal.pcbi.1000211}
}
@ARTICLE{Friston2008a,
author = {K.J. Friston and N. Trujillo-Barreto and J. Daunizeau},
title = {{DEM}: A variational treatment of dynamic systems},
journal = {NeuroImage},
year = {2008},
volume = {41},
  pages = {849--885},
number = {3},
abstract = {This paper presents a variational treatment of dynamic models that
furnishes time-dependent conditional densities on the path or trajectory
of a system's states and the time-independent densities of its parameters.
These are obtained by maximising a variational action with respect
to conditional densities, under a fixed-form assumption about their
form. The action or path-integral of free-energy represents a lower
bound on the model's log-evidence or marginal likelihood required
for model selection and averaging. This approach rests on formulating
the optimisation dynamically, in generalised coordinates of motion.
The resulting scheme can be used for online Bayesian inversion of
nonlinear dynamic causal models and is shown to outperform existing
approaches, such as Kalman and particle filtering. Furthermore, it
provides for dual and triple inferences on a system's states, parameters
and hyperparameters using exactly the same principles. We refer to
this approach as dynamic expectation maximisation (DEM).},
doi = {10.1016/j.neuroimage.2008.02.054},
file = {Friston2008a.pdf:Friston2008a.pdf:PDF},
issn = {1053-8119},
keywords = {Variational Bayes; Free energy; Action; Dynamic expectation maximisation;
Dynamical systems; Nonlinear; Bayesian filtering; Variational filtering},
owner = {bitzer},
timestamp = {2010.04.12},
url = {http://www.sciencedirect.com/science/article/B6WNP-4S19RH8-1/2/48c1aa6f77adaeeaba06cd57573a986d}
}
@BOOK{Murphy2012,
title = {Machine Learning: A Probabilistic Perspective},
publisher = {MIT Press},
year = {2012},
author = {Kevin P. Murphy},
series = {Adaptive Computation and Machine Learning},
abstract = {My book (MLaPP) is similar to Bishop's Pattern recognition and machine
learning, Hastie et al's The Elements of Statistical Learning, and
to Wasserman's All of statistics, with the following key differences:
MLaPP is more accessible to undergrads. It pre-supposes a background
in probability, linear algebra, calculus, and programming; however,
the mathematical level ramps up slowly, with more difficult sections
clearly denoted as such. This makes the book suitable for both undergrads
and grads. Summaries of the relevant mathematical background, on
topics such as linear algebra, optimization and classical statistics
make the book self-contained.
MLaPP is more practically-oriented. In particular, it comes with
Matlab software to reproduce almost every figure, and to implement
almost every algorithm, discussed in the book. It includes many worked
examples of the methods applied to real data, with readable source
code online.
MLaPP covers various important topics that are not discussed in these
other books, such as conditional random fields, deep learning, etc.
MLaPP is "more Bayesian" than the Hastie or Wasserman books, but
"more frequentist" than the Bishop book. In particular, in MLaPP,
we make extensive use of MAP estimation, which we regard as "poor
man's Bayes". We prefer this to the regularization interpretation
of MAP, because then all the methods in the book (except cross validation...)
can be viewed as probabilistic inference, or some approximation thereof.
The MAP interpretation also allows for an easy "upgrade path" to
more accurate methods of approximate Bayesian inference, such as
empirical Bayes, variational Bayes, MCMC, SMC, etc.
The emphasis is on simple parametric models (linear and logistic
regression, discriminant analysis/ naive Bayes, mixture models, factor
analysis, graphical models, etc.), which are the ones most often
used in practice. However, we also briefly discuss non-parametric
models, such as Gaussian processes, Dirichlet processes, SVMs, RVMs,
etc.},
owner = {bitzer},
timestamp = {2012.11.19},
url = {http://www.cs.ubc.ca/~murphyk/MLbook/index.html}
}
@TECHREPORT{Petersen2008,
author = {K. B. Petersen and M. S. Pedersen},
title = {The Matrix Cookbook},
institution = {Technical University of Denmark},
year = {2008},
month = {oct},
note = {Version 20081110},
abstract = {Matrix identities, relations and approximations. A desktop reference
for quick overview of mathematics of matrices.},
file = {Petersen2008.pdf:Petersen2008.pdf:PDF},
keywords = {Matrix identity, matrix relations, inverse, matrix derivative},
owner = {bitzer},
publisher = {Technical University of Denmark},
timestamp = {2010.07.30},
url = {http://www2.imm.dtu.dk/pubdb/p.php?3274}
}
@comment{jabref-meta: selector_review:}
@comment{jabref-meta: selector_publisher:}
@comment{jabref-meta: selector_author:}
@comment{jabref-meta: selector_journal:}
@comment{jabref-meta: selector_keywords:}