@incollection{yang_commutative_2017,
address = {Berlin, Heidelberg},
title = {Commutative {Semantics} for {Probabilistic} {Programming}},
series = {Lecture Notes in Computer Science},
volume = {10201},
isbn = {978-3-662-54434-1},
url = {http://link.springer.com/10.1007/978-3-662-54434-1_32},
abstract = {We show that a measure-based denotational semantics for probabilistic programming is commutative. The idea underlying probabilistic programming languages (Anglican, Church, Hakaru, ...) is that programs express statistical models as a combination of prior distributions and likelihood of observations. The product of prior and likelihood is an unnormalized posterior distribution, and the inference problem is to find the normalizing constant. One common semantic perspective is thus that a probabilistic program is understood as an unnormalized posterior measure, in the sense of measure theory, and the normalizing constant is the measure of the entire semantic domain.},
language = {en},
urldate = {2019-11-23},
booktitle = {Programming {Languages} and {Systems}},
publisher = {Springer Berlin Heidelberg},
author = {Staton, Sam},
editor = {Yang, Hongseok},
year = {2017},
doi = {10.1007/978-3-662-54434-1_32},
keywords = {Bayesianism, Probabilistic programming, Programming language theory, Semantics},
pages = {855--879}
}
@misc{poggio_tomaso_2013,
title = {Tomaso {A}. {Poggio} autobiography},
url = {http://poggio-lab.mit.edu/sites/default/files/cv/tomasopoggio.pdf},
author = {Poggio, Tomaso},
year = {2013},
keywords = {Classical ML, Compendium, Machine learning},
pages = {54}
}
@incollection{horimoto_neural_2008,
address = {Berlin, Heidelberg},
title = {Neural {Algebra} and {Consciousness}: {A} {Theory} of {Structural} {Functionality} in {Neural} {Nets}},
series = {Lecture Notes in Computer Science},
volume = {5147},
isbn = {978-3-540-85101-1},
shorttitle = {Neural {Algebra} and {Consciousness}},
url = {http://link.springer.com/10.1007/978-3-540-85101-1_8},
abstract = {Thoughts are spatio-temporal patterns of coalitions of firing neurons and their interconnections. Neural algebras represent these patterns as formal algebraic objects, and a suitable composition operation reflects their interaction. Thus, a neural algebra is associated with any neural net. The present paper presents this formalization and develops the basic algebraic tools for formulating and solving the problem of finding the neural correlates of concepts such as reflection, association, coordination, etc. The main application is to the notion of consciousness, whose structural and functional basis is made explicit as the emergence of a set of solutions to a fixpoint equation.},
language = {en},
urldate = {2019-11-22},
booktitle = {Algebraic {Biology}},
publisher = {Springer Berlin Heidelberg},
author = {Engeler, Erwin},
editor = {Horimoto, Katsuhisa and Regensburger, Georg and Rosenkranz, Markus and Yoshida, Hiroshi},
year = {2008},
doi = {10.1007/978-3-540-85101-1_8},
keywords = {Emergence, Neuroscience, Sketchy},
pages = {96--109}
}
@incollection{goos_probabilistic_2004,
address = {Berlin, Heidelberg},
title = {Probabilistic {Automata}: {System} {Types}, {Parallel} {Composition} and {Comparison}},
series = {Lecture Notes in Computer Science},
volume = {2925},
isbn = {978-3-540-24611-4},
shorttitle = {Probabilistic {Automata}},
url = {http://link.springer.com/10.1007/978-3-540-24611-4_1},
abstract = {We survey various notions of probabilistic automata and probabilistic bisimulation, accumulating in an expressiveness hierarchy of probabilistic system types. The aim of this paper is twofold: On the one hand it provides an overview of existing types of probabilistic systems and, on the other hand, it explains the relationship between these models. We overview probabilistic systems with discrete probabilities only. The expressiveness order used to built the hierarchy is defined via the existence of mappings between the corresponding system types that preserve and reflect bisimilarity. Additionally, we discuss parallel composition for the presented types of systems, augmenting the map of probabilistic automata with closedness under this compositional operator.},
language = {en},
urldate = {2019-11-28},
booktitle = {Validation of {Stochastic} {Systems}},
publisher = {Springer Berlin Heidelberg},
author = {Sokolova, Ana and de Vink, Erik P.},
editor = {Baier, Christel and Haverkort, Boudewijn R. and Hermanns, Holger and Katoen, Joost-Pieter and Siegle, Markus},
year = {2004},
doi = {10.1007/978-3-540-24611-4_1},
keywords = {Coalgebras, Probabilistic transition systems, Transition systems},
pages = {1--43}
}
@incollection{wermuth_graphical_2001,
address = {Oxford},
title = {Graphical {Models}: {Overview}},
isbn = {978-0-08-043076-8},
shorttitle = {Graphical {Models}},
url = {http://www.sciencedirect.com/science/article/pii/B008043076700440X},
abstract = {Graphical Markov models provide a method of representing possibly complicated multivariate dependencies in such a way that the general qualitative features can be understood, that statistical independencies are highlighted, and that some properties can be derived directly. Variables are represented by the nodes of a graph. Pairs of nodes may be joined by an edge. Edges are directed if one variable is a response to the other variable considered as explanatory, but are undirected if the variables are on an equal footing. Absence of an edge typically implies statistical independence, conditional, or marginal depending on the kind of graph. The need for a number of types of graph arises because it is helpful to represent a number of different kinds of dependence structures. Of special importance are chain graphs in which variables are arranged in a sequence or chain of blocks, the variables in any one block being on an equal footing, some being possibly joint responses to variables in the past and some being jointly explanatory to variables in the future of the block considered. Some main properties of such systems are outlined, and recent research results are sketched. Suggestions for further reading are given. As an illustrative example, some analysis of data on the treatment of chronic pain is presented.},
language = {en},
urldate = {2019-11-22},
booktitle = {International {Encyclopedia} of the {Social} \& {Behavioral} {Sciences}},
publisher = {Pergamon},
author = {Wermuth, N. and Cox, D. R.},
editor = {Smelser, Neil J. and Baltes, Paul B.},
month = jan,
year = {2001},
doi = {10.1016/B0-08-043076-7/00440-X},
keywords = {Bayesianism, Classical ML, Machine learning},
pages = {6379--6386}
}
@incollection{lawvere_tools_1994,
title = {Tools for the {Advancement} of {Objective} {Logic}: {Closed} {Categories} and {Toposes}},
shorttitle = {Tools for the {Advancement} of {Objective} {Logic}},
booktitle = {The {Logical} {Foundations} of {Cognition}},
publisher = {Oxford University Press USA},
author = {Lawvere, F. William},
editor = {Macnamara, John and Reyes, Gonzalo E.},
year = {1994},
keywords = {Compendium, Emergence, Psychology, Sketchy},
pages = {43--56}
}