@inproceedings{fages_machine_2006,
  address   = {Berlin, Heidelberg},
  series    = {Lecture {Notes} in {Computer} {Science}},
  title     = {Machine {Learning} {Biochemical} {Networks} from {Temporal} {Logic} {Properties}},
  isbn      = {978-3-540-46236-1},
  doi       = {10/dd8},
  abstract  = {One central issue in systems biology is the definition of formal languages for describing complex biochemical systems and their behavior at different levels. The biochemical abstract machine BIOCHAM is based on two formal languages, one rule-based language used for modeling biochemical networks, at three abstraction levels corresponding to three semantics: boolean, concentration and population; and one temporal logic language used for formalizing the biological properties of the system. In this paper, we show how the temporal logic language can be turned into a specification language. We describe two algorithms for inferring reaction rules and kinetic parameter values from a temporal specification formalizing the biological data. Then, with an example of the cell cycle control, we illustrate how these machine learning techniques may be useful to the modeler.},
  language  = {en},
  booktitle = {Transactions on {Computational} {Systems} {Biology} {VI}},
  publisher = {Springer},
  author    = {Fages, François and Calzone, Laurence and Chabrier-Rivier, Nathalie and Soliman, Sylvain},
  editor    = {Priami, Corrado and Plotkin, Gordon},
  year      = {2006},
  keywords  = {Abstract machines, Biology, Classical ML, Machine learning, Symbolic logic, Systems biology},
  pages     = {68--94},
}
@comment{REVIEW(healy_neural_2004): @inproceedings requires a booktitle field, which is missing here — BibTeX will warn and the rendered reference will have no venue. The venue is not recoverable from this file; possibly this work was actually a technical report (@techreport) rather than a conference paper — verify against the original source before fixing.}
@inproceedings{healy_neural_2004,
title = {Neural {Networks}, {Knowledge} and {Cognition}: {A} {Mathematical} {Semantic} {Model} {Based} upon {Category} {Theory}},
shorttitle = {Neural {Networks}, {Knowledge} and {Cognition}},
abstract = {Category theory can be applied to mathematically model the semantics of cognitive neural systems. We discuss semantics as a hierarchy of concepts, or symbolic descriptions of items sensed and represented in the connection weights distributed throughout a neural network. The hierarchy expresses subconcept relationships, and in a neural network it becomes represented incrementally through a Hebbian-like learning process. The categorical semantic model described here explains the learning process as the derivation of colimits and limits in a concept category. It explains the representation of the concept hierarchy in a neural network at each stage of learning as a system of functors and natural transformations, expressing knowledge coherence across the regions of a multi-regional network equipped with multiple sensors. The model yields design principles that constrain neural network designs capable of the most important aspects of cognitive behavior.},
author = {Healy, Michael J. and Caudell, Thomas P.},
year = {2004}
}
@inproceedings{izbicki_algebraic_2013,
  title      = {Algebraic {Classifiers}: {A} {Generic} {Approach} to {Fast} {Cross-Validation}, {Online} {Training}, and {Parallel} {Training}},
  shorttitle = {Algebraic {Classifiers}},
  abstract   = {We use abstract algebra to derive new algorithms for fast cross-validation, online learning, and parallel learning. To use these algorithms on a classification model, we must show that the model has appropriate algebraic structure. It is easy to give algebraic structure to some models, and we do this explicitly for Bayesian classifiers and a novel variation of decision stumps called HomStumps. But not all classifiers have an obvious structure, so we introduce the Free HomTrainer. This can be used to give a "generic" algebraic structure to any classifier. We use the Free HomTrainer to give algebraic structure to bagging and boosting. In so doing, we derive novel online and parallel algorithms, and present the first fast cross-validation schemes for these classifiers.},
  booktitle  = {{ICML}},
  author     = {Izbicki, Michael},
  year       = {2013},
  keywords   = {Algebra, Categorical ML, Machine learning},
}
@article{philipona_is_2003,
  title      = {Is {There} {Something} {Out} {There}? {Inferring} {Space} from {Sensorimotor} {Dependencies}},
  volume     = {15},
  shorttitle = {Is {There} {Something} {Out} {There}?},
  doi        = {10/frg7gs},
  abstract   = {This letter suggests that in biological organisms, the perceived structure of reality, in particular the notions of body, environment, space, object, and attribute, could be a consequence of an effort on the part of brains to account for the dependency between their inputs and their outputs in terms of a small number of parameters. To validate this idea, a procedure is demonstrated whereby the brain of a (simulated) organism with arbitrary input and output connectivity can deduce the dimensionality of the rigid group of the space underlying its input-output relationship, that is, the dimension of what the organism will call physical space.},
  journal    = {Neural Computation},
  author     = {Philipona, David and O'Regan, J. and Nadal, Jean-Pierre},
  month      = oct,
  year       = {2003},
  keywords   = {Algebra, Neuroscience},
  pages      = {2029--2049},
}
@book{winn_model-based_2019,
  title     = {Model-{Based} {Machine} {Learning}},
  isbn      = {978-1-4987-5681-5},
  abstract  = {This book is unusual for a machine learning text book in that the authors do not review dozens of different algorithms. Instead they introduce all of the key ideas through a series of case studies involving real-world applications. Case studies play a central role because it is only in the context of applications that it makes sense to discuss modelling assumptions. Each chapter therefore introduces one case study which is drawn from a real-world application that has been solved using a model-based approach.},
  language  = {en},
  publisher = {Taylor \& Francis Incorporated},
  author    = {Winn, John Michael},
  month     = jun,
  year      = {2019},
  keywords  = {Bayesian inference, Classical ML, Implementation},
}