@inproceedings{fages_machine_2006,
address = {Berlin, Heidelberg},
series = {Lecture {Notes} in {Computer} {Science}},
volume = {4220},
title = {Machine {Learning} {Biochemical} {Networks} from {Temporal} {Logic} {Properties}},
isbn = {978-3-540-46236-1},
doi = {10/dd8},
abstract = {One central issue in systems biology is the definition of formal languages for describing complex biochemical systems and their behavior at different levels. The biochemical abstract machine BIOCHAM is based on two formal languages, one rule-based language used for modeling biochemical networks, at three abstraction levels corresponding to three semantics: boolean, concentration and population; and one temporal logic language used for formalizing the biological properties of the system. In this paper, we show how the temporal logic language can be turned into a specification language. We describe two algorithms for inferring reaction rules and kinetic parameter values from a temporal specification formalizing the biological data. Then, with an example of the cell cycle control, we illustrate how these machine learning techniques may be useful to the modeler.},
language = {en},
booktitle = {Transactions on {Computational} {Systems} {Biology} {VI}},
publisher = {Springer},
author = {Fages, Fran{\c{c}}ois and Calzone, Laurence and Chabrier-Rivier, Nathalie and Soliman, Sylvain},
editor = {Priami, Corrado and Plotkin, Gordon},
year = {2006},
keywords = {Abstract machines, Biology, Classical ML, Machine learning, Symbolic logic, Systems biology},
pages = {68--94}
}
@comment{REVIEW healy_neural_2004: required booktitle is missing for @inproceedings, so standard styles will warn and render no venue. This work may in fact be a University of New Mexico EECE technical report rather than a conference paper -- verify the venue and, if so, change the entry type to @techreport with institution/number fields. Entry left unchanged pending confirmation.}
@inproceedings{healy_neural_2004,
title = {Neural {Networks}, {Knowledge} and {Cognition}: {A} {Mathematical} {Semantic} {Model} {Based} upon {Category} {Theory}},
shorttitle = {Neural {Networks}, {Knowledge} and {Cognition}},
abstract = {Category theory can be applied to mathematically model the semantics of cognitive neural systems. We discuss semantics as a hierarchy of concepts, or symbolic descriptions of items sensed and represented in the connection weights distributed throughout a neural network. The hierarchy expresses subconcept relationships, and in a neural network it becomes represented incrementally through a Hebbian-like learning process. The categorical semantic model described here explains the learning process as the derivation of colimits and limits in a concept category. It explains the representation of the concept hierarchy in a neural network at each stage of learning as a system of functors and natural transformations, expressing knowledge coherence across the regions of a multi-regional network equipped with multiple sensors. The model yields design principles that constrain neural network designs capable of the most important aspects of cognitive behavior.},
author = {Healy, Michael J. and Caudell, Thomas P.},
year = {2004}
}
@inproceedings{izbicki_algebraic_2013,
title = {Algebraic classifiers: a generic approach to fast cross-validation, online training, and parallel training},
shorttitle = {Algebraic classifiers},
abstract = {We use abstract algebra to derive new algorithms for fast cross-validation, online learning, and parallel learning. To use these algorithms on a classification model, we must show that the model has appropriate algebraic structure. It is easy to give algebraic structure to some models, and we do this explicitly for Bayesian classifiers and a novel variation of decision stumps called HomStumps. But not all classifiers have an obvious structure, so we introduce the Free HomTrainer. This can be used to give a "generic" algebraic structure to any classifier. We use the Free HomTrainer to give algebraic structure to bagging and boosting. In so doing, we derive novel online and parallel algorithms, and present the first fast cross-validation schemes for these classifiers.},
booktitle = {Proceedings of the 30th {International} {Conference} on {Machine} {Learning} ({ICML})},
author = {Izbicki, Michael},
year = {2013},
keywords = {Algebra, Categorical ML, Machine learning}
}
@inproceedings{sprunger_differentiable_2019,
address = {Vancouver, BC, Canada},
title = {Differentiable {Causal} {Computations} via {Delayed} {Trace}},
isbn = {978-1-72813-608-0},
url = {https://ieeexplore.ieee.org/document/8785670/},
doi = {10.1109/LICS.2019.8785670},
abstract = {We investigate causal computations taking sequences of inputs to sequences of outputs where the nth output depends on the first n inputs only. We model these in category theory via a construction taking a Cartesian category C to another category St(C) with a novel trace-like operation called ``delayed trace'', which misses yanking and dinaturality axioms of the usual trace. The delayed trace operation provides a feedback mechanism in St(C) with an implicit guardedness guarantee.},
language = {en},
urldate = {2019-11-23},
booktitle = {2019 34th {Annual} {ACM}/{IEEE} {Symposium} on {Logic} in {Computer} {Science} ({LICS})},
publisher = {IEEE},
author = {Sprunger, David and Katsumata, Shin-ya},
month = jun,
year = {2019},
keywords = {Categorical ML, Differentiation},
pages = {1--12}
}