@article{1171,
author = {Tkacik, Gasper},
journal = {Physics of Life Reviews},
pages = {166 -- 167},
publisher = {Elsevier},
title = {{Understanding regulatory networks requires more than computing a multitude of graph statistics: Comment on ``Drivers of structural features in gene regulatory networks: From biophysical constraints to biological function'' by O. C. Martin et al.}},
doi = {10.1016/j.plrev.2016.06.005},
volume = {17},
year = {2016},
}
@article{1188,
abstract = {We consider a population dynamics model coupling cell growth to a diffusion in the space of metabolic phenotypes as it can be obtained from realistic constraints-based modelling.
In the asymptotic regime of slow
diffusion, that coincides with the relevant experimental range, the resulting
non-linear Fokker–Planck equation is solved for the steady state in the WKB
approximation that maps it into the ground state of a quantum particle in an
Airy potential plus a centrifugal term. We retrieve scaling laws for growth rate
fluctuations and time response with respect to the distance from the maximum
growth rate suggesting that suboptimal populations can have a faster response
to perturbations.},
author = {De Martino, Daniele and Masoero, Davide},
journal = {Journal of Statistical Mechanics: Theory and Experiment},
number = {12},
publisher = {IOPscience},
title = {{Asymptotic analysis of noisy fitness maximization, applied to metabolism \& growth}},
doi = {10.1088/1742-5468/aa4e8f},
volume = {2016},
year = {2016},
}
@article{1203,
  author    = {Hu, Fang and Rishishwar, Lavanya and Sivadas, Ambily and Mitchell, Gabriel and King, Jordan and Murphy, Timothy and Gilsdorf, Janet and Mayer, Leonard and Wang, Xin},
  title     = {{Comparative genomic analysis of Haemophilus haemolyticus and nontypeable Haemophilus influenzae and a new testing scheme for their discrimination}},
  journal   = {Journal of Clinical Microbiology},
  year      = {2016},
  volume    = {54},
  number    = {12},
  pages     = {3010 -- 3017},
  publisher = {American Society for Microbiology},
  doi       = {10.1128/JCM.01511-16},
  abstract  = {Haemophilus haemolyticus has been recently discovered to have the potential to cause invasive disease. It is closely related to nontypeable Haemophilus influenzae (NT H. influenzae). NT H. influenzae and H. haemolyticus are often misidentified because none of the existing tests targeting the known phenotypes of H. haemolyticus are able to specifically identify H. haemolyticus. Through comparative genomic analysis of H. haemolyticus and NT H. influenzae, we identified genes unique to H. haemolyticus that can be used as targets for the identification of H. haemolyticus. A real-time PCR targeting purT (encoding phosphoribosylglycinamide formyltransferase 2 in the purine synthesis pathway) was developed and evaluated. The lower limit of detection was 40 genomes/PCR; the sensitivity and specificity in detecting H. haemolyticus were 98.9% and 97%, respectively. To improve the discrimination of H. haemolyticus and NT H. influenzae, a testing scheme combining two targets (H. haemolyticus purT and H. influenzae hpd, encoding protein D lipoprotein) was also evaluated and showed 96.7% sensitivity and 98.2% specificity for the identification of H. haemolyticus and 92.8% sensitivity and 100% specificity for the identification of H. influenzae, respectively. The dual-target testing scheme can be used for the diagnosis and surveillance of infection and disease caused by H. haemolyticus and NT H. influenzae.},
}
@inproceedings{1214,
abstract = {With the accelerated development of robot technologies, optimal control becomes one of the central themes of research. In traditional approaches, the controller, by its internal functionality, finds appropriate actions on the basis of the history of sensor values, guided by the goals, intentions, objectives, learning schemes, and so forth. While very successful with classical robots, these methods run into severe difficulties when applied to soft robots, a new field of robotics with large interest for human-robot interaction. We claim that a novel controller paradigm opens new perspective for this field. This paper applies a recently developed neuro controller with differential extrinsic synaptic plasticity to a muscle-tendon driven arm-shoulder system from the Myorobotics toolkit. In the experiments, we observe a vast variety of self-organized behavior patterns: when left alone, the arm realizes pseudo-random sequences of different poses. By applying physical forces, the system can be entrained into definite motion patterns like wiping a table. Most interestingly, after attaching an object, the controller gets in a functional resonance with the object's internal dynamics, starting to shake spontaneously bottles half-filled with water or sensitively driving an attached pendulum into a circular mode. When attached to the crank of a wheel the neural system independently develops to rotate it. In this way, the robot discovers affordances of objects its body is interacting with.},
author = {Martius, Georg S and Hostettler, Raphael and Knoll, Alois and Der, Ralf},
booktitle = {{2016 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}},
location = {Daejeon, Korea},
publisher = {IEEE},
title = {{Compliant control for soft robots: Emergent behavior of a tendon driven anthropomorphic arm}},
doi = {10.1109/IROS.2016.7759138},
volume = {2016-November},
year = {2016},
}
@inproceedings{1220,
abstract = {Theoretical and numerical aspects of aerodynamic efficiency of propulsion systems coupled to the boundary layer of a fuselage are studied. We discuss the effects of local flow fields, which are affected both by conservative flow acceleration as well as total pressure losses, on the efficiency of boundary layer immersed propulsion devices. We introduce the concept of a boundary layer retardation turbine that helps reduce skin friction over the fuselage. We numerically investigate efficiency gains offered by boundary layer and wake interacting devices. We discuss the results in terms of a total energy consumption framework and show that efficiency gains of any device depend on all the other elements of the propulsion system.},
author = {Mikić, Gregor and Stoll, Alex and Bevirt, Joe and Grah, Rok and Moore, Mark},
booktitle = {{16th AIAA Aviation Technology, Integration, and Operations Conference}},
location = {Washington, D.C., USA},
pages = {1 -- 19},
publisher = {AIAA},
title = {{Fuselage boundary layer ingestion propulsion applied to a thin haul commuter aircraft for optimal efficiency}},
doi = {10.2514/6.2016-3764},
year = {2016},
}
@article{1242,
abstract = {A crucial step in the regulation of gene expression is binding of transcription factor (TF) proteins to regulatory sites along the DNA. But transcription factors act at nanomolar concentrations, and noise due to random arrival of these molecules at their binding sites can severely limit the precision of regulation. Recent work on the optimization of information flow through regulatory networks indicates that the lower end of the dynamic range of concentrations is simply inaccessible, overwhelmed by the impact of this noise. Motivated by the behavior of homeodomain proteins, such as the maternal morphogen Bicoid in the fruit fly embryo, we suggest a scheme in which transcription factors also act as indirect translational regulators, binding to the mRNA of other regulatory proteins. Intuitively, each mRNA molecule acts as an independent sensor of the input concentration, and averaging over these multiple sensors reduces the noise. We analyze information flow through this scheme and identify conditions under which it outperforms direct transcriptional regulation. Our results suggest that the dual role of homeodomain proteins is not just a historical accident, but a solution to a crucial physics problem in the regulation of gene expression.},
author = {Sokolowski, Thomas R and Walczak, Aleksandra and Bialek, William and Tkacik, Gasper},
journal = {Physical Review E Statistical Nonlinear and Soft Matter Physics},
number = {2},
publisher = {American Physical Society},
title = {{Extending the dynamic range of transcription factor action by translational regulation}},
doi = {10.1103/PhysRevE.93.022404},
volume = {93},
year = {2016},
}
@article{1244,
abstract = {Cell polarity refers to a functional spatial organization of proteins that is crucial for the control of essential cellular processes such as growth and division. To establish polarity, cells rely on elaborate regulation networks that control the distribution of proteins at the cell membrane. In fission yeast cells, a microtubule-dependent network has been identified that polarizes the distribution of signaling proteins that restricts growth to cell ends and targets the cytokinetic machinery to the middle of the cell. Although many molecular components have been shown to play a role in this network, it remains unknown which molecular functionalities are minimally required to establish a polarized protein distribution in this system. Here we show that a membrane-binding protein fragment, which distributes homogeneously in wild-type fission yeast cells, can be made to concentrate at cell ends by attaching it to a cytoplasmic microtubule end-binding protein. This concentration results in a polarized pattern of chimera proteins with a spatial extension that is very reminiscent of natural polarity patterns in fission yeast. However, chimera levels fluctuate in response to microtubule dynamics, and disruption of microtubules leads to disappearance of the pattern. Numerical simulations confirm that the combined functionality of membrane anchoring and microtubule tip affinity is in principle sufficient to create polarized patterns. Our chimera protein may thus represent a simple molecular functionality that is able to polarize the membrane, onto which additional layers of molecular complexity may be built to provide the temporal robustness that is typical of natural polarity patterns.},
author = {Recouvreux, Pierre and Sokolowski, Thomas R and Grammoustianou, Aristea and ten Wolde, Pieter Rein and Dogterom, Marileen},
journal = {PNAS},
number = {7},
pages = {1811 -- 1816},
publisher = {National Academy of Sciences},
title = {{Chimera proteins with affinity for membranes and microtubule tips polarize in the membrane of fission yeast cells}},
doi = {10.1073/pnas.1419248113},
volume = {113},
year = {2016},
}
@article{1248,
  author    = {Tkacik, Gasper and Bialek, William},
  title     = {{Information processing in living systems}},
  journal   = {Annual Review of Condensed Matter Physics},
  year      = {2016},
  volume    = {7},
  pages     = {89 -- 117},
  publisher = {Annual Reviews},
  doi       = {10.1146/annurev-conmatphys-031214-014803},
  abstract  = {Life depends as much on the flow of information as on the flow of energy. Here we review the many efforts to make this intuition precise. Starting with the building blocks of information theory, we explore examples where it has been possible to measure, directly, the flow of information in biological networks, or more generally where information-theoretic ideas have been used to guide the analysis of experiments. Systems of interest range from single molecules (the sequence diversity in families of proteins) to groups of organisms (the distribution of velocities in flocks of birds), and all scales in between. Many of these analyses are motivated by the idea that biological systems may have evolved to optimize the gathering and representation of information, and we review the experimental evidence for this optimization, again across a wide range of scales.},
}
@article{1260,
  author    = {De Martino, Daniele},
  title     = {{The dual of the space of interactions in neural network models}},
  journal   = {International Journal of Modern Physics C},
  year      = {2016},
  volume    = {27},
  number    = {6},
  publisher = {World Scientific Publishing},
  doi       = {10.1142/S0129183116500674},
  abstract  = {In this work, the Gardner problem of inferring interactions and fields for an Ising neural network from given patterns under a local stability hypothesis is addressed under a dual perspective. By means of duality arguments, an integer linear system is defined whose solution space is the dual of the Gardner space and whose solutions represent mutually unstable patterns. We propose and discuss Monte Carlo methods in order to find and remove unstable patterns and uniformly sample the space of interactions thereafter. We illustrate the problem on a set of real data and perform ensemble calculation that shows how the emergence of phase dominated by unstable patterns can be triggered in a nonlinear discontinuous way.},
}
@article{1266,
abstract = {Cortical networks exhibit ‘global oscillations’, in which neural spike times are entrained to an underlying oscillatory rhythm, but where individual neurons fire irregularly, on only a fraction of cycles. While the network dynamics underlying global oscillations have been well characterised, their function is debated. Here, we show that such global oscillations are a direct consequence of optimal efficient coding in spiking networks with synaptic delays and noise. To avoid firing unnecessary spikes, neurons need to share information about the network state. Ideally, membrane potentials should be strongly correlated and reflect a ‘prediction error’ while the spikes themselves are uncorrelated and occur rarely. We show that the most efficient representation is when: (i) spike times are entrained to a global Gamma rhythm (implying a consistent representation of the error); but (ii) few neurons fire on each cycle (implying high efficiency), while (iii) excitation and inhibition are tightly balanced. This suggests that cortical networks exhibiting such dynamics are tuned to achieve a maximally efficient population code.},
author = {Chalk, Matthew J and Gutkin, Boris and Denève, Sophie},
journal = {eLife},
pages = {e13824},
publisher = {eLife Sciences Publications},
title = {{Neural oscillations as a signature of efficient coding in the presence of synaptic delays}},
doi = {10.7554/eLife.13824},
volume = {5},
year = {2016},
}
@inproceedings{948,
abstract = {Experience constantly shapes neural circuits through a variety of plasticity mechanisms. While the functional roles of some plasticity mechanisms are well-understood, it remains unclear how changes in neural excitability contribute to learning. Here, we develop a normative interpretation of intrinsic plasticity (IP) as a key component of unsupervised learning. We introduce a novel generative mixture model that accounts for the class-specific statistics of stimulus intensities, and we derive a neural circuit that learns the input classes and their intensities. We will analytically show that inference and learning for our generative model can be achieved by a neural circuit with intensity-sensitive neurons equipped with a specific form of IP. Numerical experiments verify our analytical derivations and show robust behavior for artificial and natural stimuli. Our results link IP to non-trivial input statistics, in particular the statistics of stimulus intensities for classes to which a neuron is sensitive. More generally, our work paves the way toward new classification algorithms that are robust to intensity variations.},
author = {Monk, Travis and Savin, Cristina and Lücke, Jörg},
booktitle = {{Advances in Neural Information Processing Systems}},
location = {Barcelona, Spain},
pages = {4285 -- 4293},
publisher = {Neural Information Processing Systems},
title = {{Neurons equipped with intrinsic plasticity learn stimulus intensity statistics}},
volume = {29},
year = {2016},
}
@article{1197,
  author    = {Prentice, Jason and Marre, Olivier and Ioffe, Mark and Loback, Adrianna and Tkacik, Gasper and Berry, Michael},
  title     = {{Error-robust modes of the retinal population code}},
  journal   = {PLoS Computational Biology},
  year      = {2016},
  volume    = {12},
  number    = {11},
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pcbi.1005148},
  abstract  = {Across the nervous system, certain population spiking patterns are observed far more frequently than others. A hypothesis about this structure is that these collective activity patterns function as population codewords–collective modes–carrying information distinct from that of any single cell. We investigate this phenomenon in recordings of ∼150 retinal ganglion cells, the retina’s output. We develop a novel statistical model that decomposes the population response into modes; it predicts the distribution of spiking activity in the ganglion cell population with high accuracy. We found that the modes represent localized features of the visual stimulus that are distinct from the features represented by single neurons. Modes form clusters of activity states that are readily discriminated from one another. When we repeated the same visual stimulus, we found that the same mode was robustly elicited. These results suggest that retinal ganglion cells’ collective signaling is endowed with a form of error-correcting code–a principle that may hold in brain areas beyond retina.},
}
@article{1270,
  author    = {Hillenbrand, Patrick and Gerland, Ulrich and Tkacik, Gasper},
  title     = {{Beyond the French flag model: Exploiting spatial and gene regulatory interactions for positional information}},
  journal   = {PLoS One},
  year      = {2016},
  volume    = {11},
  number    = {9},
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pone.0163628},
  abstract  = {A crucial step in the early development of multicellular organisms involves the establishment of spatial patterns of gene expression which later direct proliferating cells to take on different cell fates. These patterns enable the cells to infer their global position within a tissue or an organism by reading out local gene expression levels. The patterning system is thus said to encode positional information, a concept that was formalized recently in the framework of information theory. Here we introduce a toy model of patterning in one spatial dimension, which can be seen as an extension of Wolpert's paradigmatic "French Flag" model, to patterning by several interacting, spatially coupled genes subject to intrinsic and extrinsic noise. Our model, a variant of an Ising spin system, allows us to systematically explore expression patterns that optimally encode positional information. We find that optimal patterning systems use positional cues, as in the French Flag model, together with gene-gene interactions to generate combinatorial codes for position which we call "Counter" patterns. Counter patterns can also be stabilized against noise and variations in system size or morphogen dosage by longer-range spatial interactions of the type invoked in the Turing model. The simple setup proposed here qualitatively captures many of the experimentally observed properties of biological patterning systems and allows them to be studied in a single, theoretically consistent framework.},
}
@misc{9870,
abstract = {The effect of noise in the input field on an Ising model is approximated. Furthermore, methods to compute positional information in an Ising model by transfer matrices and Monte Carlo sampling are outlined.},
author = {Hillenbrand, Patrick and Gerland, Ulrich and Tkacik, Gasper},
publisher = {Public Library of Science},
title = {{Computation of positional information in an Ising model}},
doi = {10.1371/journal.pone.0163628.s002},
year = {2016},
}
@misc{9869,
abstract = {A lower bound on the error of a positional estimator with limited positional information is derived.},
author = {Hillenbrand, Patrick and Gerland, Ulrich and Tkacik, Gasper},
publisher = {Public Library of Science},
title = {{Error bound on an estimator of position}},
doi = {10.1371/journal.pone.0163628.s001},
year = {2016},
}
@misc{9871,
abstract = {The positional information in a discrete morphogen field with Gaussian noise is computed.},
author = {Hillenbrand, Patrick and Gerland, Ulrich and Tkacik, Gasper},
publisher = {Public Library of Science},
title = {{Computation of positional information in a discrete morphogen field}},
doi = {10.1371/journal.pone.0163628.s003},
year = {2016},
}
@article{1697,
  author    = {Marre, Olivier and Botella Soler, Vicente and Simmons, Kristina and Mora, Thierry and Tkacik, Gasper and Berry, Michael},
  title     = {{High accuracy decoding of dynamical motion from a large retinal population}},
  journal   = {PLoS Computational Biology},
  year      = {2015},
  volume    = {11},
  number    = {7},
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pcbi.1004304},
  abstract  = {Motion tracking is a challenge the visual system has to solve by reading out the retinal population. It is still unclear how the information from different neurons can be combined together to estimate the position of an object. Here we recorded a large population of ganglion cells in a dense patch of salamander and guinea pig retinas while displaying a bar moving diffusively. We show that the bar’s position can be reconstructed from retinal activity with a precision in the hyperacuity regime using a linear decoder acting on 100+ cells. We then took advantage of this unprecedented precision to explore the spatial structure of the retina’s population code. The classical view would have suggested that the firing rates of the cells form a moving hill of activity tracking the bar’s position. Instead, we found that most ganglion cells in the salamander fired sparsely and idiosyncratically, so that their neural image did not track the bar. Furthermore, ganglion cell activity spanned an area much larger than predicted by their receptive fields, with cells coding for motion far in their surround. As a result, population redundancy was high, and we could find multiple, disjoint subsets of neurons that encoded the trajectory with high precision. This organization allows for diverse collections of ganglion cells to represent high-accuracy motion information in a form easily read out by downstream neural circuits.},
}
@article{1701,
abstract = {The activity of a neural network is defined by patterns of spiking and silence from the individual neurons. Because spikes are (relatively) sparse, patterns of activity with increasing numbers of spikes are less probable, but, with more spikes, the number of possible patterns increases. This tradeoff between probability and numerosity is mathematically equivalent to the relationship between entropy and energy in statistical physics. We construct this relationship for populations of up to N = 160 neurons in a small patch of the vertebrate retina, using a combination of direct and model-based analyses of experiments on the response of this network to naturalistic movies. We see signs of a thermodynamic limit, where the entropy per neuron approaches a smooth function of the energy per neuron as N increases. The form of this function corresponds to the distribution of activity being poised near an unusual kind of critical point. We suggest further tests of criticality, and give a brief discussion of its functional significance. },
author = {Tkacik, Gasper and Mora, Thierry and Marre, Olivier and Amodei, Dario and Palmer, Stephanie and Berry, II, Michael and Bialek, William},
journal = {PNAS},
number = {37},
pages = {11508 -- 11513},
publisher = {National Academy of Sciences},
title = {{Thermodynamics and signatures of criticality in a network of neurons}},
doi = {10.1073/pnas.1514188112},
volume = {112},
year = {2015},
}
@article{1861,
abstract = {Continuous-time Markov chains are commonly used in practice for modeling biochemical reaction networks in which the inherent randomness of the molecular interactions cannot be ignored. This has motivated recent research effort into methods for parameter inference and experiment design for such models. The major difficulty is that such methods usually require one to iteratively solve the chemical master equation that governs the time evolution of the probability distribution of the system. This, however, is rarely possible, and even approximation techniques remain limited to relatively small and simple systems. An alternative explored in this article is to base methods on only some low-order moments of the entire probability distribution. We summarize the theory behind such moment-based methods for parameter inference and experiment design and provide new case studies where we investigate their performance.},
author = {Ruess, Jakob and Lygeros, John},
journal = {ACM Transactions on Modeling and Computer Simulation},
number = {2},
publisher = {ACM},
title = {{Moment-based methods for parameter inference and experiment design for stochastic biochemical reaction networks}},
doi = {10.1145/2688906},
volume = {25},
year = {2015},
}
@article{1885,
  author    = {Tkacik, Gasper and Dubuis, Julien and Petkova, Mariela and Gregor, Thomas},
  title     = {{Positional information, positional error, and readout precision in morphogenesis: A mathematical framework}},
  journal   = {Genetics},
  year      = {2015},
  volume    = {199},
  number    = {1},
  pages     = {39 -- 59},
  publisher = {Genetics Society of America},
  doi       = {10.1534/genetics.114.171850},
  abstract  = {The concept of positional information is central to our understanding of how cells determine their location in a multicellular structure and thereby their developmental fates. Nevertheless, positional information has neither been defined mathematically nor quantified in a principled way. Here we provide an information-theoretic definition in the context of developmental gene expression patterns and examine the features of expression patterns that affect positional information quantitatively. We connect positional information with the concept of positional error and develop tools to directly measure information and error from experimental data. We illustrate our framework for the case of gap gene expression patterns in the early Drosophila embryo and show how information that is distributed among only four genes is sufficient to determine developmental fates with nearly single-cell resolution. Our approach can be generalized to a variety of different model systems; procedures and examples are discussed in detail. },
}
@article{1940,
abstract = {We typically think of cells as responding to external signals independently by regulating their gene expression levels, yet they often locally exchange information and coordinate. Can such spatial coupling be of benefit for conveying signals subject to gene regulatory noise? Here we extend our information-theoretic framework for gene regulation to spatially extended systems. As an example, we consider a lattice of nuclei responding to a concentration field of a transcriptional regulator (the "input") by expressing a single diffusible target gene. When input concentrations are low, diffusive coupling markedly improves information transmission; optimal gene activation functions also systematically change. A qualitatively new regulatory strategy emerges where individual cells respond to the input in a nearly step-like fashion that is subsequently averaged out by strong diffusion. While motivated by early patterning events in the Drosophila embryo, our framework is generically applicable to spatially coupled stochastic gene expression models.},
author = {Sokolowski, Thomas R and Tkacik, Gasper},
journal = {Physical Review E Statistical Nonlinear and Soft Matter Physics},
number = {6},
publisher = {American Physical Society},
title = {{Optimizing information flow in small genetic networks. IV. Spatial coupling}},
doi = {10.1103/PhysRevE.91.062710},
volume = {91},
year = {2015},
}
@article{1538,
  author    = {Ruess, Jakob and Parise, Francesca and Milias Argeitis, Andreas and Khammash, Mustafa and Lygeros, John},
  title     = {{Iterative experiment design guides the characterization of a light-inducible gene expression circuit}},
  journal   = {PNAS},
  year      = {2015},
  volume    = {112},
  number    = {26},
  pages     = {8148 -- 8153},
  publisher = {National Academy of Sciences},
  doi       = {10.1073/pnas.1423947112},
  abstract  = {Systems biology rests on the idea that biological complexity can be better unraveled through the interplay of modeling and experimentation. However, the success of this approach depends critically on the informativeness of the chosen experiments, which is usually unknown a priori. Here, we propose a systematic scheme based on iterations of optimal experiment design, flow cytometry experiments, and Bayesian parameter inference to guide the discovery process in the case of stochastic biochemical reaction networks. To illustrate the benefit of our methodology, we apply it to the characterization of an engineered light-inducible gene expression circuit in yeast and compare the performance of the resulting model with models identified from nonoptimal experiments. In particular, we compare the parameter posterior distributions and the precision to which the outcome of future experiments can be predicted. Moreover, we illustrate how the identified stochastic model can be used to determine light induction patterns that make either the average amount of protein or the variability in a population of cells follow a desired profile. Our results show that optimal experiment design allows one to derive models that are accurate enough to precisely predict and regulate the protein expression in heterogeneous cell populations over extended periods of time.},
}
@article{1539,
  author    = {Ruess, Jakob},
  title     = {{Minimal moment equations for stochastic models of biochemical reaction networks with partially finite state space}},
  journal   = {Journal of Chemical Physics},
  year      = {2015},
  volume    = {143},
  number    = {24},
  publisher = {American Institute of Physics},
  doi       = {10.1063/1.4937937},
  abstract  = {Many stochastic models of biochemical reaction networks contain some chemical species for which the number of molecules that are present in the system can only be finite (for instance due to conservation laws), but also other species that can be present in arbitrarily large amounts. The prime example of such networks are models of gene expression, which typically contain a small and finite number of possible states for the promoter but an infinite number of possible states for the amount of mRNA and protein. One of the main approaches to analyze such models is through the use of equations for the time evolution of moments of the chemical species. Recently, a new approach based on conditional moments of the species with infinite state space given all the different possible states of the finite species has been proposed. It was argued that this approach allows one to capture more details about the full underlying probability distribution with a smaller number of equations. Here, I show that the result that less moments provide more information can only stem from an unnecessarily complicated description of the system in the classical formulation. The foundation of this argument will be the derivation of moment equations that describe the complete probability distribution over the finite state space but only low-order moments over the infinite state space. I will show that the number of equations that is needed is always less than what was previously claimed and always less than the number of conditional moment equations up to the same order. To support these arguments, a symbolic algorithm is provided that can be used to derive minimal systems of unconditional moment equations for models with partially finite state space. },
}
@article{1564,
  author    = {Gilson, Matthieu and Savin, Cristina and Zenke, Friedemann},
  title     = {{Editorial: Emergent neural computation from the interaction of different forms of plasticity}},
  journal   = {Frontiers in Computational Neuroscience},
  year      = {2015},
  volume    = {9},
  number    = {11},
  publisher = {Frontiers Research Foundation},
  doi       = {10.3389/fncom.2015.00145},
}
@article{1570,
abstract = {Grounding autonomous behavior in the nervous system is a fundamental challenge for neuroscience. In particular, self-organized behavioral development provides more questions than answers. Are there special functional units for curiosity, motivation, and creativity? This paper argues that these features can be grounded in synaptic plasticity itself, without requiring any higher-level constructs. We propose differential extrinsic plasticity (DEP) as a new synaptic rule for self-learning systems and apply it to a number of complex robotic systems as a test case. Without specifying any purpose or goal, seemingly purposeful and adaptive rhythmic behavior is developed, displaying a certain level of sensorimotor intelligence. These surprising results require no system-specific modifications of the DEP rule. They rather arise from the underlying mechanism of spontaneous symmetry breaking, which is due to the tight brain-body-environment coupling. The new synaptic rule is biologically plausible and would be an interesting target for neurobiological investigation. We also argue that this neuronal mechanism may have been a catalyst in natural evolution.},
author = {Der, Ralf and Martius, Georg S},
journal = {Proceedings of the National Academy of Sciences},
number = {45},
pages = {E6224 -- E6232},
publisher = {National Academy of Sciences},
title = {{Novel plasticity rule can explain the development of sensorimotor intelligence}},
doi = {10.1073/pnas.1508400112},
volume = {112},
year = {2015},
}
@article{1576,
abstract = {Gene expression is controlled primarily by interactions between transcription factor proteins (TFs) and the regulatory DNA sequence, a process that can be captured well by thermodynamic models of regulation. These models, however, neglect regulatory crosstalk: the possibility that noncognate TFs could initiate transcription, with potentially disastrous effects for the cell. Here, we estimate the importance of crosstalk, suggest that its avoidance strongly constrains equilibrium models of TF binding, and propose an alternative nonequilibrium scheme that implements kinetic proofreading to suppress erroneous initiation. This proposal is consistent with the observed covalent modifications of the transcriptional apparatus and predicts increased noise in gene expression as a trade-off for improved specificity. Using information theory, we quantify this trade-off to find when optimal proofreading architectures are favored over their equilibrium counterparts. Such architectures exhibit significant super-Poisson noise at low expression in steady state.},
author = {Cepeda Humerez, Sarah A and Rieckh, Georg and Tkacik, Gasper},
journal = {Physical Review Letters},
number = {24},
publisher = {American Physical Society},
title = {{Stochastic proofreading mechanism alleviates crosstalk in transcriptional regulation}},
doi = {10.1103/PhysRevLett.115.248101},
volume = {115},
year = {2015},
}
@article{1655,
abstract = {Quantifying behaviors of robots which were generated autonomously from task-independent objective functions is an important prerequisite for objective comparisons of algorithms and movements of animals. The temporal sequence of such a behavior can be considered as a time series and hence complexity measures developed for time series are natural candidates for its quantification. The predictive information and the excess entropy are such complexity measures. They measure the amount of information the past contains about the future and thus quantify the nonrandom structure in the temporal sequence. However, when using these measures for systems with continuous states one has to deal with the fact that their values will depend on the resolution with which the systems states are observed. For deterministic systems both measures will diverge with increasing resolution. We therefore propose a new decomposition of the excess entropy in resolution dependent and resolution independent parts and discuss how they depend on the dimensionality of the dynamics, correlations and the noise level. For the practical estimation we propose to use estimates based on the correlation integral instead of the direct estimation of the mutual information based on next neighbor statistics because the latter allows less control of the scale dependencies. Using our algorithm we are able to show how autonomous learning generates behavior of increasing complexity with increasing learning duration.},
author = {Martius, Georg S and Olbrich, Eckehard},
journal = {Entropy},
number = {10},
pages = {7266 -- 7297},
publisher = {Multidisciplinary Digital Publishing Institute},
title = {{Quantifying emergent behavior of autonomous robots}},
doi = {10.3390/e17107266},
volume = {17},
year = {2015},
}
@inproceedings{1658,
abstract = {Continuous-time Markov chain (CTMC) models have become a central tool for understanding the dynamics of complex reaction networks and the importance of stochasticity in the underlying biochemical processes. When such models are employed to answer questions in applications, in order to ensure that the model provides a sufficiently accurate representation of the real system, it is of vital importance that the model parameters are inferred from real measured data. This, however, is often a formidable task and all of the existing methods fail in one case or the other, usually because the underlying CTMC model is high-dimensional and computationally difficult to analyze. The parameter inference methods that tend to scale best in the dimension of the CTMC are based on so-called moment closure approximations. However, there exists a large number of different moment closure approximations and it is typically hard to say a priori which of the approximations is the most suitable for the inference procedure. Here, we propose a moment-based parameter inference method that automatically chooses the most appropriate moment closure method. Accordingly, contrary to existing methods, the user is not required to be experienced in moment closure techniques. In addition to that, our method adaptively changes the approximation during the parameter inference to ensure that always the best approximation is used, even in cases where different approximations are best in different regions of the parameter space.},
author = {Bogomolov, Sergiy and Henzinger, Thomas A and Podelski, Andreas and Ruess, Jakob and Schilling, Christian},
booktitle = {Computational Methods in Systems Biology},
location = {Nantes, France},
pages = {77 -- 89},
publisher = {Springer},
title = {{Adaptive moment closure for parameter inference of biochemical reaction networks}},
doi = {10.1007/978-3-319-23401-4_8},
volume = {9308},
year = {2015},
}
@article{1666,
abstract = {Evolution of gene regulation is crucial for our understanding of the phenotypic differences between species, populations and individuals. Sequence-specific binding of transcription factors to the regulatory regions on the DNA is a key regulatory mechanism that determines gene expression and hence heritable phenotypic variation. We use a biophysical model for directional selection on gene expression to estimate the rates of gain and loss of transcription factor binding sites (TFBS) in finite populations under both point and insertion/deletion mutations. Our results show that these rates are typically slow for a single TFBS in an isolated DNA region, unless the selection is extremely strong. These rates decrease drastically with increasing TFBS length or increasingly specific protein-DNA interactions, making the evolution of sites longer than ∼ 10 bp unlikely on typical eukaryotic speciation timescales. Similarly, evolution converges to the stationary distribution of binding sequences very slowly, making the equilibrium assumption questionable. The availability of longer regulatory sequences in which multiple binding sites can evolve simultaneously, the presence of “pre-sites” or partially decayed old sites in the initial sequence, and biophysical cooperativity between transcription factors, can all facilitate gain of TFBS and reconcile theoretical calculations with timescales inferred from comparative genomics.},
author = {Tugrul, Murat and Paixao, Tiago and Barton, Nicholas H and Tkacik, Gasper},
journal = {PLoS Genetics},
number = {11},
publisher = {Public Library of Science},
title = {{Dynamics of transcription factor binding site evolution}},
doi = {10.1371/journal.pgen.1005639},
volume = {11},
year = {2015},
}
@misc{9712,
author = {Tugrul, Murat and Paixao, Tiago and Barton, Nicholas H and Tkacik, Gasper},
publisher = {Public Library of Science},
title = {{Other fitness models for comparison \& for interacting TFBSs}},
doi = {10.1371/journal.pgen.1005639.s001},
year = {2015},
}
@misc{9718,
author = {Friedlander, Tamar and Mayo, Avraham E. and Tlusty, Tsvi and Alon, Uri},
publisher = {Public Library of Science},
title = {{Supporting information text}},
doi = {10.1371/journal.pcbi.1004055.s001},
year = {2015},
}
@article{1827,
abstract = {Bow-tie or hourglass structure is a common architectural feature found in many biological systems. A bow-tie in a multi-layered structure occurs when intermediate layers have much fewer components than the input and output layers. Examples include metabolism where a handful of building blocks mediate between multiple input nutrients and multiple output biomass components, and signaling networks where information from numerous receptor types passes through a small set of signaling pathways to regulate multiple output genes. Little is known, however, about how bow-tie architectures evolve. Here, we address the evolution of bow-tie architectures using simulations of multi-layered systems evolving to fulfill a given input-output goal. We find that bow-ties spontaneously evolve when the information in the evolutionary goal can be compressed. Mathematically speaking, bow-ties evolve when the rank of the input-output matrix describing the evolutionary goal is deficient. The maximal compression possible (the rank of the goal) determines the size of the narrowest part of the network—that is the bow-tie. A further requirement is that a process is active to reduce the number of links in the network, such as product-rule mutations, otherwise a non-bow-tie solution is found in the evolutionary simulations. This offers a mechanism to understand a common architectural principle of biological systems, and a way to quantitate the effective rank of the goals under which they evolved.},
author = {Friedlander, Tamar and Mayo, Avraham E. and Tlusty, Tsvi and Alon, Uri},
journal = {PLoS Computational Biology},
number = {3},
publisher = {Public Library of Science},
title = {{Evolution of bow-tie architectures in biology}},
doi = {10.1371/journal.pcbi.1004055},
volume = {11},
year = {2015},
}
@misc{9773,
author = {Friedlander, Tamar and Mayo, Avraham E. and Tlusty, Tsvi and Alon, Uri},
publisher = {Public Library of Science},
title = {{Evolutionary simulation code}},
doi = {10.1371/journal.pcbi.1004055.s002},
year = {2015},
}
@inproceedings{1708,
abstract = {It has been long argued that, because of inherent ambiguity and noise, the brain needs to represent uncertainty in the form of probability distributions. The neural encoding of such distributions remains however highly controversial. Here we present a novel circuit model for representing multidimensional real-valued distributions using a spike based spatio-temporal code. Our model combines the computational advantages of the currently competing models for probabilistic codes and exhibits realistic neural responses along a variety of classic measures. Furthermore, the model highlights the challenges associated with interpreting neural activity in relation to behavioral uncertainty and points to alternative population-level approaches for the experimental validation of distributed representations.},
author = {Savin, Cristina and Denève, Sophie},
booktitle = {Advances in Neural Information Processing Systems},
location = {Montreal, Canada},
number = {January},
pages = {2024 -- 2032},
publisher = {Neural Information Processing Systems},
title = {{Spatio-temporal representations of uncertainty in spiking neural networks}},
volume = {3},
year = {2014},
}
@article{1886,
abstract = {Information processing in the sensory periphery is shaped by natural stimulus statistics. In the periphery, a transmission bottleneck constrains performance; thus efficient coding implies that natural signal components with a predictably wider range should be compressed. In a different regime—when sampling limitations constrain performance—efficient coding implies that more resources should be allocated to informative features that are more variable. We propose that this regime is relevant for sensory cortex when it extracts complex features from limited numbers of sensory samples. To test this prediction, we use central visual processing as a model: we show that visual sensitivity for local multi-point spatial correlations, described by dozens of independently-measured parameters, can be quantitatively predicted from the structure of natural images. This suggests that efficient coding applies centrally, where it extends to higher-order sensory features and operates in a regime in which sensitivity increases with feature variability.},
author = {Hermundstad, Ann and Briguglio, John and Conte, Mary and Victor, Jonathan and Balasubramanian, Vijay and Tkacik, Gasper},
journal = {eLife},
number = {November},
publisher = {eLife Sciences Publications},
title = {{Variance predicts salience in central sensory processing}},
doi = {10.7554/eLife.03722},
year = {2014},
}
@article{1896,
abstract = {Biopolymer length regulation is a complex process that involves a large number of biological, chemical, and physical subprocesses acting simultaneously across multiple spatial and temporal scales. An illustrative example important for genomic stability is the length regulation of telomeres - nucleoprotein structures at the ends of linear chromosomes consisting of tandemly repeated DNA sequences and a specialized set of proteins. Maintenance of telomeres is often facilitated by the enzyme telomerase but, particularly in telomerase-free systems, the maintenance of chromosomal termini depends on alternative lengthening of telomeres (ALT) mechanisms mediated by recombination. Various linear and circular DNA structures were identified to participate in ALT, however, dynamics of the whole process is still poorly understood. We propose a chemical kinetics model of ALT with kinetic rates systematically derived from the biophysics of DNA diffusion and looping. The reaction system is reduced to a coagulation-fragmentation system by quasi-steady-state approximation. The detailed treatment of kinetic rates yields explicit formulas for expected size distributions of telomeres that demonstrate the key role played by the J factor, a quantitative measure of bending of polymers. The results are in agreement with experimental data and point out interesting phenomena: an appearance of very long telomeric circles if the total telomere density exceeds a critical value (excess mass) and a nonlinear response of the telomere size distributions to the amount of telomeric DNA in the system. The results can be of general importance for understanding dynamics of telomeres in telomerase-independent systems as this mode of telomere maintenance is similar to the situation in tumor cells lacking telomerase activity. Furthermore, due to its universality, the model may also serve as a prototype of an interaction between linear and circular DNA structures in various settings.},
author = {Kollár, Richard and Boďová, Katarína and Nosek, Jozef and Tomáška, Ľubomír},
journal = {Physical Review E Statistical Nonlinear and Soft Matter Physics},
number = {3},
publisher = {American Physical Society},
title = {{Mathematical model of alternative mechanism of telomere length maintenance}},
doi = {10.1103/PhysRevE.89.032701},
volume = {89},
year = {2014},
}
@article{1909,
abstract = {Summary: Phenotypes are often environmentally dependent, which requires organisms to track environmental change. The challenge for organisms is to construct phenotypes using the most accurate environmental cue. Here, we use a quantitative genetic model of adaptation by additive genetic variance, within- and transgenerational plasticity via linear reaction norms and indirect genetic effects respectively. We show how the relative influence on the eventual phenotype of these components depends on the predictability of environmental change (fast or slow, sinusoidal or stochastic) and the developmental lag τ between when the environment is perceived and when selection acts. We then decompose expected mean fitness into three components (variance load, adaptation and fluctuation load) to study the fitness costs of within- and transgenerational plasticity. A strongly negative maternal effect coefficient m minimizes the variance load, but a strongly positive m minimises the fluctuation load. The adaptation term is maximized closer to zero, with positive or negative m preferred under different environmental scenarios. Phenotypic plasticity is higher when τ is shorter and when the environment changes frequently between seasonal extremes. Expected mean population fitness is highest away from highest observed levels of phenotypic plasticity. Within- and transgenerational plasticity act in concert to deliver well-adapted phenotypes, which emphasizes the need to study both simultaneously when investigating phenotypic evolution.},
author = {Ezard, Thomas and Prizak, Roshan and Hoyle, Rebecca},
journal = {Functional Ecology},
number = {3},
pages = {693 -- 701},
publisher = {Wiley-Blackwell},
title = {{The fitness costs of adaptation via phenotypic plasticity and maternal effects}},
doi = {10.1111/1365-2435.12207},
volume = {28},
year = {2014},
}
@article{1928,
abstract = {In infectious disease epidemiology the basic reproductive ratio, R0, is defined as the average number of new infections caused by a single infected individual in a fully susceptible population. Many models describing competition for hosts between non-interacting pathogen strains in an infinite population lead to the conclusion that selection favors invasion of new strains if and only if they have higher R0 values than the resident. Here we demonstrate that this picture fails in finite populations. Using a simple stochastic SIS model, we show that in general there is no analogous optimization principle. We find that successive invasions may in some cases lead to strains that infect a smaller fraction of the host population, and that mutually invasible pathogen strains exist. In the limit of weak selection we demonstrate that an optimization principle does exist, although it differs from R0 maximization. For strains with very large R0, we derive an expression for this local fitness function and use it to establish a lower bound for the error caused by neglecting stochastic effects. Furthermore, we apply this weak selection limit to investigate the selection dynamics in the presence of a trade-off between the virulence and the transmission rate of a pathogen.},
author = {Humplik, Jan and Hill, Alison and Nowak, Martin},
journal = {Journal of Theoretical Biology},
pages = {149 -- 162},
publisher = {Elsevier},
title = {{Evolutionary dynamics of infectious diseases in finite populations}},
doi = {10.1016/j.jtbi.2014.06.039},
volume = {360},
year = {2014},
}
@article{1931,
abstract = {A wealth of experimental evidence suggests that working memory circuits preferentially represent information that is behaviorally relevant. Still, we are missing a mechanistic account of how these representations come about. Here we provide a simple explanation for a range of experimental findings, in light of prefrontal circuits adapting to task constraints by reward-dependent learning. In particular, we model a neural network shaped by reward-modulated spike-timing dependent plasticity (r-STDP) and homeostatic plasticity (intrinsic excitability and synaptic scaling). We show that the experimentally-observed neural representations naturally emerge in an initially unstructured circuit as it learns to solve several working memory tasks. These results point to a critical, and previously unappreciated, role for reward-dependent learning in shaping prefrontal cortex activity.},
author = {Savin, Cristina and Triesch, Jochen},
journal = {Frontiers in Computational Neuroscience},
number = {MAY},
publisher = {Frontiers Research Foundation},
title = {{Emergence of task-dependent representations in working memory circuits}},
doi = {10.3389/fncom.2014.00057},
volume = {8},
year = {2014},
}
@article{2231,
abstract = {Based on the measurements of noise in gene expression performed during the past decade, it has become customary to think of gene regulation in terms of a two-state model, where the promoter of a gene can stochastically switch between an ON and an OFF state. As experiments are becoming increasingly precise and the deviations from the two-state model start to be observable, we ask about the experimental signatures of complex multistate promoters, as well as the functional consequences of this additional complexity. In detail, we i), extend the calculations for noise in gene expression to promoters described by state transition diagrams with multiple states, ii), systematically compute the experimentally accessible noise characteristics for these complex promoters, and iii), use information theory to evaluate the channel capacities of complex promoter architectures and compare them with the baseline provided by the two-state model. We find that adding internal states to the promoter generically decreases channel capacity, except in certain cases, three of which (cooperativity, dual-role regulation, promoter cycling) we analyze in detail.},
author = {Rieckh, Georg and Tkacik, Gasper},
issn = {00063495},
journal = {Biophysical Journal},
number = {5},
pages = {1194 -- 1204},
publisher = {Biophysical Society},
title = {{Noise and information transmission in promoters with multiple internal states}},
doi = {10.1016/j.bpj.2014.01.014},
volume = {106},
year = {2014},
}
@article{2257,
abstract = {Maximum entropy models are the least structured probability distributions that exactly reproduce a chosen set of statistics measured in an interacting network. Here we use this principle to construct probabilistic models which describe the correlated spiking activity of populations of up to 120 neurons in the salamander retina as it responds to natural movies. Already in groups as small as 10 neurons, interactions between spikes can no longer be regarded as small perturbations in an otherwise independent system; for 40 or more neurons pairwise interactions need to be supplemented by a global interaction that controls the distribution of synchrony in the population. Here we show that such “K-pairwise” models—being systematic extensions of the previously used pairwise Ising models—provide an excellent account of the data. We explore the properties of the neural vocabulary by: 1) estimating its entropy, which constrains the population's capacity to represent visual information; 2) classifying activity patterns into a small set of metastable collective modes; 3) showing that the neural codeword ensembles are extremely inhomogenous; 4) demonstrating that the state of individual neurons is highly predictable from the rest of the population, allowing the capacity for error correction.},
author = {Tkacik, Gasper and Marre, Olivier and Amodei, Dario and Schneidman, Elad and Bialek, William and Berry, Michael},
issn = {1553734X},
journal = {PLoS Computational Biology},
number = {1},
publisher = {Public Library of Science},
title = {{Searching for collective behavior in a large network of sensory neurons}},
doi = {10.1371/journal.pcbi.1003408},
volume = {10},
year = {2014},
}
@article{537,
abstract = {Transgenerational effects are broader than only parental relationships. Despite mounting evidence that multigenerational effects alter phenotypic and life-history traits, our understanding of how they combine to determine fitness is not well developed because of the added complexity necessary to study them. Here, we derive a quantitative genetic model of adaptation to an extraordinary new environment by an additive genetic component, phenotypic plasticity, maternal and grandmaternal effects. We show how, at equilibrium, negative maternal and negative grandmaternal effects maximize expected population mean fitness. We define negative transgenerational effects as those that have a negative effect on trait expression in the subsequent generation, that is, they slow, or potentially reverse, the expected evolutionary dynamic. When maternal effects are positive, negative grandmaternal effects are preferred. As expected under Mendelian inheritance, the grandmaternal effects have a lower impact on fitness than the maternal effects, but this dual inheritance model predicts a more complex relationship between maternal and grandmaternal effects to constrain phenotypic variance and so maximize expected population mean fitness in the offspring.},
author = {Prizak, Roshan and Ezard, Thomas and Hoyle, Rebecca},
journal = {Ecology and Evolution},
number = {15},
pages = {3139 -- 3145},
publisher = {Wiley-Blackwell},
title = {{Fitness consequences of maternal and grandmaternal effects}},
doi = {10.1002/ece3.1150},
volume = {4},
year = {2014},
}
@article{3263,
abstract = {Adaptation in the retina is thought to optimize the encoding of natural light signals into sequences of spikes sent to the brain. While adaptive changes in retinal processing to the variations of the mean luminance level and second-order stimulus statistics have been documented before, no such measurements have been performed when higher-order moments of the light distribution change. We therefore measured the ganglion cell responses in the tiger salamander retina to controlled changes in the second (contrast), third (skew) and fourth (kurtosis) moments of the light intensity distribution of spatially uniform temporally independent stimuli. The skew and kurtosis of the stimuli were chosen to cover the range observed in natural scenes. We quantified adaptation in ganglion cells by studying linear-nonlinear models that capture well the retinal encoding properties across all stimuli. We found that the encoding properties of retinal ganglion cells change only marginally when higher-order statistics change, compared to the changes observed in response to the variation in contrast. By analyzing optimal coding in LN-type models, we showed that neurons can maintain a high information rate without large dynamic adaptation to changes in skew or kurtosis. This is because, for uncorrelated stimuli, spatio-temporal summation within the receptive field averages away non-gaussian aspects of the light intensity distribution.},
author = {Tkacik, Gasper and Ghosh, Anandamohan and Schneidman, Elad and Segev, Ronen},
journal = {PLoS One},
number = {1},
publisher = {Public Library of Science},
title = {{Adaptation to changes in higher-order stimulus statistics in the salamander retina}},
doi = {10.1371/journal.pone.0085841},
volume = {9},
year = {2014},
}
@misc{9752,
abstract = {Redundancies and correlations in the responses of sensory neurons may seem to waste neural resources, but they can also carry cues about structured stimuli and may help the brain to correct for response errors. To investigate the effect of stimulus structure on redundancy in retina, we measured simultaneous responses from populations of retinal ganglion cells presented with natural and artificial stimuli that varied greatly in correlation structure; these stimuli and recordings are publicly available online. Responding to spatio-temporally structured stimuli such as natural movies, pairs of ganglion cells were modestly more correlated than in response to white noise checkerboards, but they were much less correlated than predicted by a non-adapting functional model of retinal response. Meanwhile, responding to stimuli with purely spatial correlations, pairs of ganglion cells showed increased correlations consistent with a static, non-adapting receptive field and nonlinearity. We found that in response to spatio-temporally correlated stimuli, ganglion cells had faster temporal kernels and tended to have stronger surrounds. These properties of individual cells, along with gain changes that opposed changes in effective contrast at the ganglion cell input, largely explained the pattern of pairwise correlations across stimuli where receptive field measurements were possible.},
author = {Simmons, Kristina and Prentice, Jason and Tkacik, Gasper and Homann, Jan and Yee, Heather and Palmer, Stephanie and Nelson, Philip and Balasubramanian, Vijay},
publisher = {Dryad},
title = {{Data from: Transformation of stimulus correlations by the retina}},
doi = {10.5061/dryad.246qg},
year = {2014},
}
@inbook{2413,
abstract = {Progress in understanding the global brain dynamics has remained slow to date in large part because of the highly multiscale nature of brain activity. Indeed, normal brain dynamics is characterized by complex interactions between multiple levels: from the microscopic scale of single neurons to the mesoscopic level of local groups of neurons, and finally to the macroscopic level of the whole brain. Among the most difficult tasks are those of identifying which scales are significant for a given particular function and describing how the scales affect each other. It is important to realize that the scales of time and space are linked together, or even intertwined, and that causal inference is far more ambiguous between than within levels. We approach this problem from the perspective of our recent work on simultaneous recording from micro- and macroelectrodes in the human brain. We propose a physiological description of these multilevel interactions, based on phase–amplitude coupling of neuronal oscillations that operate at multiple frequencies and on different spatial scales. Specifically, the amplitude of the oscillations on a particular spatial scale is modulated by phasic variations in neuronal excitability induced by lower frequency oscillations that emerge on a larger spatial scale. Following this general principle, it is possible to scale up or scale down the multiscale brain dynamics. It is expected that large-scale network oscillations in the low-frequency range, mediating downward effects, may play an important role in attention and consciousness.},
author = {Valderrama, Mario and Botella Soler, Vicente and Le Van Quyen, Michel},
booktitle = {Multiscale Analysis and Nonlinear Dynamics: From Genes to the Brain},
editor = {Pesenson, Misha Meyer},
isbn = {9783527411986},
publisher = {Wiley-VCH},
title = {{Neuronal oscillations scale up and scale down the brain dynamics}},
doi = {10.1002/9783527671632.ch08},
year = {2013},
}
@article{2818,
abstract = {Models of neural responses to stimuli with complex spatiotemporal correlation structure often assume that neurons are selective for only a small number of linear projections of a potentially high-dimensional input. In this review, we explore recent modeling approaches where the neural response depends on the quadratic form of the input rather than on its linear projection, that is, the neuron is sensitive to the local covariance structure of the signal preceding the spike. To infer this quadratic dependence in the presence of arbitrary (e.g., naturalistic) stimulus distribution, we review several inference methods, focusing in particular on two information theory–based approaches (maximization of stimulus energy and of noise entropy) and two likelihood-based approaches (Bayesian spike-triggered covariance and extensions of generalized linear models). We analyze the formal relationship between the likelihood-based and information-based approaches to demonstrate how they lead to consistent inference. We demonstrate the practical feasibility of these procedures by using model neurons responding to a flickering variance stimulus.},
author = {Rajan, Kanaka and Marre, Olivier and Tkacik, Gasper},
journal = {Neural Computation},
number = {7},
pages = {1661 -- 1692},
publisher = {MIT Press},
title = {{Learning quadratic receptive fields from neural responses to natural stimuli}},
doi = {10.1162/NECO_a_00463},
volume = {25},
year = {2013},
}
@article{2850,
abstract = {Recent work emphasizes that the maximum entropy principle provides a bridge between statistical mechanics models for collective behavior in neural networks and experiments on networks of real neurons. Most of this work has focused on capturing the measured correlations among pairs of neurons. Here we suggest an alternative, constructing models that are consistent with the distribution of global network activity, i.e. the probability that K out of N cells in the network generate action potentials in the same small time bin. The inverse problem that we need to solve in constructing the model is analytically tractable, and provides a natural 'thermodynamics' for the network in the limit of large N. We analyze the responses of neurons in a small patch of the retina to naturalistic stimuli, and find that the implied thermodynamics is very close to an unusual critical point, in which the entropy (in proper units) is exactly equal to the energy. © 2013 IOP Publishing Ltd and SISSA Medialab srl.},
author = {Tkacik, Gasper and Marre, Olivier and Mora, Thierry and Amodei, Dario and Berry, Michael and Bialek, William},
journal = {Journal of Statistical Mechanics: Theory and Experiment},
number = {3},
publisher = {IOP Publishing Ltd.},
title = {{The simplest maximum entropy model for collective behavior in a neural network}},
doi = {10.1088/1742-5468/2013/03/P03011},
volume = {2013},
year = {2013},
}
@article{2851,
abstract = {The number of possible activity patterns in a population of neurons grows exponentially with the size of the population. Typical experiments explore only a tiny fraction of the large space of possible activity patterns in the case of populations with more than 10 or 20 neurons. It is thus impossible, in this undersampled regime, to estimate the probabilities with which most of the activity patterns occur. As a result, the corresponding entropy - which is a measure of the computational power of the neural population - cannot be estimated directly. We propose a simple scheme for estimating the entropy in the undersampled regime, which bounds its value from both below and above. The lower bound is the usual 'naive' entropy of the experimental frequencies. The upper bound results from a hybrid approximation of the entropy which makes use of the naive estimate, a maximum entropy fit, and a coverage adjustment. We apply our simple scheme to artificial data, in order to check their accuracy; we also compare its performance to those of several previously defined entropy estimators. We then apply it to actual measurements of neural activity in populations with up to 100 cells. Finally, we discuss the similarities and differences between the proposed simple estimation scheme and various earlier methods. © 2013 IOP Publishing Ltd and SISSA Medialab srl.},
author = {Berry, Michael and Tkacik, Gasper and Dubuis, Julien and Marre, Olivier and da Silveira, Ravá},
journal = {Journal of Statistical Mechanics: Theory and Experiment},
number = {3},
publisher = {IOP Publishing Ltd.},
title = {{A simple method for estimating the entropy of neural activity}},
doi = {10.1088/1742-5468/2013/03/P03015},
volume = {2013},
year = {2013},
}
@article{2861,
abstract = {We consider a two-parameter family of piecewise linear maps in which the moduli of the two slopes take different values. We provide numerical evidence of the existence of some parameter regions in which the Lyapunov exponent and the topological entropy remain constant. Analytical proof of this phenomenon is also given for certain cases. Surprisingly however, the systems with that property are not conjugate as we prove by using kneading theory.},
author = {Botella Soler, Vicente and Oteo, José and Ros, Javier and Glendinning, Paul},
journal = {Journal of Physics A: Mathematical and Theoretical},
number = {12},
publisher = {IOP Publishing Ltd.},
title = {{Lyapunov exponent and topological entropy plateaus in piecewise linear maps}},
doi = {10.1088/1751-8113/46/12/125101},
volume = {46},
year = {2013},
}
@article{2863,
abstract = {Neural populations encode information about their stimulus in a collective fashion, by joint activity patterns of spiking and silence. A full account of this mapping from stimulus to neural activity is given by the conditional probability distribution over neural codewords given the sensory input. For large populations, direct sampling of these distributions is impossible, and so we must rely on constructing appropriate models. We show here that in a population of 100 retinal ganglion cells in the salamander retina responding to temporal white-noise stimuli, dependencies between cells play an important encoding role. We introduce the stimulus-dependent maximum entropy (SDME) model—a minimal extension of the canonical linear-nonlinear model of a single neuron, to a pairwise-coupled neural population. We find that the SDME model gives a more accurate account of single cell responses and in particular significantly outperforms uncoupled models in reproducing the distributions of population codewords emitted in response to a stimulus. We show how the SDME model, in conjunction with static maximum entropy models of population vocabulary, can be used to estimate information-theoretic quantities like average surprise and information transmission in a neural population.},
author = {Granot Atedgi, Einat and Tkacik, Gasper and Segev, Ronen and Schneidman, Elad},
journal = {PLoS Computational Biology},
number = {3},
publisher = {Public Library of Science},
title = {{Stimulus-dependent maximum entropy models of neural population codes}},
doi = {10.1371/journal.pcbi.1002922},
volume = {9},
year = {2013},
}