@article{1197,
  author    = {Prentice, Jason and Marre, Olivier and Ioffe, Mark and Loback, Adrianna and Tkačik, Gašper and Berry, Michael},
  title     = {Error-robust modes of the retinal population code},
  journal   = {PLoS Computational Biology},
  volume    = {12},
  number    = {11},
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pcbi.1005148},
  year      = {2016},
  abstract  = {Across the nervous system, certain population spiking patterns are observed far more frequently than others. A hypothesis about this structure is that these collective activity patterns function as population codewords–collective modes–carrying information distinct from that of any single cell. We investigate this phenomenon in recordings of ∼150 retinal ganglion cells, the retina’s output. We develop a novel statistical model that decomposes the population response into modes; it predicts the distribution of spiking activity in the ganglion cell population with high accuracy. We found that the modes represent localized features of the visual stimulus that are distinct from the features represented by single neurons. Modes form clusters of activity states that are readily discriminated from one another. When we repeated the same visual stimulus, we found that the same mode was robustly elicited. These results suggest that retinal ganglion cells’ collective signaling is endowed with a form of error-correcting code–a principle that may hold in brain areas beyond retina.},
}

@inproceedings{948,
  author    = {Monk, Travis and Savin, Cristina and Lücke, Jörg},
  title     = {Neurons equipped with intrinsic plasticity learn stimulus intensity statistics},
  booktitle = {Advances in Neural Information Processing Systems},
  volume    = {29},
  pages     = {4285--4293},
  publisher = {Neural Information Processing Systems},
  location  = {Barcelona, Spain},
  year      = {2016},
  abstract  = {Experience constantly shapes neural circuits through a variety of plasticity mechanisms. While the functional roles of some plasticity mechanisms are well-understood, it remains unclear how changes in neural excitability contribute to learning. Here, we develop a normative interpretation of intrinsic plasticity (IP) as a key component of unsupervised learning. We introduce a novel generative mixture model that accounts for the class-specific statistics of stimulus intensities, and we derive a neural circuit that learns the input classes and their intensities. We will analytically show that inference and learning for our generative model can be achieved by a neural circuit with intensity-sensitive neurons equipped with a specific form of IP. Numerical experiments verify our analytical derivations and show robust behavior for artificial and natural stimuli. Our results link IP to non-trivial input statistics, in particular the statistics of stimulus intensities for classes to which a neuron is sensitive. More generally, our work paves the way toward new classification algorithms that are robust to intensity variations.},
}

@article{1270,
  author    = {Hillenbrand, Patrick and Gerland, Ulrich and Tkačik, Gašper},
  title     = {Beyond the {French} flag model: Exploiting spatial and gene regulatory interactions for positional information},
  journal   = {PLoS One},
  volume    = {11},
  number    = {9},
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pone.0163628},
  year      = {2016},
  abstract  = {A crucial step in the early development of multicellular organisms involves the establishment of spatial patterns of gene expression which later direct proliferating cells to take on different cell fates. These patterns enable the cells to infer their global position within a tissue or an organism by reading out local gene expression levels. The patterning system is thus said to encode positional information, a concept that was formalized recently in the framework of information theory. Here we introduce a toy model of patterning in one spatial dimension, which can be seen as an extension of Wolpert's paradigmatic "French Flag" model, to patterning by several interacting, spatially coupled genes subject to intrinsic and extrinsic noise. Our model, a variant of an Ising spin system, allows us to systematically explore expression patterns that optimally encode positional information. We find that optimal patterning systems use positional cues, as in the French Flag model, together with gene-gene interactions to generate combinatorial codes for position which we call "Counter" patterns. Counter patterns can also be stabilized against noise and variations in system size or morphogen dosage by longer-range spatial interactions of the type invoked in the Turing model. The simple setup proposed here qualitatively captures many of the experimentally observed properties of biological patterning systems and allows them to be studied in a single, theoretically consistent framework.},
}

@misc{9870,
  author    = {Hillenbrand, Patrick and Gerland, Ulrich and Tkačik, Gašper},
  title     = {Computation of positional information in an {Ising} model},
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pone.0163628.s002},
  year      = {2016},
  abstract  = {The effect of noise in the input field on an Ising model is approximated. Furthermore, methods to compute positional information in an Ising model by transfer matrices and Monte Carlo sampling are outlined.},
}

@misc{9869,
  author    = {Hillenbrand, Patrick and Gerland, Ulrich and Tkačik, Gašper},
  title     = {Error bound on an estimator of position},
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pone.0163628.s001},
  year      = {2016},
  abstract  = {A lower bound on the error of a positional estimator with limited positional information is derived.},
}