% Normalized bibliography.
% Fixes applied: page ranges use "--" (no spaces); misplaced month data moved
% from "number" into month macros; @unpublished given its required "note";
% NeurIPS @inproceedings entries given the required "booktitle"; LaTeX accent
% escapes mistakenly used as apostrophes repaired; location typos corrected;
% author-name spelling unified (Tkačik, Gašper); publisher name corrected to
% "Society for Neuroscience"; ISSN hyphenated. Double-braced titles are kept
% as the file's existing convention.

@article{14656,
  abstract  = {Although much is known about how single neurons in the hippocampus represent an animal's position, how circuit interactions contribute to spatial coding is less well understood. Using a novel statistical estimator and theoretical modeling, both developed in the framework of maximum entropy models, we reveal highly structured CA1 cell-cell interactions in male rats during open field exploration. The statistics of these interactions depend on whether the animal is in a familiar or novel environment. In both conditions the circuit interactions optimize the encoding of spatial information, but for regimes that differ in the informativeness of their spatial inputs. This structure facilitates linear decodability, making the information easy to read out by downstream circuits. Overall, our findings suggest that the efficient coding hypothesis is not only applicable to individual neuron properties in the sensory periphery, but also to neural interactions in the central brain.},
  author    = {Nardin, Michele and Csicsvari, Jozsef L. and Tkačik, Gašper and Savin, Cristina},
  issn      = {1529-2401},
  journal   = {The Journal of Neuroscience},
  number    = {48},
  pages     = {8140--8156},
  publisher = {Society for Neuroscience},
  title     = {{The structure of hippocampal CA1 interactions optimizes spatial coding across experience}},
  doi       = {10.1523/JNEUROSCI.0194-23.2023},
  volume    = {43},
  year      = {2023},
}

@unpublished{10077,
  abstract  = {Although much is known about how single neurons in the hippocampus represent an animal’s position, how cell-cell interactions contribute to spatial coding remains poorly understood. Using a novel statistical estimator and theoretical modeling, both developed in the framework of maximum entropy models, we reveal highly structured cell-to-cell interactions whose statistics depend on familiar vs. novel environment. In both conditions the circuit interactions optimize the encoding of spatial information, but for regimes that differ in the signal-to-noise ratio of their spatial inputs. Moreover, the topology of the interactions facilitates linear decodability, making the information easy to read out by downstream circuits. These findings suggest that the efficient coding hypothesis is not applicable only to individual neuron properties in the sensory periphery, but also to neural interactions in the central brain.},
  author    = {Nardin, Michele and Csicsvari, Jozsef L. and Tkačik, Gašper and Savin, Cristina},
  booktitle = {bioRxiv},
  note      = {bioRxiv preprint},
  publisher = {Cold Spring Harbor Laboratory},
  title     = {{The structure of hippocampal CA1 interactions optimizes spatial coding across experience}},
  doi       = {10.1101/2021.09.28.460602},
  year      = {2021},
}

@article{730,
  abstract  = {Neural responses are highly structured, with population activity restricted to a small subset of the astronomical range of possible activity patterns. Characterizing these statistical regularities is important for understanding circuit computation, but challenging in practice. Here we review recent approaches based on the maximum entropy principle used for quantifying collective behavior in neural activity. We highlight recent models that capture population-level statistics of neural data, yielding insights into the organization of the neural code and its biological substrate. Furthermore, the MaxEnt framework provides a general recipe for constructing surrogate ensembles that preserve aspects of the data, but are otherwise maximally unstructured. This idea can be used to generate a hierarchy of controls against which rigorous statistical tests are possible.},
  author    = {Savin, Cristina and Tkačik, Gašper},
  issn      = {0959-4388},
  journal   = {Current Opinion in Neurobiology},
  pages     = {120--126},
  publisher = {Elsevier},
  title     = {{Maximum entropy models as a tool for building precise neural controls}},
  doi       = {10.1016/j.conb.2017.08.001},
  volume    = {46},
  year      = {2017},
}

@inproceedings{1105,
  abstract  = {Jointly characterizing neural responses in terms of several external variables promises novel insights into circuit function, but remains computationally prohibitive in practice. Here we use gaussian process (GP) priors and exploit recent advances in fast GP inference and learning based on Kronecker methods, to efficiently estimate multidimensional nonlinear tuning functions. Our estimator require considerably less data than traditional methods and further provides principled uncertainty estimates. We apply these tools to hippocampal recordings during open field exploration and use them to characterize the joint dependence of CA1 responses on the position of the animal and several other variables, including the animal's speed, direction of motion, and network oscillations. Our results provide an unprecedentedly detailed quantification of the tuning of hippocampal neurons. The model's generality suggests that our approach can be used to estimate neural response properties in other brain regions.},
  author    = {Savin, Cristina and Tkačik, Gašper},
  booktitle = {Advances in Neural Information Processing Systems},
  location  = {Barcelona, Spain},
  pages     = {3610--3618},
  publisher = {Neural Information Processing Systems},
  title     = {{Estimating nonlinear neural response functions using GP priors and Kronecker methods}},
  volume    = {29},
  year      = {2016},
}

@inproceedings{948,
  abstract  = {Experience constantly shapes neural circuits through a variety of plasticity mechanisms. While the functional roles of some plasticity mechanisms are well-understood, it remains unclear how changes in neural excitability contribute to learning. Here, we develop a normative interpretation of intrinsic plasticity (IP) as a key component of unsupervised learning. We introduce a novel generative mixture model that accounts for the class-specific statistics of stimulus intensities, and we derive a neural circuit that learns the input classes and their intensities. We will analytically show that inference and learning for our generative model can be achieved by a neural circuit with intensity-sensitive neurons equipped with a specific form of IP. Numerical experiments verify our analytical derivations and show robust behavior for artificial and natural stimuli. Our results link IP to non-trivial input statistics, in particular the statistics of stimulus intensities for classes to which a neuron is sensitive. More generally, our work paves the way toward new classification algorithms that are robust to intensity variations.},
  author    = {Monk, Travis and Savin, Cristina and Lücke, Jörg},
  booktitle = {Advances in Neural Information Processing Systems},
  location  = {Barcelona, Spain},
  pages     = {4285--4293},
  publisher = {Neural Information Processing Systems},
  title     = {{Neurons equipped with intrinsic plasticity learn stimulus intensity statistics}},
  volume    = {29},
  year      = {2016},
}

@article{1564,
  author    = {Gilson, Matthieu and Savin, Cristina and Zenke, Friedemann},
  journal   = {Frontiers in Computational Neuroscience},
  number    = {11},
  publisher = {Frontiers Research Foundation},
  title     = {{Editorial: Emergent neural computation from the interaction of different forms of plasticity}},
  doi       = {10.3389/fncom.2015.00145},
  volume    = {9},
  year      = {2015},
}

@inproceedings{1708,
  abstract  = {It has been long argued that, because of inherent ambiguity and noise, the brain needs to represent uncertainty in the form of probability distributions. The neural encoding of such distributions remains however highly controversial. Here we present a novel circuit model for representing multidimensional real-valued distributions using a spike based spatio-temporal code. Our model combines the computational advantages of the currently competing models for probabilistic codes and exhibits realistic neural responses along a variety of classic measures. Furthermore, the model highlights the challenges associated with interpreting neural activity in relation to behavioral uncertainty and points to alternative population-level approaches for the experimental validation of distributed representations.},
  author    = {Savin, Cristina and Denève, Sophie},
  booktitle = {Advances in Neural Information Processing Systems},
  location  = {Montreal, Canada},
  month     = jan,
  pages     = {2024--2032},
  publisher = {Neural Information Processing Systems},
  title     = {{Spatio-temporal representations of uncertainty in spiking neural networks}},
  volume    = {3},
  year      = {2014},
}

@article{1931,
  abstract  = {A wealth of experimental evidence suggests that working memory circuits preferentially represent information that is behaviorally relevant. Still, we are missing a mechanistic account of how these representations come about. Here we provide a simple explanation for a range of experimental findings, in light of prefrontal circuits adapting to task constraints by reward-dependent learning. In particular, we model a neural network shaped by reward-modulated spike-timing dependent plasticity (r-STDP) and homeostatic plasticity (intrinsic excitability and synaptic scaling). We show that the experimentally-observed neural representations naturally emerge in an initially unstructured circuit as it learns to solve several working memory tasks. These results point to a critical, and previously unappreciated, role for reward-dependent learning in shaping prefrontal cortex activity.},
  author    = {Savin, Cristina and Triesch, Jochen},
  journal   = {Frontiers in Computational Neuroscience},
  month     = may,
  publisher = {Frontiers Research Foundation},
  title     = {{Emergence of task-dependent representations in working memory circuits}},
  doi       = {10.3389/fncom.2014.00057},
  volume    = {8},
  year      = {2014},
}