@article{12081, abstract = {Selection accumulates information in the genome—it guides stochastically evolving populations toward states (genotype frequencies) that would be unlikely under neutrality. This can be quantified as the Kullback–Leibler (KL) divergence between the actual distribution of genotype frequencies and the corresponding neutral distribution. First, we show that this population-level information sets an upper bound on the information at the level of genotype and phenotype, limiting how precisely they can be specified by selection. Next, we study how the accumulation and maintenance of information is limited by the cost of selection, measured as the genetic load or the relative fitness variance, both of which we connect to the control-theoretic KL cost of control. The information accumulation rate is upper bounded by the population size times the cost of selection. This bound is very general, and applies across models (Wright–Fisher, Moran, diffusion) and to arbitrary forms of selection, mutation, and recombination. Finally, the cost of maintaining information depends on how it is encoded: Specifying a single allele out of two is expensive, but one bit encoded among many weakly specified loci (as in a polygenic trait) is cheap.}, author = {Hledik, Michal and Barton, Nicholas H and Tkačik, Gašper}, issn = {1091-6490}, journal = {Proceedings of the National Academy of Sciences}, number = {36}, publisher = {Proceedings of the National Academy of Sciences}, title = {{Accumulation and maintenance of information in evolution}}, doi = {10.1073/pnas.2123152119}, volume = {119}, year = {2022}, } @article{10535, abstract = {Realistic models of biological processes typically involve interacting components on multiple scales, driven by changing environment and inherent stochasticity. Such models are often analytically and numerically intractable. We revisit a dynamic maximum entropy method that combines a static maximum entropy with a quasi-stationary approximation. This allows us to reduce stochastic non-equilibrium dynamics expressed by the Fokker-Planck equation to a simpler low-dimensional deterministic dynamics, without the need to track microscopic details. Although the method has been previously applied to a few (rather complicated) applications in population genetics, our main goal here is to explain and to better understand how the method works. We demonstrate the usefulness of the method for two widely studied stochastic problems, highlighting its accuracy in capturing important macroscopic quantities even in rapidly changing non-stationary conditions. For the Ornstein-Uhlenbeck process, the method recovers the exact dynamics whilst for a stochastic island model with migration from other habitats, the approximation retains high macroscopic accuracy under a wide range of scenarios in a dynamic environment.}, author = {Bod'ová, Katarína and Szep, Eniko and Barton, Nicholas H}, issn = {1553-7358}, journal = {PLoS Computational Biology}, number = {12}, publisher = {Public Library of Science}, title = {{Dynamic maximum entropy provides accurate approximation of structured population dynamics}}, doi = {10.1371/journal.pcbi.1009661}, volume = {17}, year = {2021}, } @unpublished{10912, abstract = {Brain dynamics display collective phenomena as diverse as neuronal oscillations and avalanches. Oscillations are rhythmic, with fluctuations occurring at a characteristic scale, whereas avalanches are scale-free cascades of neural activity. 
Here we show that such antithetic features can coexist in a very generic class of adaptive neural networks. In the simplest yet fully microscopic model from this class we make direct contact with human brain resting-state activity recordings via tractable inference of the model's two essential parameters. The inferred model quantitatively captures the dynamics over a broad range of scales, from single-sensor fluctuations, through collective behaviors of nearly synchronous extreme events on multiple sensors, to neuronal avalanches unfolding over multiple sensors across multiple time-bins. Importantly, the inferred parameters correlate with model-independent signatures of "closeness to criticality", suggesting that the coexistence of scale-specific (neural oscillations) and scale-free (neuronal avalanches) dynamics in brain activity occurs close to a non-equilibrium critical point at the onset of self-sustained oscillations.}, author = {Lombardi, Fabrizio and Pepic, Selver and Shriki, Oren and Tkačik, Gašper and De Martino, Daniele}, pages = {37}, publisher = {arXiv}, title = {{Quantifying the coexistence of neuronal oscillations and avalanches}}, doi = {10.48550/ARXIV.2108.06686}, year = {2021}, } @unpublished{10579, abstract = {We consider a totally asymmetric simple exclusion process (TASEP) consisting of particles on a lattice that require binding by a "token" to move. Using a combination of theory and simulations, we address the following questions: (i) How token binding kinetics affects the current-density relation; (ii) How the current-density relation depends on the scarcity of tokens; (iii) How tokens propagate the effects of locally-imposed disorder (such as a slow site) over the entire lattice; (iv) How a shared pool of tokens couples concurrent TASEPs running on multiple lattices; (v) How our results translate to TASEPs with open boundaries that exchange particles with the reservoir. Since real particle motion (including in systems that inspired the standard TASEP model, e.g., protein synthesis or movement of molecular motors) is often catalyzed, regulated, actuated, or otherwise mediated, the token-driven TASEP dynamics analyzed in this paper should allow for a better understanding of real systems and enable a closer match between TASEP theory and experimental observations.}, author = {Kavcic, Bor and Tkačik, Gašper}, booktitle = {arXiv}, title = {{Token-driven totally asymmetric simple exclusion process}}, doi = {10.48550/arXiv.2112.13558}, year = {2021}, } @article{7463, abstract = {Resting-state brain activity is characterized by the presence of neuronal avalanches showing absence of characteristic size. Such evidence has been interpreted in the context of criticality and associated with the normal functioning of the brain. A distinctive attribute of systems at criticality is the presence of long-range correlations. Thus, to verify the hypothesis that the brain operates close to a critical point and consequently assess deviations from criticality for diagnostic purposes, it is of primary importance to robustly and reliably characterize correlations in resting-state brain activity. Recent works focused on the analysis of narrow-band electroencephalography (EEG) and magnetoencephalography (MEG) signal amplitude envelope, showing evidence of long-range temporal correlations (LRTC) in neural oscillations.
However, brain activity is a broadband phenomenon, and a significant piece of information useful to precisely discriminate between normal (critical) and pathological (non-critical) behavior may be encoded in the broadband spatio-temporal cortical dynamics. Here we propose to characterize the temporal correlations in the broadband brain activity through the lens of neuronal avalanches. To this end, we consider resting-state EEG and long-term MEG recordings, extract the corresponding neuronal avalanche sequences, and study their temporal correlations. We demonstrate that the broadband resting-state brain activity consistently exhibits long-range power-law correlations in both EEG and MEG recordings, with similar values of the scaling exponents. Importantly, although we observe that the avalanche size distribution depends on scale parameters, scaling exponents characterizing long-range correlations are quite robust. In particular, they are independent of the temporal binning (scale of analysis), indicating that our analysis captures intrinsic characteristics of the underlying dynamics. Because neuronal avalanches constitute a fundamental feature of neural systems with universal characteristics, the proposed approach may serve as a general, systems- and experiment-independent procedure to infer the existence of underlying long-range correlations in extended neural systems, and identify pathological behaviors in the complex spatio-temporal interplay of cortical rhythms.}, author = {Lombardi, Fabrizio and Shriki, Oren and Herrmann, Hans J and de Arcangelis, Lucilla}, issn = {1872-8286}, journal = {Neurocomputing}, pages = {657--666}, publisher = {Elsevier}, title = {{Long-range temporal correlations in the broadband resting state activity of the human brain revealed by neuronal avalanches}}, doi = {10.1016/j.neucom.2020.05.126}, volume = {461}, year = {2021}, } @article{9226, abstract = {Half a century after Lewis Wolpert's seminal conceptual advance on how cellular fates distribute in space, we provide a brief historical perspective on how the concept of positional information emerged and influenced the field of developmental biology and beyond. We focus on a modern interpretation of this concept in terms of information theory, largely centered on its application to cell specification in the early Drosophila embryo. We argue that a true physical variable (position) is encoded in local concentrations of patterning molecules, that this mapping is stochastic, and that the processes by which positions and corresponding cell fates are determined based on these concentrations need to take such stochasticity into account. With this approach, we shift the focus from biological mechanisms, molecules, genes and pathways to quantitative systems-level questions: where does positional information reside, how is it transformed and accessed during development, and what fundamental limits is it subject to?}, author = {Tkačik, Gašper and Gregor, Thomas}, issn = {1477-9129}, journal = {Development}, number = {2}, publisher = {The Company of Biologists}, title = {{The many bits of positional information}}, doi = {10.1242/dev.176065}, volume = {148}, year = {2021}, } @article{9439, abstract = {The ability to adapt to changes in stimulus statistics is a hallmark of sensory systems. Here, we developed a theoretical framework that can account for the dynamics of adaptation from an information processing perspective.
We use this framework to optimize and analyze adaptive sensory codes, and we show that codes optimized for stationary environments can suffer from prolonged periods of poor performance when the environment changes. To mitigate the adverse effects of these environmental changes, sensory systems must navigate tradeoffs between the ability to accurately encode incoming stimuli and the ability to rapidly detect and adapt to changes in the distribution of these stimuli. We derive families of codes that balance these objectives, and we demonstrate their close match to experimentally observed neural dynamics during mean and variance adaptation. Our results provide a unifying perspective on adaptation across a range of sensory systems, environments, and sensory tasks.}, author = {Mlynarski, Wiktor F and Hermundstad, Ann M.}, issn = {1546-1726}, journal = {Nature Neuroscience}, pages = {998--1009}, publisher = {Springer Nature}, title = {{Efficient and adaptive sensory codes}}, doi = {10.1038/s41593-021-00846-0}, volume = {24}, year = {2021}, } @article{9822, abstract = {Attachment of adhesive molecules on cell culture surfaces to restrict cell adhesion to defined areas and shapes has been vital for the progress of in vitro research. In currently existing patterning methods, a combination of pattern properties such as stability, precision, specificity, high-throughput outcome, and spatiotemporal control is highly desirable but challenging to achieve. Here, we introduce a versatile and high-throughput covalent photoimmobilization technique, comprising a light-dose-dependent patterning step and a subsequent functionalization of the pattern via click chemistry. This two-step process is feasible on arbitrary surfaces and allows for generation of sustainable patterns and gradients. The method is validated in different biological systems by patterning adhesive ligands on cell-repellent surfaces, thereby constraining the growth and migration of cells to the designated areas. We then implement a sequential photopatterning approach by adding a second switchable patterning step, allowing for spatiotemporal control over two distinct surface patterns. As a proof of concept, we reconstruct the dynamics of the tip/stalk cell switch during angiogenesis. Our results show that the spatiotemporal control provided by our “sequential photopatterning” system is essential for mimicking dynamic biological processes and that our innovative approach has great potential for further applications in cell science.}, author = {Zisis, Themistoklis and Schwarz, Jan and Balles, Miriam and Kretschmer, Maibritt and Nemethova, Maria and Chait, Remy P and Hauschild, Robert and Lange, Janina and Guet, Calin C and Sixt, Michael K and Zahler, Stefan}, issn = {1944-8252}, journal = {ACS Applied Materials and Interfaces}, number = {30}, pages = {35545--35560}, publisher = {American Chemical Society}, title = {{Sequential and switchable patterning for studying cellular processes under spatiotemporal control}}, doi = {10.1021/acsami.1c09850}, volume = {13}, year = {2021}, } @article{9828, abstract = {Amplitude demodulation is a classical operation used in signal processing. For a long time, its effective applications in practice have been limited to narrowband signals. In this work, we generalize amplitude demodulation to wideband signals. We pose demodulation as a recovery problem of an oversampled corrupted signal and introduce special iterative schemes belonging to the family of alternating projection algorithms to solve it.
Sensibly chosen structural assumptions on the demodulation outputs reveal the high inferential accuracy of the method over a rich set of relevant signals. This new approach surpasses current state-of-the-art demodulation techniques applicable to wideband signals in computational efficiency by up to several orders of magnitude, with no sacrifice in quality. Such performance opens the door for applications of the amplitude demodulation procedure in new contexts. In particular, the new method makes online and large-scale offline data processing feasible, including the calculation of modulator-carrier pairs in higher dimensions and poor sampling conditions, independent of the signal bandwidth. We illustrate the utility and specifics of applications of the new method in practice by using natural speech and synthetic signals.}, author = {Gabrielaitis, Mantas}, issn = {1941-0476}, journal = {IEEE Transactions on Signal Processing}, pages = {4039--4054}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Fast and accurate amplitude demodulation of wideband signals}}, doi = {10.1109/TSP.2021.3087899}, volume = {69}, year = {2021}, } @article{9362, abstract = {A central goal in systems neuroscience is to understand the functions performed by neural circuits. Previous top-down models addressed this question by comparing the behaviour of an ideal model circuit, optimised to perform a given function, with neural recordings. However, this requires guessing in advance what function is being performed, which may not be possible for many neural systems. To address this, we propose an inverse reinforcement learning (RL) framework for inferring the function performed by a neural network from data. We assume that the responses of each neuron in a network are optimised so as to drive the network towards ‘rewarded’ states that are desirable for performing a given function. We then show how one can use inverse RL to infer the reward function optimised by the network from observing its responses. This inferred reward function can be used to predict how the neural network should adapt its dynamics to perform the same function when the external environment or network structure changes. This could lead to theoretical predictions about how neural network dynamics adapt to deal with cell death and/or varying sensory stimulus statistics.}, author = {Chalk, Matthew J and Tkačik, Gašper and Marre, Olivier}, issn = {1932-6203}, journal = {PLoS ONE}, number = {4}, publisher = {Public Library of Science}, title = {{Inferring the function performed by a recurrent neural network}}, doi = {10.1371/journal.pone.0248940}, volume = {16}, year = {2021}, } @article{8997, abstract = {Phenomenological relations such as Ohm’s or Fourier’s law have a venerable history in physics but are still scarce in biology. This situation restrains predictive theory. Here, we build on bacterial “growth laws,” which capture physiological feedback between translation and cell growth, to construct a minimal biophysical model for the combined action of ribosome-targeting antibiotics. Our model predicts drug interactions like antagonism or synergy solely from responses to individual drugs. We provide analytical results for limiting cases, which agree well with numerical results. We systematically refine the model by including direct physical interactions of different antibiotics on the ribosome.
In a limiting case, our model provides a mechanistic underpinning for recent predictions of higher-order interactions that were derived using entropy maximization. We further refine the model to include the effects of antibiotics that mimic starvation and the presence of resistance genes. We describe the impact of a starvation-mimicking antibiotic on drug interactions analytically and verify it experimentally. Our extended model suggests a change in the type of drug interaction that depends on the strength of resistance, which challenges established rescaling paradigms. We experimentally show that the presence of unregulated resistance genes can lead to altered drug interactions, which agrees with the prediction of the model. While minimal, the model is readily adaptable and opens the door to predicting second- and higher-order interactions in a broad range of biological systems.}, author = {Kavcic, Bor and Tkačik, Gašper and Bollenbach, Tobias}, issn = {1553-7358}, journal = {PLOS Computational Biology}, keywords = {Modelling and Simulation, Genetics, Molecular Biology, Antibiotics, Drug interactions}, publisher = {Public Library of Science}, title = {{Minimal biophysical model of combined antibiotic action}}, doi = {10.1371/journal.pcbi.1008529}, volume = {17}, year = {2021}, } @article{9283, abstract = {Gene expression levels are influenced by multiple coexisting molecular mechanisms. Some of these interactions, such as those of transcription factors and promoters, have been studied extensively. However, predicting phenotypes of gene regulatory networks (GRNs) remains a major challenge. Here, we use a well-defined synthetic GRN to study in Escherichia coli how network phenotypes depend on local genetic context, i.e., the genetic neighborhood of a transcription factor and its relative position. We show that one GRN with fixed topology can display not only quantitatively but also qualitatively different phenotypes, depending solely on the local genetic context of its components. Transcriptional read-through is the main molecular mechanism that places one transcriptional unit (TU) within two separate regulons without the need for complex regulatory sequences. We propose that the relative order of individual TUs, with its potential for combinatorial complexity, plays an important role in shaping phenotypes of GRNs.}, author = {Nagy-Staron, Anna A and Tomasek, Kathrin and Caruso Carter, Caroline and Sonnleitner, Elisabeth and Kavcic, Bor and Paixão, Tiago and Guet, Calin C}, issn = {2050-084X}, journal = {eLife}, keywords = {Genetics and Molecular Biology}, publisher = {eLife Sciences Publications}, title = {{Local genetic context shapes the function of a gene regulatory network}}, doi = {10.7554/elife.65993}, volume = {10}, year = {2021}, } @article{7553, abstract = {Normative theories and statistical inference provide complementary approaches for the study of biological systems. A normative theory postulates that organisms have adapted to efficiently solve essential tasks, and proceeds to mathematically work out testable consequences of such optimality; parameters that maximize the hypothesized organismal function can be derived ab initio, without reference to experimental data. In contrast, statistical inference focuses on efficient utilization of data to learn model parameters, without reference to any a priori notion of biological function, utility, or fitness. Traditionally, these two approaches were developed independently and applied separately.
Here we unify them in a coherent Bayesian framework that embeds a normative theory into a family of maximum-entropy “optimization priors.” This family defines a smooth interpolation between a data-rich inference regime (characteristic of “bottom-up” statistical models) and a data-limited ab initio prediction regime (characteristic of “top-down” normative theory). We demonstrate the applicability of our framework using data from the visual cortex, and argue that the flexibility it affords is essential to address a number of fundamental challenges relating to inference and prediction in complex, high-dimensional biological problems.}, author = {Mlynarski, Wiktor F and Hledik, Michal and Sokolowski, Thomas R and Tkačik, Gašper}, journal = {Neuron}, number = {7}, pages = {1227--1241.e5}, publisher = {Cell Press}, title = {{Statistical analysis and optimality of neural systems}}, doi = {10.1016/j.neuron.2021.01.020}, volume = {109}, year = {2021}, } @unpublished{10077, abstract = {Although much is known about how single neurons in the hippocampus represent an animal’s position, how cell-cell interactions contribute to spatial coding remains poorly understood. Using a novel statistical estimator and theoretical modeling, both developed in the framework of maximum entropy models, we reveal highly structured cell-to-cell interactions whose statistics depend on familiar vs. novel environment. In both conditions the circuit interactions optimize the encoding of spatial information, but for regimes that differ in the signal-to-noise ratio of their spatial inputs. Moreover, the topology of the interactions facilitates linear decodability, making the information easy to read out by downstream circuits. These findings suggest that the efficient coding hypothesis is not applicable only to individual neuron properties in the sensory periphery, but also to neural interactions in the central brain.}, author = {Nardin, Michele and Csicsvari, Jozsef L and Tkačik, Gašper and Savin, Cristina}, booktitle = {bioRxiv}, publisher = {Cold Spring Harbor Laboratory}, title = {{The structure of hippocampal CA1 interactions optimizes spatial coding across experience}}, doi = {10.1101/2021.09.28.460602}, year = {2021}, } @article{8105, abstract = {Physical and biological systems often exhibit intermittent dynamics with bursts or avalanches (active states) characterized by power-law size and duration distributions. These emergent features are typical of systems at the critical point of continuous phase transitions, and have led to the hypothesis that such systems may self-organize at criticality, i.e., without any fine tuning of parameters. Since the introduction of the Bak-Tang-Wiesenfeld (BTW) model, the paradigm of self-organized criticality (SOC) has been very fruitful for the analysis of emergent collective behaviors in a number of systems, including the brain. Although considerable effort has been devoted to identifying and modeling scaling features of burst and avalanche statistics, dynamical aspects related to the temporal organization of bursts often remain poorly understood or controversial. Of crucial importance to understand the mechanisms responsible for emergent behaviors is the relationship between active and quiet periods, and the nature of the correlations. Here we investigate the dynamics of active (θ-bursts) and quiet states (δ-bursts) in brain activity during the sleep-wake cycle.
We show that power-law (θ, active phase) and exponential-like (δ, quiescent phase) duration distributions, typical of SOC, jointly emerge with power-law temporal correlations and anti-correlated coupling between active and quiet states. Importantly, we demonstrate that such temporal organization shares important similarities with earthquake dynamics, and propose that specific power-law correlations and coupling between active and quiet states are distinctive characteristics of a class of systems with self-organization at criticality.}, author = {Lombardi, Fabrizio and Wang, Jilin W.J.L. and Zhang, Xiyun and Ivanov, Plamen Ch}, issn = {2100-014X}, journal = {EPJ Web of Conferences}, publisher = {EDP Sciences}, title = {{Power-law correlations and coupling of active and quiet states underlie a class of complex systems with self-organization at criticality}}, doi = {10.1051/epjconf/202023000005}, volume = {230}, year = {2020}, } @article{7490, abstract = {In plants, clathrin-mediated endocytosis (CME) represents the major route for cargo internalisation from the cell surface. It has been assumed to operate in an evolutionarily conserved manner as in yeast and animals. Here we report characterisation of the ultrastructure, dynamics and mechanisms of plant CME, as enabled by our advances in electron microscopy and quantitative live imaging techniques. Arabidopsis CME appears to follow the constant curvature model, and the bona fide CME population generates vesicles of a predominantly hexagonal-basket type, larger and with faster kinetics than in other models. Contrary to the existing paradigm, actin is dispensable for CME events at the plasma membrane but plays a unique role in collecting endocytic vesicles, sorting of internalised cargos and directional endosome movement, which itself actively promotes CME events. Internalised vesicles display a strongly delayed and sequential uncoating. These unique features highlight the independent evolution of the plant CME mechanism during the autonomous rise of multicellularity in eukaryotes.}, author = {Narasimhan, Madhumitha and Johnson, Alexander J and Prizak, Roshan and Kaufmann, Walter and Tan, Shutang and Casillas Perez, Barbara E and Friml, Jiří}, issn = {2050-084X}, journal = {eLife}, publisher = {eLife Sciences Publications}, title = {{Evolutionarily unique mechanistic framework of clathrin-mediated endocytosis in plants}}, doi = {10.7554/eLife.52067}, volume = {9}, year = {2020}, } @misc{9779, author = {Grah, Rok and Friedlander, Tamar}, publisher = {Public Library of Science}, title = {{Distribution of crosstalk values}}, doi = {10.1371/journal.pcbi.1007642.s003}, year = {2020}, } @misc{9776, author = {Grah, Rok and Friedlander, Tamar}, publisher = {Public Library of Science}, title = {{Supporting information}}, doi = {10.1371/journal.pcbi.1007642.s001}, year = {2020}, } @article{7656, abstract = {We propose that correlations among neurons are generically strong enough to organize neural activity patterns into a discrete set of clusters, which can each be viewed as a population codeword. Our reasoning starts with the analysis of retinal ganglion cell data using maximum entropy models, showing that the population is robustly in a frustrated, marginally sub-critical, or glassy, state. This leads to an argument that neural populations in many other brain areas might share this structure. Next, we use latent variable models to show that this glassy state possesses well-defined clusters of neural activity.
Clusters have three appealing properties: (i) clusters exhibit error correction, i.e., they are reproducibly elicited by the same stimulus despite variability at the level of constituent neurons; (ii) clusters encode qualitatively different visual features than their constituent neurons; and (iii) clusters can be learned by downstream neural circuits in an unsupervised fashion. We hypothesize that these properties give rise to a “learnable” neural code which the cortical hierarchy uses to extract increasingly complex features without supervision or reinforcement.}, author = {Berry, Michael J. and Tkačik, Gašper}, issn = {1662-5188}, journal = {Frontiers in Computational Neuroscience}, publisher = {Frontiers}, title = {{Clustering of neural activity: A design principle for population codes}}, doi = {10.3389/fncom.2020.00020}, volume = {14}, year = {2020}, } @article{8698, abstract = {The brain represents and reasons probabilistically about complex stimuli and motor actions using a noisy, spike-based neural code. A key building block for such neural computations, as well as the basis for supervised and unsupervised learning, is the ability to estimate the surprise or likelihood of incoming high-dimensional neural activity patterns. Despite progress in statistical modeling of neural responses and deep learning, current approaches either do not scale to large neural populations or cannot be implemented using biologically realistic mechanisms. Inspired by the sparse and random connectivity of real neuronal circuits, we present a model for neural codes that accurately estimates the likelihood of individual spiking patterns and has a straightforward, scalable, efficient, learnable, and realistic neural implementation. This model’s performance on simultaneously recorded spiking activity of >100 neurons in the monkey visual and prefrontal cortices is comparable with or better than that of state-of-the-art models. Importantly, the model can be learned using a small number of samples and using a local learning rule that utilizes noise intrinsic to neural circuits. Slower, structural changes in random connectivity, consistent with rewiring and pruning processes, further improve the efficiency and sparseness of the resulting neural representations. Our results merge insights from neuroanatomy, machine learning, and theoretical neuroscience to suggest random sparse connectivity as a key design principle for neuronal computation.}, author = {Maoz, Ori and Tkačik, Gašper and Esteki, Mohamad Saleh and Kiani, Roozbeh and Schneidman, Elad}, issn = {1091-6490}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, number = {40}, pages = {25066--25073}, publisher = {National Academy of Sciences}, title = {{Learning probabilistic neural representations with randomly connected circuits}}, doi = {10.1073/pnas.1912804117}, volume = {117}, year = {2020}, }