% Cleaned export: one field per line, titles un-double-braced so bibliography
% styles can apply their own casing, ISSNs hyphenated (NNNN-NNNC).
% Citation keys are left untouched so existing \cite commands keep working.

@article{7490,
  abstract  = {In plants, clathrin mediated endocytosis (CME) represents the major route for cargo internalisation from the cell surface. It has been assumed to operate in an evolutionary conserved manner as in yeast and animals. Here we report characterisation of ultrastructure, dynamics and mechanisms of plant CME as allowed by our advancement in electron microscopy and quantitative live imaging techniques. Arabidopsis CME appears to follow the constant curvature model and the bona fide CME population generates vesicles of a predominantly hexagonal-basket type; larger and with faster kinetics than in other models. Contrary to the existing paradigm, actin is dispensable for CME events at the plasma membrane but plays a unique role in collecting endocytic vesicles, sorting of internalised cargos and directional endosome movement that itself actively promote CME events. Internalized vesicles display a strongly delayed and sequential uncoating. These unique features highlight the independent evolution of the plant CME mechanism during the autonomous rise of multicellularity in eukaryotes.},
  author    = {Narasimhan, Madhumitha and Johnson, Alexander J and Prizak, Roshan and Kaufmann, Walter and Tan, Shutang and Casillas Perez, Barbara E and Friml, Jiří},
  issn      = {2050-084X},
  journal   = {eLife},
  publisher = {eLife Sciences Publications},
  title     = {Evolutionarily unique mechanistic framework of clathrin-mediated endocytosis in plants},
  doi       = {10.7554/eLife.52067},
  volume    = {9},
  year      = {2020},
}

@misc{9779,
  author    = {Grah, Rok and Friedlander, Tamar},
  publisher = {Public Library of Science},
  title     = {Distribution of crosstalk values},
  doi       = {10.1371/journal.pcbi.1007642.s003},
  year      = {2020},
}

@misc{9776,
  author    = {Grah, Rok and Friedlander, Tamar},
  publisher = {Public Library of Science},
  title     = {Supporting information},
  doi       = {10.1371/journal.pcbi.1007642.s001},
  year      = {2020},
}

@article{7656,
  abstract  = {We propose that correlations among neurons are generically strong enough to organize neural activity patterns into a discrete set of clusters, which can each be viewed as a population codeword. Our reasoning starts with the analysis of retinal ganglion cell data using maximum entropy models, showing that the population is robustly in a frustrated, marginally sub-critical, or glassy, state. This leads to an argument that neural populations in many other brain areas might share this structure. Next, we use latent variable models to show that this glassy state possesses well-defined clusters of neural activity. Clusters have three appealing properties: (i) clusters exhibit error correction, i.e., they are reproducibly elicited by the same stimulus despite variability at the level of constituent neurons; (ii) clusters encode qualitatively different visual features than their constituent neurons; and (iii) clusters can be learned by downstream neural circuits in an unsupervised fashion. We hypothesize that these properties give rise to a “learnable” neural code which the cortical hierarchy uses to extract increasingly complex features without supervision or reinforcement.},
  author    = {Berry, Michael J. and Tkačik, Gašper},
  issn      = {1662-5188},
  journal   = {Frontiers in Computational Neuroscience},
  publisher = {Frontiers},
  title     = {Clustering of neural activity: A design principle for population codes},
  doi       = {10.3389/fncom.2020.00020},
  volume    = {14},
  year      = {2020},
}

@article{8698,
  abstract  = {The brain represents and reasons probabilistically about complex stimuli and motor actions using a noisy, spike-based neural code. A key building block for such neural computations, as well as the basis for supervised and unsupervised learning, is the ability to estimate the surprise or likelihood of incoming high-dimensional neural activity patterns. Despite progress in statistical modeling of neural responses and deep learning, current approaches either do not scale to large neural populations or cannot be implemented using biologically realistic mechanisms. Inspired by the sparse and random connectivity of real neuronal circuits, we present a model for neural codes that accurately estimates the likelihood of individual spiking patterns and has a straightforward, scalable, efficient, learnable, and realistic neural implementation. This model’s performance on simultaneously recorded spiking activity of >100 neurons in the monkey visual and prefrontal cortices is comparable with or better than that of state-of-the-art models. Importantly, the model can be learned using a small number of samples and using a local learning rule that utilizes noise intrinsic to neural circuits. Slower, structural changes in random connectivity, consistent with rewiring and pruning processes, further improve the efficiency and sparseness of the resulting neural representations. Our results merge insights from neuroanatomy, machine learning, and theoretical neuroscience to suggest random sparse connectivity as a key design principle for neuronal computation.},
  author    = {Maoz, Ori and Tkačik, Gašper and Esteki, Mohamad Saleh and Kiani, Roozbeh and Schneidman, Elad},
  issn      = {1091-6490},
  journal   = {Proceedings of the National Academy of Sciences of the United States of America},
  number    = {40},
  pages     = {25066--25073},
  publisher = {National Academy of Sciences},
  title     = {Learning probabilistic neural representations with randomly connected circuits},
  doi       = {10.1073/pnas.1912804117},
  volume    = {117},
  year      = {2020},
}