@article{14841, abstract = {De novo heterozygous variants in KCNC2 encoding the voltage-gated potassium (K+) channel subunit Kv3.2 are a recently described cause of developmental and epileptic encephalopathy (DEE). A de novo variant in KCNC2 c.374G > A (p.Cys125Tyr) was identified via exome sequencing in a patient with DEE. Relative to wild-type Kv3.2, Kv3.2-p.Cys125Tyr induces K+ currents exhibiting a large hyperpolarizing shift in the voltage dependence of activation, accelerated activation, and delayed deactivation consistent with a relative stabilization of the open conformation, along with increased current density. Leveraging the cryogenic electron microscopy (cryo-EM) structure of Kv3.1, molecular dynamic simulations suggest that a strong π-π stacking interaction between the variant Tyr125 and Tyr156 in the α-6 helix of the T1 domain promotes a relative stabilization of the open conformation of the channel, which underlies the observed gain of function. A multicompartment computational model of a Kv3-expressing parvalbumin-positive cerebral cortex fast-spiking γ-aminobutyric acidergic (GABAergic) interneuron (PV-IN) demonstrates how the Kv3.2-Cys125Tyr variant impairs neuronal excitability and dysregulates inhibition in cerebral cortex circuits to explain the resulting epilepsy.}, author = {Clatot, Jerome and Currin, Christopher and Liang, Qiansheng and Pipatpolkai, Tanadet and Massey, Shavonne L. 
and Helbig, Ingo and Delemotte, Lucie and Vogels, Tim P and Covarrubias, Manuel and Goldberg, Ethan M.}, issn = {1091-6490}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, number = {3}, publisher = {Proceedings of the National Academy of Sciences}, title = {{A structurally precise mechanism links an epilepsy-associated KCNC2 potassium channel mutation to interneuron dysfunction}}, doi = {10.1073/pnas.2307776121}, volume = {121}, year = {2024}, } @article{14887, abstract = {Episodic memories are encoded by experience-activated neuronal ensembles that remain necessary and sufficient for recall. However, the temporal evolution of memory engrams after initial encoding is unclear. In this study, we employed computational and experimental approaches to examine how the neural composition and selectivity of engrams change with memory consolidation. Our spiking neural network model yielded testable predictions: memories transition from unselective to selective as neurons drop out of and drop into engrams; inhibitory activity during recall is essential for memory selectivity; and inhibitory synaptic plasticity during memory consolidation is critical for engrams to become selective. Using activity-dependent labeling, longitudinal calcium imaging and a combination of optogenetic and chemogenetic manipulations in mouse dentate gyrus, we conducted contextual fear conditioning experiments that supported our model’s predictions. Our results reveal that memory engrams are dynamic and that changes in engram composition mediated by inhibitory plasticity are crucial for the emergence of memory selectivity.}, author = {Feitosa Tomé, Douglas and Zhang, Ying and Aida, Tomomi and Mosto, Olivia and Lu, Yifeng and Chen, Mandy and Sadeh, Sadra and Roy, Dheeraj S. 
and Clopath, Claudia}, issn = {1546-1726}, journal = {Nature Neuroscience}, publisher = {Springer Nature}, title = {{Dynamic and selective engrams emerge with memory consolidation}}, doi = {10.1038/s41593-023-01551-w}, year = {2024}, } @article{15171, abstract = {The brain’s functionality is developed and maintained through synaptic plasticity. As synapses undergo plasticity, they also affect each other. The nature of such ‘co-dependency’ is difficult to disentangle experimentally, because multiple synapses must be monitored simultaneously. To help understand the experimentally observed phenomena, we introduce a framework that formalizes synaptic co-dependency between different connection types. The resulting model explains how inhibition can gate excitatory plasticity while neighboring excitatory–excitatory interactions determine the strength of long-term potentiation. Furthermore, we show how the interplay between excitatory and inhibitory synapses can account for the quick rise and long-term stability of a variety of synaptic weight profiles, such as orientation tuning and dendritic clustering of co-active synapses. In recurrent neuronal networks, co-dependent plasticity produces rich and stable motor cortex-like dynamics with high input sensitivity. Our results suggest an essential role for the neighborly synaptic interaction during learning, connecting micro-level physiology with network-wide phenomena.}, author = {Agnes, Everton J. and Vogels, Tim P}, issn = {1546-1726}, journal = {Nature Neuroscience}, publisher = {Springer Nature}, title = {{Co-dependent excitatory and inhibitory plasticity accounts for quick, stable and long-lasting memories in biological networks}}, doi = {10.1038/s41593-024-01597-4}, year = {2024}, } @article{15169, abstract = {Interpretation of extracellular recordings can be challenging due to the long range of electric field. This challenge can be mitigated by estimating the current source density (CSD). 
Here we introduce kCSD-python, an open Python package implementing Kernel Current Source Density (kCSD) method and related tools to facilitate CSD analysis of experimental data and the interpretation of results. We show how to counter the limitations imposed by noise and assumptions in the method itself. kCSD-python allows CSD estimation for an arbitrary distribution of electrodes in 1D, 2D, and 3D, assuming distributions of sources in tissue, a slice, or in a single cell, and includes a range of diagnostic aids. We demonstrate its features in a Jupyter Notebook tutorial which illustrates a typical analytical workflow and main functionalities useful in validating analysis results.}, author = {Chintaluri, Chaitanya and Bejtka, Marta and Sredniawa, Wladyslaw and Czerwinski, Michal and Dzik, Jakub M. and Jedrzejewska-Szmek, Joanna and Wojcik, Daniel K.}, issn = {1553-7358}, journal = {PLoS Computational Biology}, number = {3}, publisher = {Public Library of Science}, title = {{kCSD-python, reliable current source density estimation with quality control}}, doi = {10.1371/journal.pcbi.1011941}, volume = {20}, year = {2024}, } @inbook{12866, abstract = {Autism spectrum disorder (ASD) and epilepsy are frequently comorbid neurodevelopmental disorders. Extensive research has demonstrated shared pathological pathways, etiologies, and phenotypes. Many risk factors for these disorders, like genetic mutations and environmental pressures, are linked to changes in childhood brain development, which is a critical period for their manifestation. Decades of research have yielded many signatures for ASD and epilepsy, some shared and others unique or opposing. The anatomical, physiological, and behavioral correlates of these disorders are discussed in this chapter in the context of understanding shared pathological pathways. We end with important takeaways on the presentation, prevention, intervention, and policy changes for ASD and epilepsy. 
This chapter aims to explore the complexity of these disorders, both in etiology and phenotypes, with the further goal of appreciating the expanse of unknowns still to explore about the brain.}, author = {Currin, Christopher and Beyer, Chad}, booktitle = {Encyclopedia of Child and Adolescent Health}, editor = {Halpern-Felsher, Bonnie}, isbn = {9780128188736}, pages = {86--98}, publisher = {Elsevier}, title = {{Altered childhood brain development in autism and epilepsy}}, doi = {10.1016/b978-0-12-818872-9.00129-1}, year = {2023}, } @phdthesis{14422, abstract = {Animals exhibit a remarkable ability to learn and remember new behaviors, skills, and associations throughout their lifetime. These capabilities are made possible thanks to a variety of changes in the brain throughout adulthood, regrouped under the term "plasticity". Some cells in the brain —neurons— and specifically changes in the connections between neurons, the synapses, were shown to be crucial for the formation, selection, and consolidation of memories from past experiences. These ongoing changes of synapses across time are called synaptic plasticity. Understanding how a myriad of biochemical processes operating at individual synapses can somehow work in concert to give rise to meaningful changes in behavior is a fascinating problem and an active area of research. However, the experimental search for the precise plasticity mechanisms at play in the brain is daunting, as it is difficult to control and observe synapses during learning. Theoretical approaches have thus been the default method to probe the plasticity-behavior connection. Such studies attempt to extract unifying principles across synapses and model all observed synaptic changes using plasticity rules: equations that govern the evolution of synaptic strengths across time in neuronal network models. 
These rules can use many relevant quantities to determine the magnitude of synaptic changes, such as the precise timings of pre- and postsynaptic action potentials, the recent neuronal activity levels, the state of neighboring synapses, etc. However, analytical studies rely heavily on human intuition and are forced to make simplifying assumptions about plasticity rules. In this thesis, we aim to assist and augment human intuition in this search for plasticity rules. We explore whether a numerical approach could automatically discover the plasticity rules that elicit desired behaviors in large networks of interconnected neurons. This approach is dubbed meta-learning synaptic plasticity: learning plasticity rules which themselves will make neuronal networks learn how to solve a desired task. We first write all the potential plasticity mechanisms to consider using a single expression with adjustable parameters. We then optimize these plasticity parameters using evolutionary strategies or Bayesian inference on tasks known to involve synaptic plasticity, such as familiarity detection and network stabilization. We show that these automated approaches are powerful tools, able to complement established analytical methods. By comprehensively screening plasticity rules at all synapse types in realistic, spiking neuronal network models, we discover entire sets of degenerate plausible plasticity rules that reliably elicit memory-related behaviors. Our approaches allow for more robust experimental predictions, by abstracting out the idiosyncrasies of individual plasticity rules, and provide fresh insights on synaptic plasticity in spiking network models. 
}, author = {Confavreux, Basile J}, issn = {2663-337X}, pages = {148}, publisher = {Institute of Science and Technology Austria}, title = {{Synapseek: Meta-learning synaptic plasticity rules}}, doi = {10.15479/at:ista:14422}, year = {2023}, } @article{14666, abstract = {So-called spontaneous activity is a central hallmark of most nervous systems. Such non-causal firing is contrary to the tenet of spikes as a means of communication, and its purpose remains unclear. We propose that self-initiated firing can serve as a release valve to protect neurons from the toxic conditions arising in mitochondria from lower-than-baseline energy consumption. To demonstrate the viability of our hypothesis, we built a set of models that incorporate recent experimental results indicating homeostatic control of metabolic products—Adenosine triphosphate (ATP), adenosine diphosphate (ADP), and reactive oxygen species (ROS)—by changes in firing. We explore the relationship of metabolic cost of spiking with its effect on the temporal patterning of spikes and reproduce experimentally observed changes in intrinsic firing in the fruitfly dorsal fan-shaped body neuron in a model with ROS-modulated potassium channels. We also show that metabolic spiking homeostasis can produce indefinitely sustained avalanche dynamics in cortical circuits. Our theory can account for key features of neuronal activity observed in many studies ranging from ion channel function all the way to resting state dynamics. 
We finish with a set of experimental predictions that would confirm an integrated, crucial role for metabolically regulated spiking and firmly link metabolic homeostasis and neuronal function.}, author = {Chintaluri, Chaitanya and Vogels, Tim P}, issn = {1091-6490}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, number = {48}, publisher = {National Academy of Sciences}, title = {{Metabolically regulated spiking could serve neuronal energy homeostasis and protect from reactive oxygen species}}, doi = {10.1073/pnas.2306525120}, volume = {120}, year = {2023}, } @misc{14892, abstract = {Code and data necessary to reproduce the simulations and data analyses reported in our manuscript: Tomé, D.F., Zhang, Y., Aida, T., Mosto, O., Lu, Y., Chen, M., Sadeh, S., Roy, D. S., Clopath, C. Dynamic and selective engrams emerge with memory consolidation. 2023.}, author = {Feitosa Tomé, Douglas}, publisher = {Zenodo}, title = {{douglastome/dynamic-engrams: Dynamic and selective engrams emerge with memory consolidation}}, doi = {10.5281/ZENODO.10251087}, year = {2023}, } @inproceedings{14993, abstract = {Traditional top-down approaches for global health have historically failed to achieve social progress (Hoffman et al., 2015; Hoffman & Røttingen, 2015). Recently, however, a more holistic, multi-level approach termed One Health (OH) (Osterhaus et al., 2020) is being adopted. Several sets of challenges have been identified for the implementation of OH (dos S. Ribeiro et al., 2019), including policy and funding, education and training, and multi-actor, multi-domain, and multi-level collaborations. These exist despite the increasing accessibility to knowledge and digital collaborative research tools through the internet. To address some of these challenges, we propose a general framework for grassroots community-based means of participatory research. 
Additionally, we present a specific roadmap to create a Machine Learning for Global Health community in Africa. The proposed framework aims to enable any small group of individuals with scarce resources to build and sustain an online community within approximately two years. We provide a discussion on the potential impact of the proposed framework for global health research collaborations.}, author = {Currin, Christopher and Asiedu, Mercy Nyamewaa and Fourie, Chris and Rosman, Benjamin and Turki, Houcemeddine and Lambebo Tonja, Atnafu and Abbott, Jade and Ajala, Marvellous and Adedayo, Sadiq Adewale and Emezue, Chris Chinenye and Machangara, Daphne}, booktitle = {1st Workshop on Machine Learning \& Global Health}, location = {Kigali, Rwanda}, publisher = {OpenReview}, title = {{A framework for grassroots research collaboration in machine learning and global health}}, year = {2023}, } @inproceedings{13239, abstract = {Brains are thought to engage in predictive learning - learning to predict upcoming stimuli - to construct an internal model of their environment. This is especially notable for spatial navigation, as first described by Tolman’s latent learning tasks. However, predictive learning has also been observed in sensory cortex, in settings unrelated to spatial navigation. Apart from normative frameworks such as active inference or efficient coding, what could be the utility of learning to predict the patterns of occurrence of correlated stimuli? Here we show that prediction, and thereby the construction of an internal model of sequential stimuli, can bootstrap the learning process of a working memory task in a recurrent neural network. We implemented predictive learning alongside working memory match-tasks, and networks emerged to solve the prediction task first by encoding information across time to predict upcoming stimuli, and then eavesdropped on this solution to solve the matching task. Eavesdropping was most beneficial when neural resources were limited. 
Hence, predictive learning acts as a general neural mechanism to learn to store sensory information that can later be essential for working memory tasks.}, author = {Van Der Plas, Thijs L. and Vogels, Tim P and Manohar, Sanjay G.}, booktitle = {Proceedings of Machine Learning Research}, issn = {2640-3498}, pages = {518--531}, publisher = {ML Research Press}, title = {{Predictive learning enables neural networks to learn complex working memory tasks}}, volume = {199}, year = {2022}, } @article{11143, abstract = {Dravet syndrome is a neurodevelopmental disorder characterized by epilepsy, intellectual disability, and sudden death due to pathogenic variants in SCN1A with loss of function of the sodium channel subunit Nav1.1. Nav1.1-expressing parvalbumin GABAergic interneurons (PV-INs) from young Scn1a+/− mice show impaired action potential generation. An approach assessing PV-IN function in the same mice at two time points shows impaired spike generation in all Scn1a+/− mice at postnatal days (P) 16–21, whether deceased prior or surviving to P35, with normalization by P35 in surviving mice. However, PV-IN synaptic transmission is dysfunctional in young Scn1a+/− mice that did not survive and in Scn1a+/− mice ≥ P35. Modeling confirms that PV-IN axonal propagation is more sensitive to decreased sodium conductance than spike generation. These results demonstrate dynamic dysfunction in Dravet syndrome: combined abnormalities of PV-IN spike generation and propagation drives early disease severity, while ongoing dysfunction of synaptic transmission contributes to chronic pathology.}, author = {Kaneko, Keisuke and Currin, Christopher and Goff, Kevin M. and Wengert, Eric R. 
and Somarowthu, Ala and Vogels, Tim P and Goldberg, Ethan M.}, issn = {2211-1247}, journal = {Cell Reports}, number = {13}, publisher = {Elsevier}, title = {{Developmentally regulated impairment of parvalbumin interneuron synaptic transmission in an experimental model of Dravet syndrome}}, doi = {10.1016/j.celrep.2022.110580}, volume = {38}, year = {2022}, } @article{12009, abstract = {Changes in the short-term dynamics of excitatory synapses over development have been observed throughout cortex, but their purpose and consequences remain unclear. Here, we propose that developmental changes in synaptic dynamics buffer the effect of slow inhibitory long-term plasticity, allowing for continuously stable neural activity. Using computational modeling we demonstrate that early in development excitatory short-term depression quickly stabilises neural activity, even in the face of strong, unbalanced excitation. We introduce a model of the commonly observed developmental shift from depression to facilitation and show that neural activity remains stable throughout development, while inhibitory synaptic plasticity slowly balances excitation, consistent with experimental observations. Our model predicts changes in the input responses from phasic to phasic-and-tonic and more precise spike timings. We also observe a gradual emergence of short-lasting memory traces governed by short-term plasticity development. We conclude that the developmental depression-to-facilitation shift may control excitation-inhibition balance throughout development with important functional consequences.}, author = {Jia, David W. 
and Vogels, Tim P and Costa, Rui Ponte}, issn = {2399-3642}, journal = {Communications Biology}, publisher = {Springer Nature}, title = {{Developmental depression-to-facilitation shift controls excitation-inhibition balance}}, doi = {10.1038/s42003-022-03801-2}, volume = {5}, year = {2022}, } @article{12084, abstract = {Neuronal networks encode information through patterns of activity that define the networks’ function. The neurons’ activity relies on specific connectivity structures, yet the link between structure and function is not fully understood. Here, we tackle this structure-function problem with a new conceptual approach. Instead of manipulating the connectivity directly, we focus on upper triangular matrices, which represent the network dynamics in a given orthonormal basis obtained by the Schur decomposition. This abstraction allows us to independently manipulate the eigenspectrum and feedforward structures of a connectivity matrix. Using this method, we describe a diverse repertoire of non-normal transient amplification, and to complement the analysis of the dynamical regimes, we quantify the geometry of output trajectories through the effective rank of both the eigenvector and the dynamics matrices. Counter-intuitively, we find that shrinking the eigenspectrum’s imaginary distribution leads to highly amplifying regimes in linear and long-lasting dynamics in nonlinear networks. We also find a trade-off between amplification and dimensionality of neuronal dynamics, i.e., trajectories in neuronal state-space. Networks that can amplify a large number of orthogonal initial conditions produce neuronal trajectories that lie in the same subspace of the neuronal state-space. Finally, we examine networks of excitatory and inhibitory neurons. 
We find that the strength of global inhibition is directly linked with the amplitude of amplification, such that weakening inhibitory weights also decreases amplification, and that the eigenspectrum’s imaginary distribution grows with an increase in the ratio between excitatory-to-inhibitory and excitatory-to-excitatory connectivity strengths. Consequently, the strength of global inhibition reveals itself as a strong signature for amplification and a potential control mechanism to switch dynamical regimes. Our results shed a light on how biological networks, i.e., networks constrained by Dale’s law, may be optimised for specific dynamical regimes.}, author = {Christodoulou, Georgia and Vogels, Tim P and Agnes, Everton J.}, issn = {1553-7358}, journal = {PLoS Computational Biology}, number = {8}, publisher = {Public Library of Science}, title = {{Regimes and mechanisms of transient amplification in abstract and biological neural networks}}, doi = {10.1371/journal.pcbi.1010365}, volume = {18}, year = {2022}, } @article{12225, abstract = {In social networks, users often engage with like-minded peers. This selective exposure to opinions might result in echo chambers, i.e., political fragmentation and social polarization of user interactions. When echo chambers form, opinions have a bimodal distribution with two peaks on opposite sides. In certain issues, where either extreme positions contain a degree of misinformation, neutral consensus is preferable for promoting discourse. In this paper, we use an opinion dynamics model that naturally forms echo chambers in order to find a feedback mechanism that bridges these communities and leads to a neutral consensus. We introduce the random dynamical nudge (RDN), which presents each agent with input from a random selection of other agents’ opinions and does not require surveillance of every person’s opinions. 
Our computational results in two different models suggest that the RDN leads to a unimodal distribution of opinions centered around the neutral consensus. Furthermore, the RDN is effective both for preventing the formation of echo chambers and also for depolarizing existing echo chambers. Due to the simple and robust nature of the RDN, social media networks might be able to implement a version of this self-feedback mechanism, when appropriate, to prevent the segregation of online communities on complex social issues.}, author = {Currin, Christopher and Vera, Sebastián Vallejo and Khaledi-Nasab, Ali}, issn = {2045-2322}, journal = {Scientific Reports}, keywords = {Multidisciplinary}, publisher = {Springer Nature}, title = {{Depolarization of echo chambers by random dynamical nudge}}, doi = {10.1038/s41598-022-12494-w}, volume = {12}, year = {2022}, } @article{10753, abstract = {This is a comment on "Meta-learning synaptic plasticity and memory addressing for continual familiarity detection." Neuron. 2022 Feb 2;110(3):544-557.e8.}, author = {Confavreux, Basile J and Vogels, Tim P}, issn = {1097-4199}, journal = {Neuron}, number = {3}, pages = {361--362}, publisher = {Elsevier}, title = {{A familiar thought: Machines that replace us?}}, doi = {10.1016/j.neuron.2022.01.014}, volume = {110}, year = {2022}, } @unpublished{8125, abstract = {Context, such as behavioral state, is known to modulate memory formation and retrieval, but is usually ignored in associative memory models. Here, we propose several types of contextual modulation for associative memory networks that greatly increase their performance. In these networks, context inactivates specific neurons and connections, which modulates the effective connectivity of the network. Memories are stored only by the active components, thereby reducing interference from memories acquired in other contexts. 
Such networks exhibit several beneficial characteristics, including enhanced memory capacity, high robustness to noise, increased robustness to memory overloading, and better memory retention during continual learning. Furthermore, memories can be biased to have different relative strengths, or even gated on or off, according to contextual cues, providing a candidate model for cognitive control of memory and efficient memory search. An external context-encoding network can dynamically switch the memory network to a desired state, which we liken to experimentally observed contextual signals in prefrontal cortex and hippocampus. Overall, our work illustrates the benefits of organizing memory around context, and provides an important link between behavioral studies of memory and mechanistic details of neural circuits. SIGNIFICANCE: Memory is context dependent — both encoding and recall vary in effectiveness and speed depending on factors like location and brain state during a task. We apply this idea to a simple computational model of associative memory through contextual gating of neurons and synaptic connections. Intriguingly, this results in several advantages, including vastly enhanced memory capacity, better robustness, and flexible memory gating. Our model helps to explain (i) how gating and inhibition contribute to memory processes, (ii) how memory access dynamically changes over time, and (iii) how context representations, such as those observed in hippocampus and prefrontal cortex, may interact with and control memory processes.}, author = {Podlaski, William F. and Agnes, Everton J. 
and Vogels, Tim P}, booktitle = {bioRxiv}, publisher = {Cold Spring Harbor Laboratory}, title = {{High capacity and dynamic accessibility in associative memory networks with context-dependent neuronal and synaptic gating}}, doi = {10.1101/2020.01.08.898528}, year = {2022}, } @inproceedings{11453, abstract = {Neuronal computations depend on synaptic connectivity and intrinsic electrophysiological properties. Synaptic connectivity determines which inputs from presynaptic neurons are integrated, while cellular properties determine how inputs are filtered over time. Unlike their biological counterparts, most computational approaches to learning in simulated neural networks are limited to changes in synaptic connectivity. However, if intrinsic parameters change, neural computations are altered drastically. Here, we include the parameters that determine the intrinsic properties, e.g., time constants and reset potential, into the learning paradigm. Using sparse feedback signals that indicate target spike times, and gradient-based parameter updates, we show that the intrinsic parameters can be learned along with the synaptic weights to produce specific input-output functions. Specifically, we use a teacher-student paradigm in which a randomly initialised leaky integrate-and-fire or resonate-and-fire neuron must recover the parameters of a teacher neuron. We show that complex temporal functions can be learned online and without backpropagation through time, relying on event-based updates only. 
Our results are a step towards online learning of neural computations from ungraded and unsigned sparse feedback signals with a biologically inspired learning mechanism.}, author = {Braun, Lukas and Vogels, Tim P}, booktitle = {Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems}, isbn = {9781713845393}, issn = {1049-5258}, location = {Virtual, Online}, pages = {16437--16450}, publisher = {Neural Information Processing Systems Foundation}, title = {{Online learning of neural computations from sparse temporal feedback}}, volume = {20}, year = {2021}, } @article{8253, abstract = {Brains process information in spiking neural networks. Their intricate connections shape the diverse functions these networks perform. In comparison, the functional capabilities of models of spiking networks are still rudimentary. This shortcoming is mainly due to the lack of insight and practical algorithms to construct the necessary connectivity. Any such algorithm typically attempts to build networks by iteratively reducing the error compared to a desired output. But assigning credit to hidden units in multi-layered spiking networks has remained challenging due to the non-differentiable nonlinearity of spikes. To avoid this issue, one can employ surrogate gradients to discover the required connectivity in spiking network models. However, the choice of a surrogate is not unique, raising the question of how its implementation influences the effectiveness of the method. Here, we use numerical simulations to systematically study how essential design parameters of surrogate gradients impact learning performance on a range of classification problems. We show that surrogate gradient learning is robust to different shapes of underlying surrogate derivatives, but the choice of the derivative’s scale can substantially affect learning performance. 
When we combine surrogate gradients with a suitable activity regularization technique, robust information processing can be achieved in spiking networks even at the sparse activity limit. Our study provides a systematic account of the remarkable robustness of surrogate gradient learning and serves as a practical guide to model functional spiking neural networks.}, author = {Zenke, Friedemann and Vogels, Tim P}, issn = {1530-888X}, journal = {Neural Computation}, number = {4}, pages = {899--925}, publisher = {MIT Press}, title = {{The remarkable robustness of surrogate gradient learning for instilling complex function in spiking neural networks}}, doi = {10.1162/neco_a_01367}, volume = {33}, year = {2021}, } @article{8757, abstract = {Traditional scientific conferences and seminar events have been hugely disrupted by the COVID-19 pandemic, paving the way for virtual forms of scientific communication to take hold and be put to the test.}, author = {Bozelos, Panagiotis and Vogels, Tim P}, issn = {1471-0048}, journal = {Nature Reviews Neuroscience}, number = {1}, pages = {1--2}, publisher = {Springer Nature}, title = {{Talking science, online}}, doi = {10.1038/s41583-020-00408-6}, volume = {22}, year = {2021}, } @article{9228, abstract = {Legacy conferences are costly and time consuming, and exclude scientists lacking various resources or abilities. During the 2020 pandemic, we created an online conference platform, Neuromatch Conferences (NMC), aimed at developing technological and cultural changes to make conferences more democratic, scalable, and accessible. We discuss the lessons we learned.}, author = {Achakulvisut, Titipat and Ruangrong, Tulakan and Mineault, Patrick and Vogels, Tim P and Peters, Megan A. K. and Poirazi, Panayiota and Rozell, Christopher and Wyble, Brad and Goodman, Dan F. M. 
and Kording, Konrad Paul}, issn = {1879-307X}, journal = {Trends in Cognitive Sciences}, number = {4}, pages = {265--268}, publisher = {Elsevier}, title = {{Towards democratizing and automating online conferences: Lessons from the Neuromatch Conferences}}, doi = {10.1016/j.tics.2021.01.007}, volume = {25}, year = {2021}, }