@inproceedings{6012,
  abstract = {We present an approach to identify concise equations from data using a shallow neural network approach. In contrast to ordinary black-box regression, this approach allows understanding functional relations and generalizing them from observed data to unseen parts of the parameter space. We show how to extend the class of learnable equations for a recently proposed equation learning network to include divisions, and we improve the learning and model selection strategy to be useful for challenging real-world data. For systems governed by analytical expressions, our method can in many cases identify the true underlying equation and extrapolate to unseen domains. We demonstrate its effectiveness by experiments on a cart-pendulum system, where only 2 random rollouts are required to learn the forward dynamics and successfully achieve the swing-up task.},
  author = {Sahoo, Subham and Lampert, Christoph and Martius, Georg S},
  booktitle = {Proceedings of the 35th International Conference on Machine Learning},
  location = {Stockholm, Sweden},
  pages = {4442--4450},
  publisher = {ML Research Press},
  title = {{Learning equations for extrapolation and control}},
  volume = {80},
  year = {2018},
}

@inproceedings{6011,
  abstract = {We establish a data-dependent notion of algorithmic stability for Stochastic Gradient Descent (SGD), and employ it to develop novel generalization bounds. This is in contrast to previous distribution-free algorithmic stability results for SGD, which depend on worst-case constants. By virtue of the data-dependent argument, our bounds provide new insights into learning with SGD on convex and non-convex problems. In the convex case, we show that the bound on the generalization error depends on the risk at the initialization point. In the non-convex case, we prove that the expected curvature of the objective function around the initialization point has a crucial influence on the generalization error. In both cases, our results suggest a simple data-driven strategy to stabilize SGD by pre-screening its initialization. As a corollary, our results allow us to show optimistic generalization bounds that exhibit fast convergence rates for SGD subject to a vanishing empirical risk and low noise of the stochastic gradient.},
  author = {Kuzborskij, Ilja and Lampert, Christoph},
  booktitle = {Proceedings of the 35th International Conference on Machine Learning},
  location = {Stockholm, Sweden},
  pages = {2815--2824},
  publisher = {ML Research Press},
  title = {{Data-dependent stability of stochastic gradient descent}},
  volume = {80},
  year = {2018},
}

@inproceedings{6589,
  abstract = {Distributed training of massive machine learning models, in particular deep neural networks, via Stochastic Gradient Descent (SGD) is becoming commonplace. Several families of communication-reduction methods, such as quantization, large-batch methods, and gradient sparsification, have been proposed. To date, gradient sparsification methods--where each node sorts gradients by magnitude and only communicates a subset of the components, accumulating the rest locally--are known to yield some of the largest practical gains. Such methods can reduce the amount of communication per step by up to \emph{three orders of magnitude}, while preserving model accuracy. Yet, this family of methods currently has no theoretical justification. This is the question we address in this paper.
  We prove that, under analytic assumptions, sparsifying gradients by magnitude with local error correction provides convergence guarantees, for both convex and non-convex smooth objectives, for data-parallel SGD. The main insight is that sparsification methods implicitly maintain bounds on the maximum impact of stale updates, thanks to selection by magnitude. Our analysis and empirical validation also reveal that these methods do require analytical conditions to converge well, justifying existing heuristics.},
  author = {Alistarh, Dan-Adrian and Hoefler, Torsten and Johansson, Mikael and Konstantinov, Nikola H and Khirirat, Sarit and Renggli, Cedric},
  booktitle = {Advances in Neural Information Processing Systems 31},
  location = {Montreal, Canada},
  pages = {5973--5983},
  publisher = {Neural Information Processing Systems Foundation},
  title = {{The convergence of sparsified gradient methods}},
  volume = {31},
  year = {2018},
}

@article{7,
  abstract = {Animal social networks are shaped by multiple selection pressures, including the need to ensure efficient communication and functioning while simultaneously limiting disease transmission. Social animals could potentially further reduce epidemic risk by altering their social networks in the presence of pathogens, yet there is currently no evidence for such pathogen-triggered responses. We tested this hypothesis experimentally in the ant Lasius niger using a combination of automated tracking, controlled pathogen exposure, transmission quantification, and temporally explicit simulations. Pathogen exposure induced behavioral changes in both exposed ants and their nestmates, which helped contain the disease by reinforcing key transmission-inhibitory properties of the colony's contact network. This suggests that social network plasticity in response to pathogens is an effective strategy for mitigating the effects of disease in social groups.},
  author = {Stroeymeyt, Nathalie and Grasse, Anna V and Crespi, Alessandro and Mersch, Danielle and Cremer, Sylvia and Keller, Laurent},
  issn = {1095-9203},
  journal = {Science},
  number = {6417},
  pages = {941--945},
  publisher = {AAAS},
  title = {{Social network plasticity decreases disease transmission in a eusocial insect}},
  doi = {10.1126/science.aat4793},
  volume = {362},
  year = {2018},
}

@article{19,
  abstract = {Bacteria regulate genes to survive antibiotic stress, but regulation can be far from perfect. When regulation is not optimal, mutations that change gene expression can contribute to antibiotic resistance. It is not systematically understood to what extent natural gene regulation is or is not optimal for distinct antibiotics, and how changes in expression of specific genes quantitatively affect antibiotic resistance. Here we discover a simple quantitative relation between fitness, gene expression, and antibiotic potency, which rationalizes our observation that a multitude of genes and even innate antibiotic defense mechanisms have expression that is critically nonoptimal under antibiotic treatment. First, we developed a pooled-strain drug-diffusion assay and screened Escherichia coli overexpression and knockout libraries, finding that resistance to a range of 31 antibiotics could result from changing expression of a large and functionally diverse set of genes, in a primarily but not exclusively drug-specific manner.
  Second, by synthetically controlling the expression of single-drug and multidrug resistance genes, we observed that their fitness-expression functions changed dramatically under antibiotic treatment in accordance with a log-sensitivity relation. Thus, because many genes are nonoptimally expressed under antibiotic treatment, many regulatory mutations can contribute to resistance by altering expression and by activating latent defenses.},
  author = {Palmer, Adam and Chait, Remy P and Kishony, Roy},
  issn = {0737-4038},
  journal = {Molecular Biology and Evolution},
  number = {11},
  pages = {2669--2684},
  publisher = {Oxford University Press},
  title = {{Nonoptimal gene expression creates latent potential for antibiotic resistance}},
  doi = {10.1093/molbev/msy163},
  volume = {35},
  year = {2018},
}