@article{9249, abstract = {The rhombic dodecahedron is a space-filling polyhedron that represents the close packing of spheres in 3D space and the Voronoi structures of the face-centered cubic (FCC) lattice. In this paper, we describe a new coordinate system where every grid point with three integer coordinates corresponds to a rhombic dodecahedron centroid. In order to illustrate the interest of the new coordinate system, we propose a characterization of the 3D digital plane with its topological features, such as the interrelation between the thickness of the digital plane and the separability constraint we aim to obtain. We also present a characterization of 3D digital lines and study them as intersections of multiple digital planes. A characterization of the 3D digital sphere with relevant topological features is proposed as well, along with the 48-symmetry appearing in the new coordinate system.}, author = {Biswas, Ranita and Largeteau-Skapin, Gaëlle and Zrour, Rita and Andres, Eric}, issn = {2353-3390}, journal = {Mathematical Morphology - Theory and Applications}, number = {1}, pages = {143--158}, publisher = {De Gruyter}, title = {{Digital objects in rhombic dodecahedron grid}}, doi = {10.1515/mathm-2020-0106}, volume = {4}, year = {2020}, } @inproceedings{9299, abstract = {We call a multigraph non-homotopic if it can be drawn in the plane in such a way that no two edges connecting the same pair of vertices can be continuously transformed into each other without passing through a vertex, and no loop can be shrunk to its end-vertex in the same way. It is easy to see that a non-homotopic multigraph on n > 1 vertices can have arbitrarily many edges. We prove that the number of crossings between the edges of a non-homotopic multigraph with n vertices and m > 4n edges is larger than cm²/n for some constant c > 0, and that this bound is tight up to a polylogarithmic factor. We also show that the lower bound is not asymptotically sharp when n is fixed and m → ∞.}, author = {Pach, János and Tardos, Gábor and Tóth, Géza}, booktitle = {28th International Symposium on Graph Drawing and Network Visualization}, isbn = {9783030687656}, issn = {1611-3349}, location = {Virtual, Online}, pages = {359--371}, publisher = {Springer Nature}, title = {{Crossings between non-homotopic edges}}, doi = {10.1007/978-3-030-68766-3_28}, volume = {12590}, year = {2020}, } @inproceedings{9632, abstract = {Second-order information, in the form of Hessian- or Inverse-Hessian-vector products, is a fundamental tool for solving optimization problems. Recently, there has been significant interest in utilizing this information in the context of deep neural networks; however, relatively little is known about the quality of existing approximations in this context. Our work examines this question, identifies issues with existing approaches, and proposes a method called WoodFisher to compute a faithful and efficient estimate of the inverse Hessian. Our main application is to neural network compression, where we build on the classic Optimal Brain Damage/Surgeon framework. We demonstrate that WoodFisher significantly outperforms popular state-of-the-art methods for one-shot pruning. Further, even when iterative, gradual pruning is allowed, our method results in a gain in test accuracy over the state-of-the-art approaches, for standard image classification datasets such as ImageNet ILSVRC.
We examine how our method can be extended to take into account first-order information, as well as illustrate its ability to automatically set layer-wise pruning thresholds and perform compression in the limited-data regime. The code is available at https://github.com/IST-DASLab/WoodFisher.}, author = {Singh, Sidak Pal and Alistarh, Dan-Adrian}, booktitle = {Advances in Neural Information Processing Systems}, isbn = {9781713829546}, issn = {10495258}, location = {Vancouver, Canada}, pages = {18098--18109}, publisher = {Curran Associates}, title = {{WoodFisher: Efficient second-order approximation for neural network compression}}, volume = {33}, year = {2020}, } @article{9630, abstract = {Various kinds of data are routinely represented as discrete probability distributions. Examples include text documents summarized by histograms of word occurrences and images represented as histograms of oriented gradients. Viewing a discrete probability distribution as a point in the standard simplex of the appropriate dimension, we can understand collections of such objects in geometric and topological terms. Importantly, instead of using the standard Euclidean distance, we look into dissimilarity measures with information-theoretic justification, and we develop the theory needed for applying topological data analysis in this setting. In doing so, we emphasize constructions that enable the usage of existing computational topology software in this context.}, author = {Edelsbrunner, Herbert and Virk, Ziga and Wagner, Hubert}, issn = {1920180X}, journal = {Journal of Computational Geometry}, number = {2}, pages = {162--182}, publisher = {Carleton University}, title = {{Topological data analysis in information space}}, doi = {10.20382/jocg.v11i2a7}, volume = {11}, year = {2020}, } @inproceedings{9631, abstract = {The ability to leverage large-scale hardware parallelism has been one of the key enablers of the accelerated recent progress in machine learning. Consequently, there has been considerable effort invested into developing efficient parallel variants of classic machine learning algorithms. However, despite the wealth of knowledge on parallelization, some classic machine learning algorithms often prove hard to parallelize efficiently while maintaining convergence. In this paper, we focus on efficient parallel algorithms for the key machine learning task of inference on graphical models, in particular on the fundamental belief propagation algorithm. We address the challenge of efficiently parallelizing this classic paradigm by showing how to leverage scalable relaxed schedulers in this context. We present an extensive empirical study, showing that our approach outperforms previous parallel belief propagation implementations both in terms of scalability and in terms of wall-clock convergence time, on a range of practical applications.}, author = {Aksenov, Vitaly and Alistarh, Dan-Adrian and Korhonen, Janne}, booktitle = {Advances in Neural Information Processing Systems}, isbn = {9781713829546}, issn = {10495258}, location = {Vancouver, Canada}, pages = {22361--22372}, publisher = {Curran Associates}, title = {{Scalable belief propagation via relaxed scheduling}}, volume = {33}, year = {2020}, } @inproceedings{8533, abstract = {Game of Life is a simple and elegant model to study dynamical systems over networks. The model consists of a graph where every vertex has one of two types, namely, dead or alive. A configuration is a mapping of the vertices to the types.
An update rule describes how the type of a vertex is updated given the types of its neighbors. In every round, all vertices are updated synchronously, which leads to a configuration update. While, in general, Game of Life allows a broad range of update rules, we focus on two simple families of update rules, namely, underpopulation and overpopulation, that model several interesting dynamics studied in the literature. In both settings, a dead vertex requires at least a desired number of live neighbors to become alive. For underpopulation (resp., overpopulation), a live vertex requires at least (resp., at most) a desired number of live neighbors to remain alive. We study the basic computation problems, e.g., configuration reachability, for these two families of rules. For underpopulation rules, we show that these problems can be solved in polynomial time, whereas for overpopulation rules they are PSPACE-complete.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Jecker, Ismael R and Svoboda, Jakub}, booktitle = {45th International Symposium on Mathematical Foundations of Computer Science}, isbn = {9783959771597}, issn = {18688969}, location = {Prague, Czech Republic}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Simplified game of life: Algorithms and complexity}}, doi = {10.4230/LIPIcs.MFCS.2020.22}, volume = {170}, year = {2020}, } @inproceedings{8534, abstract = {A regular language L of finite words is composite if there are regular languages L₁, L₂, …, L_t such that L = ⋂_{i = 1}^t L_i and the index (number of states in a minimal DFA) of every language L_i is strictly smaller than the index of L. Otherwise, L is prime. Primality of regular languages was introduced and studied in [O. Kupferman and J. Mosheiff, 2015], where the complexity of deciding the primality of the language of a given DFA was left open, with a doubly-exponential gap between the upper and lower bounds. We study primality for unary regular languages, namely regular languages with a singleton alphabet. A unary language corresponds to a subset of ℕ, making the study of unary prime languages closer to that of primality in number theory. We show that the setting of languages is richer. In particular, while every composite number is the product of two smaller numbers, the number t of languages necessary to decompose a composite unary language induces a strict hierarchy. In addition, a primality witness for a unary language L, namely a word that is not in L but is in all products of languages that contain L and have an index smaller than L’s, may be of exponential length. Still, we are able to characterize compositionality by structural properties of a DFA for L, leading to a LogSpace algorithm for primality checking of unary DFAs.}, author = {Jecker, Ismael R and Kupferman, Orna and Mazzocchi, Nicolas}, booktitle = {45th International Symposium on Mathematical Foundations of Computer Science}, isbn = {9783959771597}, issn = {18688969}, location = {Prague, Czech Republic}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Unary prime languages}}, doi = {10.4230/LIPIcs.MFCS.2020.51}, volume = {170}, year = {2020}, } @article{8538, abstract = {We prove some recent experimental observations of Dan Reznik concerning periodic billiard orbits in ellipses. For example, the sum of cosines of the angles of a periodic billiard polygon remains constant in the 1-parameter family of such polygons (that exist due to the Poncelet porism).
In our proofs, we use geometric and complex analytic methods.}, author = {Akopyan, Arseniy and Schwartz, Richard and Tabachnikov, Serge}, issn = {2199-6768}, journal = {European Journal of Mathematics}, publisher = {Springer Nature}, title = {{Billiards in ellipses revisited}}, doi = {10.1007/s40879-020-00426-9}, year = {2020}, } @unpublished{8616, abstract = {The brain vasculature supplies neurons with glucose and oxygen, but little is known about how vascular plasticity contributes to brain function. Using longitudinal in vivo imaging, we reported that a substantial proportion of blood vessels in the adult brain sporadically occluded and regressed. Their regression proceeded through sequential stages of blood-flow occlusion, endothelial cell collapse, relocation or loss of pericytes, and retraction of glial endfeet. Regressing vessels were found to be widespread in mouse, monkey and human brains. Both brief occlusions of the middle cerebral artery and lipopolysaccharide-mediated inflammation induced an increase in vessel regression. Blockage of leukocyte adhesion to endothelial cells alleviated LPS-induced vessel regression. We further revealed that blood vessel regression caused a reduction of neuronal activity due to a dysfunction in mitochondrial metabolism and glutamate production. Our results elucidate the mechanism of vessel regression and its role in neuronal function in the adult brain.}, author = {Gao, Xiaofei and Li, Jun-Liszt and Chen, Xingjun and Ci, Bo and Chen, Fei and Lu, Nannan and Shen, Bo and Zheng, Lijun and Jia, Jie-Min and Yi, Yating and Zhang, Shiwen and Shi, Ying-Chao and Shi, Kaibin and Propson, Nicholas E and Huang, Yubin and Poinsatte, Katherine and Zhang, Zhaohuan and Yue, Yuanlei and Bosco, Dale B and Lu, Ying-mei and Yang, Shi-bing and Adams, Ralf H. and Lindner, Volkhard and Huang, Fen and Wu, Long-Jun and Zheng, Hui and Han, Feng and Hippenmeyer, Simon and Stowe, Ann M. and Peng, Bo and Margeta, Marta and Wang, Xiaoqun and Liu, Qiang and Körbelin, Jakob and Trepel, Martin and Lu, Hui and Zhou, Bo O. and Zhao, Hu and Su, Wenzhi and Bachoo, Robert M. and Ge, Woo-ping}, booktitle = {bioRxiv}, publisher = {Cold Spring Harbor Laboratory}, title = {{Reduction of neuronal activity mediated by blood-vessel regression in the brain}}, doi = {10.1101/2020.09.15.262782}, year = {2020}, } @techreport{8695, abstract = {A look at international activities on Open Science reveals a broad spectrum from individual institutional policies to national action plans. The present Recommendations for a National Open Science Strategy in Austria are based on these international initiatives and present practical considerations for their coordinated implementation with regard to strategic developments in research, technology and innovation (RTI) in Austria until 2030. They are addressed to all relevant actors in the RTI system, in particular to Research Performing Organisations, Research Funding Organisations, Research Policy makers, memory institutions such as Libraries, and Researchers. The recommendation paper was developed from 2018 to 2020 by the OANA working group "Open Science Strategy" and published for the first time in spring 2020 for a public consultation. The now-available final version of the recommendation document, which contains feedback and comments from the consultation, is intended to provide an impetus for further discussion and implementation of Open Science in Austria and serves as a contribution and basis for a potential national Open Science Strategy in Austria.
The document builds on the diverse expertise of the authors (academia, administration, library and archive, information technology, science policy, funding system, etc.) and reflects their personal experiences and opinions.}, author = {Mayer, Katja and Rieck, Katharina and Reichmann, Stefan and Danowski, Patrick and Graschopf, Anton and König, Thomas and Kraker, Peter and Lehner, Patrick and Reckling, Falk and Ross-Hellauer, Tony and Spichtinger, Daniel and Tzatzanis, Michalis and Schürz, Stefanie}, pages = {36}, publisher = {OANA}, title = {{Empfehlungen für eine nationale Open Science Strategie in Österreich / Recommendations for a National Open Science Strategy in Austria}}, doi = {10.5281/ZENODO.4109242}, year = {2020}, } @article{8706, abstract = {As part of the Austrian Transition to Open Access (AT2OA) project, subproject TP1-B is working on designing a monitoring solution for the output of Open Access publications in Austria. This report on a potential Open Access monitoring approach in Austria is one of the results of these efforts and can serve as a basis for discussion on an international level.}, author = {Danowski, Patrick and Ferus, Andreas and Hikl, Anna-Laetitia and McNeill, Gerda and Miniberger, Clemens and Reding, Steve and Zarka, Tobias and Zojer, Michael}, issn = {10222588}, journal = {Mitteilungen der Vereinigung Österreichischer Bibliothekarinnen und Bibliothekare}, number = {2}, pages = {278--284}, publisher = {Vereinigung Österreichischer Bibliothekarinnen und Bibliothekare}, title = {{„Recommendation“ for the further procedure for open access monitoring. Deliverable of the AT2OA subproject TP1-B}}, doi = {10.31263/voebm.v73i2.3941}, volume = {73}, year = {2020}, } @article{8978, abstract = {Mosaic analysis with double markers (MADM) technology enables concomitant fluorescent cell labeling and induction of uniparental chromosome disomy (UPD) with single-cell resolution. In UPD, imprinted genes are either overexpressed 2-fold or are not expressed. Here, the MADM platform is utilized to probe imprinting phenotypes at the transcriptional level. This protocol highlights major steps for the generation and isolation of projection neurons and astrocytes with MADM-induced UPD from mouse cerebral cortex for downstream single-cell and low-input sample RNA-sequencing experiments. For complete details on the use and execution of this protocol, please refer to Laukoter et al. (2020b).}, author = {Laukoter, Susanne and Amberg, Nicole and Pauler, Florian and Hippenmeyer, Simon}, issn = {2666-1667}, journal = {STAR Protocols}, number = {3}, publisher = {Elsevier}, title = {{Generation and isolation of single cells from mouse brain with mosaic analysis with double markers-induced uniparental chromosome disomy}}, doi = {10.1016/j.xpro.2020.100215}, volume = {1}, year = {2020}, } @inproceedings{9103, abstract = {We introduce LRT-NG, a set of techniques and an associated toolset that computes a reachtube (an over-approximation of the set of reachable states over a given time horizon) of a nonlinear dynamical system. LRT-NG significantly advances the state-of-the-art Lagrangian Reachability technique and its associated tool LRT. From a theoretical perspective, LRT-NG is superior to LRT in three ways. First, it uses for the first time an analytically computed metric for the propagated ball, which is proven to minimize the ball’s volume. We emphasize that the metric computation is the centerpiece of all bloating-based techniques.
Second, it computes the next reachset as the intersection of two balls: one based on the Cartesian metric and the other on the new metric. While the two metrics were previously considered opposing approaches, their joint use considerably tightens the reachtubes. Third, it avoids the "wrapping effect" associated with the validated integration of the center of the reachset, by optimally absorbing the interval approximation in the radius of the next ball. From a tool-development perspective, LRT-NG is superior to LRT in two ways. First, it is a standalone tool that no longer relies on CAPD. This required the implementation of the Lohner method and a Runge-Kutta time-propagation method. Second, it has an improved interface, allowing the input model and initial conditions to be provided as external input files. Our experiments on a comprehensive set of benchmarks, including two Neural ODEs, demonstrate LRT-NG’s superior performance compared to LRT, CAPD, and Flow*.}, author = {Gruenbacher, Sophie and Cyranka, Jacek and Lechner, Mathias and Islam, Md Ariful and Smolka, Scott A. and Grosu, Radu}, booktitle = {Proceedings of the 59th IEEE Conference on Decision and Control}, isbn = {9781728174471}, issn = {07431546}, location = {Jeju Island, Korea (South)}, pages = {1556--1563}, publisher = {IEEE}, title = {{Lagrangian reachtubes: The next generation}}, doi = {10.1109/CDC42340.2020.9304042}, volume = {2020}, year = {2020}, } @inproceedings{9221, abstract = {Recent works have shown that gradient descent can find a global minimum for over-parameterized neural networks where the widths of all the hidden layers scale polynomially with N (N being the number of training samples). In this paper, we prove that, for deep networks, a single layer of width N following the input layer suffices to ensure a similar guarantee. In particular, all the remaining layers are allowed to have constant widths, and form a pyramidal topology. We show an application of our result to the widely used LeCun initialization and obtain an over-parameterization requirement for the single wide layer of order N².}, author = {Nguyen, Quynh and Mondelli, Marco}, booktitle = {34th Conference on Neural Information Processing Systems}, location = {Vancouver, Canada}, pages = {11961--11972}, publisher = {Curran Associates}, title = {{Global convergence of deep networks with one wide layer followed by pyramidal topology}}, volume = {33}, year = {2020}, } @inproceedings{9415, abstract = {Optimizing convolutional neural networks for fast inference has recently become an extremely active area of research. One of the go-to solutions in this context is weight pruning, which aims to reduce computational and memory footprint by removing large subsets of the connections in a neural network. Surprisingly, much less attention has been given to exploiting sparsity in the activation maps, which tend to be naturally sparse in many settings thanks to the structure of rectified linear (ReLU) activation functions. In this paper, we present an in-depth analysis of methods for maximizing the sparsity of the activations in a trained neural network, and show that, when coupled with an efficient sparse-input convolution algorithm, we can leverage this sparsity for significant performance gains.
To induce highly sparse activation maps without accuracy loss, we introduce a new regularization technique, coupled with a new threshold-based sparsification method based on a parameterized activation function called Forced-Activation-Threshold Rectified Linear Unit (FATReLU). We examine the impact of our methods on popular image classification models, showing that most architectures can adapt to significantly sparser activation maps without any accuracy loss. Our second contribution is showing that these compression gains can be translated into inference speedups: we provide a new algorithm to enable fast convolution operations over networks with sparse activations, and show that it yields significant speedups for end-to-end inference on a range of popular models on the large-scale ImageNet image classification task on modern Intel CPUs, with little or no retraining cost.}, author = {Kurtz, Mark and Kopinsky, Justin and Gelashvili, Rati and Matveev, Alexander and Carr, John and Goin, Michael and Leiserson, William and Moore, Sage and Nell, Bill and Shavit, Nir and Alistarh, Dan-Adrian}, booktitle = {37th International Conference on Machine Learning, ICML 2020}, issn = {2640-3498}, location = {Online}, pages = {5533--5543}, title = {{Inducing and exploiting activation sparsity for fast neural network inference}}, volume = {119}, year = {2020}, } @inproceedings{10672, abstract = {The family of feedback alignment (FA) algorithms aims to provide a more biologically motivated alternative to backpropagation (BP), by substituting the computations that cannot realistically be implemented in physical brains. While FA algorithms have been shown to work well in practice, there is a lack of rigorous theory proving their learning capabilities. Here we introduce the first feedback alignment algorithm with provable learning guarantees. In contrast to existing work, we do not require any assumption about the size or depth of the network except that it has a single output neuron, as in binary classification tasks. We show that our FA algorithm can deliver on its theoretical promises in practice, surpassing the learning performance of existing FA methods and matching backpropagation in binary classification tasks. Finally, we demonstrate the limits of our FA variant when the number of output neurons grows beyond a certain quantity.}, author = {Lechner, Mathias}, booktitle = {8th International Conference on Learning Representations}, location = {Virtual ; Addis Ababa, Ethiopia}, publisher = {ICLR}, title = {{Learning representations for binary-classification without backpropagation}}, year = {2020}, } @inproceedings{8188, abstract = {A natural approach to generative modeling of videos is to represent them as a composition of moving objects. Recent works model a set of 2D sprites over a slowly-varying background, but without considering the underlying 3D scene that gives rise to them. We instead propose to model a video as the view seen while moving through a scene with multiple 3D objects and a 3D background. Our model is trained from monocular videos without any supervision, yet learns to generate coherent 3D scenes containing several moving objects. We conduct detailed experiments on two datasets, going beyond the visual complexity supported by state-of-the-art generative approaches.
We evaluate our method on depth-prediction and 3D object detection---tasks which cannot be addressed by those earlier works---and show that it outperforms them even on 2D instance segmentation and tracking.}, author = {Henderson, Paul M and Lampert, Christoph}, booktitle = {34th Conference on Neural Information Processing Systems}, isbn = {9781713829546}, location = {Vancouver, Canada}, pages = {3106--3117}, publisher = {Curran Associates}, title = {{Unsupervised object-centric video generation and decomposition in 3D}}, volume = {33}, year = {2020}, } @book{7474, abstract = {This booklet is a collection of abstracts presented at the AHPC conference.}, editor = {Schlögl, Alois and Kiss, Janos and Elefante, Stefano}, isbn = {978-3-99078-004-6}, location = {Klosterneuburg, Austria}, pages = {72}, publisher = {IST Austria}, title = {{Austrian High-Performance-Computing meeting (AHPC2020)}}, doi = {10.15479/AT:ISTA:7474}, year = {2020}, } @inproceedings{7808, abstract = {Quantization converts neural networks into low-bit fixed-point computations which can be carried out by efficient integer-only hardware, and is standard practice for the deployment of neural networks on real-time embedded devices. However, like their real-numbered counterparts, quantized networks are not immune to malicious misclassification caused by adversarial attacks. We investigate how quantization affects a network’s robustness to adversarial attacks, which is a formal verification question. We show that neither robustness nor non-robustness is monotonic in the number of bits used for the representation and, moreover, that neither is preserved by quantization from a real-numbered network. For this reason, we introduce a verification method for quantized neural networks which, using SMT solving over bit-vectors, accounts for their exact, bit-precise semantics. We built a tool and analyzed the effect of quantization on a classifier for the MNIST dataset. We demonstrate that, compared to our method, existing methods for the analysis of real-numbered networks often derive false conclusions about their quantizations, both when determining robustness and when detecting attacks, and that existing methods for quantized networks often miss attacks. Furthermore, we applied our method beyond robustness, showing how the number of bits in quantization enlarges the gender bias of a predictor for students’ grades.}, author = {Giacobbe, Mirco and Henzinger, Thomas A and Lechner, Mathias}, booktitle = {International Conference on Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783030452360}, issn = {16113349}, location = {Dublin, Ireland}, pages = {79--97}, publisher = {Springer Nature}, title = {{How many bits does it take to quantize your neural network?}}, doi = {10.1007/978-3-030-45237-7_5}, volume = {12079}, year = {2020}, } @inproceedings{7952, abstract = {Isomanifolds are the generalization of isosurfaces to arbitrary dimension and codimension, i.e., manifolds defined as the zero set of some multivariate vector-valued smooth function f: ℝ^d → ℝ^(d-n). A natural (and efficient) way to approximate an isomanifold is to consider its Piecewise-Linear (PL) approximation based on a triangulation 𝒯 of the ambient space ℝ^d. In this paper, we give conditions under which the PL-approximation of an isomanifold is topologically equivalent to the isomanifold. The conditions are easy to satisfy in the sense that they can always be met by taking a sufficiently fine triangulation 𝒯.
This contrasts with previous results on the triangulation of manifolds where, in arbitrary dimensions, delicate perturbations are needed to guarantee topological correctness, which leads to strong limitations in practice. We further give a bound on the Fréchet distance between the original isomanifold and its PL-approximation. Finally, we show analogous results for the PL-approximation of an isomanifold with boundary.}, author = {Boissonnat, Jean-Daniel and Wintraecken, Mathijs}, booktitle = {36th International Symposium on Computational Geometry}, isbn = {978-3-95977-143-6}, issn = {1868-8969}, location = {Zürich, Switzerland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{The topological correctness of PL-approximations of isomanifolds}}, doi = {10.4230/LIPIcs.SoCG.2020.20}, volume = {164}, year = {2020}, }