@phdthesis{9728, abstract = {Most real-world flows are multiphase, yet we know little about them compared to their single-phase counterparts. Multiphase flows are more difficult to investigate as their dynamics occur in large parameter space and involve complex phenomena such as preferential concentration, turbulence modulation, non-Newtonian rheology, etc. Over the last few decades, experiments in particle-laden flows have taken a back seat in favour of ever-improving computational resources. However, computers are still not powerful enough to simulate a real-world fluid with millions of finite-size particles. Experiments are essential not only because they offer a reliable way to investigate real-world multiphase flows but also because they serve to validate numerical studies and steer the research in a relevant direction. In this work, we have experimentally investigated particle-laden flows in pipes, and in particular, examined the effect of particles on the laminar-turbulent transition and the drag scaling in turbulent flows. For particle-laden pipe flows, an earlier study [Matas et al., 2003] reported how the sub-critical (i.e., hysteretic) transition that occurs via localised turbulent structures called puffs is affected by the addition of particles. In this study, in addition to this known transition, we found a super-critical transition to a globally fluctuating state with increasing particle concentration. At the same time, the Newtonian-type transition via puffs is delayed to larger Reynolds numbers. At an even higher concentration, only the globally fluctuating state is found. The dynamics of particle-laden flows are hence determined by two competing instabilities that give rise to three flow regimes: Newtonian-type turbulence at low, a particle-induced globally fluctuating state at high, and a coexistence state at intermediate concentrations. The effect of particles on turbulent drag is ambiguous, with studies reporting drag reduction, no net change, and even drag increase. The ambiguity arises because, in addition to particle concentration, particle shape, size, and density also affect the net drag. Even similar particles might affect the flow dissimilarly in different Reynolds number and concentration ranges. In the present study, we explored a wide range of both Reynolds number and concentration, using spherical as well as cylindrical particles. We found that the spherical particles do not reduce drag while the cylindrical particles are drag-reducing within a specific Reynolds number interval. The interval strongly depends on the particle concentration and the relative size of the pipe and particles. Within this interval, the magnitude of drag reduction reaches a maximum. These drag reduction maxima appear to fall onto a distinct power-law curve irrespective of the pipe diameter and particle concentration, and this curve can be considered as the maximum drag reduction asymptote for a given fibre shape. 
Such an asymptote is well known for polymeric flows but had not been identified for particle-laden flows prior to this work.}, author = {Agrawal, Nishchal}, issn = {2663-337X}, keywords = {Drag Reduction, Transition to Turbulence, Multiphase Flows, Particle-Laden Flows, Complex Flows, Experiments, Fluid Dynamics}, pages = {118}, publisher = {Institute of Science and Technology Austria}, title = {{Transition to turbulence and drag reduction in particle-laden pipe flows}}, doi = {10.15479/at:ista:9728}, year = {2021}, } @inproceedings{10673, abstract = {We propose a neural information processing system obtained by re-purposing the function of a biological neural circuit model to govern simulated and real-world control tasks. Inspired by the structure of the nervous system of the soil worm C. elegans, we introduce ordinary neural circuits (ONCs), defined as the model of biological neural circuits reparameterized for the control of alternative tasks. We first demonstrate that ONCs realize networks with higher maximum flow compared to arbitrarily wired networks. We then learn instances of ONCs to control a series of robotic tasks, including the autonomous parking of a real-world rover robot. For reconfiguration of the purpose of the neural circuit, we adopt a search-based optimization algorithm. Ordinary neural circuits perform on par with contemporary deep learning models and, in some cases, significantly surpass their performance. ONC networks are compact, 77% sparser than their counterpart neural controllers, and their neural dynamics are fully interpretable at the cell level.}, author = {Hasani, Ramin and Lechner, Mathias and Amini, Alexander and Rus, Daniela and Grosu, Radu}, booktitle = {Proceedings of the 37th International Conference on Machine Learning}, issn = {2640-3498}, location = {Virtual}, pages = {4082--4093}, title = {{A natural lottery ticket winner: Reinforcement learning with ordinary neural circuits}}, year = {2020}, } @inproceedings{7272, abstract = {Many systems rely on optimistic concurrent search trees for multi-core scalability. In principle, optimistic trees have a simple performance story: searches are read-only and so run in parallel, with writes to shared memory occurring only when modifying the data structure. However, this paper shows that in practice, obtaining the full performance benefits of optimistic search trees is not so simple. We focus on optimistic binary search trees (BSTs) and perform a detailed performance analysis of 10 state-of-the-art BSTs on large-scale x86-64 hardware, using both microbenchmarks and an in-memory database system. We find and explain significant unexpected performance differences between BSTs with similar tree structure and search implementations, which we trace to subtle performance-degrading interactions of BSTs with systems software and hardware subsystems. We further derive a prescriptive approach to avoid this performance degradation, as well as algorithmic insights on optimistic BST design. 
Our work underlines the gap between the theory and practice of multi-core performance, and calls for further research to help bridge this gap.}, author = {Arbel-Raviv, Maya and Brown, Trevor A and Morrison, Adam}, booktitle = {Proceedings of the 2018 USENIX Annual Technical Conference}, isbn = {9781939133021}, location = {Boston, MA, United States}, pages = {295--306}, publisher = {USENIX Association}, title = {{Getting to the root of concurrent binary search tree performance}}, year = {2020}, } @inproceedings{7346, abstract = {The Price of Anarchy (PoA) is a well-established game-theoretic concept to shed light on coordination issues arising in open distributed systems. Leaving agents to selfishly optimize comes with the risk of ending up in sub-optimal states (in terms of performance and/or costs), compared to a centralized system design. However, the PoA relies on strong assumptions about agents' rationality (e.g., resources and information) and interactions, whereas in many distributed systems agents interact locally with bounded resources. They do so repeatedly over time (in contrast to "one-shot games"), and their strategies may evolve. Using a more realistic evolutionary game model, this paper introduces a realized evolutionary Price of Anarchy (ePoA). The ePoA allows an exploration of equilibrium selection in dynamic distributed systems with multiple equilibria, based on local interactions of simple memoryless agents. Considering a fundamental game related to virus propagation on networks, we present analytical bounds on the ePoA in basic network topologies and for different strategy update dynamics. In particular, deriving stationary distributions of the stochastic evolutionary process, we find that the Nash equilibria are not always the most abundant states, and that different processes can feature significant off-equilibrium behavior, leading to a significantly higher ePoA compared to the PoA studied traditionally in the literature. }, author = {Schmid, Laura and Chatterjee, Krishnendu and Schmid, Stefan}, booktitle = {Proceedings of the 23rd International Conference on Principles of Distributed Systems}, location = {Neuchâtel, Switzerland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{The evolutionary price of anarchy: Locally bounded agents in a dynamic virus game}}, doi = {10.4230/LIPIcs.OPODIS.2019.21}, volume = {153}, year = {2020}, } @inproceedings{7348, abstract = {The monitoring of event frequencies can be used to recognize behavioral anomalies, to identify trends, and to deduce or discard hypotheses about the underlying system. For example, the performance of a web server may be monitored based on the ratio of the total count of requests from the least and most active clients. Exact frequency monitoring, however, can be prohibitively expensive; in the above example it would require as many counters as there are clients. In this paper, we propose the efficient probabilistic monitoring of common frequency properties, including the mode (i.e., the most common event) and the median of an event sequence. We define a logic to express composite frequency properties as a combination of atomic frequency properties. Our main contribution is an algorithm that, under suitable probabilistic assumptions, can be used to monitor these important frequency properties with four counters, independent of the number of different events. Our algorithm samples longer and longer subwords of an infinite event sequence. 
We prove the almost-sure convergence of our algorithm by generalizing ergodic theory from increasing-length prefixes to increasing-length subwords of an infinite sequence. A similar algorithm could be used to learn a connected Markov chain of a given structure from observing its outputs, to arbitrary precision, for a given confidence. }, author = {Ferrere, Thomas and Henzinger, Thomas A and Kragl, Bernhard}, booktitle = {28th EACSL Annual Conference on Computer Science Logic}, isbn = {9783959771320}, issn = {1868-8969}, location = {Barcelona, Spain}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Monitoring event frequencies}}, doi = {10.4230/LIPIcs.CSL.2020.20}, volume = {152}, year = {2020}, } @article{7567, abstract = {Coxeter triangulations are triangulations of Euclidean space based on a single simplex. By this we mean that given an individual simplex we can recover the entire triangulation of Euclidean space by inductively reflecting in the faces of the simplex. In this paper we establish that the quality of the simplices in all Coxeter triangulations is O(1/√d) of the quality of the regular simplex. We further investigate the Delaunay property for these triangulations. Moreover, we consider an extension of the Delaunay property, namely protection, which is a measure of non-degeneracy of a Delaunay triangulation. In particular, one family of Coxeter triangulations achieves the protection O(1/d²). We conjecture that both bounds are optimal for triangulations in Euclidean space.}, author = {Choudhary, Aruni and Kachanovich, Siargey and Wintraecken, Mathijs}, issn = {1661-8289}, journal = {Mathematics in Computer Science}, pages = {141--176}, publisher = {Springer Nature}, title = {{Coxeter triangulations have good quality}}, doi = {10.1007/s11786-020-00461-5}, volume = {14}, year = {2020}, } @article{7594, abstract = {The concept of the entanglement between spin and orbital degrees of freedom plays a crucial role in our understanding of various phases and exotic ground states in a broad class of materials, including orbitally ordered materials and spin liquids. We investigate how the spin-orbital entanglement in a Mott insulator depends on the value of the spin-orbit coupling of relativistic origin. To this end, we numerically diagonalize a one-dimensional spin-orbital model with Kugel-Khomskii exchange interactions between spins and orbitals on different sites supplemented by the on-site spin-orbit coupling. In the regime of small spin-orbit coupling with regard to the spin-orbital exchange, the ground state to a large extent resembles the one obtained in the limit of vanishing spin-orbit coupling. On the other hand, for large spin-orbit coupling the ground state can, depending on the model parameters, either still show negligible spin-orbital entanglement or evolve to a highly spin-orbitally-entangled phase with completely distinct properties that are described by an effective XXZ model. The presented results suggest that (i) the spin-orbital entanglement may be induced by large on-site spin-orbit coupling, as found in the 5d transition metal oxides, such as the iridates; (ii) for Mott insulators with weak spin-orbit coupling of Ising type, such as, e.g., the alkali hyperoxides, the effects of the spin-orbit coupling on the ground state can, in the first order of perturbation theory, be neglected.}, author = {Gotfryd, Dorota and Paerschke, Ekaterina and Chaloupka, Jiri and Oles, Andrzej M. 
and Wohlfeld, Krzysztof}, journal = {Physical Review Research}, number = {1}, publisher = {American Physical Society}, title = {{How spin-orbital entanglement depends on the spin-orbit coupling in a Mott insulator}}, doi = {10.1103/PhysRevResearch.2.013353}, volume = {2}, year = {2020}, } @inproceedings{7605, abstract = {Union-Find (or Disjoint-Set Union) is one of the fundamental problems in computer science; it has been well-studied from both theoretical and practical perspectives in the sequential case. Recently, there has been mounting interest in analyzing this problem in the concurrent scenario, and several asymptotically-efficient algorithms have been proposed. Yet, to date, very little is known about the practical performance of concurrent Union-Find. This work addresses this gap. We evaluate and analyze the performance of several concurrent Union-Find algorithms and optimization strategies across a wide range of platforms (Intel, AMD, and ARM) and workloads (social, random, and road networks, as well as integrations into more complex algorithms). We first observe that, due to the limited computational cost, the number of induced cache misses is the critical determining factor for the performance of existing algorithms. We introduce new techniques to reduce this cost by storing node priorities implicitly and by using plain reads and writes in a way that does not affect the correctness of the algorithms. Finally, we show that Union-Find implementations are an interesting application for Transactional Memory (TM): one of the fastest algorithm variants we discovered is a sequential one that uses coarse-grained locking with the lock elision optimization to reduce synchronization cost and increase scalability. }, author = {Alistarh, Dan-Adrian and Fedorov, Alexander and Koval, Nikita}, booktitle = {23rd International Conference on Principles of Distributed Systems}, isbn = {9783959771337}, issn = {18688969}, location = {Neuchâtel, Switzerland}, pages = {15:1--15:16}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{In search of the fastest concurrent union-find algorithm}}, doi = {10.4230/LIPIcs.OPODIS.2019.15}, volume = {153}, year = {2020}, } @unpublished{7601, abstract = {Plasmodesmata (PD) are crucial structures for intercellular communication in multicellular plants, with remorins being their crucial plant-specific structural and functional constituents. PD biogenesis is an intriguing but poorly understood process. By expressing an Arabidopsis remorin protein in mammalian cells, we have reconstituted a PD-like filamentous structure, termed remorin filament (RF), connecting neighboring cells physically and physiologically. Notably, RFs are capable of transporting macromolecules intercellularly, in a way similar to plant PD. With further super-resolution microscopic analysis and biochemical characterization, we found that RFs are also composed of actin filaments, forming the core skeleton structure, aligned with the remorin protein. This unique heterologous filamentous structure might explain the molecular mechanism for remorin function as well as PD construction. Furthermore, the remorin protein exhibits a specific distribution pattern in the plasma membrane of mammalian cells, representing a lipid nanodomain, depending on its lipid modification status. 
Our studies not only provide crucial insights into the mechanism of PD biogenesis, but also uncover unsuspected fundamental mechanistic and evolutionary links between the intercellular communication systems of plants and animals.}, author = {Wei, Zhuang and Tan, Shutang and Liu, Tao and Wu, Yuan and Lei, Ji-Gang and Chen, ZhengJun and Friml, Jiří and Xue, Hong-Wei and Liao, Kan}, booktitle = {bioRxiv}, pages = {22}, publisher = {Cold Spring Harbor Laboratory}, title = {{Plasmodesmata-like intercellular connections by plant remorin in animal cells}}, doi = {10.1101/791137}, year = {2020}, } @article{7651, abstract = {The growth of snail shells can be described by simple mathematical rules. Variation in a few parameters can explain much of the diversity of shell shapes seen in nature. However, empirical studies of gastropod shell shape variation typically use geometric morphometric approaches, which do not capture this growth pattern. We have developed a way to infer a set of developmentally descriptive shape parameters based on three-dimensional logarithmic helicospiral growth and using landmarks from two-dimensional shell images as input. We demonstrate the utility of this approach, and compare it to the geometric morphometric approach, using a large set of Littorina saxatilis shells in which locally adapted populations differ in shape. Our method can be modified easily to make it applicable to a wide range of shell forms, which would allow for investigations of the similarities and differences between and within many different species of gastropods.}, author = {Larsson, J. and Westram, Anja M and Bengmark, S. and Lundh, T. and Butlin, R. K.}, issn = {1742-5662}, journal = {Journal of The Royal Society Interface}, number = {163}, publisher = {The Royal Society}, title = {{A developmentally descriptive method for quantifying shape in gastropod shells}}, doi = {10.1098/rsif.2019.0721}, volume = {17}, year = {2020}, } @inproceedings{7803, abstract = {We settle the complexity of the (Δ+1)-coloring and (Δ+1)-list coloring problems in the CONGESTED CLIQUE model by presenting a simple deterministic algorithm for both problems running in a constant number of rounds. This matches the complexity of the recent breakthrough randomized constant-round (Δ+1)-list coloring algorithm due to Chang et al. (PODC'19), and significantly improves upon the state-of-the-art O(log Δ)-round deterministic (Δ+1)-coloring bound of Parter (ICALP'18). A remarkable property of our algorithm is its simplicity. Whereas the state-of-the-art randomized algorithms for this problem are based on the quite involved local coloring algorithm of Chang et al. (STOC'18), our algorithm can be described in just a few lines. At a high level, it applies a careful derandomization of a recursive procedure which partitions the nodes and their respective palettes into separate bins. We show that after O(1) recursion steps, the remaining uncolored subgraph within each bin has linear size, and thus can be solved locally by collecting it to a single node. This algorithm can also be implemented in the Massively Parallel Computation (MPC) model provided that each machine has linear (in n, the number of nodes in the input graph) space. 
We also show an extension of our algorithm to the MPC regime in which machines have sublinear space: we present the first deterministic (Δ+1)-list coloring algorithm designed for sublinear-space MPC, which runs in O(log Δ + log log n) rounds.}, author = {Czumaj, Artur and Davies, Peter and Parter, Merav}, booktitle = {Proceedings of the 2020 ACM Symposium on Principles of Distributed Computing}, location = {Salerno, Italy}, pages = {309--318}, publisher = {Association for Computing Machinery}, title = {{Simple, deterministic, constant-round coloring in the congested clique}}, doi = {10.1145/3382734.3405751}, year = {2020}, } @inproceedings{7806, abstract = {We consider the following decision problem EMBEDk→d in computational topology (where k ≤ d are fixed positive integers): Given a finite simplicial complex K of dimension k, does there exist a (piecewise-linear) embedding of K into ℝd? The special case EMBED1→2 is graph planarity, which is decidable in linear time, as shown by Hopcroft and Tarjan. In higher dimensions, EMBED2→3 and EMBED3→3 are known to be decidable (as well as NP-hard), and recent results of Čadek et al. in computational homotopy theory, in combination with the classical Haefliger–Weber theorem in geometric topology, imply that EMBEDk→d can be solved in polynomial time for any fixed pair (k, d) of dimensions in the so-called metastable range d ≥ 3(k+1)/2. Here, by contrast, we prove that EMBEDk→d is algorithmically undecidable for almost all pairs of dimensions outside the metastable range. This almost completely resolves the decidability vs. undecidability of EMBEDk→d in higher dimensions and establishes a sharp dichotomy between polynomial-time solvability and undecidability. Our result complements (and in a wide range of dimensions strengthens) earlier results of Matoušek, Tancer, and the second author, who showed that EMBEDk→d is undecidable for 4 ≤ k ∈ {d-1, d}, and NP-hard for all remaining pairs (k, d) outside the metastable range and satisfying d ≥ 4.}, author = {Filakovský, Marek and Wagner, Uli and Zhechev, Stephan Y}, booktitle = {Proceedings of the Annual ACM-SIAM Symposium on Discrete Algorithms}, isbn = {9781611975994}, location = {Salt Lake City, UT, United States}, pages = {767--785}, publisher = {SIAM}, title = {{Embeddability of simplicial complexes is undecidable}}, doi = {10.1137/1.9781611975994.47}, volume = {2020-January}, year = {2020}, } @article{7814, abstract = {Scientific research is to date largely restricted to wealthy laboratories in developed nations due to the necessity of complex and expensive equipment. This inequality limits the capacity of science to be used as a diplomatic channel. Maker movements use open-source technologies including additive manufacturing (3D printing) and laser cutting, together with low-cost computers, for developing novel products. This movement is setting the groundwork for a revolution, allowing scientific equipment to be sourced at a fraction of the cost, and has the potential to increase the availability of equipment for scientists around the world. Science education is increasingly recognized as another channel for science diplomacy. In this perspective, we introduce the idea that the Maker movement and open-source technologies have the potential to revolutionize science, technology, engineering and mathematics (STEM) education worldwide. We present an open-source STEM didactic tool called SCOPES (Sparking Curiosity through Open-source Platforms in Education and Science). 
SCOPES is self-contained, independent of local resources, and cost-effective. SCOPES can be adapted to communicate complex subjects from genetics to neurobiology, perform real-world biological experiments and explore digitized scientific samples. We envision such platforms will enhance science diplomacy by providing a means for scientists to share their findings with classrooms and for educators to incorporate didactic concepts into STEM lessons. By providing students the opportunity to design, perform, and share scientific experiments, students also experience firsthand the benefits of a multinational scientific community. We provide instructions on how to build and use SCOPES on our webpage: http://scopeseducation.org.}, author = {Beattie, Robert J and Hippenmeyer, Simon and Pauler, Florian}, issn = {2504-284X}, journal = {Frontiers in Education}, publisher = {Frontiers Media}, title = {{SCOPES: Sparking curiosity through Open-Source platforms in education and science}}, doi = {10.3389/feduc.2020.00048}, volume = {5}, year = {2020}, } @article{7866, abstract = {In this paper, we establish convergence to equilibrium for a drift–diffusion–recombination system modelling the charge transport within certain semiconductor devices. More precisely, we consider a two-level system for electrons and holes which is augmented by an intermediate energy level for electrons in so-called trapped states. The recombination dynamics use the mass action principle by taking into account this additional trap level. The main part of the paper is concerned with the derivation of an entropy–entropy production inequality, which entails exponential convergence to the equilibrium via the so-called entropy method. The novelty of our approach lies in the fact that the entropy method is applied uniformly in a fast-reaction parameter which governs the lifetime of electrons on the trap level. Thus, the resulting decay estimate for the densities of electrons and holes extends to the corresponding quasi-steady-state approximation.}, author = {Fellner, Klemens and Kniely, Michael}, issn = {22969039}, journal = {Journal of Elliptic and Parabolic Equations}, pages = {529--598}, publisher = {Springer Nature}, title = {{Uniform convergence to equilibrium for a family of drift–diffusion models with trap-assisted recombination and the limiting Shockley–Read–Hall model}}, doi = {10.1007/s41808-020-00068-8}, volume = {6}, year = {2020}, } @article{7919, abstract = {We explore the time evolution of two impurities in a trapped one-dimensional Bose gas that follows a change of the boson-impurity interaction. We study the induced impurity-impurity interactions and their effect on the quench dynamics. In particular, we report on the size of the impurity cloud, the impurity-impurity entanglement, and the impurity-impurity correlation function. The presented numerical simulations are based upon the variational multilayer multiconfiguration time-dependent Hartree method for bosons. To analyze and quantify induced impurity-impurity correlations, we employ an effective two-body Hamiltonian with a contact interaction. We show that the effective model consistent with the mean-field attraction of two heavy impurities explains qualitatively our results for weak interactions. Our findings suggest that the quench dynamics in cold-atom systems can be a tool for studying impurity-impurity correlations.}, author = {Mistakidis, S. I. 
and Volosniev, Artem and Schmelcher, P.}, issn = {2643-1564}, journal = {Physical Review Research}, publisher = {American Physical Society}, title = {{Induced correlations between impurities in a one-dimensional quenched Bose gas}}, doi = {10.1103/physrevresearch.2.023154}, volume = {2}, year = {2020}, } @inproceedings{7991, abstract = {We define and study a discrete process that generalizes the convex-layer decomposition of a planar point set. Our process, which we call homotopic curve shortening (HCS), starts with a closed curve (which might self-intersect) in the presence of a set P ⊂ ℝ² of point obstacles, and evolves in discrete steps, where each step consists of (1) taking shortcuts around the obstacles, and (2) reducing the curve to its shortest homotopic equivalent. We find experimentally that, if the initial curve is held fixed and P is chosen to be either a very fine regular grid or a uniformly random point set, then HCS behaves at the limit like the affine curve-shortening flow (ACSF). This connection between HCS and ACSF generalizes the link between "grid peeling" and the ACSF observed by Eppstein et al. (2017), which applied only to convex curves, and which was studied only for regular grids. We prove that HCS satisfies some properties analogous to those of ACSF: HCS is invariant under affine transformations, preserves convexity, and does not increase the total absolute curvature. Furthermore, the number of self-intersections of a curve, or intersections between two curves (appropriately defined), does not increase. Finally, if the initial curve is simple, then the number of inflection points (appropriately defined) does not increase.}, author = {Avvakumov, Sergey and Nivasch, Gabriel}, booktitle = {36th International Symposium on Computational Geometry}, isbn = {9783959771436}, issn = {18688969}, location = {Zürich, Switzerland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Homotopic curve shortening and the affine curve-shortening flow}}, doi = {10.4230/LIPIcs.SoCG.2020.12}, volume = {164}, year = {2020}, } @inproceedings{7989, abstract = {We prove general topological Radon-type theorems for sets in ℝ^d, smooth real manifolds or finite dimensional simplicial complexes. Combined with a recent result of Holmsen and Lee, this gives a fractional Helly theorem, and consequently the existence of weak ε-nets as well as a (p,q)-theorem. More precisely: Let X be either ℝ^d, a smooth real d-manifold, or a finite d-dimensional simplicial complex. Then if F is a finite, intersection-closed family of sets in X such that the ith reduced Betti number (with ℤ₂ coefficients) of any set in F is at most b for every non-negative integer i less than or equal to k, then the Radon number of F is bounded in terms of b and X. Here k is the smallest integer greater than or equal to d/2 - 1 if X = ℝ^d; k=d-1 if X is a smooth real d-manifold and not a surface, k=0 if X is a surface and k=d if X is a d-dimensional simplicial complex. Using the recent result of the author and Kalai, we manage to prove the following optimal bound on the fractional Helly number for families of open sets in a surface: Let F be a finite family of open sets in a surface S such that the intersection of any subfamily of F is either empty or path-connected. Then the fractional Helly number of F is at most three. 
This also settles a conjecture of Holmsen, Kim, and Lee about the existence of a (p,q)-theorem for open subsets of a surface.}, author = {Patakova, Zuzana}, booktitle = {36th International Symposium on Computational Geometry}, isbn = {9783959771436}, issn = {18688969}, location = {Zürich, Switzerland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Bounding Radon number via Betti numbers}}, doi = {10.4230/LIPIcs.SoCG.2020.61}, volume = {164}, year = {2020}, } @inproceedings{7992, abstract = {Let K be a convex body in ℝⁿ (i.e., a compact convex set with nonempty interior). Given a point p in the interior of K, a hyperplane h passing through p is called barycentric if p is the barycenter of K ∩ h. In 1961, Grünbaum raised the question of whether, for every K, there exists an interior point p through which there are at least n+1 distinct barycentric hyperplanes. Two years later, this was seemingly resolved affirmatively by showing that this is the case if p=p₀ is the point of maximal depth in K. However, while working on a related question, we noticed that one of the auxiliary claims in the proof is incorrect. Here, we provide a counterexample; this re-opens Grünbaum’s question. It follows from known results that for n ≥ 2, there are always at least three distinct barycentric cuts through the point p₀ ∈ K of maximal depth. Using tools related to Morse theory we are able to improve this bound: four distinct barycentric cuts through p₀ are guaranteed if n ≥ 3.}, author = {Patakova, Zuzana and Tancer, Martin and Wagner, Uli}, booktitle = {36th International Symposium on Computational Geometry}, isbn = {9783959771436}, issn = {18688969}, location = {Zürich, Switzerland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Barycentric cuts through a convex body}}, doi = {10.4230/LIPIcs.SoCG.2020.62}, volume = {164}, year = {2020}, } @inproceedings{7994, abstract = {In the recent study of crossing numbers, drawings of graphs that can be extended to an arrangement of pseudolines (pseudolinear drawings) have played an important role as they are a natural combinatorial extension of rectilinear (or straight-line) drawings. A characterization of the pseudolinear drawings of K_n was found recently. We extend this characterization to all graphs, by describing the set of minimal forbidden subdrawings for pseudolinear drawings. Our characterization also leads to a polynomial-time algorithm to recognize pseudolinear drawings and construct the pseudolines when it is possible.}, author = {Arroyo Guevara, Alan M and Bensmail, Julien and Bruce Richter, R.}, booktitle = {36th International Symposium on Computational Geometry}, isbn = {9783959771436}, issn = {18688969}, location = {Zürich, Switzerland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Extending drawings of graphs to arrangements of pseudolines}}, doi = {10.4230/LIPIcs.SoCG.2020.9}, volume = {164}, year = {2020}, } @article{8011, abstract = {Relaxation to a thermal state is the inevitable fate of nonequilibrium interacting quantum systems without special conservation laws. While thermalization in one-dimensional systems can often be suppressed by integrability mechanisms, in two spatial dimensions thermalization is expected to be far more effective due to the increased phase space. 
In this work we propose a general framework for escaping or delaying the emergence of the thermal state in two-dimensional arrays of Rydberg atoms via the mechanism of quantum scars, i.e., initial states that fail to thermalize. The suppression of thermalization is achieved in two complementary ways: by adding local perturbations or by adjusting the driving Rabi frequency according to the local connectivity of the lattice. We demonstrate that these mechanisms allow us to realize robust quantum scars in various two-dimensional lattices, including decorated lattices with nonconstant connectivity. In particular, we show that a small decrease of the Rabi frequency at the corners of the lattice is crucial for mitigating the strong boundary effects in two-dimensional systems. Our results identify synchronization as an important tool for future experiments on two-dimensional quantum scars.}, author = {Michailidis, Alexios and Turner, C. J. and Papić, Z. and Abanin, D. A. and Serbyn, Maksym}, issn = {2643-1564}, journal = {Physical Review Research}, number = {2}, publisher = {American Physical Society}, title = {{Stabilizing two-dimensional quantum scars by deformation and synchronization}}, doi = {10.1103/physrevresearch.2.022065}, volume = {2}, year = {2020}, } @unpublished{8063, abstract = {We present a generative model of images that explicitly reasons over the set of objects they show. Our model learns a structured latent representation that separates objects from each other and from the background; unlike prior works, it explicitly represents the 2D position and depth of each object, as well as an embedding of its segmentation mask and appearance. The model can be trained from images alone in a purely unsupervised fashion without the need for object masks or depth information. Moreover, it always generates complete objects, even though a significant fraction of training images contain occlusions. Finally, we show that our model can infer decompositions of novel images into their constituent objects, including accurate prediction of depth ordering and segmentation of occluded parts.}, author = {Anciukevicius, Titas and Lampert, Christoph and Henderson, Paul M}, booktitle = {arXiv}, title = {{Object-centric image generation with factored depths, locations, and appearances}}, year = {2020}, } @unpublished{8081, abstract = {Here, we employ micro- and nanosized cellulose particles, namely paper fines and cellulose nanocrystals, to induce hierarchical organization over a wide length scale. After processing them into carbonaceous materials, we demonstrate that these hierarchically organized materials outperform the best materials for supercapacitors operating with organic electrolytes reported in the literature in terms of specific energy/power (Ragone plot) while showing hardly any capacity fade over 4,000 cycles. The highly porous materials feature a specific surface area as high as 2500 m²·g⁻¹ and exhibit pore sizes in the range of 0.5 to 200 nm, as proven by scanning electron microscopy and N₂ physisorption. The carbonaceous materials have been further investigated by X-ray photoelectron spectroscopy and Raman spectroscopy. Since paper fines are an underutilized side stream in any paper production process, they are a cheap and highly available feedstock to prepare carbonaceous materials with outstanding performance in electrochemical applications. }, author = {Hobisch, Mathias A. and Mourad, Eléonore and Fischer, Wolfgang J. 
and Prehal, Christian and Eyley, Samuel and Childress, Anthony and Zankel, Armin and Mautner, Andreas and Breitenbach, Stefan and Rao, Apparao M. and Thielemans, Wim and Freunberger, Stefan Alexander and Eckhart, Rene and Bauer, Wolfgang and Spirk, Stefan }, title = {{High specific capacitance supercapacitors from hierarchically organized all-cellulose composites}}, year = {2020}, } @article{8105, abstract = {Physical and biological systems often exhibit intermittent dynamics with bursts or avalanches (active states) characterized by power-law size and duration distributions. These emergent features are typical of systems at the critical point of continuous phase transitions, and have led to the hypothesis that such systems may self-organize at criticality, i.e., without any fine tuning of parameters. Since the introduction of the Bak-Tang-Wiesenfeld (BTW) model, the paradigm of self-organized criticality (SOC) has been very fruitful for the analysis of emergent collective behaviors in a number of systems, including the brain. Although considerable effort has been devoted to identifying and modeling scaling features of burst and avalanche statistics, dynamical aspects related to the temporal organization of bursts often remain poorly understood or controversial. Of crucial importance for understanding the mechanisms responsible for emergent behaviors is the relationship between active and quiet periods, and the nature of the correlations. Here we investigate the dynamics of active (θ-bursts) and quiet states (δ-bursts) in brain activity during the sleep-wake cycle. We show that the duality of power-law (θ, active phase) and exponential-like (δ, quiescent phase) duration distributions, typical of SOC, jointly emerges with power-law temporal correlations and anti-correlated coupling between active and quiet states. Importantly, we demonstrate that such temporal organization shares important similarities with earthquake dynamics, and propose that specific power-law correlations and coupling between active and quiet states are distinctive characteristics of a class of systems with self-organization at criticality.}, author = {Lombardi, Fabrizio and Wang, Jilin W.J.L. and Zhang, Xiyun and Ivanov, Plamen Ch}, issn = {2100-014X}, journal = {EPJ Web of Conferences}, publisher = {EDP Sciences}, title = {{Power-law correlations and coupling of active and quiet states underlie a class of complex systems with self-organization at criticality}}, doi = {10.1051/epjconf/202023000005}, volume = {230}, year = {2020}, } @inproceedings{8135, abstract = {Discrete Morse theory has recently led to new developments in the theory of random geometric complexes. This article surveys the methods and results obtained with this new approach, and discusses some of its shortcomings. 
It uses simulations to illustrate the results and to form conjectures, getting numerical estimates for combinatorial, topological, and geometric properties of weighted and unweighted Delaunay mosaics, their dual Voronoi tessellations, and the Alpha and Wrap complexes contained in the mosaics.}, author = {Edelsbrunner, Herbert and Nikitenko, Anton and Ölsböck, Katharina and Synak, Peter}, booktitle = {Topological Data Analysis}, isbn = {9783030434076}, issn = {21978549}, pages = {181--218}, publisher = {Springer Nature}, title = {{Radius functions on Poisson–Delaunay mosaics and related complexes experimentally}}, doi = {10.1007/978-3-030-43408-3_8}, volume = {15}, year = {2020}, } @misc{8181, author = {Hauschild, Robert}, publisher = {IST Austria}, title = {{Amplified centrosomes in dendritic cells promote immune cell effector functions}}, doi = {10.15479/AT:ISTA:8181}, year = {2020}, } @misc{8294, abstract = {Automated root growth analysis and tracking of root tips.}, author = {Hauschild, Robert}, publisher = {IST Austria}, title = {{RGtracker}}, doi = {10.15479/AT:ISTA:8294}, year = {2020}, } @inproceedings{8322, abstract = {Reverse firewalls were introduced at Eurocrypt 2015 by Mironov and Stephens-Davidowitz, as a method for protecting cryptographic protocols against attacks on the devices of the honest parties. In a nutshell: a reverse firewall is placed outside of a device and its goal is to “sanitize” the messages sent by it, in such a way that a malicious device cannot leak its secrets to the outside world. It is typically assumed that the cryptographic devices are attacked in a “functionality-preserving way” (i.e., informally speaking, the functionality of the protocol remains unchanged under these attacks). In their paper, Mironov and Stephens-Davidowitz construct a protocol for passively-secure two-party computations with firewalls, leaving the extension of this result to stronger models as an open question. In this paper, we address this problem by constructing a protocol for secure computation with firewalls that has two main advantages over the original protocol from Eurocrypt 2015. Firstly, it is a multiparty computation protocol (i.e., it works for an arbitrary number n of parties, not just for 2). Secondly, it is secure in much stronger corruption settings, namely in the active corruption model. More precisely: we consider an adversary that can fully corrupt up to n−1 parties, while the remaining parties are corrupted in a functionality-preserving way. Our core techniques are malleable commitments and malleable non-interactive zero-knowledge, which in particular allow us to create a novel protocol for multiparty augmented coin-tossing into the well with reverse firewalls (that is based on a protocol of Lindell from Crypto 2001).}, author = {Chakraborty, Suvradip and Dziembowski, Stefan and Nielsen, Jesper Buus}, booktitle = {Advances in Cryptology – CRYPTO 2020}, isbn = {9783030568795}, issn = {16113349}, location = {Santa Barbara, CA, United States}, pages = {732--762}, publisher = {Springer Nature}, title = {{Reverse firewalls for actively secure MPCs}}, doi = {10.1007/978-3-030-56880-1_26}, volume = {12171}, year = {2020}, } @inproceedings{8339, abstract = {Discrete Gaussian distributions over lattices are central to lattice-based cryptography, and to the computational and mathematical aspects of lattices more broadly. The literature contains a wealth of useful theorems about the behavior of discrete Gaussians under convolutions and related operations. 
Yet despite their structural similarities, most of these theorems are formally incomparable, and their proofs tend to be monolithic and written nearly “from scratch,” making them unnecessarily hard to verify, understand, and extend. In this work we present a modular framework for analyzing linear operations on discrete Gaussian distributions. The framework abstracts away the particulars of Gaussians, and usually reduces proofs to the choice of appropriate linear transformations and elementary linear algebra. To showcase the approach, we establish several general properties of discrete Gaussians, and show how to obtain all prior convolution theorems (along with some new ones) as straightforward corollaries. As another application, we describe a self-reduction for Learning With Errors (LWE) that uses a fixed number of samples to generate an unlimited number of additional ones (having somewhat larger error). The distinguishing features of our reduction are its simple analysis in our framework, and its exclusive use of discrete Gaussians without any loss in parameters relative to a prior mixed discrete-and-continuous approach. As a contribution of independent interest, for subgaussian random matrices we prove a singular value concentration bound with explicitly stated constants, and we give tighter heuristics for specific distributions that are commonly used for generating lattice trapdoors. These bounds yield improvements in the concrete bit-security estimates for trapdoor lattice cryptosystems.}, author = {Genise, Nicholas and Micciancio, Daniele and Peikert, Chris and Walter, Michael}, booktitle = {23rd IACR International Conference on the Practice and Theory of Public-Key Cryptography}, isbn = {9783030453732}, issn = {16113349}, location = {Edinburgh, United Kingdom}, pages = {623--651}, publisher = {Springer Nature}, title = {{Improved discrete Gaussian and subgaussian analysis for lattice cryptography}}, doi = {10.1007/978-3-030-45374-9_21}, volume = {12110}, year = {2020}, } @inproceedings{8572, abstract = {We present the results of the ARCH 2020 friendly competition for formal verification of continuous and hybrid systems with linear continuous dynamics. In its fourth edition, eight tools have been applied to solve eight different benchmark problems in the category for linear continuous dynamics (in alphabetical order): CORA, C2E2, HyDRA, Hylaa, Hylaa-Continuous, JuliaReach, SpaceEx, and XSpeed. This report is a snapshot of the current landscape of tools and the types of benchmarks they are particularly suited for. Due to the diversity of problems, we are not ranking tools, yet the presented results provide one of the most complete assessments of tools for the safety verification of continuous and hybrid systems with linear continuous dynamics up to this date.}, author = {Althoff, Matthias and Bak, Stanley and Bao, Zongnan and Forets, Marcelo and Frehse, Goran and Freire, Daniel and Kochdumper, Niklas and Li, Yangge and Mitra, Sayan and Ray, Rajarshi and Schilling, Christian and Schupp, Stefan and Wetzlinger, Mark}, booktitle = {EPiC Series in Computing}, pages = {16--48}, publisher = {EasyChair}, title = {{ARCH-COMP20 Category Report: Continuous and hybrid systems with linear dynamics}}, doi = {10.29007/7dt2}, volume = {74}, year = {2020}, } @inproceedings{8571, abstract = {We present the results of a friendly competition for formal verification of continuous and hybrid systems with nonlinear continuous dynamics. 
The friendly competition took place as part of the workshop Applied Verification for Continuous and Hybrid Systems (ARCH) in 2020. This year, six tools participated (in alphabetical order): Ariadne, CORA, DynIbex, Flow*, Isabelle/HOL, and JuliaReach. These tools are applied to solve reachability analysis problems on six benchmark problems, two of them featuring hybrid dynamics. We do not rank the tools based on the results, but show the current status and discover the potential advantages of different tools.}, author = {Geretti, Luca and Alexandre Dit Sandretto, Julien and Althoff, Matthias and Benet, Luis and Chapoutot, Alexandre and Chen, Xin and Collins, Pieter and Forets, Marcelo and Freire, Daniel and Immler, Fabian and Kochdumper, Niklas and Sanders, David and Schilling, Christian}, booktitle = {EPiC Series in Computing}, pages = {49--75}, publisher = {EasyChair}, title = {{ARCH-COMP20 Category Report: Continuous and hybrid systems with nonlinear dynamics}}, doi = {10.29007/zkf6}, volume = {74}, year = {2020}, } @inproceedings{8600, abstract = {A vector addition system with states (VASS) consists of a finite set of states and counters. A transition changes the current state to the next state, and every counter is either incremented, or decremented, or left unchanged. A state and value for each counter is a configuration; and a computation is an infinite sequence of configurations with transitions between successive configurations. A probabilistic VASS consists of a VASS along with a probability distribution over the transitions for each state. Qualitative properties such as state and configuration reachability have been widely studied for VASS. In this work we consider multi-dimensional long-run average objectives for VASS and probabilistic VASS. For a counter, the cost of a configuration is the value of the counter; and the long-run average value of a computation for the counter is the long-run average of the costs of the configurations in the computation. The multi-dimensional long-run average problem, given a VASS and a threshold value for each counter, asks whether there is a computation such that for each counter the long-run average value for the counter does not exceed the respective threshold. For probabilistic VASS, instead of the existence of a computation, we consider whether the expected long-run average value for each counter does not exceed the respective threshold. Our main results are as follows: we show that the multi-dimensional long-run average problem (a) is NP-complete for integer-valued VASS; (b) is undecidable for natural-valued VASS (i.e., nonnegative counters); and (c) can be solved in polynomial time for probabilistic integer-valued VASS, and probabilistic natural-valued VASS when all computations are non-terminating.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, booktitle = {31st International Conference on Concurrency Theory}, isbn = {9783959771603}, issn = {18688969}, location = {Virtual}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Multi-dimensional long-run average problems for vector addition systems with states}}, doi = {10.4230/LIPIcs.CONCUR.2020.23}, volume = {171}, year = {2020}, } @inproceedings{8599, abstract = {A graph game is a two-player zero-sum game in which the players move a token throughout a graph to produce an infinite path, which determines the winner or payoff of the game. 
In bidding games, both players have budgets, and in each turn, we hold an "auction" (bidding) to determine which player moves the token. In this survey, we consider several bidding mechanisms and study their effect on the properties of the game. Specifically, bidding games, and in particular bidding games of infinite duration, have an intriguing equivalence with random-turn games in which in each turn, the player who moves is chosen randomly. We show how minor changes in the bidding mechanism lead to unexpected differences in the equivalence with random-turn games.}, author = {Avni, Guy and Henzinger, Thomas A}, booktitle = {31st International Conference on Concurrency Theory}, isbn = {9783959771603}, issn = {18688969}, location = {Virtual}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{A survey of bidding games on graphs}}, doi = {10.4230/LIPIcs.CONCUR.2020.2}, volume = {171}, year = {2020}, } @inproceedings{8725, abstract = {The design and implementation of efficient concurrent data structures have seen significant attention. However, most of this work has focused on concurrent data structures providing good \emph{worst-case} guarantees. In real workloads, objects are often accessed at different rates, since access distributions may be non-uniform. Efficient distribution-adaptive data structures are known in the sequential case, e.g. the splay-trees; however, they often are hard to translate efficiently in the concurrent case. In this paper, we investigate distribution-adaptive concurrent data structures and propose a new design called the splay-list. At a high level, the splay-list is similar to a standard skip-list, with the key distinction that the height of each element adapts dynamically to its access rate: popular elements ``move up,'' whereas rarely-accessed elements decrease in height. We show that the splay-list provides order-optimal amortized complexity bounds for a subset of operations while being amenable to efficient concurrent implementation. Experimental results show that the splay-list can leverage distribution-adaptivity to improve on the performance of classic concurrent designs, and can outperform the only previously-known distribution-adaptive design in certain settings.}, author = {Aksenov, Vitaly and Alistarh, Dan-Adrian and Drozdova, Alexandra and Mohtashami, Amirkeivan}, booktitle = {34th International Symposium on Distributed Computing}, isbn = {9783959771689}, issn = {1868-8969}, location = {Freiburg, Germany}, pages = {3:1--3:18}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{The splay-list: A distribution-adaptive concurrent skip-list}}, doi = {10.4230/LIPIcs.DISC.2020.3}, volume = {179}, year = {2020}, } @article{8726, abstract = {Several realistic spin-orbital models for transition metal oxides go beyond the classical expectations and could be understood only by employing the quantum entanglement. Experiments on these materials confirm that spin-orbital entanglement has measurable consequences. Here, we capture the essential features of spin-orbital entanglement in complex quantum matter utilizing 1D spin-orbital model which accommodates SU(2)⊗SU(2) symmetric Kugel-Khomskii superexchange as well as the Ising on-site spin-orbit coupling. Building on the results obtained for full and effective models in the regime of strong spin-orbit coupling, we address the question whether the entanglement found on superexchange bonds always increases when the Ising spin-orbit coupling is added. 
We show that (i) quantum entanglement is amplified by strong spin-orbit coupling and, surprisingly, (ii) almost classical disentangled states are possible. We complete the latter case by analyzing how the entanglement existing for intermediate values of spin-orbit coupling can disappear for higher values of this coupling.}, author = {Gotfryd, Dorota and Paerschke, Ekaterina and Wohlfeld, Krzysztof and Oleś, Andrzej M.}, issn = {2410-3896}, journal = {Condensed Matter}, number = {3}, publisher = {MDPI}, title = {{Evolution of spin-orbital entanglement with increasing Ising spin-orbit coupling}}, doi = {10.3390/condmat5030053}, volume = {5}, year = {2020}, } @inproceedings{9040, abstract = {Machine learning and formal methods have complementary benefits and drawbacks. In this work, we address the controller-design problem with a combination of techniques from both fields. The use of black-box neural networks in deep reinforcement learning (deep RL) poses a challenge for such a combination. Instead of reasoning formally about the output of deep RL, which we call the wizard, we extract from it a decision-tree-based model, which we refer to as the magic book. Using the extracted model as an intermediary, we are able to handle problems that are infeasible for either deep RL or formal methods by themselves. First, we suggest, for the first time, a synthesis procedure that is based on a magic book. We synthesize a stand-alone correct-by-design controller that enjoys the favorable performance of RL. Second, we incorporate a magic book in a bounded model checking (BMC) procedure. BMC allows us to find numerous traces of the plant under the control of the wizard, which a user can use to increase the trustworthiness of the wizard and direct further training.}, author = {Alamdari, Par Alizadeh and Avni, Guy and Henzinger, Thomas A and Lukina, Anna}, booktitle = {Proceedings of the 20th Conference on Formal Methods in Computer-Aided Design}, isbn = {9783854480426}, issn = {2708-7824}, location = {Online Conference}, pages = {138--147}, publisher = {TU Wien Academic Press}, title = {{Formal methods with a touch of magic}}, doi = {10.34727/2020/isbn.978-3-85448-042-6_21}, year = {2020}, } @article{9249, abstract = {The rhombic dodecahedron is a space-filling polyhedron that represents the close packing of spheres in 3D space and the Voronoi structures of the face-centered cubic (FCC) lattice. In this paper, we describe a new coordinate system in which every grid point with three integer coordinates corresponds to a rhombic dodecahedron centroid. To illustrate the interest of the new coordinate system, we propose a characterization of the 3D digital plane with its topological features, such as the interrelation between the thickness of the digital plane and the separability constraint we aim to obtain. We also present a characterization of 3D digital lines and study them as intersections of multiple digital planes. 
A characterization of the 3D digital sphere with relevant topological features is proposed as well, along with the 48-symmetry appearing in the new coordinate system.}, author = {Biswas, Ranita and Largeteau-Skapin, Gaëlle and Zrour, Rita and Andres, Eric}, issn = {2353-3390}, journal = {Mathematical Morphology - Theory and Applications}, number = {1}, pages = {143--158}, publisher = {De Gruyter}, title = {{Digital objects in rhombic dodecahedron grid}}, doi = {10.1515/mathm-2020-0106}, volume = {4}, year = {2020}, } @inproceedings{9299, abstract = {We call a multigraph non-homotopic if it can be drawn in the plane in such a way that no two edges connecting the same pair of vertices can be continuously transformed into each other without passing through a vertex, and no loop can be shrunk to its end-vertex in the same way. It is easy to see that a non-homotopic multigraph on n>1 vertices can have arbitrarily many edges. We prove that the number of crossings between the edges of a non-homotopic multigraph with n vertices and m>4n edges is larger than cm²/n for some constant c>0, and that this bound is tight up to a polylogarithmic factor. We also show that the lower bound is not asymptotically sharp as n is fixed and m⟶∞.}, author = {Pach, János and Tardos, Gábor and Tóth, Géza}, booktitle = {28th International Symposium on Graph Drawing and Network Visualization}, isbn = {9783030687656}, issn = {1611-3349}, location = {Virtual, Online}, pages = {359--371}, publisher = {Springer Nature}, title = {{Crossings between non-homotopic edges}}, doi = {10.1007/978-3-030-68766-3_28}, volume = {12590}, year = {2020}, } @inproceedings{9632, abstract = {Second-order information, in the form of Hessian- or Inverse-Hessian-vector products, is a fundamental tool for solving optimization problems. Recently, there has been significant interest in utilizing this information in the context of deep neural networks; however, relatively little is known about the quality of existing approximations in this context. Our work examines this question, identifies issues with existing approaches, and proposes a method called WoodFisher to compute a faithful and efficient estimate of the inverse Hessian. Our main application is to neural network compression, where we build on the classic Optimal Brain Damage/Surgeon framework. We demonstrate that WoodFisher significantly outperforms popular state-of-the-art methods for one-shot pruning. Further, even when iterative, gradual pruning is allowed, our method results in a gain in test accuracy over the state-of-the-art approaches, for standard image classification datasets such as ImageNet ILSVRC. We examine how our method can be extended to take into account first-order information, as well as illustrate its ability to automatically set layer-wise pruning thresholds and perform compression in the limited-data regime. The code is available at the following link: https://github.com/IST-DASLab/WoodFisher.}, author = {Singh, Sidak Pal and Alistarh, Dan-Adrian}, booktitle = {Advances in Neural Information Processing Systems}, isbn = {9781713829546}, issn = {10495258}, location = {Vancouver, Canada}, pages = {18098--18109}, publisher = {Curran Associates}, title = {{WoodFisher: Efficient second-order approximation for neural network compression}}, volume = {33}, year = {2020}, } @article{9630, abstract = {Various kinds of data are routinely represented as discrete probability distributions. 
Examples include text documents summarized by histograms of word occurrences and images represented as histograms of oriented gradients. Viewing a discrete probability distribution as a point in the standard simplex of the appropriate dimension, we can understand collections of such objects in geometric and topological terms. Importantly, instead of using the standard Euclidean distance, we look into dissimilarity measures with information-theoretic justification, and we develop the theory needed for applying topological data analysis in this setting. In doing so, we emphasize constructions that enable the usage of existing computational topology software in this context.}, author = {Edelsbrunner, Herbert and Virk, Ziga and Wagner, Hubert}, issn = {1920180X}, journal = {Journal of Computational Geometry}, number = {2}, pages = {162--182}, publisher = {Carleton University}, title = {{Topological data analysis in information space}}, doi = {10.20382/jocg.v11i2a7}, volume = {11}, year = {2020}, } @inproceedings{9631, abstract = {The ability to leverage large-scale hardware parallelism has been one of the key enablers of the accelerated recent progress in machine learning. Consequently, there has been considerable effort invested into developing efficient parallel variants of classic machine learning algorithms. However, despite the wealth of knowledge on parallelization, some classic machine learning algorithms often prove hard to parallelize efficiently while maintaining convergence. In this paper, we focus on efficient parallel algorithms for the key machine learning task of inference on graphical models, in particular on the fundamental belief propagation algorithm. We address the challenge of efficiently parallelizing this classic paradigm by showing how to leverage scalable relaxed schedulers in this context. We present an extensive empirical study, showing that our approach outperforms previous parallel belief propagation implementations both in terms of scalability and in terms of wall-clock convergence time, on a range of practical applications.}, author = {Aksenov, Vitaly and Alistarh, Dan-Adrian and Korhonen, Janne}, booktitle = {Advances in Neural Information Processing Systems}, isbn = {9781713829546}, issn = {10495258}, location = {Vancouver, Canada}, pages = {22361--22372}, publisher = {Curran Associates}, title = {{Scalable belief propagation via relaxed scheduling}}, volume = {33}, year = {2020}, } @inproceedings{8533, abstract = {Game of Life is a simple and elegant model to study dynamical systems over networks. The model consists of a graph where every vertex has one of two types, namely, dead or alive. A configuration is a mapping of the vertices to the types. An update rule describes how the type of a vertex is updated given the types of its neighbors. In every round, all vertices are updated synchronously, which leads to a configuration update. While Game of Life in general allows a broad range of update rules, we focus on two simple families of update rules, namely, underpopulation and overpopulation, that model several interesting dynamics studied in the literature. In both settings, a dead vertex requires at least a desired number of live neighbors to become alive. For underpopulation (resp., overpopulation), a live vertex requires at least (resp. at most) a desired number of live neighbors to remain alive. We study the basic computation problems, e.g., configuration reachability, for these two families of rules.
For underpopulation rules, we show that these problems can be solved in polynomial time, whereas for overpopulation rules they are PSPACE-complete.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Jecker, Ismael R and Svoboda, Jakub}, booktitle = {45th International Symposium on Mathematical Foundations of Computer Science}, isbn = {9783959771597}, issn = {18688969}, location = {Prague, Czech Republic}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Simplified game of life: Algorithms and complexity}}, doi = {10.4230/LIPIcs.MFCS.2020.22}, volume = {170}, year = {2020}, } @inproceedings{8534, abstract = {A regular language L of finite words is composite if there are regular languages L₁,L₂,…,L_t such that L = ⋂_{i = 1}^t L_i and the index (number of states in a minimal DFA) of every language L_i is strictly smaller than the index of L. Otherwise, L is prime. Primality of regular languages was introduced and studied in [O. Kupferman and J. Mosheiff, 2015], where the complexity of deciding the primality of the language of a given DFA was left open, with a doubly-exponential gap between the upper and lower bounds. We study primality for unary regular languages, namely regular languages with a singleton alphabet. A unary language corresponds to a subset of ℕ, making the study of unary prime languages closer to that of primality in number theory. We show that the setting of languages is richer. In particular, while every composite number is the product of two smaller numbers, the number t of languages necessary to decompose a composite unary language induces a strict hierarchy. In addition, a primality witness for a unary language L, namely a word that is not in L but is in all products of languages that contain L and have an index smaller than L’s, may be of exponential length. Still, we are able to characterize compositionality by structural properties of a DFA for L, leading to a LogSpace algorithm for primality checking of unary DFAs.}, author = {Jecker, Ismael R and Kupferman, Orna and Mazzocchi, Nicolas}, booktitle = {45th International Symposium on Mathematical Foundations of Computer Science}, isbn = {9783959771597}, issn = {18688969}, location = {Prague, Czech Republic}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Unary prime languages}}, doi = {10.4230/LIPIcs.MFCS.2020.51}, volume = {170}, year = {2020}, } @article{8538, abstract = {We prove some recent experimental observations of Dan Reznik concerning periodic billiard orbits in ellipses. For example, the sum of cosines of the angles of a periodic billiard polygon remains constant in the 1-parameter family of such polygons (that exist due to the Poncelet porism). In our proofs, we use geometric and complex analytic methods.}, author = {Akopyan, Arseniy and Schwartz, Richard and Tabachnikov, Serge}, issn = {2199-6768}, journal = {European Journal of Mathematics}, publisher = {Springer Nature}, title = {{Billiards in ellipses revisited}}, doi = {10.1007/s40879-020-00426-9}, year = {2020}, } @unpublished{8616, abstract = {The brain vasculature supplies neurons with glucose and oxygen, but little is known about how vascular plasticity contributes to brain function. Using longitudinal in vivo imaging, we reported that a substantial proportion of blood vessels in the adult brain sporadically occluded and regressed. 
Their regression proceeded through sequential stages of blood-flow occlusion, endothelial cell collapse, relocation or loss of pericytes, and retraction of glial endfeet. Regressing vessels were found to be widespread in mouse, monkey and human brains. Both brief occlusions of the middle cerebral artery and lipopolysaccharide-mediated inflammation induced an increase of vessel regression. Blockage of leukocyte adhesion to endothelial cells alleviated LPS-induced vessel regression. We further revealed that blood vessel regression caused a reduction of neuronal activity due to a dysfunction in mitochondrial metabolism and glutamate production. Our results elucidate the mechanism of vessel regression and its role in neuronal function in the adult brain.}, author = {Gao, Xiaofei and Li, Jun-Liszt and Chen, Xingjun and Ci, Bo and Chen, Fei and Lu, Nannan and Shen, Bo and Zheng, Lijun and Jia, Jie-Min and Yi, Yating and Zhang, Shiwen and Shi, Ying-Chao and Shi, Kaibin and Propson, Nicholas E and Huang, Yubin and Poinsatte, Katherine and Zhang, Zhaohuan and Yue, Yuanlei and Bosco, Dale B and Lu, Ying-mei and Yang, Shi-bing and Adams, Ralf H. and Lindner, Volkhard and Huang, Fen and Wu, Long-Jun and Zheng, Hui and Han, Feng and Hippenmeyer, Simon and Stowe, Ann M. and Peng, Bo and Margeta, Marta and Wang, Xiaoqun and Liu, Qiang and Körbelin, Jakob and Trepel, Martin and Lu, Hui and Zhou, Bo O. and Zhao, Hu and Su, Wenzhi and Bachoo, Robert M. and Ge, Woo-ping}, booktitle = {bioRxiv}, publisher = {Cold Spring Harbor Laboratory}, title = {{Reduction of neuronal activity mediated by blood-vessel regression in the brain}}, doi = {10.1101/2020.09.15.262782}, year = {2020}, } @techreport{8695, abstract = {A look at international activities on Open Science reveals a broad spectrum from individual institutional policies to national action plans. The present Recommendations for a National Open Science Strategy in Austria are based on these international initiatives and present practical considerations for their coordinated implementation with regard to strategic developments in research, technology and innovation (RTI) in Austria until 2030. They are addressed to all relevant actors in the RTI system, in particular to Research Performing Organisations, Research Funding Organisations, Research Policy, memory institutions such as Libraries and Researchers. The recommendation paper was developed from 2018 to 2020 by the OANA working group "Open Science Strategy" and published for the first time in spring 2020 for a public consultation. The now available final version of the recommendation document, which contains feedback and comments from the consultation, is intended to provide an impetus for further discussion and implementation of Open Science in Austria and serves as a contribution and basis for a potential national Open Science Strategy in Austria. The document builds on the diverse expertise of the authors (academia, administration, library and archive, information technology, science policy, funding system, etc.) 
and reflects their personal experiences and opinions.}, author = {Mayer, Katja and Rieck, Katharina and Reichmann, Stefan and Danowski, Patrick and Graschopf, Anton and König, Thomas and Kraker, Peter and Lehner, Patrick and Reckling, Falk and Ross-Hellauer, Tony and Spichtinger, Daniel and Tzatzanis, Michalis and Schürz, Stefanie}, pages = {36}, publisher = {OANA}, title = {{Empfehlungen für eine nationale Open Science Strategie in Österreich / Recommendations for a National Open Science Strategy in Austria}}, doi = {10.5281/ZENODO.4109242}, year = {2020}, } @article{8706, abstract = {As part of the Austrian Transition to Open Access (AT2OA) project, subproject TP1-B is working on designing a monitoring solution for the output of Open Access publications in Austria. This report on a potential Open Access monitoring approach in Austria is one of the results of these efforts and can serve as a basis for discussion on an international level.}, author = {Danowski, Patrick and Ferus, Andreas and Hikl, Anna-Laetitia and McNeill, Gerda and Miniberger, Clemens and Reding, Steve and Zarka, Tobias and Zojer, Michael}, issn = {10222588}, journal = {Mitteilungen der Vereinigung Österreichischer Bibliothekarinnen und Bibliothekare}, number = {2}, pages = {278--284}, publisher = {Vereinigung Österreichischer Bibliothekarinnen und Bibliothekare}, title = {{„Recommendation" for the further procedure for open access monitoring. Deliverable of the AT2OA subproject TP1-B}}, doi = {10.31263/voebm.v73i2.3941}, volume = {73}, year = {2020}, } @article{8978, abstract = {Mosaic analysis with double markers (MADM) technology enables concomitant fluorescent cell labeling and induction of uniparental chromosome disomy (UPD) with single-cell resolution. In UPD, imprinted genes are either overexpressed 2-fold or are not expressed. Here, the MADM platform is utilized to probe imprinting phenotypes at the transcriptional level. This protocol highlights major steps for the generation and isolation of projection neurons and astrocytes with MADM-induced UPD from mouse cerebral cortex for downstream single-cell and low-input sample RNA-sequencing experiments. For complete details on the use and execution of this protocol, please refer to Laukoter et al. (2020b).}, author = {Laukoter, Susanne and Amberg, Nicole and Pauler, Florian and Hippenmeyer, Simon}, issn = {2666-1667}, journal = {STAR Protocols}, number = {3}, publisher = {Elsevier}, title = {{Generation and isolation of single cells from mouse brain with mosaic analysis with double markers-induced uniparental chromosome disomy}}, doi = {10.1016/j.xpro.2020.100215}, volume = {1}, year = {2020}, } @inproceedings{9103, abstract = {We introduce LRT-NG, a set of techniques and an associated toolset that computes a reachtube (an over-approximation of the set of reachable states over a given time horizon) of a nonlinear dynamical system. LRT-NG significantly advances the state-of-the-art Lagrangian Reachability approach and its associated tool LRT. From a theoretical perspective, LRT-NG is superior to LRT in three ways. First, it uses for the first time an analytically computed metric for the propagated ball which is proven to minimize the ball’s volume. We emphasize that the metric computation is the centerpiece of all bloating-based techniques. Secondly, it computes the next reachset as the intersection of two balls: one based on the Cartesian metric and the other on the new metric.
While the two metrics were previously considered opposing approaches, their joint use considerably tightens the reachtubes. Thirdly, it avoids the "wrapping effect" associated with the validated integration of the center of the reachset, by optimally absorbing the interval approximation in the radius of the next ball. From a tool-development perspective, LRT-NG is superior to LRT in two ways. First, it is a standalone tool that no longer relies on CAPD. This required the implementation of the Lohner method and a Runge-Kutta time-propagation method. Secondly, it has an improved interface, allowing the input model and initial conditions to be provided as external input files. Our experiments on a comprehensive set of benchmarks, including two Neural ODEs, demonstrate LRT-NG’s superior performance compared to LRT, CAPD, and Flow*.}, author = {Gruenbacher, Sophie and Cyranka, Jacek and Lechner, Mathias and Islam, Md Ariful and Smolka, Scott A. and Grosu, Radu}, booktitle = {Proceedings of the 59th IEEE Conference on Decision and Control}, isbn = {9781728174471}, issn = {07431546}, location = {Jeju Island, Korea (South)}, pages = {1556--1563}, publisher = {IEEE}, title = {{Lagrangian reachtubes: The next generation}}, doi = {10.1109/CDC42340.2020.9304042}, volume = {2020}, year = {2020}, } @inproceedings{9221, abstract = {Recent works have shown that gradient descent can find a global minimum for over-parameterized neural networks where the widths of all the hidden layers scale polynomially with N (N being the number of training samples). In this paper, we prove that, for deep networks, a single layer of width N following the input layer suffices to ensure a similar guarantee. In particular, all the remaining layers are allowed to have constant widths, and form a pyramidal topology. We show an application of our result to the widely used LeCun’s initialization and obtain an over-parameterization requirement for the single wide layer of order N².}, author = {Nguyen, Quynh and Mondelli, Marco}, booktitle = {34th Conference on Neural Information Processing Systems}, location = {Vancouver, Canada}, pages = {11961--11972}, publisher = {Curran Associates}, title = {{Global convergence of deep networks with one wide layer followed by pyramidal topology}}, volume = {33}, year = {2020}, } @inproceedings{9415, abstract = {Optimizing convolutional neural networks for fast inference has recently become an extremely active area of research. One of the go-to solutions in this context is weight pruning, which aims to reduce computational and memory footprint by removing large subsets of the connections in a neural network. Surprisingly, much less attention has been given to exploiting sparsity in the activation maps, which tend to be naturally sparse in many settings thanks to the structure of rectified linear (ReLU) activation functions. In this paper, we present an in-depth analysis of methods for maximizing the sparsity of the activations in a trained neural network, and show that, when coupled with an efficient sparse-input convolution algorithm, we can leverage this sparsity for significant performance gains. To induce highly sparse activation maps without accuracy loss, we introduce a new regularization technique, coupled with a new threshold-based sparsification method based on a parameterized activation function called Forced-Activation-Threshold Rectified Linear Unit (FATReLU).
We examine the impact of our methods on popular image classification models, showing that most architectures can adapt to significantly sparser activation maps without any accuracy loss. Our second contribution is showing that these compression gains can be translated into inference speedups: we provide a new algorithm to enable fast convolution operations over networks with sparse activations, and show that it can enable significant speedups for end-to-end inference on a range of popular models on the large-scale ImageNet image classification task on modern Intel CPUs, with little or no retraining cost.}, author = {Kurtz, Mark and Kopinsky, Justin and Gelashvili, Rati and Matveev, Alexander and Carr, John and Goin, Michael and Leiserson, William and Moore, Sage and Nell, Bill and Shavit, Nir and Alistarh, Dan-Adrian}, booktitle = {37th International Conference on Machine Learning, ICML 2020}, issn = {2640-3498}, location = {Online}, pages = {5533--5543}, title = {{Inducing and exploiting activation sparsity for fast neural network inference}}, volume = {119}, year = {2020}, }