@article{446, abstract = {We prove that in Thomas–Fermi–Dirac–von Weizsäcker theory, a nucleus of charge Z > 0 can bind at most Z + C electrons, where C is a universal constant. This result is obtained through a comparison with Thomas–Fermi theory which, as a by-product, gives bounds on the screened nuclear potential and the radius of the minimizer. A key ingredient of the proof is a novel technique to control the particles in the exterior region, which also applies to the liquid drop model with a nuclear background potential.}, author = {Frank, Rupert and Phan Thanh, Nam and Van Den Bosch, Hanne}, journal = {Communications on Pure and Applied Mathematics}, number = {3}, pages = {577 -- 614}, publisher = {Wiley-Blackwell}, title = {{The ionization conjecture in Thomas–Fermi–Dirac–von Weizsäcker theory}}, doi = {10.1002/cpa.21717}, volume = {71}, year = {2018}, } @article{430, abstract = {In this issue of GENETICS, a new method for detecting natural selection on polygenic traits is developed and applied to several human examples (Racimo et al. 2018). By definition, many loci contribute to variation in polygenic traits, and a challenge for evolutionary geneticists has been that these traits can evolve by small, nearly undetectable shifts in allele frequencies across each of many, typically unknown, loci. Recently, a helpful remedy has arisen. Genome-wide association studies (GWAS) have been illuminating sets of loci that can be interrogated jointly for changes in allele frequencies. By aggregating small signals of change across many such loci, directional natural selection is now in principle detectable using genetic data, even for highly polygenic traits. This is an exciting arena of progress – with these methods, tests can be made for selection associated with traits, and we can now study selection in what may be its most prevalent mode. The continuing fast pace of GWAS publications suggests there will be many more polygenic tests of selection in the near future, as every new GWAS is an opportunity for an accompanying test of polygenic selection. However, it is important to be aware of complications that arise in interpretation, especially given that these studies may easily be misinterpreted both in and outside the evolutionary genetics community. Here, we provide context for understanding polygenic tests and urge caution regarding how these results are interpreted and reported upon more broadly.}, author = {Novembre, John and Barton, Nicholas H}, journal = {Genetics}, number = {4}, pages = {1351 -- 1355}, publisher = {Genetics Society of America}, title = {{Tread lightly interpreting polygenic tests of selection}}, doi = {10.1534/genetics.118.300786}, volume = {208}, year = {2018}, } @article{199, abstract = {Sex-biased genes are central to the study of sexual selection, sexual antagonism, and sex chromosome evolution. We describe a comprehensive de novo assembled transcriptome in the common frog Rana temporaria based on five developmental stages and three adult tissues from both sexes, obtained from a population with karyotypically homomorphic but genetically differentiated sex chromosomes. This allows the study of sex-biased gene expression throughout development, and its effect on the rate of gene evolution while accounting for pleiotropic expression, which is known to negatively correlate with the evolutionary rate. Overall, sex-biased genes had little overlap among developmental stages and adult tissues.
Late developmental stages and gonad tissues had the highest numbers of stage- or tissue-specific genes. We find that pleiotropic gene expression is a better predictor than sex bias for the evolutionary rate of genes, though it often interacts with sex bias. Although genetically differentiated, the sex chromosomes were not enriched in sex-biased genes, possibly due to a very recent arrest of XY recombination. These results extend our understanding of the developmental dynamics, tissue specificity, and genomic localization of sex-biased genes.}, author = {Ma, Wen and Veltsos, Paris and Toups, Melissa A and Rodrigues, Nicolas and Sermier, Roberto and Jeffries, Daniel and Perrin, Nicolas}, journal = {Genes}, number = {6}, publisher = {MDPI AG}, title = {{Tissue specificity and dynamics of sex-biased gene expression in a common frog population with differentiated, yet homomorphic, sex chromosomes}}, doi = {10.3390/genes9060294}, volume = {9}, year = {2018}, } @article{543, abstract = {A central goal in theoretical neuroscience is to predict the response properties of sensory neurons from first principles. To this end, “efficient coding” posits that sensory neurons encode maximal information about their inputs given internal constraints. There exist, however, many variants of efficient coding (e.g., redundancy reduction, different formulations of predictive coding, robust coding, sparse coding, etc.), differing in their regimes of applicability, in the relevance of signals to be encoded, and in the choice of constraints. It is unclear how these types of efficient coding relate or what is expected when different coding objectives are combined. Here we present a unified framework that encompasses previously proposed efficient coding models and extends to unique regimes. We show that optimizing neural responses to encode predictive information can lead them to either correlate or decorrelate their inputs, depending on the stimulus statistics; in contrast, at low noise, efficiently encoding the past always predicts decorrelation. Later, we investigate coding of naturalistic movies and show that qualitatively different types of visual motion tuning and levels of response sparsity are predicted, depending on whether the objective is to recover the past or predict the future. Our approach promises a way to explain the observed diversity of sensory neural responses, as due to multiple functional goals and constraints fulfilled by different cell types and/or circuits.}, author = {Chalk, Matthew J and Marre, Olivier and Tkacik, Gasper}, journal = {PNAS}, number = {1}, pages = {186 -- 191}, publisher = {National Academy of Sciences}, title = {{Toward a unified theory of efficient, predictive, and sparse coding}}, doi = {10.1073/pnas.1711114115}, volume = {115}, year = {2018}, } @article{421, abstract = {Cell shape is determined by a balance of intrinsic properties of the cell as well as its mechanochemical environment. Inhomogeneous shape changes underlie many morphogenetic events and involve spatial gradients in active cellular forces induced by complex chemical signaling. Here, we introduce a mechanochemical model based on the notion that cell shape changes may be induced by external diffusible biomolecules that influence cellular contractility (or equivalently, adhesions) in a concentration-dependent manner—and whose spatial profile in turn is affected by cell shape. We map out theoretically the possible interplay between chemical concentration and cellular structure.
Besides providing a direct route to spatial gradients in cell shape profiles in tissues, we show that the dependence on cell shape helps create robust mechanochemical gradients.}, author = {Dasbiswas, Kinjal and Hannezo, Claude-Edouard B and Gov, Nir}, journal = {Biophysical Journal}, number = {4}, pages = {968 -- 977}, publisher = {Biophysical Society}, title = {{Theory of epithelial cell shape transitions induced by mechanoactive chemical gradients}}, doi = {10.1016/j.bpj.2017.12.022}, volume = {114}, year = {2018}, } @article{63, abstract = {African cichlids display a remarkable assortment of jaw morphologies, pigmentation patterns, and mating behaviors. In addition to this previously documented diversity, recent studies have documented a rich diversity of sex chromosomes within these fishes. Here we review the known sex-determination network within vertebrates, and the extraordinary number of sex chromosome systems segregating in African cichlids. We also propose a model for understanding the unusual number of sex chromosome systems within this clade.}, author = {Gammerdinger, William J and Kocher, Thomas}, journal = {Genes}, number = {10}, publisher = {MDPI AG}, title = {{Unusual diversity of sex chromosomes in African cichlid fishes}}, doi = {10.3390/genes9100480}, volume = {9}, year = {2018}, } @article{296, abstract = {The thermodynamic description of many-particle systems rests on the assumption of ergodicity, the ability of a system to explore all allowed configurations in the phase space. Recent studies on many-body localization have revealed the existence of systems that strongly violate ergodicity in the presence of quenched disorder. Here, we demonstrate that ergodicity can be weakly broken by a different mechanism, arising from the presence of special eigenstates in the many-body spectrum that are reminiscent of quantum scars in chaotic non-interacting systems. In the single-particle case, quantum scars correspond to wavefunctions that concentrate in the vicinity of unstable periodic classical trajectories. We show that many-body scars appear in the Fibonacci chain, a model with a constrained local Hilbert space that has recently been experimentally realized in a Rydberg-atom quantum simulator. The quantum scarred eigenstates are embedded throughout the otherwise thermalizing many-body spectrum but lead to direct experimental signatures, as we show for periodic recurrences that reproduce those observed in the experiment. Our results suggest that scarred many-body bands give rise to a new universality class of quantum dynamics, opening up opportunities for the creation of novel states with long-lived coherence in systems that are now experimentally realizable.}, author = {Turner, Christopher and Michailidis, Alexios and Abanin, Dmitry and Serbyn, Maksym and Papić, Zlatko}, journal = {Nature Physics}, pages = {745 -- 749}, publisher = {Nature Publishing Group}, title = {{Weak ergodicity breaking from quantum many-body scars}}, doi = {10.1038/s41567-018-0137-5}, volume = {14}, year = {2018}, } @article{607, abstract = {We study the Fokker-Planck equation derived in the large system limit of the Markovian process describing the dynamics of quantitative traits. The Fokker-Planck equation is posed on a bounded domain and its transport and diffusion coefficients vanish on the domain's boundary. We first argue that, despite this degeneracy, the standard no-flux boundary condition is valid.
We derive the weak formulation of the problem and prove the existence and uniqueness of its solutions by constructing the corresponding contraction semigroup on a suitable function space. Then, we prove that for the parameter regime with high enough mutation rate the problem exhibits a positive spectral gap, which implies exponential convergence to equilibrium. Next, we provide a simple derivation of the so-called Dynamic Maximum Entropy (DynMaxEnt) method for approximation of observables (moments) of the Fokker-Planck solution, which can be interpreted as a nonlinear Galerkin approximation. The limited applicability of the DynMaxEnt method inspires us to introduce its modified version that is valid for the whole range of admissible parameters. Finally, we present several numerical experiments to demonstrate the performance of both the original and modified DynMaxEnt methods. We observe that in the parameter regimes where both methods are valid, the modified one exhibits slightly better approximation properties compared to the original one.}, author = {Bodova, Katarina and Haskovec, Jan and Markowich, Peter}, journal = {Physica D: Nonlinear Phenomena}, pages = {108--120}, publisher = {Elsevier}, title = {{Well posedness and maximum entropy approximation for the dynamics of quantitative traits}}, doi = {10.1016/j.physd.2017.10.015}, volume = {376-377}, year = {2018}, } @article{294, abstract = {We developed a method to calculate two-photon processes in quantum mechanics that replaces the infinite summation over the intermediate states by a perturbation expansion. This latter consists of a series of commutators that involve position, momentum, and Hamiltonian quantum operators. We analyzed several single- and many-particle cases for which a closed-form solution to the perturbation expansion exists, as well as more complicated cases for which a solution is found by convergence. Throughout the article, Rayleigh and Raman scattering are taken as examples of two-photon processes. The present method provides a clear distinction between the Thomson scattering, regarded as classical scattering, and quantum contributions. Such a distinction lets us derive general results concerning light scattering. Finally, possible extensions to the developed formalism are discussed.}, author = {Fratini, Filippo and Safari, Laleh and Amaro, Pedro and Santos, José}, journal = {Physical Review A - Atomic, Molecular, and Optical Physics}, number = {4}, publisher = {American Physical Society}, title = {{Two-photon processes based on quantum commutators}}, doi = {10.1103/PhysRevA.97.043842}, volume = {97}, year = {2018}, } @article{606, abstract = {We establish the existence of a global solution for a new family of fluid-like equations, which are obtained in certain regimes as the mean-field evolution of the supercurrent density in a (2D section of a) type-II superconductor with pinning and with imposed electric current. We also consider general vortex-sheet initial data, and investigate the uniqueness and regularity properties of the solution.
For some choice of parameters, the equation under investigation coincides with the so-called lake equation from 2D shallow water fluid dynamics, and our analysis then leads to a new existence result for rough initial data.}, author = {Duerinckx, Mitia and Fischer, Julian L}, journal = {Annales de l'Institut Henri Poincare (C) Non Linear Analysis}, number = {5}, pages = {1267--1319}, publisher = {Elsevier}, title = {{Well-posedness for mean-field evolutions arising in superconductivity}}, doi = {10.1016/j.anihpc.2017.11.004}, volume = {35}, year = {2018}, } @inproceedings{5959, abstract = {Formalizing properties of systems with continuous dynamics is a challenging task. In this paper, we propose a formal framework for specifying and monitoring rich temporal properties of real-valued signals. We introduce signal first-order logic (SFO) as a specification language that combines first-order logic with linear-real arithmetic and unary function symbols interpreted as piecewise-linear signals. We first show that while the satisfiability problem for SFO is undecidable, its membership and monitoring problems are decidable. We develop an offline monitoring procedure for SFO that has polynomial complexity in the size of the input trace and the specification, for a fixed number of quantifiers and function symbols. We show that the algorithm has computation time linear in the size of the input trace for the important fragment of bounded-response specifications interpreted over input traces with finite variability. We can use our results to extend signal temporal logic with first-order quantifiers over time and value parameters, while preserving its efficient monitoring. We finally demonstrate the practical appeal of our logic through a case study in the micro-electronics domain.}, author = {Bakhirkin, Alexey and Ferrere, Thomas and Henzinger, Thomas A and Nickovic, Dejan}, booktitle = {2018 International Conference on Embedded Software}, isbn = {9781538655603}, location = {Turin, Italy}, pages = {1--10}, publisher = {IEEE}, title = {{Keynote: The first-order logic of signals}}, doi = {10.1109/emsoft.2018.8537203}, year = {2018}, } @inproceedings{5962, abstract = {Stochastic Gradient Descent (SGD) is a fundamental algorithm in machine learning, representing the optimization backbone for training several classic models, from regression to neural networks. Given the recent practical focus on distributed machine learning, significant work has been dedicated to the convergence properties of this algorithm under the inconsistent and noisy updates arising from execution in a distributed environment. However, surprisingly, the convergence properties of this classic algorithm in the standard shared-memory model are still not well-understood. In this work, we address this gap, and provide new convergence bounds for lock-free concurrent stochastic gradient descent, executing in the classic asynchronous shared memory model, against a strong adaptive adversary. Our results give improved upper and lower bounds on the “price of asynchrony” when executing the fundamental SGD algorithm in a concurrent setting. They show that this classic optimization tool can converge faster and with a wider range of parameters than previously known under asynchronous iterations.
At the same time, we exhibit a fundamental trade-off between the maximum delay in the system and the rate at which SGD can converge, which governs the set of parameters under which this algorithm can still work efficiently.}, author = {Alistarh, Dan-Adrian and De Sa, Christopher and Konstantinov, Nikola H}, booktitle = {Proceedings of the 2018 ACM Symposium on Principles of Distributed Computing - PODC '18}, isbn = {9781450357951}, location = {Egham, United Kingdom}, pages = {169--178}, publisher = {ACM Press}, title = {{The convergence of stochastic gradient descent in asynchronous shared memory}}, doi = {10.1145/3212734.3212763}, year = {2018}, } @article{5860, abstract = {A major problem for evolutionary theory is understanding the so-called open-ended nature of evolutionary change, from its definition to its origins. Open-ended evolution (OEE) refers to the unbounded increase in complexity that seems to characterize evolution on multiple scales. This property seems to be a characteristic feature of biological and technological evolution and is strongly tied to the generative potential associated with combinatorics, which allows the system to grow and expand their available state spaces. Interestingly, many complex systems presumably displaying OEE, from language to proteins, share a common statistical property: the presence of Zipf's Law. Given an inventory of basic items (such as words or protein domains) required to build more complex structures (sentences or proteins) Zipf's Law tells us that most of these elements are rare whereas a few of them are extremely common. Using algorithmic information theory, in this paper we provide a fundamental definition for open-endedness, which can be understood as postulates. Its statistical counterpart, based on standard Shannon information theory, has the structure of a variational problem which is shown to lead to Zipf's Law as the expected consequence of an evolutionary process displaying OEE. We further explore the problem of information conservation through an OEE process and we conclude that statistical information (standard Shannon information) is not conserved, resulting in the paradoxical situation in which the increase of information content has the effect of erasing itself. We prove that this paradox is solved if we consider non-statistical forms of information. This last result implies that standard information theory may not be a suitable theoretical framework to explore the persistence and increase of the information content in OEE systems.}, author = {Corominas-Murtra, Bernat and Seoane, Luís F. and Solé, Ricard}, issn = {17425689}, journal = {Journal of the Royal Society Interface}, number = {149}, publisher = {Royal Society Publishing}, title = {{Zipf's Law, unbounded complexity and open-ended evolution}}, doi = {10.1098/rsif.2018.0395}, volume = {15}, year = {2018}, } @inproceedings{5961, abstract = {The area of machine learning has made considerable progress over the past decade, enabled by the widespread availability of large datasets, as well as by improved algorithms and models. Given the large computational demands of machine learning workloads, parallelism, implemented either through single-node concurrency or through multi-node distribution, has been a third key ingredient to advances in machine learning. 
The goal of this tutorial is to provide the audience with an overview of standard distribution techniques in machine learning, with an eye towards the intriguing trade-offs between synchronization and communication costs of distributed machine learning algorithms, on the one hand, and their convergence, on the other. The tutorial will focus on parallelization strategies for the fundamental stochastic gradient descent (SGD) algorithm, which is a key tool when training machine learning models, from classical instances such as linear regression, to state-of-the-art neural network architectures. The tutorial will describe the guarantees provided by this algorithm in the sequential case, and then move on to cover both shared-memory and message-passing parallelization strategies, together with the guarantees they provide, and corresponding trade-offs. The presentation will conclude with a broad overview of ongoing research in distributed and concurrent machine learning. The tutorial will assume no prior knowledge beyond familiarity with basic concepts in algebra and analysis.}, author = {Alistarh, Dan-Adrian}, booktitle = {Proceedings of the 2018 ACM Symposium on Principles of Distributed Computing - PODC '18}, isbn = {9781450357951}, location = {Egham, United Kingdom}, pages = {487--488}, publisher = {ACM Press}, title = {{A brief tutorial on distributed and concurrent machine learning}}, doi = {10.1145/3212734.3212798}, year = {2018}, } @article{5960, abstract = {In this paper we present a reliable method to verify the existence of loops along the uncertain trajectory of a robot, based on proprioceptive measurements only, within a bounded-error context. The loop closure detection is one of the key points in simultaneous localization and mapping (SLAM) methods, especially in homogeneous environments with difficult scenes recognitions. The proposed approach is generic and could be coupled with conventional SLAM algorithms to reliably reduce their computing burden, thus improving the localization and mapping processes in the most challenging environments such as unexplored underwater extents. To prove that a robot performed a loop whatever the uncertainties in its evolution, we employ the notion of topological degree that originates in the field of differential topology. We show that a verification tool based on the topological degree is an optimal method for proving robot loops. This is demonstrated both on datasets from real missions involving autonomous underwater vehicles and by a mathematical discussion.}, author = {Rohou, Simon and Franek, Peter and Aubry, Clément and Jaulin, Luc}, issn = {1741-3176}, journal = {The International Journal of Robotics Research}, number = {12}, pages = {1500--1516}, publisher = {SAGE Publications}, title = {{Proving the existence of loops in robot trajectories}}, doi = {10.1177/0278364918808367}, volume = {37}, year = {2018}, } @inproceedings{5963, abstract = {There has been significant progress in understanding the parallelism inherent to iterative sequential algorithms: for many classic algorithms, the depth of the dependence structure is now well understood, and scheduling techniques have been developed to exploit this shallow dependence structure for efficient parallel implementations. A related, applied research strand has studied methods by which certain iterative task-based algorithms can be efficiently parallelized via relaxed concurrent priority schedulers.
These allow for high concurrency when inserting and removing tasks, at the cost of executing superfluous work due to the relaxed semantics of the scheduler. In this work, we take a step towards unifying these two research directions, by showing that there exists a family of relaxed priority schedulers that can efficiently and deterministically execute classic iterative algorithms such as greedy maximal independent set (MIS) and matching. Our primary result shows that, given a randomized scheduler with an expected relaxation factor of k in terms of the maximum allowed priority inversions on a task, and any graph on n vertices, the scheduler is able to execute greedy MIS with only an additive factor of poly(k) expected additional iterations compared to an exact (but not scalable) scheduler. This counter-intuitive result demonstrates that the overhead of relaxation when computing MIS is not dependent on the input size or structure of the input graph. Experimental results show that this overhead can be clearly offset by the gain in performance due to the highly scalable scheduler. In sum, we present an efficient method to deterministically parallelize iterative sequential algorithms, with provable runtime guarantees in terms of the number of executed tasks to completion.}, author = {Alistarh, Dan-Adrian and Brown, Trevor A and Kopinsky, Justin and Nadiradze, Giorgi}, booktitle = {Proceedings of the 2018 ACM Symposium on Principles of Distributed Computing - PODC '18}, isbn = {9781450357951}, location = {Egham, United Kingdom}, pages = {377--386}, publisher = {ACM Press}, title = {{Relaxed schedulers can efficiently parallelize iterative algorithms}}, doi = {10.1145/3212734.3212756}, year = {2018}, } @inproceedings{5965, abstract = {Relaxed concurrent data structures have become increasingly popular, due to their scalability in graph processing and machine learning applications [Nguyen13, gonzalez2012powergraph]. Despite considerable interest, there exist families of natural, high performing randomized relaxed concurrent data structures, such as the popular MultiQueue [MQ] pattern for implementing relaxed priority queue data structures, for which no guarantees are known in the concurrent setting [AKLN17]. Our main contribution is in showing for the first time that, under a set of analytic assumptions, a family of relaxed concurrent data structures, including variants of MultiQueues, but also a new approximate counting algorithm we call the MultiCounter, provides strong probabilistic guarantees on the degree of relaxation with respect to the sequential specification, in arbitrary concurrent executions. We formalize these guarantees via a new correctness condition called distributional linearizability, tailored to concurrent implementations with randomized relaxations. Our result is based on a new analysis of an asynchronous variant of the classic power-of-two-choices load balancing algorithm, in which placement choices can be based on inconsistent, outdated information (this result may be of independent interest). We validate our results empirically, showing that the MultiCounter algorithm can implement scalable relaxed timestamps.}, author = {Alistarh, Dan-Adrian and Brown, Trevor A and Kopinsky, Justin and Li, Jerry Z.
and Nadiradze, Giorgi}, booktitle = {Proceedings of the 30th on Symposium on Parallelism in Algorithms and Architectures - SPAA '18}, isbn = {9781450357999}, location = {Vienna, Austria}, pages = {133--142}, publisher = {ACM Press}, title = {{Distributionally linearizable data structures}}, doi = {10.1145/3210377.3210411}, year = {2018}, } @inproceedings{5967, abstract = {The Big Match is a multi-stage two-player game. In each stage Player 1 hides one or two pebbles in his hand, and his opponent has to guess that number; Player 1 loses a point if Player 2 is correct, and otherwise he wins a point. As soon as Player 1 hides one pebble, the players cannot change their choices in any future stage. Blackwell and Ferguson (1968) give an ε-optimal strategy for Player 1 that hides, in each stage, one pebble with a probability that depends on the entire past history. Any strategy that depends just on the clock or on a finite memory is worthless. The long-standing natural open problem has been whether every strategy that depends just on the clock and a finite memory is worthless. We prove that there is such a strategy that is ε-optimal. In fact, we show that just two states of memory are sufficient. }, author = {Hansen, Kristoffer Arnsfelt and Ibsen-Jensen, Rasmus and Neyman, Abraham}, booktitle = {Proceedings of the 2018 ACM Conference on Economics and Computation - EC '18}, isbn = {9781450358293}, location = {Ithaca, NY, United States}, pages = {149--150}, publisher = {ACM Press}, title = {{The Big Match with a clock and a bit of memory}}, doi = {10.1145/3219166.3219198}, year = {2018}, } @inproceedings{5966, abstract = {The transactional conflict problem arises in transactional systems whenever two or more concurrent transactions clash on a data item. While the standard solution to such conflicts is to immediately abort one of the transactions, some practical systems consider the alternative of delaying conflict resolution for a short interval, which may allow one of the transactions to commit. The challenge in the transactional conflict problem is to choose the optimal length of this delay interval so as to minimize the overall running time penalty for the conflicting transactions. In this paper, we propose a family of optimal online algorithms for the transactional conflict problem. Specifically, we consider variants of this problem which arise in different implementations of transactional systems, namely "requestor wins'' and "requestor aborts'' implementations: in the former, the recipient of a coherence request is aborted, whereas in the latter, it is the requestor which has to abort. Both strategies are implemented by real systems. We show that the requestor aborts case can be reduced to a classic instance of the ski rental problem, while the requestor wins case leads to a new version of this classical problem, for which we derive optimal deterministic and randomized algorithms. Moreover, we prove that, under a simplified adversarial model, our algorithms are constant-competitive with the offline optimum in terms of throughput. 
We validate our algorithmic results empirically through a hardware simulation of hardware transactional memory (HTM), showing that our algorithms can lead to non-trivial performance improvements for classic concurrent data structures.}, author = {Alistarh, Dan-Adrian and Haider, Syed Kamran and Kübler, Raphael and Nadiradze, Giorgi}, booktitle = {Proceedings of the 30th on Symposium on Parallelism in Algorithms and Architectures - SPAA '18}, isbn = {9781450357999}, location = {Vienna, Austria}, pages = {383--392}, publisher = {ACM Press}, title = {{The transactional conflict problem}}, doi = {10.1145/3210377.3210406}, year = {2018}, } @article{5975, abstract = {We consider the recent formulation of the algorithmic Lovász Local Lemma [N. Harvey and J. Vondrák, in Proceedings of FOCS, 2015, pp. 1327–1345; D. Achlioptas and F. Iliopoulos, in Proceedings of SODA, 2016, pp. 2024–2038; D. Achlioptas, F. Iliopoulos, and V. Kolmogorov, A Local Lemma for Focused Stochastic Algorithms, arXiv preprint, 2018] for finding objects that avoid “bad features,” or “flaws.” It extends the Moser–Tardos resampling algorithm [R. A. Moser and G. Tardos, J. ACM, 57 (2010), 11] to more general discrete spaces. At each step the method picks a flaw present in the current state and goes to a new state according to some prespecified probability distribution (which depends on the current state and the selected flaw). However, the recent formulation is less flexible than the Moser–Tardos method since it requires a specific flaw selection rule, whereas the algorithm of Moser and Tardos allows an arbitrary rule (and thus can potentially be implemented more efficiently). We formulate a new “commutativity” condition and prove that it is sufficient for an arbitrary rule to work. It also enables an efficient parallelization under an additional assumption. We then show that existing resampling oracles for perfect matchings and permutations do satisfy this condition.}, author = {Kolmogorov, Vladimir}, issn = {1095-7111}, journal = {SIAM Journal on Computing}, number = {6}, pages = {2029--2056}, publisher = {Society for Industrial & Applied Mathematics (SIAM)}, title = {{Commutativity in the algorithmic Lovász local lemma}}, doi = {10.1137/16m1093306}, volume = {47}, year = {2018}, } @inproceedings{5964, abstract = {A standard design pattern found in many concurrent data structures, such as hash tables or ordered containers, is an alternation of parallelizable sections that incur no data conflicts and critical sections that must run sequentially and are protected with locks. A lock can be viewed as a queue that arbitrates the order in which the critical sections are executed, and a natural question is whether we can use stochastic analysis to predict the resulting throughput. As a preliminary evidence to the affirmative, we describe a simple model that can be used to predict the throughput of coarse-grained lock-based algorithms. We show that our model works well for CLH lock, and we expect it to work for other popular lock designs such as TTAS, MCS, etc.}, author = {Aksenov, Vitaly and Alistarh, Dan-Adrian and Kuznetsov, Petr}, booktitle = {Proceedings of the 2018 ACM Symposium on Principles of Distributed Computing - PODC '18}, isbn = {9781450357951}, location = {Egham, United Kingdom}, pages = {411--413}, publisher = {ACM Press}, title = {{Brief Announcement: Performance prediction for coarse-grained locking}}, doi = {10.1145/3212734.3212785}, year = {2018}, } @article{5971, abstract = {We consider a Wigner-type ensemble, i.e.
large hermitian $N\times N$ random matrices $H=H^*$ with centered independent entries and with a general matrix of variances $S_{xy}=\mathbb{E}|H_{xy}|^2$. The norm of $H$ is asymptotically given by the maximum of the support of the self-consistent density of states. We establish a bound on this maximum in terms of norms of powers of $S$ that substantially improves the earlier bound $2\|S\|_{\infty}^{1/2}$ given in [O. Ajanki, L. Erdős and T. Krüger, Universality for general Wigner-type matrices, Prob. Theor. Rel. Fields 169 (2017) 667–727]. The key element of the proof is an effective Markov chain approximation for the contributions of the weighted Dyck paths appearing in the iterative solution of the corresponding Dyson equation.}, author = {Erdös, László and Mühlbacher, Peter}, issn = {2010-3271}, journal = {Random matrices: Theory and applications}, publisher = {World Scientific Publishing}, title = {{Bounds on the norm of Wigner-type random matrices}}, doi = {10.1142/s2010326319500096}, year = {2018}, } @article{5984, abstract = {G-protein-coupled receptors (GPCRs) form the largest receptor family, relay environmental stimuli to changes in cell behavior and represent prime drug targets. Many GPCRs are classified as orphan receptors because of the limited knowledge on their ligands and coupling to cellular signaling machineries. Here, we engineer a library of 63 chimeric receptors that contain the signaling domains of human orphan and understudied GPCRs functionally linked to the light-sensing domain of rhodopsin. Upon stimulation with visible light, we identify activation of canonical cell signaling pathways, including cAMP-, Ca2+-, MAPK/ERK-, and Rho-dependent pathways, downstream of the engineered receptors. For the human pseudogene GPR33, we resurrect a signaling function that supports its hypothesized role as a pathogen entry site. These results demonstrate that substituting unknown chemical activators with a light switch can reveal information about protein function and provide an optically controlled protein library for exploring the physiology and therapeutic potential of understudied GPCRs.}, author = {Morri, Maurizio and Sanchez-Romero, Inmaculada and Tichy, Alexandra-Madelaine and Kainrath, Stephanie and Gerrard, Elliot J. and Hirschfeld, Priscila and Schwarz, Jan and Janovjak, Harald L}, issn = {2041-1723}, journal = {Nature Communications}, number = {1}, publisher = {Springer Nature}, title = {{Optical functionalization of human class A orphan G-protein-coupled receptors}}, doi = {10.1038/s41467-018-04342-1}, volume = {9}, year = {2018}, } @article{5976, abstract = {We propose FlexMaps, a novel framework for fabricating smooth shapes out of flat, flexible panels with tailored mechanical properties. We start by mapping the 3D surface onto a 2D domain as in traditional UV mapping to design a set of deformable flat panels called FlexMaps. For these panels, we design and obtain specific mechanical properties such that, once they are assembled, the static equilibrium configuration matches the desired 3D shape.
FlexMaps can be fabricated from an almost rigid material, such as wood or plastic, and are made flexible in a controlled way by using computationally designed spiraling microstructures.}, author = {Malomo, Luigi and Perez Rodriguez, Jesus and Iarussi, Emmanuel and Pietroni, Nico and Miguel, Eder and Cignoni, Paolo and Bickel, Bernd}, issn = {0730-0301}, journal = {ACM Transactions on Graphics}, number = {6}, publisher = {Association for Computing Machinery (ACM)}, title = {{FlexMaps: Computational design of flat flexible shells for shaping 3D objects}}, doi = {10.1145/3272127.3275076}, volume = {37}, year = {2018}, } @article{5983, abstract = {We study a quantum impurity possessing both translational and internal rotational degrees of freedom interacting with a bosonic bath. Such a system corresponds to a “rotating polaron,” which can be used to model, e.g., a rotating molecule immersed in an ultracold Bose gas or superfluid helium. We derive the Hamiltonian of the rotating polaron and study its spectrum in the weak- and strong-coupling regimes using a combination of variational, diagrammatic, and mean-field approaches. We reveal how the coupling between linear and angular momenta affects stable quasiparticle states, and demonstrate that internal rotation leads to an enhanced self-localization in the translational degrees of freedom.}, author = {Yakaboylu, Enderalp and Midya, Bikashkali and Deuchert, Andreas and Leopold, Nikolai K and Lemeshko, Mikhail}, issn = {2469-9969}, journal = {Physical Review B}, number = {22}, publisher = {American Physical Society}, title = {{Theory of the rotating polaron: Spectrum and self-localization}}, doi = {10.1103/physrevb.98.224506}, volume = {98}, year = {2018}, } @article{5982, abstract = {In the present work, we detail a fast and simple solution-based method to synthesize hexagonal SnSe2 nanoplates (NPLs) and their use to produce crystallographically textured SnSe2 nanomaterials. We also demonstrate that the same strategy can be used to produce orthorhombic SnSe nanostructures and nanomaterials. NPLs are grown through a screw dislocation-driven mechanism. This mechanism typically results in pyramidal structures, but we demonstrate here that the growth from multiple dislocations results in flower-like structures. Crystallographically textured SnSe2 bulk nanomaterials obtained from the hot pressing of these SnSe2 structures display highly anisotropic charge and heat transport properties and thermoelectric (TE) figures of merit limited by relatively low electrical conductivities. To improve this parameter, SnSe2 NPLs are blended here with metal nanoparticles. 
The electrical conductivities of the blends are significantly improved with respect to bare SnSe2 NPLs, which translates into a three-fold increase of the TE figure of merit, reaching unprecedented ZT values up to 0.65.}, author = {Zhang, Yu and Liu, Yu and Lim, Khak Ho and Xing, Congcong and Li, Mengyao and Zhang, Ting and Tang, Pengyi and Arbiol, Jordi and Llorca, Jordi and Ng, Ka Ming and Ibáñez, Maria and Guardia, Pablo and Prato, Mirko and Cadavid, Doris and Cabot, Andreu}, issn = {1433-7851}, journal = {Angewandte Chemie International Edition}, number = {52}, pages = {17063--17068}, publisher = {Wiley}, title = {{Tin diselenide molecular precursor for solution-processable thermoelectric materials}}, doi = {10.1002/anie.201809847}, volume = {57}, year = {2018}, } @inproceedings{5978, abstract = {We consider the MAP-inference problem for graphical models, which is a valued constraint satisfaction problem defined on real numbers with a natural summation operation. We propose a family of relaxations (different from the famous Sherali-Adams hierarchy), which naturally define lower bounds for its optimum. This family always contains a tight relaxation and we give an algorithm able to find it and therefore, solve the initial non-relaxed NP-hard problem. The relaxations we consider decompose the original problem into two non-overlapping parts: an easy LP-tight part and a difficult one. For the latter part a combinatorial solver must be used. As we show in our experiments, in a number of applications the second, difficult part constitutes only a small fraction of the whole problem. This property allows to significantly reduce the computational time of the combinatorial solver and therefore solve problems which were out of reach before.}, author = {Haller, Stefan and Swoboda, Paul and Savchynskyy, Bogdan}, booktitle = {Proceedings of the 32nd AAAI Conference on Artificial Intelligence}, location = {New Orleans, LA, United States}, pages = {6581--6588}, publisher = {AAAI Press}, title = {{Exact MAP-inference by confining combinatorial search with LP relaxation}}, year = {2018}, } @article{5990, abstract = {A Ge–Si core–shell nanowire is used to realize a Josephson field-effect transistor with highly transparent contacts to superconducting leads. By changing the electric field, access to two distinct regimes, not combined before in a single device, is gained: in the accumulation mode the device is highly transparent and the supercurrent is carried by multiple subbands, while near depletion, the supercurrent is carried by single-particle levels of a strongly coupled quantum dot operating in the few-hole regime. These results establish Ge–Si nanowires as an important platform for hybrid superconductor–semiconductor physics and Majorana fermions.}, author = {Ridderbos, Joost and Brauns, Matthias and Shen, Jie and de Vries, Folkert K. and Li, Ang and Bakkers, Erik P. A. M. and Brinkman, Alexander and Zwanenburg, Floris A.}, issn = {0935-9648}, journal = {Advanced Materials}, number = {44}, publisher = {Wiley}, title = {{Josephson effect in a few-hole quantum dot}}, doi = {10.1002/adma.201802257}, volume = {30}, year = {2018}, } @article{5980, abstract = {The problem of private set-intersection (PSI) has been traditionally treated as an instance of the more general problem of multi-party computation (MPC). Consequently, in order to argue security, or compose these protocols one has to rely on the general theory that was developed for the purpose of MPC.
The pursuit of efficient protocols, however, has resulted in designs that exploit properties pertaining to PSI. In almost all practical applications where a PSI protocol is deployed, it is expected to be executed multiple times, possibly on related inputs. In this work we initiate a dedicated study of PSI in the multi-interaction (MI) setting. In this model a server sets up the common system parameters and executes set-intersection multiple times with potentially different clients. We discuss a few attacks that arise when protocols are naïvely composed in this manner and, accordingly, craft security definitions for the MI setting and study their inter-relation. Finally, we suggest a set of protocols that are MI-secure, at the same time almost as efficient as their parent, stand-alone, protocols.}, author = {Chatterjee, Sanjit and Kamath Hosdurg, Chethan and Kumar, Vikas}, journal = {Advances in Mathematics of Communications}, number = {1}, pages = {17--47}, publisher = {AIMS}, title = {{Private set-intersection with common set-up}}, doi = {10.3934/amc.2018002}, volume = {12}, year = {2018}, } @article{5998, abstract = {Genome amplification and cellular senescence are commonly associated with pathological processes. While physiological roles for polyploidization and senescence have been described in mouse development, controversy exists over their significance in humans. Here, we describe tetraploidization and senescence as phenomena of normal human placenta development. During pregnancy, placental extravillous trophoblasts (EVTs) invade the pregnant endometrium, termed decidua, to establish an adapted microenvironment required for the developing embryo. This process is critically dependent on continuous cell proliferation and differentiation, which is thought to follow the classical model of cell cycle arrest prior to terminal differentiation. Strikingly, flow cytometry and DNAseq revealed that EVT formation is accompanied with a genome-wide polyploidization, independent of mitotic cycles. DNA replication in these cells was analysed by a fluorescent cell-cycle indicator reporter system, cell cycle marker expression and EdU incorporation. Upon invasion into the decidua, EVTs widely lose their replicative potential and enter a senescent state characterized by high senescence-associated (SA) β-galactosidase activity, induction of a SA secretory phenotype as well as typical metabolic alterations. Furthermore, we show that the shift from endocycle-dependent genome amplification to growth arrest is disturbed in androgenic complete hydatidiform moles (CHM), a hyperplastic pregnancy disorder associated with increased risk of developing choriocarcinoma. Senescence is decreased in CHM-EVTs, accompanied by exacerbated endoreduplication and hyperploidy. We propose induction of cellular senescence as a ploidy-limiting mechanism during normal human placentation and unravel a link between excessive polyploidization and reduced senescence in CHM.}, author = {Velicky, Philipp and Meinhardt, Gudrun and Plessl, Kerstin and Vondra, Sigrid and Weiss, Tamara and Haslinger, Peter and Lendl, Thomas and Aumayr, Karin and Mairhofer, Mario and Zhu, Xiaowei and Schütz, Birgit and Hannibal, Roberta L. and Lindau, Robert and Weil, Beatrix and Ernerudh, Jan and Neesen, Jürgen and Egger, Gerda and Mikula, Mario and Röhrl, Clemens and Urban, Alexander E.
and Baker, Julie and Knöfler, Martin and Pollheimer, Jürgen}, issn = {1553-7404}, journal = {PLOS Genetics}, number = {10}, publisher = {Public Library of Science}, title = {{Genome amplification and cellular senescence are hallmarks of human placenta development}}, doi = {10.1371/journal.pgen.1007698}, volume = {14}, year = {2018}, } @article{5995, abstract = {Motivation: Computational prediction of the effect of mutations on protein stability is used by researchers in many fields. The utility of the prediction methods is affected by their accuracy and bias. Bias, a systematic shift of the predicted change of stability, has been noted as an issue for several methods, but has not been investigated systematically. Presence of the bias may lead to misleading results especially when exploring the effects of combination of different mutations. Results: Here we use a protocol to measure the bias as a function of the number of introduced mutations. It is based on a self-consistency test of the reciprocity of the effect of a mutation. An advantage of the used approach is that it relies solely on crystal structures without experimentally measured stability values. We applied the protocol to four popular algorithms predicting change of protein stability upon mutation, FoldX, Eris, Rosetta and I-Mutant, and found an inherent bias. For one program, FoldX, we manage to substantially reduce the bias using additional relaxation by Modeller. Authors using algorithms for predicting effects of mutations should be aware of the bias described here.}, author = {Usmanova, Dinara R and Bogatyreva, Natalya S and Ariño Bernad, Joan and Eremina, Aleksandra A and Gorshkova, Anastasiya A and Kanevskiy, German M and Lonishin, Lyubov R and Meister, Alexander V and Yakupova, Alisa G and Kondrashov, Fyodor and Ivankov, Dmitry}, issn = {1367-4803}, journal = {Bioinformatics}, number = {21}, pages = {3653--3658}, publisher = {Oxford University Press}, title = {{Self-consistency test reveals systematic bias in programs for prediction change of stability upon mutation}}, doi = {10.1093/bioinformatics/bty340}, volume = {34}, year = {2018}, } @article{5992, abstract = {Lamellipodia are flat membrane protrusions formed during mesenchymal motion. Polymerization at the leading edge assembles the actin filament network and generates protrusion force. How this force is supported by the network and how the assembly rate is shared between protrusion and network retrograde flow determines the protrusion rate. We use mathematical modeling to understand experiments changing the F-actin density in lamellipodia of B16-F1 melanoma cells by modulation of Arp2/3 complex activity or knockout of the formins FMNL2 and FMNL3. Cells respond to a reduction of density with a decrease of protrusion velocity, an increase in the ratio of force to filament number, but constant network assembly rate. The relation between protrusion force and tension gradient in the F-actin network and the density dependency of friction, elasticity, and viscosity of the network explain the experimental observations. The formins act as filament nucleators and elongators with differential rates. Modulation of their activity suggests an effect on network assembly rate. Contrary to these expectations, the effect of changes in elongator composition is much weaker than the consequences of the density change.
We conclude that the force acting on the leading edge membrane is the force required to drive F-actin network retrograde flow.}, author = {Dolati, Setareh and Kage, Frieda and Mueller, Jan and Müsken, Mathias and Kirchner, Marieluise and Dittmar, Gunnar and Sixt, Michael K and Rottner, Klemens and Falcke, Martin}, issn = {1939-4586}, journal = {Molecular Biology of the Cell}, number = {22}, pages = {2674--2686}, publisher = {American Society for Cell Biology }, title = {{On the relation between filament density, force generation, and protrusion rate in mesenchymal cell motility}}, doi = {10.1091/mbc.e18-02-0082}, volume = {29}, year = {2018}, } @article{6010, abstract = {The optic tectum (TeO), or superior colliculus, is a multisensory midbrain center that organizes spatially orienting responses to relevant stimuli. To define the stimulus with the highest priority at each moment, a network of reciprocal connections between the TeO and the isthmi promotes competition between concurrent tectal inputs. In the avian midbrain, the neurons mediating enhancement and suppression of tectal inputs are located in separate isthmic nuclei, facilitating the analysis of the neural processes that mediate competition. A specific subset of radial neurons in the intermediate tectal layers relay retinal inputs to the isthmi, but at present it is unclear whether separate neurons innervate individual nuclei or a single neural type sends a common input to several of them. In this study, we used in vitro neural tracing and cell-filling experiments in chickens to show that single neurons innervate, via axon collaterals, the three nuclei that comprise the isthmotectal network. This demonstrates that the input signals representing the strength of the incoming stimuli are simultaneously relayed to the mechanisms promoting both enhancement and suppression of the input signals. By performing in vivo recordings in anesthetized chicks, we also show that this common input generates synchrony between both antagonistic mechanisms, demonstrating that activity enhancement and suppression are closely coordinated. From a computational point of view, these results suggest that these tectal neurons constitute integrative nodes that combine inputs from different sources to drive in parallel several concurrent neural processes, each performing complementary functions within the network through different firing patterns and connectivity.}, author = {Garrido-Charad, Florencia and Vega Zuniga, Tomas A and Gutiérrez-Ibáñez, Cristián and Fernandez, Pedro and López-Jury, Luciana and González-Cabrera, Cristian and Karten, Harvey J. and Luksch, Harald and Marín, Gonzalo J.}, issn = {1091-6490}, journal = {Proceedings of the National Academy of Sciences}, number = {32}, pages = {E7615--E7623}, publisher = {National Academy of Sciences}, title = {{“Shepherd’s crook” neurons drive and synchronize the enhancing and suppressive mechanisms of the midbrain stimulus selection network}}, doi = {10.1073/pnas.1804517115}, volume = {115}, year = {2018}, } @article{6003, abstract = {Digital fabrication devices are powerful tools for creating tangible reproductions of 3D digital models. Most available printing technologies aim at producing an accurate copy of a tridimensional shape. However, fabrication technologies can also be used to create a stylistic representation of a digital shape. We refer to this class of methods as ‘stylized fabrication methods’. 
These methods abstract geometric and physical features of a given shape to create an unconventional representation, to produce an optical illusion or to devise a particular interaction with the fabricated model. In this state-of-the-art report, we classify and overview this broad and emerging class of approaches and also propose possible directions for future research.}, author = {Bickel, Bernd and Cignoni, Paolo and Malomo, Luigi and Pietroni, Nico}, issn = {0167-7055}, journal = {Computer Graphics Forum}, number = {6}, pages = {325--342}, publisher = {Wiley}, title = {{State of the art on stylized fabrication}}, doi = {10.1111/cgf.13327}, volume = {37}, year = {2018}, } @article{6002, abstract = {The Bogoliubov free energy functional is analysed. The functional serves as a model of a translation-invariant Bose gas at positive temperature. We prove the existence of minimizers in the case of repulsive interactions given by a sufficiently regular two-body potential. Furthermore, we prove the existence of a phase transition in this model and provide its phase diagram.}, author = {Napiórkowski, Marcin M and Reuvers, Robin and Solovej, Jan Philip}, issn = {1432-0673}, journal = {Archive for Rational Mechanics and Analysis}, number = {3}, pages = {1037--1090}, publisher = {Springer Nature}, title = {{The Bogoliubov free energy functional I: Existence of minimizers and phase diagram}}, doi = {10.1007/s00205-018-1232-6}, volume = {229}, year = {2018}, } @article{5996, abstract = {In pipes, turbulence sets in despite the linear stability of the laminar Hagen–Poiseuille flow. The Reynolds number (Re) for which turbulence first appears in a given experiment – the ‘natural transition point’ – depends on imperfections of the set-up, or, more precisely, on the magnitude of finite amplitude perturbations. At onset, turbulence typically only occupies a certain fraction of the flow, and this fraction equally is found to differ from experiment to experiment. Despite these findings, Reynolds proposed that after sufficiently long times, flows may settle to steady conditions: below a critical velocity, flows should (regardless of initial conditions) always return to laminar, while above this velocity, eddying motion should persist. As will be shown, even in pipes several thousand diameters long, the spatio-temporal intermittent flow patterns observed at the end of the pipe strongly depend on the initial conditions, and there is no indication that different flow patterns would eventually settle to a (statistical) steady state. Exploiting the fact that turbulent puffs do not age (i.e. they are memoryless), we continuously recreate the puff sequence exiting the pipe at the pipe entrance, and in doing so introduce periodic boundary conditions for the puff pattern. This procedure allows us to study the evolution of the flow patterns for arbitrary long times, and we find that after a sufficiently large number of advective time units, indeed a statistical steady state is reached. Although the resulting flows remain spatio-temporally intermittent, puff splitting and decay rates eventually reach a balance, so that the turbulent fraction fluctuates around a well-defined level which only depends on Re. In accordance with Reynolds’ proposition, we find that at lower Re (here 2020), flows eventually always resume to laminar, while for higher Re, turbulence persists. The critical point for pipe flow hence falls in the interval between these two Reynolds numbers, which is in very good agreement with the recently proposed value.
The latter estimate was based on single-puff statistics and entirely neglected puff interactions. Unlike in typical contact processes where such interactions strongly affect the percolation threshold, in pipe flow, the critical point is only marginally influenced. Interactions, on the other hand, are responsible for the approach to the statistical steady state. As shown, they strongly affect the resulting flow patterns, where they cause ‘puff clustering’, and these regions of large puff densities are observed to travel across the puff pattern in a wave-like fashion.}, author = {Vasudevan, Mukund and Hof, Björn}, issn = {1469-7645}, journal = {Journal of Fluid Mechanics}, pages = {76--94}, publisher = {Cambridge University Press}, title = {{The critical point of the transition to turbulence in pipe flow}}, doi = {10.1017/jfm.2017.923}, volume = {839}, year = {2018}, } @article{5993, abstract = {In this article, we consider the termination problem of probabilistic programs with real-valued variables. The questions concerned are: qualitative ones that ask (i) whether the program terminates with probability 1 (almost-sure termination) and (ii) whether the expected termination time is finite (finite termination); and quantitative ones that ask (i) to approximate the expected termination time (expectation problem) and (ii) to compute a bound B such that the probability not to terminate after B steps decreases exponentially (concentration problem). To solve these questions, we utilize the notion of ranking supermartingales, which is a powerful approach for proving termination of probabilistic programs. In detail, we focus on algorithmic synthesis of linear ranking-supermartingales over affine probabilistic programs (Apps) with both angelic and demonic non-determinism. An important subclass of Apps is LRApp which is defined as the class of all Apps over which a linear ranking-supermartingale exists. Our main contributions are as follows. Firstly, we show that the membership problem of LRApp (i) can be decided in polynomial time for Apps with at most demonic non-determinism, and (ii) is NP-hard and in PSPACE for Apps with angelic non-determinism. Moreover, the NP-hardness result holds already for Apps without probability and demonic non-determinism. Secondly, we show that the concentration problem over LRApp can be solved in the same complexity as for the membership problem of LRApp. Finally, we show that the expectation problem over LRApp can be solved in 2EXPTIME and is PSPACE-hard even for Apps without probability and non-determinism (i.e., deterministic programs). Our experimental results demonstrate the effectiveness of our approach to answer the qualitative and quantitative questions over Apps with at most demonic non-determinism.}, author = {Chatterjee, Krishnendu and Fu, Hongfei and Novotný, Petr and Hasheminezhad, Rouzbeh}, issn = {0164-0925}, journal = {ACM Transactions on Programming Languages and Systems}, number = {2}, publisher = {Association for Computing Machinery (ACM)}, title = {{Algorithmic analysis of qualitative and quantitative termination problems for affine probabilistic programs}}, doi = {10.1145/3174800}, volume = {40}, year = {2018}, } @article{5999, abstract = {We introduce for each quiver Q and each algebraic oriented cohomology theory A, the cohomological Hall algebra (CoHA) of Q, as the A-homology of the moduli of representations of the preprojective algebra of Q. This generalizes the K-theoretic Hall algebra of commuting varieties defined by Schiffmann-Vasserot.
When A is the Morava K-theory, we show evidence that this algebra is a candidate for Lusztig's reformulated conjecture on modular representations of algebraic groups. We construct an action of the preprojective CoHA on the A-homology of Nakajima quiver varieties. We compare this with the action of the Borel subalgebra of Yangian when A is the intersection theory. We also give a shuffle algebra description of this CoHA in terms of the underlying formal group law of A. As applications, we obtain a shuffle description of the Yangian. }, author = {Yang, Yaping and Zhao, Gufang}, issn = {0024-6115}, journal = {Proceedings of the London Mathematical Society}, number = {5}, pages = {1029--1074}, publisher = {Oxford University Press}, title = {{The cohomological Hall algebra of a preprojective algebra}}, doi = {10.1112/plms.12111}, volume = {116}, year = {2018}, } @article{5989, abstract = {Schistosomes are the causative agents of schistosomiasis, a neglected tropical disease affecting over 230 million people worldwide. Additionally to their major impact on human health, they are also models of choice in evolutionary biology. These parasitic flatworms are unique among the common hermaphroditic trematodes as they have separate sexes. This so-called “evolutionary scandal” displays a female heterogametic genetic sex-determination system (ZZ males and ZW females), as well as a pronounced adult sexual dimorphism. These phenotypic differences are determined by a shared set of genes in both sexes, potentially leading to intralocus sexual conflicts. To resolve these conflicts in sexually selected traits, molecular mechanisms such as sex-biased gene expression could occur, but parent-of-origin gene expression also provides an alternative. In this work we investigated the latter mechanism, that is, genes expressed preferentially from either the maternal or the paternal allele, in Schistosoma mansoni species. To this end, transcriptomes from male and female hybrid adults obtained by strain crosses were sequenced. Strain-specific single nucleotide polymorphism (SNP) markers allowed us to discriminate the parental origin, while reciprocal crosses helped to differentiate parental expression from strain-specific expression. We identified genes containing SNPs expressed in a parent-of-origin manner consistent with paternal and maternal imprints. Although the majority of the SNPs was identified in mitochondrial and Z-specific loci, the remaining SNPs found in male and female transcriptomes were situated in genes that have the potential to explain sexual differences in schistosome parasites. Furthermore, we identified and validated four new Z-specific scaffolds.}, author = {Kincaid-Smith, Julien and Picard, Marion A L and Cosseau, Céline and Boissier, Jérôme and Severac, Dany and Grunau, Christoph and Toulza, Eve}, issn = {1759-6653}, journal = {Genome Biology and Evolution}, number = {3}, pages = {840--856}, publisher = {Oxford University Press}, title = {{Parent-of-Origin-Dependent Gene Expression in Male and Female Schistosome Parasites}}, doi = {10.1093/gbe/evy037}, volume = {10}, year = {2018}, } @inproceedings{6031, abstract = {We introduce Clover, a new library for efficient computation using low-precision data, providing mathematical routines required by fundamental methods in optimization and sparse recovery. Our library faithfully implements variants of stochastic quantization that guarantee convergence at low precision, and supports data formats from 4-bit quantized to 32-bit IEEE-754 on current Intel processors. 
In particular, we show that 4-bit can be implemented efficiently using Intel AVX despite the lack of native support for this data format. Experimental results with dot product, matrix-vector multiplication (MVM), gradient descent (GD), and iterative hard thresholding (IHT) demonstrate that the attainable speedups are in many cases close to linear with respect to the reduction of precision due to reduced data movement. Finally, for GD and IHT, we show examples of absolute speedup achieved by 4-bit versus 32-bit, by iterating until a given target error is achieved.}, author = {Stojanov, Alen and Smith, Tyler Michael and Alistarh, Dan-Adrian and Puschel, Markus}, booktitle = {2018 IEEE International Workshop on Signal Processing Systems}, location = {Cape Town, South Africa}, publisher = {IEEE}, title = {{Fast quantized arithmetic on x86: Trading compute for data movement}}, doi = {10.1109/SiPS.2018.8598402}, volume = {2018-October}, year = {2018}, } @inproceedings{25, abstract = {Partially observable Markov decision processes (POMDPs) are the standard models for planning under uncertainty with both finite and infinite horizon. Besides the well-known discounted-sum objective, indefinite-horizon objective (aka Goal-POMDPs) is another classical objective for POMDPs. In this case, given a set of target states and a positive cost for each transition, the optimization objective is to minimize the expected total cost until a target state is reached. In the literature, RTDP-Bel or heuristic search value iteration (HSVI) have been used for solving Goal-POMDPs. Neither of these algorithms has theoretical convergence guarantees, and HSVI may even fail to terminate its trials. We give the following contributions: (1) We discuss the challenges introduced in Goal-POMDPs and illustrate how they prevent the original HSVI from converging. (2) We present a novel algorithm inspired by HSVI, termed Goal-HSVI, and show that our algorithm has convergence guarantees. (3) We show that Goal-HSVI outperforms RTDP-Bel on a set of well-known examples.}, author = {Horák, Karel and Bošanský, Branislav and Chatterjee, Krishnendu}, booktitle = {Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence}, location = {Stockholm, Sweden}, pages = {4764 -- 4770}, publisher = {IJCAI}, title = {{Goal-HSVI: Heuristic search value iteration for goal-POMDPs}}, doi = {10.24963/ijcai.2018/662}, volume = {2018-July}, year = {2018}, } @inproceedings{24, abstract = {Partially-observable Markov decision processes (POMDPs) with discounted-sum payoff are a standard framework to model a wide range of problems related to decision making under uncertainty. Traditionally, the goal has been to obtain policies that optimize the expectation of the discounted-sum payoff. A key drawback of the expectation measure is that even low probability events with extreme payoff can significantly affect the expectation, and thus the obtained policies are not necessarily risk-averse. An alternate approach is to optimize the probability that the payoff is above a certain threshold, which allows obtaining risk-averse policies, but ignores optimization of the expectation. We consider the expectation optimization with probabilistic guarantee (EOPG) problem, where the goal is to optimize the expectation ensuring that the payoff is above a given threshold with at least a specified probability. 
We present several results on the EOPG problem, including the first algorithm to solve it.}, author = {Chatterjee, Krishnendu and Elgyütt, Adrian and Novotny, Petr and Rouillé, Owen}, location = {Stockholm, Sweden}, pages = {4692 -- 4699}, publisher = {IJCAI}, title = {{Expectation optimization with probabilistic guarantees in POMDPs with discounted-sum objectives}}, doi = {10.24963/ijcai.2018/652}, volume = {2018}, year = {2018}, } @inproceedings{34, abstract = {Partially observable Markov decision processes (POMDPs) are widely used in probabilistic planning problems in which an agent interacts with an environment using noisy and imprecise sensors. We study a setting in which the sensors are only partially defined and the goal is to synthesize “weakest” additional sensors, such that in the resulting POMDP, there is a small-memory policy for the agent that almost-surely (with probability 1) satisfies a reachability objective. We show that the problem is NP-complete, and present a symbolic algorithm by encoding the problem into SAT instances. We illustrate trade-offs between the amount of memory of the policy and the number of additional sensors on a simple example. We have implemented our approach and consider three classical POMDP examples from the literature, and show that in all the examples the number of sensors can be significantly decreased (as compared to the existing solutions in the literature) without increasing the complexity of the policies.}, author = {Chatterjee, Krishnendu and Chemlík, Martin and Topcu, Ufuk}, location = {Delft, Netherlands}, pages = {47 -- 55}, publisher = {AAAI Press}, title = {{Sensor synthesis for POMDPs with reachability objectives}}, volume = {2018}, year = {2018}, } @article{18, abstract = {An N-superconcentrator is a directed, acyclic graph with N input nodes and N output nodes such that every subset of the inputs and every subset of the outputs of same cardinality can be connected by node-disjoint paths. It is known that linear-size and bounded-degree superconcentrators exist. We prove the existence of such superconcentrators with asymptotic density 25.3 (where the density is the number of edges divided by N). The previously best known densities were 28 [12] and 27.4136 [17].}, author = {Kolmogorov, Vladimir and Rolinek, Michal}, issn = {0381-7032}, journal = {Ars Combinatoria}, number = {10}, pages = {269 -- 304}, publisher = {Charles Babbage Research Centre}, title = {{Superconcentrators of density 25.3}}, volume = {141}, year = {2018}, } @article{6355, abstract = {We prove that any cyclic quadrilateral can be inscribed in any closed convex C1-curve. The smoothness condition is not required if the quadrilateral is a rectangle.}, author = {Akopyan, Arseniy and Avvakumov, Sergey}, issn = {2050-5094}, journal = {Forum of Mathematics, Sigma}, publisher = {Cambridge University Press}, title = {{Any cyclic quadrilateral can be inscribed in any closed convex smooth curve}}, doi = {10.1017/fms.2018.7}, volume = {6}, year = {2018}, } @inproceedings{6195, abstract = {In the context of robotic manipulation and grasping, the shift from a view that is static (force closure of a single posture) and contact-deprived (only contact for force closure is allowed, everything else is obstacle) towards a view that is dynamic and contact-rich (soft manipulation) has led to an increased interest in soft hands. These hands can easily exploit environmental constraints and object surfaces without risk, and safely interact with humans, but present also some challenges. 
Designing them is difficult, as well as predicting, modelling, and “programming” their interactions with the objects and the environment. This paper tackles the problem of simulating them in a fast and effective way, leveraging novel and existing simulation technologies. We present a triple-layered simulation framework where dynamic properties such as stiffness are determined from slow but accurate FEM simulation data once, and then condensed into a lumped parameter model that can be used to rapidly simulate soft fingers and soft hands. We apply our approach to the simulation of soft pneumatic fingers.}, author = {Pozzi, Maria and Miguel Villalba, Eder and Deimel, Raphael and Malvezzi, Monica and Bickel, Bernd and Brock, Oliver and Prattichizzo, Domenico}, isbn = {9781538630815}, location = {Brisbane, Australia}, publisher = {IEEE}, title = {{Efficient FEM-based simulation of soft robots modeled as kinematic chains}}, doi = {10.1109/icra.2018.8461106}, year = {2018}, } @inproceedings{6941, abstract = {Bitcoin has become the most successful cryptocurrency ever deployed, and its most distinctive feature is that it is decentralized. Its underlying protocol (Nakamoto consensus) achieves this by using proof of work, which has the drawback that it causes the consumption of vast amounts of energy to maintain the ledger. Moreover, Bitcoin mining dynamics have become less distributed over time. Towards addressing these issues, we propose SpaceMint, a cryptocurrency based on proofs of space instead of proofs of work. Miners in SpaceMint dedicate disk space rather than computation. We argue that SpaceMint’s design solves or alleviates several of Bitcoin’s issues: most notably, its large energy consumption. SpaceMint also rewards smaller miners fairly according to their contribution to the network, thus incentivizing more distributed participation. This paper adapts proof of space to enable its use in cryptocurrency, studies the attacks that can arise against a Bitcoin-like blockchain that uses proof of space, and proposes a new blockchain format and transaction types to address these attacks. Our prototype shows that initializing 1 TB for mining takes about a day (a one-off setup cost), and miners spend on average just a fraction of a second per block mined. Finally, we provide a game-theoretic analysis modeling SpaceMint as an extensive game (the canonical game-theoretic notion for games that take place over time) and show that this stylized game satisfies a strong equilibrium notion, thereby arguing for SpaceMint’s stability and consensus.}, author = {Park, Sunoo and Kwon, Albert and Fuchsbauer, Georg and Gazi, Peter and Alwen, Joel F and Pietrzak, Krzysztof Z}, booktitle = {22nd International Conference on Financial Cryptography and Data Security}, isbn = {9783662583869}, issn = {1611-3349}, location = {Nieuwpoort, Curacao}, pages = {480--499}, publisher = {Springer Nature}, title = {{SpaceMint: A cryptocurrency based on proofs of space}}, doi = {10.1007/978-3-662-58387-6_26}, volume = {10957}, year = {2018}, } @article{6497, abstract = {T cells are actively scanning pMHC-presenting cells in lymphoid organs and nonlymphoid tissues (NLTs) with divergent topologies and confinement. How the T cell actomyosin cytoskeleton facilitates this task in distinct environments is incompletely understood. Here, we show that lack of Myosin IXb (Myo9b), a negative regulator of the small GTPase Rho, led to increased Rho-GTP levels and cell surface stiffness in primary T cells. 
Nonetheless, intravital imaging revealed robust motility of Myo9b−/− CD8+ T cells in lymphoid tissue and similar expansion and differentiation during immune responses. In contrast, accumulation of Myo9b−/− CD8+ T cells in NLTs was strongly impaired. Specifically, Myo9b was required for T cell crossing of basement membranes, such as those which are present between dermis and epidermis. As a consequence, Myo9b−/− CD8+ T cells showed impaired control of skin infections. In sum, we show that Myo9b is critical for the CD8+ T cell adaptation from lymphoid to NLT surveillance and the establishment of protective tissue-resident T cell populations.}, author = {Moalli, Federica and Ficht, Xenia and Germann, Philipp and Vladymyrov, Mykhailo and Stolp, Bettina and de Vries, Ingrid and Lyck, Ruth and Balmer, Jasmin and Fiocchi, Amleto and Kreutzfeldt, Mario and Merkler, Doron and Iannacone, Matteo and Ariga, Akitaka and Stoffel, Michael H. and Sharpe, James and Bähler, Martin and Sixt, Michael K and Diz-Muñoz, Alba and Stein, Jens V.}, issn = {1540-9538}, journal = {The Journal of Experimental Medicine}, number = {7}, pages = {1869--1890}, publisher = {Rockefeller University Press}, title = {{The Rho regulator Myosin IXb enables nonlymphoid tissue seeding of protective CD8+ T cells}}, doi = {10.1084/jem.20170896}, volume = {215}, year = {2018}, } @article{6499, abstract = {Expansion microscopy is a recently introduced imaging technique that achieves super‐resolution through physically expanding the specimen by ~4×, after embedding into a swellable gel. The resolution attained is, correspondingly, approximately fourfold better than the diffraction limit, or ~70 nm. This is a major improvement over conventional microscopy, but still lags behind modern STED or STORM setups, whose resolution can reach 20–30 nm. We addressed this issue here by introducing an improved gel recipe that enables an expansion factor of ~10× in each dimension, which corresponds to an expansion of the sample volume by more than 1,000‐fold. Our protocol, which we termed X10 microscopy, achieves a resolution of 25–30 nm on conventional epifluorescence microscopes. X10 provides multi‐color images similar or even superior to those produced with more challenging methods, such as STED, STORM, and iterative expansion microscopy (iExM). X10 is therefore the cheapest and easiest option for high‐quality super‐resolution imaging currently available. X10 should be usable in any laboratory, irrespective of the machinery owned or of the technical knowledge.}, author = {Truckenbrodt, Sven M and Maidorn, Manuel and Crzan, Dagmar and Wildhagen, Hanna and Kabatas, Selda and Rizzoli, Silvio O}, issn = {1469-3178}, journal = {EMBO reports}, number = {9}, publisher = {EMBO}, title = {{X10 expansion microscopy enables 25‐nm resolution on conventional microscopes}}, doi = {10.15252/embr.201845836}, volume = {19}, year = {2018}, } @inproceedings{7123, abstract = {Population protocols are a popular model of distributed computing, in which n agents with limited local state interact randomly, and cooperate to collectively compute global predicates. Inspired by recent developments in DNA programming, an extensive series of papers, across different communities, has examined the computability and complexity characteristics of this model. Majority, or consensus, is a central task in this model, in which agents need to collectively reach a decision as to which one of two states A or B had a higher initial count. 
Two metrics are important: the time that a protocol requires to stabilize to an output decision, and the state space size that each agent requires to do so. It is known that majority requires Ω(log log n) states per agent to allow for fast (poly-logarithmic time) stabilization, and that O(log^2 n) states are sufficient. Thus, there is an exponential gap between the space upper and lower bounds for this problem. This paper addresses this question. On the negative side, we provide a new lower bound of Ω(log n) states for any protocol which stabilizes in O(n^(1-c)) expected time, for any constant c > 0. This result is conditional on monotonicity and output assumptions, satisfied by all known protocols. Technically, it represents a departure from previous lower bounds, in that it does not rely on the existence of dense configurations. Instead, we introduce a new generalized surgery technique to prove the existence of incorrect executions for any algorithm which would contradict the lower bound. Subsequently, our lower bound also applies to general initial configurations, including ones with a leader. On the positive side, we give a new algorithm for majority which uses O(log n) states, and stabilizes in O(log^2 n) expected time. Central to the algorithm is a new leaderless phase clock technique, which allows agents to synchronize in phases of Θ(n log n) consecutive interactions using O(log n) states per agent, exploiting a new connection between population protocols and power-of-two-choices load balancing mechanisms. We also employ our phase clock to build a leader election algorithm with a state space of size O(log n), which stabilizes in O(log^2 n) expected time.}, author = {Alistarh, Dan-Adrian and Aspnes, James and Gelashvili, Rati}, booktitle = {Proceedings of the 29th Annual ACM-SIAM Symposium on Discrete Algorithms}, isbn = {9781611975031}, location = {New Orleans, LA, United States}, pages = {2221--2239}, publisher = {ACM}, title = {{Space-optimal majority in population protocols}}, doi = {10.1137/1.9781611975031.144}, year = {2018}, }
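Aside on the last entry above (Alistarh, Aspnes and Gelashvili): the abstract describes the random-interaction model only in prose, so the following is a minimal, illustrative Python sketch of a population-protocol majority computation. It simulates the classic four-state exact-majority protocol (strong opinions A/B cancel pairwise; weak opinions a/b adopt the side of any surviving strong agent), which is not the O(log n)-state phase-clock algorithm of the paper; the function and variable names below are illustrative only.

# Illustrative sketch only: the classic 4-state exact-majority population protocol.
# It is NOT the space-optimal phase-clock algorithm of the entry above; it merely
# makes the random pairwise-interaction model concrete.
import random

def stabilized(agents):
    # Stable once every agent outputs the same side:
    # 'A'/'a' mean "A wins", 'B'/'b' mean "B wins".
    s = set(agents)
    return s <= {'A', 'a'} or s <= {'B', 'b'}

def simulate_majority(n_a, n_b, seed=0):
    assert n_a != n_b, "the simple 4-state protocol cannot resolve ties"
    rng = random.Random(seed)
    agents = ['A'] * n_a + ['B'] * n_b      # everyone starts with a strong opinion
    n = len(agents)
    interactions = 0
    while not stabilized(agents):
        i, j = rng.sample(range(n), 2)      # the scheduler picks two distinct agents
        x, y = agents[i], agents[j]
        if {x, y} == {'A', 'B'}:            # opposite strong opinions cancel to weak
            agents[i], agents[j] = 'a', 'b'
        elif x == 'A' and y == 'b':         # a strong agent recruits a weak one
            agents[j] = 'a'
        elif x == 'B' and y == 'a':
            agents[j] = 'b'
        elif x == 'b' and y == 'A':
            agents[i] = 'a'
        elif x == 'a' and y == 'B':
            agents[i] = 'b'
        interactions += 1
    return set(agents), interactions / n    # parallel time = interactions / n

# Example: a 60/40 split stabilizes to the A side, i.e. returns ({'A', 'a'}, ...).
# print(simulate_majority(60, 40))

This constant-state protocol stabilizes with probability 1 whenever the initial counts differ, but its stabilization time is generally far from polylogarithmic; the entry above is precisely about achieving fast stabilization with only O(log n) states per agent.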