@inproceedings{10883, abstract = {Solving parity games, which are equivalent to modal μ-calculus model checking, is a central algorithmic problem in formal methods, with applications in reactive synthesis, program repair, verification of branching-time properties, etc. Besides the standard computation model with the explicit representation of games, another important theoretical model of computation is that of set-based symbolic algorithms. Set-based symbolic algorithms use basic set operations and one-step predecessor operations on the implicit description of games, rather than the explicit representation. The significance of symbolic algorithms is that they provide scalable algorithms for large finite-state systems, as well as for infinite-state systems with finite quotient. Consider parity games on graphs with n vertices and parity conditions with d priorities. While there is a rich literature of explicit algorithms for parity games, the main results for set-based symbolic algorithms are as follows: (a) the basic algorithm that requires O(n^d) symbolic operations and O(d) symbolic space; and (b) an improved algorithm that requires O(n^(d/3+1)) symbolic operations and O(n) symbolic space. In this work, our contributions are as follows: (1) We present a black-box set-based symbolic algorithm based on the explicit progress measure algorithm. Two important consequences of our algorithm are as follows: (a) a set-based symbolic algorithm for parity games that requires quasi-polynomially many symbolic operations and O(n) symbolic space; and (b) any future improvement in progress measure based explicit algorithms immediately implies an efficiency improvement in our set-based symbolic algorithm for parity games. (2) We present a set-based symbolic algorithm that requires quasi-polynomially many symbolic operations and O(d · log n) symbolic space. Moreover, for the important special case of d ≤ log n, our algorithm requires only polynomially many symbolic operations and poly-logarithmic symbolic space.}, author = {Chatterjee, Krishnendu and Dvořák, Wolfgang and Henzinger, Monika H and Svozil, Alexander}, booktitle = {22nd International Conference on Logic for Programming, Artificial Intelligence and Reasoning}, issn = {2398-7340}, location = {Awassa, Ethiopia}, pages = {233--253}, publisher = {EasyChair}, title = {{Quasipolynomial set-based symbolic algorithms for parity games}}, doi = {10.29007/5z5k}, volume = {57}, year = {2018}, } @inproceedings{11, abstract = {We report on a novel strategy to derive mean-field limits of quantum mechanical systems in which a large number of particles weakly couple to a second-quantized radiation field. The technique combines the method of counting and the coherent state approach to study the growth of the correlations among the particles and in the radiation field. As an instructional example, we derive the Schrödinger–Klein–Gordon system of equations from the Nelson model with ultraviolet cutoff and possibly massless scalar field. In particular, we prove the convergence of the reduced density matrices (of the nonrelativistic particles and the field bosons) associated with the exact time evolution to the projectors onto the solutions of the Schrödinger–Klein–Gordon equations in trace norm.
Furthermore, we derive explicit bounds on the rate of convergence of the one-particle reduced density matrix of the nonrelativistic particles in Sobolev norm.}, author = {Leopold, Nikolai K and Pickl, Peter}, location = {Munich, Germany}, pages = {185--214}, publisher = {Springer}, title = {{Mean-field limits of particles in interaction with quantised radiation fields}}, doi = {10.1007/978-3-030-01602-9_9}, volume = {270}, year = {2018}, } @article{1215, abstract = {Two generalizations of the Itô formula to infinite-dimensional spaces are given. The first one, in Hilbert spaces, extends the classical one by taking advantage of cancellations when they occur in examples, and it is applied to the case of a group generator. The second one, based on the previous one and a limit procedure, is an Itô formula in a special class of Banach spaces having a product structure with the noise in a Hilbert component; again the key point is the extension due to a cancellation. This extension to Banach spaces and in particular the specific cancellation are motivated by path-dependent Itô calculus.}, author = {Flandoli, Franco and Russo, Francesco and Zanco, Giovanni A}, journal = {Journal of Theoretical Probability}, number = {2}, pages = {789--826}, publisher = {Springer}, title = {{Infinite-dimensional calculus under weak spatial regularity of the processes}}, doi = {10.1007/s10959-016-0724-2}, volume = {31}, year = {2018}, } @inproceedings{185, abstract = {We resolve in the affirmative conjectures of A. Skopenkov and Repovš (1998), and M. Skopenkov (2003) generalizing the classical Hanani-Tutte theorem to the setting of approximating maps of graphs on 2-dimensional surfaces by embeddings. Our proof of this result is constructive and almost immediately implies an efficient algorithm for testing whether a given piecewise linear map of a graph in a surface is approximable by an embedding. More precisely, an instance of this problem consists of (i) a graph G whose vertices are partitioned into clusters and whose inter-cluster edges are partitioned into bundles, and (ii) a region R of a 2-dimensional compact surface M given as the union of a set of pairwise disjoint discs corresponding to the clusters and a set of pairwise disjoint "pipes" corresponding to the bundles, connecting certain pairs of these discs. We are to decide whether G can be embedded inside M so that the vertices in every cluster are drawn in the corresponding disc, the edges in every bundle pass only through its corresponding pipe, and every edge crosses the boundary of each disc at most once.}, author = {Fulek, Radoslav and Kynčl, Jan}, isbn = {978-3-95977-066-8}, location = {Budapest, Hungary}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Hanani-Tutte for approximating maps of graphs}}, doi = {10.4230/LIPIcs.SoCG.2018.39}, volume = {99}, year = {2018}, } @inproceedings{188, abstract = {Smallest enclosing spheres of finite point sets are central to methods in topological data analysis. Focusing on Bregman divergences to measure dissimilarity, we prove bounds on the location of the center of a smallest enclosing sphere.
These bounds depend on the range of radii for which Bregman balls are convex.}, author = {Edelsbrunner, Herbert and Virk, Ziga and Wagner, Hubert}, location = {Budapest, Hungary}, pages = {35:1--35:13}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Smallest enclosing spheres and Chernoff points in Bregman geometry}}, doi = {10.4230/LIPIcs.SoCG.2018.35}, volume = {99}, year = {2018}, } @article{306, abstract = {A cornerstone of statistical inference, the maximum entropy framework is being increasingly applied to construct descriptive and predictive models of biological systems, especially complex biological networks, from large experimental data sets. Both its broad applicability and the success it obtained in different contexts hinge upon its conceptual simplicity and mathematical soundness. Here we try to concisely review the basic elements of the maximum entropy principle, starting from the notion of ‘entropy’, and describe its usefulness for the analysis of biological systems. As examples, we focus specifically on the problem of reconstructing gene interaction networks from expression data and on recent work attempting to expand our system-level understanding of bacterial metabolism. Finally, we highlight some extensions and potential limitations of the maximum entropy approach, and point to more recent developments that are likely to play a key role in the upcoming challenges of extracting structures and information from increasingly rich, high-throughput biological data.}, author = {De Martino, Andrea and De Martino, Daniele}, journal = {Heliyon}, number = {4}, publisher = {Elsevier}, title = {{An introduction to the maximum entropy approach and its application to inference problems in biology}}, doi = {10.1016/j.heliyon.2018.e00596}, volume = {4}, year = {2018}, } @book{3300, abstract = {This book first explores the origins of model checking, grounded in theoretical work on temporal logic and automata. The editors and authors are among the world's leading researchers in this domain, and they contributed 32 chapters representing a thorough view of the development and application of the technique. Topics covered include binary decision diagrams, symbolic model checking, satisfiability modulo theories, partial-order reduction, abstraction, interpolation, concurrency, security protocols, games, probabilistic model checking, and process algebra, as well as chapters on the transfer of theory to industrial practice, property specification languages for hardware, and verification of real-time systems and hybrid systems. The book will be valuable for researchers and graduate students engaged with the development of formal methods and verification tools.}, author = {Clarke, Edmund M. and Henzinger, Thomas A and Veith, Helmut and Bloem, Roderick}, isbn = {978-3-319-10574-1}, pages = {XLVIII, 1212}, publisher = {Springer Nature}, title = {{Handbook of Model Checking}}, doi = {10.1007/978-3-319-10575-8}, year = {2018}, } @inbook{37, abstract = {Developmental processes are inherently dynamic, and understanding them requires quantitative measurements of gene and protein expression levels in space and time. While live imaging is a powerful approach for obtaining such data, it is still a challenge to apply it over long periods of time to large tissues, such as the embryonic spinal cord in mouse and chick. Nevertheless, dynamics of gene expression and signaling activity patterns in this organ can be studied by collecting tissue sections at different developmental stages.
In combination with immunohistochemistry, this allows for measuring the levels of multiple developmental regulators in a quantitative manner with high spatiotemporal resolution. The mean protein expression levels over time, as well as embryo-to-embryo variability, can be analyzed. A key aspect of the approach is the ability to compare protein levels across different samples. This requires a number of considerations in sample preparation, imaging and data analysis. Here we present a protocol for obtaining time course data of dorsoventral expression patterns from mouse and chick neural tube in the first 3 days of neural tube development. The described workflow starts from embryo dissection and ends with a processed dataset. Software scripts for data analysis are included. The protocol is adaptable and instructions that allow the user to modify different steps are provided. Thus, the procedure can be altered for analysis of time-lapse images and applied to systems other than the neural tube.}, author = {Zagórski, Marcin P and Kicheva, Anna}, booktitle = {Morphogen Gradients}, isbn = {978-1-4939-8771-9}, issn = {1064-3745}, pages = {47--63}, publisher = {Springer Nature}, title = {{Measuring dorsoventral pattern and morphogen signaling profiles in the growing neural tube}}, doi = {10.1007/978-1-4939-8772-6_4}, volume = {1863}, year = {2018}, } @article{305, abstract = {The hanging-drop network (HDN) is a technology platform based on a completely open microfluidic network at the bottom of an inverted, surface-patterned substrate. The platform is predominantly used for the formation, culturing, and interaction of self-assembled spherical microtissues (spheroids) under precisely controlled flow conditions. Here, we describe the design, fabrication, and operation of microfluidic hanging-drop networks.}, author = {Misun, Patrick and Birchler, Axel and Lang, Moritz and Hierlemann, Andreas and Frey, Olivier}, journal = {Methods in Molecular Biology}, pages = {183--202}, publisher = {Springer}, title = {{Fabrication and operation of microfluidic hanging drop networks}}, doi = {10.1007/978-1-4939-7792-5_15}, volume = {1771}, year = {2018}, } @inproceedings{325, abstract = {Probabilistic programs extend classical imperative programs with real-valued random variables and random branching. The most basic liveness property for such programs is the termination property. The qualitative (aka almost-sure) termination problem asks whether a given program terminates with probability 1. While ranking functions provide a sound and complete method for non-probabilistic programs, their extension to probabilistic programs is achieved via ranking supermartingales (RSMs). Although deep theoretical results have been established about RSMs, their application to probabilistic programs with nondeterminism has been limited only to programs of restricted control-flow structure. For non-probabilistic programs, lexicographic ranking functions provide a compositional and practical approach for termination analysis of real-world programs. In this work we introduce lexicographic RSMs and show that they present a sound method for almost-sure termination of probabilistic programs with nondeterminism. We show that lexicographic RSMs provide a tool for compositional reasoning about almost-sure termination, and for probabilistic programs with linear arithmetic they can be synthesized efficiently (in polynomial time).
We also show that, with additional restrictions, even asymptotic bounds on expected termination time can be obtained through lexicographic RSMs. Finally, we present experimental results on benchmarks adapted from previous work to demonstrate the effectiveness of our approach.}, author = {Agrawal, Sheshansh and Chatterjee, Krishnendu and Novotny, Petr}, location = {Los Angeles, CA, USA}, number = {POPL}, publisher = {ACM}, title = {{Lexicographic ranking supermartingales: an efficient approach to termination of probabilistic programs}}, doi = {10.1145/3158122}, volume = {2}, year = {2018}, }