@article{2187,
abstract = {Systems should not only be correct but also robust in the sense that they behave reasonably in unexpected situations. This article addresses synthesis of robust reactive systems from temporal specifications. Existing methods allow arbitrary behavior if assumptions in the specification are violated. To overcome this, we define two robustness notions, combine them, and show how to enforce them in synthesis. The first notion applies to safety properties: If safety assumptions are violated temporarily, we require that the system recovers to normal operation with as few errors as possible. The second notion requires that, if liveness assumptions are violated, as many guarantees as possible should be fulfilled nevertheless. We present a synthesis procedure achieving this for the important class of GR(1) specifications, and establish complexity bounds. We also present an implementation of a special case of robustness, and show experimental results.},
author = {Bloem, Roderick and Chatterjee, Krishnendu and Greimel, Karin and Henzinger, Thomas A and Hofferek, Georg and Jobstmann, Barbara and Könighofer, Bettina and Könighofer, Robert},
journal = {Acta Informatica},
number = {3-4},
pages = {193--220},
publisher = {Springer},
title = {{Synthesizing robust systems}},
doi = {10.1007/s00236-013-0191-5},
volume = {51},
year = {2014},
}
@article{2188,
abstract = {Although plant and animal cells use a similar core mechanism to deliver proteins to the plasma membrane, their different lifestyle, body organization and specific cell structures resulted in the acquisition of regulatory mechanisms that vary in the two kingdoms. In particular, cell polarity regulators do not seem to be conserved, because genes encoding key components are absent in plant genomes. In plants, the broad knowledge on polarity derives from the study of auxin transporters, the PIN-FORMED proteins, in the model plant Arabidopsis thaliana. In animals, much information is provided from the study of polarity in epithelial cells that exhibit basolateral and luminal apical polarities, separated by tight junctions. In this review, we summarize the similarities and differences of the polarization mechanisms between plants and animals and survey the main genetic approaches that have been used to characterize new genes involved in polarity establishment in plants, including the frequently used forward and reverse genetics screens as well as a novel chemical genetics approach that is expected to overcome the limitation of classical genetics methods.},
author = {Kania, Urszula and Fendrych, Matyas and Friml, Jiří},
journal = {Open Biology},
month = apr,
number = {4},
publisher = {Royal Society},
title = {{Polar delivery in plants; commonalities and differences to animal epithelial cells}},
doi = {10.1098/rsob.140017},
volume = {4},
year = {2014},
}
@techreport{5411,
abstract = {Model-based testing is a promising technology for black-box software and hardware testing, in which test cases are generated automatically from high-level specifications. Nowadays, systems typically consist of multiple interacting components and, due to their complexity, testing presents a considerable portion of the effort and cost in the design process. Exploiting the compositional structure of system specifications can considerably reduce the effort in model-based testing. Moreover, inferring properties about the system from testing its individual components allows the designer to reduce the amount of integration testing.
In this paper, we study compositional properties of the IOCO-testing theory. We propose a new approach to composition and hiding operations, inspired by contract-based design and interface theories. These operations preserve behaviors that are compatible under composition and hiding, and prune away incompatible ones. The resulting specification characterizes the input sequences for which the unit testing of components is sufficient to infer the correctness of component integration without the need for further tests. We provide a methodology that uses these results to minimize integration testing effort, but also to detect potential weaknesses in specifications. While we focus on asynchronous models and the IOCO conformance relation, the resulting methodology can be applied to a broader class of systems.},
author = {Daca, Przemyslaw and Henzinger, Thomas A and Krenn, Willibald and Nickovic, Dejan},
institution = {IST Austria},
issn = {2664-1690},
pages = {20},
title = {{Compositional specifications for IOCO testing}},
type = {Technical Report},
doi = {10.15479/AT:IST-2014-148-v2-1},
year = {2014},
}
@inproceedings{1903,
abstract = {We consider two-player zero-sum partial-observation stochastic games on graphs. Based on the information available to the players these games can be classified as follows: (a) general partial-observation (both players have partial view of the game); (b) one-sided partial-observation (one player has partial-observation and the other player has complete-observation); and (c) perfect-observation (both players have complete view of the game). The one-sided partial-observation games subsumes the important special case of one-player partial-observation stochastic games (or partial-observation Markov decision processes (POMDPs)). Based on the randomization available for the strategies, (a) the players may not be allowed to use randomization (pure strategies), or (b) they may choose a probability distribution over actions but the actual random choice is external and not visible to the player (actions invisible), or (c) they may use full randomization. We consider all these classes of games with reachability, and parity objectives that can express all ω-regular objectives. The analysis problems are classified into the qualitative analysis that asks for the existence of a strategy that ensures the objective with probability 1; and the quantitative analysis that asks for the existence of a strategy that ensures the objective with probability at least λ (0,1). In this talk we will cover a wide range of results: for perfect-observation games; for POMDPs; for one-sided partial-observation games; and for general partial-observation games.},
author = {Chatterjee, Krishnendu},
booktitle = {Mathematical Foundations of Computer Science 2014},
location = {Budapest, Hungary},
number = {PART 1},
pages = {1--4},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {{Partial-observation stochastic reachability and parity games}},
doi = {10.1007/978-3-662-44522-8_1},
volume = {8634},
year = {2014},
}
@inproceedings{2212,
abstract = {The theory of graph games is the foundation for modeling and synthesizing reactive processes. In the synthesis of stochastic processes, we use 2 1/2-player games where some transitions of the game graph are controlled by two adversarial players, the System and the Environment, and the other transitions are determined probabilistically. We consider 2 1/2-player games where the objective of the System is the conjunction of a qualitative objective (specified as a parity condition) and a quantitative objective (specified as a mean-payoff condition). We establish that the problem of deciding whether the System can ensure that the probability to satisfy the mean-payoff parity objective is at least a given threshold is in NP ∩ coNP, matching the best known bound in the special case of 2-player games (where all transitions are deterministic). We present an algorithm running in time O(d·n2d·MeanGame) to compute the set of almost-sure winning states from which the objective can be ensured with probability 1, where n is the number of states of the game, d the number of priorities of the parity objective, and MeanGame is the complexity to compute the set of almost-sure winning states in 2 1/2-player mean-payoff games. Our results are useful in the synthesis of stochastic reactive systems with both functional requirement (given as a qualitative objective) and performance requirement (given as a quantitative objective). },
author = {Chatterjee, Krishnendu and Doyen, Laurent and Gimbert, Hugo and Oualhadj, Youssouf},
booktitle = {Foundations of Software Science and Computation Structures},
location = {Grenoble, France},
pages = {210--225},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {{Perfect-information stochastic mean-payoff parity games}},
doi = {10.1007/978-3-642-54830-7_14},
volume = {8412},
year = {2014},
}
@article{2257,
abstract = {Maximum entropy models are the least structured probability distributions that exactly reproduce a chosen set of statistics measured in an interacting network. Here we use this principle to construct probabilistic models which describe the correlated spiking activity of populations of up to 120 neurons in the salamander retina as it responds to natural movies. Already in groups as small as 10 neurons, interactions between spikes can no longer be regarded as small perturbations in an otherwise independent system; for 40 or more neurons pairwise interactions need to be supplemented by a global interaction that controls the distribution of synchrony in the population. Here we show that such “K-pairwise” models—being systematic extensions of the previously used pairwise Ising models—provide an excellent account of the data. We explore the properties of the neural vocabulary by: 1) estimating its entropy, which constrains the population's capacity to represent visual information; 2) classifying activity patterns into a small set of metastable collective modes; 3) showing that the neural codeword ensembles are extremely inhomogenous; 4) demonstrating that the state of individual neurons is highly predictable from the rest of the population, allowing the capacity for error correction.},
author = {Tkacik, Gasper and Marre, Olivier and Amodei, Dario and Schneidman, Elad and Bialek, William and Berry, Michael},
issn = {1553-734X},
journal = {PLoS Computational Biology},
number = {1},
pages = {e1003408},
publisher = {Public Library of Science},
title = {{Searching for collective behavior in a large network of sensory neurons}},
doi = {10.1371/journal.pcbi.1003408},
volume = {10},
year = {2014},
}
@article{2261,
abstract = {To reveal the full potential of human pluripotent stem cells, new methods for rapid, site-specific genomic engineering are needed. Here, we describe a system for precise genetic modification of human embryonic stem cells (ESCs) and induced pluripotent stem cells (iPSCs). We identified a novel human locus, H11, located in a safe, intergenic, transcriptionally active region of chromosome 22, as the recipient site, to provide robust, ubiquitous expression of inserted genes. Recipient cell lines were established by site-specific placement of a ‘landing pad’ cassette carrying attP sites for phiC31 and Bxb1 integrases at the H11 locus by spontaneous or TALEN-assisted homologous recombination. Dual integrase cassette exchange (DICE) mediated by phiC31 and Bxb1 integrases was used to insert genes of interest flanked by phiC31 and Bxb1 attB sites at the H11 locus, replacing the landing pad. This system provided complete control over content, direction and copy number of inserted genes, with a specificity of 100%. A series of genes, including mCherry and various combinations of the neural transcription factors LMX1a, FOXA2 and OTX2, were inserted in recipient cell lines derived from H9 ESC, as well as iPSC lines derived from a Parkinson’s disease patient and a normal sibling control. The DICE system offers rapid, efficient and precise gene insertion in ESC and iPSC and is particularly well suited for repeated modifications of the same locus.},
author = {Zhu, Fangfang and Gamboa, Matthew and Farruggio, Alfonso and Hippenmeyer, Simon and Tasic, Bosiljka and Schüle, Birgitt and Chen-Tsai, Yanru and Calos, Michele},
journal = {Nucleic Acids Research},
number = {5},
publisher = {Oxford University Press},
title = {{DICE, an efficient system for iterative genomic editing in human pluripotent stem cells}},
doi = {10.1093/nar/gkt1290},
volume = {42},
year = {2014},
}
@incollection{2265,
abstract = {Coordinated migration of newly-born neurons to their target territories is essential for correct neuronal circuit assembly in the developing brain. Although a cohort of signaling pathways has been implicated in the regulation of cortical projection neuron migration, the precise molecular mechanisms and how a balanced interplay of cell-autonomous and non-autonomous functions of candidate signaling molecules controls the discrete steps in the migration process, are just being revealed. In this chapter, I will focally review recent advances that improved our understanding of the cell-autonomous and possible cell-nonautonomous functions of the evolutionarily conserved LIS1/NDEL1-complex in regulating the sequential steps of cortical projection neuron migration. I will then elaborate on the emerging concept that the Reelin signaling pathway, acts exactly at precise stages in the course of cortical projection neuron migration. Lastly, I will discuss how finely tuned transcriptional programs and downstream effectors govern particular aspects in driving radial migration at discrete stages and how they regulate the precise positioning of cortical projection neurons in the developing cerebral cortex.},
author = {Hippenmeyer, Simon},
booktitle = {Cellular and Molecular Control of Neuronal Migration},
editor = {Nguyen, Laurent},
pages = {1--24},
publisher = {Springer},
series = {Advances in Experimental Medicine and Biology},
title = {{Molecular pathways controlling the sequential steps of cortical projection neuron migration}},
doi = {10.1007/978-94-007-7687-6_1},
volume = {800},
year = {2014},
}
@article{2285,
abstract = {GABAergic inhibitory interneurons control fundamental aspects of neuronal network function. Their functional roles are assumed to be defined by the identity of their input synapses, the architecture of their dendritic tree, the passive and active membrane properties and finally the nature of their postsynaptic targets. Indeed, interneurons display a high degree of morphological and physiological heterogeneity. However, whether their morphological and physiological characteristics are correlated and whether interneuron diversity can be described by a continuum of GABAergic cell types or by distinct classes has remained unclear. Here we perform a detailed morphological and physiological characterization of GABAergic cells in the dentate gyrus, the input region of the hippocampus. To achieve an unbiased and efficient sampling and classification we used knock-in mice expressing the enhanced green fluorescent protein (eGFP) in glutamate decarboxylase 67 (GAD67)-positive neurons and performed cluster analysis. We identified five interneuron classes, each of them characterized by a distinct set of anatomical and physiological parameters. Cross-correlation analysis further revealed a direct relation between morphological and physiological properties indicating that dentate gyrus interneurons fall into functionally distinct classes which may differentially control neuronal network activity.},
author = {Hosp, Jonas and Strüber, Michael and Yanagawa, Yuchio and Obata, Kunihiko and Vida, Imre and Jonas, Peter M and Bartos, Marlene},
journal = {Hippocampus},
number = {2},
pages = {189--203},
publisher = {Wiley-Blackwell},
title = {{Morpho-physiological criteria divide dentate gyrus interneurons into classes}},
doi = {10.1002/hipo.22214},
volume = {24},
year = {2014},
}
@inproceedings{2275,
abstract = {Energies with high-order non-submodular interactions have been shown to be very useful in vision due to their high modeling power. Optimization of such energies, however, is generally NP-hard. A naive approach that works for small problem instances is exhaustive search, that is, enumeration of all possible labelings of the underlying graph. We propose a general minimization approach for large graphs based on enumeration of labelings of certain small patches.
This partial enumeration technique reduces complex high-order energy formulations to pairwise Constraint Satisfaction Problems with unary costs (uCSP), which can be efficiently solved using standard methods like TRW-S. Our approach outperforms a number of existing state-of-the-art algorithms on well known difficult problems (e.g. curvature regularization, stereo, deconvolution); it gives near global minimum and better speed.
Our main application of interest is curvature regularization. In the context of segmentation, our partial enumeration technique allows to evaluate curvature directly on small patches using a novel integral geometry approach.},
author = {Olsson, Carl and Ulen, Johannes and Boykov, Yuri and Kolmogorov, Vladimir},
booktitle = {2013 IEEE International Conference on Computer Vision},
location = {Sydney, Australia},
pages = {2936--2943},
publisher = {IEEE},
title = {{Partial enumeration and curvature regularization}},
doi = {10.1109/ICCV.2013.365},
year = {2014},
}
@article{2716,
abstract = {Multi-dimensional mean-payoff and energy games provide the mathematical foundation for the quantitative study of reactive systems, and play a central role in the emerging quantitative theory of verification and synthesis. In this work, we study the strategy synthesis problem for games with such multi-dimensional objectives along with a parity condition, a canonical way to express ω-regular conditions. While in general, the winning strategies in such games may require infinite memory, for synthesis the most relevant problem is the construction of a finite-memory winning strategy (if one exists). Our main contributions are as follows. First, we show a tight exponential bound (matching upper and lower bounds) on the memory required for finite-memory winning strategies in both multi-dimensional mean-payoff and energy games along with parity objectives. This significantly improves the triple exponential upper bound for multi energy games (without parity) that could be derived from results in literature for games on vector addition systems with states. Second, we present an optimal symbolic and incremental algorithm to compute a finite-memory winning strategy (if one exists) in such games. Finally, we give a complete characterization of when finite memory of strategies can be traded off for randomness. In particular, we show that for one-dimension mean-payoff parity games, randomized memoryless strategies are as powerful as their pure finite-memory counterparts.},
author = {Chatterjee, Krishnendu and Randour, Mickael and Raskin, Jean-François},
journal = {Acta Informatica},
number = {3-4},
pages = {129--163},
publisher = {Springer},
title = {{Strategy synthesis for multi-dimensional quantitative objectives}},
doi = {10.1007/s00236-013-0182-6},
volume = {51},
year = {2014},
}
@article{2699,
abstract = {We prove the universality of the β-ensembles with convex analytic potentials and for any β > 0, i.e. we show that the spacing distributions of log-gases at any inverse temperature β coincide with those of the Gaussian β-ensembles.},
author = {Erdös, László and Bourgade, Paul and Yau, Horng-Tzer},
journal = {Duke Mathematical Journal},
number = {6},
pages = {1127--1190},
publisher = {Duke University Press},
title = {{Universality of general β-ensembles}},
doi = {10.1215/00127094-2649752},
volume = {163},
year = {2014},
}
@article{2852,
abstract = {A robust combiner for hash functions takes two candidate implementations and constructs a hash function which is secure as long as at least one of the candidates is secure. So far, hash function combiners only aim at preserving a single property such as collision-resistance or pseudorandomness. However, when hash functions are used in protocols like TLS they are often required to provide several properties simultaneously. We therefore put forward the notion of robust multi-property combiners and elaborate on different definitions for such combiners. We then propose a combiner that provably preserves (target) collision-resistance, pseudorandomness, and being a secure message authentication code. This combiner satisfies the strongest notion we propose, which requires that the combined function satisfies every security property which is satisfied by at least one of the underlying hash function. If the underlying hash functions have output length n, the combiner has output length 2 n. This basically matches a known lower bound for black-box combiners for collision-resistance only, thus the other properties can be achieved without penalizing the length of the hash values. We then propose a combiner which also preserves the property of being indifferentiable from a random oracle, slightly increasing the output length to 2 n+ω(log n). Moreover, we show how to augment our constructions in order to make them also robust for the one-wayness property, but in this case require an a priori upper bound on the input length.},
author = {Fischlin, Marc and Lehmann, Anja and Pietrzak, Krzysztof Z},
journal = {Journal of Cryptology},
number = {3},
pages = {397--428},
publisher = {Springer},
title = {{Robust multi-property combiners for hash functions}},
doi = {10.1007/s00145-013-9148-7},
volume = {27},
year = {2014},
}
@article{2255,
abstract = {Motivated by applications in biology, we present an algorithm for estimating the length of tube-like shapes in 3-dimensional Euclidean space. In a first step, we combine the tube formula of Weyl with integral geometric methods to obtain an integral representation of the length, which we approximate using a variant of the Koksma-Hlawka Theorem. In a second step, we use tools from computational topology to decrease the dependence on small perturbations of the shape. We present computational experiments that shed light on the stability and the convergence rate of our algorithm.},
author = {Edelsbrunner, Herbert and Pausinger, Florian},
issn = {0924-9907},
journal = {Journal of Mathematical Imaging and Vision},
number = {1},
pages = {164--177},
publisher = {Springer},
title = {{Stable length estimates of tube-like shapes}},
doi = {10.1007/s10851-013-0468-x},
volume = {50},
year = {2014},
}
@article{1733,
abstract = {The classical (boolean) notion of refinement for behavioral interfaces of system components is the alternating refinement preorder. In this paper, we define a distance for interfaces, called interface simulation distance. It makes the alternating refinement preorder quantitative by, intuitively, tolerating errors (while counting them) in the alternating simulation game. We show that the interface simulation distance satisfies the triangle inequality, that the distance between two interfaces does not increase under parallel composition with a third interface, that the distance between two interfaces can be bounded from above and below by distances between abstractions of the two interfaces, and how to synthesize an interface from incompatible requirements. We illustrate the framework, and the properties of the distances under composition of interfaces, with two case studies.},
author = {Cerny, Pavol and Chmelik, Martin and Henzinger, Thomas A and Radhakrishna, Arjun},
journal = {Theoretical Computer Science},
number = {3},
pages = {348--363},
publisher = {Elsevier},
title = {{Interface simulation distances}},
doi = {10.1016/j.tcs.2014.08.019},
volume = {560},
year = {2014},
}
@inproceedings{1516,
abstract = {We present a rigorous derivation of the BCS gap equation for superfluid fermionic gases with point interactions. Our starting point is the BCS energy functional, whose minimizer we investigate in the limit when the range of the interaction potential goes to zero.},
author = {Bräunlich, Gerhard and Hainzl, Christian and Seiringer, Robert},
booktitle = {Proceedings of the QMath12 Conference},
location = {Berlin, Germany},
pages = {127--137},
publisher = {World Scientific Publishing},
title = {{On the BCS gap equation for superfluid fermionic gases}},
doi = {10.1142/9789814618144_0007},
year = {2014},
}
@techreport{5428,
abstract = {Simulation is an attractive alternative for language inclusion for automata as it is an under-approximation of language inclusion, but usually has much lower complexity. For non-deterministic automata, while language inclusion is PSPACE-complete, simulation can be computed in polynomial time. Simulation has also been extended in two orthogonal directions, namely, (1) fair simulation, for simulation over specified set of infinite runs; and (2) quantitative simulation, for simulation between weighted automata. Again, while fair trace inclusion is PSPACE-complete, fair simulation can be computed in polynomial time. For weighted automata, the (quantitative) language inclusion problem is undecidable for mean-payoff automata and the decidability is open for discounted-sum automata, whereas the (quantitative) simulation reduce to mean-payoff games and discounted-sum games, which admit pseudo-polynomial time algorithms.
In this work, we study (quantitative) simulation for weighted automata with Büchi acceptance conditions, i.e., we generalize fair simulation from non-weighted automata to weighted automata. We show that imposing Büchi acceptance conditions on weighted automata changes many fundamental properties of the simulation games. For example, whereas for mean-payoff and discounted-sum games, the players do not need memory to play optimally; we show in contrast that for simulation games with Büchi acceptance conditions, (i) for mean-payoff objectives, optimal strategies for both players require infinite memory in general, and (ii) for discounted-sum objectives, optimal strategies need not exist for both players. While the simulation games with Büchi acceptance conditions are more complicated (e.g., due to infinite-memory requirements for mean-payoff objectives) as compared to their counterpart without Büchi acceptance conditions, we still present pseudo-polynomial time algorithms to solve simulation games with Büchi acceptance conditions for both weighted mean-payoff and weighted discounted-sum automata.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan and Velner, Yaron},
institution = {IST Austria},
issn = {2664-1690},
pages = {26},
title = {{Quantitative fair simulation games}},
type = {Technical Report},
doi = {10.15479/AT:IST-2014-315-v1-1},
year = {2014},
}
@inproceedings{1392,
abstract = {Fault-tolerant distributed algorithms play an important role in ensuring the reliability of many software applications. In this paper we consider distributed algorithms whose computations are organized in rounds. To verify the correctness of such algorithms, we reason about (i) properties (such as invariants) of the state, (ii) the transitions controlled by the algorithm, and (iii) the communication graph. We introduce a logic that addresses these points, and contains set comprehensions with cardinality constraints, function symbols to describe the local states of each process, and a limited form of quantifier alternation to express the verification conditions. We show its use in automating the verification of consensus algorithms. In particular, we give a semi-decision procedure for the unsatisfiability problem of the logic and identify a decidable fragment. We successfully applied our framework to verify the correctness of a variety of consensus algorithms tolerant to both benign faults (message loss, process crashes) and value faults (message corruption).},
author = {Dragoi, Cezara and Henzinger, Thomas A and Veith, Helmut and Widder, Josef and Zufferey, Damien},
booktitle = {Verification, Model Checking, and Abstract Interpretation},
location = {San Diego, USA},
pages = {161--181},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {{A logic-based framework for verifying consensus algorithms}},
doi = {10.1007/978-3-642-54013-4_10},
volume = {8318},
year = {2014},
}
@inproceedings{1393,
abstract = {Probabilistic programs are usual functional or imperative programs with two added constructs: (1) the ability to draw values at random from distributions, and (2) the ability to condition values of variables in a program via observations. Models from diverse application areas such as computer vision, coding theory, cryptographic protocols, biology and reliability analysis can be written as probabilistic programs. Probabilistic inference is the problem of computing an explicit representation of the probability distribution implicitly specified by a probabilistic program. Depending on the application, the desired output from inference may vary---we may want to estimate the expected value of some function f with respect to the distribution, or the mode of the distribution, or simply a set of samples drawn from the distribution. In this paper, we describe connections this research area called "Probabilistic Programming" has with programming languages and software engineering, and this includes language design, and the static and dynamic analysis of programs. We survey current state of the art and speculate on promising directions for future research.},
author = {Gordon, Andrew and Henzinger, Thomas A and Nori, Aditya and Rajamani, Sriram},
booktitle = {Future of Software Engineering Proceedings},
location = {Hyderabad, India},
pages = {167--181},
publisher = {ACM},
title = {{Probabilistic programming}},
doi = {10.1145/2593882.2593900},
year = {2014},
}
@inproceedings{1507,
abstract = {The Wigner-Dyson-Gaudin-Mehta conjecture asserts that the local eigenvalue statistics of large real and complex Hermitian matrices with independent, identically distributed entries are universal in a sense that they depend only on the symmetry class of the matrix and otherwise are independent of the details of the distribution. We present the recent solution to this half-century old conjecture. We explain how stochastic tools, such as the Dyson Brownian motion, and PDE ideas, such as De Giorgi-Nash-Moser regularity theory, were combined in the solution. We also show related results for log-gases that represent a universal model for strongly correlated systems. Finally, in the spirit of Wigner’s original vision, we discuss the extensions of these universality results to more realistic physical systems such as random band matrices.},
author = {Erdös, László},
booktitle = {Proceedings of the International Congress of Mathematicians},
location = {Seoul, Korea},
pages = {214--236},
publisher = {Kyung Moon SA Co. Ltd.},
title = {{Random matrices, log-gases and Hölder regularity}},
volume = {3},
year = {2014},
}