@article{3317,
  author    = {Eggermann, Emmanuel and Bucurenciu, Iancu and Goswami, Sarit and Jonas, Peter M},
  title     = {{Nanodomain coupling between Ca(2+) channels and sensors of exocytosis at fast mammalian synapses}},
  journal   = {Nature Reviews Neuroscience},
  volume    = {13},
  number    = {1},
  pages     = {7--21},
  publisher = {Nature Publishing Group},
  doi       = {10.1038/nrn3125},
  year      = {2012},
  abstract  = {The physical distance between presynaptic Ca2+ channels and the Ca2+ sensors that trigger exocytosis of neurotransmitter-containing vesicles is a key determinant of the signalling properties of synapses in the nervous system. Recent functional analysis indicates that in some fast central synapses, transmitter release is triggered by a small number of Ca2+ channels that are coupled to Ca2+ sensors at the nanometre scale. Molecular analysis suggests that this tight coupling is generated by protein–protein interactions involving Ca2+ channels, Ca2+ sensors and various other synaptic proteins. Nanodomain coupling has several functional advantages, as it increases the efficacy, speed and energy efficiency of synaptic transmission.},
}
@article{3331,
  author    = {Kerber, Michael and Sagraloff, Michael},
  title     = {{A worst case bound for topology computation of algebraic curves}},
  journal   = {Journal of Symbolic Computation},
  volume    = {47},
  number    = {3},
  pages     = {239--258},
  publisher = {Elsevier},
  doi       = {10.1016/j.jsc.2011.11.001},
  year      = {2012},
  abstract  = {Computing the topology of an algebraic plane curve C means computing a combinatorial graph that is isotopic to C and thus represents its topology in R2. We prove that, for a polynomial of degree n with integer coefficients bounded by 2ρ, the topology of the induced curve can be computed with bit operations ( indicates that we omit logarithmic factors). Our analysis improves the previous best known complexity bounds by a factor of n2. The improvement is based on new techniques to compute and refine isolating intervals for the real roots of polynomials, and on the consequent amortized analysis of the critical fibers of the algebraic curve.},
}
@inproceedings{3341,
  author    = {Chatterjee, Krishnendu},
  title     = {{Robustness of structurally equivalent concurrent parity games}},
  booktitle = {Foundations of Software Science and Computational Structures (FoSSaCS 2012)},
  location  = {Tallinn, Estonia},
  volume    = {7213},
  pages     = {270--285},
  publisher = {Springer},
  doi       = {10.1007/978-3-642-28729-9_18},
  year      = {2012},
  abstract  = {We consider two-player stochastic games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine a probability distribution over the successor states. We also consider the important special case of turn-based stochastic games where players make moves in turns, rather than concurrently. We study concurrent games with \omega-regular winning conditions specified as parity objectives. The value for player 1 for a parity objective is the maximal probability with which the player can guarantee the satisfaction of the objective against all strategies of the opponent. We study the problem of continuity and robustness of the value function in concurrent and turn-based stochastic parity gameswith respect to imprecision in the transition probabilities. We present quantitative bounds on the difference of the value function (in terms of the imprecision of the transition probabilities) and show the value continuity for structurally equivalent concurrent games (two games are structurally equivalent if the support of the transition function is same and the probabilities differ). We also show robustness of optimal strategies for structurally equivalent turn-based stochastic parity games. Finally we show that the value continuity property breaks without the structurally equivalent assumption (even for Markov chains) and show that our quantitative bound is asymptotically optimal. Hence our results are tight (the assumption is both necessary and sufficient) and optimal (our quantitative bound is asymptotically optimal).},
}
@article{493,
  author    = {Tangermann, Michael and Müller, Klaus and Aertsen, Ad and Birbaumer, Niels and Braun, Christoph and Brunner, Clemens and Leeb, Robert and Mehring, Carsten and Miller, Kai and Müller-Putz, Gernot and Nolte, Guido and Pfurtscheller, Gert and Preissl, Hubert and Schalk, Gerwin and Schlögl, Alois and Vidaurre, Carmen and Waldert, Stephan and Blankertz, Benjamin},
  title     = {{Review of the BCI competition IV}},
  journal   = {Frontiers in Neuroscience},
  volume    = {6},
  publisher = {Frontiers Research Foundation},
  doi       = {10.3389/fnins.2012.00055},
  year      = {2012},
  abstract  = {The BCI competition IV stands in the tradition of prior BCI competitions that aim to provide high quality neuroscientific data for open access to the scientific community. As experienced already in prior competitions not only scientists from the narrow field of BCI compete, but scholars with a broad variety of backgrounds and nationalities. They include high specialists as well as students. The goals of all BCI competitions have always been to challenge with respect to novel paradigms and complex data. We report on the following challenges: (1) asynchronous data, (2) synthetic, (3) multi-class continuous data, (4) session-to-session transfer, (5) directionally modulated MEG, (6) finger movements recorded by ECoG. As after past competitions, our hope is that winning entries may enhance the analysis methods of future BCIs.},
}
@article{494,
  author    = {Boker, Udi and Kupferman, Orna},
  title     = {{Translating to Co-Büchi made tight, unified, and useful}},
  journal   = {ACM Transactions on Computational Logic (TOCL)},
  volume    = {13},
  number    = {4},
  publisher = {ACM},
  doi       = {10.1145/2362355.2362357},
  year      = {2012},
  abstract  = {We solve the longstanding open problems of the blow-up involved in the translations, when possible, of a nondeterministic Büchi word automaton (NBW) to a nondeterministic co-Büchi word automaton (NCW) and to a deterministic co-Büchi word automaton (DCW). For the NBW to NCW translation, the currently known upper bound is {$2^{O(n\log n)}$} and the lower bound is {$1.5^n$}. We improve the upper bound to {$n2^n$} and describe a matching lower bound of {$2^{\Omega(n)}$}. For the NBW to DCW translation, the currently known upper bound is {$2^{O(n\log n)}$}. We improve it to {$2^{O(n)}$}, which is asymptotically tight. Both of our upper-bound constructions are based on a simple subset construction, do not involve intermediate automata with richer acceptance conditions, and can be implemented symbolically. We continue and solve the open problems of translating nondeterministic Streett, Rabin, Muller, and parity word automata to NCW and to DCW. Going via an intermediate NBW is not optimal and we describe direct, simple, and asymptotically tight constructions, involving a {$2^{O(n)}$} blow-up. The constructions are variants of the subset construction, providing a unified approach for translating all common classes of automata to NCW and DCW. Beyond the theoretical importance of the results, we point to numerous applications of the new constructions. In particular, they imply a simple subset-construction based translation, when possible, of LTL to deterministic Büchi word automata.},
}
@inproceedings{495,
  author    = {Kruckman, Alex and Rubin, Sasha and Sheridan, John and Zax, Ben},
  title     = {{A Myhill-Nerode theorem for automata with advice}},
  booktitle = {Proceedings GandALF 2012},
  location  = {Napoli, Italy},
  volume    = {96},
  pages     = {238--246},
  publisher = {Open Publishing Association},
  doi       = {10.4204/EPTCS.96.18},
  year      = {2012},
  abstract  = {An automaton with advice is a finite state automaton which has access to an additional fixed infinite string called an advice tape. We refine the Myhill-Nerode theorem to characterize the languages of finite strings that are accepted by automata with advice. We do the same for tree automata with advice.},
}
@inproceedings{496,
  author    = {Rabinovich, Alexander and Rubin, Sasha},
  title     = {{Interpretations in trees with countably many branches}},
  booktitle = {Proceedings of the 27th Annual IEEE Symposium on Logic in Computer Science},
  location  = {Dubrovnik, Croatia},
  publisher = {IEEE},
  doi       = {10.1109/LICS.2012.65},
  year      = {2012},
  abstract  = {We study the expressive power of logical interpretations on the class of scattered trees, namely those with countably many infinite branches. Scattered trees can be thought of as the tree analogue of scattered linear orders. Every scattered tree has an ordinal rank that reflects the structure of its infinite branches. We prove, roughly, that trees and orders of large rank cannot be interpreted in scattered trees of small rank. We consider a quite general notion of interpretation: each element of the interpreted structure is represented by a set of tuples of subsets of the interpreting tree. Our trees are countable, not necessarily finitely branching, and may have finitely many unary predicates as labellings. We also show how to replace injective set-interpretations in (not necessarily scattered) trees by 'finitary' set-interpretations.},
}
@inproceedings{497,
  author    = {Chatterjee, Krishnendu and Chaubal, Siddhesh and Kamath, Pritish},
  title     = {{Faster algorithms for alternating refinement relations}},
  booktitle = {Computer Science Logic (CSL 2012)},
  location  = {Fontainebleau, France},
  volume    = {16},
  pages     = {167--182},
  publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  doi       = {10.4230/LIPIcs.CSL.2012.167},
  year      = {2012},
  abstract  = {One central issue in the formal design and analysis of reactive systems is the notion of refinement that asks whether all behaviors of the implementation is allowed by the specification. The local interpretation of behavior leads to the notion of simulation. Alternating transition systems (ATSs) provide a general model for composite reactive systems, and the simulation relation for ATSs is known as alternating simulation. The simulation relation for fair transition systems is called fair simulation. In this work our main contributions are as follows: (1) We present an improved algorithm for fair simulation with Büchi fairness constraints; our algorithm requires O(n 3·m) time as compared to the previous known O(n 6)-time algorithm, where n is the number of states and m is the number of transitions. (2) We present a game based algorithm for alternating simulation that requires O(m2)-time as compared to the previous known O((n·m)2)-time algorithm, where n is the number of states and m is the size of transition relation. (3) We present an iterative algorithm for alternating simulation that matches the time complexity of the game based algorithm, but is more space efficient than the game based algorithm.},
}
@article{498,
  author    = {Pickup, Melinda and Field, David and Rowell, David and Young, Andrew},
  title     = {{Predicting local adaptation in fragmented plant populations: Implications for restoration genetics}},
  journal   = {Evolutionary Applications},
  volume    = {5},
  number    = {8},
  pages     = {913--924},
  publisher = {Wiley-Blackwell},
  doi       = {10.1111/j.1752-4571.2012.00284.x},
  year      = {2012},
  abstract  = {Understanding patterns and correlates of local adaptation in heterogeneous landscapes can provide important information in the selection of appropriate seed sources for restoration. We assessed the extent of local adaptation of fitness components in 12 population pairs of the perennial herb Rutidosis leptorrhynchoides (Asteraceae) and examined whether spatial scale (0.7-600 km), environmental distance, quantitative (QST) and neutral (FST) genetic differentiation, and size of the local and foreign populations could predict patterns of adaptive differentiation. Local adaptation varied among populations and fitness components. Including all population pairs, local adaptation was observed for seedling survival, but not for biomass, while foreign genotype advantage was observed for reproduction (number of inflorescences). Among population pairs, local adaptation increased with QST and local population size for biomass. QST was associated with environmental distance, suggesting ecological selection for phenotypic divergence. However, low FST and variation in population structure in small populations demonstrates the interaction of gene flow and drift in constraining local adaptation in R. leptorrhynchoides. Our study indicates that for species in heterogeneous landscapes, collecting seed from large populations from similar environments to candidate sites is likely to provide the most appropriate seed sources for restoration.},
}
@article{506,
  author    = {Sixt, Michael K},
  title     = {{Cell migration: Fibroblasts find a new way to get ahead}},
  journal   = {Journal of Cell Biology},
  volume    = {197},
  number    = {3},
  pages     = {347--349},
  publisher = {Rockefeller University Press},
  doi       = {10.1083/jcb.201204039},
  year      = {2012},
}
@techreport{5377,
  author      = {Chatterjee, Krishnendu and Velner, Yaron},
  title       = {{Mean-payoff pushdown games}},
  institution = {IST Austria},
  publisher   = {IST Austria},
  issn        = {2664-1690},
  pages       = {33},
  doi         = {10.15479/AT:IST-2012-0002},
  year        = {2012},
  abstract    = {Two-player games on graphs are central in many problems in formal verification and program analysis such as synthesis and verification of open systems. In this work we consider solving recursive game graphs (or pushdown game graphs) that can model the control flow of sequential programs with recursion. While pushdown games have been studied before with qualitative objectives, such as reachability and ω-regular objectives, in this work we study for the first time such games with the most well-studied quantitative objective, namely, mean-payoff objectives. In pushdown games two types of strategies are relevant: (1) global strategies, that depend on the entire global history; and (2) modular strategies, that have only local memory and thus do not depend on the context of invocation, but only on the history of the current invocation of the module. Our main results are as follows: (1) One-player pushdown games with mean-payoff objectives under global strategies are decidable in polynomial time. (2) Two- player pushdown games with mean-payoff objectives under global strategies are undecidable. (3) One-player pushdown games with mean-payoff objectives under modular strategies are NP- hard. (4) Two-player pushdown games with mean-payoff objectives under modular strategies can be solved in NP (i.e., both one-player and two-player pushdown games with mean-payoff objectives under modular strategies are NP-complete). We also establish the optimal strategy complexity showing that global strategies for mean-payoff objectives require infinite memory even in one-player pushdown games; and memoryless modular strategies are sufficient in two- player pushdown games. Finally we also show that all the problems have the same complexity if the stack boundedness condition is added, where along with the mean-payoff objective the player must also ensure that the stack height is bounded.},
}
@techreport{5378,
  author      = {Chatterjee, Krishnendu and Chaubal, Siddhesh and Kamath, Pritish},
  title       = {{Faster algorithms for alternating refinement relations}},
  institution = {IST Austria},
  publisher   = {IST Austria},
  issn        = {2664-1690},
  pages       = {21},
  doi         = {10.15479/AT:IST-2012-0001},
  year        = {2012},
  abstract    = {One central issue in the formal design and analysis of reactive systems is the notion of refinement that asks whether all behaviors of the implementation is allowed by the specification. The local interpretation of behavior leads to the notion of simulation. Alternating transition systems (ATSs) provide a general model for composite reactive systems, and the simulation relation for ATSs is known as alternating simulation. The simulation relation for fair transition systems is called fair simulation. In this work our main contributions are as follows: (1) We present an improved algorithm for fair simulation with Büchi fairness constraints; our algorithm requires O(n3 · m) time as compared to the previous known O(n6)-time algorithm, where n is the number of states and m is the number of transitions. (2) We present a game based algorithm for alternating simulation that requires O(m2)-time as compared to the previous known O((n · m)2)-time algorithm, where n is the number of states and m is the size of transition relation. (3) We present an iterative algorithm for alternating simulation that matches the time complexity of the game based algorithm, but is more space efficient than the game based algorithm.},
}
@techreport{5396,
  author      = {Korc, Filip and Kolmogorov, Vladimir and Lampert, Christoph},
  title       = {{Approximating marginals using discrete energy minimization}},
  institution = {IST Austria},
  publisher   = {IST Austria},
  issn        = {2664-1690},
  pages       = {13},
  doi         = {10.15479/AT:IST-2012-0003},
  year        = {2012},
  abstract    = {We consider the problem of inference in a graphical model with binary variables. While in theory it is arguably preferable to compute marginal probabilities, in practice researchers often use MAP inference due to the availability of efficient discrete optimization algorithms. We bridge the gap between the two approaches by introducing the Discrete Marginals technique in which approximate marginals are obtained by minimizing an objective function with unary and pair-wise terms over a discretized domain. This allows the use of techniques originally developed for MAP-MRF inference and learning. We explore two ways to set up the objective function - by discretizing the Bethe free energy and by learning it from training data. Experimental results show that for certain types of graphs a learned function can out-perform the Bethe approximation. We also establish a link between the Bethe free energy and submodular functions.},
}
@techreport{5398,
  author      = {Porsche, Jana},
  title       = {{Actual state of research data @ ISTAustria}},
  institution = {IST Austria},
  publisher   = {IST Austria},
  year        = {2012},
  abstract    = {This document is created as a part of the project “Repository for Research Data on IST Austria”. It summarises the actual state of research data at IST Austria, based on survey results. It supports the choice of appropriate software, which would best fit the requirements of their users, the researchers.},
}
@inbook{5745,
  author    = {Gupta, Ashutosh},
  title     = {{Improved Single Pass Algorithms for Resolution Proof Reduction}},
  booktitle = {Automated Technology for Verification and Analysis},
  volume    = {7561},
  pages     = {107--121},
  publisher = {Springer Berlin Heidelberg},
  location  = {Thiruvananthapuram, Kerala, India},
  isbn      = {9783642333859},
  issn      = {0302-9743},
  doi       = {10.1007/978-3-642-33386-6_10},
  year      = {2012},
}
@article{6588,
  author    = {Pausinger, Florian},
  title     = {{Elementary solutions of the Bernstein problem on two intervals}},
  journal   = {Journal of Mathematical Physics, Analysis, Geometry},
  volume    = {8},
  number    = {1},
  pages     = {63--78},
  publisher = {B. Verkin Institute for Low Temperature Physics and Engineering},
  issn      = {1812-9471},
  year      = {2012},
  abstract  = {First we note that the best polynomial approximation to vertical bar x vertical bar on the set, which consists of an interval on the positive half-axis and a point on the negative half-axis, can be given by means of the classical Chebyshev polynomials. Then we explore the cases when a solution of the related problem on two intervals can be given in elementary functions.},
}
@inproceedings{1384,
  author    = {Beyer, Dirk and Henzinger, Thomas A and Keremoglu, Mehmet and Wendler, Philipp},
  title     = {{Conditional model checking: A technique to pass information between verifiers}},
  booktitle = {Proceedings of the ACM SIGSOFT 20th International Symposium on the Foundations of Software Engineering},
  location  = {Cary, NC, USA},
  publisher = {ACM},
  doi       = {10.1145/2393596.2393664},
  year      = {2012},
  abstract  = {Software model checking, as an undecidable problem, has three possible outcomes: (1) the program satisfies the specification, (2) the program does not satisfy the specification, and (3) the model checker fails. The third outcome usually manifests itself in a space-out, time-out, or one component of the verification tool giving up; in all of these failing cases, significant computation is performed by the verification tool before the failure, but no result is reported. We propose to reformulate the model-checking problem as follows, in order to have the verification tool report a summary of the performed work even in case of failure: given a program and a specification, the model checker returns a condition Ψ - usually a state predicate - such that the program satisfies the specification under the condition Ψ - that is, as long as the program does not leave the states in which Ψ is satisfied. In our experiments, we investigated as one major application of conditional model checking the sequential combination of model checkers with information passing. We give the condition that one model checker produces, as input to a second conditional model checker, such that the verification problem for the second is restricted to the part of the state space that is not covered by the condition, i.e., the second model checker works on the problems that the first model checker could not solve. Our experiments demonstrate that repeated application of conditional model checkers, passing information from one model checker to the next, can significantly improve the verification results and performance, i.e., we can now verify programs that we could not verify before.},
}
@inproceedings{2048,
  author    = {Faust, Sebastian and Pietrzak, Krzysztof Z and Schipper, Joachim},
  title     = {{Practical leakage-resilient symmetric cryptography}},
  booktitle = {Conference proceedings CHES 2012},
  location  = {Leuven, Belgium},
  volume    = {7428},
  pages     = {213--232},
  publisher = {Springer},
  doi       = {10.1007/978-3-642-33027-8_13},
  year      = {2012},
  abstract  = {Leakage resilient cryptography attempts to incorporate side-channel leakage into the black-box security model and designs cryptographic schemes that are provably secure within it. Informally, a scheme is leakage-resilient if it remains secure even if an adversary learns a bounded amount of arbitrary information about the schemes internal state. Unfortunately, most leakage resilient schemes are unnecessarily complicated in order to achieve strong provable security guarantees. As advocated by Yu et al. [CCS’10], this mostly is an artefact of the security proof and in practice much simpler construction may already suffice to protect against realistic side-channel attacks. In this paper, we show that indeed for simpler constructions leakage-resilience can be obtained when we aim for relaxed security notions where the leakage-functions and/or the inputs to the primitive are chosen non-adaptively. For example, we show that a three round Feistel network instantiated with a leakage resilient PRF yields a leakage resilient PRP if the inputs are chosen non-adaptively (This complements the result of Dodis and Pietrzak [CRYPTO’10] who show that if a adaptive queries are allowed, a superlogarithmic number of rounds is necessary.) We also show that a minor variation of the classical GGM construction gives a leakage resilient PRF if both, the leakage-function and the inputs, are chosen non-adaptively.},
}
@inproceedings{2049,
  author    = {Heyse, Stefan and Kiltz, Eike and Lyubashevsky, Vadim and Paar, Christof and Pietrzak, Krzysztof Z},
  title     = {{Lapin: An efficient authentication protocol based on ring-LPN}},
  booktitle = {Conference proceedings FSE 2012},
  location  = {Washington, DC, USA},
  volume    = {7549},
  pages     = {346--365},
  publisher = {Springer},
  doi       = {10.1007/978-3-642-34047-5_20},
  year      = {2012},
  abstract  = {We propose a new authentication protocol that is provably secure based on a ring variant of the learning parity with noise (LPN) problem. The protocol follows the design principle of the LPN-based protocol from Eurocrypt’11 (Kiltz et al.), and like it, is a two round protocol secure against active attacks. Moreover, our protocol has small communication complexity and a very small footprint which makes it applicable in scenarios that involve low-cost, resource-constrained devices.
Performance-wise, our protocol is more efficient than previous LPN-based schemes, such as the many variants of the Hopper-Blum (HB) protocol and the aforementioned protocol from Eurocrypt’11. Our implementation results show that it is even comparable to the standard challenge-and-response protocols based on the AES block-cipher. Our basic protocol is roughly 20 times slower than AES, but with the advantage of having 10 times smaller code size. Furthermore, if a few hundred bytes of non-volatile memory are available to allow the storage of some off-line pre-computations, then the online phase of our protocols is only twice as slow as AES.},
}
@article{2263,
  author    = {Liang, Huixuan and Hippenmeyer, Simon and Ghashghaei, H.},
  title     = {{A Nestin-cre transgenic mouse is insufficient for recombination in early embryonic neural progenitors}},
  journal   = {Biology Open},
  volume    = {1},
  number    = {12},
  pages     = {1200--1203},
  publisher = {The Company of Biologists},
  doi       = {10.1242/bio.20122287},
  year      = {2012},
  abstract  = {Nestin-cre transgenic mice have been widely used to direct recombination to neural stem cells (NSCs) and intermediate neural progenitor cells (NPCs). Here we report that a readily utilized, and the only commercially available, Nestin-cre line is insufficient for directing recombination in early embryonic NSCs and NPCs. Analysis of recombination efficiency in multiple cre-dependent reporters and a genetic mosaic line revealed consistent temporal and spatial patterns of recombination in NSCs and NPCs. For comparison we utilized a knock-in Emx1cre line and found robust recombination in NSCs and NPCs in ventricular and subventricular zones of the cerebral cortices as early as embryonic day 12.5. In addition we found that the rate of Nestin-cre driven recombination only reaches sufficiently high levels in NSCs and NPCs during late embryonic and early postnatal periods. These findings are important when commercially available cre lines are considered for directing recombination to embryonic NSCs and NPCs.},
}
@article{2302,
  author    = {Henzinger, Thomas A and Mateescu, Maria},
  title     = {{The propagation approach for computing biochemical reaction networks}},
  journal   = {IEEE ACM Transactions on Computational Biology and Bioinformatics},
  volume    = {10},
  number    = {2},
  pages     = {310--322},
  publisher = {IEEE},
  doi       = {10.1109/TCBB.2012.91},
  year      = {2012},
  abstract  = {We introduce propagation models (PMs), a formalism able to express several kinds of equations that describe the behavior of biochemical reaction networks. Furthermore, we introduce the propagation abstract data type (PADT), which separates concerns regarding different numerical algorithms for the transient analysis of biochemical reaction networks from concerns regarding their implementation, thus allowing for portable and efficient solutions. The state of a propagation abstract data type is given by a vector that assigns mass values to a set of nodes, and its (next) operator propagates mass values through this set of nodes. We propose an approximate implementation of the (next) operator, based on threshold abstraction, which propagates only "significant" mass values and thus achieves a compromise between efficiency and accuracy. Finally, we give three use cases for propagation models: the chemical master equation (CME), the reaction rate equation (RRE), and a hybrid method that combines these two equations. These three applications use propagation models in order to propagate probabilities and/or expected values and variances of the model's variables.},
}
@article{2318,
  author    = {Seiringer, Robert},
  title     = {{Absence of bound states implies non-negativity of the scattering length}},
  journal   = {Journal of Spectral Theory},
  volume    = {2},
  number    = {3},
  pages     = {321--328},
  publisher = {European Mathematical Society},
  doi       = {10.4171/JST/31},
  year      = {2012},
  abstract  = {We show that bosons interacting via pair potentials with negative scattering length form bound states for a suitable number of particles. In other words, the absence of many-particle bound states of any kind implies the non-negativity of the scattering length of the interaction potential. },
}
@article{2411,
  author    = {Ebersberger, Ingo and De Matos Simoes, Ricardo and Kupczok, Anne and Gube, Matthias and Kothe, Erika and Voigt, Kerstin and Von Haeseler, Arndt},
  title     = {{A consistent phylogenetic backbone for the fungi}},
  journal   = {Molecular Biology and Evolution},
  volume    = {29},
  number    = {5},
  pages     = {1319--1334},
  publisher = {Oxford University Press},
  doi       = {10.1093/molbev/msr285},
  year      = {2012},
  abstract  = {The kingdom of fungi provides model organisms for biotechnology, cell biology, genetics, and life sciences in general. Only when their phylogenetic relationships are stably resolved, can individual results from fungal research be integrated into a holistic picture of biology. However, and despite recent progress, many deep relationships within the fungi remain unclear. Here, we present the first phylogenomic study of an entire eukaryotic kingdom that uses a consistency criterion to strengthen phylogenetic conclusions. We reason that branches (splits) recovered with independent data and different tree reconstruction methods are likely to reflect true evolutionary relationships. Two complementary phylogenomic data sets based on 99 fungal genomes and 109 fungal expressed sequence tag (EST) sets analyzed with four different tree reconstruction methods shed light from different angles on the fungal tree of life. Eleven additional data sets address specifically the phylogenetic position of Blastocladiomycota, Ustilaginomycotina, and Dothideomycetes, respectively. The combined evidence from the resulting trees supports the deep-level stability of the fungal groups toward a comprehensive natural system of the fungi. In addition, our analysis reveals methodologically interesting aspects. Enrichment for EST encoded data-a common practice in phylogenomic analyses-introduces a strong bias toward slowly evolving and functionally correlated genes. Consequently, the generalization of phylogenomic data sets as collections of randomly selected genes cannot be taken for granted. A thorough characterization of the data to assess possible influences on the tree reconstruction should therefore become a standard in phylogenomic analyses.},
}
@inproceedings{2715,
  author    = {Chatterjee, Krishnendu and Joglekar, Manas and Shah, Nisarg},
  title     = {{Average case analysis of the classical algorithm for Markov decision processes with Büchi objectives}},
  booktitle = {IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science (FSTTCS 2012)},
  location  = {Hyderabad, India},
  volume    = {18},
  pages     = {461--473},
  publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  doi       = {10.4230/LIPIcs.FSTTCS.2012.461},
  year      = {2012},
  abstract  = {We consider Markov decision processes (MDPs) with specifications given as Büchi (liveness) objectives. We consider the problem of computing the set of almost-sure winning vertices from where the objective can be ensured with probability 1. We study for the first time the average case complexity of the classical algorithm for computing the set of almost-sure winning vertices for MDPs with Büchi objectives. Our contributions are as follows: First, we show that for MDPs with constant out-degree the expected number of iterations is at most logarithmic and the average case running time is linear (as compared to the worst case linear number of iterations and quadratic time complexity). Second, for the average case analysis over all MDPs we show that the expected number of iterations is constant and the average case running time is linear (again as compared to the worst case linear number of iterations and quadratic time complexity). Finally we also show that given that all MDPs are equally likely, the probability that the classical algorithm requires more than constant number of iterations is exponentially small.},
}
@article{9451,
author = {Ibarra, Christian A. and Feng, Xiaoqi and Schoft, Vera K. and Hsieh, Tzung-Fu and Uzawa, Rie and Rodrigues, Jessica A. and Zemach, Assaf and Chumak, Nina and Machlicova, Adriana and Nishimura, Toshiro and Rojas, Denisse and Fischer, Robert L. and Tamaru, Hisashi and Zilberman, Daniel},
title = {{Active DNA demethylation in plant companion cells reinforces transposon methylation in gametes}},
journal = {Science},
issn = {1095-9203},
volume = {337},
number = {6100},
pages = {1360--1364},
publisher = {American Association for the Advancement of Science},
doi = {10.1126/science.1224839},
year = {2012},
abstract = {The Arabidopsis thaliana central cell, the companion cell of the egg, undergoes DNA demethylation before fertilization, but the targeting preferences, mechanism, and biological significance of this process remain unclear. Here, we show that active DNA demethylation mediated by the DEMETER DNA glycosylase accounts for all of the demethylation in the central cell and preferentially targets small, AT-rich, and nucleosome-depleted euchromatic transposable elements. The vegetative cell, the companion cell of sperm, also undergoes DEMETER-dependent demethylation of similar sequences, and lack of DEMETER in vegetative cells causes reduced small RNA–directed DNA methylation of transposons in sperm. Our results demonstrate that demethylation in companion cells reinforces transposon methylation in plant gametes and likely contributes to stable silencing of transposable elements across generations.},
}
@article{9497,
author = {Coleman-Derr, Devin and Zilberman, Daniel},
title = {{Deposition of histone variant H2A.Z within gene bodies regulates responsive genes}},
journal = {PLoS Genetics},
issn = {1553-7404},
volume = {8},
number = {10},
publisher = {Public Library of Science},
doi = {10.1371/journal.pgen.1002988},
year = {2012},
abstract = {The regulation of eukaryotic chromatin relies on interactions between many epigenetic factors, including histone modifications, DNA methylation, and the incorporation of histone variants. H2A.Z, one of the most conserved but enigmatic histone variants that is enriched at the transcriptional start sites of genes, has been implicated in a variety of chromosomal processes. Recently, we reported a genome-wide anticorrelation between H2A.Z and DNA methylation, an epigenetic hallmark of heterochromatin that has also been found in the bodies of active genes in plants and animals. Here, we investigate the basis of this anticorrelation using a novel h2a.z loss-of-function line in Arabidopsis thaliana. Through genome-wide bisulfite sequencing, we demonstrate that loss of H2A.Z in Arabidopsis has only a minor effect on the level or profile of DNA methylation in genes, and we propose that the global anticorrelation between DNA methylation and H2A.Z is primarily caused by the exclusion of H2A.Z from methylated DNA. RNA sequencing and genomic mapping of H2A.Z show that H2A.Z enrichment across gene bodies, rather than at the TSS, is correlated with lower transcription levels and higher measures of gene responsiveness. Loss of H2A.Z causes misregulation of many genes that are disproportionately associated with response to environmental and developmental stimuli. We propose that H2A.Z deposition in gene bodies promotes variability in levels and patterns of gene expression, and that a major function of genic DNA methylation is to exclude H2A.Z from constitutively expressed genes.},
}
@article{9499,
author = {Kim, Sang Yeol and Lee, Jungeun and Eshed-Williams, Leor and Zilberman, Daniel and Sung, Z. Renee},
title = {{EMF1 and PRC2 cooperate to repress key regulators of Arabidopsis development}},
journal = {PLoS Genetics},
issn = {1553-7404},
volume = {8},
number = {3},
publisher = {Public Library of Science},
doi = {10.1371/journal.pgen.1002512},
year = {2012},
abstract = {EMBRYONIC FLOWER1 (EMF1) is a plant-specific gene crucial to Arabidopsis vegetative development. Loss of function mutants in the EMF1 gene mimic the phenotype caused by mutations in Polycomb Group protein (PcG) genes, which encode epigenetic repressors that regulate many aspects of eukaryotic development. In Arabidopsis, Polycomb Repressor Complex 2 (PRC2), made of PcG proteins, catalyzes trimethylation of lysine 27 on histone H3 (H3K27me3) and PRC1-like proteins catalyze H2AK119 ubiquitination. Despite functional similarity to PcG proteins, EMF1 lacks sequence homology with known PcG proteins; thus, its role in the PcG mechanism is unclear. To study the EMF1 functions and its mechanism of action, we performed genome-wide mapping of EMF1 binding and H3K27me3 modification sites in Arabidopsis seedlings. The EMF1 binding pattern is similar to that of H3K27me3 modification on the chromosomal and genic level. ChIPOTLe peak finding and clustering analyses both show that the highly trimethylated genes also have high enrichment levels of EMF1 binding, termed EMF1_K27 genes. EMF1 interacts with regulatory genes, which are silenced to allow vegetative growth, and with genes specifying cell fates during growth and differentiation. H3K27me3 marks not only these genes but also some genes that are involved in endosperm development and maternal effects. Transcriptome analysis, coupled with the H3K27me3 pattern, of EMF1_K27 genes in emf1 and PRC2 mutants showed that EMF1 represses gene activities via diverse mechanisms and plays a novel role in the PcG mechanism.},
}
@article{9528,
abstract = {Accumulating evidence points toward diverse functions for plant chromatin. Remarkable progress has been made over the last few years in elucidating the mechanisms for a number of these functions. Activity of the histone demethylase IBM1 accurately targets DNA methylation to silent repeats and transposable elements, not to genes. A genetic screen uncovered the surprising role of H2A.Z-containing nucleosomes in sensing precise differences in ambient temperature and consequent gene regulation. Precise maintenance of chromosome number is assured by a histone modification that suppresses inappropriate DNA replication and by centromeric histone H3 regulation of chromosome segregation. Histones and noncoding RNAs regulate FLOWERING LOCUS C, the expression of which quantitatively measures the duration of cold exposure, functioning as memory of winter. These findings are a testament to the power of using plants to research chromatin organization, and demonstrate examples of how chromatin functions to achieve biological accuracy, precision, and memory.},
author = {Huff, Jason T. and Zilberman, Daniel},
issn = {0959-437X},
journal = {Current Opinion in Genetics \& Development},
number = {2},
pages = {132--138},
publisher = {Elsevier},
title = {{Regulation of biological accuracy, precision, and memory by plant chromatin organization}},
doi = {10.1016/j.gde.2012.01.007},
volume = {22},
year = {2012},
}
@article{9535,
abstract = {The most well-studied function of DNA methylation in eukaryotic cells is the transcriptional silencing of genes and transposons. More recent results showed that many eukaryotes methylate the bodies of genes as well and that this methylation correlates with transcriptional activity rather than repression. The purpose of gene body methylation remains mysterious, but is potentially related to the histone variant H2A.Z. Studies in plants and animals have shown that the genome-wide distributions of H2A.Z and DNA methylation are strikingly anticorrelated. Furthermore, we and other investigators have shown that this relationship is likely to be the result of an ancient but unknown mechanism by which DNA methylation prevents the incorporation of H2A.Z. Recently, we discovered strong correlations between the presence of H2A.Z within gene bodies, the degree to which a gene's expression varies across tissue types or environmental conditions, and transcriptional misregulation in an h2a.z mutant. We propose that one basal function of gene body methylation is the establishment of constitutive expression patterns within housekeeping genes by excluding H2A.Z from their bodies.},
author = {Coleman-Derr, Devin and Zilberman, Daniel},
issn = {1943-4456},
journal = {Cold Spring Harbor Symposia on Quantitative Biology},
pages = {147--154},
publisher = {Cold Spring Harbor Laboratory Press},
title = {{DNA methylation, H2A.Z, and the regulation of constitutive expression}},
doi = {10.1101/sqb.2012.77.014944},
volume = {77},
year = {2012},
}
@misc{9755,
author = {Konrad, Matthias and Vyleta, Meghan and Theis, Fabian and Stock, Miriam and Klatt, Martina and Drescher, Verena and Marr, Carsten and Ugelvig, Line V and Cremer, Sylvia},
title = {{Data from: Social transfer of pathogenic fungus promotes active immunisation in ant colonies}},
publisher = {Dryad},
doi = {10.5061/dryad.sv37s},
year = {2012},
abstract = {Due to the omnipresent risk of epidemics, insect societies have evolved sophisticated disease defences at the individual and colony level. An intriguing yet little understood phenomenon is that social contact to pathogen-exposed individuals reduces susceptibility of previously naive nestmates to this pathogen. We tested whether such social immunisation in Lasius ants against the entomopathogenic fungus Metarhizium anisopliae is based on active upregulation of the immune system of nestmates following contact to an infectious individual or passive protection via transfer of immune effectors among group members—that is, active versus passive immunisation. We found no evidence for involvement of passive immunisation via transfer of antimicrobials among colony members. Instead, intensive allogrooming behaviour between naive and pathogen-exposed ants before fungal conidia firmly attached to their cuticle suggested passage of the pathogen from the exposed individuals to their nestmates. By tracing fluorescence-labelled conidia we indeed detected frequent pathogen transfer to the nestmates, where they caused low-level infections as revealed by growth of small numbers of fungal colony forming units from their dissected body content. These infections rarely led to death, but instead promoted an enhanced ability to inhibit fungal growth and an active upregulation of immune genes involved in antifungal defences (defensin and prophenoloxidase, PPO). Contrarily, there was no upregulation of the gene cathepsin L, which is associated with antibacterial and antiviral defences, and we found no increased antibacterial activity of nestmates of fungus-exposed ants. This indicates that social immunisation after fungal exposure is specific, similar to recent findings for individual-level immune priming in invertebrates.
Epidemiological modeling further suggests that active social immunisation is adaptive, as it leads to faster elimination of the disease and lower death rates than passive immunisation. Interestingly, humans have also utilised the protective effect of low-level infections to fight smallpox by intentional transfer of low pathogen doses (“variolation” or “inoculation”).},
}
@misc{9757,
author = {Tragust, Simon and Mitteregger, Barbara and Barone, Vanessa and Konrad, Matthias and Ugelvig, Line V and Cremer, Sylvia},
title = {{Data from: Ants disinfect fungus-exposed brood by oral uptake and spread of their poison}},
publisher = {Dryad},
doi = {10.5061/dryad.61649},
year = {2012},
abstract = {To fight infectious diseases, host immune defences are employed at multiple levels. Sanitary behaviour, such as pathogen avoidance and removal, acts as a first line of defence to prevent infection [1] before activation of the physiological immune system. Insect societies have evolved a wide range of collective hygiene measures and intensive health care towards pathogen-exposed group members [2]. One of the most common behaviours is allogrooming, in which nestmates remove infectious particles from the body surfaces of exposed individuals [3]. Here we show that, in invasive garden ants, grooming of fungus-exposed brood is effective beyond the sheer mechanical removal of fungal conidiospores as it also includes chemical disinfection through the application of poison produced by the ants themselves. Formic acid is the main active component of the poison. It inhibits fungal growth of conidiospores remaining on the brood surface after grooming and also those collected in the mouth of the grooming ant. This dual function is achieved by uptake of the poison droplet into the mouth through acidopore self-grooming and subsequent application onto the infectious brood via brood grooming. This extraordinary behaviour extends current understanding of grooming and the establishment of social immunity in insect societies.},
}
@misc{9758,
author = {Aeschbacher, Simon and Futschik, Andreas and Beaumont, Mark},
title = {{Data from: Approximate Bayesian computation for modular inference problems with many parameters: the example of migration rates}},
publisher = {Dryad},
doi = {10.5061/dryad.274b1},
year = {2012},
abstract = {We propose a two-step procedure for estimating multiple migration rates in an approximate Bayesian computation (ABC) framework, accounting for global nuisance parameters. The approach is not limited to migration, but generally of interest for inference problems with multiple parameters and a modular structure (e.g. independent sets of demes or loci). We condition on a known, but complex demographic model of a spatially subdivided population, motivated by the reintroduction of Alpine ibex (Capra ibex) into Switzerland. In the first step, the global parameters ancestral mutation rate and male mating skew have been estimated for the whole population in Aeschbacher et al. (Genetics 2012; 192: 1027). In the second step, we estimate in this study the migration rates independently for clusters of demes putatively connected by migration. For large clusters (many migration rates), ABC faces the problem of too many summary statistics. We therefore assess by simulation if estimation per pair of demes is a valid alternative. We find that the trade-off between reduced dimensionality for the pairwise estimation on the one hand and lower accuracy due to the assumption of pairwise independence on the other depends on the number of migration rates to be inferred: the accuracy of the pairwise approach increases with the number of parameters, relative to the joint estimation approach. To distinguish between low and zero migration, we perform ABC-type model comparison between a model with migration and one without. Applying the approach to microsatellite data from Alpine ibex, we find no evidence for substantial gene flow via migration, except for one pair of demes in one direction.},
}
@article{3836,
abstract = {Hierarchical Timing Language (HTL) is a coordination language for distributed, hard real-time applications. HTL is a hierarchical extension of Giotto and, like its predecessor, based on the logical execution time (LET) paradigm of real-time programming. Giotto is compiled into code for a virtual machine, called the Embedded Machine (or E machine). If HTL is targeted to the E machine, then the hierarchical program structure needs to be flattened; the flattening makes separate compilation difficult, and may result in E machine code of exponential size. In this paper, we propose a generalization of the E machine, which supports a hierarchical program structure at runtime through real-time trigger mechanisms that are arranged in a tree. We present the generalized E machine, and a modular compiler for HTL that generates code of linear size. The compiler may generate code for any part of a given HTL program separately in any order.},
author = {Ghosal, Arkadeb and Iercan, Daniel and Kirsch, Christoph and Henzinger, Thomas A and Sangiovanni-Vincentelli, Alberto},
journal = {Science of Computer Programming},
number = {2},
pages = {96--112},
publisher = {Elsevier},
title = {{Separate compilation of hierarchical real-time programs into linear-bounded embedded machine code}},
doi = {10.1016/j.scico.2010.06.004},
volume = {77},
year = {2012},
}
@article{3846,
abstract = {We summarize classical and recent results about two-player games played on graphs with ω-regular objectives. These games have applications in the verification and synthesis of reactive systems. Important distinctions are whether a graph game is turn-based or concurrent; deterministic or stochastic; zero-sum or not. We cluster known results and open problems according to these classifications.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A},
journal = {Journal of Computer and System Sciences},
number = {2},
pages = {394--413},
publisher = {Elsevier},
title = {{A survey of stochastic ω-regular games}},
doi = {10.1016/j.jcss.2011.05.002},
volume = {78},
year = {2012},
}
@article{3118,
author = {Bojsen-Hansen, Morten and Li, Hao and Wojtan, Christopher J},
title = {{Tracking surfaces with evolving topology}},
journal = {ACM Transactions on Graphics},
volume = {31},
number = {4},
publisher = {ACM},
doi = {10.1145/2185520.2185549},
year = {2012},
abstract = {We present a method for recovering a temporally coherent, deforming triangle mesh with arbitrarily changing topology from an incoherent sequence of static closed surfaces. We solve this problem using the surface geometry alone, without any prior information like surface templates or velocity fields. Our system combines a proven strategy for triangle mesh improvement, a robust multi-resolution non-rigid registration routine, and a reliable technique for changing surface mesh topology. We also introduce a novel topological constraint enforcement algorithm to ensure that the output and input always have similar topology. We apply our technique to a series of diverse input data from video reconstructions, physics simulations, and artistic morphs. The structured output of our algorithm allows us to efficiently track information like colors and displacement maps, recover velocity information, and solve PDEs on the mesh as a post process.},
}
@article{3248,
abstract = {We describe RTblob, a high speed vision system that detects objects in cluttered scenes based on their color and shape at a speed of over 800 frames/s. Because the system is available as open-source software and relies only on off-the-shelf PC hardware components, it can provide the basis for multiple application scenarios. As an illustrative example, we show how RTblob can be used in a robotic table tennis scenario to estimate ball trajectories through 3D space simultaneously from four cameras images at a speed of 200 Hz.},
author = {Lampert, Christoph and Peters, Jan},
issn = {1861-8219},
journal = {Journal of Real-Time Image Processing},
number = {1},
pages = {31--41},
publisher = {Springer},
title = {{Real-time detection of colored objects in multiple camera streams with off-the-shelf hardware components}},
doi = {10.1007/s11554-010-0168-3},
volume = {7},
year = {2012},
}
@inproceedings{3134,
abstract = {It has been an open question whether the sum of finitely many isotropic Gaussian kernels in n ≥ 2 dimensions can have more modes than kernels, until in 2003 Carreira-Perpiñán and Williams exhibited n +1 isotropic Gaussian kernels in ℝ n with n + 2 modes. We give a detailed analysis of this example, showing that it has exponentially many critical points and that the resilience of the extra mode grows like √n. In addition, we exhibit finite configurations of isotropic Gaussian kernels with superlinearly many modes.},
author = {Edelsbrunner, Herbert and Fasy, Brittany and Rote, Günter},
booktitle = {Proceedings of the twenty-eighth annual symposium on Computational geometry},
location = {Chapel Hill, NC, USA},
pages = {91--100},
publisher = {ACM},
title = {{Add isotropic Gaussian kernels at own risk: More and more resilient modes in higher dimensions}},
doi = {10.1145/2261250.2261265},
year = {2012},
}
@inproceedings{10906,
author = {Grebenshchikov, Sergey and Gupta, Ashutosh and Lopes, Nuno P. and Popeea, Corneliu and Rybalchenko, Andrey},
title = {{HSF(C): A software verifier based on Horn clauses}},
booktitle = {Tools and Algorithms for the Construction and Analysis of Systems},
editor = {Flanagan, Cormac and König, Barbara},
location = {Tallinn, Estonia},
isbn = {9783642287558},
issn = {0302-9743},
volume = {7214},
pages = {549--551},
publisher = {Springer},
doi = {10.1007/978-3-642-28756-5_46},
year = {2012},
abstract = {HSF(C) is a tool that automates verification of safety and liveness properties for C programs. This paper describes the verification approach taken by HSF(C) and provides instructions on how to install and use the tool.},
}
@inproceedings{10904,
author = {Chatterjee, Krishnendu and Randour, Mickael and Raskin, Jean-François},
title = {{Strategy synthesis for multi-dimensional quantitative objectives}},
booktitle = {CONCUR 2012 - Concurrency Theory},
editor = {Koutny, Maciej and Ulidowski, Irek},
location = {Newcastle upon Tyne, United Kingdom},
isbn = {9783642329395},
issn = {0302-9743},
volume = {7454},
pages = {115--131},
publisher = {Springer},
doi = {10.1007/978-3-642-32940-1_10},
year = {2012},
abstract = {Multi-dimensional mean-payoff and energy games provide the mathematical foundation for the quantitative study of reactive systems, and play a central role in the emerging quantitative theory of verification and synthesis. In this work, we study the strategy synthesis problem for games with such multi-dimensional objectives along with a parity condition, a canonical way to express ω-regular conditions. While in general, the winning strategies in such games may require infinite memory, for synthesis the most relevant problem is the construction of a finite-memory winning strategy (if one exists). Our main contributions are as follows. First, we show a tight exponential bound (matching upper and lower bounds) on the memory required for finite-memory winning strategies in both multi-dimensional mean-payoff and energy games along with parity objectives. This significantly improves the triple exponential upper bound for multi energy games (without parity) that could be derived from results in literature for games on VASS (vector addition systems with states). Second, we present an optimal symbolic and incremental algorithm to compute a finite-memory winning strategy (if one exists) in such games. Finally, we give a complete characterization of when finite memory of strategies can be traded off for randomness. In particular, we show that for one-dimension mean-payoff parity games, randomized memoryless strategies are as powerful as their pure finite-memory counterparts.},
}
@inproceedings{10903,
author = {Bouajjani, Ahmed and Dragoi, Cezara and Enea, Constantin and Sighireanu, Mihaela},
title = {{Accurate invariant checking for programs manipulating lists and arrays with infinite data}},
booktitle = {Automated Technology for Verification and Analysis},
location = {Thiruvananthapuram, India},
isbn = {9783642333859},
issn = {0302-9743},
volume = {7561},
pages = {167--182},
publisher = {Springer},
doi = {10.1007/978-3-642-33386-6_14},
year = {2012},
abstract = {We propose a logic-based framework for automated reasoning about sequential programs manipulating singly-linked lists and arrays with unbounded data. We introduce the logic SLAD, which allows combining shape constraints, written in a fragment of Separation Logic, with data and size constraints. We address the problem of checking the entailment between SLAD formulas, which is crucial in performing pre-post condition reasoning. Although this problem is undecidable in general for SLAD, we propose a sound and powerful procedure that is able to solve this problem for a large class of formulas, beyond the capabilities of existing techniques and tools. We prove that this procedure is complete, i.e., it is actually a decision procedure for this problem, for an important fragment of SLAD including known decidable logics. We implemented this procedure and shown its preciseness and its efficiency on a significant benchmark of formulas.},
}
@incollection{10896,
abstract = {Under physiological conditions the brain, via the purine salvage pathway, reuses the preformed purine bases hypoxanthine, derived from ATP degradation, and adenine (Ade), derived from polyamine synthesis, to restore its ATP pool. However, the massive degradation of ATP during ischemia, although providing valuable neuroprotective adenosine, results in the accumulation and loss of diffusible purine metabolites and thereby leads to a protracted reduction in the post-ischemic ATP pool size. In vivo, this may both limit the ability to deploy ATP-dependent reparative mechanisms and reduce the subsequent availability of adenosine, whilst in brain slices results in tissue with substantially lower levels of ATP than in vivo. In the present review, we describe the mechanisms by which brain tissue replenishes its ATP, how this can be improved with the clinically tolerated chemicals D-ribose and adenine, and the functional, and potential therapeutic, implications of doing so.},
author = {zur Nedden, Stephanie and Doney, Alexander S. and Frenguelli, Bruno G.},
booktitle = {Adenosine},
editor = {Masino, Susan and Boison, Detlev},
isbn = {9781461439028},
pages = {109--129},
publisher = {Springer},
title = {{The double-edged sword: Gaining Adenosine at the expense of ATP. How to balance the books}},
doi = {10.1007/978-1-4614-3903-5_6},
year = {2012},
}
@inproceedings{10905,
author = {Chatterjee, Krishnendu and Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon},
title = {{Polynomial-time algorithms for energy games with special weight structures}},
booktitle = {Algorithms – ESA 2012},
editor = {Epstein, Leah and Ferragina, Paolo},
location = {Ljubljana, Slovenia},
isbn = {9783642330896},
issn = {0302-9743},
volume = {7501},
pages = {301--312},
publisher = {Springer},
doi = {10.1007/978-3-642-33090-2_27},
year = {2012},
abstract = {Energy games belong to a class of turn-based two-player infinite-duration games played on a weighted directed graph. It is one of the rare and intriguing combinatorial problems that lie in NP ∩ co−NP, but are not known to be in P. While the existence of polynomial-time algorithms has been a major open problem for decades, there is no algorithm that solves any non-trivial subclass in polynomial time.
In this paper, we give several results based on the weight structures of the graph. First, we identify a notion of penalty and present a polynomial-time algorithm when the penalty is large. Our algorithm is the first polynomial-time algorithm on a large class of weighted graphs. It includes several counter examples that show that many previous algorithms, such as value iteration and random facet algorithms, require at least sub-exponential time. Our main technique is developing the first non-trivial approximation algorithm and showing how to convert it to an exact algorithm. Moreover, we show that in a practical case in verification where weights are clustered around a constant number of values, the energy game problem can be solved in polynomial time. We also show that the problem is still as hard as in general when the clique-width is bounded or the graph is strongly ergodic, suggesting that restricting graph structures need not help.},
}
@inproceedings{3165,
abstract = {Computing the winning set for Büchi objectives in alternating games on graphs is a central problem in computer aided verification with a large number of applications. The long standing best known upper bound for solving the problem is Õ(n·m), where n is the number of vertices and m is the number of edges in the graph. We are the first to break the Õ(n·m) boundary by presenting a new technique that reduces the running time to O(n^2). This bound also leads to O(n^2) time algorithms for computing the set of almost-sure winning vertices for Büchi objectives (1) in alternating games with probabilistic transitions (improving an earlier bound of Õ(n·m)), (2) in concurrent graph games with constant actions (improving an earlier bound of O(n^3)), and (3) in Markov decision processes (improving for m > n^{4/3} an earlier bound of O(min(m^{1.5}, m·n^{2/3})). We also show that the same technique can be used to compute the maximal end-component decomposition of a graph in time O(n^2), which is an improvement over earlier bounds for m > n^{4/3}. Finally, we show how to maintain the winning set for Büchi objectives in alternating games under a sequence of edge insertions or a sequence of edge deletions in O(n) amortized time per operation. This is the first dynamic algorithm for this problem.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
booktitle = {Proceedings of the Annual ACM-SIAM Symposium on Discrete Algorithms},
location = {Kyoto, Japan},
pages = {1386--1399},
publisher = {SIAM},
title = {{An {$O(n^2)$} time algorithm for alternating Büchi games}},
doi = {10.1137/1.9781611973099.109},
year = {2012},
}
@inproceedings{3163,
abstract = {We study multi-label prediction for structured output sets, a problem that occurs, for example, in object detection in images, secondary structure prediction in computational biology, and graph matching with symmetries. Conventional multilabel classification techniques are typically not applicable in this situation, because they require explicit enumeration of the label set, which is infeasible in case of structured outputs. Relying on techniques originally designed for single-label structured prediction, in particular structured support vector machines, results in reduced prediction accuracy, or leads to infeasible optimization problems. In this work we derive a maximum-margin training formulation for multi-label structured prediction that remains computationally tractable while achieving high prediction accuracy. It also shares most beneficial properties with single-label maximum-margin approaches, in particular formulation as a convex optimization problem, efficient working set training, and PAC-Bayesian generalization bounds.},
author = {Lampert, Christoph},
booktitle = {Advances in Neural Information Processing Systems},
location = {Granada, Spain},
publisher = {Neural Information Processing Systems},
title = {{Maximum margin multi-label structured prediction}},
year = {2011},
}
@inproceedings{3264,
abstract = {Verification of programs with procedures, multi-threaded programs, and higher-order functional programs can be effectively au- tomated using abstraction and refinement schemes that rely on spurious counterexamples for abstraction discovery. The analysis of counterexam- ples can be automated by a series of interpolation queries, or, alterna- tively, as a constraint solving query expressed by a set of recursion free Horn clauses. (A set of interpolation queries can be formulated as a single constraint over Horn clauses with linear dependency structure between the unknown relations.) In this paper we present an algorithm for solving recursion free Horn clauses over a combined theory of linear real/rational arithmetic and uninterpreted functions. Our algorithm performs resolu- tion to deal with the clausal structure and relies on partial solutions to deal with (non-local) instances of functionality axioms.},
author = {Gupta, Ashutosh and Popeea, Corneliu and Rybalchenko, Andrey},
booktitle = {Programming Languages and Systems},
editor = {Yang, Hongseok},
location = {Kenting, Taiwan},
pages = {188--203},
publisher = {Springer},
title = {{Solving recursion-free Horn clauses over LI+UIF}},
doi = {10.1007/978-3-642-25318-8_16},
volume = {7078},
year = {2011},
}
@inproceedings{3266,
abstract = {We present a joint image segmentation and labeling model (JSL) which, given a bag of figure-ground segment hypotheses extracted at multiple image locations and scales, constructs a joint probability distribution over both the compatible image interpretations (tilings or image segmentations) composed from those segments, and over their labeling into categories. The process of drawing samples from the joint distribution can be interpreted as first sampling tilings, modeled as maximal cliques, from a graph connecting spatially non-overlapping segments in the bag [1], followed by sampling labels for those segments, conditioned on the choice of a particular tiling. We learn the segmentation and labeling parameters jointly, based on Maximum Likelihood with a novel Incremental Saddle Point estimation procedure. The partition function over tilings and labelings is increasingly more accurately approximated by including incorrect configurations that a not-yet-competent model rates probable during learning. We show that the proposed methodology matches the current state of the art in the Stanford dataset [2], as well as in VOC2010, where 41.7% accuracy on the test set is achieved.},
author = {Ion, Adrian and Carreira, Joao and Sminchisescu, Cristian},
booktitle = {NIPS Proceedings},
location = {Granada, Spain},
pages = {1827 -- 1835},
publisher = {Neural Information Processing Systems Foundation},
title = {{Probabilistic joint image segmentation and labeling}},
volume = {24},
year = {2011},
}
@article{3269,
abstract = {The unintentional scattering of light between neighboring surfaces in complex projection environments increases the brightness and decreases the contrast, disrupting the appearance of the desired imagery. To achieve satisfactory projection results, the inverse problem of global illumination must be solved to cancel this secondary scattering. In this paper, we propose a global illumination cancellation method that minimizes the perceptual difference between the desired imagery and the actual total illumination in the resulting physical environment. Using Gauss-Newton and active set methods, we design a fast solver for the bound constrained nonlinear least squares problem raised by the perceptual error metrics. Our solver is further accelerated with a CUDA implementation and multi-resolution method to achieve 1–2 fps for problems with approximately 3000 variables. We demonstrate the global illumination cancellation algorithm with our multi-projector system. Results show that our method preserves the color fidelity of the desired imagery significantly better than previous methods.},
author = {Sheng, Yu and Cutler, Barbara and Chen, Chao and Nasman, Joshua},
doi = {10.1111/j.1467-8659.2011.01985.x},
journal = {Computer Graphics Forum},
number = {4},
pages = {1261 -- 1268},
publisher = {Wiley-Blackwell},
title = {{Perceptual global illumination cancellation in complex projection environments}},
volume = {30},
year = {2011},
}
@inproceedings{3270,
abstract = {The persistence diagram of a filtered simplicial complex is usually computed by reducing the boundary matrix of the complex. We introduce a simple optimization technique: by processing the simplices of the complex in decreasing dimension, we can “kill” columns (i.e., set them to zero) without reducing them. This technique completely avoids reduction on roughly half of the columns. We demonstrate that this idea significantly improves the running time of the reduction algorithm in practice. We also give an output-sensitive complexity analysis for the new algorithm which yields to sub-cubic asymptotic bounds under certain assumptions.},
author = {Chen, Chao and Kerber, Michael},
location = {Morschach, Switzerland},
pages = {197 -- 200},
publisher = {TU Dortmund},
title = {{Persistent homology computation with a twist}},
year = {2011},
}
@incollection{3271,
abstract = {In this paper we present an efficient framework for computation of persistent homology of cubical data in arbitrary dimensions. An existing algorithm using simplicial complexes is adapted to the setting of cubical complexes. The proposed approach enables efficient application of persistent homology in domains where the data is naturally given in a cubical form. By avoiding triangulation of the data, we significantly reduce the size of the complex. We also present a data-structure designed to compactly store and quickly manipulate cubical complexes. By means of numerical experiments, we show high speed and memory efficiency of our approach. We compare our framework to other available implementations, showing its superiority. Finally, we report performance on selected 3D and 4D data-sets.},
author = {Wagner, Hubert and Chen, Chao and Vuçini, Erald},
booktitle = {Topological Methods in Data Analysis and Visualization II},
editor = {Peikert, Ronald and Hauser, Helwig and Carr, Hamish and Fuchs, Raphael},
pages = {91 -- 106},
publisher = {Springer},
title = {{Efficient computation of persistent homology for cubical data}},
doi = {10.1007/978-3-642-23175-9_7},
year = {2011},
}
@phdthesis{3273,
author = {Maître, Jean-Léon},
publisher = {IST Austria},
school = {IST Austria},
title = {{Mechanics of adhesion and de-adhesion in zebrafish germ layer progenitors}},
year = {2011},
}
@phdthesis{3275,
abstract = {Chemokines organize immune cell trafficking by inducing either directed (tactic) or random (kinetic) migration and by activating integrins in order to support surface adhesion (haptic). Beyond that the same chemokines can establish clearly defined functional areas in secondary lymphoid organs. Until now it is unclear how chemokines can fulfill such diverse functions. One decisive prerequisite to explain these capacities is to know how chemokines are presented in tissue. In theory chemokines could occur either soluble or immobilized, and could be distributed either homogenously or as a concentration gradient. To dissect if and how the presenting mode of chemokines influences immune cells, I tested the response of dendritic cells (DCs) to differentially displayed chemokines. DCs are antigen presenting cells that reside in the periphery and migrate into draining lymph nodes (LNs) once exposed to inflammatory stimuli to activate naïve T cells. DCs are guided to and within the LN by the chemokine receptor CCR7, which has two ligands, the chemokines CCL19 and CCL21. Both CCR7 ligands are expressed by fibroblastic reticular cells in the LN, but differ in their ability to bind to heparan sulfate residues. CCL21 has a highly charged C-terminal extension, which mediates binding to anionic surfaces, whereas CCL19 is lacking such residues and likely distributes as a soluble molecule. This study shows that surface-bound CCL21 causes random, haptokinetic DC motility, which is confined to the chemokine coated area by inside-out activation of β2 integrins that mediate cell binding to the surface. CCL19 on the other hand forms concentration gradients which trigger directional, chemotactic movement, but no surface adhesion. In addition DCs can actively manipulate this system by recruiting and activating serine proteases on their surfaces, which create - by proteolytically removing the adhesive C-terminus - a solubilized variant of CCL21 that functionally resembles CCL19. 
By generating a CCL21 concentration gradient DCs establish a positive feedback loop to recruit further DCs from the periphery to the CCL21 coated region. In addition DCs can sense chemotactic gradients as well as immobilized haptokinetic fields at the same time and integrate these signals. The result is chemotactically biased haptokinesis - directional migration confined to a chemokine coated track or area - which could explain the dynamic but spatially tightly controlled swarming leukocyte locomotion patterns that have been observed in lymphatic organs by intravital microscopists. The finding that DCs can approach soluble cues in a non-adhesive manner while they attach to surfaces coated with immobilized cues raises the question how these cells transmit intracellular forces to the environment, especially in the non-adherent migration mode. In order to migrate, cells have to generate and transmit force to the extracellular substrate. Force transmission is the prerequisite to procure an expansion of the leading edge and a forward motion of the whole cell body. In the current conceptions actin polymerization at the leading edge is coupled to extracellular ligands via the integrin family of transmembrane receptors, which allows the transmission of intracellular force. Against the paradigm of force transmission during migration, leukocytes, like DCs, are able to migrate in three-dimensional environments without using integrin transmembrane receptors (Lämmermann et al., 2008). This reflects the biological function of leukocytes, as they can invade almost all tissues, whereby their migration has to be independent from the extracellular environment. How the cells can achieve this is unclear. For this study I examined DC migration in a defined three-dimensional environment and highlighted actin-dynamics with the probe Lifeact-GFP. 
The result was that chemotactic DCs can switch between integrin-dependent and integrin- independent locomotion and can thereby adapt to the adhesive properties of their environment. If the cells are able to couple their actin cytoskeleton to the substrate, actin polymerization is entirely converted into protrusion. Without coupling the actin cortex undergoes slippage and retrograde actin flow can be observed. But retrograde actin flow can be completely compensated by higher actin polymerization rate keeping the migration velocity and the shape of the cells unaltered. Mesenchymal cells like fibroblast cannot balance the loss of adhesive interaction, cannot protrude into open space and, therefore, strictly depend on integrin-mediated force coupling. This leukocyte specific phenomenon of “adaptive force transmission” endows these cells with the unique ability to transit and invade almost every type of tissue. },
author = {Schumann, Kathrin},
pages = {141},
publisher = {IST Austria},
school = {IST Austria},
title = {{The role of chemotactic gradients in dendritic cell migration}},
year = {2011},
}
@article{3287,
abstract = {Diffusing membrane constituents are constantly exposed to a variety of forces that influence their stochastic path. Single molecule experiments allow for resolving trajectories at extremely high spatial and temporal accuracy, thereby offering insights into en route interactions of the tracer. In this review we discuss approaches to derive information about the underlying processes, based on single molecule tracking experiments. In particular, we focus on a new versatile way to analyze single molecule diffusion in the absence of a full analytical treatment. The method is based on comprehensive comparison of an experimental data set against the hypothetical outcome of multiple experiments performed on the computer. Since Monte Carlo simulations can be easily and rapidly performed even on state-of-the-art PCs, our method provides a simple way for testing various - even complicated - diffusion models. We describe the new method in detail, and show the applicability on two specific examples: firstly, kinetic rate constants can be derived for the transient interaction of mobile membrane proteins; secondly, residence time and corral size can be extracted for confined diffusion.},
author = {Ruprecht, Verena and Axmann, Markus and Wieser, Stefan and Schuetz, Gerhard},
journal = {Current Protein \& Peptide Science},
number = {8},
pages = {714 -- 724},
publisher = {Bentham Science Publishers},
title = {{What can we learn from single molecule trajectories?}},
doi = {10.2174/138920311798841753},
volume = {12},
year = {2011},
}
@article{3288,
abstract = {The zonula adherens (ZA) of epithelial cells is a site of cell-cell adhesion where cellular forces are exerted and resisted. Increasing evidence indicates that E-cadherin adhesion molecules at the ZA serve to sense force applied on the junctions and coordinate cytoskeletal responses to those forces. Efforts to understand the role that cadherins play in mechanotransduction have been limited by the lack of assays to measure the impact of forces on the ZA. In this study we used 4D imaging of GFP-tagged E-cadherin to analyse the movement of the ZA. Junctions in confluent epithelial monolayers displayed prominent movements oriented orthogonal (perpendicular) to the ZA itself. Two components were identified in these movements: a relatively slow unidirectional (translational) component that could be readily fitted by least-squares regression analysis, upon which were superimposed more rapid oscillatory movements. Myosin IIB was a dominant factor responsible for driving the unilateral translational movements. In contrast, frequency spectrum analysis revealed that depletion of Myosin IIA increased the power of the oscillatory movements. This implies that Myosin IIA may serve to dampen oscillatory movements of the ZA. This extends our recent analysis of Myosin II at the ZA to demonstrate that Myosin IIA and Myosin IIB make distinct contributions to junctional movement at the ZA.},
author = {Smutny, Michael and Wu, Selwin and Gomez, Guillermo and Mangold, Sabine and Yap, Alpha and Hamilton, Nicholas},
journal = {PLoS One},
number = {7},
pages = {e22458},
publisher = {Public Library of Science},
title = {{Multicomponent analysis of junctional movements regulated by Myosin II isoforms at the epithelial zonula adherens}},
doi = {10.1371/journal.pone.0022458},
volume = {6},
year = {2011},
}
@article{3290,
abstract = {Analysis of genomic data requires an efficient way to calculate likelihoods across very large numbers of loci. We describe a general method for finding the distribution of genealogies: we allow migration between demes, splitting of demes [as in the isolation-with-migration (IM) model], and recombination between linked loci. These processes are described by a set of linear recursions for the generating function of branch lengths. Under the infinite-sites model, the probability of any configuration of mutations can be found by differentiating this generating function. Such calculations are feasible for small numbers of sampled genomes: as an example, we show how the generating function can be derived explicitly for three genes under the two-deme IM model. This derivation is done automatically, using Mathematica. Given data from a large number of unlinked and nonrecombining blocks of sequence, these results can be used to find maximum-likelihood estimates of model parameters by tabulating the probabilities of all relevant mutational configurations and then multiplying across loci. The feasibility of the method is demonstrated by applying it to simulated data and to a data set previously analyzed by Wang and Hey (2010) consisting of 26,141 loci sampled from Drosophila simulans and D. melanogaster. Our results suggest that such likelihood calculations are scalable to genomic data as long as the numbers of sampled individuals and mutations per sequence block are small.},
author = {Lohse, Konrad and Harrison, Richard and Barton, Nicholas H},
doi = {10.1534/genetics.111.129569},
journal = {Genetics},
number = {3},
pages = {977 -- 987},
publisher = {Genetics Society of America},
title = {{A general method for calculating likelihoods under the coalescent process}},
volume = {189},
year = {2011},
}
@inproceedings{3297,
abstract = {Animating detailed liquid surfaces has always been a challenge for computer graphics researchers and visual effects artists. Over the past few years, researchers in this field have focused on mesh-based surface tracking to synthesize extremely detailed liquid surfaces as efficiently as possible. This course provides a solid understanding of the steps required to create a fluid simulator with a mesh-based liquid surface.
The course begins with an overview of several existing liquid-surface-tracking techniques and the pros and cons of each method. Then it explains how to embed a triangle mesh into a finite-difference-based fluid simulator and describes several methods for allowing the liquid surface to merge together or break apart. The final section showcases the benefits and further applications of a mesh-based liquid surface, highlighting state-of-the-art methods for tracking colors and textures, maintaining liquid volume, preserving small surface features, and simulating realistic surface-tension waves.},
author = {Wojtan, Christopher J and Müller Fischer, Matthias and Brochu, Tyson},
booktitle = {ACM SIGGRAPH 2011 Courses},
location = {Vancouver, BC, Canada},
publisher = {ACM},
title = {{Liquid simulation with mesh-based surface tracking}},
doi = {10.1145/2037636.2037644},
year = {2011},
}
@inproceedings{3298,
abstract = {We present a new algorithm for enforcing incompressibility for Smoothed Particle Hydrodynamics (SPH) by preserving uniform density across the domain. We propose a hybrid method that uses a Poisson solve on a coarse grid to enforce a divergence free velocity field, followed by a local density correction of the particles. This avoids typical grid artifacts and maintains the Lagrangian nature of SPH by directly transferring pressures onto particles. Our method can be easily integrated with existing SPH techniques such as the incompressible PCISPH method as well as weakly compressible SPH by adding an additional force term. We show that this hybrid method accelerates convergence towards uniform density and permits a significantly larger time step compared to earlier approaches while producing similar results. We demonstrate our approach in a variety of scenarios with significant pressure gradients such as splashing liquids.},
author = {Raveendran, Karthik and Wojtan, Christopher J and Turk, Greg},
booktitle = {Proceedings of the 2011 ACM SIGGRAPH/Eurographics Symposium on Computer Animation},
editor = {Spencer, Stephen},
location = {Vancouver, Canada},
pages = {33 -- 42},
publisher = {ACM},
title = {{Hybrid smoothed particle hydrodynamics}},
doi = {10.1145/2019406.2019411},
year = {2011},
}
@inproceedings{3299,
abstract = {We introduce propagation models, a formalism designed to support general and efficient data structures for the transient analysis of biochemical reaction networks. We give two use cases for propagation abstract data types: the uniformization method and numerical integration. We also sketch an implementation of a propagation abstract data type, which uses abstraction to approximate states.},
author = {Henzinger, Thomas A and Mateescu, Maria},
doi = {10.1145/2037509.2037510},
location = {Paris, France},
pages = {1 -- 3},
publisher = {Springer},
title = {{Propagation models for computing biochemical reaction networks}},
year = {2011},
}
@inproceedings{3301,
abstract = {The chemical master equation is a differential equation describing the time evolution of the probability distribution over the possible “states” of a biochemical system. The solution of this equation is of interest within the systems biology field ever since the importance of the molecular noise has been acknowledged. Unfortunately, most of the systems do not have analytical solutions, and numerical solutions suffer from the curse of dimensionality and therefore need to be approximated. Here, we introduce the concept of tail approximation, which retrieves an approximation of the probabilities in the tail of a distribution from the total probability of the tail and its conditional expectation. This approximation method can then be used to numerically compute the solution of the chemical master equation on a subset of the state space, thus fighting the explosion of the state space, for which this problem is renowned.},
author = {Henzinger, Thomas A and Mateescu, Maria},
publisher = {Tampere International Center for Signal Processing},
title = {{Tail approximation for the chemical master equation}},
year = {2011},
}
@inproceedings{3302,
abstract = {Cloud computing aims to give users virtually unlimited pay-per-use computing resources without the burden of managing the underlying infrastructure. We present a new job execution environment Flextic that exploits scalable static scheduling techniques to provide the user with a flexible pricing model, such as a tradeoff between different degrees of execution speed and execution price, and at the same time, reduce scheduling overhead for the cloud provider. We have evaluated a prototype of Flextic on Amazon EC2 and compared it against Hadoop. For various data parallel jobs from machine learning, image processing, and gene sequencing that we considered, Flextic has low scheduling overhead and reduces job duration by up to 15% compared to Hadoop, a dynamic cloud scheduler.},
author = {Henzinger, Thomas A and Singh, Anmol and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
pages = {1 -- 6},
publisher = {USENIX},
title = {{Static scheduling in clouds}},
year = {2011},
}
@inproceedings{3312,
abstract = {We study the 3D reconstruction of plant roots from multiple 2D images. To meet the challenge caused by the delicate nature of thin branches, we make three innovations to cope with the sensitivity to image quality and calibration. First, we model the background as a harmonic function to improve the segmentation of the root in each 2D image. Second, we develop the concept of the regularized visual hull which reduces the effect of jittering and refraction by ensuring consistency with one 2D image. Third, we guarantee connectedness through adjustments to the 3D reconstruction that minimize global error. Our software is part of a biological phenotype/genotype study of agricultural root systems. It has been tested on more than 40 plant roots and results are promising in terms of reconstruction quality and efficiency.},
author = {Zheng, Ying and Gu, Steve and Edelsbrunner, Herbert and Tomasi, Carlo and Benfey, Philip},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
location = {Barcelona, Spain},
publisher = {IEEE},
title = {{Detailed reconstruction of 3D plant root shape}},
doi = {10.1109/ICCV.2011.6126475},
year = {2011},
}
@inproceedings{3313,
abstract = {Interpreting an image as a function on a compact subset of the Euclidean plane, we get its scale-space by diffusion, spreading the image over the entire plane. This generates a 1-parameter family of functions alternatively defined as convolutions with a progressively wider Gaussian kernel. We prove that the corresponding 1-parameter family of persistence diagrams have norms that go rapidly to zero as time goes to infinity. This result rationalizes experimental observations about scale-space. We hope this will lead to targeted improvements of related computer vision methods.},
author = {Chen, Chao and Edelsbrunner, Herbert},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
location = {Barcelona, Spain},
publisher = {IEEE},
title = {{Diffusion runs low on persistence fast}},
doi = {10.1109/ICCV.2011.6126271},
year = {2011},
}
@article{3315,
abstract = {We consider two-player games played in real time on game structures with clocks where the objectives of players are described using parity conditions. The games are concurrent in that at each turn, both players independently propose a time delay and an action, and the action with the shorter delay is chosen. To prevent a player from winning by blocking time, we restrict each player to play strategies that ensure that the player cannot be responsible for causing a zeno run. First, we present an efficient reduction of these games to turn-based (i.e., not concurrent) finite-state (i.e., untimed) parity games. Our reduction improves the best known complexity for solving timed parity games. Moreover, the rich class of algorithms for classical parity games can now be applied to timed parity games. The states of the resulting game are based on clock regions of the original game, and the state space of the finite game is linear in the size of the region graph. Second, we consider two restricted classes of strategies for the player that represents the controller in a real-time synthesis problem, namely, limit-robust and bounded-robust winning strategies. Using a limit-robust winning strategy, the controller cannot choose an exact real-valued time delay but must allow for some nonzero jitter in each of its actions. If there is a given lower bound on the jitter, then the strategy is bounded-robust winning. We show that exact strategies are more powerful than limit-robust strategies, which are more powerful than bounded-robust winning strategies for any bound. For both kinds of robust strategies, we present efficient reductions to standard timed automaton games. These reductions provide algorithms for the synthesis of robust real-time controllers.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Prabhu, Vinayak},
doi = {10.2168/LMCS-7(4:8)2011},
journal = {Logical Methods in Computer Science},
number = {4},
publisher = {International Federation of Computational Logic},
title = {{Timed parity games: Complexity and robustness}},
volume = {7},
year = {2011},
}
@inproceedings{3316,
abstract = {In addition to being correct, a system should be robust, that is, it should behave reasonably even after receiving unexpected inputs. In this paper, we summarize two formal notions of robustness that we have introduced previously for reactive systems. One of the notions is based on assigning costs for failures on a user-provided notion of incorrect transitions in a specification. Here, we define a system to be robust if a finite number of incorrect inputs does not lead to an infinite number of incorrect outputs. We also give a more refined notion of robustness that aims to minimize the ratio of output failures to input failures. The second notion is aimed at liveness. In contrast to the previous notion, it has no concept of recovery from an error. Instead, it compares the ratio of the number of liveness constraints that the system violates to the number of liveness constraints that the environment violates.},
author = {Bloem, Roderick and Chatterjee, Krishnendu and Greimel, Karin and Henzinger, Thomas A and Jobstmann, Barbara},
booktitle = {6th IEEE International Symposium on Industrial and Embedded Systems},
doi = {10.1109/SIES.2011.5953660},
location = {Vasteras, Sweden},
pages = {176 -- 185},
publisher = {IEEE},
title = {{Specification-centered robustness}},
year = {2011},
}
@article{3318,
abstract = {Parvalbumin is thought to act in a manner similar to EGTA, but how a slow Ca2+ buffer affects nanodomain-coupling regimes at GABAergic synapses is unclear. Direct measurements of parvalbumin concentration and paired recordings in rodent hippocampus and cerebellum revealed that parvalbumin affects synaptic dynamics only when expressed at high levels. Modeling suggests that, in high concentrations, parvalbumin may exert BAPTA-like effects, modulating nanodomain coupling via competition with local saturation of endogenous fixed buffers.},
author = {Eggermann, Emmanuel and Jonas, Peter M},
doi = {10.1038/nn.3002},
journal = {Nature Neuroscience},
pages = {20 -- 22},
publisher = {Nature Publishing Group},
title = {{How the “slow” Ca(2+) buffer parvalbumin affects transmitter release in nanodomain coupling regimes at GABAergic synapses}},
volume = {15},
year = {2011},
}
@inproceedings{3319,
abstract = {We address the problem of metric learning for multi-view data, namely the construction of embedding projections from data in different representations into a shared feature space, such that the Euclidean distance in this space provides a meaningful within-view as well as between-view similarity. Our motivation stems from the problem of cross-media retrieval tasks, where the availability of a joint Euclidean distance function is a pre-requisite to allow fast, in particular hashing-based, nearest neighbor queries. We formulate an objective function that expresses the intuitive concept that matching samples are mapped closely together in the output space, whereas non-matching samples are pushed apart, no matter in which view they are available. The resulting optimization problem is not convex, but it can be decomposed explicitly into a convex and a concave part, thereby allowing efficient optimization using the convex-concave procedure. Experiments on an image retrieval task show that nearest-neighbor based cross-view retrieval is indeed possible, and the proposed technique improves the retrieval accuracy over baseline techniques.},
author = {Quadrianto, Novi and Lampert, Christoph},
booktitle = {Proceedings of the 28th International Conference on Machine Learning (ICML)},
location = {Bellevue, USA},
pages = {425 -- 432},
publisher = {Omnipress},
title = {{Learning multi-view neighborhood preserving projections}},
year = {2011},
}
@article{3320,
abstract = {Powerful statistical models that can be learned efficiently from large amounts of data are currently revolutionizing computer vision. These models possess a rich internal structure reflecting task-specific relations and constraints. This monograph introduces the reader to the most popular classes of structured models in computer vision. Our focus is discrete undirected graphical models which we cover in detail together with a description of algorithms for both probabilistic inference and maximum a posteriori inference. We discuss separately recently successful techniques for prediction in general structured models. In the second part of this monograph we describe methods for parameter learning where we distinguish the classic maximum likelihood based methods from the more recent prediction-based parameter learning methods. We highlight developments to enhance current models and discuss kernelized models and latent variable models. To make the monograph more practical and to provide links to further study we provide examples of successful application of many methods in the computer vision literature.},
author = {Nowozin, Sebastian and Lampert, Christoph},
journal = {Foundations and Trends in Computer Graphics and Vision},
number = {3-4},
pages = {185 -- 365},
publisher = {Now Publishers},
title = {{Structured learning and prediction in computer vision}},
doi = {10.1561/0600000033},
volume = {6},
year = {2011},
}
@misc{3322,
abstract = {We study multi-label prediction for structured output spaces, a problem that occurs, for example, in object detection in images, secondary structure prediction in computational biology, and graph matching with symmetries. Conventional multi-label classification techniques are typically not applicable in this situation, because they require explicit enumeration of the label space, which is infeasible in case of structured outputs. Relying on techniques originally designed for single-label structured prediction, in particular structured support vector machines, results in reduced prediction accuracy, or leads to infeasible optimization problems. In this work we derive a maximum-margin training formulation for multi-label structured prediction that remains computationally tractable while achieving high prediction accuracy. It also shares most beneficial properties with single-label maximum-margin approaches, in particular a formulation as a convex optimization problem, efficient working set training, and PAC-Bayesian generalization bounds.},
author = {Lampert, Christoph},
booktitle = {NIPS: Neural Information Processing Systems},
publisher = {Neural Information Processing Systems},
title = {{Maximum margin multi-label structured prediction}},
year = {2011},
}
@inproceedings{3323,
abstract = {We present a new decidable logic called TREX for expressing constraints about imperative tree data structures. In particular, TREX supports a transitive closure operator that can express reachability constraints, which often appear in data structure invariants. We show that our logic is closed under weakest precondition computation, which enables its use for automated software verification. We further show that satisfiability of formulas in TREX is decidable in NP. The low complexity makes it an attractive alternative to more expensive logics such as monadic second-order logic (MSOL) over trees, which have been traditionally used for reasoning about tree data structures.},
author = {Wies, Thomas and Muñiz, Marco and Kuncak, Viktor},
location = {Wrocław, Poland},
pages = {476 -- 491},
publisher = {Springer},
title = {{An efficient decision procedure for imperative tree data structures}},
doi = {10.1007/978-3-642-22438-6_36},
volume = {6803},
year = {2011},
}
@inproceedings{3324,
abstract = {Automated termination provers often use the following schema to prove that a program terminates: construct a relational abstraction of the program's transition relation and then show that the relational abstraction is well-founded. The focus of current tools has been on developing sophisticated techniques for constructing the abstractions while relying on known decidable logics (such as linear arithmetic) to express them. We believe we can significantly increase the class of programs that are amenable to automated termination proofs by identifying more expressive decidable logics for reasoning about well-founded relations. We therefore present a new decision procedure for reasoning about multiset orderings, which are among the most powerful orderings used to prove termination. We show that, using our decision procedure, one can automatically prove termination of natural abstractions of programs.},
author = {Piskac, Ruzica and Wies, Thomas},
editor = {Jhala, Ranjit and Schmidt, David},
location = {Texas, USA},
pages = {371 -- 386},
publisher = {Springer},
title = {{Decision procedures for automating termination proofs}},
doi = {10.1007/978-3-642-18275-4_26},
volume = {6538},
year = {2011},
}
@inproceedings{3325,
abstract = {We introduce streaming data string transducers that map input data strings to output data strings in a single left-to-right pass in linear time. Data strings are (unbounded) sequences of data values, tagged with symbols from a finite set, over a potentially infinite data do- main that supports only the operations of equality and ordering. The transducer uses a finite set of states, a finite set of variables ranging over the data domain, and a finite set of variables ranging over data strings. At every step, it can make decisions based on the next in- put symbol, updating its state, remembering the input data value in its data variables, and updating data-string variables by concatenat- ing data-string variables and new symbols formed from data vari- ables, while avoiding duplication. We establish that the problems of checking functional equivalence of two streaming transducers, and of checking whether a streaming transducer satisfies pre/post verification conditions specified by streaming acceptors over in- put/output data-strings, are in PSPACE. We identify a class of imperative and a class of functional pro- grams, manipulating lists of data items, which can be effectively translated to streaming data-string transducers. The imperative pro- grams dynamically modify a singly-linked heap by changing next- pointers of heap-nodes and by adding new nodes. The main re- striction specifies how the next-pointers can be used for traversal. We also identify an expressively equivalent fragment of functional programs that traverse a list using syntactically restricted recursive calls. Our results lead to algorithms for assertion checking and for checking functional equivalence of two programs, written possibly in different programming styles, for commonly used routines such as insert, delete, and reverse.},
author = {Alur, Rajeev and Černý, Pavol},
location = {Texas, USA},
number = {1},
pages = {599 -- 610},
publisher = {ACM},
title = {{Streaming transducers for algorithmic verification of single pass list processing programs}},
doi = {10.1145/1926385.1926454},
volume = {46},
year = {2011},
}
@inproceedings{3326,
abstract = {Weighted automata map input words to numerical values. Applications of weighted automata include formal verification of quantitative properties, as well as text, speech, and image processing. A weighted automaton is defined with respect to a semiring. For the tropical semiring, the weight of a run is the sum of the weights of the transitions taken along the run, and the value of a word is the minimal weight of an accepting run on it. In the 90’s, Krob studied the decidability of problems on rational series defined with respect to the tropical semiring. Rational series are strongly related to weighted automata, and Krob’s results apply to them. In particular, it follows from Krob’s results that the universality problem (that is, deciding whether the values of all words are below some threshold) is decidable for weighted automata defined with respect to the tropical semiring with domain ℕ ∪ {∞}, and that the equality problem is undecidable when the domain is ℤ ∪ {∞}. In this paper we continue the study of the borders of decidability in weighted automata, describe alternative and direct proofs of the above results, and tighten them further. Unlike the proofs of Krob, which are algebraic in their nature, our proofs stay in the terrain of state machines, and the reduction is from the halting problem of a two-counter machine. This enables us to significantly simplify Krob’s reasoning, make the undecidability result accessible to the automata-theoretic community, and strengthen it to apply already to a very simple class of automata: all the states are accepting, there are no initial nor final weights, and all the weights on the transitions are from the set {−1, 0, 1}. The fact we work directly with the automata enables us to tighten also the decidability results and to show that the universality problem for weighted automata defined with respect to the tropical semiring with domain ℕ ∪ {∞}, and in fact even with domain ℚ≥0 ∪ {∞}, is PSPACE-complete.
Our results thus draw a sharper picture about the decidability of decision problems for weighted automata, in both the front of containment vs. universality and the front of the ℕ ∪ {∞} vs. the ℤ ∪ {∞} domains.},
author = {Almagor, Shaull and Boker, Udi and Kupferman, Orna},
location = {Taipei, Taiwan},
pages = {482 -- 491},
publisher = {Springer},
title = {{What’s decidable about weighted automata}},
doi = {10.1007/978-3-642-24372-1_37},
volume = {6996},
year = {2011},
}
@inproceedings{3328,
abstract = {We report on a generic uni- and bivariate algebraic kernel that is publicly available with CGAL 3.7. It comprises complete, correct, though efficient state-of-the-art implementations on polynomials, roots of polynomial systems, and the support to analyze algebraic curves defined by bivariate polynomials. The kernel design is generic, that is, various number types and substeps can be exchanged. It is accompanied with a ready-to-use interface to enable arrangements induced by algebraic curves, that have already been used as basis for various geometric applications, as arrangements on Dupin cyclides or the triangulation of algebraic surfaces. We present two novel applications: arrangements of rotated algebraic curves and Boolean set operations on polygons bounded by segments of algebraic curves. We also provide experiments showing that our general implementation is competitive and even often clearly outperforms existing implementations that are explicitly tailored for specific types of non-linear curves that are available in CGAL.},
author = {Berberich, Eric and Hemmer, Michael and Kerber, Michael},
location = {Paris, France},
pages = {179 -- 186},
publisher = {ACM},
title = {{A generic algebraic kernel for non-linear geometric applications}},
doi = {10.1145/1998196.1998224},
year = {2011},
}
@inproceedings{3329,
abstract = {We consider the offset-deconstruction problem: Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance µ in Hausdorff distance, as the Minkowski sum of another polygonal shape P with a disk of fixed radius? If it does, we also seek a preferably simple-looking solution shape P; then, P's offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give an O(n log n)-time exact decision algorithm that handles any polygonal shape, assuming the real-RAM model of computation. An alternative algorithm, based purely on rational arithmetic, answers the same deconstruction problem, up to an uncertainty parameter, and its running time depends on the parameter δ (in addition to the other input parameters: n, µ and the radius of the disk). If the input shape is found to be approximable, the rational-arithmetic algorithm also computes an approximate solution shape for the problem. For convex shapes, the complexity of the exact decision algorithm drops to O(n), which is also the time required to compute a solution shape P with at most one more vertex than a vertex-minimal one. Our study is motivated by applications from two different domains. However, since the offset operation has numerous uses, we anticipate that the reverse question that we study here will be still more broadly applicable. We present results obtained with our implementation of the rational-arithmetic algorithm.},
author = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
booktitle = {Proceedings of the twenty-seventh annual symposium on Computational geometry},
location = {Paris, France},
pages = {187 -- 196},
publisher = {ACM},
title = {{Deconstructing approximate offsets}},
doi = {10.1145/1998196.1998225},
year = {2011},
}
@inproceedings{3330,
abstract = {We consider the problem of approximating all real roots of a square-free polynomial f. Given isolating intervals, our algorithm refines each of them to a width at most 2-L, that is, each of the roots is approximated to L bits after the binary point. Our method provides a certified answer for arbitrary real polynomials, only requiring finite approximations of the polynomial coefficient and choosing a suitable working precision adaptively. In this way, we get a correct algorithm that is simple to implement and practically efficient. Our algorithm uses the quadratic interval refinement method; we adapt that method to be able to cope with inaccuracies when evaluating f, without sacrificing its quadratic convergence behavior. We prove a bound on the bit complexity of our algorithm in terms of degree, coefficient size and discriminant. Our bound improves previous work on integer polynomials by a factor of deg f and essentially matches best known theoretical bounds on root approximation which are obtained by very sophisticated algorithms.},
author = {Kerber, Michael and Sagraloff, Michael},
location = {California, USA},
pages = {209 -- 216},
publisher = {ACM},
title = {{Root refinement for real polynomials}},
doi = {10.1145/1993886.1993920},
year = {2011},
}
@article{3332,
abstract = {Given an algebraic hypersurface O in ℝd, how many simplices are necessary for a simplicial complex isotopic to O? We address this problem and the variant where all vertices of the complex must lie on O. We give asymptotically tight worst-case bounds for algebraic plane curves. Our results gradually improve known bounds in higher dimensions; however, the question for tight bounds remains unsolved for d ≥ 3.},
author = {Kerber, Michael and Sagraloff, Michael},
journal = {Graphs and Combinatorics},
number = {3},
pages = {419 -- 430},
publisher = {Springer},
title = {{A note on the complexity of real algebraic hypersurfaces}},
doi = {10.1007/s00373-011-1020-7},
volume = {27},
year = {2011},
}
@article{3334,
author = {Edelsbrunner, Herbert and Pach, János and Ziegler, Günter},
journal = {Discrete & Computational Geometry},
number = {1},
pages = {1 -- 2},
publisher = {Springer},
title = {{Letter from the new editors-in-chief}},
doi = {10.1007/s00454-010-9313-9},
volume = {45},
year = {2011},
}
@inbook{3335,
abstract = {We study the topology of the Megaparsec Cosmic Web in terms of the scale-dependent Betti numbers, which formalize the topological information content of the cosmic mass distribution. While the Betti numbers do not fully quantify topology, they extend the information beyond conventional cosmological studies of topology in terms of genus and Euler characteristic. The richer information content of Betti numbers goes along the availability of fast algorithms to compute them. For continuous density fields, we determine the scale-dependence of Betti numbers by invoking the cosmologically familiar filtration of sublevel or superlevel sets defined by density thresholds. For the discrete galaxy distribution, however, the analysis is based on the alpha shapes of the particles. These simplicial complexes constitute an ordered sequence of nested subsets of the Delaunay tessellation, a filtration defined by the scale parameter, α. As they are homotopy equivalent to the sublevel sets of the distance field, they are an excellent tool for assessing the topological structure of a discrete point distribution. In order to develop an intuitive understanding for the behavior of Betti numbers as a function of α, and their relation to the morphological patterns in the Cosmic Web, we first study them within the context of simple heuristic Voronoi clustering models. These can be tuned to consist of specific morphological elements of the Cosmic Web, i.e. clusters, filaments, or sheets. To elucidate the relative prominence of the various Betti numbers in different stages of morphological evolution, we introduce the concept of alpha tracks. Subsequently, we address the topology of structures emerging in the standard LCDM scenario and in cosmological scenarios with alternative dark energy content. The evolution of the Betti numbers is shown to reflect the hierarchical evolution of the Cosmic Web. 
We also demonstrate that the scale-dependence of the Betti numbers yields a promising measure of cosmological parameters, with a potential to help in determining the nature of dark energy and to probe primordial non-Gaussianities. We also discuss the expected Betti numbers as a function of the density threshold for superlevel sets of a Gaussian random field. Finally, we introduce the concept of persistent homology. It measures scale levels of the mass distribution and allows us to separate small from large scale features. Within the context of the hierarchical cosmic structure formation, persistence provides a natural formalism for a multiscale topology study of the Cosmic Web.},
author = {Van De Weygaert, Rien and Vegter, Gert and Edelsbrunner, Herbert and Jones, Bernard and Pranav, Pratyush and Park, Changbom and Hellwing, Wojciech and Eldering, Bob and Kruithof, Nico and Bos, Patrick and Hidding, Johan and Feldbrugge, Job and Ten Have, Eline and Van Engelen, Matti and Caroli, Manuel and Teillaud, Monique},
booktitle = {Transactions on Computational Science XIV},
editor = {Gavrilova, Marina and Tan, Kenneth and Mostafavi, Mir},
pages = {60 -- 101},
publisher = {Springer},
title = {{Alpha, Betti and the Megaparsec Universe: On the topology of the Cosmic Web}},
doi = {10.1007/978-3-642-25249-5_3},
volume = {6970},
year = {2011},
}
@inproceedings{3337,
abstract = {Playing table tennis is a difficult task for robots, especially due to their limitations of acceleration. A key bottleneck is the amount of time needed to reach the desired hitting position and velocity of the racket for returning the incoming ball. Here, it often does not suffice to simply extrapolate the ball's trajectory after the opponent returns it but more information is needed. Humans are able to predict the ball's trajectory based on the opponent's moves and, thus, have a considerable advantage. Hence, we propose to incorporate an anticipation system into robot table tennis players, which enables the robot to react earlier while the opponent is performing the striking movement. Based on visual observation of the opponent's racket movement, the robot can predict the aim of the opponent and adjust its movement generation accordingly. The policies for deciding how and when to react are obtained by reinforcement learning. We conduct experiments with an existing robot player to show that the learned reaction policy can significantly improve the performance of the overall system.},
author = {Wang, Zhikun and Lampert, Christoph and Mülling, Katharina and Schölkopf, Bernhard and Peters, Jan},
location = {San Francisco, USA},
pages = {332 -- 337},
publisher = {IEEE},
title = {{Learning anticipation policies for robot table tennis}},
doi = {10.1109/IROS.2011.6094892},
year = {2011},
}
@unpublished{3338,
abstract = {We consider 2-player games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine the successor state. We study concurrent games with ω-regular winning conditions specified as parity objectives. We consider the qualitative analysis problems: the computation of the almost-sure and limit-sure winning set of states, where player 1 can ensure to win with probability 1 and with probability arbitrarily close to 1, respectively. In general the almost-sure and limit-sure winning strategies require both infinite-memory as well as infinite-precision (to describe probabilities). We study the bounded-rationality problem for qualitative analysis of concurrent parity games, where the strategy set for player 1 is restricted to bounded-resource strategies. In terms of precision, strategies can be deterministic, uniform, finite-precision or infinite-precision; and in terms of memory, strategies can be memoryless, finite-memory or infinite-memory. We present a precise and complete characterization of the qualitative winning sets for all combinations of classes of strategies. In particular, we show that uniform memoryless strategies are as powerful as finite-precision infinite-memory strategies, and infinite-precision memoryless strategies are as powerful as infinite-precision finite-memory strategies. We show that the winning sets can be computed in O(n^{2d+3}) time, where n is the size of the game structure and 2d is the number of priorities (or colors), and our algorithms are symbolic. The membership problem of whether a state belongs to a winning set can be decided in NP ∩ coNP.
While this complexity is the same as for the simpler class of turn-based parity games, where in each state only one of the two players has a choice of moves, our algorithms, that are obtained by characterization of the winning sets as μ-calculus formulas, are considerably more involved than those for turn-based games.},
author = {Chatterjee, Krishnendu},
booktitle = {arXiv},
pages = {1 -- 51},
publisher = {ArXiv},
title = {{Bounded rationality in concurrent parity games}},
year = {2011},
}
@unpublished{3339,
abstract = {Turn-based stochastic games and its important subclass Markov decision processes (MDPs) provide models for systems with both probabilistic and nondeterministic behaviors. We consider turn-based stochastic games with two classical quantitative objectives: discounted-sum and long-run average objectives. The game models and the quantitative objectives are widely used in probabilistic verification, planning, optimal inventory control, network protocol and performance analysis. Games and MDPs that model realistic systems often have very large state spaces, and probabilistic abstraction techniques are necessary to handle the state-space explosion. The commonly used full-abstraction techniques do not yield space-savings for systems that have many states with similar value, but does not necessarily have similar transition structure. A semi-abstraction technique, namely Magnifying-lens abstractions (MLA), that clusters states based on value only, disregarding differences in their transition relation was proposed for qualitative objectives (reachability and safety objectives). In this paper we extend the MLA technique to solve stochastic games with discounted-sum and long-run average objectives. We present the MLA technique based abstraction-refinement algorithm for stochastic games and MDPs with discounted-sum objectives. For long-run average objectives, our solution works for all MDPs and a sub-class of stochastic games where every state has the same value. },
author = {Chatterjee, Krishnendu and De Alfaro, Luca and Roy, Pritam},
booktitle = {arXiv},
pages = {17},
publisher = {ArXiv},
title = {{Magnifying lens abstraction for stochastic games with discounted and long-run average objectives}},
year = {2011},
}
@inproceedings{3344,
abstract = {Games played on graphs provide the mathematical framework to analyze several important problems in computer science as well as mathematics, such as the synthesis problem of Church, model checking of open reactive systems and many others. On the basis of mode of interaction of the players these games can be classified as follows: (a) turn-based (players make moves in turns); and (b) concurrent (players make moves simultaneously). On the basis of the information available to the players these games can be classified as follows: (a) perfect-information (players have perfect view of the game); and (b) partial-information (players have partial view of the game). In this talk we will consider all these classes of games with reachability objectives, where the goal of one player is to reach a set of target vertices of the graph, and the goal of the opponent player is to prevent the player from reaching the target. We will survey the results for various classes of games, and the results range from linear time decision algorithms to EXPTIME-complete problems to undecidable problems.},
author = {Chatterjee, Krishnendu},
editor = {Delzanno, Giorgio and Potapov, Igor},
location = {Genoa, Italy},
pages = {1 -- 1},
publisher = {Springer},
title = {{Graph games with reachability objectives}},
doi = {10.1007/978-3-642-24288-5_1},
volume = {6945},
year = {2011},
}
@inproceedings{3345,
abstract = {We consider Markov Decision Processes (MDPs) with mean-payoff parity and energy parity objectives. In system design, the parity objective is used to encode ω-regular specifications, and the mean-payoff and energy objectives can be used to model quantitative resource constraints. The energy condition requires that the resource level never drops below 0, and the mean-payoff condition requires that the limit-average value of the resource consumption is within a threshold. While these two (energy and mean-payoff) classical conditions are equivalent for two-player games, we show that they differ for MDPs. We show that the problem of deciding whether a state is almost-sure winning (i.e., winning with probability 1) in energy parity MDPs is in NP ∩ coNP, while for mean-payoff parity MDPs, the problem is solvable in polynomial time, improving a recent PSPACE bound.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
location = {Warsaw, Poland},
pages = {206 -- 218},
publisher = {Springer},
title = {{Energy and mean-payoff parity Markov Decision Processes}},
doi = {10.1007/978-3-642-22993-0_21},
volume = {6907},
year = {2011},
}
@inproceedings{3346,
abstract = {We study Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) functions. We consider two different objectives, namely, expectation and satisfaction objectives. Given an MDP with k reward functions, in the expectation objective the goal is to maximize the expected limit-average value, and in the satisfaction objective the goal is to maximize the probability of runs such that the limit-average value stays above a given vector. We show that under the expectation objective, in contrast to the single-objective case, both randomization and memory are necessary for strategies, and that finite-memory randomized strategies are sufficient. Under the satisfaction objective, in contrast to the single-objective case, infinite memory is necessary for strategies, and that randomized memoryless strategies are sufficient for epsilon-approximation, for all epsilon > 0. We further prove that the decision problems for both expectation and satisfaction objectives can be solved in polynomial time and the trade-off curve (Pareto curve) can be epsilon-approximated in time polynomial in the size of the MDP and 1/epsilon, and exponential in the number of reward functions, for all epsilon > 0. Our results also reveal flaws in previous work for MDPs with multiple mean-payoff functions under the expectation objective, correct the flaws and obtain improved results.},
author = {Brázdil, Tomáš and Brožek, Václav and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín},
location = {Toronto, Canada},
publisher = {IEEE},
title = {{Two views on multiple mean payoff objectives in Markov Decision Processes}},
doi = {10.1109/LICS.2011.10},
year = {2011},
}
@inproceedings{3347,
abstract = {The class of ω-regular languages provides a robust specification language in verification. Every ω-regular condition can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens "eventually". Finitary liveness was proposed by Alur and Henzinger as a stronger formulation of liveness. It requires that there exists an unknown, fixed bound b such that something good happens within b transitions. In this work we consider automata with finitary acceptance conditions defined by finitary Büchi, parity and Streett languages. We study languages expressible by such automata: we give their topological complexity and present a regular-expression characterization. We compare the expressive power of finitary automata and give optimal algorithms for classical decision questions. We show that the finitary languages are Σ2-complete; we present a complete picture of the expressive power of various classes of automata with finitary and infinitary acceptance conditions; we show that the languages defined by finitary parity automata exactly characterize the star-free fragment of ωB-regular languages; and we show that emptiness is NLOGSPACE-complete and universality as well as language inclusion are PSPACE-complete for finitary parity and Streett automata.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
location = {Tarragona, Spain},
pages = {216 -- 226},
publisher = {Springer},
title = {{Finitary languages}},
doi = {10.1007/978-3-642-21254-3_16},
volume = {6638},
year = {2011},
}
@inproceedings{3348,
abstract = {We study synthesis of controllers for real-time systems, where the objective is to stay in a given safe set. The problem is solved by obtaining winning strategies in the setting of concurrent two-player timed automaton games with safety objectives. To prevent a player from winning by blocking time, we restrict each player to strategies that ensure that the player cannot be responsible for causing a zeno run. We construct winning strategies for the controller which require access only to (1) the system clocks (thus, controllers which require their own internal infinitely precise clocks are not necessary), and (2) a linear (in the number of clocks) number of memory bits. Precisely, we show that for safety objectives, a memory of size (3 · |C|+lg(|C|+1)) bits suffices for winning controller strategies, where C is the set of clocks of the timed automaton game, significantly improving the previous known exponential bound. We also settle the open question of whether winning region controller strategies require memory for safety objectives by showing with an example the necessity of memory for region strategies to win for safety objectives.},
author = {Chatterjee, Krishnendu and Prabhu, Vinayak},
location = {Chicago, USA},
pages = {221 -- 230},
publisher = {ACM},
title = {{Synthesis of memory efficient real time controllers for safety objectives}},
doi = {10.1145/1967701.1967734},
year = {2011},
}
@inproceedings{3349,
abstract = {Games on graphs provide a natural model for reactive non-terminating systems. In such games, the interaction of two players on an arena results in an infinite path that describes a run of the system. Different settings are used to model various open systems in computer science, as for instance turn-based or concurrent moves, and deterministic or stochastic transitions. In this paper, we are interested in turn-based games, and specifically in deterministic parity games and stochastic reachability games (also known as simple stochastic games). We present a simple, direct and efficient reduction from deterministic parity games to simple stochastic games: it yields an arena whose size is linear up to a logarithmic factor in size of the original arena.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
location = {Minori, Italy},
pages = {74 -- 86},
publisher = {EPTCS},
title = {{A reduction from parity games to simple stochastic games}},
doi = {10.4204/EPTCS.54.6},
volume = {54},
year = {2011},
}
@inproceedings{3350,
abstract = {A controller for a discrete game with ω-regular objectives requires attention if, intuitively, it requires measuring the state and switching from the current control action. Minimum attention controllers are preferable in modern shared implementations of cyber-physical systems because they produce the least burden on system resources such as processor time or communication bandwidth. We give algorithms to compute minimum attention controllers for ω-regular objectives in imperfect information discrete two-player games. We show a polynomial-time reduction from minimum attention controller synthesis to synthesis of controllers for mean-payoff parity objectives in games of incomplete information. This gives an optimal EXPTIME-complete synthesis algorithm. We show that the minimum attention controller problem is decidable for infinite state systems with finite bisimulation quotients. In particular, the problem is decidable for timed and rectangular automata.},
author = {Chatterjee, Krishnendu and Majumdar, Rupak},
editor = {Fahrenberg, Uli and Tripakis, Stavros},
location = {Aalborg, Denmark},
pages = {145 -- 159},
publisher = {Springer},
title = {{Minimum attention controller synthesis for omega regular objectives}},
doi = {10.1007/978-3-642-24310-3_11},
volume = {6919},
year = {2011},
}
@inproceedings{3351,
abstract = {In two-player games on graph, the players construct an infinite path through the game graph and get a reward computed by a payoff function over infinite paths. Over weighted graphs, the typical and most studied payoff functions compute the limit-average or the discounted sum of the rewards along the path. Besides their simple definition, these two payoff functions enjoy the property that memoryless optimal strategies always exist. In an attempt to construct other simple payoff functions, we define a class of payoff functions which compute an (infinite) weighted average of the rewards. This new class contains both the limit-average and the discounted sum functions, and we show that they are the only members of this class which induce memoryless optimal strategies, showing that there is essentially no other simple payoff functions.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Singh, Rohit},
editor = {Owe, Olaf and Steffen, Martin and Telle, Jan Arne},
location = {Oslo, Norway},
pages = {148 -- 159},
publisher = {Springer},
title = {{On memoryless quantitative objectives}},
doi = {10.1007/978-3-642-22953-4_13},
volume = {6914},
year = {2011},
}
@article{3352,
abstract = {Exploring the connection of biology with reactive systems to better understand living systems.},
author = {Fisher, Jasmin and Harel, David and Henzinger, Thomas A},
journal = {Communications of the ACM},
number = {10},
pages = {72 -- 82},
publisher = {ACM},
title = {{Biology as reactivity}},
doi = {10.1145/2001269.2001289},
volume = {54},
year = {2011},
}
@article{3353,
abstract = {Compositional theories are crucial when designing large and complex systems from smaller components. In this work we propose such a theory for synchronous concurrent systems. Our approach follows so-called interface theories, which use game-theoretic interpretations of composition and refinement. These are appropriate for systems with distinct inputs and outputs, and explicit conditions on inputs that must be enforced during composition. Our interfaces model systems that execute in an infinite sequence of synchronous rounds. At each round, a contract must be satisfied. The contract is simply a relation specifying the set of valid input/output pairs. Interfaces can be composed by parallel, serial or feedback composition. A refinement relation between interfaces is defined, and shown to have two main properties: (1) it is preserved by composition, and (2) it is equivalent to substitutability, namely, the ability to replace an interface by another one in any context. Shared refinement and abstraction operators, corresponding to greatest lower and least upper bounds with respect to refinement, are also defined. Input-complete interfaces, that impose no restrictions on inputs, and deterministic interfaces, that produce a unique output for any legal input, are discussed as special cases, and an interesting duality between the two classes is exposed. A number of illustrative examples are provided, as well as algorithms to compute compositions, check refinement, and so on, for finite-state interfaces.},
author = {Tripakis, Stavros and Lickly, Ben and Henzinger, Thomas A and Lee, Edward},
journal = {ACM Transactions on Programming Languages and Systems (TOPLAS)},
number = {4},
publisher = {ACM},
title = {{A theory of synchronous relational interfaces}},
doi = {10.1145/1985342.1985345},
volume = {33},
year = {2011},
}
@article{3354,
abstract = {We consider two-player games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine the successor state. We consider ω-regular winning conditions specified as parity objectives. Both players are allowed to use randomization when choosing their moves. We study the computation of the limit-winning set of states, consisting of the states where the sup-inf value of the game for player 1 is 1: in other words, a state is limit-winning if player 1 can ensure a probability of winning arbitrarily close to 1. We show that the limit-winning set can be computed in O(n2d+2) time, where n is the size of the game structure and 2d is the number of priorities (or colors). The membership problem of whether a state belongs to the limit-winning set can be decided in NP ∩ coNP. While this complexity is the same as for the simpler class of turn-based parity games, where in each state only one of the two players has a choice of moves, our algorithms are considerably more involved than those for turn-based games. This is because concurrent games do not satisfy two of the most fundamental properties of turn-based parity games. First, in concurrent games limit-winning strategies require randomization; and second, they require infinite memory.},
author = {Chatterjee, Krishnendu and De Alfaro, Luca and Henzinger, Thomas A},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {{Qualitative concurrent parity games}},
doi = {10.1145/1970398.1970404},
volume = {12},
year = {2011},
}
@inproceedings{3355,
abstract = {Byzantine Fault Tolerant (BFT) protocols aim to improve the reliability of distributed systems. They enable systems to tolerate arbitrary failures in a bounded number of nodes. BFT protocols are usually proven correct for certain safety and liveness properties. However, recent studies have shown that the performance of state-of-the-art BFT protocols decreases drastically in the presence of even a single malicious node. This motivates a formal quantitative analysis of BFT protocols to investigate their performance characteristics under different scenarios. We present HyPerf, a new hybrid methodology based on model checking and simulation techniques for evaluating the performance of BFT protocols. We build a transition system corresponding to a BFT protocol and systematically explore the set of behaviors allowed by the protocol. We associate certain timing information with different operations in the protocol, like cryptographic operations and message transmission. After an elaborate state exploration, we use the time information to evaluate the performance characteristics of the protocol using simulation techniques. We integrate our framework in Mace, a tool for building and verifying distributed systems. We evaluate the performance of PBFT using our framework. We describe two different use-cases of our methodology. For the benign operation of the protocol, we use the time information as random variables to compute the probability distribution of the execution times. In the presence of faults, we estimate the worst-case performance of the protocol for various attacks that can be employed by malicious nodes. Our results show the importance of hybrid techniques in systematically analyzing the performance of large-scale systems.},
author = {Halalai, Raluca and Henzinger, Thomas A and Singh, Vasu},
location = {Aachen, Germany},
pages = {255 -- 264},
publisher = {IEEE},
title = {{Quantitative evaluation of BFT protocols}},
doi = {10.1109/QEST.2011.40},
year = {2011},
}
@inproceedings{3356,
abstract = {There is recently a significant effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions, aiming for a general and flexible framework for quantitative-oriented specifications. In the heart of quantitative objectives lies the accumulation of values along a computation. It is either the accumulated summation, as with the energy objectives, or the accumulated average, as with the mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point of time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire computation. We study the border of decidability for extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities by prefix-accumulation assertions and extending LTL with path-accumulation assertions, result in temporal logics whose model-checking problem is decidable. The extended logics allow to significantly extend the currently known energy and mean-payoff objectives. Moreover, the prefix-accumulation assertions may be refined with "controlled-accumulation", allowing, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that the fragment we point to is, in a sense, the maximal logic whose extension with prefix-accumulation assertions permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, and in particular CTL and LTL, makes the problem undecidable.},
author = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A and Kupferman, Orna},
location = {Toronto, Canada},
publisher = {IEEE},
title = {{Temporal specifications with accumulative values}},
doi = {10.1109/LICS.2011.33},
year = {2011},
}
@inproceedings{3357,
abstract = {We consider two-player graph games whose objectives are request-response conditions, i.e., conjunctions of conditions of the form "if a state with property Rq is visited, then later a state with property Rp is visited". The winner of such games can be decided in EXPTIME and the problem is known to be NP-hard. In this paper, we close this gap by showing that this problem is, in fact, EXPTIME-complete. We show that the problem becomes PSPACE-complete if we only consider games played on DAGs, and NP-complete or PTIME-complete if there is only one player (depending on whether he wants to enforce or spoil the request-response condition). We also present near-optimal bounds on the memory needed to design winning strategies for each player, in each case.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Horn, Florian},
editor = {Dediu, Adrian-Horia and Inenaga, Shunsuke and Martín-Vide, Carlos},
location = {Tarragona, Spain},
pages = {227 -- 237},
publisher = {Springer},
title = {{The complexity of request-response games}},
doi = {10.1007/978-3-642-21254-3_17},
volume = {6638},
year = {2011},
}
@inproceedings{3358,
abstract = {The static scheduling problem often arises as a fundamental problem in real-time systems and grid computing. We consider the problem of statically scheduling a large job expressed as a task graph on a large number of computing nodes, such as a data center. This paper solves the large-scale static scheduling problem using abstraction refinement, a technique commonly used in formal verification to efficiently solve computationally hard problems. A scheduler based on abstraction refinement first attempts to solve the scheduling problem with abstract representations of the job and the computing resources. As abstract representations are generally small, the scheduling can be done reasonably fast. If the obtained schedule does not meet specified quality conditions (like data center utilization or schedule makespan) then the scheduler refines the job and data center abstractions and, again solves the scheduling problem. We develop different schedulers based on abstraction refinement. We implemented these schedulers and used them to schedule task graphs from various computing domains on simulated data centers with realistic topologies. We compared the speed of scheduling and the quality of the produced schedules with our abstraction refinement schedulers against a baseline scheduler that does not use any abstraction. We conclude that abstraction refinement techniques give a significant speed-up compared to traditional static scheduling heuristics, at a reasonable cost in the quality of the produced schedules. We further used our static schedulers in an actual system that we deployed on Amazon EC2 and compared it against the Hadoop dynamic scheduler for large MapReduce jobs. Our experiments indicate that there is great potential for static scheduling techniques.},
author = {Henzinger, Thomas A and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
location = {Salzburg, Austria},
pages = {329 -- 342},
publisher = {ACM},
title = {{Scheduling large jobs by abstraction refinement}},
doi = {10.1145/1966445.1966476},
year = {2011},
}
@inproceedings{3359,
abstract = {Motivated by improvements in constraint-solving technology and by the increase of routinely available computational power, partial-program synthesis is emerging as an effective approach for increasing programmer productivity. The goal of the approach is to allow the programmer to specify a part of her intent imperatively (that is, give a partial program) and a part of her intent declaratively, by specifying which conditions need to be achieved or maintained. The task of the synthesizer is to construct a program that satisfies the specification. As an example, consider a partial program where threads access shared data without using any synchronization mechanism, and a declarative specification that excludes data races and deadlocks. The task of the synthesizer is then to place locks into the program code in order for the program to meet the specification.
In this paper, we argue that quantitative objectives are needed in partial-program synthesis in order to produce higher-quality programs, while enabling simpler specifications. Returning to the example, the synthesizer could construct a naive solution that uses one global lock for shared data. This can be prevented either by constraining the solution space further (which is error-prone and partly defeats the point of synthesis), or by optimizing a quantitative objective that models performance. Other quantitative notions useful in synthesis include fault tolerance, robustness, resource (memory, power) consumption, and information flow.},
author = {Cerny, Pavol and Henzinger, Thomas A},
location = {Taipei, Taiwan},
pages = {149 -- 154},
publisher = {ACM},
title = {{From boolean to quantitative synthesis}},
doi = {10.1145/2038642.2038666},
year = {2011},
}
@inproceedings{3360,
abstract = {A discounted-sum automaton (NDA) is a nondeterministic finite automaton with edge weights, which values a run by the discounted sum of visited edge weights. More precisely, the weight in the i-th position of the run is divided by lambda^i, where the discount factor lambda is a fixed rational number greater than 1. Discounted summation is a common and useful measuring scheme, especially for infinite sequences, which reflects the assumption that earlier weights are more important than later weights. Determinizing automata is often essential, for example, in formal verification, where there are polynomial algorithms for comparing two deterministic NDAs, while the equivalence problem for NDAs is not known to be decidable. Unfortunately, however, discounted-sum automata are, in general, not determinizable: it is currently known that for every rational discount factor 1 < lambda < 2, there is an NDA with lambda (denoted lambda-NDA) that cannot be determinized. We provide positive news, showing that every NDA with an integral factor is determinizable. We also complete the picture by proving that the integers characterize exactly the discount factors that guarantee determinizability: we show that for every non-integral rational factor lambda, there is a nondeterminizable lambda-NDA. Finally, we prove that the class of NDAs with integral discount factors enjoys closure under the algebraic operations min, max, addition, and subtraction, which is not the case for general NDAs nor for deterministic NDAs. This shows that for integral discount factors, the class of NDAs forms an attractive specification formalism in quantitative formal verification. All our results hold equally for automata over finite words and for automata over infinite words. },
author = {Boker, Udi and Henzinger, Thomas A},
location = {Bergen, Norway},
pages = {82 -- 96},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Determinizing discounted-sum automata}},
doi = {10.4230/LIPIcs.CSL.2011.82},
volume = {12},
year = {2011},
}
@inproceedings{3361,
abstract = {In this paper, we investigate the computational complexity of quantitative information flow (QIF) problems. Information-theoretic quantitative relaxations of noninterference (based on Shannon entropy) have been introduced to enable more fine-grained reasoning about programs in situations where limited information flow is acceptable. The QIF bounding problem asks whether the information flow in a given program is bounded by a constant $d$. Our first result is that the QIF bounding problem is PSPACE-complete. The QIF memoryless synthesis problem asks whether it is possible to resolve nondeterministic choices in a given partial program in such a way that in the resulting deterministic program, the quantitative information flow is bounded by a given constant $d$. Our second result is that the QIF memoryless synthesis problem is EXPTIME-complete. The QIF memoryless synthesis problem generalizes to QIF general synthesis problem which does not impose the memoryless requirement (that is, by allowing the synthesized program to have more variables than the original partial program). Our third result is that the QIF general synthesis problem is EXPTIME-hard.},
author = {Cerny, Pavol and Chatterjee, Krishnendu and Henzinger, Thomas A},
location = {Cernay-la-Ville, France},
pages = {205 -- 217},
publisher = {IEEE},
title = {{The complexity of quantitative information flow problems}},
doi = {10.1109/CSF.2011.21},
year = {2011},
}
@inproceedings{3362,
abstract = {State-transition systems communicating by shared variables have been the underlying model of choice for applications of model checking. Such formalisms, however, have difficulty with modeling process creation or death and communication reconfigurability. Here, we introduce “dynamic reactive modules” (DRM), a state-transition modeling formalism that supports dynamic reconfiguration and creation/death of processes. The resulting formalism supports two types of variables, data variables and reference variables. Reference variables enable changing the connectivity between processes and referring to instances of processes. We show how this new formalism supports parallel composition and refinement through trace containment. DRM provide a natural language for modeling (and ultimately reasoning about) biological systems and multiple threads communicating through shared variables.},
author = {Fisher, Jasmin and Henzinger, Thomas A and Nickovic, Dejan and Piterman, Nir and Singh, Anmol and Vardi, Moshe},
location = {Aachen, Germany},
pages = {404 -- 418},
publisher = {Springer},
title = {{Dynamic reactive modules}},
doi = {10.1007/978-3-642-23217-6_27},
volume = {6901},
year = {2011},
}
@unpublished{3363,
abstract = {We consider probabilistic automata on infinite words with acceptance defined by safety, reachability, Büchi, coBüchi, and limit-average conditions. We consider quantitative and qualitative decision problems. We present extensions and adaptations of proofs for probabilistic finite automata and present a complete characterization of the decidability and undecidability frontier of the quantitative and qualitative decision problems for probabilistic automata on infinite words.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Tracol, Mathieu},
pages = {19},
publisher = {ArXiv},
title = {{The decidability frontier for probabilistic automata on infinite words}},
year = {2011},
}