@inproceedings{3322,
abstract = {We study multi-label prediction for structured output spaces, a problem that occurs, for example, in object detection in images, secondary structure prediction in computational biology, and graph matching with symmetries. Conventional multi-label classification techniques are typically not applicable in this situation, because they require explicit enumeration of the label space, which is infeasible in case of structured outputs. Relying on techniques originally designed for single-label structured prediction, in particular structured support vector machines, results in reduced prediction accuracy, or leads to infeasible optimization problems. In this work we derive a maximum-margin training formulation for multi-label structured prediction that remains computationally tractable while achieving high prediction accuracy. It also shares most beneficial properties with single-label maximum-margin approaches, in particular a formulation as a convex optimization problem, efficient working set training, and PAC-Bayesian generalization bounds.},
author = {Lampert, Christoph},
booktitle = {NIPS: Neural Information Processing Systems},
publisher = {Neural Information Processing Systems},
title = {{Maximum margin multi label structured prediction}},
year = {2011},
}
@inproceedings{3323,
abstract = {We present a new decidable logic called TREX for expressing constraints about imperative tree data structures. In particular, TREX supports a transitive closure operator that can express reachability constraints, which often appear in data structure invariants. We show that our logic is closed under weakest precondition computation, which enables its use for automated software verification. We further show that satisfiability of formulas in TREX is decidable in NP. The low complexity makes it an attractive alternative to more expensive logics such as monadic second-order logic (MSOL) over trees, which have been traditionally used for reasoning about tree data structures.},
author = {Wies, Thomas and Muñiz, Marco and Kuncak, Viktor},
location = {Wrocław, Poland},
pages = {476 -- 491},
publisher = {Springer},
title = {{An efficient decision procedure for imperative tree data structures}},
doi = {10.1007/978-3-642-22438-6_36},
volume = {6803},
year = {2011},
}
@inproceedings{3324,
abstract = {Automated termination provers often use the following schema to prove that a program terminates: construct a relational abstraction of the program's transition relation and then show that the relational abstraction is well-founded. The focus of current tools has been on developing sophisticated techniques for constructing the abstractions while relying on known decidable logics (such as linear arithmetic) to express them. We believe we can significantly increase the class of programs that are amenable to automated termination proofs by identifying more expressive decidable logics for reasoning about well-founded relations. We therefore present a new decision procedure for reasoning about multiset orderings, which are among the most powerful orderings used to prove termination. We show that, using our decision procedure, one can automatically prove termination of natural abstractions of programs.},
author = {Piskac, Ruzica and Wies, Thomas},
editor = {Jhala, Ranjit and Schmidt, David},
location = {Texas, USA},
pages = {371 -- 386},
publisher = {Springer},
title = {{Decision procedures for automating termination proofs}},
doi = {10.1007/978-3-642-18275-4_26},
volume = {6538},
year = {2011},
}
@inproceedings{3325,
abstract = {We introduce streaming data string transducers that map input data strings to output data strings in a single left-to-right pass in linear time. Data strings are (unbounded) sequences of data values, tagged with symbols from a finite set, over a potentially infinite data domain that supports only the operations of equality and ordering. The transducer uses a finite set of states, a finite set of variables ranging over the data domain, and a finite set of variables ranging over data strings. At every step, it can make decisions based on the next input symbol, updating its state, remembering the input data value in its data variables, and updating data-string variables by concatenating data-string variables and new symbols formed from data variables, while avoiding duplication. We establish that the problems of checking functional equivalence of two streaming transducers, and of checking whether a streaming transducer satisfies pre/post verification conditions specified by streaming acceptors over input/output data-strings, are in PSPACE. We identify a class of imperative and a class of functional programs, manipulating lists of data items, which can be effectively translated to streaming data-string transducers. The imperative programs dynamically modify a singly-linked heap by changing next-pointers of heap-nodes and by adding new nodes. The main restriction specifies how the next-pointers can be used for traversal. We also identify an expressively equivalent fragment of functional programs that traverse a list using syntactically restricted recursive calls. Our results lead to algorithms for assertion checking and for checking functional equivalence of two programs, written possibly in different programming styles, for commonly used routines such as insert, delete, and reverse.},
author = {Alur, Rajeev and Cerny, Pavol},
location = {Texas, USA},
number = {1},
pages = {599 -- 610},
publisher = {ACM},
title = {{Streaming transducers for algorithmic verification of single pass list processing programs}},
doi = {10.1145/1926385.1926454},
volume = {46},
year = {2011},
}
@inproceedings{3326,
abstract = {Weighted automata map input words to numerical values. Applications of weighted automata include formal verification of quantitative properties, as well as text, speech, and image processing. A weighted automaton is defined with respect to a semiring. For the tropical semiring, the weight of a run is the sum of the weights of the transitions taken along the run, and the value of a word is the minimal weight of an accepting run on it. In the 90’s, Krob studied the decidability of problems on rational series defined with respect to the tropical semiring. Rational series are strongly related to weighted automata, and Krob’s results apply to them. In particular, it follows from Krob’s results that the universality problem (that is, deciding whether the values of all words are below some threshold) is decidable for weighted automata defined with respect to the tropical semiring with domain ℕ ∪ {∞}, and that the equality problem is undecidable when the domain is ℤ ∪ {∞}. In this paper we continue the study of the borders of decidability in weighted automata, describe alternative and direct proofs of the above results, and tighten them further. Unlike the proofs of Krob, which are algebraic in their nature, our proofs stay in the terrain of state machines, and the reduction is from the halting problem of a two-counter machine. This enables us to significantly simplify Krob’s reasoning, make the undecidability result accessible to the automata-theoretic community, and strengthen it to apply already to a very simple class of automata: all the states are accepting, there are no initial nor final weights, and all the weights on the transitions are from the set {−1, 0, 1}. The fact we work directly with the automata enables us to tighten also the decidability results and to show that the universality problem for weighted automata defined with respect to the tropical semiring with domain ℕ ∪ {∞}, and in fact even with domain ℚ≥0 ∪ {∞}, is PSPACE-complete.
Our results thus draw a sharper picture about the decidability of decision problems for weighted automata, in both the front of containment vs. universality and the front of the ℕ ∪ {∞} vs. the ℤ ∪ {∞} domains.},
author = {Almagor, Shaull and Boker, Udi and Kupferman, Orna},
location = {Taipei, Taiwan},
pages = {482 -- 491},
publisher = {Springer},
title = {{What’s decidable about weighted automata}},
doi = {10.1007/978-3-642-24372-1_37},
volume = {6996},
year = {2011},
}
@inproceedings{3328,
abstract = {We report on a generic uni- and bivariate algebraic kernel that is publicly available with CGAL 3.7. It comprises complete, correct, though efficient state-of-the-art implementations on polynomials, roots of polynomial systems, and the support to analyze algebraic curves defined by bivariate polynomials. The kernel design is generic, that is, various number types and substeps can be exchanged. It is accompanied with a ready-to-use interface to enable arrangements induced by algebraic curves, that have already been used as basis for various geometric applications, as arrangements on Dupin cyclides or the triangulation of algebraic surfaces. We present two novel applications: arrangements of rotated algebraic curves and Boolean set operations on polygons bounded by segments of algebraic curves. We also provide experiments showing that our general implementation is competitive and even often clearly outperforms existing implementations that are explicitly tailored for specific types of non-linear curves that are available in CGAL.},
author = {Berberich, Eric and Hemmer, Michael and Kerber, Michael},
location = {Paris, France},
pages = {179 -- 186},
publisher = {ACM},
title = {{A generic algebraic kernel for non-linear geometric applications}},
doi = {10.1145/1998196.1998224},
year = {2011},
}
@inproceedings{3329,
abstract = {We consider the offset-deconstruction problem: Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance ε in Hausdorff distance, as the Minkowski sum of another polygonal shape P with a disk of fixed radius? If it does, we also seek a preferably simple-looking solution shape P; then, P's offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give an O(n log n)-time exact decision algorithm that handles any polygonal shape, assuming the real-RAM model of computation. An alternative algorithm, based purely on rational arithmetic, answers the same deconstruction problem, up to an uncertainty parameter δ, and its running time depends on the parameter δ (in addition to the other input parameters: n, ε and the radius of the disk). If the input shape is found to be approximable, the rational-arithmetic algorithm also computes an approximate solution shape for the problem. For convex shapes, the complexity of the exact decision algorithm drops to O(n), which is also the time required to compute a solution shape P with at most one more vertex than a vertex-minimal one. Our study is motivated by applications from two different domains. However, since the offset operation has numerous uses, we anticipate that the reverse question that we study here will be still more broadly applicable. We present results obtained with our implementation of the rational-arithmetic algorithm.},
author = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
booktitle = {Proceedings of the twenty-seventh annual symposium on Computational geometry},
location = {Paris, France},
pages = {187 -- 196},
publisher = {ACM},
title = {{Deconstructing approximate offsets}},
doi = {10.1145/1998196.1998225},
year = {2011},
}
@inproceedings{3330,
abstract = {We consider the problem of approximating all real roots of a square-free polynomial f. Given isolating intervals, our algorithm refines each of them to a width at most 2^{-L}, that is, each of the roots is approximated to L bits after the binary point. Our method provides a certified answer for arbitrary real polynomials, only requiring finite approximations of the polynomial coefficients and choosing a suitable working precision adaptively. In this way, we get a correct algorithm that is simple to implement and practically efficient. Our algorithm uses the quadratic interval refinement method; we adapt that method to be able to cope with inaccuracies when evaluating f, without sacrificing its quadratic convergence behavior. We prove a bound on the bit complexity of our algorithm in terms of degree, coefficient size and discriminant. Our bound improves previous work on integer polynomials by a factor of deg f and essentially matches best known theoretical bounds on root approximation which are obtained by very sophisticated algorithms.},
author = {Kerber, Michael and Sagraloff, Michael},
location = {California, USA},
pages = {209 -- 216},
publisher = {ACM},
title = {{Root refinement for real polynomials}},
doi = {10.1145/1993886.1993920},
year = {2011},
}
@article{3332,
abstract = {Given an algebraic hypersurface O in ℝ^d, how many simplices are necessary for a simplicial complex isotopic to O? We address this problem and the variant where all vertices of the complex must lie on O. We give asymptotically tight worst-case bounds for algebraic plane curves. Our results gradually improve known bounds in higher dimensions; however, the question for tight bounds remains unsolved for d ≥ 3.},
author = {Kerber, Michael and Sagraloff, Michael},
journal = {Graphs and Combinatorics},
number = {3},
pages = {419 -- 430},
publisher = {Springer},
title = {{A note on the complexity of real algebraic hypersurfaces}},
doi = {10.1007/s00373-011-1020-7},
volume = {27},
year = {2011},
}
@article{3334,
author = {Edelsbrunner, Herbert and Pach, János and Ziegler, Günter},
journal = {Discrete & Computational Geometry},
number = {1},
pages = {1 -- 2},
publisher = {Springer},
title = {{Letter from the new editors-in-chief}},
doi = {10.1007/s00454-010-9313-9},
volume = {45},
year = {2011},
}
@incollection{3335,
abstract = {We study the topology of the Megaparsec Cosmic Web in terms of the scale-dependent Betti numbers, which formalize the topological information content of the cosmic mass distribution. While the Betti numbers do not fully quantify topology, they extend the information beyond conventional cosmological studies of topology in terms of genus and Euler characteristic. The richer information content of Betti numbers goes along the availability of fast algorithms to compute them. For continuous density fields, we determine the scale-dependence of Betti numbers by invoking the cosmologically familiar filtration of sublevel or superlevel sets defined by density thresholds. For the discrete galaxy distribution, however, the analysis is based on the alpha shapes of the particles. These simplicial complexes constitute an ordered sequence of nested subsets of the Delaunay tessellation, a filtration defined by the scale parameter, α. As they are homotopy equivalent to the sublevel sets of the distance field, they are an excellent tool for assessing the topological structure of a discrete point distribution. In order to develop an intuitive understanding for the behavior of Betti numbers as a function of α, and their relation to the morphological patterns in the Cosmic Web, we first study them within the context of simple heuristic Voronoi clustering models. These can be tuned to consist of specific morphological elements of the Cosmic Web, i.e. clusters, filaments, or sheets. To elucidate the relative prominence of the various Betti numbers in different stages of morphological evolution, we introduce the concept of alpha tracks. Subsequently, we address the topology of structures emerging in the standard LCDM scenario and in cosmological scenarios with alternative dark energy content. The evolution of the Betti numbers is shown to reflect the hierarchical evolution of the Cosmic Web. 
We also demonstrate that the scale-dependence of the Betti numbers yields a promising measure of cosmological parameters, with a potential to help in determining the nature of dark energy and to probe primordial non-Gaussianities. We also discuss the expected Betti numbers as a function of the density threshold for superlevel sets of a Gaussian random field. Finally, we introduce the concept of persistent homology. It measures scale levels of the mass distribution and allows us to separate small from large scale features. Within the context of the hierarchical cosmic structure formation, persistence provides a natural formalism for a multiscale topology study of the Cosmic Web.},
author = {Van De Weygaert, Rien and Vegter, Gert and Edelsbrunner, Herbert and Jones, Bernard and Pranav, Pratyush and Park, Changbom and Hellwing, Wojciech and Eldering, Bob and Kruithof, Nico and Bos, Patrick and Hidding, Johan and Feldbrugge, Job and Ten Have, Eline and Van Engelen, Matti and Caroli, Manuel and Teillaud, Monique},
booktitle = {Transactions on Computational Science XIV},
editor = {Gavrilova, Marina and Tan, Kenneth and Mostafavi, Mir},
pages = {60 -- 101},
publisher = {Springer},
title = {{Alpha, Betti and the Megaparsec Universe: On the topology of the Cosmic Web}},
doi = {10.1007/978-3-642-25249-5_3},
volume = {6970},
year = {2011},
}
@inproceedings{3336,
abstract = {We introduce TopoCut: a new way to integrate knowledge about topological properties (TPs) into random field image segmentation model. Instead of including TPs as additional constraints during minimization of the energy function, we devise an efficient algorithm for modifying the unary potentials such that the resulting segmentation is guaranteed with the desired properties. Our method is more flexible in the sense that it handles more topology constraints than previous methods, which were only able to enforce pairwise or global connectivity. In particular, our method is very fast, making it for the first time possible to enforce global topological properties in practical image segmentation tasks.},
author = {Chen, Chao and Freedman, Daniel and Lampert, Christoph},
booktitle = {CVPR: Computer Vision and Pattern Recognition},
location = {Colorado Springs, CO, USA},
pages = {2089 -- 2096},
publisher = {IEEE},
title = {{Enforcing topological constraints in random field image segmentation}},
doi = {10.1109/CVPR.2011.5995503},
year = {2011},
}
@inproceedings{3337,
abstract = {Playing table tennis is a difficult task for robots, especially due to their limitations of acceleration. A key bottleneck is the amount of time needed to reach the desired hitting position and velocity of the racket for returning the incoming ball. Here, it often does not suffice to simply extrapolate the ball's trajectory after the opponent returns it but more information is needed. Humans are able to predict the ball's trajectory based on the opponent's moves and, thus, have a considerable advantage. Hence, we propose to incorporate an anticipation system into robot table tennis players, which enables the robot to react earlier while the opponent is performing the striking movement. Based on visual observation of the opponent's racket movement, the robot can predict the aim of the opponent and adjust its movement generation accordingly. The policies for deciding how and when to react are obtained by reinforcement learning. We conduct experiments with an existing robot player to show that the learned reaction policy can significantly improve the performance of the overall system.},
author = {Wang, Zhikun and Lampert, Christoph and Mülling, Katharina and Schölkopf, Bernhard and Peters, Jan},
location = {San Francisco, USA},
pages = {332 -- 337},
publisher = {IEEE},
title = {{Learning anticipation policies for robot table tennis}},
doi = {10.1109/IROS.2011.6094892},
year = {2011},
}
@unpublished{3338,
abstract = {We consider 2-player games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine the successor state. We study concurrent games with ω-regular winning conditions specified as parity objectives. We consider the qualitative analysis problems: the computation of the almost-sure and limit-sure winning set of states, where player 1 can ensure to win with probability 1 and with probability arbitrarily close to 1, respectively. In general the almost-sure and limit-sure winning strategies require both infinite-memory as well as infinite-precision (to describe probabilities). We study the bounded-rationality problem for qualitative analysis of concurrent parity games, where the strategy set for player 1 is restricted to bounded-resource strategies. In terms of precision, strategies can be deterministic, uniform, finite-precision or infinite-precision; and in terms of memory, strategies can be memoryless, finite-memory or infinite-memory. We present a precise and complete characterization of the qualitative winning sets for all combinations of classes of strategies. In particular, we show that uniform memoryless strategies are as powerful as finite-precision infinite-memory strategies, and infinite-precision memoryless strategies are as powerful as infinite-precision finite-memory strategies. We show that the winning sets can be computed in O(n^{2d+3}) time, where n is the size of the game structure and 2d is the number of priorities (or colors), and our algorithms are symbolic. The membership problem of whether a state belongs to a winning set can be decided in NP ∩ coNP.
While this complexity is the same as for the simpler class of turn-based parity games, where in each state only one of the two players has a choice of moves, our algorithms, that are obtained by characterization of the winning sets as μ-calculus formulas, are considerably more involved than those for turn-based games.},
author = {Chatterjee, Krishnendu},
booktitle = {arXiv},
pages = {1 -- 51},
publisher = {ArXiv},
title = {{Bounded rationality in concurrent parity games}},
year = {2011},
}
@unpublished{3339,
abstract = {Turn-based stochastic games and its important subclass Markov decision processes (MDPs) provide models for systems with both probabilistic and nondeterministic behaviors. We consider turn-based stochastic games with two classical quantitative objectives: discounted-sum and long-run average objectives. The game models and the quantitative objectives are widely used in probabilistic verification, planning, optimal inventory control, network protocol and performance analysis. Games and MDPs that model realistic systems often have very large state spaces, and probabilistic abstraction techniques are necessary to handle the state-space explosion. The commonly used full-abstraction techniques do not yield space-savings for systems that have many states with similar value, but does not necessarily have similar transition structure. A semi-abstraction technique, namely Magnifying-lens abstractions (MLA), that clusters states based on value only, disregarding differences in their transition relation was proposed for qualitative objectives (reachability and safety objectives). In this paper we extend the MLA technique to solve stochastic games with discounted-sum and long-run average objectives. We present the MLA technique based abstraction-refinement algorithm for stochastic games and MDPs with discounted-sum objectives. For long-run average objectives, our solution works for all MDPs and a sub-class of stochastic games where every state has the same value. },
author = {Chatterjee, Krishnendu and De Alfaro, Luca and Roy, Pritam},
booktitle = {arXiv},
pages = {17},
publisher = {ArXiv},
title = {{Magnifying lens abstraction for stochastic games with discounted and long-run average objectives}},
year = {2011},
}
@inproceedings{3342,
abstract = {We consider Markov decision processes (MDPs) with ω-regular specifications given as parity objectives. We consider the problem of computing the set of almost-sure winning states from where the objective can be ensured with probability 1. The algorithms for the computation of the almost-sure winning set for parity objectives iteratively use the solutions for the almost-sure winning set for Büchi objectives (a special case of parity objectives). Our contributions are as follows: First, we present the first subquadratic symbolic algorithm to compute the almost-sure winning set for MDPs with Büchi objectives; our algorithm takes O(n·√m) symbolic steps as compared to the previous known algorithm that takes O(n^2) symbolic steps, where n is the number of states and m is the number of edges of the MDP. In practice MDPs often have constant out-degree, and then our symbolic algorithm takes O(n·√n) symbolic steps, as compared to the previous known O(n^2) symbolic steps algorithm. Second, we present a new algorithm, namely win-lose algorithm, with the following two properties: (a) the algorithm iteratively computes subsets of the almost-sure winning set and its complement, as compared to all previous algorithms that discover the almost-sure winning set upon termination; and (b) requires O(n·√K) symbolic steps, where K is the maximal number of edges of strongly connected components (scc’s) of the MDP. The win-lose algorithm requires symbolic computation of scc’s. Third, we improve the algorithm for symbolic scc computation; the previous known algorithm takes linear symbolic steps, and our new algorithm improves the constants associated with the linear number of steps. In the worst case the previous known algorithm takes 5·n symbolic steps, whereas our new algorithm takes 4·n symbolic steps.},
author = {Chatterjee, Krishnendu and Henzinger, Monika and Joglekar, Manas and Shah, Nisarg},
editor = {Gopalakrishnan, Ganesh and Qadeer, Shaz},
location = {Snowbird, USA},
pages = {260 -- 276},
publisher = {Springer},
title = {{Symbolic algorithms for qualitative analysis of Markov decision processes with Büchi objectives}},
doi = {10.1007/978-3-642-22110-1_21},
volume = {6806},
year = {2011},
}
@inproceedings{3343,
abstract = {We present faster and dynamic algorithms for the following problems arising in probabilistic verification: Computation of the maximal end-component (mec) decomposition of Markov decision processes (MDPs), and of the almost sure winning set for reachability and parity objectives in MDPs. We achieve the following running time for static algorithms in MDPs with graphs of n vertices and m edges: (1) O(m · min{ √m, n^{2/3} }) for the mec decomposition, improving the longstanding O(m·n) bound; (2) O(m·n^{2/3}) for reachability objectives, improving the previous O(m · √m) bound for m > n^{4/3}; and (3) O(m · min{ √m, n^{2/3} } · log(d)) for parity objectives with d priorities, improving the previous O(m · √m · d) bound. We also give incremental and decremental algorithms in linear time for mec decomposition and reachability objectives and O(m · log d) time for parity objectives.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
location = {San Francisco, USA},
pages = {1318 -- 1336},
publisher = {SIAM},
title = {{Faster and dynamic algorithms for maximal end component decomposition and related graph problems in probabilistic verification}},
doi = {10.1137/1.9781611973082.101},
year = {2011},
}
@inproceedings{3344,
abstract = {Games played on graphs provide the mathematical framework to analyze several important problems in computer science as well as mathematics, such as the synthesis problem of Church, model checking of open reactive systems and many others. On the basis of mode of interaction of the players these games can be classified as follows: (a) turn-based (players make moves in turns); and (b) concurrent (players make moves simultaneously). On the basis of the information available to the players these games can be classified as follows: (a) perfect-information (players have perfect view of the game); and (b) partial-information (players have partial view of the game). In this talk we will consider all these classes of games with reachability objectives, where the goal of one player is to reach a set of target vertices of the graph, and the goal of the opponent player is to prevent the player from reaching the target. We will survey the results for various classes of games, and the results range from linear time decision algorithms to EXPTIME-complete problems to undecidable problems.},
author = {Chatterjee, Krishnendu},
editor = {Delzanno, Giorgo and Potapov, Igor},
location = {Genoa, Italy},
pages = {1 -- 1},
publisher = {Springer},
title = {{Graph games with reachability objectives}},
doi = {10.1007/978-3-642-24288-5_1},
volume = {6945},
year = {2011},
}
@inproceedings{3345,
abstract = {We consider Markov Decision Processes (MDPs) with mean-payoff parity and energy parity objectives. In system design, the parity objective is used to encode ω-regular specifications, and the mean-payoff and energy objectives can be used to model quantitative resource constraints. The energy condition requires that the resource level never drops below 0, and the mean-payoff condition requires that the limit-average value of the resource consumption is within a threshold. While these two (energy and mean-payoff) classical conditions are equivalent for two-player games, we show that they differ for MDPs. We show that the problem of deciding whether a state is almost-sure winning (i.e., winning with probability 1) in energy parity MDPs is in NP ∩ coNP, while for mean-payoff parity MDPs, the problem is solvable in polynomial time, improving a recent PSPACE bound.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
location = {Warsaw, Poland},
pages = {206 -- 218},
publisher = {Springer},
title = {{Energy and mean-payoff parity Markov Decision Processes}},
doi = {10.1007/978-3-642-22993-0_21},
volume = {6907},
year = {2011},
}
@inproceedings{3346,
abstract = {We study Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) functions. We consider two different objectives, namely, expectation and satisfaction objectives. Given an MDP with k reward functions, in the expectation objective the goal is to maximize the expected limit-average value, and in the satisfaction objective the goal is to maximize the probability of runs such that the limit-average value stays above a given vector. We show that under the expectation objective, in contrast to the single-objective case, both randomization and memory are necessary for strategies, and that finite-memory randomized strategies are sufficient. Under the satisfaction objective, in contrast to the single-objective case, infinite memory is necessary for strategies, and that randomized memoryless strategies are sufficient for epsilon-approximation, for all epsilon > 0. We further prove that the decision problems for both expectation and satisfaction objectives can be solved in polynomial time and the trade-off curve (Pareto curve) can be epsilon-approximated in time polynomial in the size of the MDP and 1/epsilon, and exponential in the number of reward functions, for all epsilon > 0. Our results also reveal flaws in previous work for MDPs with multiple mean-payoff functions under the expectation objective, correct the flaws and obtain improved results.},
author = {Brázdil, Tomáš and Brožek, Václav and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín},
location = {Toronto, Canada},
publisher = {IEEE},
title = {{Two views on multiple mean payoff objectives in Markov Decision Processes}},
doi = {10.1109/LICS.2011.10},
year = {2011},
}
@inproceedings{3347,
abstract = {The class of omega-regular languages provides a robust specification language in verification. Every omega-regular condition can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens "eventually". Finitary liveness was proposed by Alur and Henzinger as a stronger formulation of liveness. It requires that there exists an unknown, fixed bound b such that something good happens within b transitions. In this work we consider automata with finitary acceptance conditions defined by finitary Büchi, parity and Streett languages. We study languages expressible by such automata: we give their topological complexity and present a regular-expression characterization. We compare the expressive power of finitary automata and give optimal algorithms for classical decision questions. We show that the finitary languages are Sigma 2-complete; we present a complete picture of the expressive power of various classes of automata with finitary and infinitary acceptance conditions; we show that the languages defined by finitary parity automata exactly characterize the star-free fragment of omega B-regular languages; and we show that emptiness is NLOGSPACE-complete and universality as well as language inclusion are PSPACE-complete for finitary parity and Streett automata.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
location = {Tarragona, Spain},
pages = {216 -- 226},
publisher = {Springer},
title = {{Finitary languages}},
doi = {10.1007/978-3-642-21254-3_16},
volume = {6638},
year = {2011},
}
@inproceedings{3348,
abstract = {We study synthesis of controllers for real-time systems, where the objective is to stay in a given safe set. The problem is solved by obtaining winning strategies in the setting of concurrent two-player timed automaton games with safety objectives. To prevent a player from winning by blocking time, we restrict each player to strategies that ensure that the player cannot be responsible for causing a zeno run. We construct winning strategies for the controller which require access only to (1) the system clocks (thus, controllers which require their own internal infinitely precise clocks are not necessary), and (2) a linear (in the number of clocks) number of memory bits. Precisely, we show that for safety objectives, a memory of size (3 · |C|+lg(|C|+1)) bits suffices for winning controller strategies, where C is the set of clocks of the timed automaton game, significantly improving the previous known exponential bound. We also settle the open question of whether winning region controller strategies require memory for safety objectives by showing with an example the necessity of memory for region strategies to win for safety objectives.},
author = {Chatterjee, Krishnendu and Prabhu, Vinayak},
location = {Chicago, USA},
pages = {221 -- 230},
publisher = {ACM},
title = {{Synthesis of memory efficient real time controllers for safety objectives}},
doi = {10.1145/1967701.1967734},
year = {2011},
}
@inproceedings{3349,
abstract = {Games on graphs provide a natural model for reactive non-terminating systems. In such games, the interaction of two players on an arena results in an infinite path that describes a run of the system. Different settings are used to model various open systems in computer science, as for instance turn-based or concurrent moves, and deterministic or stochastic transitions. In this paper, we are interested in turn-based games, and specifically in deterministic parity games and stochastic reachability games (also known as simple stochastic games). We present a simple, direct and efficient reduction from deterministic parity games to simple stochastic games: it yields an arena whose size is linear up to a logarithmic factor in the size of the original arena.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
location = {Minori, Italy},
pages = {74 -- 86},
publisher = {EPTCS},
title = {{A reduction from parity games to simple stochastic games}},
doi = {10.4204/EPTCS.54.6},
volume = {54},
year = {2011},
}
@inproceedings{3350,
abstract = {A controller for a discrete game with ω-regular objectives requires attention if, intuitively, it requires measuring the state and switching from the current control action. Minimum attention controllers are preferable in modern shared implementations of cyber-physical systems because they produce the least burden on system resources such as processor time or communication bandwidth. We give algorithms to compute minimum attention controllers for ω-regular objectives in imperfect information discrete two-player games. We show a polynomial-time reduction from minimum attention controller synthesis to synthesis of controllers for mean-payoff parity objectives in games of incomplete information. This gives an optimal EXPTIME-complete synthesis algorithm. We show that the minimum attention controller problem is decidable for infinite state systems with finite bisimulation quotients. In particular, the problem is decidable for timed and rectangular automata.},
author = {Chatterjee, Krishnendu and Majumdar, Rupak},
editor = {Fahrenberg, Uli and Tripakis, Stavros},
location = {Aalborg, Denmark},
pages = {145 -- 159},
publisher = {Springer},
title = {{Minimum attention controller synthesis for omega regular objectives}},
doi = {10.1007/978-3-642-24310-3_11},
volume = {6919},
year = {2011},
}
@inproceedings{3351,
abstract = {In two-player games on graph, the players construct an infinite path through the game graph and get a reward computed by a payoff function over infinite paths. Over weighted graphs, the typical and most studied payoff functions compute the limit-average or the discounted sum of the rewards along the path. Besides their simple definition, these two payoff functions enjoy the property that memoryless optimal strategies always exist. In an attempt to construct other simple payoff functions, we define a class of payoff functions which compute an (infinite) weighted average of the rewards. This new class contains both the limit-average and the discounted sum functions, and we show that they are the only members of this class which induce memoryless optimal strategies, showing that there is essentially no other simple payoff functions.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Singh, Rohit},
editor = {Owe, Olaf and Steffen, Martin and Telle, Jan Arne},
location = {Oslo, Norway},
pages = {148 -- 159},
publisher = {Springer},
title = {{On memoryless quantitative objectives}},
doi = {10.1007/978-3-642-22953-4_13},
volume = {6914},
year = {2011},
}
@article{3352,
abstract = {Exploring the connection of biology with reactive systems to better understand living systems.},
author = {Fisher, Jasmin and Harel, David and Henzinger, Thomas A},
journal = {Communications of the ACM},
number = {10},
pages = {72 -- 82},
publisher = {ACM},
title = {{Biology as reactivity}},
doi = {10.1145/2001269.2001289},
volume = {54},
year = {2011},
}
@article{3353,
abstract = {Compositional theories are crucial when designing large and complex systems from smaller components. In this work we propose such a theory for synchronous concurrent systems. Our approach follows so-called interface theories, which use game-theoretic interpretations of composition and refinement. These are appropriate for systems with distinct inputs and outputs, and explicit conditions on inputs that must be enforced during composition. Our interfaces model systems that execute in an infinite sequence of synchronous rounds. At each round, a contract must be satisfied. The contract is simply a relation specifying the set of valid input/output pairs. Interfaces can be composed by parallel, serial or feedback composition. A refinement relation between interfaces is defined, and shown to have two main properties: (1) it is preserved by composition, and (2) it is equivalent to substitutability, namely, the ability to replace an interface by another one in any context. Shared refinement and abstraction operators, corresponding to greatest lower and least upper bounds with respect to refinement, are also defined. Input-complete interfaces, that impose no restrictions on inputs, and deterministic interfaces, that produce a unique output for any legal input, are discussed as special cases, and an interesting duality between the two classes is exposed. A number of illustrative examples are provided, as well as algorithms to compute compositions, check refinement, and so on, for finite-state interfaces.},
author = {Tripakis, Stavros and Lickly, Ben and Henzinger, Thomas A and Lee, Edward},
journal = {ACM Transactions on Programming Languages and Systems (TOPLAS)},
number = {4},
publisher = {ACM},
title = {{A theory of synchronous relational interfaces}},
doi = {10.1145/1985342.1985345},
volume = {33},
year = {2011},
}
@article{3354,
abstract = {We consider two-player games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine the successor state. We consider ω-regular winning conditions specified as parity objectives. Both players are allowed to use randomization when choosing their moves. We study the computation of the limit-winning set of states, consisting of the states where the sup-inf value of the game for player 1 is 1: in other words, a state is limit-winning if player 1 can ensure a probability of winning arbitrarily close to 1. We show that the limit-winning set can be computed in O(n^{2d+2}) time, where n is the size of the game structure and 2d is the number of priorities (or colors). The membership problem of whether a state belongs to the limit-winning set can be decided in NP ∩ coNP. While this complexity is the same as for the simpler class of turn-based parity games, where in each state only one of the two players has a choice of moves, our algorithms are considerably more involved than those for turn-based games. This is because concurrent games do not satisfy two of the most fundamental properties of turn-based parity games. First, in concurrent games limit-winning strategies require randomization; and second, they require infinite memory.},
author = {Chatterjee, Krishnendu and De Alfaro, Luca and Henzinger, Thomas A},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {{Qualitative concurrent parity games}},
doi = {10.1145/1970398.1970404},
volume = {12},
year = {2011},
}
@inproceedings{3355,
abstract = {Byzantine Fault Tolerant (BFT) protocols aim to improve the reliability of distributed systems. They enable systems to tolerate arbitrary failures in a bounded number of nodes. BFT protocols are usually proven correct for certain safety and liveness properties. However, recent studies have shown that the performance of state-of-the-art BFT protocols decreases drastically in the presence of even a single malicious node. This motivates a formal quantitative analysis of BFT protocols to investigate their performance characteristics under different scenarios. We present HyPerf, a new hybrid methodology based on model checking and simulation techniques for evaluating the performance of BFT protocols. We build a transition system corresponding to a BFT protocol and systematically explore the set of behaviors allowed by the protocol. We associate certain timing information with different operations in the protocol, like cryptographic operations and message transmission. After an elaborate state exploration, we use the time information to evaluate the performance characteristics of the protocol using simulation techniques. We integrate our framework in Mace, a tool for building and verifying distributed systems. We evaluate the performance of PBFT using our framework. We describe two different use-cases of our methodology. For the benign operation of the protocol, we use the time information as random variables to compute the probability distribution of the execution times. In the presence of faults, we estimate the worst-case performance of the protocol for various attacks that can be employed by malicious nodes. Our results show the importance of hybrid techniques in systematically analyzing the performance of large-scale systems.},
author = {Halalai, Raluca and Henzinger, Thomas A and Singh, Vasu},
location = {Aachen, Germany},
pages = {255 -- 264},
publisher = {IEEE},
title = {{Quantitative evaluation of BFT protocols}},
doi = {10.1109/QEST.2011.40},
year = {2011},
}
@inproceedings{3356,
abstract = {There is recently a significant effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions, aiming for a general and flexible framework for quantitative-oriented specifications. In the heart of quantitative objectives lies the accumulation of values along a computation. It is either the accumulated summation, as with the energy objectives, or the accumulated average, as with the mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point of time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire computation. We study the border of decidability for extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities by prefix-accumulation assertions and extending LTL with path-accumulation assertions, result in temporal logics whose model-checking problem is decidable. The extended logics allow to significantly extend the currently known energy and mean-payoff objectives. Moreover, the prefix-accumulation assertions may be refined with "controlled-accumulation", allowing, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that the fragment we point to is, in a sense, the maximal logic whose extension with prefix-accumulation assertions permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, and in particular CTL and LTL, makes the problem undecidable.},
author = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A and Kupferman, Orna},
location = {Toronto, Canada},
publisher = {IEEE},
title = {{Temporal specifications with accumulative values}},
doi = {10.1109/LICS.2011.33},
year = {2011},
}
@inproceedings{3357,
abstract = {We consider two-player graph games whose objectives are request-response conditions, i.e., conjunctions of conditions of the form "if a state with property Rq is visited, then later a state with property Rp is visited". The winner of such games can be decided in EXPTIME and the problem is known to be NP-hard. In this paper, we close this gap by showing that this problem is, in fact, EXPTIME-complete. We show that the problem becomes PSPACE-complete if we only consider games played on DAGs, and NP-complete or PTIME-complete if there is only one player (depending on whether he wants to enforce or spoil the request-response condition). We also present near-optimal bounds on the memory needed to design winning strategies for each player, in each case.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Horn, Florian},
editor = {Dediu, Adrian-Horia and Inenaga, Shunsuke and Martín-Vide, Carlos},
location = {Tarragona, Spain},
pages = {227 -- 237},
publisher = {Springer},
title = {{The complexity of request-response games}},
doi = {10.1007/978-3-642-21254-3_17},
volume = {6638},
year = {2011},
}
@inproceedings{3358,
abstract = {The static scheduling problem often arises as a fundamental problem in real-time systems and grid computing. We consider the problem of statically scheduling a large job expressed as a task graph on a large number of computing nodes, such as a data center. This paper solves the large-scale static scheduling problem using abstraction refinement, a technique commonly used in formal verification to efficiently solve computationally hard problems. A scheduler based on abstraction refinement first attempts to solve the scheduling problem with abstract representations of the job and the computing resources. As abstract representations are generally small, the scheduling can be done reasonably fast. If the obtained schedule does not meet specified quality conditions (like data center utilization or schedule makespan) then the scheduler refines the job and data center abstractions and, again solves the scheduling problem. We develop different schedulers based on abstraction refinement. We implemented these schedulers and used them to schedule task graphs from various computing domains on simulated data centers with realistic topologies. We compared the speed of scheduling and the quality of the produced schedules with our abstraction refinement schedulers against a baseline scheduler that does not use any abstraction. We conclude that abstraction refinement techniques give a significant speed-up compared to traditional static scheduling heuristics, at a reasonable cost in the quality of the produced schedules. We further used our static schedulers in an actual system that we deployed on Amazon EC2 and compared it against the Hadoop dynamic scheduler for large MapReduce jobs. Our experiments indicate that there is great potential for static scheduling techniques.},
author = {Henzinger, Thomas A and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
location = {Salzburg, Austria},
pages = {329 -- 342},
publisher = {ACM},
title = {{Scheduling large jobs by abstraction refinement}},
doi = {10.1145/1966445.1966476},
year = {2011},
}
@inproceedings{3359,
abstract = {Motivated by improvements in constraint-solving technology and by the increase of routinely available computational power, partial-program synthesis is emerging as an effective approach for increasing programmer productivity. The goal of the approach is to allow the programmer to specify a part of her intent imperatively (that is, give a partial program) and a part of her intent declaratively, by specifying which conditions need to be achieved or maintained. The task of the synthesizer is to construct a program that satisfies the specification. As an example, consider a partial program where threads access shared data without using any synchronization mechanism, and a declarative specification that excludes data races and deadlocks. The task of the synthesizer is then to place locks into the program code in order for the program to meet the specification.
In this paper, we argue that quantitative objectives are needed in partial-program synthesis in order to produce higher-quality programs, while enabling simpler specifications. Returning to the example, the synthesizer could construct a naive solution that uses one global lock for shared data. This can be prevented either by constraining the solution space further (which is error-prone and partly defeats the point of synthesis), or by optimizing a quantitative objective that models performance. Other quantitative notions useful in synthesis include fault tolerance, robustness, resource (memory, power) consumption, and information flow.},
author = {Cerny, Pavol and Henzinger, Thomas A},
location = {Taipei, Taiwan},
pages = {149 -- 154},
publisher = {ACM},
title = {{From boolean to quantitative synthesis}},
doi = {10.1145/2038642.2038666},
year = {2011},
}
@inproceedings{3360,
abstract = {A discounted-sum automaton (NDA) is a nondeterministic finite automaton with edge weights, which values a run by the discounted sum of visited edge weights. More precisely, the weight in the i-th position of the run is divided by lambda^i, where the discount factor lambda is a fixed rational number greater than 1. Discounted summation is a common and useful measuring scheme, especially for infinite sequences, which reflects the assumption that earlier weights are more important than later weights. Determinizing automata is often essential, for example, in formal verification, where there are polynomial algorithms for comparing two deterministic NDAs, while the equivalence problem for NDAs is not known to be decidable. Unfortunately, however, discounted-sum automata are, in general, not determinizable: it is currently known that for every rational discount factor 1 < lambda < 2, there is an NDA with lambda (denoted lambda-NDA) that cannot be determinized. We provide positive news, showing that every NDA with an integral factor is determinizable. We also complete the picture by proving that the integers characterize exactly the discount factors that guarantee determinizability: we show that for every non-integral rational factor lambda, there is a nondeterminizable lambda-NDA. Finally, we prove that the class of NDAs with integral discount factors enjoys closure under the algebraic operations min, max, addition, and subtraction, which is not the case for general NDAs nor for deterministic NDAs. This shows that for integral discount factors, the class of NDAs forms an attractive specification formalism in quantitative formal verification. All our results hold equally for automata over finite words and for automata over infinite words. },
author = {Boker, Udi and Henzinger, Thomas A},
location = {Bergen, Norway},
pages = {82 -- 96},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Determinizing discounted-sum automata}},
doi = {10.4230/LIPIcs.CSL.2011.82},
volume = {12},
year = {2011},
}
@inproceedings{3361,
abstract = {In this paper, we investigate the computational complexity of quantitative information flow (QIF) problems. Information-theoretic quantitative relaxations of noninterference (based on Shannon entropy)have been introduced to enable more fine-grained reasoning about programs in situations where limited information flow is acceptable. The QIF bounding problem asks whether the information flow in a given program is bounded by a constant $d$. Our first result is that the QIF bounding problem is PSPACE-complete. The QIF memoryless synthesis problem asks whether it is possible to resolve nondeterministic choices in a given partial program in such a way that in the resulting deterministic program, the quantitative information flow is bounded by a given constant $d$. Our second result is that the QIF memoryless synthesis problem is also EXPTIME-complete. The QIF memoryless synthesis problem generalizes to QIF general synthesis problem which does not impose the memoryless requirement (that is, by allowing the synthesized program to have more variables then the original partial program). Our third result is that the QIF general synthesis problem is EXPTIME-hard.},
author = {Cerny, Pavol and Chatterjee, Krishnendu and Henzinger, Thomas A},
location = {Cernay-la-Ville, France},
pages = {205 -- 217},
publisher = {IEEE},
title = {{The complexity of quantitative information flow problems}},
doi = {10.1109/CSF.2011.21},
year = {2011},
}
@inproceedings{3362,
abstract = {State-transition systems communicating by shared variables have been the underlying model of choice for applications of model checking. Such formalisms, however, have difficulty with modeling process creation or death and communication reconfigurability. Here, we introduce “dynamic reactive modules” (DRM), a state-transition modeling formalism that supports dynamic reconfiguration and creation/death of processes. The resulting formalism supports two types of variables, data variables and reference variables. Reference variables enable changing the connectivity between processes and referring to instances of processes. We show how this new formalism supports parallel composition and refinement through trace containment. DRM provide a natural language for modeling (and ultimately reasoning about) biological systems and multiple threads communicating through shared variables.},
author = {Fisher, Jasmin and Henzinger, Thomas A and Nickovic, Dejan and Piterman, Nir and Singh, Anmol and Vardi, Moshe},
location = {Aachen, Germany},
pages = {404 -- 418},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Dynamic reactive modules}},
doi = {10.1007/978-3-642-23217-6_27},
volume = {6901},
year = {2011},
}
@unpublished{3363,
abstract = {We consider probabilistic automata on infinite words with acceptance defined by safety, reachability, Büchi, coBüchi, and limit-average conditions. We consider quantitative and qualitative decision problems. We present extensions and adaptations of proofs for probabilistic finite automata and present a complete characterization of the decidability and undecidability frontier of the quantitative and qualitative decision problems for probabilistic automata on infinite words.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Tracol, Mathieu},
pages = {19},
publisher = {ArXiv},
title = {{The decidability frontier for probabilistic automata on infinite words}},
year = {2011},
}
@article{3364,
abstract = {Molecular noise, which arises from the randomness of the discrete events in the cell, significantly influences fundamental biological processes. Discrete-state continuous-time stochastic models (CTMC) can be used to describe such effects, but the calculation of the probabilities of certain events is computationally expensive. We present a comparison of two analysis approaches for CTMC. On one hand, we estimate the probabilities of interest using repeated Gillespie simulation and determine the statistical accuracy that we obtain. On the other hand, we apply a numerical reachability analysis that approximates the probability distributions of the system at several time instances. We use examples of cellular processes to demonstrate the superiority of the reachability analysis if accurate results are required.},
author = {Didier, Frédéric and Henzinger, Thomas A and Mateescu, Maria and Wolf, Verena},
journal = {Theoretical Computer Science},
number = {21},
pages = {2128 -- 2141},
publisher = {Elsevier},
title = {{Approximation of event probabilities in noisy cellular processes}},
doi = {10.1016/j.tcs.2010.10.022},
volume = {412},
year = {2011},
}
@inproceedings{3365,
abstract = {We present the tool Quasy, a quantitative synthesis tool. Quasy takes qualitative and quantitative specifications and automatically constructs a system that satisfies the qualitative specification and optimizes the quantitative specification, if such a system exists. The user can choose between a system that satisfies and optimizes the specifications (a) under all possible environment behaviors or (b) under the most-likely environment behaviors given as a probability distribution on the possible input sequences. Quasy solves these two quantitative synthesis problems by reduction to instances of 2-player games and Markov Decision Processes (MDPs) with quantitative winning objectives. Quasy can also be seen as a game solver for quantitative games. Most notable, it can solve lexicographic mean-payoff games with 2 players, MDPs with mean-payoff objectives, and ergodic MDPs with mean-payoff parity objectives.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Jobstmann, Barbara and Singh, Rohit},
location = {Saarbrucken, Germany},
pages = {267 -- 271},
publisher = {Springer},
title = {{QUASY: quantitative synthesis tool}},
doi = {10.1007/978-3-642-19835-9_24},
volume = {6605},
year = {2011},
}
@inproceedings{3366,
abstract = {We present an algorithmic method for the quantitative, performance-aware synthesis of concurrent programs. The input consists of a nondeterministic partial program and of a parametric performance model. The nondeterminism allows the programmer to omit which (if any) synchronization construct is used at a particular program location. The performance model, specified as a weighted automaton, can capture system architectures by assigning different costs to actions such as locking, context switching, and memory and cache accesses. The quantitative synthesis problem is to automatically resolve the nondeterminism of the partial program so that both correctness is guaranteed and performance is optimal. As is standard for shared memory concurrency, correctness is formalized "specification free", in particular as race freedom or deadlock freedom. For worst-case (average-case) performance, we show that the problem can be reduced to 2-player graph games (with probabilistic transitions) with quantitative objectives. While we show, using game-theoretic methods, that the synthesis problem is Nexp-complete, we present an algorithmic method and an implementation that works efficiently for concurrent programs and performance models of practical interest. We have implemented a prototype tool and used it to synthesize finite-state concurrent programs that exhibit different programming patterns, for several performance models representing different architectures. },
author = {Cerny, Pavol and Chatterjee, Krishnendu and Henzinger, Thomas A and Radhakrishna, Arjun and Singh, Rohit},
editor = {Gopalakrishnan, Ganesh and Qadeer, Shaz},
location = {Snowbird, USA},
pages = {243 -- 259},
publisher = {Springer},
title = {{Quantitative synthesis for concurrent programs}},
doi = {10.1007/978-3-642-22110-1_20},
volume = {6806},
year = {2011},
}
@inproceedings{3367,
abstract = {In this paper, we present the first output-sensitive algorithm to compute the persistence diagram of a filtered simplicial complex. For any Γ>0, it returns only those homology classes with persistence at least Γ. Instead of the classical reduction via column operations, our algorithm performs rank computations on submatrices of the boundary matrix. For an arbitrary constant δ ∈ (0,1), the running time is O(C_{(1-δ)Γ} R(n) log n), where C_{(1-δ)Γ} is the number of homology classes with persistence at least (1-δ)Γ, n is the total number of simplices, and R(n) is the complexity of computing the rank of an n x n matrix with O(n) nonzero entries. Depending on the choice of the rank algorithm, this yields a deterministic O(C_{(1-δ)Γ} n^{2.376}) algorithm, a O(C_{(1-δ)Γ} n^{2.28}) Las-Vegas algorithm, or a O(C_{(1-δ)Γ} n^{2+ε}) Monte-Carlo algorithm for an arbitrary ε>0.},
author = {Chen, Chao and Kerber, Michael},
location = {Paris, France},
pages = {207 -- 216},
publisher = {ACM},
title = {{An output sensitive algorithm for persistent homology}},
doi = {10.1145/1998196.1998228},
year = {2011},
}
@article{3368,
abstract = {Tissue surface tension (TST) is an important mechanical property influencing cell sorting and tissue envelopment. The study by Manning et al. (1) reported on a mathematical model describing TST on the basis of the balance between adhesive and tensile properties of the constituent cells. The model predicts that, in high-adhesion cell aggregates, surface cells will be stretched to maintain the same area of cell–cell contact as interior bulk cells, resulting in an elongated and flattened cell shape. The authors (1) observed flat and elongated cells at the surface of high-adhesion zebrafish germ-layer explants, which they argue are undifferentiated stretched germ-layer progenitor cells, and they use this observation as a validation of their model.},
author = {Krens, Gabriel and Möllmert, Stephanie and Heisenberg, Carl-Philipp J},
journal = {PNAS},
number = {3},
pages = {E9 -- E10},
publisher = {National Academy of Sciences},
title = {{Enveloping cell layer differentiation at the surface of zebrafish germ layer tissue explants}},
doi = {10.1073/pnas.1010767108},
volume = {108},
year = {2011},
}
@article{3369,
  author    = {Pernia-Andrade, Alejandro and Jonas, Peter M},
  title     = {{The multiple faces of RIM}},
  journal   = {Neuron},
  volume    = {69},
  number    = {2},
  pages     = {185 -- 187},
  publisher = {Elsevier},
  doi       = {10.1016/j.neuron.2011.01.010},
  year      = {2011},
  abstract  = {Rab3 interacting molecules (RIMs) are highly enriched in the active zones of presynaptic terminals. It is generally thought that they operate as effectors of the small G protein Rab3. Three recent papers, by Han et al. (this issue of Neuron), Deng et al. (this issue of Neuron), and Kaeser et al. (a recent issue of Cell), shed new light on the functional role of RIM in presynaptic terminals. First, RIM tethers Ca2+ channels to active zones. Second, RIM contributes to priming of synaptic vesicles by interacting with another presynaptic protein, Munc13.},
}
@article{3370,
  author    = {Kupczok, Anne},
  title     = {{Consequences of different null models on the tree shape bias of supertree methods}},
  journal   = {Systematic Biology},
  volume    = {60},
  number    = {2},
  pages     = {218 -- 225},
  publisher = {Oxford University Press},
  doi       = {10.1093/sysbio/syq086},
  year      = {2011},
  abstract  = {Supertree methods are widely applied and give rise to new conclusions about phylogenies (e.g., Bininda-Emonds et al. 2007). Although several desiderata for supertree methods exist (Wilkinson, Thorley, et al. 2004), only few of them have been studied in greater detail, examples include shape bias (Wilkinson et al. 2005) or pareto properties (Wilkinson et al. 2007). Here I look more closely at two matrix representation methods, matrix representation with compatibility (MRC) and matrix representation with parsimony (MRP). Different null models of random data are studied and the resulting tree shapes are investigated. Thereby I consider unrooted trees and a bias in tree shape is determined by a tree balance measure. The measure for unrooted trees is a modification of a tree balance measure for rooted trees. I observe that depending on the underlying null model of random data, the methods may resolve conflict in favor of more balanced tree shapes. The analyses refer only to trees with the same taxon set, also known as the consensus setting (e.g., Wilkinson et al. 2007), but I will be able to draw conclusions on how to deal with missing data.},
}
@article{3371,
  author    = {Sixt, Michael K and Parent, Carole},
  title     = {{Cells on the move in Philadelphia}},
  journal   = {Molecular Biology of the Cell},
  volume    = {22},
  number    = {6},
  pages     = {724},
  publisher = {American Society for Cell Biology},
  doi       = {10.1091/mbc.E10-12-0958},
  year      = {2011},
  abstract  = {The Minisymposium “Cell Migration and Motility” was attended by approximately 500 visitors and covered a broad range of questions in the field using diverse model systems. Topics comprised actin dynamics, cell polarity, force transduction, signal transduction, barrier transmigration, and chemotactic guidance.},
}
@article{3372,
  author    = {Abbot, Patrick and Abe, Jun and Alcock, John and Alizon, Samuel and Alpedrinha, Joao and Andersson, Malte and Andre, Jean and van Baalen, Minus and Balloux, Francois and Balshine, Sigal and Barton, Nicholas H and Beukeboom, Leo and Biernaskie, Jay and Bilde, Trine and Borgia, Gerald and Breed, Michael and Brown, Sam and Bshary, Redouan and Buckling, Angus and Burley, Nancy and Burton-Chellew, Max and Cant, Michael and Chapuisat, Michel and Charnov, Eric and Clutton-Brock, Tim and Cockburn, Andrew and Cole, Blaine and Colegrave, Nick and Cosmides, Leda and Couzin, Iain and Coyne, Jerry and Creel, Scott and Crespi, Bernard and Curry, Robert and Dall, Sasha and Day, Troy and Dickinson, Janis and Dugatkin, Lee and El Mouden, Claire and Emlen, Stephen and Evans, Jay and Ferriere, Regis and Field, Jeremy and Foitzik, Susanne and Foster, Kevin and Foster, William and Fox, Charles and Gadau, Juergen and Gandon, Sylvain and Gardner, Andy and Gardner, Michael and Getty, Thomas and Goodisman, Michael and Grafen, Alan and Grosberg, Rick and Grozinger, Christina and Gouyon, Pierre and Gwynne, Darryl and Harvey, Paul and Hatchwell, Ben and Heinze, Jürgen and Helantera, Heikki and Helms, Ken and Hill, Kim and Jiricny, Natalie and Johnstone, Rufus and Kacelnik, Alex and Kiers, E Toby and Kokko, Hanna and Komdeur, Jan and Korb, Judith and Kronauer, Daniel and Kümmerli, Rolf and Lehmann, Laurent and Linksvayer, Timothy and Lion, Sébastien and Lyon, Bruce and Marshall, James and McElreath, Richard and Michalakis, Yannis and Michod, Richard and Mock, Douglas and Monnin, Thibaud and Montgomerie, Robert and Moore, Allen and Mueller, Ulrich and Noë, Ronald and Okasha, Samir and Pamilo, Pekka and Parker, Geoff and Pedersen, Jes and Pen, Ido and Pfennig, David and Queller, David and Rankin, Daniel and Reece, Sarah and Reeve, Hudson and Reuter, Max and Roberts, Gilbert and Robson, Simon and Roze, Denis and Rousset, Francois and Rueppell, Olav and Sachs, Joel and Santorelli, Lorenzo and Schmid-Hempel, Paul and Schwarz, Michael and Scott-Phillips, Tom and Shellmann Sherman, Janet and Sherman, Paul and Shuker, David and Smith, Jeff and Spagna, Joseph and Strassmann, Beverly and Suarez, Andrew and Sundström, Liselotte and Taborsky, Michael and Taylor, Peter and Thompson, Graham and Tooby, John and Tsutsui, Neil and Tsuji, Kazuki and Turillazzi, Stefano and Úbeda, Francisco and Vargo, Edward and Voelkl, Bernard and Wenseleers, Tom and West, Stuart and West-Eberhard, Mary and Westneat, David and Wiernasz, Diane and Wild, Geoff and Wrangham, Richard and Young, Andrew and Zeh, David and Zeh, Jeanne and Zink, Andrew},
  title     = {{Inclusive fitness theory and eusociality}},
  journal   = {Nature},
  volume    = {471},
  number    = {7339},
  pages     = {E1 -- E4},
  publisher = {Nature Publishing Group},
  doi       = {10.1038/nature09831},
  year      = {2011},
  abstract  = {Nowak et al.1 argue that inclusive fitness theory has been of little value in explaining the natural world, and that it has led to negligible progress in explaining the evolution of eusociality. However, we believe that their arguments are based upon a misunderstanding of evolutionary theory and a misrepresentation of the empirical literature. We will focus our comments on three general issues.},
}
@article{3373,
  author    = {Jahnel, Marcus and Behrndt, Martin and Jannasch, Anita and Schaeffer, Erik and Grill, Stephan},
  title     = {{Measuring the complete force field of an optical trap}},
  journal   = {Optics Letters},
  volume    = {36},
  number    = {7},
  pages     = {1260 -- 1262},
  publisher = {OSA},
  doi       = {10.1364/OL.36.001260},
  year      = {2011},
  abstract  = {The use of optical traps to measure or apply forces on the molecular level requires a precise knowledge of the trapping force field. Close to the trap center, this field is typically approximated as linear in the displacement of the trapped microsphere. However, applications demanding high forces at low laser intensities can probe the light-microsphere interaction beyond the linear regime. Here, we measured the full nonlinear force and displacement response of an optical trap in two dimensions using a dual-beam optical trap setup with back-focal-plane photodetection. We observed a substantial stiffening of the trap beyond the linear regime that depends on microsphere size, in agreement with Mie theory calculations. Surprisingly, we found that the linear detection range for forces exceeds the one for displacement by far. Our approach allows for a complete calibration of an optical trap.},
}
@article{3374,
  author    = {Tkacik, Gasper and Walczak, Aleksandra},
  title     = {{Information transmission in genetic regulatory networks: a review}},
  journal   = {Journal of Physics: Condensed Matter},
  volume    = {23},
  number    = {15},
  pages     = {153102},
  publisher = {IOP Publishing Ltd.},
  doi       = {10.1088/0953-8984/23/15/153102},
  year      = {2011},
  abstract  = {Genetic regulatory networks enable cells to respond to changes in internal and external conditions by dynamically coordinating their gene expression profiles. Our ability to make quantitative measurements in these biochemical circuits has deepened our understanding of what kinds of computations genetic regulatory networks can perform, and with what reliability. These advances have motivated researchers to look for connections between the architecture and function of genetic regulatory networks. Transmitting information between a network's inputs and outputs has been proposed as one such possible measure of function, relevant in certain biological contexts. Here we summarize recent developments in the application of information theory to gene regulatory networks. We first review basic concepts in information theory necessary for understanding recent work. We then discuss the functional complexity of gene regulation, which arises from the molecular nature of the regulatory interactions. We end by reviewing some experiments that support the view that genetic networks responsible for early development of multicellular organisms might be maximizing transmitted 'positional information'.},
}
@article{3375,
  author    = {de Vladar, Harold and Barton, Nicholas H},
  title     = {{The statistical mechanics of a polygenic character under stabilizing selection mutation and drift}},
  journal   = {Journal of the Royal Society Interface},
  volume    = {8},
  number    = {58},
  pages     = {720 -- 739},
  publisher = {Royal Society of London},
  doi       = {10.1098/rsif.2010.0438},
  year      = {2011},
  abstract  = {By exploiting an analogy between population genetics and statistical mechanics, we study the evolution of a polygenic trait under stabilizing selection, mutation and genetic drift. This requires us to track only four macroscopic variables, instead of the distribution of all the allele frequencies that influence the trait. These macroscopic variables are the expectations of: the trait mean and its square, the genetic variance, and of a measure of heterozygosity, and are derived from a generating function that is in turn derived by maximizing an entropy measure. These four macroscopics are enough to accurately describe the dynamics of the trait mean and of its genetic variance (and in principle of any other quantity). Unlike previous approaches that were based on an infinite series of moments or cumulants, which had to be truncated arbitrarily, our calculations provide a well-defined approximation procedure. We apply the framework to abrupt and gradual changes in the optimum, as well as to changes in the strength of stabilizing selection. Our approximations are surprisingly accurate, even for systems with as few as five loci. We find that when the effects of drift are included, the expected genetic variance is hardly altered by directional selection, even though it fluctuates in any particular instance. We also find hysteresis, showing that even after averaging over the microscopic variables, the macroscopic trajectories retain a memory of the underlying genetic states.},
}
@article{3376,
  author    = {Bollenbach, Mark Tobias and Kishony, Roy},
  title     = {{Resolution of gene regulatory conflicts caused by combinations of antibiotics}},
  journal   = {Molecular Cell},
  volume    = {42},
  number    = {4},
  pages     = {413 -- 425},
  publisher = {Cell Press},
  doi       = {10.1016/j.molcel.2011.04.016},
  year      = {2011},
  abstract  = {Regulatory conflicts occur when two signals that individually trigger opposite cellular responses are present simultaneously. Here, we investigate regulatory conflicts in the bacterial response to antibiotic combinations. We use an Escherichia coli promoter-GFP library to study the transcriptional response of many promoters to either additive or antagonistic drug pairs at fine two-dimensional (2D) resolution of drug concentration. Surprisingly, we find that this data set can be characterized as a linear sum of only two principal components. Component one, accounting for over 70% of the response, represents the response to growth inhibition by the drugs. Component two describes how regulatory conflicts are resolved. For the additive drug pair, conflicts are resolved by linearly interpolating the single drug responses, while for the antagonistic drug pair, the growth-limiting drug dominates the response. Importantly, for a given drug pair, the same conflict resolution strategy applies to almost all genes. These results provide a recipe for predicting gene expression responses to antibiotic combinations.},
}
@article{3377,
  author    = {Edelsbrunner, Herbert and Morozov, Dmitriy and Patel, Amit},
  title     = {{Quantifying transversality by measuring the robustness of intersections}},
  journal   = {Foundations of Computational Mathematics},
  volume    = {11},
  number    = {3},
  pages     = {345 -- 361},
  publisher = {Springer},
  doi       = {10.1007/s10208-011-9090-8},
  year      = {2011},
  abstract  = {By definition, transverse intersections are stable under infinitesimal perturbations. Using persistent homology, we extend this notion to sizeable perturbations. Specifically, we assign to each homology class of the intersection its robustness, the magnitude of a perturbation necessary to kill it, and prove that robustness is stable. Among the applications of this result is a stable notion of robustness for fixed points of continuous mappings and a statement of stability for contours of smooth mappings.},
}
@article{3378,
  author    = {Bendich, Paul and Harer, John},
  title     = {{Persistent intersection homology}},
  journal   = {Foundations of Computational Mathematics},
  volume    = {11},
  number    = {3},
  pages     = {305 -- 336},
  publisher = {Springer},
  doi       = {10.1007/s10208-010-9081-1},
  year      = {2011},
  abstract  = {The theory of intersection homology was developed to study the singularities of a topologically stratified space. This paper incorporates this theory into the already developed framework of persistent homology. We demonstrate that persistent intersection homology gives useful information about the relationship between an embedded stratified space and its singularities. We give, and prove the correctness of, an algorithm for the computation of the persistent intersection homology groups of a filtered simplicial complex equipped with a stratification by subcomplexes. We also derive, from Poincaré Duality, some structural results about persistent intersection homology.},
}
@article{3379,
  author    = {Row, Richard and Maître, Jean-Léon and Martin, Benjamin and Stockinger, Petra and Heisenberg, Carl-Philipp J and Kimelman, David},
  title     = {{Completion of the epithelial to mesenchymal transition in zebrafish mesoderm requires Spadetail}},
  journal   = {Developmental Biology},
  volume    = {354},
  number    = {1},
  pages     = {102 -- 110},
  publisher = {Elsevier},
  doi       = {10.1016/j.ydbio.2011.03.025},
  year      = {2011},
  abstract  = {The process of gastrulation is highly conserved across vertebrates on both the genetic and morphological levels, despite great variety in embryonic shape and speed of development. This mechanism spatially separates the germ layers and establishes the organizational foundation for future development. Mesodermal identity is specified in a superficial layer of cells, the epiblast, where cells maintain an epithelioid morphology. These cells involute to join the deeper hypoblast layer where they adopt a migratory, mesenchymal morphology. Expression of a cascade of related transcription factors orchestrates the parallel genetic transition from primitive to mature mesoderm. Although the early and late stages of this process are increasingly well understood, the transition between them has remained largely mysterious. We present here the first high resolution in vivo observations of the blebby transitional morphology of involuting mesodermal cells in a vertebrate embryo. We further demonstrate that the zebrafish spadetail mutation creates a reversible block in the maturation program, stalling cells in the transition state. This mutation creates an ideal system for dissecting the specific properties of cells undergoing the morphological transition of maturing mesoderm, as we demonstrate with a direct measurement of cell–cell adhesion.},
}
@article{3380,
  author    = {Logeswaran, Sayanthan and Barton, Nicholas H},
  title     = {{Mapping Mendelian traits in asexual progeny using changes in marker allele frequency}},
  journal   = {Genetical Research},
  volume    = {93},
  number    = {3},
  pages     = {221 -- 232},
  publisher = {Cambridge University Press},
  doi       = {10.1017/S0016672311000115},
  year      = {2011},
  abstract  = {Linkage between markers and genes that affect a phenotype of interest may be determined by examining differences in marker allele frequency in the extreme progeny of a cross between two inbred lines. This strategy is usually employed when pooling is used to reduce genotyping costs. When the cross progeny are asexual, the extreme progeny may be selected by multiple generations of asexual reproduction and selection. We analyse this method of measuring phenotype in asexual progeny and examine the changes in marker allele frequency due to selection over many generations. Stochasticity in marker frequency in the selected population arises due to the finite initial population size. We derive the distribution of marker frequency as a result of selection at a single major locus, and show that in order to avoid spurious changes in marker allele frequency in the selected population, the initial population size should be in the low to mid hundreds.},
}
@article{3381,
  author    = {Henzinger, Thomas A and Jobstmann, Barbara and Wolf, Verena},
  title     = {{Formalisms for specifying Markovian population models}},
  journal   = {International Journal of Foundations of Computer Science},
  volume    = {22},
  number    = {4},
  pages     = {823 -- 841},
  publisher = {World Scientific Publishing},
  doi       = {10.1142/S0129054111008441},
  year      = {2011},
  abstract  = {In this survey, we compare several languages for specifying Markovian population models such as queuing networks and chemical reaction networks. All these languages — matrix descriptions, stochastic Petri nets, stoichiometric equations, stochastic process algebras, and guarded command models — describe continuous-time Markov chains, but they differ according to important properties, such as compositionality, expressiveness and succinctness, executability, and ease of use. Moreover, they provide different support for checking the well-formedness of a model and for analyzing a model.},
}
@article{3382,
  author    = {Kroemer, Oliver and Lampert, Christoph and Peters, Jan},
  title     = {{Learning dynamic tactile sensing with robust vision based training}},
  journal   = {IEEE Transactions on Robotics},
  volume    = {27},
  number    = {3},
  pages     = {545 -- 557},
  publisher = {IEEE},
  doi       = {10.1109/TRO.2011.2121130},
  year      = {2011},
  abstract  = {Dynamic tactile sensing is a fundamental ability to recognize materials and objects. However, while humans are born with partially developed dynamic tactile sensing and quickly master this skill, today's robots remain in their infancy. The development of such a sense requires not only better sensors but the right algorithms to deal with these sensors' data as well. For example, when classifying a material based on touch, the data are noisy, high-dimensional, and contain irrelevant signals as well as essential ones. Few classification methods from machine learning can deal with such problems. In this paper, we propose an efficient approach to infer suitable lower dimensional representations of the tactile data. In order to classify materials based on only the sense of touch, these representations are autonomously discovered using visual information of the surfaces during training. However, accurately pairing vision and tactile samples in real-robot applications is a difficult problem. The proposed approach, therefore, works with weak pairings between the modalities. Experiments show that the resulting approach is very robust and yields significantly higher classification performance based on only dynamic tactile sensing.},
}
@article{3383,
  author    = {Heisenberg, Carl-Philipp J},
  title     = {{Invited Lectures -- Symposia Area}},
  journal   = {FEBS Journal},
  volume    = {278},
  number    = {S1},
  pages     = {24 -- 24},
  publisher = {Wiley-Blackwell},
  doi       = {10.1111/j.1742-4658.2011.08136.x},
  year      = {2011},
}
@article{3384,
  author    = {Tkacik, Gasper and Garrigan, Patrick and Ratliff, Charles and Milcinski, Grega and Klein, Jennifer and Seyfarth, Lucia and Sterling, Peter and Brainard, David and Balasubramanian, Vijay},
  title     = {{Natural images from the birthplace of the human eye}},
  journal   = {PLoS One},
  volume    = {6},
  number    = {6},
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pone.0020409},
  year      = {2011},
  abstract  = {Here we introduce a database of calibrated natural images publicly available through an easy-to-use web interface. Using a Nikon D70 digital SLR camera, we acquired about six-megapixel images of Okavango Delta of Botswana, a tropical savanna habitat similar to where the human eye is thought to have evolved. Some sequences of images were captured unsystematically while following a baboon troop, while others were designed to vary a single parameter such as aperture, object distance, time of day or position on the horizon. Images are available in the raw RGB format and in grayscale. Images are also available in units relevant to the physiology of human cone photoreceptors, where pixel values represent the expected number of photoisomerizations per second for cones sensitive to long (L), medium (M) and short (S) wavelengths. This database is distributed under a Creative Commons Attribution-Noncommercial Unported license to facilitate research in computer vision, psychophysics of perception, and visual neuroscience.},
}
@article{3385,
  author    = {Sixt, Michael K},
  title     = {{Interstitial locomotion of leukocytes}},
  journal   = {Immunology Letters},
  volume    = {138},
  number    = {1},
  pages     = {32 -- 34},
  publisher = {Elsevier},
  doi       = {10.1016/j.imlet.2011.02.013},
  year      = {2011},
}
@article{3386,
  author    = {Schrempf, Alexandra and Cremer, Sylvia and Heinze, Jürgen},
  title     = {{Social influence on age and reproduction reduced lifespan and fecundity in multi queen ant colonies}},
  journal   = {Journal of Evolutionary Biology},
  volume    = {24},
  number    = {7},
  pages     = {1455 -- 1461},
  publisher = {Wiley-Blackwell},
  doi       = {10.1111/j.1420-9101.2011.02278.x},
  year      = {2011},
  abstract  = {Evolutionary theories of ageing predict that life span increases with decreasing extrinsic mortality, and life span variation among queens in ant species seems to corroborate this prediction: queens, which are the only reproductive in a colony, live much longer than queens in multi-queen colonies. The latter often inhabit ephemeral nest sites and accordingly are assumed to experience a higher mortality risk. Yet, all prior studies compared queens from different single- and multi-queen species. Here, we demonstrate an effect of queen number on longevity and fecundity within a single, socially plastic species, where queens experience the similar level of extrinsic mortality. Queens from single- and two-queen colonies had significantly longer lifespan and higher fecundity than queens living in associations of eight queens. As queens also differ neither in morphology nor the mode of colony foundation, our study shows that the social environment itself strongly affects ageing rate.},
}
@article{3387,
  author    = {Kupczok, Anne},
  title     = {{Split-based computation of majority-rule supertrees}},
  journal   = {BMC Evolutionary Biology},
  volume    = {11},
  number    = {205},
  publisher = {BioMed Central},
  doi       = {10.1186/1471-2148-11-205},
  year      = {2011},
  abstract  = {Background: Supertree methods combine overlapping input trees into a larger supertree. Here, I consider split-based supertree methods that first extract the split information of the input trees and subsequently combine this split information into a phylogeny. Well known split-based supertree methods are matrix representation with parsimony and matrix representation with compatibility. Combining input trees on the same taxon set, as in the consensus setting, is a well-studied task and it is thus desirable to generalize consensus methods to supertree methods. Results: Here, three variants of majority-rule (MR) supertrees that generalize majority-rule consensus trees are investigated. I provide simple formulas for computing the respective score for bifurcating input- and supertrees. These score computations, together with a heuristic tree search minimizing the scores, were implemented in the python program PluMiST (Plus- and Minus SuperTrees) available from http://www.cibiv.at/software/plumist. The different MR methods were tested by simulation and on real data sets. The search heuristic was successful in combining compatible input trees. When combining incompatible input trees, especially one variant, MR(-) supertrees, performed well. Conclusions: The presented framework allows for an efficient score computation of three majority-rule supertree variants and input trees. I combined the score computation with a heuristic search over the supertree space. The implementation was tested by simulation and on real data sets and showed promising results. Especially the MR(-) variant seems to be a reasonable score for supertree reconstruction. Generalizing these computations to multifurcating trees is an open problem, which may be tackled using this framework.},
}
@article{3388,
  author    = {Ugelvig, Line V and Nielsen, Per and Boomsma, Jacobus and Nash, David},
  title     = {{Reconstructing eight decades of genetic variation in an isolated Danish population of the large blue butterfly Maculinea arion}},
  journal   = {BMC Evolutionary Biology},
  volume    = {11},
  number    = {201},
  publisher = {BioMed Central},
  doi       = {10.1186/1471-2148-11-201},
  year      = {2011},
  abstract  = {Background: Fragmentation of terrestrial ecosystems has had detrimental effects on metapopulations of habitat specialists. Maculinea butterflies have been particularly affected because of their specialized lifecycles, requiring both specific food-plants and host-ants. However, the interaction between dispersal, effective population size, and long-term genetic erosion of these endangered butterflies remains unknown. Using non-destructive sampling, we investigated the genetic diversity of the last extant population of M. arion in Denmark, which experienced critically low numbers in the 1980s. Results: Using nine microsatellite markers, we show that the population is genetically impoverished compared to nearby populations in Sweden, but less so than monitoring programs suggested. Ten additional short repeat microsatellites were used to reconstruct changes in genetic diversity and population structure over the last 77 years from museum specimens. We also tested amplification efficiency in such historical samples as a function of repeat length and sample age. Low population numbers in the 1980s did not affect genetic diversity, but considerable turnover of alleles has characterized this population throughout the time-span of our analysis. Conclusions: Our results suggest that M. arion is less sensitive to genetic erosion via population bottlenecks than previously thought, and that managing clusters of high quality habitat may be key for long-term conservation.},
}
@article{3389,
  author    = {Blaschko, Matthew and Shelton, Jacquelyn and Bartels, Andreas and Lampert, Christoph and Gretton, Arthur},
  title     = {{Semi-supervised kernel canonical correlation analysis with application to human fMRI}},
  journal   = {Pattern Recognition Letters},
  volume    = {32},
  number    = {11},
  pages     = {1572 -- 1583},
  publisher = {Elsevier},
  doi       = {10.1016/j.patrec.2011.02.011},
  year      = {2011},
  abstract  = {Kernel canonical correlation analysis (KCCA) is a general technique for subspace learning that incorporates principal components analysis (PCA) and Fisher linear discriminant analysis (LDA) as special cases. By finding directions that maximize correlation, KCCA learns representations that are more closely tied to the underlying process that generates the data and can ignore high-variance noise directions. However, for data where acquisition in one or more modalities is expensive or otherwise limited, KCCA may suffer from small sample effects. We propose to use semi-supervised Laplacian regularization to utilize data that are present in only one modality. This approach is able to find highly correlated directions that also lie along the data manifold, resulting in a more robust estimate of correlated subspaces. Functional magnetic resonance imaging (fMRI) acquired data are naturally amenable to subspace techniques as data are well aligned. fMRI data of the human brain are a particularly interesting candidate. In this study we implemented various supervised and semi-supervised versions of KCCA on human fMRI data, with regression to single and multi-variate labels (corresponding to video content subjects viewed during the image acquisition). In each variate condition, the semi-supervised variants of KCCA performed better than the supervised variants, including a supervised variant with Laplacian regularization. We additionally analyze the weights learned by the regression in order to infer brain regions that are important to different types of visual processing.},
}
@article{3390,
  author    = {Barton, Nicholas H and Etheridge, Alison},
  title     = {{The relation between reproductive value and genetic contribution}},
  journal   = {Genetics},
  volume    = {188},
  number    = {4},
  pages     = {953 -- 973},
  publisher = {Genetics Society of America},
  doi       = {10.1534/genetics.111.127555},
  year      = {2011},
  abstract  = {What determines the genetic contribution that an individual makes to future generations? With biparental reproduction, each individual leaves a 'pedigree' of descendants, determined by the biparental relationships in the population. The pedigree of an individual constrains the lines of descent of each of its genes. An individual's reproductive value is the expected number of copies of each of its genes that is passed on to distant generations conditional on its pedigree. For the simplest model of biparental reproduction analogous to the Wright-Fisher model, an individual's reproductive value is determined within ~10 generations, independent of population size. Partial selfing and subdivision do not greatly slow this convergence. Our central result is that the probability that a gene will survive is proportional to the reproductive value of the individual that carries it, and that conditional on survival, after a few tens of generations, the distribution of the number of surviving copies is the same for all individuals, whatever their reproductive value. These results can be generalized to the joint distribution of surviving blocks of ancestral genome. Selection on unlinked loci in the genetic background may greatly increase the variance in reproductive value, but the above results nevertheless still hold. The almost linear relationship between survival probability and reproductive value also holds for weakly favored alleles. Thus, the influence of the complex pedigree of descendants on an individual's genetic contribution to the population can be summarized through a single number: its reproductive value.},
}
@article{3391,
abstract = {Evolutionary biology shares many concepts with statistical physics: both deal with populations, whether of molecules or organisms, and both seek to simplify evolution in very many dimensions. Often, methodologies have undergone parallel and independent development, as with stochastic methods in population genetics. Here, we discuss aspects of population genetics that have embraced methods from physics: non-equilibrium statistical mechanics, travelling waves and Monte-Carlo methods, among others, have been used to study polygenic evolution, rates of adaptation and range expansions. These applications indicate that evolutionary biology can further benefit from interactions with other areas of statistical physics; for example, by following the distribution of paths taken by a population through time},
author = {de Vladar, Harold and Barton, Nicholas H},
journal = {Trends in Ecology and Evolution},
number = {8},
pages = {424 -- 432},
publisher = {Cell Press},
title = {{The contribution of statistical physics to evolutionary biology}},
doi = {10.1016/j.tree.2011.04.002},
volume = {26},
year = {2011},
}
@article{3392,
abstract = {Migrating lymphocytes acquire a polarized phenotype with a leading and a trailing edge, or uropod. Although in vitro experiments in cell lines or activated primary cell cultures have established that Rho-p160 coiled-coil kinase (ROCK)-myosin II-mediated uropod contractility is required for integrin de-adhesion on two-dimensional surfaces and nuclear propulsion through narrow pores in three-dimensional matrices, less is known about the role of these two events during the recirculation of primary, nonactivated lymphocytes. Using pharmacological antagonists of ROCK and myosin II, we report that inhibition of uropod contractility blocked integrin-independent mouse T cell migration through narrow, but not large, pores in vitro. T cell crawling on chemokine-coated endothelial cells under shear was severely impaired by ROCK inhibition, whereas transendothelial migration was only reduced through endothelial cells with high, but not low, barrier properties. Using three-dimensional thick-tissue imaging and dynamic two-photon microscopy of T cell motility in lymphoid tissue, we demonstrated a significant role for uropod contractility in intraluminal crawling and transendothelial migration through lymph node, but not bone marrow, endothelial cells. Finally, we demonstrated that ICAM-1, but not anatomical constraints or integrin-independent interactions, reduced parenchymal motility of inhibitor-treated T cells within the dense lymphoid microenvironment, thus assigning context-dependent roles for uropod contraction during lymphocyte recirculation.},
author = {Soriano, Silvia and Hons, Miroslav and Schumann, Kathrin and Kumar, Varsha and Dennier, Timo and Lyck, Ruth and Sixt, Michael K and Stein, Jens},
journal = {Journal of Immunology},
number = {5},
pages = {2356 -- 2364},
publisher = {American Association of Immunologists},
title = {{In vivo analysis of uropod function during physiological T cell trafficking}},
doi = {10.4049/jimmunol.1100935},
volume = {187},
year = {2011},
}
@article{3393,
abstract = {Unlike unconditionally advantageous “Fisherian” variants that tend to spread throughout a species range once introduced anywhere, “bistable” variants, such as chromosome translocations, have two alternative stable frequencies, absence and (near) fixation. Analogous to populations with Allee effects, bistable variants tend to increase locally only once they become sufficiently common, and their spread depends on their rate of increase averaged over all frequencies. Several proposed manipulations of insect populations, such as using Wolbachia or “engineered underdominance” to suppress vector-borne diseases, produce bistable rather than Fisherian dynamics. We synthesize and extend theoretical analyses concerning three features of their spatial behavior: rate of spread, conditions to initiate spread from a localized introduction, and wave stopping caused by variation in population densities or dispersal rates. Unlike Fisherian variants, bistable variants tend to spread spatially only for particular parameter combinations and initial conditions. Wave initiation requires introduction over an extended region, while subsequent spatial spread is slower than for Fisherian waves and can easily be halted by local spatial inhomogeneities. We present several new results, including robust sufficient conditions to initiate (and stop) spread, using a one-parameter cubic approximation applicable to several models. The results have both basic and applied implications.},
author = {Barton, Nicholas H and Turelli, Michael},
journal = {American Naturalist},
number = {3},
pages = {E48 -- E75},
publisher = {University of Chicago Press},
title = {{Spatial waves of advance with bistable dynamics: Cytoplasmic and genetic analogues of Allee effects}},
doi = {10.1086/661246},
volume = {178},
year = {2011},
}
@article{3394,
abstract = {Random genetic drift shifts clines in space, alters their width, and distorts their shape. Such random fluctuations complicate inferences from cline width and position. Notably, the effect of genetic drift on the expected shape of the cline is opposite to the naive (but quite common) misinterpretation of classic results on the expected cline. While random drift on average broadens the overall cline in expected allele frequency, it narrows the width of any particular cline. The opposing effects arise because locally, drift drives alleles to fixation—but fluctuations in position widen the expected cline. The effect of genetic drift can be predicted from standardized variance in allele frequencies, averaged across the habitat: 〈F〉. A cline maintained by spatially varying selection (step change) is expected to be narrower by a factor of relative to the cline in the absence of drift. The expected cline is broader by the inverse of this factor. In a tension zone maintained by underdominance, the expected cline width is narrower by about 1 – 〈F〉 relative to the width in the absence of drift. Individual clines can differ substantially from the expectation, and we give quantitative predictions for the variance in cline position and width. The predictions apply to clines in almost one-dimensional circumstances such as hybrid zones in rivers, deep valleys, or along a coast line and give a guide to what patterns to expect in two dimensions.},
author = {Polechova, Jitka and Barton, Nicholas H},
journal = {Genetics},
number = {1},
pages = {227 -- 235},
publisher = {Genetics Society of America},
title = {{Genetic drift widens the expected cline but narrows the expected cline width}},
doi = {10.1534/genetics.111.129817},
volume = {189},
year = {2011},
}
@article{3395,
abstract = {Defining population structure and genetic diversity levels is of the utmost importance for developing efficient conservation strategies. Overfishing has caused mean annual catches of the European spiny lobster (Palinurus elephas) to decrease alarmingly along its distribution area. In this context, there is a need for comprehensive studies aiming to evaluate the genetic health of the exploited populations. The present study is based on a set of ten nuclear markers amplified in 331 individuals from ten different localities covering most of P. elephas distribution area. Samples from Atlantic and Mediterranean basins showed small but significant differences, indicating that P. elephas populations do not behave as a single panmictic unit but form two partially-overlapping groups. Despite intense overfishing, our dataset did not recover a recent bottleneck signal, and instead showed a large and stable historical effective size. This result could be accounted for by specific life-history traits (reproduction and longevity) and the limitations of molecular markers in covering recent timescales for nontemporal samples. The findings of the present study emphasize the need to integrate information on effective population sizes and life-history parameters when evaluating population connectivity levels from genetic data.},
author = {Palero, Ferran and Abello, Pere and Macpherson, Enrique and Beaumont, Mark and Pascual, Marta},
journal = {Biological Journal of the Linnean Society},
number = {2},
pages = {407 -- 418},
publisher = {Wiley-Blackwell},
title = {{Effect of oceanographic barriers and overfishing on the population genetic structure of the European spiny lobster Palinurus elephas}},
doi = {10.1111/j.1095-8312.2011.01728.x},
volume = {104},
year = {2011},
}
@article{3396,
abstract = {Facial branchiomotor neurons (FBMNs) in zebrafish and mouse embryonic hindbrain undergo a characteristic tangential migration from rhombomere (r) 4, where they are born, to r6/7. Cohesion among neuroepithelial cells (NCs) has been suggested to function in FBMN migration by inhibiting FBMNs positioned in the basal neuroepithelium such that they move apically between NCs towards the midline of the neuroepithelium instead of tangentially along the basal side of the neuroepithelium towards r6/7. However, direct experimental evaluation of this hypothesis is still lacking. Here, we have used a combination of biophysical cell adhesion measurements and high-resolution time-lapse microscopy to determine the role of NC cohesion in FBMN migration. We show that reducing NC cohesion by interfering with Cadherin 2 (Cdh2) activity results in FBMNs positioned at the basal side of the neuroepithelium moving apically towards the neural tube midline instead of tangentially towards r6/7. In embryos with strongly reduced NC cohesion, ectopic apical FBMN movement frequently results in fusion of the bilateral FBMN clusters over the apical midline of the neural tube. By contrast, reducing cohesion among FBMNs by interfering with Contactin 2 (Cntn2) expression in these cells has little effect on apical FBMN movement, but reduces the fusion of the bilateral FBMN clusters in embryos with strongly diminished NC cohesion. These data provide direct experimental evidence that NC cohesion functions in tangential FBMN migration by restricting their apical movement.},
author = {Stockinger, Petra and Heisenberg, Carl-Philipp J and Maître, Jean-Léon},
journal = {Development},
number = {21},
pages = {4673 -- 4683},
publisher = {Company of Biologists},
title = {{Defective neuroepithelial cell cohesion affects tangential branchiomotor neuron migration in the zebrafish neural tube}},
doi = {10.1242/dev.071233},
volume = {138},
year = {2011},
}
@article{3397,
abstract = {Recent advances in microscopy techniques and biophysical measurements have provided novel insight into the molecular, cellular and biophysical basis of cell adhesion. However, comparably little is known about a core element of cell–cell adhesion—the energy of adhesion at the cell–cell contact. In this review, we discuss approaches to understand the nature and regulation of adhesion energy, and propose strategies to determine adhesion energy between cells in vitro and in vivo.},
author = {Maître, Jean-Léon and Heisenberg, Carl-Philipp J},
journal = {Current Opinion in Cell Biology},
number = {5},
pages = {508 -- 514},
publisher = {Elsevier},
title = {{The role of adhesion energy in controlling cell-cell contacts}},
doi = {10.1016/j.ceb.2011.07.004},
volume = {23},
year = {2011},
}
@article{3399,
abstract = {Context-dependent adjustment of mating tactics can drastically increase the mating success of behaviourally flexible animals. We used the ant Cardiocondyla obscurior as a model system to study adaptive adjustment of male mating tactics. This species shows a male diphenism of wingless fighter males and peaceful winged males. Whereas the wingless males stay and exclusively mate in the maternal colony, the mating behaviour of winged males is plastic. They copulate with female sexuals in their natal nests early in life but later disperse in search for sexuals outside. In this study, we observed the nest-leaving behaviour of winged males under different conditions and found that they adaptively adjust the timing of their dispersal to the availability of mating partners, as well as the presence, and even the type of competitors in their natal nests. In colonies with virgin female queens winged males stayed longest when they were the only male in the nest. They left earlier when mating partners were not available or when other males were present. In the presence of wingless, locally mating fighter males, winged males dispersed earlier than in the presence of docile, winged competitors. This suggests that C. obscurior males are capable of estimating their local breeding chances and adaptively adjust their dispersal behaviour in both an opportunistic and a risk-sensitive way, thus showing hitherto unknown behavioural plasticity in social insect males.},
author = {Cremer, Sylvia and Schrempf, Alexandra and Heinze, Jürgen},
journal = {PLoS One},
number = {3},
publisher = {Public Library of Science},
title = {{Competition and opportunity shape the reproductive tactics of males in the ant Cardiocondyla obscurior}},
doi = {10.1371/journal.pone.0017323},
volume = {6},
year = {2011},
}
@article{3405,
abstract = {Glutamate is the major excitatory neurotransmitter in the mammalian central nervous system and gates non-selective cation channels. The origins of glutamate receptors are not well understood as they differ structurally and functionally from simple bacterial ligand-gated ion channels. Here we report the discovery of an ionotropic glutamate receptor that combines the typical eukaryotic domain architecture with the 'TXVGYG' signature sequence of the selectivity filter found in K+ channels. This receptor exhibits functional properties intermediate between bacterial and eukaryotic glutamate-gated ion channels, suggesting a link in the evolution of ionotropic glutamate receptors.},
author = {Janovjak, Harald L and Sandoz, Guillaume and Isacoff, Ehud},
journal = {Nature Communications},
number = {232},
pages = {1 -- 6},
publisher = {Nature Publishing Group},
title = {{Modern ionotropic glutamate receptor with a K+ selectivity signature sequence}},
doi = {10.1038/ncomms1231},
volume = {2},
year = {2011},
}
@article{3429,
abstract = {Transcription factors are central to sustaining pluripotency, yet little is known about transcription factor dynamics in defining pluripotency in the early mammalian embryo. Here, we establish a fluorescence decay after photoactivation (FDAP) assay to quantitatively study the kinetic behaviour of Oct4, a key transcription factor controlling pre-implantation development in the mouse embryo. FDAP measurements reveal that each cell in a developing embryo shows one of two distinct Oct4 kinetics, before there are any morphologically distinguishable differences or outward signs of lineage patterning. The differences revealed by FDAP are due to differences in the accessibility of Oct4 to its DNA binding sites in the nucleus. Lineage tracing of the cells in the two distinct sub-populations demonstrates that the Oct4 kinetics predict lineages of the early embryo. Cells with slower Oct4 kinetics are more likely to give rise to the pluripotent cell lineage that contributes to the inner cell mass. Those with faster Oct4 kinetics contribute mostly to the extra-embryonic lineage. Our findings identify Oct4 kinetics, rather than differences in total transcription factor expression levels, as a predictive measure of developmental cell lineage patterning in the early mouse embryo.},
author = {Plachta, Nicolas and Bollenbach, Mark Tobias and Pease, Shirley and Fraser, Scott and Pantazis, Periklis},
journal = {Nature Cell Biology},
number = {2},
pages = {117 -- 123},
publisher = {Nature Publishing Group},
title = {{Oct4 kinetics predict cell lineage patterning in the early mammalian embryo}},
doi = {10.1038/ncb2154},
volume = {13},
year = {2011},
}
@article{3505,
abstract = {Cell migration on two-dimensional (2D) substrates follows entirely different rules than cell migration in three-dimensional (3D) environments. This is especially relevant for leukocytes that are able to migrate in the absence of adhesion receptors within the confined geometry of artificial 3D extracellular matrix scaffolds and within the interstitial space in vivo. Here, we describe in detail a simple and economical protocol to visualize dendritic cell migration in 3D collagen scaffolds along chemotactic gradients. This method can be adapted to other cell types and may serve as a physiologically relevant paradigm for the directed locomotion of most amoeboid cells.},
author = {Sixt, Michael K and Lämmermann, Tim},
journal = {Cell Migration},
pages = {149 -- 165},
publisher = {Springer},
title = {{In vitro analysis of chemotactic leukocyte migration in 3D environments}},
doi = {10.1007/978-1-61779-207-6_11},
volume = {769},
year = {2011},
}
@article{3771,
abstract = {The small-sized frugivorous bat Carollia perspicillata is an understory specialist and occurs in a wide range of lowland habitats, tending to be more common in tropical dry or moist forests of South and Central America. Its sister species, Carollia brevicauda, occurs almost exclusively in the Amazon rainforest. A recent phylogeographic study proposed a hypothesis of origin and subsequent diversification for C. perspicillata along the Atlantic coastal forest of Brazil. Additionally, it also found two allopatric clades for C. brevicauda separated by the Amazon Basin. We used cytochrome b gene sequences and a more extensive sampling to test hypotheses related to the origin and diversification of C. perspicillata plus C. brevicauda clade in South America. The results obtained indicate that there are two sympatric evolutionary lineages within each species. In C. perspicillata, one lineage is limited to the Southern Atlantic Forest, whereas the other is widely distributed. Coalescent analysis points to a simultaneous origin for C. perspicillata and C. brevicauda, although no place for the diversification of each species can be firmly suggested. The phylogeographic pattern shown by C. perspicillata is also congruent with the Pleistocene refugia hypothesis as a likely vicariant phenomenon shaping the present distribution of its intraspecific lineages.},
author = {Pavan, Ana and Martins, Felipe and Santos, Fabrício and Ditchfield, Albert and Fernandes Redondo, Rodrigo A},
journal = {Biological Journal of the Linnean Society},
number = {3},
pages = {527 -- 539},
publisher = {Wiley-Blackwell},
title = {{Patterns of diversification in two species of short-tailed bats (Carollia Gray, 1838): the effects of historical fragmentation of Brazilian rainforests}},
doi = {10.1111/j.1095-8312.2010.01601.x},
volume = {102},
year = {2011},
}
@article{3778,
author = {Barton, Nicholas H},
journal = {Heredity},
number = {2},
pages = {205 -- 206},
publisher = {Nature Publishing Group},
title = {{Estimating linkage disequilibria}},
doi = {10.1038/hdy.2010.67},
volume = {106},
year = {2011},
}
@article{3781,
abstract = {We bound the difference in length of two curves in terms of their total curvatures and the Fréchet distance. The bound is independent of the dimension of the ambient Euclidean space, it improves upon a bound by Cohen-Steiner and Edelsbrunner, and it generalizes a result by Fáry and Chakerian.},
author = {Fasy, Brittany Terese},
journal = {Acta Scientiarum Mathematicarum},
number = {1-2},
pages = {359 -- 367},
publisher = {Szegedi Tudományegyetem},
title = {{The difference in length of curves in $R^n$}},
volume = {77},
year = {2011},
}
@article{3784,
abstract = {Advanced stages of Scyllarus phyllosoma larvae were collected by demersal trawling during fishery research surveys in the western Mediterranean Sea in 2003–2005. Nucleotide sequence analysis of the mitochondrial 16S rDNA gene allowed the final-stage phyllosoma of Scyllarus arctus to be identified among these larvae. Its morphology is described and illustrated. This constitutes the second complete description of a Scyllaridae phyllosoma with its specific identity being validated by molecular techniques (the first was S. pygmaeus). These results also solved a long lasting taxonomic anomaly of several species assigned to the ancient genus Phyllosoma Leach, 1814. Detailed examination indicated that the final-stage phyllosoma of S. arctus shows closer affinities with the American scyllarid Scyllarus depressus or with the Australian Scyllarus sp. b (sensu Phillips et al., 1981) than to its sympatric species S. pygmaeus.},
author = {Palero, Ferran and Guerao, Guillermo and Clark, Paul and Abello, Pere},
journal = {Journal of the Marine Biological Association of the United Kingdom},
number = {2},
pages = {485 -- 492},
publisher = {Cambridge University Press},
title = {{Scyllarus arctus (Crustacea: Decapoda: Scyllaridae) final stage phyllosoma identified by DNA analysis, with morphological description}},
doi = {10.1017/S0025315410000287},
volume = {91},
year = {2011},
}
@incollection{3796,
abstract = {We address the problem of covering ℝ n with congruent balls, while minimizing the number of balls that contain an average point. Considering the 1-parameter family of lattices defined by stretching or compressing the integer grid in diagonal direction, we give a closed formula for the covering density that depends on the distortion parameter. We observe that our family contains the thinnest lattice coverings in dimensions 2 to 5. We also consider the problem of packing congruent balls in ℝ n , for which we give a closed formula for the packing density as well. Again we observe that our family contains optimal configurations, this time densest packings in dimensions 2 and 3.},
author = {Edelsbrunner, Herbert and Kerber, Michael},
booktitle = {Rainbow of Computer Science},
editor = {Calude, Cristian and Rozenberg, Grzegorz and Salomaa, Arto},
pages = {20 -- 35},
publisher = {Springer},
title = {{Covering and packing with spheres by diagonal distortion in $R^n$}},
doi = {10.1007/978-3-642-19391-0_2},
volume = {6570},
year = {2011},
}
@incollection{3791,
abstract = {During the development of multicellular organisms, cell fate specification is followed by the sorting of different cell types into distinct domains from where the different tissues and organs are formed. Cell sorting involves both the segregation of a mixed population of cells with different fates and properties into distinct domains, and the active maintenance of their segregated state. Because of its biological importance and apparent resemblance to fluid segregation in physics, cell sorting was extensively studied by both biologists and physicists over the last decades. Different theories were developed that try to explain cell sorting on the basis of the physical properties of the constituent cells. However, only recently the molecular and cellular mechanisms that control the physical properties driving cell sorting, have begun to be unraveled. In this review, we will provide an overview of different cell-sorting processes in development and discuss how these processes can be explained by the different sorting theories, and how these theories in turn can be connected to the molecular and cellular mechanisms driving these processes.},
author = {Krens, Gabriel and Heisenberg, Carl-Philipp J},
booktitle = {Forces and Tension in Development},
editor = {Labouesse, Michel},
pages = {189 -- 213},
publisher = {Elsevier},
title = {{Cell sorting in development}},
doi = {10.1016/B978-0-12-385065-2.00006-2},
volume = {95},
year = {2011},
}
@article{2409,
abstract = {Background: The availability of many gene alignments with overlapping taxon sets raises the question of which strategy is the best to infer species phylogenies from multiple gene information. Methods and programs abound that use the gene alignment in different ways to reconstruct the species tree. In particular, different methods combine the original data at different points along the way from the underlying sequences to the final tree. Accordingly, they are classified into superalignment, supertree and medium-level approaches. Here, we present a simulation study to compare different methods from each of these three approaches.
Results: We observe that superalignment methods usually outperform the other approaches over a wide range of parameters including sparse data and gene-specific evolutionary parameters. In the presence of high incongruency among gene trees, however, other combination methods show better performance than the superalignment approach. Surprisingly, some supertree and medium-level methods exhibit, on average, worse results than a single gene phylogeny with complete taxon information.
Conclusions: For some methods, using the reconstructed gene tree as an estimation of the species tree is superior to the combination of incomplete information. Superalignment usually performs best since it is less susceptible to stochastic error. Supertree methods can outperform superalignment in the presence of gene-tree conflict.},
author = {Kupczok, Anne and Schmidt, Heiko and Von Haeseler, Arndt},
journal = {Algorithms for Molecular Biology},
number = {1},
publisher = {BioMed Central},
title = {{Accuracy of phylogeny reconstruction methods combining overlapping gene data sets}},
doi = {10.1186/1748-7188-5-37},
volume = {5},
year = {2010},
}
@phdthesis{3962,
author = {Pflicke, Holger},
publisher = {IST Austria},
title = {{Dendritic cell migration across basement membranes in the skin}},
year = {2010},
}
@article{4134,
abstract = {All species are restricted in their distribution. Currently, ecological models can only explain such limits if patches vary in quality, leading to asymmetrical dispersal, or if genetic variation is too low at the margins for adaptation. However, population genetic models suggest that the increase in genetic variance resulting from dispersal should allow adaptation to almost any ecological gradient. Clearly therefore, these models miss something that prevents evolution in natural populations. We developed an individual-based simulation to explore stochastic effects in these models. At high carrying capacities, our simulations largely agree with deterministic predictions. However, when carrying capacity is low, the population fails to establish for a wide range of parameter values where adaptation was expected from previous models. Stochastic or transient effects appear critical around the boundaries in parameter space between simulation behaviours. Dispersal, gradient steepness, and population density emerge as key factors determining adaptation on an ecological gradient. },
author = {Bridle, Jon and Polechova, Jitka and Kawata, Masakado and Butlin, Roger},
journal = {Ecology Letters},
number = {4},
pages = {485 -- 494},
publisher = {Wiley-Blackwell},
title = {{Why is adaptation prevented at ecological margins? New insights from individual-based simulations}},
doi = {10.1111/j.1461-0248.2010.01442.x},
volume = {13},
year = {2010},
}
@article{4157,
abstract = {Integrin- and cadherin-mediated adhesion is central for cell and tissue morphogenesis, allowing cells and tissues to change shape without losing integrity. Studies predominantly in cell culture showed that mechanosensation through adhesion structures is achieved by force-mediated modulation of their molecular composition. The specific molecular composition of adhesion sites in turn determines their signalling activity and dynamic reorganization. Here, we will review how adhesion sites respond to mechanical stimuli, and how spatially and temporally regulated signalling from different adhesion sites controls cell migration and tissue morphogenesis.},
author = {Papusheva, Ekaterina and Heisenberg, Carl-Philipp J},
journal = {EMBO Journal},
number = {16},
pages = {2753 -- 2768},
publisher = {Wiley-Blackwell},
title = {{Spatial organization of adhesion: force-dependent regulation and function in tissue morphogenesis}},
doi = {10.1038/emboj.2010.182},
volume = {29},
year = {2010},
}
@article{4243,
abstract = {We investigate a new model for populations evolving in a spatial continuum. This model can be thought of as a spatial version of the Lambda-Fleming-Viot process. It explicitly incorporates both small scale reproduction events and large scale extinction-recolonisation events. The lineages ancestral to a sample from a population evolving according to this model can be described in terms of a spatial version of the Lambda-coalescent. Using a technique of Evans (1997), we prove existence and uniqueness in law for the model. We then investigate the asymptotic behaviour of the genealogy of a finite number of individuals sampled uniformly at random (or more generally `far enough apart') from a two-dimensional torus of sidelength L as L tends to infinity. Under appropriate conditions (and on a suitable timescale) we can obtain as limiting genealogical processes a Kingman coalescent, a more general Lambda-coalescent or a system of coalescing Brownian motions (with a non-local coalescence mechanism).},
author = {Barton, Nicholas H and Etheridge, Alison and Véber, Amandine},
journal = {Electronic Journal of Probability},
number = {7},
pages = {162 -- 216},
publisher = {Institute of Mathematical Statistics},
title = {{A new model for evolution in a spatial continuum}},
doi = {10.1214/EJP.v15-741},
volume = {15},
year = {2010},
}
@inbook{4339,
abstract = {Mit diesem Buch möchten wir einen Überblick der aktuellen Diskussion zum Thema Bibliothek 2.0 geben und den Stand der tatsächlichen Umsetzung der Web 2.0-Ansätze in deutschsprachigen Bibliotheken beleuchten. An dieser Stelle ist die Frage erlaubt, warum es zu einer Zeit, in der es bereits die ersten "Web 3.0"- Konferenzen gibt, eines Handbuches der Bibliothek 2.0 noch bedarf. Und warum es überhaupt ein deutschsprachiges Handbuch zur Bibliothek 2.0 braucht, wo es doch bereits verschiedenste Publikationen zu diesem Thema aus anderen Ländern, insbesondere des angloamerikanischen Raums gibt. Ist dazu nicht bereits alles gesagt?},
author = {Bergmann, Julia and Danowski, Patrick},
booktitle = {Handbuch Bibliothek 2.0},
editor = {Bergmann, Julia and Danowski, Patrick},
pages = {5 -- 20},
publisher = {De Gruyter},
title = {{Ist Bibliothek 2.0 überhaupt noch relevant? – Eine Einleitung in das Handbuch}},
doi = {10.1515/9783110232103},
year = {2010},
}
@book{4346,
abstract = {With the term "Library 2.0" the editors mean an institution which applies the principles of the Web 2.0 such as openness, re-use, collaboration and interaction in the entire organization. Libraries are extending their service offerings and work processes to include the potential of Web 2.0 technologies. This changes the job description and self-image of librarians. The collective volume offers a complete overview of the topic Library 2.0 and the current state of developments from a technological, sociological, information theoretical and practice-oriented perspective.},
author = {Danowski, Patrick and Bergmann, Julia},
publisher = {De Gruyter},
title = {{Handbuch Bibliothek 2.0}},
year = {2010},
}
@inproceedings{4361,
abstract = {Depth-bounded processes form the most expressive known fragment of the π-calculus for which interesting verification problems are still decidable. In this paper we develop an adequate domain of limits for the well-structured transition systems that are induced by depth-bounded processes. An immediate consequence of our result is that there exists a forward algorithm that decides the covering problem for this class. Unlike backward algorithms, the forward algorithm terminates even if the depth of the process is not known a priori. More importantly, our result suggests a whole spectrum of forward algorithms that enable the effective verification of a large class of mobile systems.},
author = {Wies, Thomas and Zufferey, Damien and Henzinger, Thomas A},
editor = {Ong, Luke},
location = {Paphos, Cyprus},
pages = {94 -- 108},
booktitle = {FoSSaCS: Foundations of Software Science and Computational Structures},
publisher = {Springer},
title = {{Forward analysis of depth-bounded processes}},
doi = {10.1007/978-3-642-12032-9_8},
volume = {6014},
year = {2010},
}
@inproceedings{4362,
abstract = {Software transactional memories (STMs) promise simple and efficient concurrent programming. Several correctness properties have been proposed for STMs. Based on a bounded conflict graph algorithm for verifying correctness of STMs, we develop TRACER, a tool for runtime verification of STM implementations. The novelty of TRACER lies in the way it combines coarse and precise runtime analyses to guarantee sound and complete verification in an efficient manner. We implement TRACER in the TL2 STM implementation. We evaluate the performance of TRACER on STAMP benchmarks. While a precise runtime verification technique based on conflict graphs results in an average slowdown of 60x, the two-level approach of TRACER performs complete verification with an average slowdown of around 25x across different benchmarks.},
author = {Singh, Vasu},
editor = {Sokolsky, Oleg and Rosu, Grigore and Tillmann, Nikolai and Barringer, Howard and Falcone, Ylies and Finkbeiner, Bernd and Havelund, Klaus and Lee, Insup and Pace, Gordon},
location = {St. Julians, Malta},
pages = {421 -- 435},
publisher = {Springer},
title = {{Runtime verification for software transactional memories}},
doi = {10.1007/978-3-642-16612-9_32},
volume = {6418},
year = {2010},
}
@inproceedings{4369,
abstract = {In this paper we propose a novel technique for constructing timed automata from properties expressed in the logic mtl, under bounded-variability assumptions. We handle full mtl and include all future operators. Our construction is based on separation of the continuous time monitoring of the input sequence and discrete predictions regarding the future. The separation of the continuous from the discrete allows us to determinize our automata in an exponential construction that does not increase the number of clocks. This leads to a doubly exponential construction from mtl to deterministic timed automata, compared with triply exponential using existing approaches. We offer an alternative to the existing approach to linear real-time model checking, which has never been implemented. It further offers a unified framework for model checking, runtime monitoring, and synthesis, in an approach that can reuse tools, implementations, and insights from the discrete setting.},
author = {Nickovic, Dejan and Piterman, Nir},
editor = {Henzinger, Thomas A and Chatterjee, Krishnendu},
location = {Klosterneuburg, Austria},
pages = {152 -- 167},
publisher = {Springer},
title = {{From MTL to deterministic timed automata}},
doi = {10.1007/978-3-642-15297-9_13},
volume = {6246},
year = {2010},
}
@inproceedings{4378,
abstract = {Techniques such as verification condition generation, predicate abstraction, and expressive type systems reduce software verification to proving formulas in expressive logics. Programs and their specifications often make use of data structures such as sets, multisets, algebraic data types, or graphs. Consequently, formulas generated from verification also involve such data structures. To automate the proofs of such formulas we propose a logic (a “calculus”) of such data structures. We build the calculus by starting from decidable logics of individual data structures, and connecting them through functions and sets, in ways that go beyond the frameworks such as Nelson-Oppen. The result are new decidable logics that can simultaneously specify properties of different kinds of data structures and overcome the limitations of the individual logics. Several of our decidable logics include abstraction functions that map a data structure into its more abstract view (a tree into a multiset, a multiset into a set), into a numerical quantity (the size or the height), or into the truth value of a candidate data structure invariant (sortedness, or the heap property). For algebraic data types, we identify an asymptotic many-to-one condition on the abstraction function that guarantees the existence of a decision procedure. In addition to the combination based on abstraction functions, we can combine multiple data structure theories if they all reduce to the same data structure logic. As an instance of this approach, we describe a decidable logic whose formulas are propositional combinations of formulas in: weak monadic second-order logic of two successors, two-variable logic with counting, multiset algebra with Presburger arithmetic, the Bernays-Schönfinkel-Ramsey class of first-order logic, and the logic of algebraic data types with the set content function. 
The subformulas in this combination can share common variables that refer to sets of objects along with the common set algebra operations. Such sound and complete combination is possible because the relations on sets definable in the component logics are all expressible in Boolean Algebra with Presburger Arithmetic. Presburger arithmetic and its new extensions play an important role in our decidability results. In several cases, when we combine logics that belong to NP, we can prove the satisfiability for the combined logic is still in NP.},
author = {Kuncak, Viktor and Piskac, Ruzica and Suter, Philippe and Wies, Thomas},
editor = {Barthe, Gilles and Hermenegildo, Manuel},
location = {Madrid, Spain},
pages = {26 -- 44},
publisher = {Springer},
title = {{Building a calculus of data structures}},
doi = {10.1007/978-3-642-11319-2_6},
volume = {5944},
year = {2010},
}
@inproceedings{4380,
abstract = {Cloud computing is an emerging paradigm aimed to offer users pay-per-use computing resources, while leaving the burden of managing the computing infrastructure to the cloud provider. We present a new programming and pricing model that gives the cloud user the flexibility of trading execution speed and price on a per-job basis. We discuss the scheduling and resource management challenges for the cloud provider that arise in the implementation of this model. We argue that techniques from real-time and embedded software can be useful in this context.},
author = {Henzinger, Thomas A and Tomar, Anmol and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
location = {Arizona, USA},
pages = {1 -- 8},
publisher = {ACM},
title = {{A marketplace for cloud resources}},
doi = {10.1145/1879021.1879022},
year = {2010},
}
@inproceedings{4381,
abstract = {Cloud computing aims to give users virtually unlimited pay-per-use computing resources without the burden of managing the underlying infrastructure. We claim that, in order to realize the full potential of cloud computing, the user must be presented with a pricing model that offers flexibility at the requirements level, such as a choice between different degrees of execution speed and the cloud provider must be presented with a programming model that offers flexibility at the execution level, such as a choice between different scheduling policies. In such a flexible framework, with each job, the user purchases a virtual computer with the desired speed and cost characteristics, and the cloud provider can optimize the utilization of resources across a stream of jobs from different users. We designed a flexible framework to test our hypothesis, which is called FlexPRICE (Flexible Provisioning of Resources in a Cloud Environment) and works as follows. A user presents a job to the cloud. The cloud finds different schedules to execute the job and presents a set of quotes to the user in terms of price and duration for the execution. The user then chooses a particular quote and the cloud is obliged to execute the job according to the chosen quote. FlexPRICE thus hides the complexity of the actual scheduling decisions from the user, but still provides enough flexibility to meet the users actual demands. We implemented FlexPRICE in a simulator called PRICES that allows us to experiment with our framework. We observe that FlexPRICE provides a wide range of execution options-from fast and expensive to slow and cheap-- for the whole spectrum of data-intensive and computation-intensive jobs. We also observe that the set of quotes computed by FlexPRICE do not vary as the number of simultaneous jobs increases.},
author = {Henzinger, Thomas A and Tomar, Anmol and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
location = {Miami, USA},
pages = {83 -- 90},
publisher = {IEEE},
title = {{FlexPRICE: Flexible provisioning of resources in a cloud environment}},
doi = {10.1109/CLOUD.2010.71},
year = {2010},
}
@inproceedings{4382,
abstract = {Transactional memory (TM) has shown potential to simplify the task of writing concurrent programs. Inspired by classical work on databases, formal definitions of the semantics of TM executions have been proposed. Many of these definitions assumed that accesses to shared data are solely performed through transactions. In practice, due to legacy code and concurrency libraries, transactions in a TM have to share data with non-transactional operations. The semantics of such interaction, while widely discussed by practitioners, lacks a clear formal specification. Those interactions can vary, sometimes in subtle ways, between TM implementations and underlying memory models. We propose a correctness condition for TMs, parametrized opacity, to formally capture the now folklore notion of strong atomicity by stipulating the two following intuitive requirements: first, every transaction appears as if it is executed instantaneously with respect to other transactions and non-transactional operations, and second, non-transactional operations conform to the given underlying memory model. We investigate the inherent cost of implementing parametrized opacity. We first prove that parametrized opacity requires either instrumenting non-transactional operations (for most memory models) or writing to memory by transactions using potentially expensive read-modify-write instructions (such as compare-and-swap). Then, we show that for a class of practical relaxed memory models, parametrized opacity can indeed be implemented with constant-time instrumentation of non-transactional writes and no instrumentation of non-transactional reads. We show that, in practice, parametrizing the notion of correctness allows developing more efficient TM implementations.},
author = {Guerraoui, Rachid and Henzinger, Thomas A and Kapalka, Michal and Singh, Vasu},
location = {Santorini, Greece},
pages = {263 -- 272},
publisher = {ACM},
title = {{Transactions in the jungle}},
doi = {10.1145/1810479.1810529},
year = {2010},
}
@inproceedings{4388,
abstract = {GIST is a tool that (a) solves the qualitative analysis problem of turn-based probabilistic games with ω-regular objectives; and (b) synthesizes reasonable environment assumptions for synthesis of unrealizable specifications. Our tool provides the first and efficient implementations of several reduction-based techniques to solve turn-based probabilistic games, and uses the analysis of turn-based probabilistic games for synthesizing environment assumptions for unrealizable specifications.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Jobstmann, Barbara and Radhakrishna, Arjun},
location = {Edinburgh, UK},
pages = {665 -- 669},
publisher = {Springer},
title = {{GIST: A solver for probabilistic games}},
doi = {10.1007/978-3-642-14295-6_57},
volume = {6174},
year = {2010},
}
@inproceedings{4389,
abstract = {Digital components play a central role in the design of complex embedded systems. These components are interconnected with other, possibly analog, devices and the physical environment. This environment cannot be entirely captured and can provide inaccurate input data to the component. It is thus important for digital components to have a robust behavior, i.e. the presence of a small change in the input sequences should not result in a drastic change in the output sequences. In this paper, we study a notion of robustness for sequential circuits. However, since sequential circuits may have parts that are naturally discontinuous (e.g., digital controllers with switching behavior), we need a flexible framework that accommodates this fact and leaves discontinuous parts of the circuit out from the robustness analysis. As a consequence, we consider sequential circuits that have their input variables partitioned into two disjoint sets: control and disturbance variables. Our contributions are (1) a definition of robustness for sequential circuits as a form of continuity with respect to disturbance variables, (2) the characterization of the exact class of sequential circuits that are robust according to our definition, (3) an algorithm to decide whether a sequential circuit is robust or not.},
author = {Doyen, Laurent and Henzinger, Thomas A and Legay, Axel and Nickovic, Dejan},
pages = {77 -- 84},
publisher = {IEEE},
title = {{Robustness of sequential circuits}},
doi = {10.1109/ACSD.2010.26},
year = {2010},
}
@inproceedings{4390,
abstract = {Concurrent data structures with fine-grained synchronization are notoriously difficult to implement correctly. The difficulty of reasoning about these implementations does not stem from the number of variables or the program size, but rather from the large number of possible interleavings. These implementations are therefore prime candidates for model checking. We introduce an algorithm for verifying linearizability of singly-linked heap-based concurrent data structures. We consider a model consisting of an unbounded heap where each vertex stores an element from an unbounded data domain, with a restricted set of operations for testing and updating pointers and data elements. Our main result is that linearizability is decidable for programs that invoke a fixed number of methods, possibly in parallel. This decidable fragment covers many of the common implementation techniques — fine-grained locking, lazy synchronization, and lock-free synchronization. We also show how the technique can be used to verify optimistic implementations with the help of programmer annotations. We developed a verification tool CoLT and evaluated it on a representative sample of Java implementations of the concurrent set data structure. The tool verified linearizability of a number of implementations, found a known error in a lock-free implementation and proved that the corrected version is linearizable.},
author = {Cerny, Pavol and Radhakrishna, Arjun and Zufferey, Damien and Chaudhuri, Swarat and Alur, Rajeev},
location = {Edinburgh, UK},
pages = {465 -- 479},
publisher = {Springer},
title = {{Model checking of linearizability of concurrent list implementations}},
doi = {10.1007/978-3-642-14295-6_41},
volume = {6174},
year = {2010},
}
@incollection{4392,
abstract = {While a boolean notion of correctness is given by a preorder on systems and properties, a quantitative notion of correctness is defined by a distance function on systems and properties, where the distance between a system and a property provides a measure of “fit” or “desirability.” In this article, we explore several ways how the simulation preorder can be generalized to a distance function. This is done by equipping the classical simulation game between a system and a property with quantitative objectives. In particular, for systems that satisfy a property, a quantitative simulation game can measure the “robustness” of the satisfaction, that is, how much the system can deviate from its nominal behavior while still satisfying the property. For systems that violate a property, a quantitative simulation game can measure the “seriousness” of the violation, that is, how much the property has to be modified so that it is satisfied by the system. These distances can be computed in polynomial time, since the computation reduces to the value problem in limit average games with constant weights. Finally, we demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes. },
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
booktitle = {Time For Verification: Essays in Memory of Amir Pnueli},
editor = {Manna, Zohar and Peled, Doron},
pages = {42 -- 60},
publisher = {Springer},
title = {{Quantitative Simulation Games}},
doi = {10.1007/978-3-642-13754-9_3},
volume = {6200},
year = {2010},
}
@inproceedings{4393,
abstract = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of “fit” or “desirability.” We extend the simulation preorder to the quantitative setting, by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the implementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
location = {Paris, France},
pages = {235 -- 268},
publisher = {Springer},
title = {{Simulation distances}},
doi = {10.1007/978-3-642-15375-4_18},
volume = {6269},
year = {2010},
}