@inproceedings{3129,
abstract = {Let K be a simplicial complex and g the rank of its p-th homology group Hp(K) defined with ℤ2 coefficients. We show that we can compute a basis H of Hp(K) and annotate each p-simplex of K with a binary vector of length g with the following property: the annotations, summed over all p-simplices in any p-cycle z, provide the coordinate vector of the homology class [z] in the basis H. The basis and the annotations for all simplices can be computed in O(n ω ) time, where n is the size of K and ω < 2.376 is a quantity so that two n×n matrices can be multiplied in O(n ω ) time. The precomputed annotations permit answering queries about the independence or the triviality of p-cycles efficiently.
Using annotations of edges in 2-complexes, we derive better algorithms for computing optimal basis and optimal homologous cycles in 1 - dimensional homology. Specifically, for computing an optimal basis of H1(K) , we improve the previously known time complexity from O(n 4) to O(n ω + n 2 g ω − 1). Here n denotes the size of the 2-skeleton of K and g the rank of H1(K) . Computing an optimal cycle homologous to a given 1-cycle is NP-hard even for surfaces and an algorithm taking 2 O(g) nlogn time is known for surfaces. We extend this algorithm to work with arbitrary 2-complexes in O(n ω ) + 2 O(g) n 2logn time using annotations.
},
author = {Busaryev, Oleksiy and Cabello, Sergio and Chen, Chao and Dey, Tamal and Wang, Yusu},
location = {Helsinki, Finland},
pages = {189--200},
publisher = {Springer},
title = {{Annotating simplices with a homology basis and its applications}},
doi = {10.1007/978-3-642-31155-0_17},
volume = {7357},
year = {2012},
}
@article{3131,
abstract = {In large populations, many beneficial mutations may be simultaneously available and may compete with one another, slowing adaptation. By finding the probability of fixation of a favorable allele in a simple model of a haploid sexual population, we find limits to the rate of adaptive substitution, Λ, that depend on simple parameter combinations. When variance in fitness is low and linkage is loose, the baseline rate of substitution is Λ 0=2NU〈s〉, where N is the population size, U is the rate of beneficial mutations per genome, and 〈s〉 is their mean selective advantage. Heritable variance ν in log fitness due to unlinked loci reduces Λ by e -4ν under polygamy and e -8ν under monogamy. With a linear genetic map of length R Morgans, interference is yet stronger. We use a scaling argument to show that the density of adaptive substitutions depends on s, N, U, and R only through the baseline density: Λ/R=F(Λ 0/R). Under the approximation that the interference due to different sweeps adds up, we show that Λ/R~(Λ 0/R)/(1+2Λ 0/R), implying that interference prevents the rate of adaptive substitution from exceeding one per centimorgan per 200 generations. Simulations and numerical calculations confirm the scaling argument and confirm the additive approximation for Λ 0/R ≲ 1; for higher Λ 0/R, the rate of adaptation grows above R/2, but only very slowly. We also consider the effect of sweeps on neutral diversity and show that, while even occasional sweeps can greatly reduce neutral diversity, this effect saturates as sweeps become more common—diversity can be maintained even in populations experiencing very strong interference. Our results indicate that for some organisms the rate of adaptive substitution may be primarily recombination-limited, depending only weakly on the mutation supply and the strength of selection.},
author = {Weissman, Daniel and Barton, Nicholas H},
journal = {PLoS Genetics},
number = {6},
publisher = {Public Library of Science},
title = {{Limits to the rate of adaptive substitution in sexual populations}},
doi = {10.1371/journal.pgen.1002740},
volume = {8},
year = {2012},
}
@article{3117,
abstract = {We consider the problem of minimizing a function represented as a sum of submodular terms. We assume each term allows an efficient computation of exchange capacities. This holds, for example, for terms depending on a small number of variables, or for certain cardinality-dependent terms. A naive application of submodular minimization algorithms would not exploit the existence of specialized exchange capacity subroutines for individual terms. To overcome this, we cast the problem as a submodular flow (SF) problem in an auxiliary graph in such a way that applying most existing SF algorithms would rely only on these subroutines. We then explore in more detail Iwata's capacity scaling approach for submodular flows (Iwata 1997 [19]). In particular, we show how to improve its complexity in the case when the function contains cardinality-dependent terms.},
author = {Kolmogorov, Vladimir},
journal = {Discrete Applied Mathematics},
number = {15},
pages = {2246--2258},
publisher = {Elsevier},
title = {{Minimizing a sum of submodular functions}},
doi = {10.1016/j.dam.2012.05.025},
volume = {160},
year = {2012},
}
@article{3167,
author = {Weber, Michele},
journal = {Science},
number = {6077},
pages = {32--34},
publisher = {American Association for the Advancement of Science},
title = {{NextGen speaks 13}},
doi = {10.1126/science.336.6077.32},
volume = {336},
year = {2012},
}
@article{3256,
abstract = {We use a distortion to define the dual complex of a cubical subdivision of ℝ n as an n-dimensional subcomplex of the nerve of the set of n-cubes. Motivated by the topological analysis of high-dimensional digital image data, we consider such subdivisions defined by generalizations of quad- and oct-trees to n dimensions. Assuming the subdivision is balanced, we show that mapping each vertex to the center of the corresponding n-cube gives a geometric realization of the dual complex in ℝ n.},
author = {Edelsbrunner, Herbert and Kerber, Michael},
journal = {Discrete \& Computational Geometry},
number = {2},
pages = {393--414},
publisher = {Springer},
title = {{Dual complexes of cubical subdivisions of ℝn}},
doi = {10.1007/s00454-011-9382-4},
volume = {47},
year = {2012},
}
@article{3244,
author = {Danowski, Patrick},
journal = {BuB – Forum Bibliothek und Information},
number = {4},
pages = {284},
publisher = {Bock \& Herchen Verlag},
title = {{Die Zeit des Abwartens ist vorbei!}},
volume = {64},
year = {2012},
}
@inproceedings{3282,
abstract = {Traditionally, symmetric-key message authentication codes (MACs) are easily built from pseudorandom functions (PRFs). In this work we propose a wide variety of other approaches to building efficient MACs, without going through a PRF first. In particular, unlike deterministic PRF-based MACs, where each message has a unique valid tag, we give a number of probabilistic MAC constructions from various other primitives/assumptions. Our main results are summarized as follows: We show several new probabilistic MAC constructions from a variety of general assumptions, including CCA-secure encryption, Hash Proof Systems and key-homomorphic weak PRFs. By instantiating these frameworks under concrete number theoretic assumptions, we get several schemes which are more efficient than just using a state-of-the-art PRF instantiation under the corresponding assumption. For probabilistic MACs, unlike deterministic ones, unforgeability against a chosen message attack (uf-cma ) alone does not imply security if the adversary can additionally make verification queries (uf-cmva ). We give an efficient generic transformation from any uf-cma secure MAC which is "message-hiding" into a uf-cmva secure MAC. This resolves the main open problem of Kiltz et al. from Eurocrypt'11; By using our transformation on their constructions, we get the first efficient MACs from the LPN assumption. While all our new MAC constructions immediately give efficient actively secure, two-round symmetric-key identification schemes, we also show a very simple, three-round actively secure identification protocol from any weak PRF. In particular, the resulting protocol is much more efficient than the trivial approach of building a regular PRF from a weak PRF. © 2012 International Association for Cryptologic Research.},
author = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Kiltz, Eike and Wichs, Daniel},
location = {Cambridge, UK},
pages = {355--374},
publisher = {Springer},
title = {{Message authentication, revisited}},
doi = {10.1007/978-3-642-29011-4_22},
volume = {7237},
year = {2012},
}
@inproceedings{3124,
abstract = {We consider the problem of inference in a graphical model with binary variables. While in theory it is arguably preferable to compute marginal probabilities, in practice researchers often use MAP inference due to the availability of efficient discrete optimization algorithms. We bridge the gap between the two approaches by introducing the Discrete Marginals technique in which approximate marginals are obtained by minimizing an objective function with unary and pairwise terms over a discretized domain. This allows the use of techniques originally developed for MAP-MRF inference and learning. We explore two ways to set up the objective function - by discretizing the Bethe free energy and by learning it from training data. Experimental results show that for certain types of graphs a learned function can outperform the Bethe approximation. We also establish a link between the Bethe free energy and submodular functions.
},
author = {Korc, Filip and Kolmogorov, Vladimir and Lampert, Christoph},
location = {Edinburgh, Scotland},
publisher = {ICML},
title = {{Approximating marginals using discrete energy minimization}},
year = {2012},
}
@misc{5396,
abstract = {We consider the problem of inference in a graphical model with binary variables. While in theory it is arguably preferable to compute marginal probabilities, in practice researchers often use MAP inference due to the availability of efficient discrete optimization algorithms. We bridge the gap between the two approaches by introducing the Discrete Marginals technique in which approximate marginals are obtained by minimizing an objective function with unary and pairwise terms over a discretized domain. This allows the use of techniques originally developed for MAP-MRF inference and learning. We explore two ways to set up the objective function - by discretizing the Bethe free energy and by learning it from training data. Experimental results show that for certain types of graphs a learned function can outperform the Bethe approximation. We also establish a link between the Bethe free energy and submodular functions.},
author = {Korc, Filip and Kolmogorov, Vladimir and Lampert, Christoph},
issn = {2664-1690},
pages = {13},
publisher = {IST Austria},
title = {{Approximating marginals using discrete energy minimization}},
doi = {10.15479/AT:IST-2012-0003},
year = {2012},
}
@inbook{5745,
author = {Gupta, Ashutosh},
booktitle = {Automated Technology for Verification and Analysis},
isbn = {9783642333859},
issn = {0302-9743},
location = {Thiruvananthapuram, Kerala, India},
pages = {107--121},
publisher = {Springer Berlin Heidelberg},
title = {{Improved Single Pass Algorithms for Resolution Proof Reduction}},
doi = {10.1007/978-3-642-33386-6_10},
volume = {7561},
year = {2012},
}
@article{3249,
abstract = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of "fit" or "desirability". We extend the simulation preorder to the quantitative setting by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the implementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
journal = {Theoretical Computer Science},
number = {1},
pages = {21--35},
publisher = {Elsevier},
title = {{Simulation distances}},
doi = {10.1016/j.tcs.2011.08.002},
volume = {413},
year = {2012},
}
@article{2950,
abstract = {Contractile actomyosin rings drive various fundamental morphogenetic processes ranging from cytokinesis to wound healing. Actomyosin rings are generally thought to function by circumferential contraction. Here, we show that the spreading of the enveloping cell layer (EVL) over the yolk cell during zebrafish gastrulation is driven by a contractile actomyosin ring. In contrast to previous suggestions, we find that this ring functions not only by circumferential contraction but also by a flow-friction mechanism. This generates a pulling force through resistance against retrograde actomyosin flow. EVL spreading proceeds normally in situations where circumferential contraction is unproductive, indicating that the flow-friction mechanism is sufficient. Thus, actomyosin rings can function in epithelial morphogenesis through a combination of cable-constriction and flow-friction mechanisms.},
author = {Behrndt, Martin and Salbreux, Guillaume and Campinho, Pedro and Hauschild, Robert and Oswald, Felix and Roensch, Julia and Grill, Stephan and Heisenberg, Carl-Philipp J},
journal = {Science},
number = {6104},
pages = {257--260},
publisher = {American Association for the Advancement of Science},
title = {{Forces driving epithelial spreading in zebrafish gastrulation}},
doi = {10.1126/science.1224143},
volume = {338},
year = {2012},
}
@misc{5377,
abstract = {Two-player games on graphs are central in many problems in formal verification and program analysis such as synthesis and verification of open systems. In this work we consider solving recursive game graphs (or pushdown game graphs) that can model the control flow of sequential programs with recursion. While pushdown games have been studied before with qualitative objectives, such as reachability and ω-regular objectives, in this work we study for the first time such games with the most well-studied quantitative objective, namely, mean-payoff objectives. In pushdown games two types of strategies are relevant: (1) global strategies, that depend on the entire global history; and (2) modular strategies, that have only local memory and thus do not depend on the context of invocation, but only on the history of the current invocation of the module. Our main results are as follows: (1) One-player pushdown games with mean-payoff objectives under global strategies are decidable in polynomial time. (2) Two- player pushdown games with mean-payoff objectives under global strategies are undecidable. (3) One-player pushdown games with mean-payoff objectives under modular strategies are NP- hard. (4) Two-player pushdown games with mean-payoff objectives under modular strategies can be solved in NP (i.e., both one-player and two-player pushdown games with mean-payoff objectives under modular strategies are NP-complete). We also establish the optimal strategy complexity showing that global strategies for mean-payoff objectives require infinite memory even in one-player pushdown games; and memoryless modular strategies are sufficient in two- player pushdown games. Finally we also show that all the problems have the same complexity if the stack boundedness condition is added, where along with the mean-payoff objective the player must also ensure that the stack height is bounded.},
author = {Chatterjee, Krishnendu and Velner, Yaron},
issn = {2664-1690},
pages = {33},
publisher = {IST Austria},
title = {{Mean-payoff pushdown games}},
doi = {10.15479/AT:IST-2012-0002},
year = {2012},
}
@article{2967,
abstract = {For programs whose data variables range over Boolean or finite domains, program verification is decidable, and this forms the basis of recent tools for software model checking. In this article, we consider algorithmic verification of programs that use Boolean variables, and in addition, access a single read-only array whose length is potentially unbounded, and whose elements range over an unbounded data domain. We show that the reachability problem, while undecidable in general, is (1) PSPACE-complete for programs in which the array-accessing for-loops are not nested, (2) decidable for a restricted class of programs with doubly nested loops. The second result establishes connections to automata and logics defining languages over data words.},
author = {Alur, Rajeev and Cerny, Pavol and Weinstein, Scott},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {3},
publisher = {ACM},
title = {{Algorithmic analysis of array-accessing programs}},
doi = {10.1145/2287718.2287727},
volume = {13},
year = {2012},
}
@inproceedings{2955,
abstract = {We consider two-player stochastic games played on finite graphs with reachability objectives where the first player tries to ensure a target state to be visited almost-surely (i.e., with probability 1), or positively (i.e., with positive probability), no matter the strategy of the second player. We classify such games according to the information and the power of randomization available to the players. On the basis of information, the game can be one-sided with either (a) player 1, or (b) player 2 having partial observation (and the other player has perfect observation), or two-sided with (c) both players having partial observation. On the basis of randomization, the players (a) may not be allowed to use randomization (pure strategies), or (b) may choose a probability distribution over actions but the actual random choice is external and not visible to the player (actions invisible), or (c) may use full randomization. Our main results for pure strategies are as follows. (1) For one-sided games with player 1 having partial observation we show that (in contrast to full randomized strategies) belief-based (subset-construction based) strategies are not sufficient, and we present an exponential upper bound on memory both for almostsure and positive winning strategies; we show that the problem of deciding the existence of almost-sure and positive winning strategies for player 1 is EXPTIME-complete. (2) For one-sided games with player 2 having partial observation we show that non-elementary memory is both necessary and sufficient for both almost-sure and positive winning strategies. (3) We show that for the general (two-sided) case finite-memory strategies are sufficient for both positive and almost-sure winning, and at least non-elementary memory is required. We establish the equivalence of the almost-sure winning problems for pure strategies and for randomized strategies with actions invisible. 
Our equivalence result exhibits serious flaws in previous results of the literature: we show a non-elementary memory lower bound for almost-sure winning whereas an exponential upper bound was previously claimed.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
booktitle = {Proceedings of the 2012 27th Annual ACM/IEEE Symposium on Logic in Computer Science},
location = {Dubrovnik, Croatia},
publisher = {IEEE},
title = {{Partial-observation stochastic games: How to win when belief fails}},
doi = {10.1109/LICS.2012.28},
year = {2012},
}
@inproceedings{2936,
abstract = {The notion of delays arises naturally in many computational models, such as, in the design of circuits, control systems, and dataflow languages. In this work, we introduce automata with delay blocks (ADBs), extending finite state automata with variable time delay blocks, for deferring individual transition output symbols, in a discrete-time setting. We show that the ADB languages strictly subsume the regular languages, and are incomparable in expressive power to the context-free languages. We show that ADBs are closed under union, concatenation and Kleene star, and under intersection with regular languages, but not closed under complementation and intersection with other ADB languages. We show that the emptiness and the membership problems are decidable in polynomial time for ADBs, whereas the universality problem is undecidable. Finally we consider the linear-time model checking problem, i.e., whether the language of an ADB is contained in a regular language, and show that the model checking problem is PSPACE-complete. Copyright 2012 ACM.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Prabhu, Vinayak},
booktitle = {Proceedings of the tenth ACM international conference on Embedded software},
location = {Tampere, Finland},
pages = {43--52},
publisher = {ACM},
title = {{Finite automata with time delay blocks}},
doi = {10.1145/2380356.2380370},
year = {2012},
}
@inproceedings{3251,
abstract = {Many infinite state systems can be seen as well-structured transition systems (WSTS), i.e., systems equipped with a well-quasi-ordering on states that is also a simulation relation. WSTS are an attractive target for formal analysis because there exist generic algorithms that decide interesting verification problems for this class. Among the most popular algorithms are acceleration-based forward analyses for computing the covering set. Termination of these algorithms can only be guaranteed for flattable WSTS. Yet, many WSTS of practical interest are not flattable and the question whether any given WSTS is flattable is itself undecidable. We therefore propose an analysis that computes the covering set and captures the essence of acceleration-based algorithms, but sacrifices precision for guaranteed termination. Our analysis is an abstract interpretation whose abstract domain builds on the ideal completion of the well-quasi-ordered state space, and a widening operator that mimics acceleration and controls the loss of precision of the analysis. We present instances of our framework for various classes of WSTS. Our experience with a prototype implementation indicates that, despite the inherent precision loss, our analysis often computes the precise covering set of the analyzed system.},
author = {Zufferey, Damien and Wies, Thomas and Henzinger, Thomas A},
location = {Philadelphia, PA, USA},
pages = {445--460},
publisher = {Springer},
title = {{Ideal abstractions for well structured transition systems}},
doi = {10.1007/978-3-642-27940-9_29},
volume = {7148},
year = {2012},
}
@article{3314,
abstract = {We introduce two-level discounted and mean-payoff games played by two players on a perfect-information stochastic game graph. The upper level game is a discounted or mean-payoff game and the lower level game is a (undiscounted) reachability game. Two-level games model hierarchical and sequential decision making under uncertainty across different time scales. For both discounted and mean-payoff two-level games, we show the existence of pure memoryless optimal strategies for both players and an ordered field property. We show that if there is only one player (Markov decision processes), then the values can be computed in polynomial time. It follows that whether the value of a player is equal to a given rational constant in two-level discounted or mean-payoff games can be decided in NP ∩ coNP. We also give an alternate strategy improvement algorithm to compute the value. © 2012 World Scientific Publishing Company.},
author = {Chatterjee, Krishnendu and Majumdar, Ritankar},
journal = {International Journal of Foundations of Computer Science},
number = {3},
pages = {609--625},
publisher = {World Scientific Publishing},
title = {{Discounting and averaging in games across time scales}},
doi = {10.1142/S0129054112400308},
volume = {23},
year = {2012},
}
@inproceedings{496,
abstract = {We study the expressive power of logical interpretations on the class of scattered trees, namely those with countably many infinite branches. Scattered trees can be thought of as the tree analogue of scattered linear orders. Every scattered tree has an ordinal rank that reflects the structure of its infinite branches. We prove, roughly, that trees and orders of large rank cannot be interpreted in scattered trees of small rank. We consider a quite general notion of interpretation: each element of the interpreted structure is represented by a set of tuples of subsets of the interpreting tree. Our trees are countable, not necessarily finitely branching, and may have finitely many unary predicates as labellings. We also show how to replace injective set-interpretations in (not necessarily scattered) trees by 'finitary' set-interpretations.},
author = {Rabinovich, Alexander and Rubin, Sasha},
location = {Dubrovnik, Croatia},
publisher = {IEEE},
title = {{Interpretations in trees with countably many branches}},
doi = {10.1109/LICS.2012.65},
year = {2012},
}
@inproceedings{2715,
abstract = {We consider Markov decision processes (MDPs) with specifications given as Büchi (liveness) objectives. We consider the problem of computing the set of almost-sure winning vertices from where the objective can be ensured with probability 1. We study for the first time the average case complexity of the classical algorithm for computing the set of almost-sure winning vertices for MDPs with Büchi objectives. Our contributions are as follows: First, we show that for MDPs with constant out-degree the expected number of iterations is at most logarithmic and the average case running time is linear (as compared to the worst case linear number of iterations and quadratic time complexity). Second, for the average case analysis over all MDPs we show that the expected number of iterations is constant and the average case running time is linear (again as compared to the worst case linear number of iterations and quadratic time complexity). Finally we also show that given that all MDPs are equally likely, the probability that the classical algorithm requires more than constant number of iterations is exponentially small.},
author = {Chatterjee, Krishnendu and Joglekar, Manas and Shah, Nisarg},
location = {Hyderabad, India},
pages = {461--473},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Average case analysis of the classical algorithm for Markov decision processes with Büchi objectives}},
doi = {10.4230/LIPIcs.FSTTCS.2012.461},
volume = {18},
year = {2012},
}