@inproceedings{2715,
abstract = {We consider Markov decision processes (MDPs) with specifications given as Büchi (liveness) objectives. We consider the problem of computing the set of almost-sure winning vertices from where the objective can be ensured with probability 1. We study for the first time the average case complexity of the classical algorithm for computing the set of almost-sure winning vertices for MDPs with Büchi objectives. Our contributions are as follows: First, we show that for MDPs with constant out-degree the expected number of iterations is at most logarithmic and the average case running time is linear (as compared to the worst case linear number of iterations and quadratic time complexity). Second, for the average case analysis over all MDPs we show that the expected number of iterations is constant and the average case running time is linear (again as compared to the worst case linear number of iterations and quadratic time complexity). Finally we also show that given that all MDPs are equally likely, the probability that the classical algorithm requires more than constant number of iterations is exponentially small.},
author = {Chatterjee, Krishnendu and Joglekar, Manas and Shah, Nisarg},
booktitle = {Leibniz International Proceedings in Informatics},
location = {Hyderabad, India},
pages = {461 -- 473},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Average case analysis of the classical algorithm for Markov decision processes with Büchi objectives}},
doi = {10.4230/LIPIcs.FSTTCS.2012.461},
volume = {18},
year = {2012},
}
@inproceedings{2825,
abstract = {We study the problem of maximum marginal prediction (MMP) in probabilistic graphical models, a task that occurs, for example, as the Bayes optimal decision rule under a Hamming loss. MMP is typically performed as a two-stage procedure: one estimates each variable's marginal probability and then forms a prediction from the states of maximal probability. In this work we propose a simple yet effective technique for accelerating MMP when inference is sampling-based: instead of the above two-stage procedure we directly estimate the posterior probability of each decision variable. This allows us to identify the point of time when we are sufficiently certain about any individual decision. Whenever this is the case, we dynamically prune the variables we are confident about from the underlying factor graph. Consequently, at any time only samples of variables whose decision is still uncertain need to be created. Experiments in two prototypical scenarios, multi-label classification and image inpainting, show that adaptive sampling can drastically accelerate MMP without sacrificing prediction accuracy.},
author = {Lampert, Christoph},
booktitle = {Advances in Neural Information Processing Systems},
location = {Lake Tahoe, NV, United States},
pages = {82 -- 90},
publisher = {Neural Information Processing Systems},
title = {{Dynamic pruning of factor graphs for maximum marginal prediction}},
volume = {1},
year = {2012},
}
@article{2848,
author = {Chatterjee, Krishnendu and Zufferey, Damien and Nowak, Martin},
title = {{Evolutionary game dynamics in populations with different learners}},
journal = {Journal of Theoretical Biology},
volume = {301},
pages = {161 -- 173},
publisher = {Elsevier},
doi = {10.1016/j.jtbi.2012.02.021},
year = {2012},
abstract = {We study evolutionary game theory in a setting where individuals learn from each other. We extend the traditional approach by assuming that a population contains individuals with different learning abilities. In particular, we explore the situation where individuals have different search spaces, when attempting to learn the strategies of others. The search space of an individual specifies the set of strategies learnable by that individual. The search space is genetically given and does not change under social evolutionary dynamics. We introduce a general framework and study a specific example in the context of direct reciprocity. For this example, we obtain the counter intuitive result that cooperation can only evolve for intermediate benefit-to-cost ratios, while small and large benefit-to-cost ratios favor defection. Our paper is a step toward making a connection between computational learning theory and evolutionary game dynamics.},
}
@article{2849,
author = {Edelsbrunner, Herbert and Strelkova, Nataliya},
title = {{On the configuration space of Steiner minimal trees}},
journal = {Russian Mathematical Surveys},
volume = {67},
number = {6},
pages = {1167 -- 1168},
publisher = {IOP Publishing Ltd.},
doi = {10.1070/RM2012v067n06ABEH004820},
year = {2012},
}
@inproceedings{2888,
abstract = {Formal verification aims to improve the quality of hardware and software by detecting errors before they do harm. At the basis of formal verification lies the logical notion of correctness, which purports to capture whether or not a circuit or program behaves as desired. We suggest that the boolean partition into correct and incorrect systems falls short of the practical need to assess the behavior of hardware and software in a more nuanced fashion against multiple criteria.},
author = {Henzinger, Thomas A},
booktitle = {Model Driven Engineering Languages and Systems (MODELS 2012)},
series = {Lecture Notes in Computer Science},
location = {Innsbruck, Austria},
pages = {1 -- 2},
publisher = {Springer},
title = {{Quantitative reactive models}},
doi = {10.1007/978-3-642-33666-9_1},
volume = {7590},
year = {2012},
}
@inproceedings{2890,
author = {Cerny, Pavol and Gopi, Sivakanth and Henzinger, Thomas A and Radhakrishna, Arjun and Totla, Nishant},
title = {{Synthesis from incompatible specifications}},
booktitle = {Proceedings of the tenth ACM international conference on Embedded software},
location = {Tampere, Finland},
pages = {53 -- 62},
publisher = {ACM},
doi = {10.1145/2380356.2380371},
year = {2012},
abstract = {Systems are often specified using multiple requirements on their behavior. In practice, these requirements can be contradictory. The classical approach to specification, verification, and synthesis demands more detailed specifications that resolve any contradictions in the requirements. These detailed specifications are usually large, cumbersome, and hard to maintain or modify. In contrast, quantitative frameworks allow the formalization of the intuitive idea that what is desired is an implementation that comes "closest" to satisfying the mutually incompatible requirements, according to a measure of fit that can be defined by the requirements engineer. One flexible framework for quantifying how "well" an implementation satisfies a specification is offered by simulation distances that are parameterized by an error model. We introduce this framework, study its properties, and provide an algorithmic solution for the following quantitative synthesis question: given two (or more) behavioral requirements specified by possibly incompatible finite-state machines, and an error model, find the finite-state implementation that minimizes the maximal simulation distance to the given requirements. Furthermore, we generalize the framework to handle infinite alphabets (for example, realvalued domains). We also demonstrate how quantitative specifications based on simulation distances might lead to smaller and easier to modify specifications. Finally, we illustrate our approach using case studies on error correcting codes and scheduler synthesis.},
}
@inproceedings{2891,
author = {Boker, Udi and Henzinger, Thomas A},
title = {{Approximate determinization of quantitative automata}},
booktitle = {Leibniz International Proceedings in Informatics},
location = {Hyderabad, India},
volume = {18},
pages = {362 -- 373},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
doi = {10.4230/LIPIcs.FSTTCS.2012.362},
year = {2012},
abstract = {Quantitative automata are nondeterministic finite automata with edge weights. They value a run by some function from the sequence of visited weights to the reals, and value a word by its minimal/maximal run. They generalize boolean automata, and have gained much attention in recent years. Unfortunately, important automaton classes, such as sum, discounted-sum, and limit-average automata, cannot be determinized. Yet, the quantitative setting provides the potential of approximate determinization. We define approximate determinization with respect to a distance function, and investigate this potential. We show that sum automata cannot be determinized approximately with respect to any distance function. However, restricting to nonnegative weights allows for approximate determinization with respect to some distance functions. Discounted-sum automata allow for approximate determinization, as the influence of a word’s suffix is decaying. However, the naive approach, of unfolding the automaton computations up to a sufficient level, is shown to be doubly exponential in the discount factor. We provide an alternative construction that is singly exponential in the discount factor, in the precision, and in the number of states. We prove matching lower bounds, showing exponential dependency on each of these three parameters. Average and limit-average automata are shown to prohibit approximate determinization with respect to any distance function, and this is the case even for two weights, 0 and 1.},
}
@article{2902,
author = {Edelsbrunner, Herbert and Musin, Oleg and Ukhalov, Alexey and Yakimova, Olga and Alexeev, Vladislav and Bogaevskaya, Victoriya and Gorohov, Andrey and Preobrazhenskaya, Margarita},
title = {{Fractal and computational geometry for generalizing cartographic objects}},
journal = {Modeling and Analysis of Information Systems},
volume = {19},
number = {6},
pages = {152 -- 160},
publisher = {Technische Universität Darmstadt},
year = {2012},
abstract = {We present an algorithm for simplifying linear cartographic objects and results obtained with a computer program implementing this algorithm.},
}
@inproceedings{2903,
abstract = {In order to enjoy a digital version of the Jordan Curve Theorem, it is common to use the closed topology for the foreground and the open topology for the background of a 2-dimensional binary image. In this paper, we introduce a single topology that enjoys this theorem for all thresholds decomposing a real-valued image into foreground and background. This topology is easy to construct and it generalizes to n-dimensional images.},
author = {Edelsbrunner, Herbert and Symonova, Olga},
booktitle = {Proceedings of the 9th International Symposium on Voronoi Diagrams in Science and Engineering},
location = {New Brunswick, NJ, United States},
pages = {41 -- 48},
publisher = {IEEE},
title = {{The adaptive topology of a digital image}},
doi = {10.1109/ISVD.2012.11},
year = {2012},
}
@article{2904,
abstract = {Generalized van der Corput sequences are onedimensional, infinite sequences in the unit interval. They are generated from permutations in integer base b and are the building blocks of the multi-dimensional Halton sequences. Motivated by recent progress of Atanassov on the uniform distribution behavior of Halton sequences, we study, among others, permutations of the form P(i) = ai (mod b) for coprime integers a and b. We show that multipliers a that either divide b - 1 or b + 1 generate van der Corput sequences with weak distribution properties. We give explicit lower bounds for the asymptotic distribution behavior of these sequences and relate them to sequences generated from the identity permutation in smaller bases, which are, due to Faure, the weakest distributed generalized van der Corput sequences.},
author = {Pausinger, Florian},
issn = {2118-8572},
journal = {Journal de Théorie des Nombres de Bordeaux},
number = {3},
pages = {729 -- 749},
publisher = {Université de Bordeaux},
title = {{Weak multipliers for generalized van der Corput sequences}},
doi = {10.5802/jtnb.819},
volume = {24},
year = {2012},
}