@article{3262,
abstract = {Living cells must control the reading out or "expression" of information encoded in their genomes, and this regulation often is mediated by transcription factors--proteins that bind to DNA and either enhance or repress the expression of nearby genes. But the expression of transcription factor proteins is itself regulated, and many transcription factors regulate their own expression in addition to responding to other input signals. Here we analyze the simplest of such self-regulatory circuits, asking how parameters can be chosen to optimize information transmission from inputs to outputs in the steady state. Some nonzero level of self-regulation is almost always optimal, with self-activation dominant when transcription factor concentrations are low and self-repression dominant when concentrations are high. In steady state the optimal self-activation is never strong enough to induce bistability, although there is a limit in which the optimal parameters are very close to the critical point.},
author = {Tkacik, Gasper and Walczak, Aleksandra and Bialek, William},
journal = {Physical Review E: Statistical, Nonlinear, and Soft Matter Physics},
number = {4},
publisher = {American Physical Society},
title = {{Optimizing information flow in small genetic networks. III. A self-interacting gene}},
doi = {10.1103/PhysRevE.85.041903},
volume = {85},
year = {2012},
}
@inproceedings{3265,
abstract = {We propose a mid-level statistical model for image segmentation that composes multiple figure-ground hypotheses (FG) obtained by applying constraints at different locations and scales, into larger interpretations (tilings) of the entire image. Inference is cast as optimization over sets of maximal cliques sampled from a graph connecting all non-overlapping figure-ground segment hypotheses. Potential functions over cliques combine unary, Gestalt-based figure qualities, and pairwise compatibilities among spatially neighboring segments, constrained by T-junctions and the boundary interface statistics of real scenes. Learning the model parameters is based on maximum likelihood, alternating between sampling image tilings and optimizing their potential function parameters. State-of-the-art results are reported on the Berkeley and Stanford segmentation datasets, as well as VOC2009, where a 28% improvement was achieved.},
author = {Ion, Adrian and Carreira, Joao and Sminchisescu, Cristian},
location = {Barcelona, Spain},
publisher = {IEEE},
title = {{Image segmentation by figure-ground composition into maximal cliques}},
doi = {10.1109/ICCV.2011.6126486},
year = {2011},
}
@article{3274,
abstract = {A boundary element model of a tunnel running through horizontally layered soil with anisotropic material properties is presented. Since there is no analytical fundamental solution for wave propagation inside a layered orthotropic medium in 3D, the fundamental displacements and stresses have to be calculated numerically. In our model this is done in the Fourier domain with respect to space and time. The assumption of a straight tunnel with infinite extension in the x direction makes it possible to decouple the system for every wave number k_x, leading to a 2.5D problem, which is suited for parallel computation. The special form of the fundamental solution resulting from our Fourier ansatz, and the fact that the calculation of the boundary integral equation is performed in the Fourier domain, enhance the stability and efficiency of the numerical calculations.},
author = {Rieckh, Georg and Kreuzer, Wolfgang and Waubke, Holger and Balazs, Peter},
journal = {Engineering Analysis with Boundary Elements},
number = {6},
pages = {960 -- 967},
publisher = {Elsevier},
title = {{A 2.5D-Fourier-BEM model for vibrations in a tunnel running through layered anisotropic soil}},
doi = {10.1016/j.enganabound.2011.12.014},
volume = {36},
year = {2012},
}
@inbook{3277,
abstract = {The problem of the origin of metazoa is becoming more urgent in the context of astrobiology. By now it is clear that clues to the understanding of this crucial transition in the evolution of life can arise from a fourth pathway besides the three possibilities in the quest for simplicity outlined by Bonner in his classical book. In other words, solar system exploration seems to be one way, in the long term, to elucidate the simplicity of evolutionary development. We place these ideas in the context of different inheritance systems, namely the genotypic and phenotypic replicators with limited or unlimited heredity, and ask which of these can support multicellular development, and to which degree of complexity. However, the quest for evidence on the evolution of biotas on planets around other stars does not seem to be feasible with present technology by direct visualization of living organisms on exoplanets. But this may be attempted on the Galilean moons of Jupiter, where there is a possibility of detecting reliable biomarkers in the next decade with the Europa Jupiter System Mission, in view of recent progress in landing micropenetrators on planetary or satellite surfaces. Mars is a second possibility in the inner Solar System, in spite of the multiple difficulties faced by the fleet of past, present and future missions. We discuss a series of preliminary ideas for elucidating the origin of metazoan analogues with available instrumentation in potential payloads of feasible space missions to the Galilean moons.},
author = {de Vladar, Harold and Chela Flores, Julian},
booktitle = {Life on Earth and Other Planetary Bodies},
pages = {387 -- 405},
publisher = {Springer},
title = {{Can the evolution of multicellularity be anticipated in the exploration of the solar system?}},
doi = {10.1007/978-94-007-4966-5_22},
volume = {24},
year = {2012},
}
@inproceedings{3279,
abstract = {We show a hardness-preserving construction of a PRF from any length-doubling PRG which improves upon known constructions whenever we can put a non-trivial upper bound q on the number of queries to the PRF. Our construction requires only O(log q) invocations of the underlying PRG with each query. In comparison, the number of invocations by the best previous hardness-preserving construction (GGM using Levin's trick) is logarithmic in the hardness of the PRG. For example, starting from an exponentially secure PRG {0,1}^n → {0,1}^{2n}, we get a PRF which is exponentially secure if queried at most q = exp(√n) times and where each invocation of the PRF requires Θ(√n) queries to the underlying PRG. This is much less than the Θ(n) required by known constructions.},
author = {Jain, Abhishek and Pietrzak, Krzysztof Z and Tentes, Aris},
location = {Taormina, Sicily, Italy},
pages = {369 -- 382},
publisher = {Springer},
title = {{Hardness preserving constructions of pseudorandom functions}},
doi = {10.1007/978-3-642-28914-9_21},
volume = {7194},
year = {2012},
}
@inproceedings{3280,
abstract = {The (decisional) learning with errors problem (LWE) asks to distinguish "noisy" inner products of a secret vector with random vectors from uniform. The learning parity with noise problem (LPN) is the special case where the elements of the vectors are bits. In recent years, the LWE and LPN problems have found many applications in cryptography. In this paper we introduce a (seemingly) much stronger adaptive assumption, called "subspace LWE" (SLWE), where the adversary can learn the inner product of the secret and random vectors after they were projected into an adaptively and adversarially chosen subspace. We prove that, surprisingly, the SLWE problem mapping into subspaces of dimension d is almost as hard as LWE using secrets of length d (the other direction is trivial). This result immediately implies that several existing cryptosystems whose security is based on the hardness of the LWE/LPN problems are provably secure in a much stronger sense than anticipated. As an illustrative example we show that the standard way of using LPN for symmetric CPA-secure encryption is even secure against a very powerful class of related-key attacks.},
author = {Pietrzak, Krzysztof Z},
location = {Taormina, Sicily, Italy},
pages = {548 -- 563},
publisher = {Springer},
title = {{Subspace LWE}},
doi = {10.1007/978-3-642-28914-9_31},
volume = {7194},
year = {2012},
}
@inproceedings{3281,
abstract = {We consider the problem of amplifying the "lossiness" of functions. We say that an oracle circuit C*: {0,1}^m → {0,1}^m amplifies relative lossiness from ℓ/n to L/m if for every function f: {0,1}^n → {0,1}^n it holds that (1) if f is injective then so is C^f, and (2) if f has image size at most 2^{n-ℓ}, then C^f has image size at most 2^{m-L}. The question is whether such a C* exists for L/m ≫ ℓ/n. This problem arises naturally in the context of cryptographic "lossy functions," where the relative lossiness is the key parameter. We show that for every circuit C* that makes at most t queries to f, the relative lossiness of C^f is at most L/m ≤ ℓ/n + O(log t)/n. In particular, no black-box method making a polynomial t = poly(n) number of queries can amplify relative lossiness by more than an O(log n)/n additive term. We show that this is tight by giving a simple construction (cascading with some randomization) that achieves such amplification.},
author = {Pietrzak, Krzysztof Z and Rosen, Alon and Segev, Gil},
location = {Taormina, Sicily, Italy},
pages = {458 -- 475},
publisher = {Springer},
title = {{Lossy functions do not amplify well}},
doi = {10.1007/978-3-642-28914-9_26},
volume = {7194},
year = {2012},
}
@inproceedings{3282,
abstract = {Traditionally, symmetric-key message authentication codes (MACs) are easily built from pseudorandom functions (PRFs). In this work we propose a wide variety of other approaches to building efficient MACs, without going through a PRF first. In particular, unlike deterministic PRF-based MACs, where each message has a unique valid tag, we give a number of probabilistic MAC constructions from various other primitives/assumptions. Our main results are summarized as follows: We show several new probabilistic MAC constructions from a variety of general assumptions, including CCA-secure encryption, Hash Proof Systems and key-homomorphic weak PRFs. By instantiating these frameworks under concrete number theoretic assumptions, we get several schemes which are more efficient than just using a state-of-the-art PRF instantiation under the corresponding assumption. For probabilistic MACs, unlike deterministic ones, unforgeability against a chosen message attack (uf-cma) alone does not imply security if the adversary can additionally make verification queries (uf-cmva). We give an efficient generic transformation from any uf-cma secure MAC which is "message-hiding" into a uf-cmva secure MAC. This resolves the main open problem of Kiltz et al. from Eurocrypt'11. By using our transformation on their constructions, we get the first efficient MACs from the LPN assumption. While all our new MAC constructions immediately give efficient actively secure, two-round symmetric-key identification schemes, we also show a very simple, three-round actively secure identification protocol from any weak PRF. In particular, the resulting protocol is much more efficient than the trivial approach of building a regular PRF from a weak PRF.},
author = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Kiltz, Eike and Wichs, Daniel},
location = {Cambridge, UK},
pages = {355 -- 374},
publisher = {Springer},
title = {{Message authentication, revisited}},
doi = {10.1007/978-3-642-29011-4_22},
volume = {7237},
year = {2012},
}
@article{3289,
abstract = {Viral manipulation of transduction pathways associated with key cellular functions such as survival, response to microbial infection, and cytoskeleton reorganization can provide the supportive milieu for a productive infection. Here, we demonstrate that vaccinia virus (VACV) infection leads to activation of the stress-activated protein kinase (SAPK)/extracellular signal-regulated kinase (ERK) kinase 4/7 (MKK4/7)-c-Jun N-terminal protein kinase 1/2 (JNK1/2) pathway; further, the stimulation of this pathway requires postpenetration, prereplicative events in the viral replication cycle. Although the formation of intracellular mature virus (IMV) was not affected in MKK4/7- or JNK1/2-knockout (KO) cells, we did note an accentuated deregulation of microtubule and actin network organization in infected JNK1/2-KO cells. This was followed by deregulated viral trafficking to the periphery and enhanced enveloped particle release. Furthermore, VACV infection induced alterations in cell contractility and morphology, and cell migration was reduced in the JNK-KO cells. In addition, phosphorylation of proteins implicated in early cell contractility and cell migration, such as microtubule-associated protein 1B and paxillin, respectively, was not detected in the VACV-infected KO cells. In sum, our findings uncover a regulatory role played by the MKK4/7-JNK1/2 pathway in cytoskeleton reorganization during VACV infection.},
author = {Pereira, Anna and Leite, Flávia and Brasil, Bruno and Soares Martins, Jamaria and Torres, Alice and Pimenta, Paulo and Souto Padrón, Thais and Traktman, Paula and Ferreira, Paulo and Kroon, Erna and Bonjardim, Cláudio},
journal = {Journal of Virology},
number = {1},
pages = {172 -- 184},
publisher = {American Society for Microbiology},
title = {{A vaccinia virus-driven interplay between the MKK4/7-JNK1/2 pathway and cytoskeleton reorganization}},
doi = {10.1128/JVI.05638-11},
volume = {86},
year = {2012},
}
@article{3310,
abstract = {The theory of persistent homology opens up the possibility to reason about topological features of a space or a function quantitatively and in combinatorial terms. We refer to this new angle at a classical subject within algebraic topology as a point calculus, which we present for the family of interlevel sets of a real-valued function. Our account of the subject is expository, devoid of proofs, and written for non-experts in algebraic topology.},
author = {Bendich, Paul and Cabello, Sergio and Edelsbrunner, Herbert},
journal = {Pattern Recognition Letters},
number = {11},
pages = {1436 -- 1444},
publisher = {Elsevier},
title = {{A point calculus for interlevel set homology}},
doi = {10.1016/j.patrec.2011.10.007},
volume = {33},
year = {2012},
}
@article{3314,
abstract = {We introduce two-level discounted and mean-payoff games played by two players on a perfect-information stochastic game graph. The upper level game is a discounted or mean-payoff game and the lower level game is an (undiscounted) reachability game. Two-level games model hierarchical and sequential decision making under uncertainty across different time scales. For both discounted and mean-payoff two-level games, we show the existence of pure memoryless optimal strategies for both players and an ordered field property. We show that if there is only one player (Markov decision processes), then the values can be computed in polynomial time. It follows that whether the value of a player is equal to a given rational constant in two-level discounted or mean-payoff games can be decided in NP ∩ coNP. We also give an alternate strategy improvement algorithm to compute the value.},
author = {Chatterjee, Krishnendu and Majumdar, Rupak},
journal = {International Journal of Foundations of Computer Science},
number = {3},
pages = {609 -- 625},
publisher = {World Scientific Publishing},
title = {{Discounting and averaging in games across time scales}},
doi = {10.1142/S0129054112400308},
volume = {23},
year = {2012},
}
@article{3317,
abstract = {The physical distance between presynaptic Ca2+ channels and the Ca2+ sensors that trigger exocytosis of neurotransmitter-containing vesicles is a key determinant of the signalling properties of synapses in the nervous system. Recent functional analysis indicates that in some fast central synapses, transmitter release is triggered by a small number of Ca2+ channels that are coupled to Ca2+ sensors at the nanometre scale. Molecular analysis suggests that this tight coupling is generated by protein–protein interactions involving Ca2+ channels, Ca2+ sensors and various other synaptic proteins. Nanodomain coupling has several functional advantages, as it increases the efficacy, speed and energy efficiency of synaptic transmission.},
author = {Eggermann, Emmanuel and Bucurenciu, Iancu and Goswami, Sarit and Jonas, Peter M},
journal = {Nature Reviews Neuroscience},
number = {1},
pages = {7 -- 21},
publisher = {Nature Publishing Group},
title = {{Nanodomain coupling between Ca2+ channels and sensors of exocytosis at fast mammalian synapses}},
doi = {10.1038/nrn3125},
volume = {13},
year = {2012},
}
@article{3331,
abstract = {Computing the topology of an algebraic plane curve C means computing a combinatorial graph that is isotopic to C and thus represents its topology in R^2. We prove that, for a polynomial of degree n with integer coefficients bounded by 2^ρ, the topology of the induced curve can be computed with Õ(·) bit operations (Õ indicates that we omit logarithmic factors). Our analysis improves the previous best known complexity bounds by a factor of n^2. The improvement is based on new techniques to compute and refine isolating intervals for the real roots of polynomials, and on the consequent amortized analysis of the critical fibers of the algebraic curve.},
author = {Kerber, Michael and Sagraloff, Michael},
journal = {Journal of Symbolic Computation},
number = {3},
pages = {239 -- 258},
publisher = {Elsevier},
title = {{A worst case bound for topology computation of algebraic curves}},
doi = {10.1016/j.jsc.2011.11.001},
volume = {47},
year = {2012},
}
@inproceedings{3341,
abstract = {We consider two-player stochastic games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine a probability distribution over the successor states. We also consider the important special case of turn-based stochastic games where players make moves in turns, rather than concurrently. We study concurrent games with ω-regular winning conditions specified as parity objectives. The value for player 1 for a parity objective is the maximal probability with which the player can guarantee the satisfaction of the objective against all strategies of the opponent. We study the problem of continuity and robustness of the value function in concurrent and turn-based stochastic parity games with respect to imprecision in the transition probabilities. We present quantitative bounds on the difference of the value function (in terms of the imprecision of the transition probabilities) and show value continuity for structurally equivalent concurrent games (two games are structurally equivalent if the support of the transition function is the same and the probabilities differ). We also show robustness of optimal strategies for structurally equivalent turn-based stochastic parity games. Finally, we show that the value continuity property breaks without the structural equivalence assumption (even for Markov chains) and show that our quantitative bound is asymptotically optimal. Hence our results are tight (the assumption is both necessary and sufficient) and optimal (our quantitative bound is asymptotically optimal).},
author = {Chatterjee, Krishnendu},
location = {Tallinn, Estonia},
pages = {270 -- 285},
publisher = {Springer},
title = {{Robustness of structurally equivalent concurrent parity games}},
doi = {10.1007/978-3-642-28729-9_18},
volume = {7213},
year = {2012},
}
@inproceedings{3251,
abstract = {Many infinite state systems can be seen as well-structured transition systems (WSTS), i.e., systems equipped with a well-quasi-ordering on states that is also a simulation relation. WSTS are an attractive target for formal analysis because there exist generic algorithms that decide interesting verification problems for this class. Among the most popular algorithms are acceleration-based forward analyses for computing the covering set. Termination of these algorithms can only be guaranteed for flattable WSTS. Yet, many WSTS of practical interest are not flattable and the question whether any given WSTS is flattable is itself undecidable. We therefore propose an analysis that computes the covering set and captures the essence of acceleration-based algorithms, but sacrifices precision for guaranteed termination. Our analysis is an abstract interpretation whose abstract domain builds on the ideal completion of the well-quasi-ordered state space, and a widening operator that mimics acceleration and controls the loss of precision of the analysis. We present instances of our framework for various classes of WSTS. Our experience with a prototype implementation indicates that, despite the inherent precision loss, our analysis often computes the precise covering set of the analyzed system.},
author = {Zufferey, Damien and Wies, Thomas and Henzinger, Thomas A},
location = {Philadelphia, PA, USA},
pages = {445 -- 460},
publisher = {Springer},
title = {{Ideal abstractions for well structured transition systems}},
doi = {10.1007/978-3-642-27940-9_29},
volume = {7148},
year = {2012},
}
@article{3242,
abstract = {Due to the omnipresent risk of epidemics, insect societies have evolved sophisticated disease defences at the individual and colony level. An intriguing yet little understood phenomenon is that social contact to pathogen-exposed individuals reduces susceptibility of previously naive nestmates to this pathogen. We tested whether such social immunisation in Lasius ants against the entomopathogenic fungus Metarhizium anisopliae is based on active upregulation of the immune system of nestmates following contact to an infectious individual or passive protection via transfer of immune effectors among group members—that is, active versus passive immunisation. We found no evidence for involvement of passive immunisation via transfer of antimicrobials among colony members. Instead, intensive allogrooming behaviour between naive and pathogen-exposed ants before fungal conidia firmly attached to their cuticle suggested passage of the pathogen from the exposed individuals to their nestmates. By tracing fluorescence-labelled conidia we indeed detected frequent pathogen transfer to the nestmates, where they caused low-level infections as revealed by growth of small numbers of fungal colony forming units from their dissected body content. These infections rarely led to death, but instead promoted an enhanced ability to inhibit fungal growth and an active upregulation of immune genes involved in antifungal defences (defensin and prophenoloxidase, PPO). Contrarily, there was no upregulation of the gene cathepsin L, which is associated with antibacterial and antiviral defences, and we found no increased antibacterial activity of nestmates of fungus-exposed ants. This indicates that social immunisation after fungal exposure is specific, similar to recent findings for individual-level immune priming in invertebrates. Epidemiological modeling further suggests that active social immunisation is adaptive, as it leads to faster elimination of the disease and lower death rates than passive immunisation. Interestingly, humans have also utilised the protective effect of low-level infections to fight smallpox by intentional transfer of low pathogen doses (“variolation” or “inoculation”).},
author = {Konrad, Matthias and Vyleta, Meghan and Theis, Fabian and Stock, Miriam and Tragust, Simon and Klatt, Martina and Drescher, Verena and Marr, Carsten and Ugelvig, Line V and Cremer, Sylvia},
journal = {PLoS Biology},
number = {4},
publisher = {Public Library of Science},
title = {{Social transfer of pathogenic fungus promotes active immunisation in ant colonies}},
doi = {10.1371/journal.pbio.1001300},
volume = {10},
year = {2012},
}
@misc{9755,
abstract = {Due to the omnipresent risk of epidemics, insect societies have evolved sophisticated disease defences at the individual and colony level. An intriguing yet little understood phenomenon is that social contact to pathogen-exposed individuals reduces susceptibility of previously naive nestmates to this pathogen. We tested whether such social immunisation in Lasius ants against the entomopathogenic fungus Metarhizium anisopliae is based on active upregulation of the immune system of nestmates following contact to an infectious individual or passive protection via transfer of immune effectors among group members—that is, active versus passive immunisation. We found no evidence for involvement of passive immunisation via transfer of antimicrobials among colony members. Instead, intensive allogrooming behaviour between naive and pathogen-exposed ants before fungal conidia firmly attached to their cuticle suggested passage of the pathogen from the exposed individuals to their nestmates. By tracing fluorescence-labelled conidia we indeed detected frequent pathogen transfer to the nestmates, where they caused low-level infections as revealed by growth of small numbers of fungal colony forming units from their dissected body content. These infections rarely led to death, but instead promoted an enhanced ability to inhibit fungal growth and an active upregulation of immune genes involved in antifungal defences (defensin and prophenoloxidase, PPO). Contrarily, there was no upregulation of the gene cathepsin L, which is associated with antibacterial and antiviral defences, and we found no increased antibacterial activity of nestmates of fungus-exposed ants. This indicates that social immunisation after fungal exposure is specific, similar to recent findings for individual-level immune priming in invertebrates. Epidemiological modeling further suggests that active social immunisation is adaptive, as it leads to faster elimination of the disease and lower death rates than passive immunisation. Interestingly, humans have also utilised the protective effect of low-level infections to fight smallpox by intentional transfer of low pathogen doses (“variolation” or “inoculation”).},
author = {Konrad, Matthias and Vyleta, Meghan and Theis, Fabian and Stock, Miriam and Klatt, Martina and Drescher, Verena and Marr, Carsten and Ugelvig, Line V and Cremer, Sylvia},
publisher = {Dryad},
title = {{Data from: Social transfer of pathogenic fungus promotes active immunisation in ant colonies}},
doi = {10.5061/dryad.sv37s},
year = {2012},
}
@misc{9757,
abstract = {To fight infectious diseases, host immune defences are employed at multiple levels. Sanitary behaviour, such as pathogen avoidance and removal, acts as a first line of defence to prevent infection [1] before activation of the physiological immune system. Insect societies have evolved a wide range of collective hygiene measures and intensive health care towards pathogen-exposed group members [2]. One of the most common behaviours is allogrooming, in which nestmates remove infectious particles from the body surfaces of exposed individuals [3]. Here we show that, in invasive garden ants, grooming of fungus-exposed brood is effective beyond the sheer mechanical removal of fungal conidiospores as it also includes chemical disinfection through the application of poison produced by the ants themselves. Formic acid is the main active component of the poison. It inhibits fungal growth of conidiospores remaining on the brood surface after grooming and also those collected in the mouth of the grooming ant. This dual function is achieved by uptake of the poison droplet into the mouth through acidopore self-grooming and subsequent application onto the infectious brood via brood grooming. This extraordinary behaviour extends current understanding of grooming and the establishment of social immunity in insect societies.},
author = {Tragust, Simon and Mitteregger, Barbara and Barone, Vanessa and Konrad, Matthias and Ugelvig, Line V and Cremer, Sylvia},
publisher = {Dryad},
title = {{Data from: Ants disinfect fungus-exposed brood by oral uptake and spread of their poison}},
doi = {10.5061/dryad.61649},
year = {2012},
}
@misc{9758,
abstract = {We propose a two-step procedure for estimating multiple migration rates in an approximate Bayesian computation (ABC) framework, accounting for global nuisance parameters. The approach is not limited to migration, but generally of interest for inference problems with multiple parameters and a modular structure (e.g. independent sets of demes or loci). We condition on a known but complex demographic model of a spatially subdivided population, motivated by the reintroduction of Alpine ibex (Capra ibex) into Switzerland. In the first step, the global parameters ancestral mutation rate and male mating skew were estimated for the whole population in Aeschbacher et al. (Genetics 2012; 192: 1027). In the second step, carried out in this study, we estimate the migration rates independently for clusters of demes putatively connected by migration. For large clusters (many migration rates), ABC faces the problem of too many summary statistics. We therefore assess by simulation whether estimation per pair of demes is a valid alternative. We find that the trade-off between reduced dimensionality for the pairwise estimation on the one hand and lower accuracy due to the assumption of pairwise independence on the other depends on the number of migration rates to be inferred: the accuracy of the pairwise approach increases with the number of parameters, relative to the joint estimation approach. To distinguish between low and zero migration, we perform ABC-type model comparison between a model with migration and one without. Applying the approach to microsatellite data from Alpine ibex, we find no evidence for substantial gene flow via migration, except for one pair of demes in one direction.},
author = {Aeschbacher, Simon and Futschik, Andreas and Beaumont, Mark},
publisher = {Dryad},
title = {{Data from: Approximate Bayesian computation for modular inference problems with many parameters: the example of migration rates}},
doi = {10.5061/dryad.274b1},
year = {2012},
}
@article{469,
abstract = {Spontaneous release of glutamate is important for maintaining synaptic strength and controlling spike timing in the brain. Mechanisms regulating spontaneous exocytosis remain poorly understood. Extracellular calcium concentration ([Ca2+]o) regulates Ca2+ entry through voltage-activated calcium channels (VACCs) and consequently is a pivotal determinant of action potential-evoked vesicle fusion. Extracellular Ca2+ also enhances spontaneous release, but via unknown mechanisms. Here we report that external Ca2+ triggers spontaneous glutamate release more weakly than evoked release in mouse neocortical neurons. Blockade of VACCs has no effect on the spontaneous release rate or its dependence on [Ca2+]o. Intracellular [Ca2+] slowly increases in a minority of neurons following increases in [Ca2+]o. Furthermore, the enhancement of spontaneous release by extracellular calcium is insensitive to chelation of intracellular calcium by BAPTA. Activation of the calcium-sensing receptor (CaSR), a G-protein-coupled receptor present in nerve terminals, by several specific agonists increased spontaneous glutamate release. The frequency of spontaneous synaptic transmission was decreased in CaSR mutant neurons. The concentration-effect relationship for extracellular calcium regulation of spontaneous release was well described by a combination of CaSR-dependent and CaSR-independent mechanisms. Overall these results indicate that extracellular Ca2+ does not trigger spontaneous glutamate release by simply increasing calcium influx but stimulates CaSR and thereby promotes resting spontaneous glutamate release.},
author = {Vyleta, Nicholas and Smith, Stephen},
journal = {Journal of Neuroscience},
number = {12},
pages = {4593 -- 4606},
publisher = {Society for Neuroscience},
title = {{Spontaneous glutamate release is independent of calcium influx and tonically activated by the calcium-sensing receptor}},
doi = {10.1523/JNEUROSCI.6398-10.2011},
volume = {31},
year = {2011},
}
@article{490,
abstract = {BioSig is an open source software library for biomedical signal processing. The aim of the BioSig project is to foster research in biomedical signal processing by providing free and open source software tools for many different application areas. Some of the areas where BioSig can be employed are neuroinformatics, brain-computer interfaces, neurophysiology, psychology, cardiovascular systems, and sleep research. Moreover, the analysis of biosignals such as the electroencephalogram (EEG), electrocorticogram (ECoG), electrocardiogram (ECG), electrooculogram (EOG), electromyogram (EMG), or respiration signals is a very relevant element of the BioSig project. Specifically, BioSig provides solutions for data acquisition, artifact processing, quality control, feature extraction, classification, modeling, and data visualization, to name a few. In this paper, we highlight several methods to help students and researchers to work more efficiently with biomedical signals. },
author = {Schlögl, Alois and Vidaurre, Carmen and Sander, Tilmann},
journal = {Computational Intelligence and Neuroscience},
publisher = {Hindawi Publishing Corporation},
title = {{BioSig: The free and open source software library for biomedical signal processing}},
doi = {10.1155/2011/935364},
volume = {2011},
year = {2011},
}
@article{491,
abstract = {In their search for antigens, lymphocytes continuously shuttle among blood vessels, lymph vessels, and lymphatic tissues. Chemokines mediate entry of lymphocytes into lymphatic tissues, and sphingosine 1-phosphate (S1P) promotes localization of lymphocytes to the vasculature. Both signals are sensed through G protein-coupled receptors (GPCRs). Most GPCRs undergo ligand-dependent homologous receptor desensitization, a process that decreases their signaling output after previous exposure to high ligand concentration. Such desensitization can explain why lymphocytes do not take an intermediate position between two signals but rather oscillate between them. The desensitization of S1P receptor 1 (S1PR1) is mediated by GPCR kinase 2 (GRK2). Deletion of GRK2 in lymphocytes compromises desensitization by high vascular S1P concentrations, thereby reducing responsiveness to the chemokine signal and trapping the cells in the vascular compartment. The desensitization kinetics of S1PR1 allows lymphocytes to dynamically shuttle between vasculature and lymphatic tissue, although the positional information in both compartments is static.},
author = {Eichner, Alexander and Sixt, Michael K},
journal = {Science Signaling},
number = {198},
publisher = {American Association for the Advancement of Science},
title = {{Setting the clock for recirculating lymphocytes}},
doi = {10.1126/scisignal.2002617},
volume = {4},
year = {2011},
}
@article{518,
abstract = {Cancer stem cells or cancer initiating cells are believed to contribute to cancer recurrence after therapy. MicroRNAs (miRNAs) are short RNA molecules with fundamental roles in gene regulation. The role of miRNAs in cancer stem cells is only poorly understood. Here, we report miRNA expression profiles of glioblastoma stem cell-containing CD133+ cell populations. We find that miR-9, miR-9* (referred to as miR-9/9*), miR-17 and miR-106b are highly abundant in CD133+ cells. Furthermore, inhibition of miR-9/9* or miR-17 leads to reduced neurosphere formation and stimulates cell differentiation. Calmodulin-binding transcription activator 1 (CAMTA1) is a putative transcription factor, which induces the expression of the anti-proliferative cardiac hormone natriuretic peptide A (NPPA). We identify CAMTA1 as an miR-9/9* and miR-17 target. CAMTA1 expression leads to reduced neurosphere formation and tumour growth in nude mice, suggesting that CAMTA1 can function as tumour suppressor. Consistently, CAMTA1 and NPPA expression correlate with patient survival. Our findings could provide a basis for novel strategies of glioblastoma therapy.},
author = {Schraivogel, Daniel and Weinmann, Lasse and Beier, Dagmar and Tabatabai, Ghazaleh and Eichner, Alexander and Zhu, Jia and Anton, Martina and Sixt, Michael K and Weller, Michael and Beier, Christoph and Meister, Gunter},
journal = {EMBO Journal},
number = {20},
pages = {4309 -- 4322},
publisher = {Nature Publishing Group},
title = {{CAMTA1 is a novel tumour suppressor regulated by miR-9/9* in glioblastoma stem cells}},
doi = {10.1038/emboj.2011.301},
volume = {30},
year = {2011},
}
@article{531,
abstract = {Software transactional memories (STM) are described in the literature with assumptions of sequentially consistent program execution and atomicity of high level operations like read, write, and abort. However, in a realistic setting, processors use relaxed memory models to optimize hardware performance. Moreover, the atomicity of operations depends on the underlying hardware. This paper presents the first approach to verify STMs under relaxed memory models with atomicity of 32 bit loads and stores, and read-modify-write operations. We describe RML, a simple language for expressing concurrent programs. We develop a semantics of RML parametrized by a relaxed memory model. We then present our tool, FOIL, which takes as input the RML description of an STM algorithm restricted to two threads and two variables, and the description of a memory model, and automatically determines the locations of fences, which if inserted, ensure the correctness of the restricted STM algorithm under the given memory model. We use FOIL to verify DSTM, TL2, and McRT STM under the memory models of sequential consistency, total store order, partial store order, and relaxed memory order for two threads and two variables. Finally, we extend the verification results for DSTM and TL2 to an arbitrary number of threads and variables by manually proving that the structural properties of STMs are satisfied at the hardware level of atomicity under the considered relaxed memory models.},
author = {Guerraoui, Rachid and Henzinger, Thomas A and Singh, Vasu},
journal = {Formal Methods in System Design},
number = {3},
pages = {297 -- 331},
publisher = {Springer},
title = {{Verification of STM on relaxed memory models}},
doi = {10.1007/s10703-011-0131-3},
volume = {39},
year = {2011},
}
@misc{5379,
abstract = {Computing the winning set for Büchi objectives in alternating games on graphs is a central problem in computer-aided verification with a large number of applications. The long-standing best known upper bound for solving the problem is Õ(n·m), where n is the number of vertices and m is the number of edges in the graph. We are the first to break the Õ(n·m) boundary by presenting a new technique that reduces the running time to O(n^2). This bound also leads to O(n^2) time algorithms for computing the set of almost-sure winning vertices for Büchi objectives (1) in alternating games with probabilistic transitions (improving an earlier bound of O(n·m)), (2) in concurrent graph games with constant actions (improving an earlier bound of O(n^3)), and (3) in Markov decision processes (improving for m > n^{4/3} an earlier bound of O(min(m^{1.5}, m·n^{2/3}))). We also show that the same technique can be used to compute the maximal end-component decomposition of a graph in time O(n^2), which is an improvement over earlier bounds for m > n^{4/3}. Finally, we show how to maintain the winning set for Büchi objectives in alternating games under a sequence of edge insertions or a sequence of edge deletions in O(n) amortized time per operation. This is the first dynamic algorithm for this problem.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
issn = {2664-1690},
pages = {20},
publisher = {IST Austria},
title = {{An O(n^2) time algorithm for alternating Büchi games}},
doi = {10.15479/AT:IST-2011-0009},
year = {2011},
}
@misc{5380,
abstract = {We consider 2-player games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine the successor state. We study concurrent games with ω-regular winning conditions specified as parity objectives. We consider the qualitative analysis problems: the computation of the almost-sure and limit-sure winning set of states, where player 1 can ensure to win with probability 1 and with probability arbitrarily close to 1, respectively. In general the almost-sure and limit-sure winning strategies require both infinite-memory as well as infinite-precision (to describe probabilities). We study the bounded-rationality problem for qualitative analysis of concurrent parity games, where the strategy set for player 1 is restricted to bounded-resource strategies. In terms of precision, strategies can be deterministic, uniform, finite-precision or infinite-precision; and in terms of memory, strategies can be memoryless, finite-memory or infinite-memory. We present a precise and complete characterization of the qualitative winning sets for all combinations of classes of strategies. In particular, we show that uniform memoryless strategies are as powerful as finite-precision infinite-memory strategies, and infinite-precision memoryless strategies are as powerful as infinite-precision finite-memory strategies. We show that the winning sets can be computed in O(n^{2d+3}) time, where n is the size of the game structure and 2d is the number of priorities (or colors), and our algorithms are symbolic. The membership problem of whether a state belongs to a winning set can be decided in NP ∩ coNP. While this complexity is the same as for the simpler class of turn-based parity games, where in each state only one of the two players has a choice of moves, our algorithms, which are obtained by characterization of the winning sets as μ-calculus formulas, are considerably more involved than those for turn-based games.},
author = {Chatterjee, Krishnendu},
issn = {2664-1690},
pages = {53},
publisher = {IST Austria},
title = {{Bounded rationality in concurrent parity games}},
doi = {10.15479/AT:IST-2011-0008},
year = {2011},
}
@misc{5381,
abstract = {In two-player finite-state stochastic games of partial observation on graphs, in every state of the graph, the players simultaneously choose an action, and their joint actions determine a probability distribution over the successor states. The game is played for infinitely many rounds and thus the players construct an infinite path in the graph. We consider reachability objectives where the first player tries to ensure a target state to be visited almost-surely (i.e., with probability 1) or positively (i.e., with positive probability), no matter the strategy of the second player.
We classify such games according to the information and to the power of randomization available to the players. On the basis of information, the game can be one-sided with either (a) player 1, or (b) player 2 having partial observation (and the other player has perfect observation), or two-sided with (c) both players having partial observation. On the basis of randomization, (a) the players may not be allowed to use randomization (pure strategies), or (b) they may choose a probability distribution over actions but the actual random choice is external and not visible to the player (actions invisible), or (c) they may use full randomization.
Our main results for pure strategies are as follows: (1) For one-sided games with player 2 perfect observation we show that (in contrast to full randomized strategies) belief-based (subset-construction based) strategies are not sufficient, and present an exponential upper bound on memory both for almost-sure and positive winning strategies; we show that the problem of deciding the existence of almost-sure and positive winning strategies for player 1 is EXPTIME-complete and present symbolic algorithms that avoid the explicit exponential construction. (2) For one-sided games with player 1 perfect observation we show that non-elementary memory is both necessary and sufficient for both almost-sure and positive winning strategies. (3) We show that for the general (two-sided) case finite-memory strategies are sufficient for both positive and almost-sure winning, and at least non-elementary memory is required. We establish the equivalence of the almost-sure winning problems for pure strategies and for randomized strategies with actions invisible. Our equivalence result exhibits serious flaws in previous results in the literature: we show a non-elementary memory lower bound for almost-sure winning whereas an exponential upper bound was previously claimed.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
issn = {2664-1690},
pages = {43},
publisher = {IST Austria},
title = {{Partial-observation stochastic games: How to win when belief fails}},
doi = {10.15479/AT:IST-2011-0007},
year = {2011},
}
@misc{5382,
abstract = {We consider two-player stochastic games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine a probability distribution over the successor states. We also consider the important special case of turn-based stochastic games where players make moves in turns, rather than concurrently. We study concurrent games with ω-regular winning conditions specified as parity objectives. The value for player 1 for a parity objective is the maximal probability with which the player can guarantee the satisfaction of the objective against all strategies of the opponent. We study the problem of continuity and robustness of the value function in concurrent and turn-based stochastic parity games with respect to imprecision in the transition probabilities. We present quantitative bounds on the difference of the value function (in terms of the imprecision of the transition probabilities) and show value continuity for structurally equivalent concurrent games (two games are structurally equivalent if the support of the transition function is the same and the probabilities differ). We also show robustness of optimal strategies for structurally equivalent turn-based stochastic parity games. Finally, we show that the value continuity property breaks without the structural equivalence assumption (even for Markov chains) and show that our quantitative bound is asymptotically optimal. Hence our results are tight (the assumption is both necessary and sufficient) and optimal (our quantitative bound is asymptotically optimal).},
author = {Chatterjee, Krishnendu},
issn = {2664-1690},
pages = {18},
publisher = {IST Austria},
title = {{Robustness of structurally equivalent concurrent parity games}},
doi = {10.15479/AT:IST-2011-0006},
year = {2011},
}
@misc{5383,
abstract = {We present a new decidable logic called TREX for expressing constraints about imperative tree data structures. In particular, TREX supports a transitive closure operator that can express reachability constraints, which often appear in data structure invariants. We show that our logic is closed under weakest precondition computation, which enables its use for automated software verification. We further show that satisfiability of formulas in TREX is decidable in NP. The low complexity makes it an attractive alternative to more expensive logics such as monadic second-order logic (MSOL) over trees, which have been traditionally used for reasoning about tree data structures.},
author = {Wies, Thomas and Muñiz, Marco and Kuncak, Viktor},
issn = {2664-1690},
pages = {25},
publisher = {IST Austria},
title = {{On an efficient decision procedure for imperative tree data structures}},
doi = {10.15479/AT:IST-2011-0005},
year = {2011},
}
@misc{5384,
abstract = {We consider probabilistic automata on infinite words with acceptance defined by parity conditions. We consider three qualitative decision problems: (i) the positive decision problem asks whether there is a word that is accepted with positive probability; (ii) the almost decision problem asks whether there is a word that is accepted with probability 1; and (iii) the limit decision problem asks whether for every ε > 0 there is a word that is accepted with probability at least 1 − ε. We unify and generalize several decidability results for probabilistic automata over infinite words, and identify a robust (closed under union and intersection) subclass of probabilistic automata for which all the qualitative decision problems are decidable for parity conditions. We also show that if the input words are restricted to lasso-shaped words, then the positive and almost problems are decidable for all probabilistic automata with parity conditions.},
author = {Chatterjee, Krishnendu and Tracol, Mathieu},
issn = {2664-1690},
pages = {30},
publisher = {IST Austria},
title = {{Decidable problems for probabilistic automata on infinite words}},
doi = {10.15479/AT:IST-2011-0004},
year = {2011},
}
@misc{5385,
abstract = {There has recently been a significant effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions, aiming for a general and flexible framework for quantitative-oriented specifications. At the heart of quantitative objectives lies the accumulation of values along a computation. It is either the accumulated summation, as with the energy objectives, or the accumulated average, as with the mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point of time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire computation. We study the border of decidability for extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities by prefix-accumulation assertions, and extending LTL with path-accumulation assertions, result in temporal logics whose model-checking problem is decidable. The extended logics allow us to significantly extend the currently known energy and mean-payoff objectives. Moreover, the prefix-accumulation assertions may be refined with “controlled accumulation”, allowing, for example, the specification of constraints on the average waiting time between a request and a grant. On the negative side, we show that the fragment we point to is, in a sense, the maximal logic whose extension with prefix-accumulation assertions permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, and in particular CTL and LTL, makes the problem undecidable.},
author = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A and Kupferman, Orna},
issn = {2664-1690},
pages = {14},
publisher = {IST Austria},
title = {{Temporal specifications with accumulative values}},
doi = {10.15479/AT:IST-2011-0003},
year = {2011},
}
@misc{5386,
abstract = {We introduce TopoCut: a new way to integrate knowledge about topological properties (TPs) into a random field image segmentation model. Instead of including TPs as additional constraints during minimization of the energy function, we devise an efficient algorithm for modifying the unary potentials such that the resulting segmentation is guaranteed to have the desired properties. Our method is more flexible in the sense that it handles more topological constraints than previous methods, which were only able to enforce pairwise or global connectivity. In particular, our method is very fast, making it possible for the first time to enforce global topological properties in practical image segmentation tasks.},
author = {Chen, Chao and Freedman, Daniel and Lampert, Christoph},
issn = {2664-1690},
pages = {69},
publisher = {IST Austria},
title = {{Enforcing topological constraints in random field image segmentation}},
doi = {10.15479/AT:IST-2011-0002},
year = {2011},
}
@misc{5387,
abstract = {We consider Markov Decision Processes (MDPs) with mean-payoff parity and energy parity objectives. In system design, the parity objective is used to encode ω-regular specifications, and the mean-payoff and energy objectives can be used to model quantitative resource constraints. The energy condition requires that the resource level never drops below 0, and the mean-payoff condition requires that the limit-average value of the resource consumption is within a threshold. While these two (energy and mean-payoff) classical conditions are equivalent for two-player games, we show that they differ for MDPs. We show that the problem of deciding whether a state is almost-sure winning (i.e., winning with probability 1) in energy parity MDPs is in NP ∩ coNP, while for mean-payoff parity MDPs, the problem is solvable in polynomial time, improving a recent PSPACE bound.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
issn = {2664-1690},
pages = {20},
publisher = {IST Austria},
title = {{Energy and mean-payoff parity Markov decision processes}},
doi = {10.15479/AT:IST-2011-0001},
year = {2011},
}
@article{3771,
abstract = {The small-sized frugivorous bat Carollia perspicillata is an understory specialist and occurs in a wide range of lowland habitats, tending to be more common in tropical dry or moist forests of South and Central America. Its sister species, Carollia brevicauda, occurs almost exclusively in the Amazon rainforest. A recent phylogeographic study proposed a hypothesis of origin and subsequent diversification for C. perspicillata along the Atlantic coastal forest of Brazil. Additionally, it also found two allopatric clades for C. brevicauda separated by the Amazon Basin. We used cytochrome b gene sequences and a more extensive sampling to test hypotheses related to the origin and diversification of C. perspicillata plus C. brevicauda clade in South America. The results obtained indicate that there are two sympatric evolutionary lineages within each species. In C. perspicillata, one lineage is limited to the Southern Atlantic Forest, whereas the other is widely distributed. Coalescent analysis points to a simultaneous origin for C. perspicillata and C. brevicauda, although no place for the diversification of each species can be firmly suggested. The phylogeographic pattern shown by C. perspicillata is also congruent with the Pleistocene refugia hypothesis as a likely vicariant phenomenon shaping the present distribution of its intraspecific lineages.},
author = {Pavan, Ana and Martins, Felipe and Santos, Fabrício and Ditchfield, Albert and Fernandes Redondo, Rodrigo A},
journal = {Biological Journal of the Linnean Society},
number = {3},
pages = {527 -- 539},
publisher = {Wiley-Blackwell},
title = {{Patterns of diversification in two species of short-tailed bats (Carollia Gray, 1838): the effects of historical fragmentation of Brazilian rainforests.}},
doi = {10.1111/j.1095-8312.2010.01601.x},
volume = {102},
year = {2011},
}
@article{3778,
author = {Barton, Nicholas H},
journal = {Heredity},
number = {2},
pages = {205 -- 206},
publisher = {Nature Publishing Group},
title = {{Estimating linkage disequilibria}},
doi = {10.1038/hdy.2010.67},
volume = {106},
year = {2011},
}
@article{3781,
abstract = {We bound the difference in length of two curves in terms of their total curvatures and the Fréchet distance. The bound is independent of the dimension of the ambient Euclidean space, it improves upon a bound by Cohen-Steiner and Edelsbrunner, and it generalizes a result by Fáry and Chakerian.},
author = {Fasy, Brittany Terese},
journal = {Acta Scientiarum Mathematicarum (Szeged)},
number = {1-2},
pages = {359 -- 367},
publisher = {Szegedi Tudományegyetem},
title = {{The difference in length of curves in R^n}},
volume = {77},
year = {2011},
}
@article{3784,
abstract = {Advanced stages of Scyllarus phyllosoma larvae were collected by demersal trawling during fishery research surveys in the western Mediterranean Sea in 2003–2005. Nucleotide sequence analysis of the mitochondrial 16S rDNA gene allowed the final-stage phyllosoma of Scyllarus arctus to be identified among these larvae. Its morphology is described and illustrated. This constitutes the second complete description of a Scyllaridae phyllosoma with its specific identity being validated by molecular techniques (the first was S. pygmaeus). These results also resolved a long-standing taxonomic anomaly of several species assigned to the ancient genus Phyllosoma Leach, 1814. Detailed examination indicated that the final-stage phyllosoma of S. arctus shows closer affinities with the American scyllarid Scyllarus depressus or with the Australian Scyllarus sp. b (sensu Phillips et al., 1981) than to its sympatric species S. pygmaeus.},
author = {Palero, Ferran and Guerao, Guillermo and Clark, Paul and Abello, Pere},
journal = {Journal of the Marine Biological Association of the United Kingdom},
number = {2},
pages = {485 -- 492},
publisher = {Cambridge University Press},
title = {{Scyllarus arctus (Crustacea: Decapoda: Scyllaridae) final stage phyllosoma identified by DNA analysis, with morphological description}},
doi = {10.1017/S0025315410000287},
volume = {91},
year = {2011},
}
@inbook{3791,
abstract = {During the development of multicellular organisms, cell fate specification is followed by the sorting of different cell types into distinct domains from where the different tissues and organs are formed. Cell sorting involves both the segregation of a mixed population of cells with different fates and properties into distinct domains, and the active maintenance of their segregated state. Because of its biological importance and apparent resemblance to fluid segregation in physics, cell sorting was extensively studied by both biologists and physicists over the last decades. Different theories were developed that try to explain cell sorting on the basis of the physical properties of the constituent cells. However, only recently the molecular and cellular mechanisms that control the physical properties driving cell sorting, have begun to be unraveled. In this review, we will provide an overview of different cell-sorting processes in development and discuss how these processes can be explained by the different sorting theories, and how these theories in turn can be connected to the molecular and cellular mechanisms driving these processes.},
author = {Krens, Gabriel and Heisenberg, Carl-Philipp J},
booktitle = {Forces and Tension in Development},
editor = {Labouesse, Michel},
pages = {189 -- 213},
publisher = {Elsevier},
title = {{Cell sorting in development}},
doi = {10.1016/B978-0-12-385065-2.00006-2},
volume = {95},
year = {2011},
}
@inbook{3796,
abstract = {We address the problem of covering ℝ^n with congruent balls, while minimizing the number of balls that contain an average point. Considering the 1-parameter family of lattices defined by stretching or compressing the integer grid in diagonal direction, we give a closed formula for the covering density that depends on the distortion parameter. We observe that our family contains the thinnest lattice coverings in dimensions 2 to 5. We also consider the problem of packing congruent balls in ℝ^n, for which we give a closed formula for the packing density as well. Again we observe that our family contains optimal configurations, this time densest packings in dimensions 2 and 3.},
author = {Edelsbrunner, Herbert and Kerber, Michael},
booktitle = {Rainbow of Computer Science},
editor = {Calude, Cristian and Rozenberg, Grzegorz and Salomaa, Arto},
pages = {20 -- 35},
publisher = {Springer},
title = {{Covering and packing with spheres by diagonal distortion in R^n}},
doi = {10.1007/978-3-642-19391-0_2},
volume = {6570},
year = {2011},
}
@article{3965,
abstract = {The elevation function on a smoothly embedded 2-manifold in R^3 reflects the multiscale topography of cavities and protrusions as local maxima. The function has been useful in identifying coarse docking configurations for protein pairs. Transporting the concept from the smooth to the piecewise linear category, this paper describes an algorithm for finding all local maxima. While its worst-case running time is the same as that of the algorithm used in prior work, its performance in practice is orders of magnitude superior. We cast light on this improvement by relating the running time to the total absolute Gaussian curvature of the 2-manifold.},
author = {Wang, Bei and Edelsbrunner, Herbert and Morozov, Dmitriy},
journal = {Journal of Experimental Algorithmics},
number = {2.2},
pages = {1 -- 13},
publisher = {ACM},
title = {{Computing elevation maxima by searching the Gauss sphere}},
doi = {10.1145/1963190.1970375},
volume = {16},
year = {2011},
}
@inproceedings{3163,
abstract = {We study multi-label prediction for structured output sets, a problem that occurs, for example, in object detection in images, secondary structure prediction in computational biology, and graph matching with symmetries. Conventional multi-label classification techniques are typically not applicable in this situation, because they require explicit enumeration of the label set, which is infeasible in the case of structured outputs. Relying on techniques originally designed for single-label structured prediction, in particular structured support vector machines, results in reduced prediction accuracy, or leads to infeasible optimization problems. In this work we derive a maximum-margin training formulation for multi-label structured prediction that remains computationally tractable while achieving high prediction accuracy. It also shares most beneficial properties with single-label maximum-margin approaches, in particular formulation as a convex optimization problem, efficient working set training, and PAC-Bayesian generalization bounds.},
author = {Lampert, Christoph},
location = {Granada, Spain},
publisher = {Neural Information Processing Systems},
title = {{Maximum margin multi-label structured prediction}},
year = {2011},
}
@inproceedings{3264,
abstract = {Verification of programs with procedures, multi-threaded programs, and higher-order functional programs can be effectively automated using abstraction and refinement schemes that rely on spurious counterexamples for abstraction discovery. The analysis of counterexamples can be automated by a series of interpolation queries, or, alternatively, as a constraint solving query expressed by a set of recursion-free Horn clauses. (A set of interpolation queries can be formulated as a single constraint over Horn clauses with linear dependency structure between the unknown relations.) In this paper we present an algorithm for solving recursion-free Horn clauses over a combined theory of linear real/rational arithmetic and uninterpreted functions. Our algorithm performs resolution to deal with the clausal structure and relies on partial solutions to deal with (non-local) instances of functionality axioms.},
author = {Gupta, Ashutosh and Popeea, Corneliu and Rybalchenko, Andrey},
editor = {Yang, Hongseok},
location = {Kenting, Taiwan},
pages = {188 -- 203},
publisher = {Springer},
title = {{Solving recursion-free Horn clauses over LI+UIF}},
doi = {10.1007/978-3-642-25318-8_16},
volume = {7078},
year = {2011},
}
@inproceedings{3266,
abstract = {We present a joint image segmentation and labeling model (JSL) which, given a bag of figure-ground segment hypotheses extracted at multiple image locations and scales, constructs a joint probability distribution over both the compatible image interpretations (tilings or image segmentations) composed from those segments, and over their labeling into categories. The process of drawing samples from the joint distribution can be interpreted as first sampling tilings, modeled as maximal cliques, from a graph connecting spatially non-overlapping segments in the bag [1], followed by sampling labels for those segments, conditioned on the choice of a particular tiling. We learn the segmentation and labeling parameters jointly, based on Maximum Likelihood with a novel Incremental Saddle Point estimation procedure. The partition function over tilings and labelings is increasingly more accurately approximated by including incorrect configurations that a not-yet-competent model rates probable during learning. We show that the proposed methodology matches the current state of the art in the Stanford dataset [2], as well as in VOC2010, where 41.7% accuracy on the test set is achieved.},
author = {Ion, Adrian and Carreira, Joao and Sminchisescu, Cristian},
booktitle = {NIPS Proceedings},
location = {Granada, Spain},
pages = {1827 -- 1835},
publisher = {Neural Information Processing Systems Foundation},
title = {{Probabilistic joint image segmentation and labeling}},
volume = {24},
year = {2011},
}
@article{3267,
abstract = {We address the problem of localizing homology classes, namely, finding the cycle representing a given class with the most concise geometric measure. We study the problem with different measures: volume, diameter and radius. For volume, that is, the 1-norm of a cycle, two main results are presented. First, we prove that the problem is NP-hard to approximate within any constant factor. Second, we prove that for homology of dimension two or higher, the problem is NP-hard to approximate even when the Betti number is O(1). The latter result leads to the inapproximability of the problem of computing the nonbounding cycle with the smallest volume and computing cycles representing a homology basis with the minimal total volume. As for the other two measures defined by pairwise geodesic distance, diameter and radius, we show that the localization problem is NP-hard for diameter but is polynomial for radius. Our work is restricted to homology over the ℤ2 field.},
author = {Chen, Chao and Freedman, Daniel},
journal = {Discrete & Computational Geometry},
number = {3},
pages = {425 -- 448},
publisher = {Springer},
title = {{Hardness results for homology localization}},
doi = {10.1007/s00454-010-9322-8},
volume = {45},
year = {2011},
}
@article{3269,
abstract = {The unintentional scattering of light between neighboring surfaces in complex projection environments increases the brightness and decreases the contrast, disrupting the appearance of the desired imagery. To achieve satisfactory projection results, the inverse problem of global illumination must be solved to cancel this secondary scattering. In this paper, we propose a global illumination cancellation method that minimizes the perceptual difference between the desired imagery and the actual total illumination in the resulting physical environment. Using Gauss-Newton and active set methods, we design a fast solver for the bound constrained nonlinear least squares problem raised by the perceptual error metrics. Our solver is further accelerated with a CUDA implementation and multi-resolution method to achieve 1–2 fps for problems with approximately 3000 variables. We demonstrate the global illumination cancellation algorithm with our multi-projector system. Results show that our method preserves the color fidelity of the desired imagery significantly better than previous methods.},
author = {Sheng, Yu and Cutler, Barbara and Chen, Chao and Nasman, Joshua},
journal = {Computer Graphics Forum},
number = {4},
pages = {1261 -- 1268},
publisher = {Wiley-Blackwell},
title = {{Perceptual global illumination cancellation in complex projection environments}},
doi = {10.1111/j.1467-8659.2011.01985.x},
volume = {30},
year = {2011},
}
@inproceedings{3270,
abstract = {The persistence diagram of a filtered simplicial complex is usually computed by reducing the boundary matrix of the complex. We introduce a simple optimization technique: by processing the simplices of the complex in decreasing dimension, we can “kill” columns (i.e., set them to zero) without reducing them. This technique completely avoids reduction on roughly half of the columns. We demonstrate that this idea significantly improves the running time of the reduction algorithm in practice. We also give an output-sensitive complexity analysis for the new algorithm which yields sub-cubic asymptotic bounds under certain assumptions.},
author = {Chen, Chao and Kerber, Michael},
location = {Morschach, Switzerland},
pages = {197 -- 200},
publisher = {TU Dortmund},
title = {{Persistent homology computation with a twist}},
year = {2011},
}
@inbook{3271,
abstract = {In this paper we present an efficient framework for computation of persistent homology of cubical data in arbitrary dimensions. An existing algorithm using simplicial complexes is adapted to the setting of cubical complexes. The proposed approach enables efficient application of persistent homology in domains where the data is naturally given in a cubical form. By avoiding triangulation of the data, we significantly reduce the size of the complex. We also present a data structure designed to compactly store and quickly manipulate cubical complexes. By means of numerical experiments, we show the high speed and memory efficiency of our approach. We compare our framework to other available implementations, showing its superiority. Finally, we report performance on selected 3D and 4D data-sets.},
author = {Wagner, Hubert and Chen, Chao and Vuçini, Erald},
booktitle = {Topological Methods in Data Analysis and Visualization II},
editor = {Peikert, Ronald and Hauser, Helwig and Carr, Hamish and Fuchs, Raphael},
pages = {91 -- 106},
publisher = {Springer},
title = {{Efficient computation of persistent homology for cubical data}},
doi = {10.1007/978-3-642-23175-9_7},
year = {2011},
}
@phdthesis{3273,
author = {Maître, Jean-Léon},
publisher = {IST Austria},
title = {{Mechanics of adhesion and de-adhesion in zebrafish germ layer progenitors}},
year = {2011},
}
@article{3287,
abstract = {Diffusing membrane constituents are constantly exposed to a variety of forces that influence their stochastic path. Single molecule experiments allow for resolving trajectories at extremely high spatial and temporal accuracy, thereby offering insights into en route interactions of the tracer. In this review we discuss approaches to derive information about the underlying processes, based on single molecule tracking experiments. In particular, we focus on a new versatile way to analyze single molecule diffusion in the absence of a full analytical treatment. The method is based on comprehensive comparison of an experimental data set against the hypothetical outcome of multiple experiments performed on the computer. Since Monte Carlo simulations can be easily and rapidly performed even on state-of-the-art PCs, our method provides a simple way for testing various - even complicated - diffusion models. We describe the new method in detail, and show the applicability on two specific examples: firstly, kinetic rate constants can be derived for the transient interaction of mobile membrane proteins; secondly, residence time and corral size can be extracted for confined diffusion.},
author = {Ruprecht, Verena and Axmann, Markus and Wieser, Stefan and Schuetz, Gerhard},
journal = {Current Protein & Peptide Science},
number = {8},
pages = {714 -- 724},
publisher = {Bentham Science Publishers},
title = {{What can we learn from single molecule trajectories?}},
doi = {10.2174/138920311798841753},
volume = {12},
year = {2011},
}
@article{3288,
abstract = {The zonula adherens (ZA) of epithelial cells is a site of cell-cell adhesion where cellular forces are exerted and resisted. Increasing evidence indicates that E-cadherin adhesion molecules at the ZA serve to sense force applied on the junctions and coordinate cytoskeletal responses to those forces. Efforts to understand the role that cadherins play in mechanotransduction have been limited by the lack of assays to measure the impact of forces on the ZA. In this study we used 4D imaging of GFP-tagged E-cadherin to analyse the movement of the ZA. Junctions in confluent epithelial monolayers displayed prominent movements oriented orthogonal (perpendicular) to the ZA itself. Two components were identified in these movements: a relatively slow unidirectional (translational) component that could be readily fitted by least-squares regression analysis, upon which were superimposed more rapid oscillatory movements. Myosin IIB was a dominant factor responsible for driving the unilateral translational movements. In contrast, frequency spectrum analysis revealed that depletion of Myosin IIA increased the power of the oscillatory movements. This implies that Myosin IIA may serve to dampen oscillatory movements of the ZA. This extends our recent analysis of Myosin II at the ZA to demonstrate that Myosin IIA and Myosin IIB make distinct contributions to junctional movement at the ZA.},
author = {Smutny, Michael and Wu, Selwin and Gomez, Guillermo and Mangold, Sabine and Yap, Alpha and Hamilton, Nicholas},
journal = {PLoS One},
number = {7},
publisher = {Public Library of Science},
title = {{Multicomponent analysis of junctional movements regulated by Myosin II isoforms at the epithelial zonula adherens}},
doi = {10.1371/journal.pone.0022458},
volume = {6},
year = {2011},
}
@article{3290,
abstract = {Analysis of genomic data requires an efficient way to calculate likelihoods across very large numbers of loci. We describe a general method for finding the distribution of genealogies: we allow migration between demes, splitting of demes [as in the isolation-with-migration (IM) model], and recombination between linked loci. These processes are described by a set of linear recursions for the generating function of branch lengths. Under the infinite-sites model, the probability of any configuration of mutations can be found by differentiating this generating function. Such calculations are feasible for small numbers of sampled genomes: as an example, we show how the generating function can be derived explicitly for three genes under the two-deme IM model. This derivation is done automatically, using Mathematica. Given data from a large number of unlinked and nonrecombining blocks of sequence, these results can be used to find maximum-likelihood estimates of model parameters by tabulating the probabilities of all relevant mutational configurations and then multiplying across loci. The feasibility of the method is demonstrated by applying it to simulated data and to a data set previously analyzed by Wang and Hey (2010) consisting of 26,141 loci sampled from Drosophila simulans and D. melanogaster. Our results suggest that such likelihood calculations are scalable to genomic data as long as the numbers of sampled individuals and mutations per sequence block are small.},
author = {Lohse, Konrad and Harrison, Richard and Barton, Nicholas H},
journal = {Genetics},
number = {3},
pages = {977 -- 987},
publisher = {Genetics Society of America},
title = {{A general method for calculating likelihoods under the coalescent process}},
doi = {10.1534/genetics.111.129569},
volume = {189},
year = {2011},
}
@inproceedings{3297,
abstract = {Animating detailed liquid surfaces has always been a challenge for computer graphics researchers and visual effects artists. Over the past few years, researchers in this field have focused on mesh-based surface tracking to synthesize extremely detailed liquid surfaces as efficiently as possible. This course provides a solid understanding of the steps required to create a fluid simulator with a mesh-based liquid surface.
The course begins with an overview of several existing liquid-surface-tracking techniques and the pros and cons of each method. Then it explains how to embed a triangle mesh into a finite-difference-based fluid simulator and describes several methods for allowing the liquid surface to merge together or break apart. The final section showcases the benefits and further applications of a mesh-based liquid surface, highlighting state-of-the-art methods for tracking colors and textures, maintaining liquid volume, preserving small surface features, and simulating realistic surface-tension waves.},
author = {Wojtan, Christopher J and Müller Fischer, Matthias and Brochu, Tyson},
location = {Vancouver, BC, Canada},
publisher = {ACM},
title = {{Liquid simulation with mesh-based surface tracking}},
doi = {10.1145/2037636.2037644},
year = {2011},
}
@inproceedings{3298,
abstract = {We present a new algorithm for enforcing incompressibility for Smoothed Particle Hydrodynamics (SPH) by preserving uniform density across the domain. We propose a hybrid method that uses a Poisson solve on a coarse grid to enforce a divergence free velocity field, followed by a local density correction of the particles. This avoids typical grid artifacts and maintains the Lagrangian nature of SPH by directly transferring pressures onto particles. Our method can be easily integrated with existing SPH techniques such as the incompressible PCISPH method as well as weakly compressible SPH by adding an additional force term. We show that this hybrid method accelerates convergence towards uniform density and permits a significantly larger time step compared to earlier approaches while producing similar results. We demonstrate our approach in a variety of scenarios with significant pressure gradients such as splashing liquids.},
author = {Raveendran, Karthik and Wojtan, Christopher J and Turk, Greg},
editor = {Spencer, Stephen},
location = {Vancouver, Canada},
pages = {33 -- 42},
publisher = {ACM},
title = {{Hybrid smoothed particle hydrodynamics}},
doi = {10.1145/2019406.2019411},
year = {2011},
}
@inproceedings{3299,
abstract = {We introduce propagation models, a formalism designed to support general and efficient data structures for the transient analysis of biochemical reaction networks. We give two use cases for propagation abstract data types: the uniformization method and numerical integration. We also sketch an implementation of a propagation abstract data type, which uses abstraction to approximate states.},
author = {Henzinger, Thomas A and Mateescu, Maria},
location = {Paris, France},
pages = {1 -- 3},
publisher = {Springer},
title = {{Propagation models for computing biochemical reaction networks}},
doi = {10.1145/2037509.2037510},
year = {2011},
}
@inproceedings{3301,
abstract = {The chemical master equation is a differential equation describing the time evolution of the probability distribution over the possible “states” of a biochemical system. The solution of this equation has been of interest within the systems biology field ever since the importance of molecular noise was acknowledged. Unfortunately, most of the systems do not have analytical solutions, and numerical solutions suffer from the curse of dimensionality and therefore need to be approximated. Here, we introduce the concept of tail approximation, which retrieves an approximation of the probabilities in the tail of a distribution from the total probability of the tail and its conditional expectation. This approximation method can then be used to numerically compute the solution of the chemical master equation on a subset of the state space, thus fighting the explosion of the state space, for which this problem is renowned.},
author = {Henzinger, Thomas A and Mateescu, Maria},
publisher = {Tampere International Center for Signal Processing},
title = {{Tail approximation for the chemical master equation}},
year = {2011},
}
@inproceedings{3302,
abstract = {Cloud computing aims to give users virtually unlimited pay-per-use computing resources without the burden of managing the underlying infrastructure. We present a new job execution environment Flextic that exploits scalable static scheduling techniques to provide the user with a flexible pricing model, such as a tradeoff between different degrees of execution speed and execution price, and at the same time, reduce scheduling overhead for the cloud provider. We have evaluated a prototype of Flextic on Amazon EC2 and compared it against Hadoop. For various data parallel jobs from machine learning, image processing, and gene sequencing that we considered, Flextic has low scheduling overhead and reduces job duration by up to 15% compared to Hadoop, a dynamic cloud scheduler.},
author = {Henzinger, Thomas A and Singh, Anmol and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
pages = {1 -- 6},
publisher = {USENIX},
title = {{Static scheduling in clouds}},
year = {2011},
}
@inbook{3311,
abstract = {Alpha shapes were conceived in 1981 as an attempt to define the shape of a finite set of points in the plane. Since then, connections to diverse areas in the sciences and engineering have developed, including to pattern recognition, digital shape sampling and processing, and structural molecular biology. This survey begins with a historical account and discusses geometric, algorithmic, topological, and combinatorial aspects of alpha shapes in this sequence.},
author = {Edelsbrunner, Herbert},
booktitle = {Tessellations in the Sciences},
publisher = {Springer},
title = {{Alpha shapes - a survey}},
year = {2011},
}
@inproceedings{3312,
abstract = {We study the 3D reconstruction of plant roots from multiple 2D images. To meet the challenge caused by the delicate nature of thin branches, we make three innovations to cope with the sensitivity to image quality and calibration. First, we model the background as a harmonic function to improve the segmentation of the root in each 2D image. Second, we develop the concept of the regularized visual hull which reduces the effect of jittering and refraction by ensuring consistency with one 2D image. Third, we guarantee connectedness through adjustments to the 3D reconstruction that minimize global error. Our software is part of a biological phenotype/genotype study of agricultural root systems. It has been tested on more than 40 plant roots and results are promising in terms of reconstruction quality and efficiency.},
author = {Zheng, Ying and Gu, Steve and Edelsbrunner, Herbert and Tomasi, Carlo and Benfey, Philip},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
location = {Barcelona, Spain},
publisher = {IEEE},
title = {{Detailed reconstruction of 3D plant root shape}},
doi = {10.1109/ICCV.2011.6126475},
year = {2011},
}
@inproceedings{3313,
abstract = {Interpreting an image as a function on a compact subset of the Euclidean plane, we get its scale-space by diffusion, spreading the image over the entire plane. This generates a 1-parameter family of functions alternatively defined as convolutions with a progressively wider Gaussian kernel. We prove that the corresponding 1-parameter family of persistence diagrams has norms that go rapidly to zero as time goes to infinity. This result rationalizes experimental observations about scale-space. We hope this will lead to targeted improvements of related computer vision methods.},
author = {Chen, Chao and Edelsbrunner, Herbert},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
location = {Barcelona, Spain},
publisher = {IEEE},
title = {{Diffusion runs low on persistence fast}},
doi = {10.1109/ICCV.2011.6126271},
year = {2011},
}
@article{3315,
abstract = {We consider two-player games played in real time on game structures with clocks where the objectives of players are described using parity conditions. The games are concurrent in that at each turn, both players independently propose a time delay and an action, and the action with the shorter delay is chosen. To prevent a player from winning by blocking time, we restrict each player to play strategies that ensure that the player cannot be responsible for causing a zeno run. First, we present an efficient reduction of these games to turn-based (i.e., not concurrent) finite-state (i.e., untimed) parity games. Our reduction improves the best known complexity for solving timed parity games. Moreover, the rich class of algorithms for classical parity games can now be applied to timed parity games. The states of the resulting game are based on clock regions of the original game, and the state space of the finite game is linear in the size of the region graph. Second, we consider two restricted classes of strategies for the player that represents the controller in a real-time synthesis problem, namely, limit-robust and bounded-robust winning strategies. Using a limit-robust winning strategy, the controller cannot choose an exact real-valued time delay but must allow for some nonzero jitter in each of its actions. If there is a given lower bound on the jitter, then the strategy is bounded-robust winning. We show that exact strategies are more powerful than limit-robust strategies, which are more powerful than bounded-robust winning strategies for any bound. For both kinds of robust strategies, we present efficient reductions to standard timed automaton games. These reductions provide algorithms for the synthesis of robust real-time controllers.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Prabhu, Vinayak},
journal = {Logical Methods in Computer Science},
number = {4},
publisher = {International Federation of Computational Logic},
title = {{Timed parity games: Complexity and robustness}},
doi = {10.2168/LMCS-7(4:8)2011},
volume = {7},
year = {2011},
}
@inproceedings{3316,
abstract = {In addition to being correct, a system should be robust, that is, it should behave reasonably even after receiving unexpected inputs. In this paper, we summarize two formal notions of robustness that we have introduced previously for reactive systems. One of the notions is based on assigning costs to failures, relative to a user-provided notion of incorrect transitions in a specification. Here, we define a system to be robust if a finite number of incorrect inputs does not lead to an infinite number of incorrect outputs. We also give a more refined notion of robustness that aims to minimize the ratio of output failures to input failures. The second notion is aimed at liveness. In contrast to the previous notion, it has no concept of recovery from an error. Instead, it compares the ratio of the number of liveness constraints that the system violates to the number of liveness constraints that the environment violates.},
author = {Bloem, Roderick and Chatterjee, Krishnendu and Greimel, Karin and Henzinger, Thomas A and Jobstmann, Barbara},
booktitle = {6th IEEE International Symposium on Industrial and Embedded Systems},
location = {Vasteras, Sweden},
pages = {176 -- 185},
publisher = {IEEE},
title = {{Specification-centered robustness}},
doi = {10.1109/SIES.2011.5953660},
year = {2011},
}
@article{3318,
abstract = {Parvalbumin is thought to act in a manner similar to EGTA, but how a slow Ca2+ buffer affects nanodomain-coupling regimes at GABAergic synapses is unclear. Direct measurements of parvalbumin concentration and paired recordings in rodent hippocampus and cerebellum revealed that parvalbumin affects synaptic dynamics only when expressed at high levels. Modeling suggests that, in high concentrations, parvalbumin may exert BAPTA-like effects, modulating nanodomain coupling via competition with local saturation of endogenous fixed buffers.},
author = {Eggermann, Emmanuel and Jonas, Peter M},
journal = {Nature Neuroscience},
pages = {20 -- 22},
publisher = {Nature Publishing Group},
title = {{How the “slow” Ca(2+) buffer parvalbumin affects transmitter release in nanodomain coupling regimes at GABAergic synapses}},
doi = {10.1038/nn.3002},
volume = {15},
year = {2011},
}
@inproceedings{3319,
abstract = {We address the problem of metric learning for multi-view data, namely the construction of embedding projections from data in different representations into a shared feature space, such that the Euclidean distance in this space provides a meaningful within-view as well as between-view similarity. Our motivation stems from the problem of cross-media retrieval tasks, where the availability of a joint Euclidean distance function is a pre-requisite to allow fast, in particular hashing-based, nearest neighbor queries. We formulate an objective function that expresses the intuitive concept that matching samples are mapped closely together in the output space, whereas non-matching samples are pushed apart, no matter in which view they are available. The resulting optimization problem is not convex, but it can be decomposed explicitly into a convex and a concave part, thereby allowing efficient optimization using the convex-concave procedure. Experiments on an image retrieval task show that nearest-neighbor based cross-view retrieval is indeed possible, and the proposed technique improves the retrieval accuracy over baseline techniques.},
author = {Quadrianto, Novi and Lampert, Christoph},
location = {Bellevue, USA},
pages = {425 -- 432},
publisher = {Omnipress},
title = {{Learning multi-view neighborhood preserving projections}},
year = {2011},
}
@article{3320,
abstract = {Powerful statistical models that can be learned efficiently from large amounts of data are currently revolutionizing computer vision. These models possess a rich internal structure reflecting task-specific relations and constraints. This monograph introduces the reader to the most popular classes of structured models in computer vision. Our focus is discrete undirected graphical models which we cover in detail together with a description of algorithms for both probabilistic inference and maximum a posteriori inference. We discuss separately recently successful techniques for prediction in general structured models. In the second part of this monograph we describe methods for parameter learning where we distinguish the classic maximum likelihood based methods from the more recent prediction-based parameter learning methods. We highlight developments to enhance current models and discuss kernelized models and latent variable models. To make the monograph more practical and to provide links to further study we provide examples of successful application of many methods in the computer vision literature.},
author = {Nowozin, Sebastian and Lampert, Christoph},
journal = {Foundations and Trends in Computer Graphics and Vision},
number = {3-4},
pages = {185 -- 365},
publisher = {Now Publishers},
title = {{Structured learning and prediction in computer vision}},
doi = {10.1561/0600000033},
volume = {6},
year = {2011},
}
@misc{3322,
abstract = {We study multi-label prediction for structured output spaces, a problem that occurs, for example, in object detection in images, secondary structure prediction in computational biology, and graph matching with symmetries. Conventional multi-label classification techniques are typically not applicable in this situation, because they require explicit enumeration of the label space, which is infeasible in the case of structured outputs. Relying on techniques originally designed for single-label structured prediction, in particular structured support vector machines, results in reduced prediction accuracy, or leads to infeasible optimization problems. In this work we derive a maximum-margin training formulation for multi-label structured prediction that remains computationally tractable while achieving high prediction accuracy. It also shares most beneficial properties with single-label maximum-margin approaches, in particular a formulation as a convex optimization problem, efficient working set training, and PAC-Bayesian generalization bounds.},
author = {Lampert, Christoph},
booktitle = {NIPS: Neural Information Processing Systems},
publisher = {Neural Information Processing Systems},
title = {{Maximum margin multi-label structured prediction}},
year = {2011},
}
@inproceedings{3323,
abstract = {We present a new decidable logic called TREX for expressing constraints about imperative tree data structures. In particular, TREX supports a transitive closure operator that can express reachability constraints, which often appear in data structure invariants. We show that our logic is closed under weakest precondition computation, which enables its use for automated software verification. We further show that satisfiability of formulas in TREX is decidable in NP. The low complexity makes it an attractive alternative to more expensive logics such as monadic second-order logic (MSOL) over trees, which have been traditionally used for reasoning about tree data structures.},
author = {Wies, Thomas and Muñiz, Marco and Kuncak, Viktor},
location = {Wrocław, Poland},
pages = {476 -- 491},
publisher = {Springer},
title = {{An efficient decision procedure for imperative tree data structures}},
doi = {10.1007/978-3-642-22438-6_36},
volume = {6803},
year = {2011},
}
@inproceedings{3324,
abstract = {Automated termination provers often use the following schema to prove that a program terminates: construct a relational abstraction of the program's transition relation and then show that the relational abstraction is well-founded. The focus of current tools has been on developing sophisticated techniques for constructing the abstractions while relying on known decidable logics (such as linear arithmetic) to express them. We believe we can significantly increase the class of programs that are amenable to automated termination proofs by identifying more expressive decidable logics for reasoning about well-founded relations. We therefore present a new decision procedure for reasoning about multiset orderings, which are among the most powerful orderings used to prove termination. We show that, using our decision procedure, one can automatically prove termination of natural abstractions of programs.},
author = {Piskac, Ruzica and Wies, Thomas},
editor = {Jhala, Ranjit and Schmidt, David},
location = {Texas, USA},
pages = {371 -- 386},
publisher = {Springer},
title = {{Decision procedures for automating termination proofs}},
doi = {10.1007/978-3-642-18275-4_26},
volume = {6538},
year = {2011},
}
@inproceedings{3325,
abstract = {We introduce streaming data string transducers that map input data strings to output data strings in a single left-to-right pass in linear time. Data strings are (unbounded) sequences of data values, tagged with symbols from a finite set, over a potentially infinite data domain that supports only the operations of equality and ordering. The transducer uses a finite set of states, a finite set of variables ranging over the data domain, and a finite set of variables ranging over data strings. At every step, it can make decisions based on the next input symbol, updating its state, remembering the input data value in its data variables, and updating data-string variables by concatenating data-string variables and new symbols formed from data variables, while avoiding duplication. We establish that the problems of checking functional equivalence of two streaming transducers, and of checking whether a streaming transducer satisfies pre/post verification conditions specified by streaming acceptors over input/output data-strings, are in PSPACE. We identify a class of imperative and a class of functional programs, manipulating lists of data items, which can be effectively translated to streaming data-string transducers. The imperative programs dynamically modify a singly-linked heap by changing next-pointers of heap-nodes and by adding new nodes. The main restriction specifies how the next-pointers can be used for traversal. We also identify an expressively equivalent fragment of functional programs that traverse a list using syntactically restricted recursive calls. Our results lead to algorithms for assertion checking and for checking functional equivalence of two programs, written possibly in different programming styles, for commonly used routines such as insert, delete, and reverse.},
author = {Alur, Rajeev and Cerny, Pavol},
location = {Texas, USA},
number = {1},
pages = {599 -- 610},
publisher = {ACM},
title = {{Streaming transducers for algorithmic verification of single pass list processing programs}},
doi = {10.1145/1926385.1926454},
volume = {46},
year = {2011},
}
@inproceedings{3326,
abstract = {Weighted automata map input words to numerical values. Applications of weighted automata include formal verification of quantitative properties, as well as text, speech, and image processing. A weighted automaton is defined with respect to a semiring. For the tropical semiring, the weight of a run is the sum of the weights of the transitions taken along the run, and the value of a word is the minimal weight of an accepting run on it. In the 90’s, Krob studied the decidability of problems on rational series defined with respect to the tropical semiring. Rational series are strongly related to weighted automata, and Krob’s results apply to them. In particular, it follows from Krob’s results that the universality problem (that is, deciding whether the values of all words are below some threshold) is decidable for weighted automata defined with respect to the tropical semiring with domain ℕ ∪ {∞}, and that the equality problem is undecidable when the domain is ℤ ∪ {∞}. In this paper we continue the study of the borders of decidability in weighted automata, describe alternative and direct proofs of the above results, and tighten them further. Unlike the proofs of Krob, which are algebraic in their nature, our proofs stay in the terrain of state machines, and the reduction is from the halting problem of a two-counter machine. This enables us to significantly simplify Krob’s reasoning, make the undecidability result accessible to the automata-theoretic community, and strengthen it to apply already to a very simple class of automata: all the states are accepting, there are no initial nor final weights, and all the weights on the transitions are from the set {−1, 0, 1}. The fact that we work directly with the automata enables us to tighten also the decidability results and to show that the universality problem for weighted automata defined with respect to the tropical semiring with domain ℕ ∪ {∞}, and in fact even with domain ℚ≥0 ∪ {∞}, is PSPACE-complete. Our results thus draw a sharper picture about the decidability of decision problems for weighted automata, both on the front of containment vs. universality and on the front of the ℕ ∪ {∞} vs. the ℤ ∪ {∞} domains.},
author = {Almagor, Shaull and Boker, Udi and Kupferman, Orna},
location = {Taipei, Taiwan},
pages = {482 -- 491},
publisher = {Springer},
title = {{What’s decidable about weighted automata?}},
doi = {10.1007/978-3-642-24372-1_37},
volume = {6996},
year = {2011},
}
@inproceedings{3328,
abstract = {We report on a generic uni- and bivariate algebraic kernel that is publicly available with CGAL 3.7. It comprises complete and correct, yet efficient, state-of-the-art implementations of polynomials, roots of polynomial systems, and the support to analyze algebraic curves defined by bivariate polynomials. The kernel design is generic, that is, various number types and substeps can be exchanged. It is accompanied by a ready-to-use interface to enable arrangements induced by algebraic curves, which have already been used as the basis for various geometric applications, such as arrangements on Dupin cyclides or the triangulation of algebraic surfaces. We present two novel applications: arrangements of rotated algebraic curves and Boolean set operations on polygons bounded by segments of algebraic curves. We also provide experiments showing that our general implementation is competitive and even often clearly outperforms existing implementations that are explicitly tailored for specific types of non-linear curves that are available in CGAL.},
author = {Berberich, Eric and Hemmer, Michael and Kerber, Michael},
location = {Paris, France},
pages = {179 -- 186},
publisher = {ACM},
title = {{A generic algebraic kernel for non-linear geometric applications}},
doi = {10.1145/1998196.1998224},
year = {2011},
}
@inproceedings{3329,
abstract = {We consider the offset-deconstruction problem: Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance µ in Hausdorff distance, as the Minkowski sum of another polygonal shape P with a disk of fixed radius? If it can, we also seek a preferably simple-looking solution shape P; then, P's offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give an O(n log n)-time exact decision algorithm that handles any polygonal shape, assuming the real-RAM model of computation. An alternative algorithm, based purely on rational arithmetic, answers the same deconstruction problem, up to an uncertainty parameter δ, and its running time depends on this parameter (in addition to the other input parameters: n, µ, and the radius of the disk). If the input shape is found to be approximable, the rational-arithmetic algorithm also computes an approximate solution shape for the problem. For convex shapes, the complexity of the exact decision algorithm drops to O(n), which is also the time required to compute a solution shape P with at most one more vertex than a vertex-minimal one. Our study is motivated by applications from two different domains. However, since the offset operation has numerous uses, we anticipate that the reverse question that we study here will be still more broadly applicable. We present results obtained with our implementation of the rational-arithmetic algorithm.},
author = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
booktitle = {Proceedings of the twenty-seventh annual symposium on Computational geometry},
location = {Paris, France},
pages = {187 -- 196},
publisher = {ACM},
title = {{Deconstructing approximate offsets}},
doi = {10.1145/1998196.1998225},
year = {2011},
}
@inproceedings{3330,
abstract = {We consider the problem of approximating all real roots of a square-free polynomial f. Given isolating intervals, our algorithm refines each of them to a width of at most 2^{-L}, that is, each of the roots is approximated to L bits after the binary point. Our method provides a certified answer for arbitrary real polynomials, only requiring finite approximations of the polynomial coefficients and choosing a suitable working precision adaptively. In this way, we get a correct algorithm that is simple to implement and practically efficient. Our algorithm uses the quadratic interval refinement method; we adapt that method to be able to cope with inaccuracies when evaluating f, without sacrificing its quadratic convergence behavior. We prove a bound on the bit complexity of our algorithm in terms of degree, coefficient size and discriminant. Our bound improves previous work on integer polynomials by a factor of deg f and essentially matches best known theoretical bounds on root approximation which are obtained by very sophisticated algorithms.},
author = {Kerber, Michael and Sagraloff, Michael},
location = {California, USA},
pages = {209 -- 216},
publisher = {Springer},
title = {{Root refinement for real polynomials}},
doi = {10.1145/1993886.1993920},
year = {2011},
}
@article{3332,
abstract = {Given an algebraic hypersurface O in ℝ^d, how many simplices are necessary for a simplicial complex isotopic to O? We address this problem and the variant where all vertices of the complex must lie on O. We give asymptotically tight worst-case bounds for algebraic plane curves. Our results gradually improve known bounds in higher dimensions; however, the question for tight bounds remains unsolved for d ≥ 3.},
author = {Kerber, Michael and Sagraloff, Michael},
journal = {Graphs and Combinatorics},
number = {3},
pages = {419 -- 430},
publisher = {Springer},
title = {{A note on the complexity of real algebraic hypersurfaces}},
doi = {10.1007/s00373-011-1020-7},
volume = {27},
year = {2011},
}
@article{3334,
author = {Edelsbrunner, Herbert and Pach, János and Ziegler, Günter},
journal = {Discrete & Computational Geometry},
number = {1},
pages = {1 -- 2},
publisher = {Springer},
title = {{Letter from the new editors-in-chief}},
doi = {10.1007/s00454-010-9313-9},
volume = {45},
year = {2011},
}
@inbook{3335,
abstract = {We study the topology of the Megaparsec Cosmic Web in terms of the scale-dependent Betti numbers, which formalize the topological information content of the cosmic mass distribution. While the Betti numbers do not fully quantify topology, they extend the information beyond conventional cosmological studies of topology in terms of genus and Euler characteristic. The richer information content of Betti numbers goes along with the availability of fast algorithms to compute them. For continuous density fields, we determine the scale-dependence of Betti numbers by invoking the cosmologically familiar filtration of sublevel or superlevel sets defined by density thresholds. For the discrete galaxy distribution, however, the analysis is based on the alpha shapes of the particles. These simplicial complexes constitute an ordered sequence of nested subsets of the Delaunay tessellation, a filtration defined by the scale parameter, α. As they are homotopy equivalent to the sublevel sets of the distance field, they are an excellent tool for assessing the topological structure of a discrete point distribution. In order to develop an intuitive understanding for the behavior of Betti numbers as a function of α, and their relation to the morphological patterns in the Cosmic Web, we first study them within the context of simple heuristic Voronoi clustering models. These can be tuned to consist of specific morphological elements of the Cosmic Web, i.e. clusters, filaments, or sheets. To elucidate the relative prominence of the various Betti numbers in different stages of morphological evolution, we introduce the concept of alpha tracks. Subsequently, we address the topology of structures emerging in the standard LCDM scenario and in cosmological scenarios with alternative dark energy content. The evolution of the Betti numbers is shown to reflect the hierarchical evolution of the Cosmic Web. We also demonstrate that the scale-dependence of the Betti numbers yields a promising measure of cosmological parameters, with a potential to help in determining the nature of dark energy and to probe primordial non-Gaussianities. We also discuss the expected Betti numbers as a function of the density threshold for superlevel sets of a Gaussian random field. Finally, we introduce the concept of persistent homology. It measures scale levels of the mass distribution and allows us to separate small from large scale features. Within the context of the hierarchical cosmic structure formation, persistence provides a natural formalism for a multiscale topology study of the Cosmic Web.},
author = {Van De Weygaert, Rien and Vegter, Gert and Edelsbrunner, Herbert and Jones, Bernard and Pranav, Pratyush and Park, Changbom and Hellwing, Wojciech and Eldering, Bob and Kruithof, Nico and Bos, Patrick and Hidding, Johan and Feldbrugge, Job and Ten Have, Eline and Van Engelen, Matti and Caroli, Manuel and Teillaud, Monique},
booktitle = {Transactions on Computational Science XIV},
editor = {Gavrilova, Marina and Tan, Kenneth and Mostafavi, Mir},
pages = {60 -- 101},
publisher = {Springer},
title = {{Alpha, Betti and the Megaparsec Universe: On the topology of the Cosmic Web}},
doi = {10.1007/978-3-642-25249-5_3},
volume = {6970},
year = {2011},
}
@inproceedings{3336,
abstract = {We introduce TopoCut: a new way to integrate knowledge about topological properties (TPs) into a random field image segmentation model. Instead of including TPs as additional constraints during minimization of the energy function, we devise an efficient algorithm for modifying the unary potentials such that the resulting segmentation is guaranteed to have the desired properties. Our method is more flexible in the sense that it handles more topology constraints than previous methods, which were only able to enforce pairwise or global connectivity. In particular, our method is very fast, making it for the first time possible to enforce global topological properties in practical image segmentation tasks.},
author = {Chen, Chao and Freedman, Daniel and Lampert, Christoph},
booktitle = {CVPR: Computer Vision and Pattern Recognition},
location = {Colorado Springs, CO, USA},
pages = {2089 -- 2096},
publisher = {IEEE},
title = {{Enforcing topological constraints in random field image segmentation}},
doi = {10.1109/CVPR.2011.5995503},
year = {2011},
}
@inproceedings{3337,
abstract = {Playing table tennis is a difficult task for robots, especially due to their limitations of acceleration. A key bottleneck is the amount of time needed to reach the desired hitting position and velocity of the racket for returning the incoming ball. Here, it often does not suffice to simply extrapolate the ball's trajectory after the opponent returns it but more information is needed. Humans are able to predict the ball's trajectory based on the opponent's moves and, thus, have a considerable advantage. Hence, we propose to incorporate an anticipation system into robot table tennis players, which enables the robot to react earlier while the opponent is performing the striking movement. Based on visual observation of the opponent's racket movement, the robot can predict the aim of the opponent and adjust its movement generation accordingly. The policies for deciding how and when to react are obtained by reinforcement learning. We conduct experiments with an existing robot player to show that the learned reaction policy can significantly improve the performance of the overall system.},
author = {Wang, Zhikun and Lampert, Christoph and Mülling, Katharina and Schölkopf, Bernhard and Peters, Jan},
location = {San Francisco, USA},
pages = {332 -- 337},
publisher = {IEEE},
title = {{Learning anticipation policies for robot table tennis}},
doi = {10.1109/IROS.2011.6094892},
year = {2011},
}
@unpublished{3338,
abstract = {We consider 2-player games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine the successor state. We study concurrent games with ω-regular winning conditions specified as parity objectives. We consider the qualitative analysis problems: the computation of the almost-sure and limit-sure winning set of states, where player 1 can ensure to win with probability 1 and with probability arbitrarily close to 1, respectively. In general the almost-sure and limit-sure winning strategies require both infinite-memory as well as infinite-precision (to describe probabilities). We study the bounded-rationality problem for qualitative analysis of concurrent parity games, where the strategy set for player 1 is restricted to bounded-resource strategies. In terms of precision, strategies can be deterministic, uniform, finite-precision or infinite-precision; and in terms of memory, strategies can be memoryless, finite-memory or infinite-memory. We present a precise and complete characterization of the qualitative winning sets for all combinations of classes of strategies. In particular, we show that uniform memoryless strategies are as powerful as finite-precision infinite-memory strategies, and infinite-precision memoryless strategies are as powerful as infinite-precision finite-memory strategies. We show that the winning sets can be computed in O(n^{2d+3}) time, where n is the size of the game structure and 2d is the number of priorities (or colors), and our algorithms are symbolic. The membership problem of whether a state belongs to a winning set can be decided in NP ∩ coNP. While this complexity is the same as for the simpler class of turn-based parity games, where in each state only one of the two players has a choice of moves, our algorithms, which are obtained by characterization of the winning sets as μ-calculus formulas, are considerably more involved than those for turn-based games.},
author = {Chatterjee, Krishnendu},
booktitle = {arXiv},
pages = {1 -- 51},
publisher = {ArXiv},
title = {{Bounded rationality in concurrent parity games}},
year = {2011},
}
@unpublished{3339,
abstract = {Turn-based stochastic games and their important subclass, Markov decision processes (MDPs), provide models for systems with both probabilistic and nondeterministic behaviors. We consider turn-based stochastic games with two classical quantitative objectives: discounted-sum and long-run average objectives. The game models and the quantitative objectives are widely used in probabilistic verification, planning, optimal inventory control, network protocol and performance analysis. Games and MDPs that model realistic systems often have very large state spaces, and probabilistic abstraction techniques are necessary to handle the state-space explosion. The commonly used full-abstraction techniques do not yield space-savings for systems that have many states with similar value but not necessarily similar transition structure. A semi-abstraction technique, namely magnifying-lens abstraction (MLA), which clusters states based on value only, disregarding differences in their transition relation, was proposed for qualitative objectives (reachability and safety objectives). In this paper we extend the MLA technique to solve stochastic games with discounted-sum and long-run average objectives. We present the MLA-based abstraction-refinement algorithm for stochastic games and MDPs with discounted-sum objectives. For long-run average objectives, our solution works for all MDPs and a sub-class of stochastic games where every state has the same value.},
author = {Chatterjee, Krishnendu and De Alfaro, Luca and Roy, Pritam},
booktitle = {arXiv},
pages = {17},
publisher = {arXiv},
title = {{Magnifying lens abstraction for stochastic games with discounted and long-run average objectives}},
year = {2011},
}
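The value-only clustering idea behind MLA can be pictured with a small sketch (our simplification, not the paper's abstraction-refinement algorithm; names and the band width are hypothetical): run plain discounted value iteration on the concrete MDP, then group states whose values fall into the same band, ignoring their transition structure.

def value_iteration(states, actions, post_prob, reward, gamma, eps=1e-6):
    """Plain discounted value iteration for a maximizing MDP.
    post_prob maps (state, action) to a list of (successor, probability);
    gamma in (0, 1) is the discount factor."""
    v = {s: 0.0 for s in states}
    while True:
        nv = {s: max(reward[s] + gamma * sum(p * v[t]
                                             for t, p in post_prob[(s, a)])
                     for a in actions[s])
              for s in states}
        if max(abs(nv[s] - v[s]) for s in states) < eps:
            return nv
        v = nv

def value_clusters(v, width):
    """Group states by value alone -- the clustering principle of
    magnifying-lens abstraction -- using bands of the given width."""
    bands = {}
    for s, val in v.items():
        bands.setdefault(int(val // width), []).append(s)
    return list(bands.values())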
@inproceedings{3342,
abstract = {We consider Markov decision processes (MDPs) with ω-regular specifications given as parity objectives. We consider the problem of computing the set of almost-sure winning states from where the objective can be ensured with probability 1. The algorithms for the computation of the almost-sure winning set for parity objectives iteratively use the solutions for the almost-sure winning set for Büchi objectives (a special case of parity objectives). Our contributions are as follows: First, we present the first subquadratic symbolic algorithm to compute the almost-sure winning set for MDPs with Büchi objectives; our algorithm takes O(n·√m) symbolic steps, as compared to the previously known algorithm that takes O(n^2) symbolic steps, where n is the number of states and m is the number of edges of the MDP. In practice MDPs often have constant out-degree, and then our symbolic algorithm takes O(n·√n) symbolic steps, as compared to the previously known O(n^2) symbolic steps. Second, we present a new algorithm, namely the win-lose algorithm, with the following two properties: (a) it iteratively computes subsets of the almost-sure winning set and its complement, whereas all previous algorithms discover the almost-sure winning set only upon termination; and (b) it requires O(n·√K) symbolic steps, where K is the maximal number of edges of the strongly connected components (SCCs) of the MDP. The win-lose algorithm requires symbolic computation of SCCs. Third, we improve the algorithm for symbolic SCC computation; the previously known algorithm takes linear symbolic steps, and our new algorithm improves the constants associated with the linear number of steps. In the worst case the previously known algorithm takes 5·n symbolic steps, whereas our new algorithm takes 4·n symbolic steps.},
author = {Chatterjee, Krishnendu and Henzinger, Monika and Joglekar, Manas and Shah, Nisarg},
editor = {Gopalakrishnan, Ganesh and Qadeer, Shaz},
location = {Snowbird, USA},
pages = {260 -- 276},
publisher = {Springer},
title = {{Symbolic algorithms for qualitative analysis of Markov decision processes with Büchi objectives}},
doi = {10.1007/978-3-642-22110-1_21},
volume = {6806},
year = {2011},
}
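For readers unfamiliar with the cost model used above: a "symbolic step" is one image or pre-image computation on a set of states. The toy sketch below (ours; explicit Python sets stand in for the BDDs of a real symbolic implementation, and it shows plain backward reachability, not the paper's subquadratic Büchi algorithm) makes the step-counting concrete.

def pre(edges, target):
    """One symbolic step: the set of states with an edge into `target`."""
    return {u for (u, v) in edges if v in target}

def symbolic_reach(edges, target):
    """Backward reachability as a symbolic fixpoint; returns the states
    that can reach `target` together with the number of symbolic steps."""
    reach, steps = set(target), 0
    while True:
        steps += 1
        new = reach | pre(edges, reach)
        if new == reach:
            return reach, steps
        reach = new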
@inproceedings{3343,
abstract = {We present faster and dynamic algorithms for the following problems arising in probabilistic verification: computation of the maximal end-component (MEC) decomposition of Markov decision processes (MDPs), and of the almost-sure winning set for reachability and parity objectives in MDPs. We achieve the following running times for static algorithms in MDPs with graphs of n vertices and m edges: (1) O(m·min{√m, n^{2/3}}) for the MEC decomposition, improving the longstanding O(m·n) bound; (2) O(m·n^{2/3}) for reachability objectives, improving the previous O(m·√m) bound for m > n^{4/3}; and (3) O(m·min{√m, n^{2/3}}·log(d)) for parity objectives with d priorities, improving the previous O(m·√m·d) bound. We also give incremental and decremental algorithms in linear time for MEC decomposition and reachability objectives, and O(m·log d) time for parity objectives.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
location = {San Francisco, USA},
pages = {1318 -- 1336},
publisher = {SIAM},
title = {{Faster and dynamic algorithms for maximal end component decomposition and related graph problems in probabilistic verification}},
doi = {10.1137/1.9781611973082.101},
year = {2011},
}
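As background for the improved bounds above, here is a minimal sketch of the classical (not the faster) maximal end-component decomposition: repeatedly compute SCCs, prune actions that can leave their state's component and states left without actions, and stop at a fixpoint. The data layout is our assumption.

def tarjan_sccs(nodes, succ):
    """Tarjan's strongly-connected-components algorithm (recursive sketch)."""
    index, low = {}, {}
    stack, on_stack, comps = [], set(), []
    counter = [0]
    def visit(v):
        index[v] = low[v] = counter[0]; counter[0] += 1
        stack.append(v); on_stack.add(v)
        for w in succ(v):
            if w not in index:
                visit(w)
                low[v] = min(low[v], low[w])
            elif w in on_stack:
                low[v] = min(low[v], index[w])
        if low[v] == index[v]:
            comp = set()
            while True:
                w = stack.pop(); on_stack.discard(w); comp.add(w)
                if w == v:
                    break
            comps.append(comp)
    for v in nodes:
        if v not in index:
            visit(v)
    return comps

def mec_decomposition(states, actions, post):
    """Naive MEC decomposition of an MDP. actions: state -> set of enabled
    actions; post: (state, action) -> set of successor states. An action
    survives only if all its successors stay in its state's SCC."""
    act = {s: set(actions[s]) for s in states}
    alive = set(states)
    while True:
        def succ(s):
            return {t for a in act[s] for t in post[(s, a)] if t in alive}
        comps = tarjan_sccs(list(alive), succ)
        comp_of = {s: i for i, c in enumerate(comps) for s in c}
        changed = False
        for s in list(alive):
            for a in list(act[s]):
                if any(comp_of.get(t) != comp_of[s] for t in post[(s, a)]):
                    act[s].discard(a); changed = True
            if not act[s]:
                alive.discard(s); changed = True
        if not changed:
            return [c for c in comps if all(act[s] for s in c)]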
@inproceedings{3344,
abstract = {Games played on graphs provide the mathematical framework to analyze several important problems in computer science as well as mathematics, such as the synthesis problem of Church, model checking of open reactive systems, and many others. On the basis of the mode of interaction of the players, these games can be classified as follows: (a) turn-based (players make moves in turns); and (b) concurrent (players make moves simultaneously). On the basis of the information available to the players, these games can be classified as follows: (a) perfect-information (players have a perfect view of the game); and (b) partial-information (players have a partial view of the game). In this talk we consider all these classes of games with reachability objectives, where the goal of one player is to reach a set of target vertices of the graph, and the goal of the opponent is to prevent the player from reaching the target. We survey the results for various classes of games, which range from linear-time decision algorithms to EXPTIME-complete problems to undecidable problems.},
author = {Chatterjee, Krishnendu},
editor = {Delzanno, Giorgio and Potapov, Igor},
location = {Genoa, Italy},
pages = {1 -- 1},
publisher = {Springer},
title = {{Graph games with reachability objectives}},
doi = {10.1007/978-3-642-24288-5_1},
volume = {6945},
year = {2011},
}
@inproceedings{3345,
abstract = {We consider Markov decision processes (MDPs) with mean-payoff parity and energy parity objectives. In system design, the parity objective is used to encode ω-regular specifications, and the mean-payoff and energy objectives can be used to model quantitative resource constraints. The energy condition requires that the resource level never drops below 0, and the mean-payoff condition requires that the limit-average value of the resource consumption is within a threshold. While these two (energy and mean-payoff) classical conditions are equivalent for two-player games, we show that they differ for MDPs. We show that the problem of deciding whether a state is almost-sure winning (i.e., winning with probability 1) in energy parity MDPs is in NP ∩ coNP, while for mean-payoff parity MDPs, the problem is solvable in polynomial time, improving a recent PSPACE bound.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
location = {Warsaw, Poland},
pages = {206 -- 218},
publisher = {Springer},
title = {{Energy and mean-payoff parity Markov Decision Processes}},
doi = {10.1007/978-3-642-22993-0_21},
volume = {6907},
year = {2011},
}
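The energy condition quoted above has a direct operational reading; a minimal sketch (ours) checks it on a finite run prefix.

def energy_safe(initial_credit, weights):
    """Energy condition: starting from initial_credit, the running sum of
    weights (the resource level) must never drop below 0."""
    level = initial_credit
    for w in weights:
        level += w
        if level < 0:
            return False
    return True

# Initial credit 1 survives the run [-1, +2, -2]; credit 0 does not.
assert energy_safe(1, [-1, 2, -2]) and not energy_safe(0, [-1, 2, -2])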
@inproceedings{3346,
abstract = {We study Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) functions. We consider two different objectives, namely, expectation and satisfaction objectives. Given an MDP with k reward functions, in the expectation objective the goal is to maximize the expected limit-average value, and in the satisfaction objective the goal is to maximize the probability of runs such that the limit-average value stays above a given vector. We show that under the expectation objective, in contrast to the single-objective case, both randomization and memory are necessary for strategies, and that finite-memory randomized strategies are sufficient. Under the satisfaction objective, in contrast to the single-objective case, infinite memory is necessary for strategies, while randomized memoryless strategies are sufficient for ε-approximation, for all ε > 0. We further prove that the decision problems for both expectation and satisfaction objectives can be solved in polynomial time, and that the trade-off curve (Pareto curve) can be ε-approximated in time polynomial in the size of the MDP and 1/ε, and exponential in the number of reward functions, for all ε > 0. Our results also reveal flaws in previous work on MDPs with multiple mean-payoff functions under the expectation objective; we correct the flaws and obtain improved results.},
author = {Brázdil, Tomáš and Brožek, Václav and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín},
location = {Toronto, Canada},
publisher = {IEEE},
title = {{Two views on multiple mean-payoff objectives in Markov Decision Processes}},
doi = {10.1109/LICS.2011.10},
year = {2011},
}
@inproceedings{3347,
abstract = {The class of ω-regular languages provides a robust specification language in verification. Every ω-regular condition can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens "eventually". Finitary liveness was proposed by Alur and Henzinger as a stronger formulation of liveness. It requires that there exists an unknown, fixed bound b such that something good happens within b transitions. In this work we consider automata with finitary acceptance conditions defined by finitary Büchi, parity and Streett languages. We study languages expressible by such automata: we give their topological complexity and present a regular-expression characterization. We compare the expressive power of finitary automata and give optimal algorithms for classical decision questions. We show that the finitary languages are Σ2-complete; we present a complete picture of the expressive power of various classes of automata with finitary and infinitary acceptance conditions; we show that the languages defined by finitary parity automata exactly characterize the star-free fragment of ωB-regular languages; and we show that emptiness is NLOGSPACE-complete and universality as well as language inclusion are PSPACE-complete for finitary parity and Streett automata.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
location = {Tarragona, Spain},
pages = {216 -- 226},
publisher = {Springer},
title = {{Finitary languages}},
doi = {10.1007/978-3-642-21254-3_16},
volume = {6638},
year = {2011},
}
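The bound b in the finitary condition can be checked directly on finite run segments; the sketch below (ours) mirrors the definition "something good happens within b transitions", judging every position that has a full window of b steps ahead of it.

def finitary_within_bound(run, good, b):
    """Check, on a finite run segment, the finitary requirement with a
    concrete bound b: from every position, a good state occurs within the
    next b steps (positions without b remaining steps are not judged)."""
    n = len(run)
    for i in range(n - b):
        if not any(good(run[j]) for j in range(i + 1, i + b + 1)):
            return False
    return True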
@inproceedings{3348,
abstract = {We study synthesis of controllers for real-time systems, where the objective is to stay in a given safe set. The problem is solved by obtaining winning strategies in the setting of concurrent two-player timed automaton games with safety objectives. To prevent a player from winning by blocking time, we restrict each player to strategies that ensure that the player cannot be responsible for causing a Zeno run. We construct winning strategies for the controller which require access only to (1) the system clocks (thus, controllers which require their own internal infinitely precise clocks are not necessary), and (2) a linear (in the number of clocks) number of memory bits. Precisely, we show that for safety objectives, a memory of size (3·|C| + lg(|C|+1)) bits suffices for winning controller strategies, where C is the set of clocks of the timed automaton game, significantly improving the previously known exponential bound. We also settle the open question of whether winning region controller strategies require memory for safety objectives, by giving an example showing that region strategies do need memory to win for safety objectives.},
author = {Chatterjee, Krishnendu and Prabhu, Vinayak},
location = {Chicago, USA},
pages = {221 -- 230},
publisher = {ACM},
title = {{Synthesis of memory-efficient real-time controllers for safety objectives}},
doi = {10.1145/1967701.1967734},
year = {2011},
}
@inproceedings{3349,
abstract = {Games on graphs provide a natural model for reactive non-terminating systems. In such games, the interaction of two players on an arena results in an infinite path that describes a run of the system. Different settings are used to model various open systems in computer science, for instance turn-based or concurrent moves, and deterministic or stochastic transitions. In this paper, we are interested in turn-based games, and specifically in deterministic parity games and stochastic reachability games (also known as simple stochastic games). We present a simple, direct and efficient reduction from deterministic parity games to simple stochastic games: it yields an arena whose size is linear, up to a logarithmic factor, in the size of the original arena.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
location = {Minori, Italy},
pages = {74 -- 86},
publisher = {EPTCS},
title = {{A reduction from parity games to simple stochastic games}},
doi = {10.4204/EPTCS.54.6},
volume = {54},
year = {2011},
}
@inproceedings{3350,
abstract = {A controller for a discrete game with ω-regular objectives requires attention if, intuitively, it requires measuring the state and switching from the current control action. Minimum-attention controllers are preferable in modern shared implementations of cyber-physical systems because they place the least burden on system resources such as processor time or communication bandwidth. We give algorithms to compute minimum-attention controllers for ω-regular objectives in imperfect-information discrete two-player games. We show a polynomial-time reduction from minimum-attention controller synthesis to synthesis of controllers for mean-payoff parity objectives in games of incomplete information. This gives an optimal EXPTIME-complete synthesis algorithm. We show that the minimum-attention controller problem is decidable for infinite-state systems with finite bisimulation quotients. In particular, the problem is decidable for timed and rectangular automata.},
author = {Chatterjee, Krishnendu and Majumdar, Rupak},
editor = {Fahrenberg, Uli and Tripakis, Stavros},
location = {Aalborg, Denmark},
pages = {145 -- 159},
publisher = {Springer},
title = {{Minimum attention controller synthesis for omega-regular objectives}},
doi = {10.1007/978-3-642-24310-3_11},
volume = {6919},
year = {2011},
}
@inproceedings{3351,
abstract = {In two-player games on graphs, the players construct an infinite path through the game graph and get a reward computed by a payoff function over infinite paths. Over weighted graphs, the typical and most studied payoff functions compute the limit-average or the discounted sum of the rewards along the path. Besides their simple definition, these two payoff functions enjoy the property that memoryless optimal strategies always exist. In an attempt to construct other simple payoff functions, we define a class of payoff functions which compute an (infinite) weighted average of the rewards. This new class contains both the limit-average and the discounted-sum functions, and we show that these are the only members of the class which induce memoryless optimal strategies, showing that there are essentially no other simple payoff functions.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Singh, Rohit},
editor = {Owe, Olaf and Steffen, Martin and Telle, Jan Arne},
location = {Oslo, Norway},
pages = {148 -- 159},
publisher = {Springer},
title = {{On memoryless quantitative objectives}},
doi = {10.1007/978-3-642-22953-4_13},
volume = {6914},
year = {2011},
}
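The two classical payoff functions named above are easy to state concretely; a minimal sketch (ours), evaluated on finite approximations of an infinite path of rewards:

def discounted_sum(rewards, lam):
    """Discounted sum: the i-th reward (0-indexed) is weighted by 1/lam**i
    for a discount factor lam > 1; the series converges for bounded rewards."""
    return sum(r / lam**i for i, r in enumerate(rewards))

def limit_average(cycle):
    """Limit-average of the ultimately periodic path prefix.cycle^omega:
    the finite prefix is irrelevant in the limit, so the value is the plain
    average over one repetition of the cycle."""
    return sum(cycle) / len(cycle)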
@article{3352,
abstract = {Exploring the connection of biology with reactive systems to better understand living systems.},
author = {Fisher, Jasmin and Harel, David and Henzinger, Thomas A},
journal = {Communications of the ACM},
number = {10},
pages = {72 -- 82},
publisher = {ACM},
title = {{Biology as reactivity}},
doi = {10.1145/2001269.2001289},
volume = {54},
year = {2011},
}
@article{3353,
abstract = {Compositional theories are crucial when designing large and complex systems from smaller components. In this work we propose such a theory for synchronous concurrent systems. Our approach follows so-called interface theories, which use game-theoretic interpretations of composition and refinement. These are appropriate for systems with distinct inputs and outputs, and explicit conditions on inputs that must be enforced during composition. Our interfaces model systems that execute in an infinite sequence of synchronous rounds. At each round, a contract must be satisfied. The contract is simply a relation specifying the set of valid input/output pairs. Interfaces can be composed by parallel, serial or feedback composition. A refinement relation between interfaces is defined and shown to have two main properties: (1) it is preserved by composition, and (2) it is equivalent to substitutability, namely, the ability to replace an interface by another one in any context. Shared refinement and abstraction operators, corresponding to greatest lower and least upper bounds with respect to refinement, are also defined. Input-complete interfaces, which impose no restrictions on inputs, and deterministic interfaces, which produce a unique output for any legal input, are discussed as special cases, and an interesting duality between the two classes is exposed. A number of illustrative examples are provided, as well as algorithms to compute compositions, check refinement, and so on, for finite-state interfaces.},
author = {Tripakis, Stavros and Lickly, Ben and Henzinger, Thomas A and Lee, Edward},
journal = {ACM Transactions on Programming Languages and Systems (TOPLAS)},
number = {4},
publisher = {ACM},
title = {{A theory of synchronous relational interfaces}},
doi = {10.1145/1985342.1985345},
volume = {33},
year = {2011},
}
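The contract-as-relation view admits a compact stateless sketch (ours; the paper's interfaces are stateful and run over infinite rounds). Serial composition is game-theoretic: an input is legal only if every output the upstream interface may produce is a legal input of the downstream one.

class Interface:
    """A stateless relational interface: a set of valid (input, output) pairs."""
    def __init__(self, contract):
        self.contract = set(contract)

    def legal_inputs(self):
        return {x for (x, _) in self.contract}

    def outputs_on(self, x):
        return {y for (a, y) in self.contract if a == x}

    def serial(self, other):
        """Serial composition: an input x is legal only if every output of
        self on x is a legal input of `other`."""
        composed = set()
        for x in self.legal_inputs():
            outs = self.outputs_on(x)
            if outs <= other.legal_inputs():
                composed |= {(x, z) for y in outs for z in other.outputs_on(y)}
        return Interface(composed)

    def refines(self, other):
        """self refines other: it accepts every legal input of `other` and,
        on those inputs, produces only outputs that `other` allows."""
        return all(x in self.legal_inputs()
                   and self.outputs_on(x) <= other.outputs_on(x)
                   for x in other.legal_inputs())

# Example: increment on {0, 1} composed with doubling on {1, 2}.
inc = Interface({(0, 1), (1, 2)})
dbl = Interface({(1, 2), (2, 4)})
assert inc.serial(dbl).contract == {(0, 2), (1, 4)}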
@article{3354,
abstract = {We consider two-player games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine the successor state. We consider ω-regular winning conditions specified as parity objectives. Both players are allowed to use randomization when choosing their moves. We study the computation of the limit-winning set of states, consisting of the states where the sup-inf value of the game for player 1 is 1: in other words, a state is limit-winning if player 1 can ensure a probability of winning arbitrarily close to 1. We show that the limit-winning set can be computed in O(n^{2d+2}) time, where n is the size of the game structure and 2d is the number of priorities (or colors). The membership problem of whether a state belongs to the limit-winning set can be decided in NP ∩ coNP. While this complexity is the same as for the simpler class of turn-based parity games, where in each state only one of the two players has a choice of moves, our algorithms are considerably more involved than those for turn-based games. This is because concurrent games do not satisfy two of the most fundamental properties of turn-based parity games. First, in concurrent games limit-winning strategies require randomization; and second, they require infinite memory.},
author = {Chatterjee, Krishnendu and De Alfaro, Luca and Henzinger, Thomas A},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {{Qualitative concurrent parity games}},
doi = {10.1145/1970398.1970404},
volume = {12},
year = {2011},
}
@inproceedings{3355,
abstract = {Byzantine Fault Tolerant (BFT) protocols aim to improve the reliability of distributed systems. They enable systems to tolerate arbitrary failures in a bounded number of nodes. BFT protocols are usually proven correct for certain safety and liveness properties. However, recent studies have shown that the performance of state-of-the-art BFT protocols decreases drastically in the presence of even a single malicious node. This motivates a formal quantitative analysis of BFT protocols to investigate their performance characteristics under different scenarios. We present HyPerf, a new hybrid methodology based on model checking and simulation techniques for evaluating the performance of BFT protocols. We build a transition system corresponding to a BFT protocol and systematically explore the set of behaviors allowed by the protocol. We associate certain timing information with different operations in the protocol, like cryptographic operations and message transmission. After an elaborate state exploration, we use the time information to evaluate the performance characteristics of the protocol using simulation techniques. We integrate our framework in Mace, a tool for building and verifying distributed systems. We evaluate the performance of PBFT using our framework. We describe two different use-cases of our methodology. For the benign operation of the protocol, we use the time information as random variables to compute the probability distribution of the execution times. In the presence of faults, we estimate the worst-case performance of the protocol for various attacks that can be employed by malicious nodes. Our results show the importance of hybrid techniques in systematically analyzing the performance of large-scale systems.},
author = {Halalai, Raluca and Henzinger, Thomas A and Singh, Vasu},
location = {Aachen, Germany},
pages = {255 -- 264},
publisher = {IEEE},
title = {{Quantitative evaluation of BFT protocols}},
doi = {10.1109/QEST.2011.40},
year = {2011},
}
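The first use-case (timing information as random variables) can be pictured with a toy Monte-Carlo sketch. The operation names and distributions below are invented, and this is not HyPerf's actual Mace integration; it only shows how per-operation samplers compose into an execution-time distribution for one protocol path.

import random
import statistics

def execution_time_distribution(path_ops, op_time, samples=10000):
    """Monte-Carlo estimate of the execution-time distribution of one
    protocol path; op_time maps each operation (e.g. a cryptographic step
    or a message transmission) to a sampler for its duration."""
    return [sum(op_time[op]() for op in path_ops) for _ in range(samples)]

times = execution_time_distribution(
    ["sign", "send", "verify"],
    {"sign": lambda: random.gauss(2.0, 0.2),
     "send": lambda: random.expovariate(1 / 5.0),
     "verify": lambda: random.gauss(1.5, 0.1)})
print(statistics.mean(times), statistics.quantiles(times, n=100)[98])  # mean, ~p99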
@inproceedings{3356,
abstract = {There has recently been a significant effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions, aiming for a general and flexible framework for quantitative-oriented specifications. At the heart of quantitative objectives lies the accumulation of values along a computation. It is either the accumulated summation, as with the energy objectives, or the accumulated average, as with the mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point of time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire computation. We study the border of decidability for extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities by prefix-accumulation assertions, and extending LTL with path-accumulation assertions, results in temporal logics whose model-checking problem is decidable. The extended logics significantly extend the currently known energy and mean-payoff objectives. Moreover, the prefix-accumulation assertions may be refined with "controlled accumulation", allowing one, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that the fragment we point to is, in a sense, the maximal logic whose extension with prefix-accumulation assertions permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, and in particular CTL and LTL, makes the problem undecidable.},
author = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A and Kupferman, Orna},
location = {Toronto, Canada},
publisher = {IEEE},
title = {{Temporal specifications with accumulative values}},
doi = {10.1109/LICS.2011.33},
year = {2011},
}
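The prefix-accumulation assertions have straightforward semantics on a finite trace; a minimal sketch (ours) evaluates Sum(v) >= c and Avg(v) >= c at every position.

def check_prefix_assertions(values, c):
    """For a finite trace of v-values, report at each position i whether
    Sum(v) >= c and Avg(v) >= c hold for the prefix up to i."""
    total, results = 0.0, []
    for i, v in enumerate(values, start=1):
        total += v
        results.append((i, total >= c, total / i >= c))
    return results

# Example with threshold c = 2:
# check_prefix_assertions([1, 4, 1], 2)
# -> [(1, False, False), (2, True, True), (3, True, True)]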
@inproceedings{3357,
abstract = {We consider two-player graph games whose objectives are request-response conditions, i.e., conjunctions of conditions of the form "if a state with property Rq is visited, then later a state with property Rp is visited". The winner of such games can be decided in EXPTIME and the problem is known to be NP-hard. In this paper, we close this gap by showing that the problem is, in fact, EXPTIME-complete. We show that the problem becomes PSPACE-complete if we only consider games played on DAGs, and NP-complete or PTIME-complete if there is only one player (depending on whether he wants to enforce or spoil the request-response condition). We also present near-optimal bounds on the memory needed to design winning strategies for each player, in each case.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Horn, Florian},
editor = {Dediu, Adrian-Horia and Inenaga, Shunsuke and Martín-Vide, Carlos},
location = {Tarragona, Spain},
pages = {227 -- 237},
publisher = {Springer},
title = {{The complexity of request-response games}},
doi = {10.1007/978-3-642-21254-3_17},
volume = {6638},
year = {2011},
}
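On ultimately periodic ("lasso") runs, the request-response condition can be checked directly; the sketch below (ours; the predicate-pair encoding is an assumption) uses the fact that a request occurring in the repeated cycle is answered iff some response also occurs in the cycle.

def satisfies_request_response(prefix, cycle, pairs):
    """Check conjunctions of request-response conditions on the run
    prefix.cycle^omega; pairs is a list of (Rq, Rp) state predicates:
    every state satisfying Rq must be followed later by one satisfying Rp."""
    run = list(prefix) + list(cycle)
    for rq, rp in pairs:
        for i, s in enumerate(run):
            if rq(s):
                answered = (any(rp(t) for t in run[i + 1:])
                            or any(rp(t) for t in cycle))
                if not answered:
                    return False
    return True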
@inproceedings{3358,
abstract = {The static scheduling problem often arises as a fundamental problem in real-time systems and grid computing. We consider the problem of statically scheduling a large job, expressed as a task graph, on a large number of computing nodes, such as a data center. This paper solves the large-scale static scheduling problem using abstraction refinement, a technique commonly used in formal verification to efficiently solve computationally hard problems. A scheduler based on abstraction refinement first attempts to solve the scheduling problem with abstract representations of the job and the computing resources. As abstract representations are generally small, the scheduling can be done reasonably fast. If the obtained schedule does not meet specified quality conditions (like data center utilization or schedule makespan), then the scheduler refines the job and data center abstractions and again solves the scheduling problem. We develop different schedulers based on abstraction refinement. We implemented these schedulers and used them to schedule task graphs from various computing domains on simulated data centers with realistic topologies. We compared the speed of scheduling and the quality of the produced schedules of our abstraction-refinement schedulers against a baseline scheduler that does not use any abstraction. We conclude that abstraction refinement techniques give a significant speed-up compared to traditional static scheduling heuristics, at a reasonable cost in the quality of the produced schedules. We further used our static schedulers in an actual system that we deployed on Amazon EC2 and compared it against the Hadoop dynamic scheduler for large MapReduce jobs. Our experiments indicate that there is great potential for static scheduling techniques.},
author = {Henzinger, Thomas A and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
location = {Salzburg, Austria},
pages = {329 -- 342},
publisher = {ACM},
title = {{Scheduling large jobs by abstraction refinement}},
doi = {10.1145/1966445.1966476},
year = {2011},
}
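The refinement loop described above can be summarized in a short skeleton (ours; all four parameters are hypothetical callables standing in for the paper's components, not its API).

def schedule_by_refinement(abstract, solve, quality_ok, refine):
    """Scheduling by abstraction refinement: solve on coarse abstractions
    of the job and the data center, and refine only while the schedule
    misses the quality conditions (e.g. utilization or makespan).
    Refinement is assumed to bottom out at the concrete problem."""
    job_abs, dc_abs = abstract()
    while True:
        schedule = solve(job_abs, dc_abs)
        if quality_ok(schedule):
            return schedule
        job_abs, dc_abs = refine(job_abs, dc_abs)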
@inproceedings{3359,
abstract = {Motivated by improvements in constraint-solving technology and by the increase of routinely available computational power, partial-program synthesis is emerging as an effective approach for increasing programmer productivity. The goal of the approach is to allow the programmer to specify a part of her intent imperatively (that is, give a partial program) and a part of her intent declaratively, by specifying which conditions need to be achieved or maintained. The task of the synthesizer is to construct a program that satisfies the specification. As an example, consider a partial program where threads access shared data without using any synchronization mechanism, and a declarative specification that excludes data races and deadlocks. The task of the synthesizer is then to place locks into the program code in order for the program to meet the specification.
In this paper, we argue that quantitative objectives are needed in partial-program synthesis in order to produce higher-quality programs, while enabling simpler specifications. Returning to the example, the synthesizer could construct a naive solution that uses one global lock for shared data. This can be prevented either by constraining the solution space further (which is error-prone and partly defeats the point of synthesis), or by optimizing a quantitative objective that models performance. Other quantitative notions useful in synthesis include fault tolerance, robustness, resource (memory, power) consumption, and information flow.},
author = {Cerny, Pavol and Henzinger, Thomas A},
location = {Taipei, Taiwan},
pages = {149 -- 154},
publisher = {ACM},
title = {{From boolean to quantitative synthesis}},
doi = {10.1145/2038642.2038666},
year = {2011},
}
@inproceedings{3360,
abstract = {A discounted-sum automaton (NDA) is a nondeterministic finite automaton with edge weights, which values a run by the discounted sum of visited edge weights. More precisely, the weight in the i-th position of the run is divided by lambda^i, where the discount factor lambda is a fixed rational number greater than 1. Discounted summation is a common and useful measuring scheme, especially for infinite sequences, which reflects the assumption that earlier weights are more important than later weights. Determinizing automata is often essential, for example, in formal verification, where there are polynomial algorithms for comparing two deterministic NDAs, while the equivalence problem for NDAs is not known to be decidable. Unfortunately, however, discounted-sum automata are, in general, not determinizable: it is currently known that for every rational discount factor 1 < lambda < 2, there is an NDA with lambda (denoted lambda-NDA) that cannot be determinized. We provide positive news, showing that every NDA with an integral factor is determinizable. We also complete the picture by proving that the integers characterize exactly the discount factors that guarantee determinizability: we show that for every non-integral rational factor lambda, there is a nondeterminizable lambda-NDA. Finally, we prove that the class of NDAs with integral discount factors enjoys closure under the algebraic operations min, max, addition, and subtraction, which is not the case for general NDAs nor for deterministic NDAs. This shows that for integral discount factors, the class of NDAs forms an attractive specification formalism in quantitative formal verification. All our results hold equally for automata over finite words and for automata over infinite words. },
author = {Boker, Udi and Henzinger, Thomas A},
location = {Bergen, Norway},
pages = {82 -- 96},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Determinizing discounted-sum automata}},
doi = {10.4230/LIPIcs.CSL.2011.82},
volume = {12},
year = {2011},
}
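The valuation of a run described above is simple to compute; the sketch below (ours; it values finite words and is unrelated to the determinization construction) keeps, per reached state, only the best discounted sum so far, which is sound because the attainable future value from a state depends only on the position, not on the past.

def nda_value(word, transitions, initial, lam):
    """Value of a finite word in a discounted-sum automaton: the maximum
    over runs of sum_i weight_i / lam**i (positions 0-indexed).
    transitions: (state, letter) -> list of (target, weight)."""
    frontier = {initial: 0.0}  # best discounted sum per reachable state
    for i, letter in enumerate(word):
        nxt = {}
        for q, val in frontier.items():
            for (r, w) in transitions.get((q, letter), []):
                cand = val + w / lam**i
                if cand > nxt.get(r, float("-inf")):
                    nxt[r] = cand
        frontier = nxt
    return max(frontier.values()) if frontier else None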
@inproceedings{3361,
abstract = {In this paper, we investigate the computational complexity of quantitative information flow (QIF) problems. Information-theoretic quantitative relaxations of noninterference (based on Shannon entropy) have been introduced to enable more fine-grained reasoning about programs in situations where limited information flow is acceptable. The QIF bounding problem asks whether the information flow in a given program is bounded by a constant $d$. Our first result is that the QIF bounding problem is PSPACE-complete. The QIF memoryless synthesis problem asks whether it is possible to resolve nondeterministic choices in a given partial program in such a way that in the resulting deterministic program, the quantitative information flow is bounded by a given constant $d$. Our second result is that the QIF memoryless synthesis problem is EXPTIME-complete. The QIF memoryless synthesis problem generalizes to the QIF general synthesis problem, which does not impose the memoryless requirement (that is, it allows the synthesized program to have more variables than the original partial program). Our third result is that the QIF general synthesis problem is EXPTIME-hard.},
author = {Cerny, Pavol and Chatterjee, Krishnendu and Henzinger, Thomas A},
location = {Cernay-la-Ville, France},
pages = {205 -- 217},
publisher = {IEEE},
title = {{The complexity of quantitative information flow problems}},
doi = {10.1109/CSF.2011.21},
year = {2011},
}
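For intuition on the Shannon-entropy measure underlying QIF, here is a toy computation (ours, not the paper's method): for a deterministic program with a uniformly distributed secret, the leakage I(S; O) equals the entropy of the output distribution.

import math
from collections import Counter

def shannon_leakage(program, secrets):
    """Mutual information I(S; O) between a uniform secret S and the output
    O = program(S); for deterministic programs H(O | S) = 0, so this is H(O)."""
    n = len(secrets)
    counts = Counter(program(s) for s in secrets)
    return -sum((c / n) * math.log2(c / n) for c in counts.values())

# A password check leaks little: ~0.0015 bits over 10000 possible PINs.
leak = shannon_leakage(lambda s: s == "1234", [f"{i:04d}" for i in range(10000)])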
@inproceedings{3362,
abstract = {State-transition systems communicating by shared variables have been the underlying model of choice for applications of model checking. Such formalisms, however, have difficulty with modeling process creation or death and communication reconfigurability. Here, we introduce “dynamic reactive modules” (DRM), a state-transition modeling formalism that supports dynamic reconfiguration and creation/death of processes. The resulting formalism supports two types of variables, data variables and reference variables. Reference variables enable changing the connectivity between processes and referring to instances of processes. We show how this new formalism supports parallel composition and refinement through trace containment. DRM provide a natural language for modeling (and ultimately reasoning about) biological systems and multiple threads communicating through shared variables.},
author = {Fisher, Jasmin and Henzinger, Thomas A and Nickovic, Dejan and Piterman, Nir and Singh, Anmol and Vardi, Moshe},
location = {Aachen, Germany},
pages = {404 -- 418},
publisher = {Springer},
title = {{Dynamic reactive modules}},
doi = {10.1007/978-3-642-23217-6_27},
volume = {6901},
year = {2011},
}