@inproceedings{3360,
abstract = {A discounted-sum automaton (NDA) is a nondeterministic finite automaton with edge weights, which values a run by the discounted sum of visited edge weights. More precisely, the weight in the i-th position of the run is divided by lambda^i, where the discount factor lambda is a fixed rational number greater than 1. Discounted summation is a common and useful measuring scheme, especially for infinite sequences, which reflects the assumption that earlier weights are more important than later weights. Determinizing automata is often essential, for example, in formal verification, where there are polynomial algorithms for comparing two deterministic NDAs, while the equivalence problem for NDAs is not known to be decidable. Unfortunately, however, discounted-sum automata are, in general, not determinizable: it is currently known that for every rational discount factor 1 < lambda < 2, there is an NDA with lambda (denoted lambda-NDA) that cannot be determinized. We provide positive news, showing that every NDA with an integral factor is determinizable. We also complete the picture by proving that the integers characterize exactly the discount factors that guarantee determinizability: we show that for every non-integral rational factor lambda, there is a nondeterminizable lambda-NDA. Finally, we prove that the class of NDAs with integral discount factors enjoys closure under the algebraic operations min, max, addition, and subtraction, which is not the case for general NDAs nor for deterministic NDAs. This shows that for integral discount factors, the class of NDAs forms an attractive specification formalism in quantitative formal verification. All our results hold equally for automata over finite words and for automata over infinite words. },
author = {Boker, Udi and Henzinger, Thomas A},
location = {Bergen, Norway},
booktitle = {Computer Science Logic (CSL 2011)},
pages = {82 -- 96},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Determinizing discounted-sum automata}},
doi = {10.4230/LIPIcs.CSL.2011.82},
volume = {12},
year = {2011},
}
@misc{5385,
abstract = {There is recently a significant effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions, aiming for a general and flexible framework for quantitative-oriented specifications. In the heart of quantitative objectives lies the accumulation of values along a computation. It is either the accumulated summation, as with the energy objectives, or the accumulated average, as with the mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point of time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire computation. We study the border of decidability for extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities by prefix-accumulation assertions and extending LTL with path-accumulation assertions, result in temporal logics whose model-checking problem is decidable. The extended logics allow to significantly extend the currently known energy and mean-payoff objectives. Moreover, the prefix-accumulation assertions may be refined with “controlled-accumulation”, allowing, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that the fragment we point to is, in a sense, the maximal logic whose extension with prefix-accumulation assertions permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, and in particular CTL and LTL, makes the problem undecidable.},
author = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A and Kupferman, Orna},
issn = {2664-1690},
pages = {14},
publisher = {IST Austria},
title = {{Temporal specifications with accumulative values}},
doi = {10.15479/AT:IST-2011-0003},
year = {2011},
}
@inbook{3311,
abstract = {Alpha shapes have been conceived in 1981 as an attempt to define the shape of a finite set of points in the plane. Since then, connections to diverse areas in the sciences and engineering have developed, including to pattern recognition, digital shape sampling and processing, and structural molecular biology. This survey begins with a historical account and discusses geometric, algorithmic, topological, and combinatorial aspects of alpha shapes in this sequence.},
author = {Edelsbrunner, Herbert},
booktitle = {Tessellations in the Sciences},
publisher = {Springer},
title = {{Alpha shapes - a survey}},
year = {2011},
}
@inproceedings{3330,
abstract = {We consider the problem of approximating all real roots of a square-free polynomial f. Given isolating intervals, our algorithm refines each of them to a width at most 2^{-L}, that is, each of the roots is approximated to L bits after the binary point. Our method provides a certified answer for arbitrary real polynomials, only requiring finite approximations of the polynomial coefficients and choosing a suitable working precision adaptively. In this way, we get a correct algorithm that is simple to implement and practically efficient. Our algorithm uses the quadratic interval refinement method; we adapt that method to be able to cope with inaccuracies when evaluating f, without sacrificing its quadratic convergence behavior. We prove a bound on the bit complexity of our algorithm in terms of degree, coefficient size and discriminant. Our bound improves previous work on integer polynomials by a factor of deg f and essentially matches best known theoretical bounds on root approximation which are obtained by very sophisticated algorithms.},
author = {Kerber, Michael and Sagraloff, Michael},
location = {California, USA},
pages = {209 -- 216},
publisher = {ACM},
title = {{Root refinement for real polynomials}},
doi = {10.1145/1993886.1993920},
year = {2011},
}
@inbook{3335,
abstract = {We study the topology of the Megaparsec Cosmic Web in terms of the scale-dependent Betti numbers, which formalize the topological information content of the cosmic mass distribution. While the Betti numbers do not fully quantify topology, they extend the information beyond conventional cosmological studies of topology in terms of genus and Euler characteristic. The richer information content of Betti numbers goes along the availability of fast algorithms to compute them. For continuous density fields, we determine the scale-dependence of Betti numbers by invoking the cosmologically familiar filtration of sublevel or superlevel sets defined by density thresholds. For the discrete galaxy distribution, however, the analysis is based on the alpha shapes of the particles. These simplicial complexes constitute an ordered sequence of nested subsets of the Delaunay tessellation, a filtration defined by the scale parameter, α. As they are homotopy equivalent to the sublevel sets of the distance field, they are an excellent tool for assessing the topological structure of a discrete point distribution. In order to develop an intuitive understanding for the behavior of Betti numbers as a function of α, and their relation to the morphological patterns in the Cosmic Web, we first study them within the context of simple heuristic Voronoi clustering models. These can be tuned to consist of specific morphological elements of the Cosmic Web, i.e. clusters, filaments, or sheets. To elucidate the relative prominence of the various Betti numbers in different stages of morphological evolution, we introduce the concept of alpha tracks. Subsequently, we address the topology of structures emerging in the standard LCDM scenario and in cosmological scenarios with alternative dark energy content. The evolution of the Betti numbers is shown to reflect the hierarchical evolution of the Cosmic Web. 
We also demonstrate that the scale-dependence of the Betti numbers yields a promising measure of cosmological parameters, with a potential to help in determining the nature of dark energy and to probe primordial non-Gaussianities. We also discuss the expected Betti numbers as a function of the density threshold for superlevel sets of a Gaussian random field. Finally, we introduce the concept of persistent homology. It measures scale levels of the mass distribution and allows us to separate small from large scale features. Within the context of the hierarchical cosmic structure formation, persistence provides a natural formalism for a multiscale topology study of the Cosmic Web.},
author = {Van De Weygaert, Rien and Vegter, Gert and Edelsbrunner, Herbert and Jones, Bernard and Pranav, Pratyush and Park, Changbom and Hellwing, Wojciech and Eldering, Bob and Kruithof, Nico and Bos, Patrick and Hidding, Johan and Feldbrugge, Job and Ten Have, Eline and Van Engelen, Matti and Caroli, Manuel and Teillaud, Monique},
booktitle = {Transactions on Computational Science XIV},
editor = {Gavrilova, Marina and Tan, Kenneth and Mostafavi, Mir},
pages = {60 -- 101},
publisher = {Springer},
title = {{Alpha, Betti and the Megaparsec Universe: On the topology of the Cosmic Web}},
doi = {10.1007/978-3-642-25249-5_3},
volume = {6970},
year = {2011},
}
@inproceedings{3328,
abstract = {We report on a generic uni- and bivariate algebraic kernel that is publicly available with CGAL 3.7. It comprises complete, correct, though efficient state-of-the-art implementations on polynomials, roots of polynomial systems, and the support to analyze algebraic curves defined by bivariate polynomials. The kernel design is generic, that is, various number types and substeps can be exchanged. It is accompanied with a ready-to-use interface to enable arrangements induced by algebraic curves, that have already been used as basis for various geometric applications, as arrangements on Dupin cyclides or the triangulation of algebraic surfaces. We present two novel applications: arrangements of rotated algebraic curves and Boolean set operations on polygons bounded by segments of algebraic curves. We also provide experiments showing that our general implementation is competitive and even often clearly outperforms existing implementations that are explicitly tailored for specific types of non-linear curves that are available in CGAL.},
author = {Berberich, Eric and Hemmer, Michael and Kerber, Michael},
location = {Paris, France},
pages = {179 -- 186},
publisher = {ACM},
title = {{A generic algebraic kernel for non-linear geometric applications}},
doi = {10.1145/1998196.1998224},
year = {2011},
}
@article{3784,
abstract = {Advanced stages of Scyllarus phyllosoma larvae were collected by demersal trawling during fishery research surveys in the western Mediterranean Sea in 2003–2005. Nucleotide sequence analysis of the mitochondrial 16S rDNA gene allowed the final-stage phyllosoma of Scyllarus arctus to be identified among these larvae. Its morphology is described and illustrated. This constitutes the second complete description of a Scyllaridae phyllosoma with its specific identity being validated by molecular techniques (the first was S. pygmaeus). These results also solved a long lasting taxonomic anomaly of several species assigned to the ancient genus Phyllosoma Leach, 1814. Detailed examination indicated that the final-stage phyllosoma of S. arctus shows closer affinities with the American scyllarid Scyllarus depressus or with the Australian Scyllarus sp. b (sensu Phillips et al., 1981) than to its sympatric species S. pygmaeus.},
author = {Palero, Ferran and Guerao, Guillermo and Clark, Paul and Abello, Pere},
journal = {Journal of the Marine Biological Association of the United Kingdom},
number = {2},
pages = {485 -- 492},
publisher = {Cambridge University Press},
title = {{Scyllarus arctus (Crustacea: Decapoda: Scyllaridae) final stage phyllosoma identified by DNA analysis, with morphological description}},
doi = {10.1017/S0025315410000287},
volume = {91},
year = {2011},
}
@inbook{3791,
abstract = {During the development of multicellular organisms, cell fate specification is followed by the sorting of different cell types into distinct domains from where the different tissues and organs are formed. Cell sorting involves both the segregation of a mixed population of cells with different fates and properties into distinct domains, and the active maintenance of their segregated state. Because of its biological importance and apparent resemblance to fluid segregation in physics, cell sorting was extensively studied by both biologists and physicists over the last decades. Different theories were developed that try to explain cell sorting on the basis of the physical properties of the constituent cells. However, only recently the molecular and cellular mechanisms that control the physical properties driving cell sorting, have begun to be unraveled. In this review, we will provide an overview of different cell-sorting processes in development and discuss how these processes can be explained by the different sorting theories, and how these theories in turn can be connected to the molecular and cellular mechanisms driving these processes.},
author = {Krens, Gabriel and Heisenberg, Carl-Philipp J},
booktitle = {Current Topics in Developmental Biology},
editor = {Labouesse, Michel},
pages = {189 -- 213},
publisher = {Elsevier},
title = {{Cell sorting in development}},
doi = {10.1016/B978-0-12-385065-2.00006-2},
volume = {95},
year = {2011},
}
@inbook{3796,
abstract = {We address the problem of covering ℝ^n with congruent balls, while minimizing the number of balls that contain an average point. Considering the 1-parameter family of lattices defined by stretching or compressing the integer grid in diagonal direction, we give a closed formula for the covering density that depends on the distortion parameter. We observe that our family contains the thinnest lattice coverings in dimensions 2 to 5. We also consider the problem of packing congruent balls in ℝ^n, for which we give a closed formula for the packing density as well. Again we observe that our family contains optimal configurations, this time densest packings in dimensions 2 and 3.},
author = {Edelsbrunner, Herbert and Kerber, Michael},
booktitle = {Rainbow of Computer Science},
editor = {Calude, Cristian and Rozenberg, Grzegorz and Salomaa, Arto},
pages = {20 -- 35},
publisher = {Springer},
title = {{Covering and packing with spheres by diagonal distortion in $R^n$}},
doi = {10.1007/978-3-642-19391-0_2},
volume = {6570},
year = {2011},
}
@article{3385,
author = {Sixt, Michael K},
journal = {Immunology Letters},
number = {1},
pages = {32 -- 34},
publisher = {Elsevier},
title = {{Interstitial locomotion of leukocytes}},
doi = {10.1016/j.imlet.2011.02.013},
volume = {138},
year = {2011},
}
@article{3392,
abstract = {Migrating lymphocytes acquire a polarized phenotype with a leading and a trailing edge, or uropod. Although in vitro experiments in cell lines or activated primary cell cultures have established that Rho-p160 coiled-coil kinase (ROCK)-myosin II-mediated uropod contractility is required for integrin de-adhesion on two-dimensional surfaces and nuclear propulsion through narrow pores in three-dimensional matrices, less is known about the role of these two events during the recirculation of primary, nonactivated lymphocytes. Using pharmacological antagonists of ROCK and myosin II, we report that inhibition of uropod contractility blocked integrin-independent mouse T cell migration through narrow, but not large, pores in vitro. T cell crawling on chemokine-coated endothelial cells under shear was severely impaired by ROCK inhibition, whereas transendothelial migration was only reduced through endothelial cells with high, but not low, barrier properties. Using three-dimensional thick-tissue imaging and dynamic two-photon microscopy of T cell motility in lymphoid tissue, we demonstrated a significant role for uropod contractility in intraluminal crawling and transendothelial migration through lymph node, but not bone marrow, endothelial cells. Finally, we demonstrated that ICAM-1, but not anatomical constraints or integrin-independent interactions, reduced parenchymal motility of inhibitor-treated T cells within the dense lymphoid microenvironment, thus assigning context-dependent roles for uropod contraction during lymphocyte recirculation.},
author = {Soriano, Silvia and Hons, Miroslav and Schumann, Kathrin and Kumar, Varsha and Dennier, Timo and Lyck, Ruth and Sixt, Michael K and Stein, Jens},
journal = {Journal of Immunology},
number = {5},
pages = {2356 -- 2364},
publisher = {American Association of Immunologists},
title = {{In vivo analysis of uropod function during physiological T cell trafficking}},
doi = {10.4049/jimmunol.1100935},
volume = {187},
year = {2011},
}
@article{3397,
abstract = {Recent advances in microscopy techniques and biophysical measurements have provided novel insight into the molecular, cellular and biophysical basis of cell adhesion. However, comparably little is known about a core element of cell–cell adhesion—the energy of adhesion at the cell–cell contact. In this review, we discuss approaches to understand the nature and regulation of adhesion energy, and propose strategies to determine adhesion energy between cells in vitro and in vivo.},
author = {Maître, Jean-Léon and Heisenberg, Carl-Philipp J},
journal = {Current Opinion in Cell Biology},
number = {5},
pages = {508 -- 514},
publisher = {Elsevier},
title = {{The role of adhesion energy in controlling cell-cell contacts}},
doi = {10.1016/j.ceb.2011.07.004},
volume = {23},
year = {2011},
}
@article{3405,
abstract = {Glutamate is the major excitatory neurotransmitter in the mammalian central nervous system and gates non-selective cation channels. The origins of glutamate receptors are not well understood as they differ structurally and functionally from simple bacterial ligand-gated ion channels. Here we report the discovery of an ionotropic glutamate receptor that combines the typical eukaryotic domain architecture with the 'TXVGYG' signature sequence of the selectivity filter found in K+ channels. This receptor exhibits functional properties intermediate between bacterial and eukaryotic glutamate-gated ion channels, suggesting a link in the evolution of ionotropic glutamate receptors.},
author = {Janovjak, Harald L and Sandoz, Guillaume and Isacoff, Ehud},
journal = {Nature Communications},
number = {232},
pages = {1 -- 6},
publisher = {Nature Publishing Group},
title = {{Modern ionotropic glutamate receptor with a K+ selectivity signature sequence}},
doi = {10.1038/ncomms1231},
volume = {2},
year = {2011},
}
@article{3429,
abstract = {Transcription factors are central to sustaining pluripotency, yet little is known about transcription factor dynamics in defining pluripotency in the early mammalian embryo. Here, we establish a fluorescence decay after photoactivation (FDAP) assay to quantitatively study the kinetic behaviour of Oct4, a key transcription factor controlling pre-implantation development in the mouse embryo. FDAP measurements reveal that each cell in a developing embryo shows one of two distinct Oct4 kinetics, before there are any morphologically distinguishable differences or outward signs of lineage patterning. The differences revealed by FDAP are due to differences in the accessibility of Oct4 to its DNA binding sites in the nucleus. Lineage tracing of the cells in the two distinct sub-populations demonstrates that the Oct4 kinetics predict lineages of the early embryo. Cells with slower Oct4 kinetics are more likely to give rise to the pluripotent cell lineage that contributes to the inner cell mass. Those with faster Oct4 kinetics contribute mostly to the extra-embryonic lineage. Our findings identify Oct4 kinetics, rather than differences in total transcription factor expression levels, as a predictive measure of developmental cell lineage patterning in the early mouse embryo.},
author = {Plachta, Nicolas and Bollenbach, Mark Tobias and Pease, Shirley and Fraser, Scott and Pantazis, Periklis},
journal = {Nature Cell Biology},
number = {2},
pages = {117 -- 123},
publisher = {Nature Publishing Group},
title = {{Oct4 kinetics predict cell lineage patterning in the early mammalian embryo}},
doi = {10.1038/ncb2154},
volume = {13},
year = {2011},
}
@article{3373,
abstract = {The use of optical traps to measure or apply forces on the molecular level requires a precise knowledge of the trapping force field. Close to the trap center, this field is typically approximated as linear in the displacement of the trapped microsphere. However, applications demanding high forces at low laser intensities can probe the light-microsphere interaction beyond the linear regime. Here, we measured the full nonlinear force and displacement response of an optical trap in two dimensions using a dual-beam optical trap setup with back-focal-plane photodetection. We observed a substantial stiffening of the trap beyond the linear regime that depends on microsphere size, in agreement with Mie theory calculations. Surprisingly, we found that the linear detection range for forces exceeds the one for displacement by far. Our approach allows for a complete calibration of an optical trap.},
author = {Jahnel, Marcus and Behrndt, Martin and Jannasch, Anita and Schaeffer, Erik and Grill, Stephan},
journal = {Optics Letters},
number = {7},
pages = {1260 -- 1262},
publisher = {OSA},
title = {{Measuring the complete force field of an optical trap}},
doi = {10.1364/OL.36.001260},
volume = {36},
year = {2011},
}
@article{3378,
abstract = {The theory of intersection homology was developed to study the singularities of a topologically stratified space. This paper incorporates this theory into the already developed framework of persistent homology. We demonstrate that persistent intersection homology gives useful information about the relationship between an embedded stratified space and its singularities. We give, and prove the correctness of, an algorithm for the computation of the persistent intersection homology groups of a filtered simplicial complex equipped with a stratification by subcomplexes. We also derive, from Poincaré Duality, some structural results about persistent intersection homology.},
author = {Bendich, Paul and Harer, John},
journal = {Foundations of Computational Mathematics},
number = {3},
pages = {305 -- 336},
publisher = {Springer},
title = {{Persistent intersection homology}},
doi = {10.1007/s10208-010-9081-1},
volume = {11},
year = {2011},
}
@article{3380,
abstract = {Linkage between markers and genes that affect a phenotype of interest may be determined by examining differences in marker allele frequency in the extreme progeny of a cross between two inbred lines. This strategy is usually employed when pooling is used to reduce genotyping costs. When the cross progeny are asexual, the extreme progeny may be selected by multiple generations of asexual reproduction and selection. We analyse this method of measuring phenotype in asexual progeny and examine the changes in marker allele frequency due to selection over many generations. Stochasticity in marker frequency in the selected population arises due to the finite initial population size. We derive the distribution of marker frequency as a result of selection at a single major locus, and show that in order to avoid spurious changes in marker allele frequency in the selected population, the initial population size should be in the low to mid hundreds.},
author = {Logeswaran, Sayanthan and Barton, Nicholas H},
journal = {Genetical Research},
number = {3},
pages = {221 -- 232},
publisher = {Cambridge University Press},
title = {{Mapping Mendelian traits in asexual progeny using changes in marker allele frequency}},
doi = {10.1017/S0016672311000115},
volume = {93},
year = {2011},
}
@article{518,
abstract = {Cancer stem cells or cancer initiating cells are believed to contribute to cancer recurrence after therapy. MicroRNAs (miRNAs) are short RNA molecules with fundamental roles in gene regulation. The role of miRNAs in cancer stem cells is only poorly understood. Here, we report miRNA expression profiles of glioblastoma stem cell-containing CD133 + cell populations. We find that miR-9, miR-9 * (referred to as miR-9/9 *), miR-17 and miR-106b are highly abundant in CD133 + cells. Furthermore, inhibition of miR-9/9 * or miR-17 leads to reduced neurosphere formation and stimulates cell differentiation. Calmodulin-binding transcription activator 1 (CAMTA1) is a putative transcription factor, which induces the expression of the anti-proliferative cardiac hormone natriuretic peptide A (NPPA). We identify CAMTA1 as an miR-9/9 * and miR-17 target. CAMTA1 expression leads to reduced neurosphere formation and tumour growth in nude mice, suggesting that CAMTA1 can function as tumour suppressor. Consistently, CAMTA1 and NPPA expression correlate with patient survival. Our findings could provide a basis for novel strategies of glioblastoma therapy.},
author = {Schraivogel, Daniel and Weinmann, Lasse and Beier, Dagmar and Tabatabai, Ghazaleh and Eichner, Alexander and Zhu, Jia and Anton, Martina and Sixt, Michael K and Weller, Michael and Beier, Christoph and Meister, Gunter},
journal = {EMBO Journal},
number = {20},
pages = {4309 -- 4322},
publisher = {Wiley-Blackwell},
title = {{CAMTA1 is a novel tumour suppressor regulated by miR-9/9 * in glioblastoma stem cells}},
doi = {10.1038/emboj.2011.301},
volume = {30},
year = {2011},
}
@misc{5386,
abstract = {We introduce TopoCut: a new way to integrate knowledge about topological properties (TPs) into random field image segmentation model. Instead of including TPs as additional constraints during minimization of the energy function, we devise an efficient algorithm for modifying the unary potentials such that the resulting segmentation is guaranteed with the desired properties. Our method is more flexible in the sense that it handles more topology constraints than previous methods, which were only able to enforce pairwise or global connectivity. In particular, our method is very fast, making it for the first time possible to enforce global topological properties in practical image segmentation tasks.},
author = {Chen, Chao and Freedman, Daniel and Lampert, Christoph},
issn = {2664-1690},
pages = {69},
publisher = {IST Austria},
title = {{Enforcing topological constraints in random field image segmentation}},
doi = {10.15479/AT:IST-2011-0002},
year = {2011},
}
@inproceedings{3323,
abstract = {We present a new decidable logic called TREX for expressing constraints about imperative tree data structures. In particular, TREX supports a transitive closure operator that can express reachability constraints, which often appear in data structure invariants. We show that our logic is closed under weakest precondition computation, which enables its use for automated software verification. We further show that satisfiability of formulas in TREX is decidable in NP. The low complexity makes it an attractive alternative to more expensive logics such as monadic second-order logic (MSOL) over trees, which have been traditionally used for reasoning about tree data structures.},
author = {Wies, Thomas and Muñiz, Marco and Kuncak, Viktor},
location = {Wrocław, Poland},
pages = {476 -- 491},
publisher = {Springer},
title = {{An efficient decision procedure for imperative tree data structures}},
doi = {10.1007/978-3-642-22438-6_36},
volume = {6803},
year = {2011},
}
@misc{5381,
abstract = {In two-player finite-state stochastic games of partial observation on graphs, in every state of the graph, the players simultaneously choose an action, and their joint actions determine a probability distribution over the successor states. The game is played for infinitely many rounds and thus the players construct an infinite path in the graph. We consider reachability objectives where the first player tries to ensure a target state to be visited almost-surely (i.e., with probability 1) or positively (i.e., with positive probability), no matter the strategy of the second player.
We classify such games according to the information and to the power of randomization available to the players. On the basis of information, the game can be one-sided with either (a) player 1, or (b) player 2 having partial observation (and the other player has perfect observation), or two-sided with (c) both players having partial observation. On the basis of randomization, (a) the players may not be allowed to use randomization (pure strategies), or (b) they may choose a probability distribution over actions but the actual random choice is external and not visible to the player (actions invisible), or (c) they may use full randomization.
Our main results for pure strategies are as follows: (1) For one-sided games with player 2 perfect observation we show that (in contrast to full randomized strategies) belief-based (subset-construction based) strategies are not sufficient, and present an exponential upper bound on memory both for almost-sure and positive winning strategies; we show that the problem of deciding the existence of almost-sure and positive winning strategies for player 1 is EXPTIME-complete and present symbolic algorithms that avoid the explicit exponential construction. (2) For one-sided games with player 1 perfect observation we show that non-elementary memory is both necessary and sufficient for both almost-sure and positive winning strategies. (3) We show that for the general (two-sided) case finite-memory strategies are sufficient for both positive and almost-sure winning, and at least non-elementary memory is required. We establish the equivalence of the almost-sure winning problems for pure strategies and for randomized strategies with actions invisible. Our equivalence result exhibits serious flaws in previous results in the literature: we show a non-elementary memory lower bound for almost-sure winning whereas an exponential upper bound was previously claimed.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
issn = {2664-1690},
pages = {43},
publisher = {IST Austria},
title = {{Partial-observation stochastic games: How to win when belief fails}},
doi = {10.15479/AT:IST-2011-0007},
year = {2011},
}
@inproceedings{3316,
abstract = {In addition to being correct, a system should be robust, that is, it should behave reasonably even after receiving unexpected inputs. In this paper, we summarize two formal notions of robustness that we have introduced previously for reactive systems. One of the notions is based on assigning costs for failures on a user-provided notion of incorrect transitions in a specification. Here, we define a system to be robust if a finite number of incorrect inputs does not lead to an infinite number of incorrect outputs. We also give a more refined notion of robustness that aims to minimize the ratio of output failures to input failures. The second notion is aimed at liveness. In contrast to the previous notion, it has no concept of recovery from an error. Instead, it compares the ratio of the number of liveness constraints that the system violates to the number of liveness constraints that the environment violates.},
author = {Bloem, Roderick and Chatterjee, Krishnendu and Greimel, Karin and Henzinger, Thomas A and Jobstmann, Barbara},
booktitle = {6th IEEE International Symposium on Industrial and Embedded Systems},
location = {Vasteras, Sweden},
pages = {176 -- 185},
publisher = {IEEE},
title = {{Specification-centered robustness}},
doi = {10.1109/SIES.2011.5953660},
year = {2011},
}
@inproceedings{3366,
abstract = {We present an algorithmic method for the quantitative, performance-aware synthesis of concurrent programs. The input consists of a nondeterministic partial program and of a parametric performance model. The nondeterminism allows the programmer to omit which (if any) synchronization construct is used at a particular program location. The performance model, specified as a weighted automaton, can capture system architectures by assigning different costs to actions such as locking, context switching, and memory and cache accesses. The quantitative synthesis problem is to automatically resolve the nondeterminism of the partial program so that both correctness is guaranteed and performance is optimal. As is standard for shared memory concurrency, correctness is formalized "specification free", in particular as race freedom or deadlock freedom. For worst-case (average-case) performance, we show that the problem can be reduced to 2-player graph games (with probabilistic transitions) with quantitative objectives. While we show, using game-theoretic methods, that the synthesis problem is Nexp-complete, we present an algorithmic method and an implementation that works efficiently for concurrent programs and performance models of practical interest. We have implemented a prototype tool and used it to synthesize finite-state concurrent programs that exhibit different programming patterns, for several performance models representing different architectures. },
author = {Cerny, Pavol and Chatterjee, Krishnendu and Henzinger, Thomas A and Radhakrishna, Arjun and Singh, Rohit},
editor = {Gopalakrishnan, Ganesh and Qadeer, Shaz},
location = {Snowbird, USA},
pages = {243 -- 259},
publisher = {Springer},
title = {{Quantitative synthesis for concurrent programs}},
doi = {10.1007/978-3-642-22110-1_20},
volume = {6806},
year = {2011},
}
@inproceedings{3359,
abstract = {Motivated by improvements in constraint-solving technology and by the increase of routinely available computational power, partial-program synthesis is emerging as an effective approach for increasing programmer productivity. The goal of the approach is to allow the programmer to specify a part of her intent imperatively (that is, give a partial program) and a part of her intent declaratively, by specifying which conditions need to be achieved or maintained. The task of the synthesizer is to construct a program that satisfies the specification. As an example, consider a partial program where threads access shared data without using any synchronization mechanism, and a declarative specification that excludes data races and deadlocks. The task of the synthesizer is then to place locks into the program code in order for the program to meet the specification.
In this paper, we argue that quantitative objectives are needed in partial-program synthesis in order to produce higher-quality programs, while enabling simpler specifications. Returning to the example, the synthesizer could construct a naive solution that uses one global lock for shared data. This can be prevented either by constraining the solution space further (which is error-prone and partly defeats the point of synthesis), or by optimizing a quantitative objective that models performance. Other quantitative notions useful in synthesis include fault tolerance, robustness, resource (memory, power) consumption, and information flow.},
author = {Cerny, Pavol and Henzinger, Thomas A},
location = {Taipei, Taiwan},
pages = {149 -- 154},
publisher = {ACM},
title = {{From boolean to quantitative synthesis}},
doi = {10.1145/2038642.2038666},
year = {2011},
}
@inproceedings{3361,
abstract = {In this paper, we investigate the computational complexity of quantitative information flow (QIF) problems. Information-theoretic quantitative relaxations of noninterference (based on Shannon entropy) have been introduced to enable more fine-grained reasoning about programs in situations where limited information flow is acceptable. The QIF bounding problem asks whether the information flow in a given program is bounded by a constant $d$. Our first result is that the QIF bounding problem is PSPACE-complete. The QIF memoryless synthesis problem asks whether it is possible to resolve nondeterministic choices in a given partial program in such a way that in the resulting deterministic program, the quantitative information flow is bounded by a given constant $d$. Our second result is that the QIF memoryless synthesis problem is EXPTIME-complete. The QIF memoryless synthesis problem generalizes to QIF general synthesis problem which does not impose the memoryless requirement (that is, by allowing the synthesized program to have more variables than the original partial program). Our third result is that the QIF general synthesis problem is EXPTIME-hard.},
author = {Cerny, Pavol and Chatterjee, Krishnendu and Henzinger, Thomas A},
location = {Cernay-la-Ville, France},
pages = {205 -- 217},
publisher = {IEEE},
title = {{The complexity of quantitative information flow problems}},
doi = {10.1109/CSF.2011.21},
year = {2011},
}
@inproceedings{3347,
abstract = {The class of omega-regular languages provides a robust specification language in verification. Every omega-regular condition can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens "eventually". Finitary liveness was proposed by Alur and Henzinger as a stronger formulation of liveness. It requires that there exists an unknown, fixed bound b such that something good happens within b transitions. In this work we consider automata with finitary acceptance conditions defined by finitary Buchi, parity and Streett languages. We study languages expressible by such automata: we give their topological complexity and present a regular-expression characterization. We compare the expressive power of finitary automata and give optimal algorithms for classical decision questions. We show that the finitary languages are Sigma 2-complete; we present a complete picture of the expressive power of various classes of automata with finitary and infinitary acceptance conditions; we show that the languages defined by finitary parity automata exactly characterize the star-free fragment of omega B-regular languages; and we show that emptiness is NLOGSPACE-complete and universality as well as language inclusion are PSPACE-complete for finitary parity and Streett automata.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
location = {Tarragona, Spain},
pages = {216 -- 226},
publisher = {Springer},
title = {{Finitary languages}},
doi = {10.1007/978-3-642-21254-3_16},
volume = {6638},
year = {2011},
}
@article{3354,
abstract = {We consider two-player games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine the successor state. We consider ω-regular winning conditions specified as parity objectives. Both players are allowed to use randomization when choosing their moves. We study the computation of the limit-winning set of states, consisting of the states where the sup-inf value of the game for player 1 is 1: in other words, a state is limit-winning if player 1 can ensure a probability of winning arbitrarily close to 1. We show that the limit-winning set can be computed in O(n2d+2) time, where n is the size of the game structure and 2d is the number of priorities (or colors). The membership problem of whether a state belongs to the limit-winning set can be decided in NP ∩ coNP. While this complexity is the same as for the simpler class of turn-based parity games, where in each state only one of the two players has a choice of moves, our algorithms are considerably more involved than those for turn-based games. This is because concurrent games do not satisfy two of the most fundamental properties of turn-based parity games. First, in concurrent games limit-winning strategies require randomization; and second, they require infinite memory.},
author = {Chatterjee, Krishnendu and De Alfaro, Luca and Henzinger, Thomas A},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {{Qualitative concurrent parity games}},
doi = {10.1145/1970398.1970404},
volume = {12},
year = {2011},
}
@misc{5379,
abstract = {Computing the winning set for Büchi objectives in alternating games on graphs is a central problem in computer aided verification with a large number of applications. The long standing best known upper bound for solving the problem is Õ(n·m), where n is the number of vertices and m is the number of edges in the graph. We are the first to break the Õ(n·m) boundary by presenting a new technique that reduces the running time to O(n2). This bound also leads to O(n2) time algorithms for computing the set of almost-sure winning vertices for Büchi objectives (1) in alternating games with probabilistic transitions (improving an earlier bound of O(n·m)), (2) in concurrent graph games with constant actions (improving an earlier bound of O(n3)), and (3) in Markov decision processes (improving for m > n4/3 an earlier bound of O(min(m1.5, m·n2/3)). We also show that the same technique can be used to compute the maximal end-component decomposition of a graph in time O(n2), which is an improvement over earlier bounds for m > n4/3. Finally, we show how to maintain the winning set for Büchi objectives in alternating games under a sequence of edge insertions or a sequence of edge deletions in O(n) amortized time per operation. This is the first dynamic algorithm for this problem.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
issn = {2664-1690},
pages = {20},
publisher = {IST Austria},
title = {{An O(n2) time algorithm for alternating Büchi games}},
doi = {10.15479/AT:IST-2011-0009},
year = {2011},
}
@inproceedings{3342,
abstract = {We consider Markov decision processes (MDPs) with ω-regular specifications given as parity objectives. We consider the problem of computing the set of almost-sure winning states from where the objective can be ensured with probability 1. The algorithms for the computation of the almost-sure winning set for parity objectives iteratively use the solutions for the almost-sure winning set for Büchi objectives (a special case of parity objectives). Our contributions are as follows: First, we present the first subquadratic symbolic algorithm to compute the almost-sure winning set for MDPs with Büchi objectives; our algorithm takes O(n·√m) symbolic steps as compared to the previous known algorithm that takes O(n2) symbolic steps, where n is the number of states and m is the number of edges of the MDP. In practice MDPs often have constant out-degree, and then our symbolic algorithm takes O(n·√n) symbolic steps, as compared to the previous known O(n2) symbolic steps algorithm. Second, we present a new algorithm, namely win-lose algorithm, with the following two properties: (a) the algorithm iteratively computes subsets of the almost-sure winning set and its complement, as compared to all previous algorithms that discover the almost-sure winning set upon termination; and (b) requires O(nK) symbolic steps, where K is the maximal number of edges of strongly connected components (scc’s) of the MDP. The win-lose algorithm requires symbolic computation of scc’s. Third, we improve the algorithm for symbolic scc computation; the previous known algorithm takes linear symbolic steps, and our new algorithm improves the constants associated with the linear number of steps. In the worst case the previous known algorithm takes 5·n symbolic steps, whereas our new algorithm takes 4·n symbolic steps.},
author = {Chatterjee, Krishnendu and Henzinger, Monika and Joglekar, Manas and Shah, Nisarg},
editor = {Gopalakrishnan, Ganesh and Qadeer, Shaz},
location = {Snowbird, USA},
pages = {260 -- 276},
publisher = {Springer},
title = {{Symbolic algorithms for qualitative analysis of Markov decision processes with Büchi objectives}},
doi = {10.1007/978-3-642-22110-1_21},
volume = {6806},
year = {2011},
}
@inproceedings{3266,
abstract = {We present a joint image segmentation and labeling model (JSL) which, given a bag of figure-ground segment hypotheses extracted at multiple image locations and scales, constructs a joint probability distribution over both the compatible image interpretations (tilings or image segmentations) composed from those segments, and over their labeling into categories. The process of drawing samples from the joint distribution can be interpreted as first sampling tilings, modeled as maximal cliques, from a graph connecting spatially non-overlapping segments in the bag [1], followed by sampling labels for those segments, conditioned on the choice of a particular tiling. We learn the segmentation and labeling parameters jointly, based on Maximum Likelihood with a novel Incremental Saddle Point estimation procedure. The partition function over tilings and labelings is increasingly more accurately approximated by including incorrect configurations that a not-yet-competent model rates probable during learning. We show that the proposed methodology matches the current state of the art in the Stanford dataset [2], as well as in VOC2010, where 41.7% accuracy on the test set is achieved.},
author = {Ion, Adrian and Carreira, Joao and Sminchisescu, Cristian},
booktitle = {NIPS Proceedings},
location = {Granada, Spain},
pages = {1827 -- 1835},
publisher = {Neural Information Processing Systems Foundation},
title = {{Probabilistic joint image segmentation and labeling}},
volume = {24},
year = {2011},
}
@phdthesis{3273,
author = {Maître, Jean-Léon},
publisher = {IST Austria},
title = {{Mechanics of adhesion and de-adhesion in zebrafish germ layer progenitors}},
year = {2011},
}
@inproceedings{3297,
abstract = {Animating detailed liquid surfaces has always been a challenge for computer graphics researchers and visual effects artists. Over the past few years, researchers in this field have focused on mesh-based surface tracking to synthesize extremely detailed liquid surfaces as efficiently as possible. This course provides a solid understanding of the steps required to create a fluid simulator with a mesh-based liquid surface.
The course begins with an overview of several existing liquid-surface-tracking techniques and the pros and cons of each method. Then it explains how to embed a triangle mesh into a finite-difference-based fluid simulator and describes several methods for allowing the liquid surface to merge together or break apart. The final section showcases the benefits and further applications of a mesh-based liquid surface, highlighting state-of-the-art methods for tracking colors and textures, maintaining liquid volume, preserving small surface features, and simulating realistic surface-tension waves.},
author = {Wojtan, Christopher J and Müller Fischer, Matthias and Brochu, Tyson},
location = {Vancouver, BC, Canada},
publisher = {ACM},
title = {{Liquid simulation with mesh-based surface tracking}},
doi = {10.1145/2037636.2037644},
year = {2011},
}
@inproceedings{3324,
abstract = {Automated termination provers often use the following schema to prove that a program terminates: construct a relational abstraction of the program's transition relation and then show that the relational abstraction is well-founded. The focus of current tools has been on developing sophisticated techniques for constructing the abstractions while relying on known decidable logics (such as linear arithmetic) to express them. We believe we can significantly increase the class of programs that are amenable to automated termination proofs by identifying more expressive decidable logics for reasoning about well-founded relations. We therefore present a new decision procedure for reasoning about multiset orderings, which are among the most powerful orderings used to prove termination. We show that, using our decision procedure, one can automatically prove termination of natural abstractions of programs.},
author = {Piskac, Ruzica and Wies, Thomas},
editor = {Jhala, Ranjit and Schmidt, David},
location = {Texas, USA},
pages = {371 -- 386},
publisher = {Springer},
title = {{Decision procedures for automating termination proofs}},
doi = {10.1007/978-3-642-18275-4_26},
volume = {6538},
year = {2011},
}
@inproceedings{3329,
abstract = {We consider the offset-deconstruction problem: Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance µ in Hausdorff distance, as the Minkowski sum of another polygonal shape P with a disk of fixed radius? If it does, we also seek a preferably simple-looking solution shape P; then, P's offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give an O(n log n)-time exact decision algorithm that handles any polygonal shape, assuming the real-RAM model of computation. An alternative algorithm, based purely on rational arithmetic, answers the same deconstruction problem, up to an uncertainty parameter, and its running time depends on the parameter δ (in addition to the other input parameters: n, δ and the radius of the disk). If the input shape is found to be approximable, the rational-arithmetic algorithm also computes an approximate solution shape for the problem. For convex shapes, the complexity of the exact decision algorithm drops to O(n), which is also the time required to compute a solution shape P with at most one more vertex than a vertex-minimal one. Our study is motivated by applications from two different domains. However, since the offset operation has numerous uses, we anticipate that the reverse question that we study here will be still more broadly applicable. We present results obtained with our implementation of the rational-arithmetic algorithm.},
author = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
booktitle = {Proceedings of the twenty-seventh annual symposium on Computational geometry},
location = {Paris, France},
pages = {187 -- 196},
publisher = {ACM},
title = {{Deconstructing approximate offsets}},
doi = {10.1145/1998196.1998225},
year = {2011},
}
@inproceedings{3355,
abstract = {Byzantine Fault Tolerant (BFT) protocols aim to improve the reliability of distributed systems. They enable systems to tolerate arbitrary failures in a bounded number of nodes. BFT protocols are usually proven correct for certain safety and liveness properties. However, recent studies have shown that the performance of state-of-the-art BFT protocols decreases drastically in the presence of even a single malicious node. This motivates a formal quantitative analysis of BFT protocols to investigate their performance characteristics under different scenarios. We present HyPerf, a new hybrid methodology based on model checking and simulation techniques for evaluating the performance of BFT protocols. We build a transition system corresponding to a BFT protocol and systematically explore the set of behaviors allowed by the protocol. We associate certain timing information with different operations in the protocol, like cryptographic operations and message transmission. After an elaborate state exploration, we use the time information to evaluate the performance characteristics of the protocol using simulation techniques. We integrate our framework in Mace, a tool for building and verifying distributed systems. We evaluate the performance of PBFT using our framework. We describe two different use-cases of our methodology. For the benign operation of the protocol, we use the time information as random variables to compute the probability distribution of the execution times. In the presence of faults, we estimate the worst-case performance of the protocol for various attacks that can be employed by malicious nodes. Our results show the importance of hybrid techniques in systematically analyzing the performance of large-scale systems.},
author = {Halalai, Raluca and Henzinger, Thomas A and Singh, Vasu},
location = {Aachen, Germany},
pages = {255 -- 264},
publisher = {IEEE},
title = {{Quantitative evaluation of BFT protocols}},
doi = {10.1109/QEST.2011.40},
year = {2011},
}
@inproceedings{3343,
abstract = {We present faster and dynamic algorithms for the following problems arising in probabilistic verification: Computation of the maximal end-component (mec) decomposition of Markov decision processes (MDPs), and of the almost sure winning set for reachability and parity objectives in MDPs. We achieve the following running time for static algorithms in MDPs with graphs of n vertices and m edges: (1) O(m · min{ √m, n2/3 }) for the mec decomposition, improving the longstanding O(m·n) bound; (2) O(m·n2/3) for reachability objectives, improving the previous O(m · √m) bound for m > n4/3; and (3) O(m · min{ √m, n2/3 } · log(d)) for parity objectives with d priorities, improving the previous O(m · √m · d) bound. We also give incremental and decremental algorithms in linear time for mec decomposition and reachability objectives and O(m · log d) time for parity objectives.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
location = {San Francisco, USA},
pages = {1318 -- 1336},
publisher = {SIAM},
title = {{Faster and dynamic algorithms for maximal end component decomposition and related graph problems in probabilistic verification}},
doi = {10.1137/1.9781611973082.101},
year = {2011},
}
@article{3778,
author = {Barton, Nicholas H},
journal = {Heredity},
number = {2},
pages = {205 -- 206},
publisher = {Nature Publishing Group},
title = {{Estimating linkage disequilibria}},
doi = {10.1038/hdy.2010.67},
volume = {106},
year = {2011},
}
@article{3386,
abstract = {Evolutionary theories of ageing predict that life span increases with decreasing extrinsic mortality, and life span variation among queens in ant species seems to corroborate this prediction: queens, which are the only reproductive in a colony, live much longer than queens in multi-queen colonies. The latter often inhabit ephemeral nest sites and accordingly are assumed to experience a higher mortality risk. Yet, all prior studies compared queens from different single- and multi-queen species. Here, we demonstrate an effect of queen number on longevity and fecundity within a single, socially plastic species, where queens experience the similar level of extrinsic mortality. Queens from single- and two-queen colonies had significantly longer lifespan and higher fecundity than queens living in associations of eight queens. As queens also differ neither in morphology nor the mode of colony foundation, our study shows that the social environment itself strongly affects ageing rate.},
author = {Schrempf, Alexandra and Cremer, Sylvia and Heinze, Jürgen},
journal = {Journal of Evolutionary Biology},
number = {7},
pages = {1455 -- 1461},
publisher = {Wiley-Blackwell},
title = {{Social influence on age and reproduction: reduced lifespan and fecundity in multi-queen ant colonies}},
doi = {10.1111/j.1420-9101.2011.02278.x},
volume = {24},
year = {2011},
}
@article{3393,
abstract = {Unlike unconditionally advantageous “Fisherian” variants that tend to spread throughout a species range once introduced anywhere, “bistable” variants, such as chromosome translocations, have two alternative stable frequencies, absence and (near) fixation. Analogous to populations with Allee effects, bistable variants tend to increase locally only once they become sufficiently common, and their spread depends on their rate of increase averaged over all frequencies. Several proposed manipulations of insect populations, such as using Wolbachia or “engineered underdominance” to suppress vector-borne diseases, produce bistable rather than Fisherian dynamics. We synthesize and extend theoretical analyses concerning three features of their spatial behavior: rate of spread, conditions to initiate spread from a localized introduction, and wave stopping caused by variation in population densities or dispersal rates. Unlike Fisherian variants, bistable variants tend to spread spatially only for particular parameter combinations and initial conditions. Wave initiation requires introduction over an extended region, while subsequent spatial spread is slower than for Fisherian waves and can easily be halted by local spatial inhomogeneities. We present several new results, including robust sufficient conditions to initiate (and stop) spread, using a one-parameter cubic approximation applicable to several models. The results have both basic and applied implications.},
author = {Barton, Nicholas H and Turelli, Michael},
journal = {American Naturalist},
number = {3},
pages = {E48 -- E75},
publisher = {University of Chicago Press},
title = {{Spatial waves of advance with bistable dynamics: Cytoplasmic and genetic analogues of Allee effects}},
doi = {10.1086/661246},
volume = {178},
year = {2011},
}
@article{3374,
abstract = {Genetic regulatory networks enable cells to respond to changes in internal and external conditions by dynamically coordinating their gene expression profiles. Our ability to make quantitative measurements in these biochemical circuits has deepened our understanding of what kinds of computations genetic regulatory networks can perform, and with what reliability. These advances have motivated researchers to look for connections between the architecture and function of genetic regulatory networks. Transmitting information between a network's inputs and outputs has been proposed as one such possible measure of function, relevant in certain biological contexts. Here we summarize recent developments in the application of information theory to gene regulatory networks. We first review basic concepts in information theory necessary for understanding recent work. We then discuss the functional complexity of gene regulation, which arises from the molecular nature of the regulatory interactions. We end by reviewing some experiments that support the view that genetic networks responsible for early development of multicellular organisms might be maximizing transmitted 'positional information'.},
author = {Tkacik, Gasper and Walczak, Aleksandra},
journal = {Journal of Physics: Condensed Matter},
number = {15},
publisher = {IOP Publishing Ltd.},
title = {{Information transmission in genetic regulatory networks: a review}},
doi = {10.1088/0953-8984/23/15/153102},
volume = {23},
year = {2011},
}
@article{3379,
abstract = {The process of gastrulation is highly conserved across vertebrates on both the genetic and morphological levels, despite great variety in embryonic shape and speed of development. This mechanism spatially separates the germ layers and establishes the organizational foundation for future development. Mesodermal identity is specified in a superficial layer of cells, the epiblast, where cells maintain an epithelioid morphology. These cells involute to join the deeper hypoblast layer where they adopt a migratory, mesenchymal morphology. Expression of a cascade of related transcription factors orchestrates the parallel genetic transition from primitive to mature mesoderm. Although the early and late stages of this process are increasingly well understood, the transition between them has remained largely mysterious. We present here the first high resolution in vivo observations of the blebby transitional morphology of involuting mesodermal cells in a vertebrate embryo. We further demonstrate that the zebrafish spadetail mutation creates a reversible block in the maturation program, stalling cells in the transition state. This mutation creates an ideal system for dissecting the specific properties of cells undergoing the morphological transition of maturing mesoderm, as we demonstrate with a direct measurement of cell–cell adhesion.},
author = {Row, Richard and Maître, Jean-Léon and Martin, Benjamin and Stockinger, Petra and Heisenberg, Carl-Philipp J and Kimelman, David},
journal = {Developmental Biology},
number = {1},
pages = {102 -- 110},
publisher = {Elsevier},
title = {{Completion of the epithelial to mesenchymal transition in zebrafish mesoderm requires Spadetail}},
doi = {10.1016/j.ydbio.2011.03.025},
volume = {354},
year = {2011},
}
@article{3381,
abstract = {In this survey, we compare several languages for specifying Markovian population models such as queuing networks and chemical reaction networks. All these languages — matrix descriptions, stochastic Petri nets, stoichiometric equations, stochastic process algebras, and guarded command models — describe continuous-time Markov chains, but they differ according to important properties, such as compositionality, expressiveness and succinctness, executability, and ease of use. Moreover, they provide different support for checking the well-formedness of a model and for analyzing a model.},
author = {Henzinger, Thomas A and Jobstmann, Barbara and Wolf, Verena},
journal = {IJFCS: International Journal of Foundations of Computer Science},
number = {4},
pages = {823 -- 841},
publisher = {World Scientific Publishing},
title = {{Formalisms for specifying Markovian population models}},
doi = {10.1142/S0129054111008441},
volume = {22},
year = {2011},
}
@inproceedings{3336,
abstract = {We introduce TopoCut: a new way to integrate knowledge about topological properties (TPs) into random field image segmentation model. Instead of including TPs as additional constraints during minimization of the energy function, we devise an efficient algorithm for modifying the unary potentials such that the resulting segmentation is guaranteed with the desired properties. Our method is more flexible in the sense that it handles more topology constraints than previous methods, which were only able to enforce pairwise or global connectivity. In particular, our method is very fast, making it for the first time possible to enforce global topological properties in practical image segmentation tasks.},
author = {Chen, Chao and Freedman, Daniel and Lampert, Christoph},
booktitle = {CVPR: Computer Vision and Pattern Recognition},
location = {Colorado Springs, CO, USA},
pages = {2089 -- 2096},
publisher = {IEEE},
title = {{Enforcing topological constraints in random field image segmentation}},
doi = {10.1109/CVPR.2011.5995503},
year = {2011},
}
@inproceedings{3348,
abstract = {We study synthesis of controllers for real-time systems, where the objective is to stay in a given safe set. The problem is solved by obtaining winning strategies in the setting of concurrent two-player timed automaton games with safety objectives. To prevent a player from winning by blocking time, we restrict each player to strategies that ensure that the player cannot be responsible for causing a zeno run. We construct winning strategies for the controller which require access only to (1) the system clocks (thus, controllers which require their own internal infinitely precise clocks are not necessary), and (2) a linear (in the number of clocks) number of memory bits. Precisely, we show that for safety objectives, a memory of size (3 · |C|+lg(|C|+1)) bits suffices for winning controller strategies, where C is the set of clocks of the timed automaton game, significantly improving the previous known exponential bound. We also settle the open question of whether winning region controller strategies require memory for safety objectives by showing with an example the necessity of memory for region strategies to win for safety objectives.},
author = {Chatterjee, Krishnendu and Prabhu, Vinayak},
location = {Chicago, USA},
pages = {221 -- 230},
publisher = {ACM},
title = {{Synthesis of memory-efficient real-time controllers for safety objectives}},
doi = {10.1145/1967701.1967734},
year = {2011},
}
@inproceedings{3350,
abstract = {A controller for a discrete game with ω-regular objectives requires attention if, intuitively, it requires measuring the state and switching from the current control action. Minimum attention controllers are preferable in modern shared implementations of cyber-physical systems because they produce the least burden on system resources such as processor time or communication bandwidth. We give algorithms to compute minimum attention controllers for ω-regular objectives in imperfect information discrete two-player games. We show a polynomial-time reduction from minimum attention controller synthesis to synthesis of controllers for mean-payoff parity objectives in games of incomplete information. This gives an optimal EXPTIME-complete synthesis algorithm. We show that the minimum attention controller problem is decidable for infinite state systems with finite bisimulation quotients. In particular, the problem is decidable for timed and rectangular automata.},
author = {Chatterjee, Krishnendu and Majumdar, Rupak},
editor = {Fahrenberg, Uli and Tripakis, Stavros},
booktitle = {FORMATS: Formal Modeling and Analysis of Timed Systems},
location = {Aalborg, Denmark},
pages = {145 -- 159},
publisher = {Springer},
title = {{Minimum attention controller synthesis for omega regular objectives}},
doi = {10.1007/978-3-642-24310-3_11},
volume = {6919},
year = {2011},
}
@misc{5382,
abstract = {We consider two-player stochastic games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine a probability distribution over the successor states. We also consider the important special case of turn-based stochastic games where players make moves in turns, rather than concurrently. We study concurrent games with ω-regular winning conditions specified as parity objectives. The value for player 1 for a parity objective is the maximal probability with which the player can guarantee the satisfaction of the objective against all strategies of the opponent. We study the problem of continuity and robustness of the value function in concurrent and turn-based stochastic parity games with respect to imprecision in the transition probabilities. We present quantitative bounds on the difference of the value function (in terms of the imprecision of the transition probabilities) and show the value continuity for structurally equivalent concurrent games (two games are structurally equivalent if the support of the transition function is the same and the probabilities differ). We also show robustness of optimal strategies for structurally equivalent turn-based stochastic parity games. Finally we show that the value continuity property breaks without the structurally equivalent assumption (even for Markov chains) and show that our quantitative bound is asymptotically optimal. Hence our results are tight (the assumption is both necessary and sufficient) and optimal (our quantitative bound is asymptotically optimal).},
author = {Chatterjee, Krishnendu},
issn = {2664-1690},
pages = {18},
publisher = {IST Austria},
title = {{Robustness of structurally equivalent concurrent parity games}},
doi = {10.15479/AT:IST-2011-0006},
year = {2011},
}
@misc{5387,
abstract = {We consider Markov Decision Processes (MDPs) with mean-payoff parity and energy parity objectives. In system design, the parity objective is used to encode ω-regular specifications, and the mean-payoff and energy objectives can be used to model quantitative resource constraints. The energy condition requires that the resource level never drops below 0, and the mean-payoff condition requires that the limit-average value of the resource consumption is within a threshold. While these two (energy and mean-payoff) classical conditions are equivalent for two-player games, we show that they differ for MDPs. We show that the problem of deciding whether a state is almost-sure winning (i.e., winning with probability 1) in energy parity MDPs is in NP ∩ coNP, while for mean-payoff parity MDPs, the problem is solvable in polynomial time, improving a recent PSPACE bound.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
issn = {2664-1690},
pages = {20},
publisher = {IST Austria},
title = {{Energy and mean-payoff parity Markov decision processes}},
doi = {10.15479/AT:IST-2011-0001},
year = {2011},
}
@inproceedings{3362,
abstract = {State-transition systems communicating by shared variables have been the underlying model of choice for applications of model checking. Such formalisms, however, have difficulty with modeling process creation or death and communication reconfigurability. Here, we introduce “dynamic reactive modules” (DRM), a state-transition modeling formalism that supports dynamic reconfiguration and creation/death of processes. The resulting formalism supports two types of variables, data variables and reference variables. Reference variables enable changing the connectivity between processes and referring to instances of processes. We show how this new formalism supports parallel composition and refinement through trace containment. DRM provide a natural language for modeling (and ultimately reasoning about) biological systems and multiple threads communicating through shared variables.},
author = {Fisher, Jasmin and Henzinger, Thomas A and Nickovic, Dejan and Piterman, Nir and Singh, Anmol and Vardi, Moshe},
editor = {Katoen, Joost-Pieter and König, Barbara},
booktitle = {CONCUR: Concurrency Theory},
location = {Aachen, Germany},
pages = {404 -- 418},
publisher = {Springer},
title = {{Dynamic reactive modules}},
doi = {10.1007/978-3-642-23217-6_27},
volume = {6901},
year = {2011},
}
@misc{3312,
abstract = {We study the 3D reconstruction of plant roots from multiple 2D images. To meet the challenge caused by the delicate nature of thin branches, we make three innovations to cope with the sensitivity to image quality and calibration. First, we model the background as a harmonic function to improve the segmentation of the root in each 2D image. Second, we develop the concept of the regularized visual hull which reduces the effect of jittering and refraction by ensuring consistency with one 2D image. Third, we guarantee connectedness through adjustments to the 3D reconstruction that minimize global error. Our software is part of a biological phenotype/genotype study of agricultural root systems. It has been tested on more than 40 plant roots and results are promising in terms of reconstruction quality and efficiency.},
author = {Zheng, Ying and Gu, Steve and Edelsbrunner, Herbert and Tomasi, Carlo and Benfey, Philip},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
location = {Barcelona, Spain},
publisher = {IEEE},
title = {{Detailed reconstruction of 3D plant root shape}},
doi = {10.1109/ICCV.2011.6126475},
year = {2011},
}
@inproceedings{3367,
abstract = {In this paper, we present the first output-sensitive algorithm to compute the persistence diagram of a filtered simplicial complex. For any Γ>0, it returns only those homology classes with persistence at least Γ. Instead of the classical reduction via column operations, our algorithm performs rank computations on submatrices of the boundary matrix. For an arbitrary constant δ ∈ (0,1), the running time is O(C_(1-δ)Γ R(n) log n), where C_(1-δ)Γ is the number of homology classes with persistence at least (1-δ)Γ, n is the total number of simplices, and R(n) is the complexity of computing the rank of an n x n matrix with O(n) nonzero entries. Depending on the choice of the rank algorithm, this yields a deterministic O(C_(1-δ)Γ n^2.376) algorithm, a O(C_(1-δ)Γ n^2.28) Las-Vegas algorithm, or a O(C_(1-δ)Γ n^(2+ε)) Monte-Carlo algorithm for an arbitrary ε>0.},
author = {Chen, Chao and Kerber, Michael},
booktitle = {SoCG: Symposium on Computational Geometry},
location = {Paris, France},
pages = {207 -- 216},
publisher = {ACM},
title = {{An output sensitive algorithm for persistent homology}},
doi = {10.1145/1998196.1998228},
year = {2011},
}