@inproceedings{3324,
abstract = {Automated termination provers often use the following schema to prove that a program terminates: construct a relational abstraction of the program's transition relation and then show that the relational abstraction is well-founded. The focus of current tools has been on developing sophisticated techniques for constructing the abstractions while relying on known decidable logics (such as linear arithmetic) to express them. We believe we can significantly increase the class of programs that are amenable to automated termination proofs by identifying more expressive decidable logics for reasoning about well-founded relations. We therefore present a new decision procedure for reasoning about multiset orderings, which are among the most powerful orderings used to prove termination. We show that, using our decision procedure, one can automatically prove termination of natural abstractions of programs.},
author = {Piskac, Ruzica and Wies, Thomas},
editor = {Jhala, Ranjit and Schmidt, David},
location = {Austin, TX, USA},
pages = {371 -- 386},
publisher = {Springer},
title = {{Decision procedures for automating termination proofs}},
doi = {10.1007/978-3-642-18275-4_26},
volume = {6538},
year = {2011},
}
@inproceedings{3329,
abstract = {We consider the offset-deconstruction problem: Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance µ in Hausdorff distance, as the Minkowski sum of another polygonal shape P with a disk of fixed radius? If so, we also seek a preferably simple-looking solution shape P; then, P's offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give an O(n log n)-time exact decision algorithm that handles any polygonal shape, assuming the real-RAM model of computation. An alternative algorithm, based purely on rational arithmetic, answers the same deconstruction problem up to an uncertainty parameter δ, and its running time depends on this parameter (in addition to the other input parameters: n, µ, and the radius of the disk). If the input shape is found to be approximable, the rational-arithmetic algorithm also computes an approximate solution shape for the problem. For convex shapes, the complexity of the exact decision algorithm drops to O(n), which is also the time required to compute a solution shape P with at most one more vertex than a vertex-minimal one. Our study is motivated by applications from two different domains. However, since the offset operation has numerous uses, we anticipate that the reverse question that we study here will be still more broadly applicable. We present results obtained with our implementation of the rational-arithmetic algorithm.},
author = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
booktitle = {Proceedings of the twenty-seventh annual symposium on Computational geometry},
location = {Paris, France},
pages = {187 -- 196},
publisher = {ACM},
title = {{Deconstructing approximate offsets}},
doi = {10.1145/1998196.1998225},
year = {2011},
}
@inproceedings{3355,
abstract = {Byzantine Fault Tolerant (BFT) protocols aim to improve the reliability of distributed systems. They enable systems to tolerate arbitrary failures in a bounded number of nodes. BFT protocols are usually proven correct for certain safety and liveness properties. However, recent studies have shown that the performance of state-of-the-art BFT protocols decreases drastically in the presence of even a single malicious node. This motivates a formal quantitative analysis of BFT protocols to investigate their performance characteristics under different scenarios. We present HyPerf, a new hybrid methodology based on model checking and simulation techniques for evaluating the performance of BFT protocols. We build a transition system corresponding to a BFT protocol and systematically explore the set of behaviors allowed by the protocol. We associate certain timing information with different operations in the protocol, like cryptographic operations and message transmission. After an elaborate state exploration, we use the time information to evaluate the performance characteristics of the protocol using simulation techniques. We integrate our framework in Mace, a tool for building and verifying distributed systems. We evaluate the performance of PBFT using our framework. We describe two different use-cases of our methodology. For the benign operation of the protocol, we use the time information as random variables to compute the probability distribution of the execution times. In the presence of faults, we estimate the worst-case performance of the protocol for various attacks that can be employed by malicious nodes. Our results show the importance of hybrid techniques in systematically analyzing the performance of large-scale systems.},
author = {Halalai, Raluca and Henzinger, Thomas A and Singh, Vasu},
location = {Aachen, Germany},
pages = {255 -- 264},
publisher = {IEEE},
title = {{Quantitative evaluation of BFT protocols}},
doi = {10.1109/QEST.2011.40},
year = {2011},
}
@inproceedings{3343,
abstract = {We present faster and dynamic algorithms for the following problems arising in probabilistic verification: computation of the maximal end-component (mec) decomposition of Markov decision processes (MDPs), and of the almost-sure winning set for reachability and parity objectives in MDPs. We achieve the following running times for static algorithms in MDPs with graphs of n vertices and m edges: (1) O(m · min{√m, n^{2/3}}) for the mec decomposition, improving the longstanding O(m · n) bound; (2) O(m · n^{2/3}) for reachability objectives, improving the previous O(m · √m) bound for m > n^{4/3}; and (3) O(m · min{√m, n^{2/3}} · log(d)) for parity objectives with d priorities, improving the previous O(m · √m · d) bound. We also give incremental and decremental algorithms in linear time for mec decomposition and reachability objectives, and O(m · log d) time for parity objectives.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
location = {San Francisco, USA},
pages = {1318 -- 1336},
publisher = {SIAM},
title = {{Faster and dynamic algorithms for maximal end component decomposition and related graph problems in probabilistic verification}},
doi = {10.1137/1.9781611973082.101},
year = {2011},
}
@article{3778,
author = {Barton, Nicholas H},
journal = {Heredity},
number = {2},
pages = {205 -- 206},
publisher = {Nature Publishing Group},
title = {{Estimating linkage disequilibria}},
doi = {10.1038/hdy.2010.67},
volume = {106},
year = {2011},
}
@article{3386,
abstract = {Evolutionary theories of ageing predict that life span increases with decreasing extrinsic mortality, and life span variation among ant queens seems to corroborate this prediction: queens that are the sole reproductive of their colony live much longer than queens in multi-queen colonies. The latter often inhabit ephemeral nest sites and accordingly are assumed to experience a higher mortality risk. Yet, all prior studies compared queens from different single- and multi-queen species. Here, we demonstrate an effect of queen number on longevity and fecundity within a single, socially plastic species, where queens experience a similar level of extrinsic mortality. Queens from single- and two-queen colonies had a significantly longer lifespan and higher fecundity than queens living in associations of eight queens. As the queens also differ neither in morphology nor in the mode of colony foundation, our study shows that the social environment itself strongly affects the ageing rate.},
author = {Schrempf, Alexandra and Cremer, Sylvia and Heinze, Jürgen},
journal = {Journal of Evolutionary Biology},
number = {7},
pages = {1455 -- 1461},
publisher = {Wiley-Blackwell},
title = {{Social influence on age and reproduction reduced lifespan and fecundity in multi queen ant colonies}},
doi = {10.1111/j.1420-9101.2011.02278.x},
volume = {24},
year = {2011},
}
@article{3393,
abstract = {Unlike unconditionally advantageous “Fisherian” variants that tend to spread throughout a species range once introduced anywhere, “bistable” variants, such as chromosome translocations, have two alternative stable frequencies, absence and (near) fixation. Analogous to populations with Allee effects, bistable variants tend to increase locally only once they become sufficiently common, and their spread depends on their rate of increase averaged over all frequencies. Several proposed manipulations of insect populations, such as using Wolbachia or “engineered underdominance” to suppress vector-borne diseases, produce bistable rather than Fisherian dynamics. We synthesize and extend theoretical analyses concerning three features of their spatial behavior: rate of spread, conditions to initiate spread from a localized introduction, and wave stopping caused by variation in population densities or dispersal rates. Unlike Fisherian variants, bistable variants tend to spread spatially only for particular parameter combinations and initial conditions. Wave initiation requires introduction over an extended region, while subsequent spatial spread is slower than for Fisherian waves and can easily be halted by local spatial inhomogeneities. We present several new results, including robust sufficient conditions to initiate (and stop) spread, using a one-parameter cubic approximation applicable to several models. The results have both basic and applied implications.},
author = {Barton, Nicholas H and Turelli, Michael},
journal = {American Naturalist},
number = {3},
pages = {E48 -- E75},
publisher = {University of Chicago Press},
title = {{Spatial waves of advance with bistable dynamics: Cytoplasmic and genetic analogues of Allee effects}},
doi = {10.1086/661246},
volume = {178},
year = {2011},
}
@article{3374,
abstract = {Genetic regulatory networks enable cells to respond to changes in internal and external conditions by dynamically coordinating their gene expression profiles. Our ability to make quantitative measurements in these biochemical circuits has deepened our understanding of what kinds of computations genetic regulatory networks can perform, and with what reliability. These advances have motivated researchers to look for connections between the architecture and function of genetic regulatory networks. Transmitting information between a network's inputs and outputs has been proposed as one such possible measure of function, relevant in certain biological contexts. Here we summarize recent developments in the application of information theory to gene regulatory networks. We first review basic concepts in information theory necessary for understanding recent work. We then discuss the functional complexity of gene regulation, which arises from the molecular nature of the regulatory interactions. We end by reviewing some experiments that support the view that genetic networks responsible for early development of multicellular organisms might be maximizing transmitted 'positional information'.},
author = {Tkacik, Gasper and Walczak, Aleksandra},
journal = {Journal of Physics: Condensed Matter},
number = {15},
publisher = {IOP Publishing Ltd.},
title = {{Information transmission in genetic regulatory networks a review}},
doi = {10.1088/0953-8984/23/15/153102},
volume = {23},
year = {2011},
}
@article{3379,
abstract = {The process of gastrulation is highly conserved across vertebrates on both the genetic and morphological levels, despite great variety in embryonic shape and speed of development. This mechanism spatially separates the germ layers and establishes the organizational foundation for future development. Mesodermal identity is specified in a superficial layer of cells, the epiblast, where cells maintain an epithelioid morphology. These cells involute to join the deeper hypoblast layer where they adopt a migratory, mesenchymal morphology. Expression of a cascade of related transcription factors orchestrates the parallel genetic transition from primitive to mature mesoderm. Although the early and late stages of this process are increasingly well understood, the transition between them has remained largely mysterious. We present here the first high resolution in vivo observations of the blebby transitional morphology of involuting mesodermal cells in a vertebrate embryo. We further demonstrate that the zebrafish spadetail mutation creates a reversible block in the maturation program, stalling cells in the transition state. This mutation creates an ideal system for dissecting the specific properties of cells undergoing the morphological transition of maturing mesoderm, as we demonstrate with a direct measurement of cell–cell adhesion.},
author = {Row, Richard and Maître, Jean-Léon and Martin, Benjamin and Stockinger, Petra and Heisenberg, Carl-Philipp J and Kimelman, David},
journal = {Developmental Biology},
number = {1},
pages = {102 -- 110},
publisher = {Elsevier},
title = {{Completion of the epithelial to mesenchymal transition in zebrafish mesoderm requires Spadetail}},
doi = {10.1016/j.ydbio.2011.03.025},
volume = {354},
year = {2011},
}
@article{3381,
abstract = {In this survey, we compare several languages for specifying Markovian population models such as queuing networks and chemical reaction networks. All these languages — matrix descriptions, stochastic Petri nets, stoichiometric equations, stochastic process algebras, and guarded command models — describe continuous-time Markov chains, but they differ according to important properties, such as compositionality, expressiveness and succinctness, executability, and ease of use. Moreover, they provide different support for checking the well-formedness of a model and for analyzing a model.},
author = {Henzinger, Thomas A and Jobstmann, Barbara and Wolf, Verena},
journal = {International Journal of Foundations of Computer Science},
number = {4},
pages = {823 -- 841},
publisher = {World Scientific Publishing},
title = {{Formalisms for specifying Markovian population models}},
doi = {10.1142/S0129054111008441},
volume = {22},
year = {2011},
}
@inproceedings{3336,
abstract = {We introduce TopoCut: a new way to integrate knowledge about topological properties (TPs) into random field image segmentation models. Instead of including TPs as additional constraints during minimization of the energy function, we devise an efficient algorithm for modifying the unary potentials such that the resulting segmentation is guaranteed to have the desired properties. Our method is more flexible in the sense that it handles more topology constraints than previous methods, which were only able to enforce pairwise or global connectivity. In particular, our method is very fast, making it possible for the first time to enforce global topological properties in practical image segmentation tasks.},
author = {Chen, Chao and Freedman, Daniel and Lampert, Christoph},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
location = {Colorado Springs, CO, USA},
pages = {2089 -- 2096},
publisher = {IEEE},
title = {{Enforcing topological constraints in random field image segmentation}},
doi = {10.1109/CVPR.2011.5995503},
year = {2011},
}
@inproceedings{3348,
abstract = {We study synthesis of controllers for real-time systems, where the objective is to stay in a given safe set. The problem is solved by obtaining winning strategies in the setting of concurrent two-player timed automaton games with safety objectives. To prevent a player from winning by blocking time, we restrict each player to strategies that ensure that the player cannot be responsible for causing a zeno run. We construct winning strategies for the controller which require access only to (1) the system clocks (thus, controllers which require their own internal infinitely precise clocks are not necessary), and (2) a linear (in the number of clocks) number of memory bits. Precisely, we show that for safety objectives, a memory of size (3 · |C| + lg(|C|+1)) bits suffices for winning controller strategies, where C is the set of clocks of the timed automaton game, significantly improving the previously known exponential bound. We also settle the open question of whether winning region controller strategies require memory for safety objectives by showing, with an example, the necessity of memory for region strategies to win for safety objectives.},
author = {Chatterjee, Krishnendu and Prabhu, Vinayak},
location = {Chicago, USA},
pages = {221 -- 230},
publisher = {Springer},
title = {{Synthesis of memory efficient real time controllers for safety objectives}},
doi = {10.1145/1967701.1967734},
year = {2011},
}
@inproceedings{3350,
abstract = {A controller for a discrete game with ω-regular objectives requires attention if, intuitively, it requires measuring the state and switching from the current control action. Minimum attention controllers are preferable in modern shared implementations of cyber-physical systems because they produce the least burden on system resources such as processor time or communication bandwidth. We give algorithms to compute minimum attention controllers for ω-regular objectives in imperfect information discrete two-player games. We show a polynomial-time reduction from minimum attention controller synthesis to synthesis of controllers for mean-payoff parity objectives in games of incomplete information. This gives an optimal EXPTIME-complete synthesis algorithm. We show that the minimum attention controller problem is decidable for infinite state systems with finite bisimulation quotients. In particular, the problem is decidable for timed and rectangular automata.},
author = {Chatterjee, Krishnendu and Majumdar, Rupak},
editor = {Fahrenberg, Uli and Tripakis, Stavros},
location = {Aalborg, Denmark},
pages = {145 -- 159},
publisher = {Springer},
title = {{Minimum attention controller synthesis for omega regular objectives}},
doi = {10.1007/978-3-642-24310-3_11},
volume = {6919},
year = {2011},
}
@misc{5382,
abstract = {We consider two-player stochastic games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine a probability distribution over the successor states. We also consider the important special case of turn-based stochastic games where players make moves in turns, rather than concurrently. We study concurrent games with ω-regular winning conditions specified as parity objectives. The value for player 1 for a parity objective is the maximal probability with which the player can guarantee the satisfaction of the objective against all strategies of the opponent. We study the problem of continuity and robustness of the value function in concurrent and turn-based stochastic parity games with respect to imprecision in the transition probabilities. We present quantitative bounds on the difference of the value function (in terms of the imprecision of the transition probabilities) and show the value continuity for structurally equivalent concurrent games (two games are structurally equivalent if the support of the transition function is the same and the probabilities differ). We also show robustness of optimal strategies for structurally equivalent turn-based stochastic parity games. Finally, we show that the value continuity property breaks without the structural equivalence assumption (even for Markov chains) and show that our quantitative bound is asymptotically optimal. Hence our results are tight (the assumption is both necessary and sufficient) and optimal (our quantitative bound is asymptotically optimal).},
author = {Chatterjee, Krishnendu},
issn = {2664-1690},
pages = {18},
publisher = {IST Austria},
title = {{Robustness of structurally equivalent concurrent parity games}},
doi = {10.15479/AT:IST-2011-0006},
year = {2011},
}
@misc{5387,
abstract = {We consider Markov Decision Processes (MDPs) with mean-payoff parity and energy parity objectives. In system design, the parity objective is used to encode ω-regular specifications, and the mean-payoff and energy objectives can be used to model quantitative resource constraints. The energy condition requires that the resource level never drops below 0, and the mean-payoff condition requires that the limit-average value of the resource consumption is within a threshold. While these two (energy and mean-payoff) classical conditions are equivalent for two-player games, we show that they differ for MDPs. We show that the problem of deciding whether a state is almost-sure winning (i.e., winning with probability 1) in energy parity MDPs is in NP ∩ coNP, while for mean-payoff parity MDPs, the problem is solvable in polynomial time, improving a recent PSPACE bound.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
issn = {2664-1690},
pages = {20},
publisher = {IST Austria},
title = {{Energy and mean-payoff parity Markov decision processes}},
doi = {10.15479/AT:IST-2011-0001},
year = {2011},
}
@inproceedings{3362,
abstract = {State-transition systems communicating by shared variables have been the underlying model of choice for applications of model checking. Such formalisms, however, have difficulty with modeling process creation or death and communication reconfigurability. Here, we introduce “dynamic reactive modules” (DRM), a state-transition modeling formalism that supports dynamic reconfiguration and creation/death of processes. The resulting formalism supports two types of variables, data variables and reference variables. Reference variables enable changing the connectivity between processes and referring to instances of processes. We show how this new formalism supports parallel composition and refinement through trace containment. DRM provide a natural language for modeling (and ultimately reasoning about) biological systems and multiple threads communicating through shared variables.},
author = {Fisher, Jasmin and Henzinger, Thomas A and Nickovic, Dejan and Piterman, Nir and Singh, Anmol and Vardi, Moshe},
editor = {Katoen, Joost-Pieter and König, Barbara},
location = {Aachen, Germany},
pages = {404 -- 418},
publisher = {Springer},
title = {{Dynamic reactive modules}},
doi = {10.1007/978-3-642-23217-6_27},
volume = {6901},
year = {2011},
}
@misc{3312,
abstract = {We study the 3D reconstruction of plant roots from multiple 2D images. To meet the challenge caused by the delicate nature of thin branches, we make three innovations to cope with the sensitivity to image quality and calibration. First, we model the background as a harmonic function to improve the segmentation of the root in each 2D image. Second, we develop the concept of the regularized visual hull which reduces the effect of jittering and refraction by ensuring consistency with one 2D image. Third, we guarantee connectedness through adjustments to the 3D reconstruction that minimize global error. Our software is part of a biological phenotype/genotype study of agricultural root systems. It has been tested on more than 40 plant roots and results are promising in terms of reconstruction quality and efficiency.},
author = {Zheng, Ying and Gu, Steve and Edelsbrunner, Herbert and Tomasi, Carlo and Benfey, Philip},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
location = {Barcelona, Spain},
publisher = {IEEE},
title = {{Detailed reconstruction of 3D plant root shape}},
doi = {10.1109/ICCV.2011.6126475},
year = {2011},
}
@inproceedings{3367,
abstract = {In this paper, we present the first output-sensitive algorithm to compute the persistence diagram of a filtered simplicial complex. For any Γ>0, it returns only those homology classes with persistence at least Γ. Instead of the classical reduction via column operations, our algorithm performs rank computations on submatrices of the boundary matrix. For an arbitrary constant δ ∈ (0,1), the running time is O(C_{(1-δ)Γ} R(n) log n), where C_{(1-δ)Γ} is the number of homology classes with persistence at least (1-δ)Γ, n is the total number of simplices, and R(n) is the complexity of computing the rank of an n × n matrix with O(n) nonzero entries. Depending on the choice of the rank algorithm, this yields a deterministic O(C_{(1-δ)Γ} n^{2.376}) algorithm, an O(C_{(1-δ)Γ} n^{2.28}) Las-Vegas algorithm, or an O(C_{(1-δ)Γ} n^{2+ε}) Monte-Carlo algorithm for an arbitrary ε>0.},
author = {Chen, Chao and Kerber, Michael},
location = {Paris, France},
pages = {207 -- 216},
publisher = {ACM},
title = {{An output sensitive algorithm for persistent homology}},
doi = {10.1145/1998196.1998228},
year = {2011},
}
@article{3267,
abstract = {We address the problem of localizing homology classes, namely, finding the cycle representing a given class with the most concise geometric measure. We study the problem with different measures: volume, diameter and radius. For volume, that is, the 1-norm of a cycle, two main results are presented. First, we prove that the problem is NP-hard to approximate within any constant factor. Second, we prove that for homology of dimension two or higher, the problem is NP-hard to approximate even when the Betti number is O(1). The latter result leads to the inapproximability of the problem of computing the nonbounding cycle with the smallest volume and computing cycles representing a homology basis with the minimal total volume. As for the other two measures defined by pairwise geodesic distance, diameter and radius, we show that the localization problem is NP-hard for diameter but is polynomial for radius. Our work is restricted to homology over the ℤ2 field.},
author = {Chen, Chao and Freedman, Daniel},
journal = {Discrete & Computational Geometry},
number = {3},
pages = {425 -- 448},
publisher = {Springer},
title = {{Hardness results for homology localization}},
doi = {10.1007/s00454-010-9322-8},
volume = {45},
year = {2011},
}
@inproceedings{3298,
abstract = {We present a new algorithm for enforcing incompressibility for Smoothed Particle Hydrodynamics (SPH) by preserving uniform density across the domain. We propose a hybrid method that uses a Poisson solve on a coarse grid to enforce a divergence-free velocity field, followed by a local density correction of the particles. This avoids typical grid artifacts and maintains the Lagrangian nature of SPH by directly transferring pressures onto particles. Our method can be easily integrated with existing SPH techniques such as the incompressible PCISPH method as well as weakly compressible SPH by adding an additional force term. We show that this hybrid method accelerates convergence towards uniform density and permits a significantly larger time step compared to earlier approaches while producing similar results. We demonstrate our approach in a variety of scenarios with significant pressure gradients such as splashing liquids.},
author = {Raveendran, Karthik and Wojtan, Christopher J and Turk, Greg},
editor = {Spencer, Stephen},
location = {Vancouver, Canada},
pages = {33 -- 42},
publisher = {ACM},
title = {{Hybrid smoothed particle hydrodynamics}},
doi = {10.1145/2019406.2019411},
year = {2011},
}
@inproceedings{3301,
abstract = {The chemical master equation is a differential equation describing the time evolution of the probability distribution over the possible “states” of a biochemical system. The solution of this equation is of interest within the systems biology field ever since the importance of the molecular noise has been acknowledged. Unfortunately, most of the systems do not have analytical solutions, and numerical solutions suffer from the curse of dimensionality and therefore need to be approximated. Here, we introduce the concept of tail approximation, which retrieves an approximation of the probabilities in the tail of a distribution from the total probability of the tail and its conditional expectation. This approximation method can then be used to numerically compute the solution of the chemical master equation on a subset of the state space, thus fighting the explosion of the state space, for which this problem is renowned.},
author = {Henzinger, Thomas A and Mateescu, Maria},
publisher = {Tampere International Center for Signal Processing},
title = {{Tail approximation for the chemical master equation}},
year = {2011},
}
@inproceedings{3313,
abstract = {Interpreting an image as a function on a compact subset of the Euclidean plane, we get its scale-space by diffusion, spreading the image over the entire plane. This generates a 1-parameter family of functions alternatively defined as convolutions with a progressively wider Gaussian kernel. We prove that the corresponding 1-parameter family of persistence diagrams has norms that go rapidly to zero as time goes to infinity. This result rationalizes experimental observations about scale-space. We hope this will lead to targeted improvements of related computer vision methods.},
author = {Chen, Chao and Edelsbrunner, Herbert},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
location = {Barcelona, Spain},
publisher = {IEEE},
title = {{Diffusion runs low on persistence fast}},
doi = {10.1109/ICCV.2011.6126271},
year = {2011},
}
@article{3318,
abstract = {Parvalbumin is thought to act in a manner similar to EGTA, but how a slow Ca2+ buffer affects nanodomain-coupling regimes at GABAergic synapses is unclear. Direct measurements of parvalbumin concentration and paired recordings in rodent hippocampus and cerebellum revealed that parvalbumin affects synaptic dynamics only when expressed at high levels. Modeling suggests that, in high concentrations, parvalbumin may exert BAPTA-like effects, modulating nanodomain coupling via competition with local saturation of endogenous fixed buffers.},
author = {Eggermann, Emmanuel and Jonas, Peter M},
journal = {Nature Neuroscience},
pages = {20 -- 22},
publisher = {Nature Publishing Group},
title = {{How the “slow” Ca(2+) buffer parvalbumin affects transmitter release in nanodomain coupling regimes at GABAergic synapses}},
doi = {10.1038/nn.3002},
volume = {15},
year = {2011},
}
@article{3320,
abstract = {Powerful statistical models that can be learned efficiently from large amounts of data are currently revolutionizing computer vision. These models possess a rich internal structure reflecting task-specific relations and constraints. This monograph introduces the reader to the most popular classes of structured models in computer vision. Our focus is discrete undirected graphical models which we cover in detail together with a description of algorithms for both probabilistic inference and maximum a posteriori inference. We discuss separately recently successful techniques for prediction in general structured models. In the second part of this monograph we describe methods for parameter learning where we distinguish the classic maximum likelihood based methods from the more recent prediction-based parameter learning methods. We highlight developments to enhance current models and discuss kernelized models and latent variable models. To make the monograph more practical and to provide links to further study we provide examples of successful application of many methods in the computer vision literature.},
author = {Nowozin, Sebastian and Lampert, Christoph},
journal = {Foundations and Trends in Computer Graphics and Vision},
number = {3-4},
pages = {185 -- 365},
publisher = {Now Publishers},
title = {{Structured learning and prediction in computer vision}},
doi = {10.1561/0600000033},
volume = {6},
year = {2011},
}
@inproceedings{3325,
abstract = {We introduce streaming data string transducers that map input data strings to output data strings in a single left-to-right pass in linear time. Data strings are (unbounded) sequences of data values, tagged with symbols from a finite set, over a potentially infinite data domain that supports only the operations of equality and ordering. The transducer uses a finite set of states, a finite set of variables ranging over the data domain, and a finite set of variables ranging over data strings. At every step, it can make decisions based on the next input symbol, updating its state, remembering the input data value in its data variables, and updating data-string variables by concatenating data-string variables and new symbols formed from data variables, while avoiding duplication. We establish that the problems of checking functional equivalence of two streaming transducers, and of checking whether a streaming transducer satisfies pre/post verification conditions specified by streaming acceptors over input/output data-strings, are in PSPACE. We identify a class of imperative and a class of functional programs, manipulating lists of data items, which can be effectively translated to streaming data-string transducers. The imperative programs dynamically modify a singly-linked heap by changing next-pointers of heap-nodes and by adding new nodes. The main restriction specifies how the next-pointers can be used for traversal. We also identify an expressively equivalent fragment of functional programs that traverse a list using syntactically restricted recursive calls. Our results lead to algorithms for assertion checking and for checking functional equivalence of two programs, written possibly in different programming styles, for commonly used routines such as insert, delete, and reverse.},
author = {Alur, Rajeev and Cerny, Pavol},
location = {Austin, TX, USA},
number = {1},
pages = {599 -- 610},
publisher = {ACM},
title = {{Streaming transducers for algorithmic verification of single pass list processing programs}},
doi = {10.1145/1926385.1926454},
volume = {46},
year = {2011},
}
@article{3382,
abstract = {Dynamic tactile sensing is a fundamental ability to recognize materials and objects. However, while humans are born with partially developed dynamic tactile sensing and quickly master this skill, today's robots remain in their infancy. The development of such a sense requires not only better sensors but the right algorithms to deal with these sensors' data as well. For example, when classifying a material based on touch, the data are noisy, high-dimensional, and contain irrelevant signals as well as essential ones. Few classification methods from machine learning can deal with such problems. In this paper, we propose an efficient approach to infer suitable lower dimensional representations of the tactile data. In order to classify materials based on only the sense of touch, these representations are autonomously discovered using visual information of the surfaces during training. However, accurately pairing vision and tactile samples in real-robot applications is a difficult problem. The proposed approach, therefore, works with weak pairings between the modalities. Experiments show that the resulting approach is very robust and yields significantly higher classification performance based on only dynamic tactile sensing.},
author = {Kroemer, Oliver and Lampert, Christoph and Peters, Jan},
journal = {IEEE Transactions on Robotics},
number = {3},
pages = {545 -- 557},
publisher = {IEEE},
title = {{Learning dynamic tactile sensing with robust vision based training}},
doi = {10.1109/TRO.2011.2121130},
volume = {27},
year = {2011},
}
@article{3387,
abstract = {Background: Supertree methods combine overlapping input trees into a larger supertree. Here, I consider split-based supertree methods that first extract the split information of the input trees and subsequently combine this split information into a phylogeny. Well-known split-based supertree methods are matrix representation with parsimony and matrix representation with compatibility. Combining input trees on the same taxon set, as in the consensus setting, is a well-studied task and it is thus desirable to generalize consensus methods to supertree methods. Results: Here, three variants of majority-rule (MR) supertrees that generalize majority-rule consensus trees are investigated. I provide simple formulas for computing the respective score for bifurcating input- and supertrees. These score computations, together with a heuristic tree search minimizing the scores, were implemented in the Python program PluMiST (Plus- and Minus SuperTrees) available from http://www.cibiv.at/software/plumist. The different MR methods were tested by simulation and on real data sets. The search heuristic was successful in combining compatible input trees. When combining incompatible input trees, especially one variant, MR(-) supertrees, performed well. Conclusions: The presented framework allows for an efficient score computation of three majority-rule supertree variants and input trees. I combined the score computation with a heuristic search over the supertree space. The implementation was tested by simulation and on real data sets and showed promising results. Especially the MR(-) variant seems to be a reasonable score for supertree reconstruction. Generalizing these computations to multifurcating trees is an open problem, which may be tackled using this framework.},
author = {Kupczok, Anne},
journal = {BMC Evolutionary Biology},
number = {205},
publisher = {BioMed Central},
title = {{Split based computation of majority rule supertrees}},
doi = {10.1186/1471-2148-11-205},
volume = {11},
year = {2011},
}
@article{3394,
abstract = {Random genetic drift shifts clines in space, alters their width, and distorts their shape. Such random fluctuations complicate inferences from cline width and position. Notably, the effect of genetic drift on the expected shape of the cline is opposite to the naive (but quite common) misinterpretation of classic results on the expected cline. While random drift on average broadens the overall cline in expected allele frequency, it narrows the width of any particular cline. The opposing effects arise because locally, drift drives alleles to fixation—but fluctuations in position widen the expected cline. The effect of genetic drift can be predicted from standardized variance in allele frequencies, averaged across the habitat: 〈F〉. A cline maintained by spatially varying selection (step change) is expected to be narrower by a factor of relative to the cline in the absence of drift. The expected cline is broader by the inverse of this factor. In a tension zone maintained by underdominance, the expected cline width is narrower by about 1 – 〈F〉 relative to the width in the absence of drift. Individual clines can differ substantially from the expectation, and we give quantitative predictions for the variance in cline position and width. The predictions apply to clines in almost one-dimensional circumstances such as hybrid zones in rivers, deep valleys, or along a coast line and give a guide to what patterns to expect in two dimensions.},
author = {Polechova, Jitka and Barton, Nicholas H},
journal = {Genetics},
number = {1},
pages = {227 -- 235},
publisher = {Genetics Society of America},
title = {{Genetic drift widens the expected cline but narrows the expected cline width}},
doi = {10.1534/genetics.111.129817},
volume = {189},
year = {2011},
}
@article{3399,
abstract = {Context-dependent adjustment of mating tactics can drastically increase the mating success of behaviourally flexible animals. We used the ant Cardiocondyla obscurior as a model system to study adaptive adjustment of male mating tactics. This species shows a male diphenism of wingless fighter males and peaceful winged males. Whereas the wingless males stay and exclusively mate in the maternal colony, the mating behaviour of winged males is plastic. They copulate with female sexuals in their natal nests early in life but later disperse in search for sexuals outside. In this study, we observed the nest-leaving behaviour of winged males under different conditions and found that they adaptively adjust the timing of their dispersal to the availability of mating partners, as well as the presence, and even the type of competitors in their natal nests. In colonies with virgin female queens winged males stayed longest when they were the only male in the nest. They left earlier when mating partners were not available or when other males were present. In the presence of wingless, locally mating fighter males, winged males dispersed earlier than in the presence of docile, winged competitors. This suggests that C. obscurior males are capable of estimating their local breeding chances and adaptively adjust their dispersal behaviour in both an opportunistic and a risk-sensitive way, thus showing hitherto unknown behavioural plasticity in social insect males.},
author = {Cremer, Sylvia and Schrempf, Alexandra and Heinze, Jürgen},
journal = {PLoS One},
number = {3},
publisher = {Public Library of Science},
title = {{Competition and opportunity shape the reproductive tactics of males in the ant Cardiocondyla obscurior}},
doi = {10.1371/journal.pone.0017323},
volume = {6},
year = {2011},
}
@article{3368,
abstract = {Tissue surface tension (TST) is an important mechanical property influencing cell sorting and tissue envelopment. The study by Manning et al. (1) reported on a mathematical model describing TST on the basis of the balance between adhesive and tensile properties of the constituent cells. The model predicts that, in high-adhesion cell aggregates, surface cells will be stretched to maintain the same area of cell–cell contact as interior bulk cells, resulting in an elongated and flattened cell shape. The authors (1) observed flat and elongated cells at the surface of high-adhesion zebrafish germ-layer explants, which they argue are undifferentiated stretched germ-layer progenitor cells, and they use this observation as a validation of their model.},
author = {Krens, Gabriel and Möllmert, Stephanie and Heisenberg, Carl-Philipp J},
journal = {PNAS},
number = {3},
pages = {E9 -- E10},
publisher = {National Academy of Sciences},
title = {{Enveloping cell layer differentiation at the surface of zebrafish germ layer tissue explants}},
doi = {10.1073/pnas.1010767108},
volume = {108},
year = {2011},
}
@article{3370,
abstract = {Supertree methods are widely applied and give rise to new conclusions about phylogenies (e.g., Bininda-Emonds et al. 2007). Although several desiderata for supertree methods exist (Wilkinson, Thorley, et al. 2004), only a few of them have been studied in greater detail; examples include shape bias (Wilkinson et al. 2005) or Pareto properties (Wilkinson et al. 2007). Here I look more closely at two matrix representation methods, matrix representation with compatibility (MRC) and matrix representation with parsimony (MRP). Different null models of random data are studied and the resulting tree shapes are investigated. Thereby I consider unrooted trees, and a bias in tree shape is determined by a tree balance measure. The measure for unrooted trees is a modification of a tree balance measure for rooted trees. I observe that depending on the underlying null model of random data, the methods may resolve conflict in favor of more balanced tree shapes. The analyses refer only to trees with the same taxon set, also known as the consensus setting (e.g., Wilkinson et al. 2007), but I will be able to draw conclusions on how to deal with missing data.},
author = {Kupczok, Anne},
journal = {Systematic Biology},
number = {2},
pages = {218 -- 225},
publisher = {Oxford University Press},
title = {{Consequences of different null models on the tree shape bias of supertree methods}},
doi = {10.1093/sysbio/syq086},
volume = {60},
year = {2011},
}
@article{3375,
abstract = {By exploiting an analogy between population genetics and statistical mechanics, we study the evolution of a polygenic trait under stabilizing selection, mutation and genetic drift. This requires us to track only four macroscopic variables, instead of the distribution of all the allele frequencies that influence the trait. These macroscopic variables are the expectations of the trait mean and its square, the genetic variance, and a measure of heterozygosity; they are derived from a generating function that is in turn derived by maximizing an entropy measure. These four macroscopics are enough to accurately describe the dynamics of the trait mean and of its genetic variance (and in principle of any other quantity). Unlike previous approaches that were based on an infinite series of moments or cumulants, which had to be truncated arbitrarily, our calculations provide a well-defined approximation procedure. We apply the framework to abrupt and gradual changes in the optimum, as well as to changes in the strength of stabilizing selection. Our approximations are surprisingly accurate, even for systems with as few as five loci. We find that when the effects of drift are included, the expected genetic variance is hardly altered by directional selection, even though it fluctuates in any particular instance. We also find hysteresis, showing that even after averaging over the microscopic variables, the macroscopic trajectories retain a memory of the underlying genetic states.},
author = {de Vladar, Harold and Barton, Nicholas H},
journal = {Journal of the Royal Society Interface},
number = {58},
pages = {720 -- 739},
publisher = {Royal Society of London},
title = {{The statistical mechanics of a polygenic character under stabilizing selection mutation and drift}},
doi = {10.1098/rsif.2010.0438},
volume = {8},
year = {2011},
}
@inproceedings{3337,
abstract = {Playing table tennis is a difficult task for robots, especially due to their limitations of acceleration. A key bottleneck is the amount of time needed to reach the desired hitting position and velocity of the racket for returning the incoming ball. Here, it often does not suffice to simply extrapolate the ball's trajectory after the opponent returns it but more information is needed. Humans are able to predict the ball's trajectory based on the opponent's moves and, thus, have a considerable advantage. Hence, we propose to incorporate an anticipation system into robot table tennis players, which enables the robot to react earlier while the opponent is performing the striking movement. Based on visual observation of the opponent's racket movement, the robot can predict the aim of the opponent and adjust its movement generation accordingly. The policies for deciding how and when to react are obtained by reinforcement learning. We conduct experiments with an existing robot player to show that the learned reaction policy can significantly improve the performance of the overall system.},
author = {Wang, Zhikun and Lampert, Christoph and Mülling, Katharina and Schölkopf, Bernhard and Peters, Jan},
location = {San Francisco, USA},
pages = {332 -- 337},
publisher = {IEEE},
title = {{Learning anticipation policies for robot table tennis}},
doi = {10.1109/IROS.2011.6094892},
year = {2011},
}
@inproceedings{3344,
abstract = {Games played on graphs provide the mathematical framework to analyze several important problems in computer science as well as mathematics, such as the synthesis problem of Church, model checking of open reactive systems and many others. On the basis of mode of interaction of the players these games can be classified as follows: (a) turn-based (players make moves in turns); and (b) concurrent (players make moves simultaneously). On the basis of the information available to the players these games can be classified as follows: (a) perfect-information (players have perfect view of the game); and (b) partial-information (players have partial view of the game). In this talk we will consider all these classes of games with reachability objectives, where the goal of one player is to reach a set of target vertices of the graph, and the goal of the opponent player is to prevent the player from reaching the target. We will survey the results for various classes of games, and the results range from linear time decision algorithms to EXPTIME-complete problems to undecidable problems.},
author = {Chatterjee, Krishnendu},
editor = {Delzanno, Giorgo and Potapov, Igor},
location = {Genoa, Italy},
pages = {1 -- 1},
publisher = {Springer},
title = {{Graph games with reachability objectives}},
doi = {10.1007/978-3-642-24288-5_1},
volume = {6945},
year = {2011},
}
@article{3332,
abstract = {Given an algebraic hypersurface O in ℝd, how many simplices are necessary for a simplicial complex isotopic to O? We address this problem and the variant where all vertices of the complex must lie on O. We give asymptotically tight worst-case bounds for algebraic plane curves. Our results gradually improve known bounds in higher dimensions; however, the question for tight bounds remains unsolved for d ≥ 3.},
author = {Kerber, Michael and Sagraloff, Michael},
journal = {Graphs and Combinatorics},
number = {3},
pages = {419 -- 430},
publisher = {Springer},
title = {{A note on the complexity of real algebraic hypersurfaces}},
doi = {10.1007/s00373-011-1020-7},
volume = {27},
year = {2011},
}
@article{3781,
abstract = {We bound the difference in length of two curves in terms of their total curvatures and the Fréchet distance. The bound is independent of the dimension of the ambient Euclidean space, it improves upon a bound by Cohen-Steiner and Edelsbrunner, and it generalizes a result by Fáry and Chakerian.},
author = {Fasy, Brittany Terese},
journal = {Acta Scientiarum Mathematicarum (Szeged)},
number = {1-2},
pages = {359 -- 367},
publisher = {Szegedi Tudományegyetem},
title = {{The difference in length of curves in R^n}},
volume = {77},
year = {2011},
}
@article{490,
abstract = {BioSig is an open source software library for biomedical signal processing. The aim of the BioSig project is to foster research in biomedical signal processing by providing free and open source software tools for many different application areas. Some of the areas where BioSig can be employed are neuroinformatics, brain-computer interfaces, neurophysiology, psychology, cardiovascular systems, and sleep research. Moreover, the analysis of biosignals such as the electroencephalogram (EEG), electrocorticogram (ECoG), electrocardiogram (ECG), electrooculogram (EOG), electromyogram (EMG), or respiration signals is a very relevant element of the BioSig project. Specifically, BioSig provides solutions for data acquisition, artifact processing, quality control, feature extraction, classification, modeling, and data visualization, to name a few. In this paper, we highlight several methods to help students and researchers to work more efficiently with biomedical signals. },
author = {Schlögl, Alois and Vidaurre, Carmen and Sander, Tilmann},
journal = {Computational Intelligence and Neuroscience},
publisher = {Hindawi Publishing Corporation},
title = {{BioSig: The free and open source software library for biomedical signal processing}},
doi = {10.1155/2011/935364},
volume = {2011},
year = {2011},
}
@article{469,
abstract = {Spontaneous release of glutamate is important for maintaining synaptic strength and controlling spike timing in the brain. Mechanisms regulating spontaneous exocytosis remain poorly understood. Extracellular calcium concentration ([Ca2+]o) regulates Ca2+ entry through voltage-activated calcium channels (VACCs) and consequently is a pivotal determinant of action potential-evoked vesicle fusion. Extracellular Ca2+ also enhances spontaneous release, but via unknown mechanisms. Here we report that external Ca2+ triggers spontaneous glutamate release more weakly than evoked release in mouse neocortical neurons. Blockade of VACCs has no effect on the spontaneous release rate or its dependence on [Ca2+]o. Intracellular [Ca2+] slowly increases in a minority of neurons following increases in [Ca2+]o. Furthermore, the enhancement of spontaneous release by extracellular calcium is insensitive to chelation of intracellular calcium by BAPTA. Activation of the calcium-sensing receptor (CaSR), a G-protein-coupled receptor present in nerve terminals, by several specific agonists increased spontaneous glutamate release. The frequency of spontaneous synaptic transmission was decreased in CaSR mutant neurons. The concentration-effect relationship for extracellular calcium regulation of spontaneous release was well described by a combination of CaSR-dependent and CaSR-independent mechanisms. Overall these results indicate that extracellular Ca2+ does not trigger spontaneous glutamate release by simply increasing calcium influx but stimulates CaSR and thereby promotes resting spontaneous glutamate release.},
author = {Vyleta, Nicholas and Smith, Stephen},
journal = {Journal of Neuroscience},
number = {12},
pages = {4593 -- 4606},
publisher = {Society for Neuroscience},
title = {{Spontaneous glutamate release is independent of calcium influx and tonically activated by the calcium-sensing receptor}},
doi = {10.1523/JNEUROSCI.6398-10.2011},
volume = {31},
year = {2011},
}
@misc{5383,
abstract = {We present a new decidable logic called TREX for expressing constraints about imperative tree data structures. In particular, TREX supports a transitive closure operator that can express reachability constraints, which often appear in data structure invariants. We show that our logic is closed under weakest precondition computation, which enables its use for automated software verification. We further show that satisfiability of formulas in TREX is decidable in NP. The low complexity makes it an attractive alternative to more expensive logics such as monadic second-order logic (MSOL) over trees, which have been traditionally used for reasoning about tree data structures.},
author = {Wies, Thomas and Muñiz, Marco and Kuncak, Viktor},
issn = {2664-1690},
pages = {25},
publisher = {IST Austria},
title = {{On an efficient decision procedure for imperative tree data structures}},
doi = {10.15479/AT:IST-2011-0005},
year = {2011},
}
@unpublished{3363,
abstract = {We consider probabilistic automata on infinite words with acceptance defined by safety, reachability, Büchi, coBüchi, and limit-average conditions. We consider quantitative and qualitative decision problems. We present extensions and adaptations of proofs for probabilistic finite automata and present a complete characterization of the decidability and undecidability frontier of the quantitative and qualitative decision problems for probabilistic automata on infinite words.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Tracol, Mathieu},
pages = {19},
publisher = {ArXiv},
title = {{The decidability frontier for probabilistic automata on infinite words}},
year = {2011},
}
@inproceedings{3349,
abstract = {Games on graphs provide a natural model for reactive non-terminating systems. In such games, the interaction of two players on an arena results in an infinite path that describes a run of the system. Different settings are used to model various open systems in computer science, as for instance turn-based or concurrent moves, and deterministic or stochastic transitions. In this paper, we are interested in turn-based games, and specifically in deterministic parity games and stochastic reachability games (also known as simple stochastic games). We present a simple, direct and efficient reduction from deterministic parity games to simple stochastic games: it yields an arena whose size is linear up to a logarithmic factor in the size of the original arena.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
location = {Minori, Italy},
pages = {74 -- 86},
publisher = {EPTCS},
title = {{A reduction from parity games to simple stochastic games}},
doi = {10.4204/EPTCS.54.6},
volume = {54},
year = {2011},
}
@inproceedings{3351,
abstract = {In two-player games on graphs, the players construct an infinite path through the game graph and get a reward computed by a payoff function over infinite paths. Over weighted graphs, the typical and most studied payoff functions compute the limit-average or the discounted sum of the rewards along the path. Besides their simple definition, these two payoff functions enjoy the property that memoryless optimal strategies always exist. In an attempt to construct other simple payoff functions, we define a class of payoff functions which compute an (infinite) weighted average of the rewards. This new class contains both the limit-average and the discounted sum functions, and we show that they are the only members of this class which induce memoryless optimal strategies, showing that there are essentially no other simple payoff functions.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Singh, Rohit},
editor = {Owe, Olaf and Steffen, Martin and Telle, Jan Arne},
location = {Oslo, Norway},
pages = {148 -- 159},
publisher = {Springer},
title = {{On memoryless quantitative objectives}},
doi = {10.1007/978-3-642-22953-4_13},
volume = {6914},
year = {2011},
}
@inproceedings{3356,
abstract = {There has recently been a significant effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions, aiming for a general and flexible framework for quantitative-oriented specifications. At the heart of quantitative objectives lies the accumulation of values along a computation. It is either the accumulated summation, as with the energy objectives, or the accumulated average, as with the mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point of time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire computation. We study the border of decidability for extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities by prefix-accumulation assertions and extending LTL with path-accumulation assertions, result in temporal logics whose model-checking problem is decidable. The extended logics allow us to significantly extend the currently known energy and mean-payoff objectives. Moreover, the prefix-accumulation assertions may be refined with "controlled-accumulation", allowing one, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that the fragment we point to is, in a sense, the maximal logic whose extension with prefix-accumulation assertions permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, and in particular CTL and LTL, makes the problem undecidable.},
author = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A and Kupferman, Orna},
location = {Toronto, Canada},
publisher = {IEEE},
title = {{Temporal specifications with accumulative values}},
doi = {10.1109/LICS.2011.33},
year = {2011},
}
@inproceedings{3782,
abstract = {In cortex surface segmentation, the extracted surface is required to have a particular topology, namely, a two-sphere. We present a new method for removing topology noise of a curve or surface within the level set framework, and thus produce a cortical surface with correct topology. We define a new energy term which quantifies topology noise. We then show how to minimize this term by computing its functional derivative with respect to the level set function. This method differs from existing methods in that it is inherently continuous and not digital; and in the way that our energy directly relates to the topology of the underlying curve or surface, versus existing knot-based measures which are related in a more indirect fashion. The proposed flow is validated empirically.},
author = {Chen, Chao and Freedman, Daniel},
booktitle = {Conference proceedings MCV 2010},
location = {Beijing, China},
pages = {31 -- 42},
publisher = {Springer},
title = {{Topology noise removal for curve and surface evolution}},
doi = {10.1007/978-3-642-18421-5_4},
volume = {6533},
year = {2010},
}
@article{3787,
abstract = {DNA samples were extracted from ethanol and formalin-fixed decapod crustacean tissue using a new method based on Tetramethylsilane (TMS)-Chelex. It is shown that neither an indigestible matrix of cross-linked protein nor soluble PCR inhibitors impede PCR success when dealing with formalin-fixed material. Instead, amplification success from formalin-fixed tissue appears to depend on the presence of unmodified DNA in the extracted sample. A staining method that facilitates the targeting of samples with a high content of unmodified DNA is provided.},
author = {Palero, Ferran and Hall, Sally and Clark, Paul and Johnston, David and Mackenzie Dodds, Jackie and Thatje, Sven},
journal = {Scientia Marina},
number = {3},
pages = {465 -- 470},
publisher = {Consejo Superior de Investigaciones Científicas},
title = {{DNA extraction from formalin-fixed tissue: new light from the deep sea}},
doi = {10.3989/scimar.2010.74n3465},
volume = {74},
year = {2010},
}
@inproceedings{3794,
abstract = {We study the problem of multimodal dimensionality reduction assuming that data samples can be missing at training time, and not all data modalities may be present at application time. Maximum covariance analysis, as a generalization of PCA, has many desirable properties, but its application to practical problems is limited by its need for perfectly paired data. We overcome this limitation by a latent variable approach that allows working with weakly paired data and is still able to efficiently process large datasets using standard numerical routines. The resulting weakly paired maximum covariance analysis often finds better representations than alternative methods, as we show in two exemplary tasks: texture discrimination and transfer learning.},
author = {Lampert, Christoph and Krömer, Oliver},
location = {Heraklion, Crete, Greece},
pages = {566 -- 579},
publisher = {Springer},
title = {{Weakly-paired maximum covariance analysis for multimodal dimensionality reduction and transfer learning}},
doi = {10.1007/978-3-642-15552-9_41},
volume = {6312},
year = {2010},
}
@article{3718,
abstract = {Long-term depression (LTD) is a form of synaptic plasticity that may contribute to information storage in the central nervous system. Here we report that LTD can be elicited in layer 5 pyramidal neurons of the rat prefrontal cortex by pairing low frequency stimulation with a modest postsynaptic depolarization. The induction of LTD required the activation of both metabotropic glutamate receptors of the mGlu1 subtype and voltage-sensitive Ca(2+) channels (VSCCs) of the T/R, P/Q and N types, leading to the stimulation of intracellular inositol trisphosphate (IP3) receptors by IP3 and Ca(2+). The subsequent release of Ca(2+) from intracellular stores activated the protein phosphatase cascade involving calcineurin and protein phosphatase 1. The activation of purinergic P2Y(1) receptors blocked LTD. This effect was prevented by P2Y(1) receptor antagonists and was absent in mice lacking P2Y(1) but not P2Y(2) receptors. We also found that activation of P2Y(1) receptors inhibits Ca(2+) transients via VSCCs in the apical dendrites and spines of pyramidal neurons. In addition, we show that the release of ATP under hypoxia is able to inhibit LTD by acting on postsynaptic P2Y(1) receptors. In conclusion, these data suggest that the reduction of Ca(2+) influx via VSCCs caused by the activation of P2Y(1) receptors by ATP is the possible mechanism for the inhibition of LTD in prefrontal cortex.},
author = {Guzmán, José and Schmidt, Hartmut and Franke, Heike and Krügel, Ute and Eilers, Jens and Illes, Peter and Gerevich, Zoltan},
journal = {Neuropharmacology},
number = {6},
pages = {406 -- 415},
publisher = {Elsevier},
title = {{P2Y1 receptors inhibit long-term depression in the prefrontal cortex}},
doi = {10.1016/j.neuropharm.2010.05.013},
volume = {59},
year = {2010},
}
@article{3833,
author = {Jonas, Peter M and Hefft, Stefan},
journal = {The European Journal of Neuroscience},
number = {7},
pages = {1194 -- 1195},
publisher = {Wiley-Blackwell},
title = {{GABA release at terminals of CCK-interneurons: synchrony, asynchrony and modulation by cannabinoid receptors (commentary on Ali & Todorova)}},
doi = {10.1111/j.1460-9568.2010.07189.x},
volume = {31},
year = {2010},
}
@inproceedings{3838,
abstract = {We present a numerical approximation technique for the analysis of continuous-time Markov chains that describe networks of biochemical reactions and play an important role in the stochastic modeling of biological systems. Our approach is based on the construction of a stochastic hybrid model in which certain discrete random variables of the original Markov chain are approximated by continuous deterministic variables. We compute the solution of the stochastic hybrid model using a numerical algorithm that discretizes time and in each step performs a mutual update of the transient probability distribution of the discrete stochastic variables and the values of the continuous deterministic variables. We implemented the algorithm and we demonstrate its usefulness and efficiency on several case studies from systems biology.},
author = {Henzinger, Thomas A and Mateescu, Maria and Mikeev, Linar and Wolf, Verena},
location = {Trento, Italy},
pages = {55 -- 65},
publisher = {Springer},
title = {{Hybrid numerical solution of the chemical master equation}},
doi = {10.1145/1839764.1839772},
year = {2010},
}
@inproceedings{3840,
abstract = {Classical formalizations of systems and properties are boolean: given a system and a property, the property is either true or false of the system. Correspondingly, classical methods for system analysis determine the truth value of a property, preferably giving a proof if the property is true, and a counterexample if the property is false; classical methods for system synthesis construct a system for which a property is true; classical methods for system transformation, composition, and abstraction aim to preserve the truth of properties. The boolean view is prevalent even if the system, the property, or both refer to numerical quantities, such as the times or probabilities of events. For example, a timed automaton either satisfies or violates a formula of a real-time logic; a stochastic process either satisfies or violates a formula of a probabilistic logic. The classical black-and-white view partitions the world into "correct" and "incorrect" systems, offering few nuances. In reality, of several systems that satisfy a property in the boolean sense, often some are more desirable than others, and of the many systems that violate a property, usually some are less objectionable than others. For instance, among the systems that satisfy the response property that every request be granted, we may prefer systems that grant requests quickly (the quicker, the better), or we may prefer systems that issue few unnecessary grants (the fewer, the better); and among the systems that violate the response property, we may prefer systems that serve many initial requests (the more, the better), or we may prefer systems that serve many requests in the long run (the greater the fraction of served to unserved requests, the better). Formally, while a boolean notion of correctness is given by a preorder on systems and properties, a quantitative notion of correctness is defined by a directed metric on systems and properties, where the distance between a system and a property provides a measure of "fit" or "desirability." There are many ways how such distances can be defined. In a linear-time framework, one assigns numerical values to individual behaviors before assigning values to systems and properties, which are sets of behaviors. For example, the value of a single behavior may be a discounted value, which is largely determined by a prefix of the behavior, e.g., by the number of requests that are granted before the first request that is not granted; or a limit value, which is independent of any finite prefix. A limit value may be an average, such as the average response time over an infinite sequence of requests and grants, or a supremum, such as the worst-case response time. Similarly, the value of a set of behaviors may be an extremum or an average across the values of all behaviors in the set: in this way one can measure the worst of all possible average-case response times, or the average of all possible worst-case response times, etc. Accordingly, the distance between two sets of behaviors may be defined as the worst or average difference between the values of corresponding behaviors. In summary, we propagate replacing boolean specifications for the correctness of systems with quantitative measures for the desirability of systems. In quantitative analysis, the aim is to compute the distance between a system and a property (or between two systems, or two properties); in quantitative synthesis, the objective is to construct a system that has minimal distance from a given property. 
Multiple quantitative measures can be prioritized (e.g., combined lexicographically into a single measure) or studied along the Pareto curve. Quantitative transformations, compositions, and abstractions of systems are useful if they allow us to bound the induced change in distance from a property. We present some initial results in some of these directions. We also give some potential applications, which not only generalize traditional correctness concerns in the functional, timed, and probabilistic domains, but also capture such system measures as resource use, performance, cost, reliability, and robustness.},
author = {Henzinger, Thomas A},
location = {Madrid, Spain},
number = {1},
pages = {157 -- 158},
publisher = {ACM},
title = {{From boolean to quantitative notions of correctness}},
doi = {10.1145/1706299.1706319},
volume = {45},
year = {2010},
}
@inproceedings{3852,
abstract = {We introduce two-level discounted games played by two players on a perfect-information stochastic game graph. The upper level game is a discounted game and the lower level game is an undiscounted reachability game. Two-level games model hierarchical and sequential decision making under uncertainty across different time scales. We show the existence of pure memoryless optimal strategies for both players and an ordered field property for such games. We show that if there is only one player (Markov decision processes), then the values can be computed in polynomial time. It follows that whether the value of a player is equal to a given rational constant in two-level discounted games can be decided in NP ∩ coNP. We also give an alternate strategy improvement algorithm to compute the value.},
author = {Chatterjee, Krishnendu and Majumdar, Ritankar},
location = {Minori, Italy},
pages = {22 -- 29},
publisher = {EPTCS},
title = {{Discounting in games across time scales}},
doi = {10.4204/EPTCS.25.6},
volume = {25},
year = {2010},
}
@inproceedings{3845,
abstract = {This paper presents Aligators, a tool for the generation of universally quantified array invariants. Aligators leverages recurrence solving and algebraic techniques to carry out inductive reasoning over array content. The Aligators’ loop extraction module allows treatment of multi-path loops by exploiting their commutativity and serializability properties. Our experience in applying Aligators on a collection of loops from open source software projects indicates the applicability of recurrence and algebraic solving techniques for reasoning about arrays.},
author = {Henzinger, Thomas A and Hottelier, Thibaud and Kovács, Laura and Rybalchenko, Andrey},
location = {Yogyakarta, Indonesia},
pages = {348 -- 356},
publisher = {Springer},
title = {{Aligators for arrays}},
doi = {10.1007/978-3-642-16242-8_25},
volume = {6397},
year = {2010},
}
@article{4134,
abstract = {All species are restricted in their distribution. Currently, ecological models can only explain such limits if patches vary in quality, leading to asymmetrical dispersal, or if genetic variation is too low at the margins for adaptation. However, population genetic models suggest that the increase in genetic variance resulting from dispersal should allow adaptation to almost any ecological gradient. Clearly therefore, these models miss something that prevents evolution in natural populations. We developed an individual-based simulation to explore stochastic effects in these models. At high carrying capacities, our simulations largely agree with deterministic predictions. However, when carrying capacity is low, the population fails to establish for a wide range of parameter values where adaptation was expected from previous models. Stochastic or transient effects appear critical around the boundaries in parameter space between simulation behaviours. Dispersal, gradient steepness, and population density emerge as key factors determining adaptation on an ecological gradient. },
author = {Bridle, Jon and Polechova, Jitka and Kawata, Masakado and Butlin, Roger},
journal = {Ecology Letters},
number = {4},
pages = {485 -- 494},
publisher = {Wiley-Blackwell},
title = {{Why is adaptation prevented at ecological margins? New insights from individual-based simulations}},
doi = {10.1111/j.1461-0248.2010.01442.x},
volume = {13},
year = {2010},
}
@inproceedings{4362,
abstract = {Software transactional memories (STMs) promise simple and efficient concurrent programming. Several correctness properties have been proposed for STMs. Based on a bounded conflict graph algorithm for verifying correctness of STMs, we develop TRACER, a tool for runtime verification of STM implementations. The novelty of TRACER lies in the way it combines coarse and precise runtime analyses to guarantee sound and complete verification in an efficient manner. We implement TRACER in the TL2 STM implementation. We evaluate the performance of TRACER on STAMP benchmarks. While a precise runtime verification technique based on conflict graphs results in an average slowdown of 60x, the two-level approach of TRACER performs complete verification with an average slowdown of around 25x across different benchmarks.},
author = {Singh, Vasu},
editor = {Sokolsky, Oleg and Rosu, Grigore and Tilmann, Nikolai and Barringer, Howard and Falcone, Ylies and Finkbeiner, Bernd and Havelund, Klaus and Lee, Insup and Pace, Gordon},
location = {St. Julians, Malta},
pages = {421 -- 435},
publisher = {Springer},
title = {{Runtime verification for software transactional memories}},
doi = {10.1007/978-3-642-16612-9_32},
volume = {6418},
year = {2010},
}
@inproceedings{4381,
abstract = {Cloud computing aims to give users virtually unlimited pay-per-use computing resources without the burden of managing the underlying infrastructure. We claim that, in order to realize the full potential of cloud computing, the user must be presented with a pricing model that offers flexibility at the requirements level, such as a choice between different degrees of execution speed, and the cloud provider must be presented with a programming model that offers flexibility at the execution level, such as a choice between different scheduling policies. In such a flexible framework, with each job, the user purchases a virtual computer with the desired speed and cost characteristics, and the cloud provider can optimize the utilization of resources across a stream of jobs from different users. We designed a flexible framework to test our hypothesis, which is called FlexPRICE (Flexible Provisioning of Resources in a Cloud Environment) and works as follows. A user presents a job to the cloud. The cloud finds different schedules to execute the job and presents a set of quotes to the user in terms of price and duration for the execution. The user then chooses a particular quote and the cloud is obliged to execute the job according to the chosen quote. FlexPRICE thus hides the complexity of the actual scheduling decisions from the user, but still provides enough flexibility to meet the user's actual demands. We implemented FlexPRICE in a simulator called PRICES that allows us to experiment with our framework. We observe that FlexPRICE provides a wide range of execution options, from fast and expensive to slow and cheap, for the whole spectrum of data-intensive and computation-intensive jobs. We also observe that the set of quotes computed by FlexPRICE does not vary as the number of simultaneous jobs increases.},
author = {Henzinger, Thomas A and Tomar, Anmol and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
location = {Miami, USA},
pages = {83 -- 90},
publisher = {IEEE},
title = {{FlexPRICE: Flexible provisioning of resources in a cloud environment}},
doi = {10.1109/CLOUD.2010.71},
year = {2010},
}
@inproceedings{489,
abstract = {Graph games of infinite length are a natural model for open reactive processes: one player represents the controller, trying to ensure a given specification, and the other represents a hostile environment. The evolution of the system depends on the decisions of both players, supplemented by chance. In this work, we focus on the notion of randomised strategy. More specifically, we show that three natural definitions may lead to very different results: in the most general cases, an almost-surely winning situation may become almost-surely losing if the player is only allowed to use a weaker notion of strategy. In more reasonable settings, translations exist, but they require infinite memory, even in simple cases. Finally, some traditional problems become undecidable for the strongest type of strategies.},
author = {Cristau, Julien and David, Claire and Horn, Florian},
booktitle = {Proceedings of GandALF 2010},
location = {Minori, Amalfi Coast, Italy},
pages = {30 -- 39},
publisher = {Open Publishing Association},
title = {{How do we remember the past in randomised strategies? }},
doi = {10.4204/EPTCS.25.7},
volume = {25},
year = {2010},
}
@inproceedings{3857,
abstract = {We consider probabilistic automata on infinite words with acceptance defined by safety, reachability, Büchi, coBüchi, and limit-average conditions. We consider quantitative and qualitative decision problems. We present extensions and adaptations of proofs for probabilistic finite automata and present an almost complete characterization of the decidability and undecidability frontier of the quantitative and qualitative decision problems for probabilistic automata on infinite words.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A},
location = {Singapore, Singapore},
pages = {1 -- 16},
publisher = {Springer},
title = {{Probabilistic Automata on infinite words: decidability and undecidability results}},
doi = {10.1007/978-3-642-15643-4_1},
volume = {6252},
year = {2010},
}
@misc{5391,
abstract = {Concurrent data structures with fine-grained synchronization are notoriously difficult to implement correctly. The difficulty of reasoning about these implementations does not stem from the number of variables or the program size, but rather from the large number of possible interleavings. These implementations are therefore prime candidates for model checking. We introduce an algorithm for verifying linearizability of singly-linked heap-based concurrent data structures. We consider a model consisting of an unbounded heap where each node consists of an element from an unbounded data domain, with a restricted set of operations for testing and updating pointers and data elements. Our main result is that linearizability is decidable for programs that invoke a fixed number of methods, possibly in parallel. This decidable fragment covers many of the common implementation techniques — fine-grained locking, lazy synchronization, and lock-free synchronization. We also show how the technique can be used to verify optimistic implementations with the help of programmer annotations. We developed a verification tool CoLT and evaluated it on a representative sample of Java implementations of the concurrent set data structure. The tool verified linearizability of a number of implementations, found a known error in a lock-free implementation and proved that the corrected version is linearizable.},
author = {Cerny, Pavol and Radhakrishna, Arjun and Zufferey, Damien and Chaudhuri, Swarat and Alur, Rajeev},
issn = {2664-1690},
pages = {27},
publisher = {IST Austria},
title = {{Model checking of linearizability of concurrent list implementations}},
doi = {10.15479/AT:IST-2010-0001},
year = {2010},
}
@inproceedings{3864,
abstract = {Often one has a preference order among the different systems that satisfy a given specification. Under a probabilistic assumption about the possible inputs, such a preference order is naturally expressed by a weighted automaton, which assigns to each word a value, such that a system is preferred if it generates a higher expected value. We solve the following optimal-synthesis problem: given an omega-regular specification, a Markov chain that describes the distribution of inputs, and a weighted automaton that measures how well a system satisfies the given specification under the given input assumption, synthesize a system that optimizes the measured value. For safety specifications and measures that are defined by mean-payoff automata, the optimal-synthesis problem amounts to finding a strategy in a Markov decision process (MDP) that is optimal for a long-run average reward objective, which can be done in polynomial time. For general omega-regular specifications, the solution rests on a new, polynomial-time algorithm for computing optimal strategies in MDPs with mean-payoff parity objectives. We present some experimental results showing optimal systems that were automatically generated in this way.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Jobstmann, Barbara and Singh, Rohit},
location = {Edinburgh, United Kingdom},
pages = {380 -- 395},
publisher = {Springer},
title = {{Measuring and synthesizing systems in probabilistic environments}},
doi = {10.1007/978-3-642-14295-6_34},
volume = {6174},
year = {2010},
}
@inproceedings{4393,
abstract = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of “fit” or “desirability.” We extend the simulation preorder to the quantitative setting, by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the implementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
location = {Paris, France},
pages = {235 -- 268},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Simulation distances}},
doi = {10.1007/978-3-642-15375-4_18},
volume = {6269},
year = {2010},
}
@misc{5389,
abstract = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of “fit” or “desirability.” We extend the simulation preorder to the quantitative setting, by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the implementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
issn = {2664-1690},
pages = {24},
publisher = {IST Austria},
title = {{Simulation distances}},
doi = {10.15479/AT:IST-2010-0003},
year = {2010},
}
@article{3303,
abstract = {Biological traits result in part from interactions between different genetic loci. This can lead to sign epistasis, in which a beneficial adaptation involves a combination of individually deleterious or neutral mutations; in this case, a population must cross a “fitness valley” to adapt. Recombination can assist this process by combining mutations from different individuals or retard it by breaking up the adaptive combination. Here, we analyze the simplest fitness valley, in which an adaptation requires one mutation at each of two loci to provide a fitness benefit. We present a theoretical analysis of the effect of recombination on the valley-crossing process across the full spectrum of possible parameter regimes. We find that low recombination rates can speed up valley crossing relative to the asexual case, while higher recombination rates slow down valley crossing, with the transition between the two regimes occurring when the recombination rate between the loci is approximately equal to the selective advantage provided by the adaptation. In large populations, if the recombination rate is high and selection against single mutants is substantial, the time to cross the valley grows exponentially with population size, effectively meaning that the population cannot acquire the adaptation. Recombination at the optimal (low) rate can reduce the valley-crossing time by up to several orders of magnitude relative to that in an asexual population. },
author = {Weissman, Daniel and Feldman, Marcus and Fisher, Daniel},
journal = {Genetics},
number = {4},
pages = {1389 -- 1410},
publisher = {Genetics Society of America},
title = {{The rate of fitness-valley crossing in sexual populations}},
doi = {10.1534/genetics.110.123240},
volume = {186},
year = {2010},
}
@inproceedings{3719,
abstract = {The induction of a signaling pathway is characterized by transient complex formation and mutual posttranslational modification of proteins. To faithfully capture this combinatorial process in a mathematical model is an important challenge in systems biology. Exploiting the limited context on which most binding and modification events are conditioned, attempts have been made to reduce the combinatorial complexity by quotienting the reachable set of molecular species into species aggregates while preserving the deterministic semantics of the thermodynamic limit. Recently we proposed a quotienting that also preserves the stochastic semantics and that is complete in the sense that the semantics of individual species can be recovered from the aggregate semantics. In this paper we prove that this quotienting yields a sufficient condition for weak lumpability and that it gives rise to a backward Markov bisimulation between the original and aggregated transition system. We illustrate the framework on a case study of the EGF/insulin receptor crosstalk.},
author = {Feret, Jérôme and Henzinger, Thomas A and Koeppl, Heinz and Petrov, Tatjana},
location = {Jena, Germany},
pages = {142--161},
publisher = {Open Publishing Association},
title = {{Lumpability abstractions of rule-based systems}},
volume = {40},
year = {2010},
}
@article{3776,
abstract = {The prevalence of recombination in eukaryotes poses one of the most puzzling questions in biology. The most compelling general explanation is that recombination facilitates selection by breaking down the negative associations generated by random drift (i.e. Hill-Robertson interference, HRI). I classify the effects of HRI owing to: deleterious mutation, balancing selection and selective sweeps on: neutral diversity, rates of adaptation and the mutation load. These effects are mediated primarily by the density of deleterious mutations and of selective sweeps. Sequence polymorphism and divergence suggest that these rates may be high enough to cause significant interference even in genomic regions of high recombination. However, neither seems able to generate enough variance in fitness to select strongly for high rates of recombination. It is plausible that spatial and temporal fluctuations in selection generate much more fitness variance, and hence selection for recombination, than can be explained by uniformly deleterious mutations or species-wide selective sweeps.},
author = {Barton, Nicholas H},
journal = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
number = {1552},
pages = {2559 -- 2569},
publisher = {Royal Society},
title = {{Genetic linkage and natural selection}},
doi = {10.1098/rstb.2010.0106},
volume = {365},
year = {2010},
}
@article{3783,
abstract = {MICROSATELIGHT is a Perl/Tk pipeline with a graphical user interface that facilitates several tasks when scoring microsatellites. It implements new subroutines in R and PERL and takes advantage of features provided by previously developed freeware. MICROSATELIGHT takes raw genotype data and automates the peak identification through PeakScanner. The PeakSelect subroutine assigns peaks to different microsatellite markers according to their multiplex group, fluorochrome type, and size range. After peak selection, binning of alleles can be carried out 1) automatically through AlleloBin or 2) by manual bin definition through Binator. In both cases, several features for quality checking and further binning improvement are provided. The genotype table can then be converted into input files for several population genetics programs through CREATE. Finally, Hardy–Weinberg equilibrium tests and confidence intervals for null allele frequency can be obtained through GENEPOP. MICROSATELIGHT is the only freely available public-domain software that facilitates full multiplex microsatellite scoring, from electropherogram files to user-defined text files to be used with population genetics software. MICROSATELIGHT has been created for the Windows XP operating system and has been successfully tested under Windows 7. It is available at http://sourceforge.net/projects/microsatelight/.},
author = {Palero, Ferran and González Candelas, Fernando and Pascual, Marta},
journal = {Journal of Heredity},
number = {2},
pages = {247 -- 249},
publisher = {Oxford University Press},
title = {{Microsatelight – Pipeline to expedite microsatellite analysis}},
doi = {10.1093/jhered/esq111},
volume = {102},
year = {2010},
}
@article{3788,
abstract = {Cell sorting is a widespread phenomenon pivotal to the early development of multicellular organisms. In vitro cell sorting studies have been instrumental in revealing the cellular properties driving this process. However, these studies have as yet been limited to two-dimensional analysis of three-dimensional cell sorting events. Here we describe a method to record the sorting of primary zebrafish ectoderm and mesoderm germ layer progenitor cells in three dimensions over time, and quantitatively analyze their sorting behavior using an order parameter related to heterotypic interface length. We investigate the cell population size dependence of sorted aggregates and find that the germ layer progenitor cells engulfed in the final configuration display a relationship between total interfacial length and system size according to a simple geometrical argument, subject to a finite-size effect.},
author = {Klopper, Abigail and Krens, Gabriel and Grill, Stephan and Heisenberg, Carl-Philipp J},
journal = {The European Physical Journal E: Soft Matter and Biological Physics},
number = {2},
pages = {99 -- 103},
publisher = {Springer},
title = {{Finite-size corrections to scaling behavior in sorted cell aggregates}},
doi = {10.1140/epje/i2010-10642-y},
volume = {33},
year = {2010},
}
@article{3790,
abstract = {Cell shape and motility are primarily controlled by cellular mechanics. The attachment of the plasma membrane to the underlying actomyosin cortex has been proposed to be important for cellular processes involving membrane deformation. However, little is known about the actual function of membrane-to-cortex attachment (MCA) in cell protrusion formation and migration, in particular in the context of the developing embryo. Here, we use a multidisciplinary approach to study MCA in zebrafish mesoderm and endoderm (mesendoderm) germ layer progenitor cells, which migrate using a combination of different protrusion types, namely, lamellipodia, filopodia, and blebs, during zebrafish gastrulation. By interfering with the activity of molecules linking the cortex to the membrane and measuring resulting changes in MCA by atomic force microscopy, we show that reducing MCA in mesendoderm progenitors increases the proportion of cellular blebs and reduces the directionality of cell migration. We propose that MCA is a key parameter controlling the relative proportions of different cell protrusion types in mesendoderm progenitors, and thus is key in controlling directed migration during gastrulation.},
author = {Diz Muñoz, Alba and Krieg, Michael and Bergert, Martin and Ibarlucea Benitez, Itziar and Müller, Daniel and Paluch, Ewa and Heisenberg, Carl-Philipp J},
journal = {PLoS Biology},
number = {11},
publisher = {Public Library of Science},
title = {{Control of directed cell migration in vivo by membrane-to-cortex attachment}},
doi = {10.1371/journal.pbio.1000544},
volume = {8},
year = {2010},
}
@inbook{3795,
abstract = {The (apparent) contour of a smooth mapping from a 2-manifold to the plane, f: M → R^2, is the set of critical values, that is, the image of the points at which the gradients of the two component functions are linearly dependent. Assuming M is compact and orientable and measuring difference with the erosion distance, we prove that the contour is stable.},
author = {Edelsbrunner, Herbert and Morozov, Dmitriy and Patel, Amit},
booktitle = {Topological Data Analysis and Visualization: Theory, Algorithms and Applications},
pages = {27 -- 42},
publisher = {Springer},
title = {{The stability of the apparent contour of an orientable 2-manifold}},
doi = {10.1007/978-3-642-15014-2_3},
year = {2010},
}
@article{3834,
abstract = {Background
The chemical master equation (CME) is a system of ordinary differential equations that describes the evolution of a network of chemical reactions as a stochastic process. Its solution yields the probability density vector of the system at each point in time. Solving the CME numerically is in many cases computationally expensive or even infeasible as the number of reachable states can be very large or infinite. We introduce the sliding window method, which computes an approximate solution of the CME by performing a sequence of local analysis steps. In each step, only a manageable subset of states is considered, representing a "window" into the state space. In subsequent steps, the window follows the direction in which the probability mass moves, until the time period of interest has elapsed. We construct the window based on a deterministic approximation of the future behavior of the system by estimating upper and lower bounds on the populations of the chemical species.
Results
In order to show the effectiveness of our approach, we apply it to several examples previously described in the literature. The experimental results show that the proposed method speeds up the analysis considerably, compared to a global analysis, while still providing high accuracy.
Conclusions
The sliding window method is a novel approach to address the performance problems of numerical algorithms for the solution of the chemical master equation. The method efficiently approximates the probability distributions at the time points of interest for a variety of chemically reacting systems, including systems for which no upper bound on the population sizes of the chemical species is known a priori.},
author = {Wolf, Verena and Goel, Rushil and Mateescu, Maria and Henzinger, Thomas A},
journal = {BMC Systems Biology},
number = {42},
pages = {1 -- 19},
publisher = {BioMed Central},
title = {{Solving the chemical master equation using sliding windows}},
doi = {10.1186/1752-0509-4-42},
volume = {4},
year = {2010},
}
@inproceedings{3839,
abstract = {We present a loop property generation method for loops iterating over multi-dimensional arrays. When used on matrices, our method is able to infer their shapes (also called types), such as upper-triangular, diagonal, etc. To generate loop properties, we first transform a nested loop iterating over a multi-dimensional array into an equivalent collection of unnested loops. Then, we infer quantified loop invariants for each unnested loop using a generalization of a recurrence-based invariant generation technique. These loop invariants give us conditions on matrices from which we can derive matrix types automatically using theorem provers. Invariant generation is implemented in the software package Aligator and types are derived by theorem provers and SMT solvers, including Vampire and Z3. When run on the Java matrix package JAMA, our tool was able to infer automatically all matrix types describing the matrix shapes guaranteed by JAMA’s API.},
author = {Henzinger, Thomas A and Hottelier, Thibaud and Kovács, Laura and Voronkov, Andrei},
location = {Madrid, Spain},
pages = {163 -- 179},
publisher = {Springer},
title = {{Invariant and type inference for matrices}},
doi = {10.1007/978-3-642-11319-2_14},
volume = {5944},
year = {2010},
}
@inproceedings{3853,
abstract = {Quantitative languages are an extension of boolean languages that assign to each word a real number. Mean-payoff automata are finite automata with numerical weights on transitions that assign to each infinite path the long-run average of the transition weights. When the mode of branching of the automaton is deterministic, nondeterministic, or alternating, the corresponding class of quantitative languages is not robust as it is not closed under the pointwise operations of max, min, sum, and numerical complement. Nondeterministic and alternating mean-payoff automata are not decidable either, as the quantitative generalization of the problems of universality and language inclusion is undecidable. We introduce a new class of quantitative languages, defined by mean-payoff automaton expressions, which is robust and decidable: it is closed under the four pointwise operations, and we show that all decision problems are decidable for this class. Mean-payoff automaton expressions subsume deterministic mean-payoff automata, and we show that they have expressive power incomparable to nondeterministic and alternating mean-payoff automata. We also present for the first time an algorithm to compute the distance between two quantitative languages, and in our case the quantitative languages are given as mean-payoff automaton expressions.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Edelsbrunner, Herbert and Henzinger, Thomas A and Rannou, Philippe},
location = {Paris, France},
pages = {269 -- 283},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Mean-payoff automaton expressions}},
doi = {10.1007/978-3-642-15375-4_19},
volume = {6269},
year = {2010},
}
@inproceedings{3858,
abstract = {We consider two-player zero-sum games on graphs. On the basis of the information available to the players these games can be classified as follows: (a) partial-observation (both players have partial view of the game); (b) one-sided partial-observation (one player has partial-observation and the other player has complete-observation); and (c) complete-observation (both players have complete view of the game). We survey the complexity results for the problem of deciding the winner in various classes of partial-observation games with ω-regular winning conditions specified as parity objectives. We present a reduction from the class of parity objectives that depend on the sequence of states of the game to the sub-class of parity objectives that only depend on the sequence of observations. We also establish that partial-observation acyclic games are PSPACE-complete.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
editor = {Fermüller, Christian and Voronkov, Andrei},
location = {Yogyakarta, Indonesia},
pages = {1 -- 14},
publisher = {Springer},
title = {{The complexity of partial-observation parity games}},
doi = {10.1007/978-3-642-16242-8_1},
volume = {6397},
year = {2010},
}
@inproceedings{3860,
abstract = {In mean-payoff games, the objective of the protagonist is to ensure that the limit average of an infinite sequence of numeric weights is nonnegative. In energy games, the objective is to ensure that the running sum of weights is always nonnegative. Generalized mean-payoff and energy games replace individual weights by tuples, and the limit average (resp. running sum) of each coordinate must be (resp. remain) nonnegative. These games have applications in the synthesis of resource-bounded processes with multiple resources. We prove the finite-memory determinacy of generalized energy games and show the inter-reducibility of generalized mean-payoff and energy games for finite-memory strategies. We also improve the computational complexity for solving both classes of games with finite-memory strategies: while the previously best known upper bound was EXPSPACE, and no lower bound was known, we give an optimal coNP-complete bound. For memoryless strategies, we show that the problem of deciding the existence of a winning strategy for the protagonist is NP-complete.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Henzinger, Thomas A and Raskin, Jean},
location = {Chennai, India},
pages = {505 -- 516},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Generalized mean-payoff and energy games}},
doi = {10.4230/LIPIcs.FSTTCS.2010.505},
volume = {8},
year = {2010},
}
@inproceedings{3865,
abstract = {We introduce a technique for debugging multi-threaded C programs and analyzing the impact of source code changes, and its implementation in the prototype tool DIRECT. Our approach uses a combination of source code instrumentation and runtime management. The source code along with a test harness is instrumented to monitor Operating System (OS) and user defined function calls. DIRECT tracks all concurrency control primitives and, optionally, data from the program. DIRECT maintains an abstract global state that combines information from every thread, including the sequence of function calls and concurrency primitives executed. The runtime manager can insert delays, provoking thread interleavings that may exhibit bugs that are difficult to reach otherwise. The runtime manager collects an approximation of the reachable state space and uses this approximation to assess the impact of change in a new version of the program.},
author = {Chatterjee, Krishnendu and De Alfaro, Luca and Raman, Vishwanath and Sánchez, César},
editor = {Rosenblum, David and Taenzer, Gabriele},
location = {Paphos, Cyprus},
pages = {293 -- 307},
publisher = {Springer},
title = {{Analyzing the impact of change in multi-threaded programs}},
doi = {10.1007/978-3-642-12029-9_21},
volume = {6013},
year = {2010},
}
@article{4243,
abstract = {We investigate a new model for populations evolving in a spatial continuum. This model can be thought of as a spatial version of the Lambda-Fleming-Viot process. It explicitly incorporates both small scale reproduction events and large scale extinction-recolonisation events. The lineages ancestral to a sample from a population evolving according to this model can be described in terms of a spatial version of the Lambda-coalescent. Using a technique of Evans (1997), we prove existence and uniqueness in law for the model. We then investigate the asymptotic behaviour of the genealogy of a finite number of individuals sampled uniformly at random (or more generally `far enough apart') from a two-dimensional torus of sidelength L as L tends to infinity. Under appropriate conditions (and on a suitable timescale) we can obtain as limiting genealogical processes a Kingman coalescent, a more general Lambda-coalescent or a system of coalescing Brownian motions (with a non-local coalescence mechanism).},
author = {Barton, Nicholas H and Etheridge, Alison and Véber, Amandine},
journal = {Electronic Journal of Probability},
number = {7},
pages = {162 -- 216},
publisher = {Institute of Mathematical Statistics},
title = {{A new model for evolution in a spatial continuum}},
doi = {10.1214/EJP.v15-741},
volume = {15},
year = {2010},
}
@inproceedings{4382,
abstract = {Transactional memory (TM) has shown potential to simplify the task of writing concurrent programs. Inspired by classical work on databases, formal definitions of the semantics of TM executions have been proposed. Many of these definitions assumed that accesses to shared data are solely performed through transactions. In practice, due to legacy code and concurrency libraries, transactions in a TM have to share data with non-transactional operations. The semantics of such interaction, while widely discussed by practitioners, lacks a clear formal specification. Those interactions can vary, sometimes in subtle ways, between TM implementations and underlying memory models. We propose a correctness condition for TMs, parametrized opacity, to formally capture the now folklore notion of strong atomicity by stipulating the two following intuitive requirements: first, every transaction appears as if it is executed instantaneously with respect to other transactions and non-transactional operations, and second, non-transactional operations conform to the given underlying memory model. We investigate the inherent cost of implementing parametrized opacity. We first prove that parametrized opacity requires either instrumenting non-transactional operations (for most memory models) or writing to memory by transactions using potentially expensive read-modify-write instructions (such as compare-and-swap). Then, we show that for a class of practical relaxed memory models, parametrized opacity can indeed be implemented with constant-time instrumentation of non-transactional writes and no instrumentation of non-transactional reads. We show that, in practice, parametrizing the notion of correctness allows developing more efficient TM implementations.},
author = {Guerraoui, Rachid and Henzinger, Thomas A and Kapalka, Michal and Singh, Vasu},
location = {Santorini, Greece},
pages = {263 -- 272},
publisher = {ACM},
title = {{Transactions in the jungle}},
doi = {10.1145/1810479.1810529},
year = {2010},
}
@article{3772,
author = {Barton, Nicholas H},
journal = {PLoS Genetics},
number = {6},
publisher = {Public Library of Science},
title = {{Understanding adaptation in large populations}},
doi = {10.1371/journal.pgen.1000987},
volume = {6},
year = {2010},
}
@article{3777,
abstract = {Under the classical view, selection depends more or less directly on mutation: standing genetic variance is maintained by a balance between selection and mutation, and adaptation is fuelled by new favourable mutations. Recombination is favoured if it breaks negative associations among selected alleles, which interfere with adaptation. Such associations may be generated by negative epistasis, or by random drift (leading to the Hill-Robertson effect). Both deterministic and stochastic explanations depend primarily on the genomic mutation rate, U. This may be large enough to explain high recombination rates in some organisms, but seems unlikely to be so in general. Random drift is a more general source of negative linkage disequilibria, and can cause selection for recombination even in large populations, through the chance loss of new favourable mutations. The rate of species-wide substitutions is much too low to drive this mechanism, but local fluctuations in selection, combined with gene flow, may suffice. These arguments are illustrated by comparing the interaction between good and bad mutations at unlinked loci under the infinitesimal model.},
author = {Barton, Nicholas H},
journal = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
number = {1544},
pages = {1281 -- 1294},
publisher = {Royal Society},
title = {{Mutation and the evolution of recombination}},
doi = {10.1098/rstb.2009.0320},
volume = {365},
year = {2010},
}
@article{3789,
abstract = {The development of multicellular organisms is dependent on the tight coordination between tissue growth and morphogenesis. The stereotypical orientation of cell divisions has been proposed to be a fundamental mechanism by which proliferating and growing tissues take shape. However, the actual contribution of stereotypical division orientation (SDO) to tissue morphogenesis is unclear. In zebrafish, cell divisions with stereotypical orientation have been implicated in both body-axis elongation and neural rod formation [1, 2], although there is little direct evidence for a critical function of SDO in either of these processes. Here we show that SDO is required for formation of the neural rod midline during neurulation but dispensable for elongation of the body axis during gastrulation. Our data indicate that SDO during both gastrulation and neurulation is dependent on the noncanonical Wnt receptor Frizzled 7 (Fz7) and that interfering with cell division orientation leads to severe defects in neural rod midline formation but not body-axis elongation. These findings suggest a novel function for Fz7-controlled cell division orientation in neural rod midline formation during neurulation. },
author = {Quesada-Hernández, Elena and Caneparo, Luca and Schneider, Sylvia and Winkler, Sylke and Liebling, Michael and Fraser, Scott and Heisenberg, Carl-Philipp J},
journal = {Current Biology},
number = {21},
pages = {1966 -- 1972},
publisher = {Cell Press},
title = {{Stereotypical cell division orientation controls neural rod midline formation in zebrafish}},
doi = {10.1016/j.cub.2010.10.009},
volume = {20},
year = {2010},
}
@article{3498,
abstract = {Purpose
Calcifying tendinitis is a common condition of the shoulder. In many cases, arthroscopic reduction of the deposit is indicated. The localization of the deposit is sometimes challenging and time-consuming. Pre-operative ultrasound (US)-guided needle placement in the deposit and pre-operative US marking of the deposit at the skin with a ballpoint are described and recommended methods to facilitate the procedure without using the ionizing radiation of fluoroscopy.
Methods
Intra-operative sonography of the shoulder is introduced as a new method to localize the calcific deposit with high accuracy. After standard arthroscopic bursectomy, the surgeon performs an ultrasound examination under sterile conditions to localize the deposits. A ventral longitudinal US section is recommended, and the upper arm is rotated until the deposit is visible. Subsequently, perpendicular to the skin at the position of the transducer, a needle is introduced under arthroscopic and ultrasound visualization to puncture the deposit.
Results
The presence of snow-white crystals at the tip of the needle proves the exact localization. Subsequently, the curettage can be accomplished. Another intra-operative sonographic examination evaluates possible calcific remnants and the tendon structure.
Conclusion
This new technique may facilitate arthroscopic calcific deposit curettage by visualizing the deposit without using ionizing radiation. Additionally, a decreased number of punctures needed to detect the deposit may reduce soft tissue damage. Both factors may contribute to reduced operation time.},
author = {Sabeti-Aschraf, M. and Gonano, C. and Nemecek, E. and Cichocki, Lisa and Schueller-Weidekamm, C.},
journal = {Knee Surgery, Sports Traumatology, Arthroscopy},
number = {12},
pages = {1792 -- 1794},
publisher = {Springer},
title = {{Intra-operative ultrasound facilitates the localization of the calcific deposit during arthroscopic treatment of calcifying tendinitis}},
doi = {10.1007/s00167-010-1227-9},
volume = {18},
year = {2010},
}
@inproceedings{3847,
abstract = {The importance of stochasticity within biological systems has been shown repeatedly in recent years and has raised the need for efficient stochastic tools. We present SABRE, a tool for stochastic analysis of biochemical reaction networks. SABRE implements fast adaptive uniformization (FAU), a direct numerical approximation algorithm for computing transient solutions of biochemical reaction networks. Biochemical reaction networks represent biological systems studied at a molecular level, and these reactions can be modeled as transitions of a Markov chain. SABRE accepts as input the formalism of guarded commands, which it interprets either as continuous-time or as discrete-time Markov chains. Besides operating in a stochastic mode, SABRE may also perform a deterministic analysis by directly computing a mean-field approximation of the system under study. We illustrate the different functionalities of SABRE by means of biological case studies.},
author = {Didier, Frédéric and Henzinger, Thomas A and Mateescu, Maria and Wolf, Verena},
location = {Williamsburg, USA},
pages = {193 -- 194},
publisher = {IEEE},
title = {{SABRE: A tool for the stochastic analysis of biochemical reaction networks}},
doi = {10.1109/QEST.2010.33},
year = {2010},
}
@inproceedings{3866,
abstract = {Systems ought to behave reasonably even in circumstances that are not anticipated in their specifications. We propose a definition of robustness for liveness specifications which prescribes, for any number of environment assumptions that are violated, a minimal number of system guarantees that must still be fulfilled. This notion of robustness can be formulated and realized using a Generalized Reactivity formula. We present an algorithm for synthesizing robust systems from such formulas. For the important special case of Generalized Reactivity formulas of rank 1, our algorithm improves the complexity of [PPS06] for large specifications with a small number of assumptions and guarantees.},
author = {Bloem, Roderick and Chatterjee, Krishnendu and Greimel, Karin and Henzinger, Thomas A and Jobstmann, Barbara},
editor = {Touili, Tayssir and Cook, Byron and Jackson, Paul},
location = {Edinburgh, UK},
pages = {410 -- 424},
publisher = {Springer},
title = {{Robustness in the presence of liveness}},
doi = {10.1007/978-3-642-14295-6_36},
volume = {6174},
year = {2010},
}
@article{3842,
abstract = {Within systems biology there is an increasing interest in the stochastic behavior of biochemical reaction networks. An appropriate stochastic description is provided by the chemical master equation, which represents a continuous-time Markov chain (CTMC). The uniformization technique is an efficient method to compute probability distributions of a CTMC if the number of states is manageable. However, the size of a CTMC that represents a biochemical reaction network is usually far beyond what is feasible. In this paper we present an on-the-fly variant of uniformization, where we improve the original algorithm at the cost of a small approximation error. By means of several examples, we show that our approach is particularly well-suited for biochemical reaction networks.},
author = {Didier, Frédéric and Henzinger, Thomas A and Mateescu, Maria and Wolf, Verena},
journal = {IET Systems Biology},
number = {6},
pages = {441 -- 452},
publisher = {Institution of Engineering and Technology},
title = {{Fast adaptive uniformization of the chemical master equation}},
doi = {10.1049/iet-syb.2010.0005},
volume = {4},
year = {2010},
}
@inproceedings{3854,
abstract = {Graph games of infinite length provide a natural model for open reactive systems: one player (Eve) represents the controller and the other player (Adam) represents the environment. The evolution of the system depends on the decisions of both players. The specification for the system is usually given as an ω-regular language L over paths and Eve’s goal is to ensure that the play belongs to L irrespective of Adam’s behaviour. The classical notion of winning strategies fails to capture several interesting scenarios. For example, strong fairness (Streett) conditions are specified by a number of request-grant pairs and require every pair that is requested infinitely often to be granted infinitely often: Eve might win just by preventing Adam from making any new request, but a “better” strategy would allow Adam to make as many requests as possible and still ensure fairness. To address such questions, we introduce the notion of obliging games, where Eve has to ensure a strong condition Φ, while always allowing Adam to satisfy a weak condition Ψ. We present a linear time reduction of obliging games with two Muller conditions Φ and Ψ to classical Muller games. We consider obliging Streett games, show that they are co-NP complete, and show that a natural quantitative optimisation problem for them is in FNP. We also show how obliging games can provide new and interesting semantics for multi-player games.},
author = {Chatterjee, Krishnendu and Horn, Florian and Löding, Christof},
location = {Paris, France},
pages = {284 -- 296},
publisher = {Springer},
title = {{Obliging games}},
doi = {10.1007/978-3-642-15375-4_20},
volume = {6269},
year = {2010},
}
@proceedings{3859,
abstract = {This book constitutes the proceedings of the 8th International Conference on Formal Modeling and Analysis of Timed Systems, FORMATS 2010, held in Klosterneuburg, Austria in September 2010. The 14 papers presented were carefully reviewed and selected from 31 submissions. In addition, the volume contains 3 invited talks and 2 invited tutorials. The aim of FORMATS is to promote the study of fundamental and practical aspects of timed systems, and to bring together researchers from different disciplines that share an interest in the modeling and analysis of timed systems. Typical topics include foundations and semantics, methods and tools, and applications.},
editor = {Chatterjee, Krishnendu and Henzinger, Thomas A},
location = {Klosterneuburg, Austria},
publisher = {Springer},
title = {{Formal modeling and analysis of timed systems}},
doi = {10.1007/978-3-642-15297-9},
volume = {6246},
year = {2010},
}
@phdthesis{3962,
author = {Pflicke, Holger},
publisher = {IST Austria},
title = {{Dendritic cell migration across basement membranes in the skin}},
year = {2010},
}
@article{3861,
abstract = {We introduce strategy logic, a logic that treats strategies in two-player games as explicit first-order objects. The explicit treatment of strategies allows us to specify properties of nonzero-sum games in a simple and natural way. We show that the one-alternation fragment of strategy logic is strong enough to express the existence of Nash equilibria and secure equilibria, and subsumes other logics that were introduced to reason about games, such as ATL, ATL*, and game logic. We show that strategy logic is decidable, by constructing tree automata that recognize sets of strategies. While for the general logic, our decision procedure is nonelementary, for the simple fragment that is used above we show that the complexity is polynomial in the size of the game graph and optimal in the size of the formula (ranging from polynomial to 2EXPTIME depending on the form of the formula).},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Piterman, Nir},
journal = {Information and Computation},
number = {6},
pages = {677 -- 693},
publisher = {Elsevier},
title = {{Strategy logic}},
doi = {10.1016/j.ic.2009.07.004},
volume = {208},
year = {2010},
}
@inproceedings{4369,
abstract = {In this paper we propose a novel technique for constructing timed automata from properties expressed in the logic MTL, under bounded-variability assumptions. We handle full MTL and include all future operators. Our construction is based on separation of the continuous-time monitoring of the input sequence and discrete predictions regarding the future. The separation of the continuous from the discrete allows us to determinize our automata in an exponential construction that does not increase the number of clocks. This leads to a doubly exponential construction from MTL to deterministic timed automata, compared with triply exponential using existing approaches. We offer an alternative to the existing approach to linear real-time model checking, which has never been implemented. It further offers a unified framework for model checking, runtime monitoring, and synthesis, in an approach that can reuse tools, implementations, and insights from the discrete setting.},
author = {Nickovic, Dejan and Piterman, Nir},
editor = {Henzinger, Thomas A and Chatterjee, Krishnendu},
location = {Klosterneuburg, Austria},
pages = {152 -- 167},
publisher = {Springer},
title = {{From MTL to deterministic timed automata}},
doi = {10.1007/978-3-642-15297-9_13},
volume = {6246},
year = {2010},
}
@article{474,
abstract = {Classical models of gene flow fail in three ways: they cannot explain large-scale patterns; they predict much more genetic diversity than is observed; and they assume that loosely linked genetic loci evolve independently. We propose a new model that deals with these problems. Extinction events kill some fraction of individuals in a region. These are replaced by offspring from a small number of parents, drawn from the preexisting population. This model of evolution forwards in time corresponds to a backwards model, in which ancestral lineages jump to a new location if they are hit by an event, and may coalesce with other lineages that are hit by the same event. We derive an expression for the identity in allelic state, and show that, over scales much larger than the largest event, this converges to the classical value derived by Wright and Malécot. However, rare events that cover large areas cause low genetic diversity, large-scale patterns, and correlations in ancestry between unlinked loci.},
author = {Barton, Nicholas H and Kelleher, Jerome and Etheridge, Alison},
journal = {Evolution},
number = {9},
pages = {2701 -- 2715},
publisher = {Wiley-Blackwell},
title = {{A new model for extinction and recolonization in two dimensions: Quantifying phylogeography}},
doi = {10.1111/j.1558-5646.2010.01019.x},
volume = {64},
year = {2010},
}
@inproceedings{4390,
abstract = {Concurrent data structures with fine-grained synchronization are notoriously difficult to implement correctly. The difficulty of reasoning about these implementations does not stem from the number of variables or the program size, but rather from the large number of possible interleavings. These implementations are therefore prime candidates for model checking. We introduce an algorithm for verifying linearizability of singly-linked heap-based concurrent data structures. We consider a model consisting of an unbounded heap where each vertex stores an element from an unbounded data domain, with a restricted set of operations for testing and updating pointers and data elements. Our main result is that linearizability is decidable for programs that invoke a fixed number of methods, possibly in parallel. This decidable fragment covers many of the common implementation techniques — fine-grained locking, lazy synchronization, and lock-free synchronization. We also show how the technique can be used to verify optimistic implementations with the help of programmer annotations. We developed a verification tool CoLT and evaluated it on a representative sample of Java implementations of the concurrent set data structure. The tool verified linearizability of a number of implementations, found a known error in a lock-free implementation and proved that the corrected version is linearizable.},
author = {Cerny, Pavol and Radhakrishna, Arjun and Zufferey, Damien and Chaudhuri, Swarat and Alur, Rajeev},
location = {Edinburgh, UK},
pages = {465 -- 479},
publisher = {Springer},
title = {{Model checking of linearizability of concurrent list implementations}},
doi = {10.1007/978-3-642-14295-6_41},
volume = {6174},
year = {2010},
}
@inproceedings{4388,
abstract = {GIST is a tool that (a) solves the qualitative analysis problem of turn-based probabilistic games with ω-regular objectives; and (b) synthesizes reasonable environment assumptions for synthesis of unrealizable specifications. Our tool provides the first efficient implementations of several reduction-based techniques to solve turn-based probabilistic games, and uses the analysis of turn-based probabilistic games for synthesizing environment assumptions for unrealizable specifications.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Jobstmann, Barbara and Radhakrishna, Arjun},
location = {Edinburgh, UK},
pages = {665 -- 669},
publisher = {Springer},
title = {{GIST: A solver for probabilistic games}},
doi = {10.1007/978-3-642-14295-6_57},
volume = {6174},
year = {2010},
}
@article{3773,
abstract = {If distinct biological species are to coexist in sympatry, they must be reproductively isolated and must exploit different limiting resources. A two-niche Levene model is analysed, in which habitat preference and survival depend on underlying additive traits. The population genetics of preference and viability are equivalent. However, there is a linear trade-off between the chances of settling in either niche, whereas viabilities may be constrained arbitrarily. With a convex trade-off, a sexual population evolves a single generalist genotype, whereas with a concave trade-off, disruptive selection favours maximal variance. A pure habitat preference evolves to global linkage equilibrium if mating occurs in a single pool, but remarkably, evolves to pairwise linkage equilibrium within niches if mating is within those niches--independent of the genetics. With a concave trade-off, the population shifts sharply between a unimodal distribution with high gene flow and a bimodal distribution with strong isolation, as the underlying genetic variance increases. However, these alternative states are only simultaneously stable for a narrow parameter range. A sharp threshold is only seen if survival in the 'wrong' niche is low; otherwise, strong isolation is impossible. Gene flow from divergent demes makes speciation much easier in parapatry than in sympatry.},
author = {Barton, Nicholas H},
journal = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
number = {1547},
pages = {1825 -- 1840},
publisher = {Royal Society},
title = {{What role does natural selection play in speciation?}},
doi = {10.1098/rstb.2010.0001},
volume = {365},
year = {2010},
}
@article{3785,
abstract = {Most fisheries involving spiny lobsters of the genus Palinurus have been overexploited during the last decades, so there is rising concern about management decisions for these valuable resources. A total of 13 microsatellite DNA loci recently developed in Palinurus elephas were assayed in order to assess genetic diversity levels in every known species of the genus. Microsatellite markers gave amplifications and showed polymorphism in all species, with gene diversity values varying from 0.650 ± 0.077 SD (Palinurus barbarae) to 0.792 ± 0.051 SD (Palinurus elephas). Most importantly, when depth distribution was taken into account, shallower-water species consistently showed larger historical effective population sizes than their deeper-water counterparts. This could explain why deeper-water species are more sensitive to overfishing, and would indicate that overexploitation may have a larger impact on their long-term genetic diversity.},
author = {Palero, Ferran and Abello, Pere and Macpherson, E. and Matthee, C. and Pascual, Marta},
journal = {Journal of Crustacean Biology},
number = {4},
pages = {658 -- 663},
publisher = {BioOne},
title = {{Genetic diversity levels in fishery-exploited spiny lobsters of the Genus Palinurus (Decapoda: Achelata)}},
doi = {10.1651/09-3192.1},
volume = {30},
year = {2010},
}
@article{3792,
abstract = {The yolk syncytial layer (YSL) plays crucial roles in early zebrafish development. The YSL is a transient extra-embryonic syncytial tissue that forms during early cleavage stages and persists until larval stages. During gastrulation, the YSL undergoes highly dynamic movements, which are tightly coordinated with the movements of the overlying germ layer progenitor cells, and has critical functions in cell fate specification and morphogenesis of the early germ layers. Movement coordination between the YSL and blastoderm cells is dependent on contact between these tissues, and is probably required for the patterning and morphogenetic function of the YSL. In this review, we will discuss recent advances in elucidating the molecular and cellular mechanisms underlying the YSL morphogenesis and movement coordination between the YSL and blastoderm during early development.},
author = {Carvalho, Lara and Heisenberg, Carl-Philipp J},
journal = {Trends in Cell Biology},
number = {10},
pages = {586 -- 592},
publisher = {Cell Press},
title = {{The yolk syncytial layer in early zebrafish development}},
doi = {10.1016/j.tcb.2010.06.009},
volume = {20},
year = {2010},
}
@inproceedings{3848,
abstract = {We define the robustness of a level set homology class of a function f: X → R as the magnitude of a perturbation necessary to kill the class. Casting this notion into a group theoretic framework, we compute the robustness for each class, using a connection to extended persistent homology. The special case X = R^3 has ramifications in medical imaging and scientific visualization.},
author = {Bendich, Paul and Edelsbrunner, Herbert and Morozov, Dmitriy and Patel, Amit},
location = {Liverpool, UK},
pages = {1 -- 10},
publisher = {Springer},
title = {{The robustness of level sets}},
doi = {10.1007/978-3-642-15775-2_1},
volume = {6346},
year = {2010},
}
@inproceedings{3850,
abstract = {Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance ε in Hausdorff distance, as the Minkowski sum of another polygonal shape with a disk of fixed radius? If it does, we also seek a preferably simple solution shape P; P’s offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give a decision algorithm for fixed radius in O(n log n) time that handles any polygonal shape. For convex shapes, the complexity drops to O(n), which is also the time required to compute a solution shape P with at most one more vertex than a vertex-minimal one.},
author = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
location = {Dortmund, Germany},
pages = {12 -- 23},
publisher = {TU Dortmund},
title = {{Polygonal reconstruction from approximate offsets}},
year = {2010},
}
@article{3901,
abstract = {We are interested in 3-dimensional images given as arrays of voxels with intensity values. Extending these values to a continuous function, we study the robustness of homology classes in its level and interlevel sets, that is, the amount of perturbation needed to destroy these classes. The structure of the homology classes and their robustness, over all level and interlevel sets, can be visualized by a triangular diagram of dots obtained by computing the extended persistence of the function. We give a fast hierarchical algorithm using the dual complexes of oct-tree approximations of the function. In addition, we show that for balanced oct-trees, the dual complexes are geometrically realized in $R^3$ and can thus be used to construct level and interlevel sets. We apply these tools to study 3-dimensional images of plant root systems.},
author = {Bendich, Paul and Edelsbrunner, Herbert and Kerber, Michael},
journal = {IEEE Transactions on Visualization and Computer Graphics},
number = {6},
pages = {1251 -- 1260},
publisher = {IEEE},
title = {{Computing robustness and persistence for images}},
doi = {10.1109/TVCG.2010.139},
volume = {16},
year = {2010},
}
@book{4346,
abstract = {With the term "Library 2.0" the editors mean an institution that applies Web 2.0 principles such as openness, re-use, collaboration and interaction throughout the entire organization. Libraries are extending their service offerings and work processes to include the potential of Web 2.0 technologies. This changes the job description and self-image of librarians. The collective volume offers a complete overview of the topic Library 2.0 and the current state of developments from a technological, sociological, information-theoretical and practice-oriented perspective.},
author = {Danowski, Patrick and Bergmann, Julia},
publisher = {De Gruyter},
title = {{Handbuch Bibliothek 2.0}},
year = {2010},
}
@inbook{4339,
abstract = {With this book we aim to give an overview of the current discussion on the topic of Library 2.0 and to examine the actual state of adoption of Web 2.0 approaches in German-speaking libraries. At this point one may fairly ask why, at a time when the first "Web 3.0" conferences are already taking place, a handbook on Library 2.0 is still needed. And why a German-language handbook on Library 2.0 is needed at all, when a wide range of publications on this topic already exists from other countries, particularly from the Anglo-American world. Has everything not already been said on the subject?},
author = {Bergmann, Julia and Danowski, Patrick},
booktitle = {Handbuch Bibliothek 2.0},
editor = {Bergmann, Julia and Danowski, Patrick},
pages = {5 -- 20},
publisher = {De Gruyter},
title = {{Ist Bibliothek 2.0 überhaupt noch relevant? – Eine Einleitung in das Handbuch}},
doi = {10.1515/9783110232103},
year = {2010},
}
@inproceedings{4396,
abstract = {Shape analysis is a promising technique to prove program properties about recursive data structures. The challenge is to automatically determine the data-structure type, and to supply the shape analysis with the necessary information about the data structure. We present a stepwise approach to the selection of instrumentation predicates for a TVLA-based shape analysis, which takes us a step closer towards the fully automatic verification of data structures. The approach uses two techniques to guide the refinement of shape abstractions: (1) during program exploration, an explicit heap analysis collects sample instances of the heap structures, which are used to identify the data structures that are manipulated by the program; and (2) during abstraction refinement along an infeasible error path, we consider different possible heap abstractions and choose the coarsest one that eliminates the infeasible path. We have implemented this combined approach for automatic shape refinement as an extension of the software model checker BLAST. Example programs from a data-structure library that manipulate doubly-linked lists and trees were successfully verified by our tool.},
author = {Beyer, Dirk and Henzinger, Thomas A and Théoduloz, Grégory and Zufferey, Damien},
editor = {Rosenblum, David and Taentzer, Gabriele},
location = {Paphos, Cyprus},
pages = {263 -- 277},
publisher = {Springer},
title = {{Shape refinement through explicit heap analysis}},
doi = {10.1007/978-3-642-12029-9_19},
volume = {6013},
year = {2010},
}