@article{3397,
abstract = {Recent advances in microscopy techniques and biophysical measurements have provided novel insight into the molecular, cellular and biophysical basis of cell adhesion. However, comparably little is known about a core element of cell–cell adhesion—the energy of adhesion at the cell–cell contact. In this review, we discuss approaches to understand the nature and regulation of adhesion energy, and propose strategies to determine adhesion energy between cells in vitro and in vivo.},
author = {Maître, Jean-Léon and Heisenberg, Carl-Philipp J},
journal = {Current Opinion in Cell Biology},
number = {5},
pages = {508--514},
publisher = {Elsevier},
title = {{The role of adhesion energy in controlling cell-cell contacts}},
doi = {10.1016/j.ceb.2011.07.004},
volume = {23},
year = {2011},
}
@article{3399,
  author    = {Cremer, Sylvia and Schrempf, Alexandra and Heinze, Jürgen},
  title     = {{Competition and opportunity shape the reproductive tactics of males in the ant Cardiocondyla obscurior}},
  journal   = {PLoS One},
  volume    = {6},
  number    = {3},
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pone.0017323},
  year      = {2011},
  abstract  = {Context-dependent adjustment of mating tactics can drastically increase the mating success of behaviourally flexible animals. We used the ant Cardiocondyla obscurior as a model system to study adaptive adjustment of male mating tactics. This species shows a male diphenism of wingless fighter males and peaceful winged males. Whereas the wingless males stay and exclusively mate in the maternal colony, the mating behaviour of winged males is plastic. They copulate with female sexuals in their natal nests early in life but later disperse in search for sexuals outside. In this study, we observed the nest-leaving behaviour of winged males under different conditions and found that they adaptively adjust the timing of their dispersal to the availability of mating partners, as well as the presence, and even the type of competitors in their natal nests. In colonies with virgin female queens winged males stayed longest when they were the only male in the nest. They left earlier when mating partners were not available or when other males were present. In the presence of wingless, locally mating fighter males, winged males dispersed earlier than in the presence of docile, winged competitors. This suggests that C. obscurior males are capable of estimating their local breeding chances and adaptively adjust their dispersal behaviour in both an opportunistic and a risk-sensitive way, thus showing hitherto unknown behavioural plasticity in social insect males.},
}
@article{3405,
abstract = {Glutamate is the major excitatory neurotransmitter in the mammalian central nervous system and gates non-selective cation channels. The origins of glutamate receptors are not well understood as they differ structurally and functionally from simple bacterial ligand-gated ion channels. Here we report the discovery of an ionotropic glutamate receptor that combines the typical eukaryotic domain architecture with the 'TXVGYG' signature sequence of the selectivity filter found in K+ channels. This receptor exhibits functional properties intermediate between bacterial and eukaryotic glutamate-gated ion channels, suggesting a link in the evolution of ionotropic glutamate receptors.},
author = {Janovjak, Harald L and Sandoz, Guillaume and Isacoff, Ehud},
journal = {Nature Communications},
number = {232},
pages = {1--6},
publisher = {Nature Publishing Group},
title = {{Modern ionotropic glutamate receptor with a K+ selectivity signature sequence}},
doi = {10.1038/ncomms1231},
volume = {2},
year = {2011},
}
@article{3429,
abstract = {Transcription factors are central to sustaining pluripotency, yet little is known about transcription factor dynamics in defining pluripotency in the early mammalian embryo. Here, we establish a fluorescence decay after photoactivation (FDAP) assay to quantitatively study the kinetic behaviour of Oct4, a key transcription factor controlling pre-implantation development in the mouse embryo. FDAP measurements reveal that each cell in a developing embryo shows one of two distinct Oct4 kinetics, before there are any morphologically distinguishable differences or outward signs of lineage patterning. The differences revealed by FDAP are due to differences in the accessibility of Oct4 to its DNA binding sites in the nucleus. Lineage tracing of the cells in the two distinct sub-populations demonstrates that the Oct4 kinetics predict lineages of the early embryo. Cells with slower Oct4 kinetics are more likely to give rise to the pluripotent cell lineage that contributes to the inner cell mass. Those with faster Oct4 kinetics contribute mostly to the extra-embryonic lineage. Our findings identify Oct4 kinetics, rather than differences in total transcription factor expression levels, as a predictive measure of developmental cell lineage patterning in the early mouse embryo.},
author = {Plachta, Nicolas and Bollenbach, Mark Tobias and Pease, Shirley and Fraser, Scott and Pantazis, Periklis},
journal = {Nature Cell Biology},
number = {2},
pages = {117--123},
publisher = {Nature Publishing Group},
title = {{Oct4 kinetics predict cell lineage patterning in the early mammalian embryo}},
doi = {10.1038/ncb2154},
volume = {13},
year = {2011},
}
@incollection{3505,
abstract = {Cell migration on two-dimensional (2D) substrates follows entirely different rules than cell migration in three-dimensional (3D) environments. This is especially relevant for leukocytes that are able to migrate in the absence of adhesion receptors within the confined geometry of artificial 3D extracellular matrix scaffolds and within the interstitial space in vivo. Here, we describe in detail a simple and economical protocol to visualize dendritic cell migration in 3D collagen scaffolds along chemotactic gradients. This method can be adapted to other cell types and may serve as a physiologically relevant paradigm for the directed locomotion of most amoeboid cells.},
author = {Sixt, Michael K and Lämmermann, Tim},
booktitle = {Cell Migration},
series = {Methods in Molecular Biology},
pages = {149--165},
publisher = {Springer},
title = {{In vitro analysis of chemotactic leukocyte migration in 3D environments}},
doi = {10.1007/978-1-61779-207-6_11},
volume = {769},
year = {2011},
}
@phdthesis{3275,
abstract = {Chemokines organize immune cell trafficking by inducing either directed (tactic) or random (kinetic) migration and by activating integrins in order to support surface adhesion (haptic). Beyond that the same chemokines can establish clearly defined functional areas in secondary lymphoid organs. Until now it is unclear how chemokines can fulfill such diverse functions. One decisive prerequisite to explain these capacities is to know how chemokines are presented in tissue. In theory chemokines could occur either soluble or immobilized, and could be distributed either homogenously or as a concentration gradient. To dissect if and how the presenting mode of chemokines influences immune cells, I tested the response of dendritic cells (DCs) to differentially displayed chemokines. DCs are antigen presenting cells that reside in the periphery and migrate into draining lymph nodes (LNs) once exposed to inflammatory stimuli to activate naïve T cells. DCs are guided to and within the LN by the chemokine receptor CCR7, which has two ligands, the chemokines CCL19 and CCL21. Both CCR7 ligands are expressed by fibroblastic reticular cells in the LN, but differ in their ability to bind to heparan sulfate residues. CCL21 has a highly charged C-terminal extension, which mediates binding to anionic surfaces, whereas CCL19 is lacking such residues and likely distributes as a soluble molecule. This study shows that surface-bound CCL21 causes random, haptokinetic DC motility, which is confined to the chemokine coated area by insideout activation of β2 integrins that mediate cell binding to the surface. CCL19 on the other hand forms concentration gradients which trigger directional, chemotactic movement, but no surface adhesion. In addition DCs can actively manipulate this system by recruiting and activating serine proteases on their surfaces, which create - by proteolytically removing the adhesive C-terminus - a solubilized variant of CCL21 that functionally resembles CCL19.
By generating a CCL21 concentration gradient DCs establish a positive feedback loop to recruit further DCs from the periphery to the CCL21 coated region. In addition DCs can sense chemotactic gradients as well as immobilized haptokinetic fields at the same time and integrate these signals. The result is chemotactically biased haptokinesis - directional migration confined to a chemokine coated track or area - which could explain the dynamic but spatially tightly controlled swarming leukocyte locomotion patterns that have been observed in lymphatic organs by intravital microscopists. The finding that DCs can approach soluble cues in a non-adhesive manner while they attach to surfaces coated with immobilized cues raises the question how these cells transmit intracellular forces to the environment, especially in the non-adherent migration mode. In order to migrate, cells have to generate and transmit force to the extracellular substrate. Force transmission is the prerequisite to procure an expansion of the leading edge and a forward motion of the whole cell body. In the current conceptions actin polymerization at the leading edge is coupled to extracellular ligands via the integrin family of transmembrane receptors, which allows the transmission of intracellular force. Against the paradigm of force transmission during migration, leukocytes, like DCs, are able to migrate in threedimensional environments without using integrin transmembrane receptors (Lämmermann et al., 2008). This reflects the biological function of leukocytes, as they can invade almost all tissues, whereby their migration has to be independent from the extracellular environment. How the cells can achieve this is unclear. For this study I examined DC migration in a defined threedimensional environment and highlighted actin-dynamics with the probe Lifeact-GFP.
The result was that chemotactic DCs can switch between integrin-dependent and integrin- independent locomotion and can thereby adapt to the adhesive properties of their environment. If the cells are able to couple their actin cytoskeleton to the substrate, actin polymerization is entirely converted into protrusion. Without coupling the actin cortex undergoes slippage and retrograde actin flow can be observed. But retrograde actin flow can be completely compensated by higher actin polymerization rate keeping the migration velocity and the shape of the cells unaltered. Mesenchymal cells like fibroblast cannot balance the loss of adhesive interaction, cannot protrude into open space and, therefore, strictly depend on integrinmediated force coupling. This leukocyte specific phenomenon of “adaptive force transmission” endows these cells with the unique ability to transit and invade almost every type of tissue.},
author = {Schumann, Kathrin},
pages = {141},
publisher = {IST Austria},
school = {IST Austria},
title = {{The role of chemotactic gradients in dendritic cell migration}},
year = {2011},
}
@article{6496,
  author    = {Park, Heungwon and Oikonomou, Panos and Guet, Calin C and Cluzel, Philippe},
  title     = {{Noise underlies switching behavior of the bacterial flagellum}},
  journal   = {Biophysical Journal},
  volume    = {101},
  number    = {10},
  pages     = {2336--2340},
  publisher = {Elsevier},
  issn      = {0006-3495},
  doi       = {10.1016/j.bpj.2011.09.040},
  year      = {2011},
  abstract  = {We report the switching behavior of the full bacterial flagellum system that includes the filament and the motor in wild-type Escherichia coli cells. In sorting the motor behavior by the clockwise bias, we find that the distributions of the clockwise (CW) and counterclockwise (CCW) intervals are either exponential or nonexponential with long tails. At low bias, CW intervals are exponentially distributed and CCW intervals exhibit long tails. At intermediate CW bias (0.5) both CW and CCW intervals are mainly exponentially distributed. A simple model suggests that these two distinct switching behaviors are governed by the presence of signaling noise within the chemotaxis network. Low noise yields exponentially distributed intervals, whereas large noise yields nonexponential behavior with long tails. These drastically different motor statistics may play a role in optimizing bacterial behavior for a wide range of environmental conditions.},
}
@article{2409,
abstract = {Background: The availability of many gene alignments with overlapping taxon sets raises the question of which strategy is the best to infer species phylogenies from multiple gene information. Methods and programs abound that use the gene alignment in different ways to reconstruct the species tree. In particular, different methods combine the original data at different points along the way from the underlying sequences to the final tree. Accordingly, they are classified into superalignment, supertree and medium-level approaches. Here, we present a simulation study to compare different methods from each of these three approaches.
Results: We observe that superalignment methods usually outperform the other approaches over a wide range of parameters including sparse data and gene-specific evolutionary parameters. In the presence of high incongruency among gene trees, however, other combination methods show better performance than the superalignment approach. Surprisingly, some supertree and medium-level methods exhibit, on average, worse results than a single gene phylogeny with complete taxon information.
Conclusions: For some methods, using the reconstructed gene tree as an estimation of the species tree is superior to the combination of incomplete information. Superalignment usually performs best since it is less susceptible to stochastic error. Supertree methods can outperform superalignment in the presence of gene-tree conflict.},
author = {Kupczok, Anne and Schmidt, Heiko and von Haeseler, Arndt},
journal = {Algorithms for Molecular Biology},
number = {1},
publisher = {BioMed Central},
title = {{Accuracy of phylogeny reconstruction methods combining overlapping gene data sets}},
doi = {10.1186/1748-7188-5-37},
volume = {5},
year = {2010},
}
@article{474,
abstract = {Classical models of gene flow fail in three ways: they cannot explain large-scale patterns; they predict much more genetic diversity than is observed; and they assume that loosely linked genetic loci evolve independently. We propose a new model that deals with these problems. Extinction events kill some fraction of individuals in a region. These are replaced by offspring from a small number of parents, drawn from the preexisting population. This model of evolution forwards in time corresponds to a backwards model, in which ancestral lineages jump to a new location if they are hit by an event, and may coalesce with other lineages that are hit by the same event. We derive an expression for the identity in allelic state, and show that, over scales much larger than the largest event, this converges to the classical value derived by Wright and Malécot. However, rare events that cover large areas cause low genetic diversity, large-scale patterns, and correlations in ancestry between unlinked loci.},
author = {Barton, Nicholas H and Kelleher, Jerome and Etheridge, Alison},
journal = {Evolution},
number = {9},
pages = {2701--2715},
publisher = {Wiley-Blackwell},
title = {{A new model for extinction and recolonization in two dimensions: Quantifying phylogeography}},
doi = {10.1111/j.1558-5646.2010.01019.x},
volume = {64},
year = {2010},
}
@inproceedings{488,
abstract = {Streaming string transducers [1] define (partial) functions from input strings to output strings. A streaming string transducer makes a single pass through the input string and uses a finite set of variables that range over strings from the output alphabet. At every step, the transducer processes an input symbol, and updates all the variables in parallel using assignments whose right-hand-sides are concatenations of output symbols and variables with the restriction that a variable can be used at most once in a right-hand-side expression. It has been shown that streaming string transducers operating on strings over infinite data domains are of interest in algorithmic verification of list-processing programs, as they lead to PSPACE decision procedures for checking pre/post conditions and for checking semantic equivalence, for a well-defined class of heap-manipulating programs. In order to understand the theoretical expressiveness of streaming transducers, we focus on streaming transducers processing strings over finite alphabets, given the existence of a robust and well-studied class of "regular" transductions for this case. Such regular transductions can be defined either by two-way deterministic finite-state transducers, or using a logical MSO-based characterization. Our main result is that the expressiveness of streaming string transducers coincides exactly with this class of regular transductions.},
author = {Alur, Rajeev and Cerny, Pavol},
booktitle = {Foundations of Software Technology and Theoretical Computer Science (FSTTCS 2010)},
series = {LIPIcs},
location = {Chennai, India},
pages = {1--12},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Expressiveness of streaming string transducers}},
doi = {10.4230/LIPIcs.FSTTCS.2010.1},
volume = {8},
year = {2010},
}
@inproceedings{489,
abstract = {Graph games of infinite length are a natural model for open reactive processes: one player represents the controller, trying to ensure a given specification, and the other represents a hostile environment. The evolution of the system depends on the decisions of both players, supplemented by chance. In this work, we focus on the notion of randomised strategy. More specifically, we show that three natural definitions may lead to very different results: in the most general cases, an almost-surely winning situation may become almost-surely losing if the player is only allowed to use a weaker notion of strategy. In more reasonable settings, translations exist, but they require infinite memory, even in simple cases. Finally, some traditional problems becomes undecidable for the strongest type of strategies.},
author = {Cristau, Julien and David, Claire and Horn, Florian},
booktitle = {Proceedings of GandALF 2010},
location = {Minori, Amalfi Coast, Italy},
pages = {30--39},
publisher = {Open Publishing Association},
title = {{How do we remember the past in randomised strategies?}},
doi = {10.4204/EPTCS.25.7},
volume = {25},
year = {2010},
}
@article{533,
abstract = {Any programming error that can be revealed before compiling a program saves precious time for the programmer. While integrated development environments already do a good job by detecting, e.g., data-flow abnormalities, current static analysis tools suffer from false positives ("noise") or require strong user interaction. We propose to avoid this deficiency by defining a new class of errors. A program fragment is doomed if its execution will inevitably fail, regardless of which state it is started in. We use a formal verification method to identify such errors fully automatically and, most significantly, without producing noise. We report on experiments with a prototype tool.},
author = {Hoenicke, Jochen and Leino, Kari and Podelski, Andreas and Schäf, Martin and Wies, Thomas},
journal = {Formal Methods in System Design},
number = {2-3},
pages = {171--199},
publisher = {Springer},
title = {{Doomed program points}},
doi = {10.1007/s10703-010-0102-0},
volume = {37},
year = {2010},
}
@techreport{5388,
abstract = {We present an algorithmic method for the synthesis of concurrent programs that are optimal with respect to quantitative performance measures. The input consists of a sequential sketch, that is, a program that does not contain synchronization constructs, and of a parametric performance model that assigns costs to actions such as locking, context switching, and idling. The quantitative synthesis problem is to automatically introduce synchronization constructs into the sequential sketch so that both correctness is guaranteed and worst-case (or average-case) performance is optimized. Correctness is formalized as race freedom or linearizability.
We show that for worst-case performance, the problem can be modeled
as a 2-player graph game with quantitative (limit-average) objectives, and
for average-case performance, as a 2 1/2 -player graph game (with probabilistic transitions). In both cases, the optimal correct program is derived from an optimal strategy in the corresponding quantitative game. We prove that the respective game problems are computationally expensive (NP-complete), and present several techniques that overcome the theoretical difficulty in cases of concurrent programs of practical interest.
We have implemented a prototype tool and used it for the automatic synthesis of programs that access a concurrent list. For certain parameter values, our method automatically synthesizes various classical synchronization schemes for implementing a concurrent list, such as fine-grained locking or a lazy algorithm. For other parameter values, a new, hybrid synchronization style is synthesized, which uses both the lazy approach and coarse-grained locks (instead of standard fine-grained locks). The trade-off occurs because while fine-grained locking tends to decrease the cost that is due to waiting for locks, it increases cache size requirements.},
author = {Chatterjee, Krishnendu and Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun and Singh, Rohit},
institution = {IST Austria},
issn = {2664-1690},
pages = {17},
title = {{Quantitative synthesis for concurrent programs}},
doi = {10.15479/AT:IST-2010-0004},
year = {2010},
}
@techreport{5389,
abstract = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of “fit” or “desirability.” We extend the simulation preorder to the quantitative setting, by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the implementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
institution = {IST Austria},
issn = {2664-1690},
pages = {24},
title = {{Simulation distances}},
doi = {10.15479/AT:IST-2010-0003},
year = {2010},
}
@techreport{5390,
abstract = {The class of ω regular languages provide a robust specification language in verification. Every ω-regular condition can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens “eventually.” Two main strengths of the classical, infinite-limit formulation of liveness are robustness (independence from the granularity of transitions) and simplicity (abstraction of complicated time bounds). However, the classical liveness formulation suffers from the drawback that the time until something good happens may be unbounded. A stronger formulation of liveness, so-called finitary liveness, overcomes this drawback, while still retaining robustness and simplicity. Finitary liveness requires that there exists an unknown, fixed bound b such that something good happens within b transitions. In this work we consider the finitary parity and Streett (fairness) conditions. We present the topological, automata-theoretic and logical characterization of finitary languages defined by finitary parity and Streett conditions. We (a) show that the finitary parity and Streett languages are Σ2-complete; (b) present a complete characterization of the expressive power of various classes of automata with finitary and infinitary conditions (in particular we show that non-deterministic finitary parity and Streett automata cannot be determinized to deterministic finitary parity or Streett automata); and (c) show that the languages defined by non-deterministic finitary parity automata exactly characterize the star-free fragment of ωB-regular languages.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
institution = {IST Austria},
issn = {2664-1690},
pages = {21},
title = {{Topological, automata-theoretic and logical characterization of finitary languages}},
doi = {10.15479/AT:IST-2010-0002},
year = {2010},
}
@techreport{5391,
abstract = {Concurrent data structures with fine-grained synchronization are notoriously difficult to implement correctly. The difficulty of reasoning about these implementations does not stem from the number of variables or the program size, but rather from the large number of possible interleavings. These implementations are therefore prime candidates for model checking. We introduce an algorithm for verifying linearizability of singly-linked heap-based concurrent data structures. We consider a model consisting of an unbounded heap where each node consists an element from an unbounded data domain, with a restricted set of operations for testing and updating pointers and data elements. Our main result is that linearizability is decidable for programs that invoke a fixed number of methods, possibly in parallel. This decidable fragment covers many of the common implementation techniques — fine-grained locking, lazy synchronization, and lock-free synchronization. We also show how the technique can be used to verify optimistic implementations with the help of programmer annotations. We developed a verification tool CoLT and evaluated it on a representative sample of Java implementations of the concurrent set data structure. The tool verified linearizability of a number of implementations, found a known error in a lock-free implementation and proved that the corrected version is linearizable.},
author = {Cerny, Pavol and Radhakrishna, Arjun and Zufferey, Damien and Chaudhuri, Swarat and Alur, Rajeev},
institution = {IST Austria},
issn = {2664-1690},
pages = {27},
title = {{Model checking of linearizability of concurrent list implementations}},
doi = {10.15479/AT:IST-2010-0001},
year = {2010},
}
@article{3718,
abstract = {Long-term depression (LTD) is a form of synaptic plasticity that may contribute to information storage in the central nervous system. Here we report that LTD can be elicited in layer 5 pyramidal neurons of the rat prefrontal cortex by pairing low frequency stimulation with a modest postsynaptic depolarization. The induction of LTD required the activation of both metabotropic glutamate receptors of the mGlu1 subtype and voltage-sensitive Ca(2+) channels (VSCCs) of the T/R, P/Q and N types, leading to the stimulation of intracellular inositol trisphosphate (IP3) receptors by IP3 and Ca(2+). The subsequent release of Ca(2+) from intracellular stores activated the protein phosphatase cascade involving calcineurin and protein phosphatase 1. The activation of purinergic P2Y(1) receptors blocked LTD. This effect was prevented by P2Y(1) receptor antagonists and was absent in mice lacking P2Y(1) but not P2Y(2) receptors. We also found that activation of P2Y(1) receptors inhibits Ca(2+) transients via VSCCs in the apical dendrites and spines of pyramidal neurons. In addition, we show that the release of ATP under hypoxia is able to inhibit LTD by acting on postsynaptic P2Y(1) receptors. In conclusion, these data suggest that the reduction of Ca(2+) influx via VSCCs caused by the activation of P2Y(1) receptors by ATP is the possible mechanism for the inhibition of LTD in prefrontal cortex.},
author = {Guzmán, José and Schmidt, Hartmut and Franke, Heike and Krügel, Ute and Eilers, Jens and Illes, Peter and Gerevich, Zoltan},
journal = {Neuropharmacology},
number = {6},
pages = {406--415},
publisher = {Elsevier},
title = {{P2Y1 receptors inhibit long-term depression in the prefrontal cortex}},
doi = {10.1016/j.neuropharm.2010.05.013},
volume = {59},
year = {2010},
}
@inproceedings{3719,
abstract = {The induction of a signaling pathway is characterized by transient complex formation and mutual posttranslational modification of proteins. To faithfully capture this combinatorial process in a math- ematical model is an important challenge in systems biology. Exploiting the limited context on which most binding and modification events are conditioned, attempts have been made to reduce the com- binatorial complexity by quotienting the reachable set of molecular species, into species aggregates while preserving the deterministic semantics of the thermodynamic limit. Recently we proposed a quotienting that also preserves the stochastic semantics and that is complete in the sense that the semantics of individual species can be recovered from the aggregate semantics. In this paper we prove that this quotienting yields a sufficient condition for weak lumpability and that it gives rise to a backward Markov bisimulation between the original and aggregated transition system. We illustrate the framework on a case study of the EGF/insulin receptor crosstalk.},
author = {Feret, Jérôme and Henzinger, Thomas A and Koeppl, Heinz and Petrov, Tatjana},
booktitle = {Proceedings of MeCBIC 2010},
location = {Jena, Germany},
pages = {142--161},
publisher = {Open Publishing Association},
title = {{Lumpability abstractions of rule-based systems}},
volume = {40},
year = {2010},
}
@article{3772,
  author    = {Barton, Nicholas H},
  title     = {{Understanding adaptation in large populations}},
  journal   = {PLoS Genetics},
  volume    = {6},
  number    = {6},
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pgen.1000987},
  year      = {2010},
}
@article{3773,
abstract = {If distinct biological species are to coexist in sympatry, they must be reproductively isolated and must exploit different limiting resources. A two-niche Levene model is analysed, in which habitat preference and survival depend on underlying additive traits. The population genetics of preference and viability are equivalent. However, there is a linear trade-off between the chances of settling in either niche, whereas viabilities may be constrained arbitrarily. With a convex trade-off, a sexual population evolves a single generalist genotype, whereas with a concave trade-off, disruptive selection favours maximal variance. A pure habitat preference evolves to global linkage equilibrium if mating occurs in a single pool, but remarkably, evolves to pairwise linkage equilibrium within niches if mating is within those niches--independent of the genetics. With a concave trade-off, the population shifts sharply between a unimodal distribution with high gene flow and a bimodal distribution with strong isolation, as the underlying genetic variance increases. However, these alternative states are only simultaneously stable for a narrow parameter range. A sharp threshold is only seen if survival in the 'wrong' niche is low; otherwise, strong isolation is impossible. Gene flow from divergent demes makes speciation much easier in parapatry than in sympatry.},
author = {Barton, Nicholas H},
journal = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
number = {1547},
pages = {1825 -- 1840},
publisher = {Royal Society},
title = {{What role does natural selection play in speciation?}},
doi = {10.1098/rstb.2010.0001},
volume = {365},
year = {2010},
}
@article{3774,
abstract = {1. Hybridisation with an invasive species has the potential to alter the phenotype and hence the ecology of a native counterpart. 2. Here data from populations of native red deer Cervus elaphus and invasive sika deer Cervus nippon in Scotland is used to assess the extent to which hybridisation between them is causing phenotypic change. This is done by regression of phenotypic traits against genetic hybrid scores. 3. Hybridisation is causing increases in the body weight of sika-like deer and decreases in the body weight of red-like females. Hybridisation is causing increases in jaw length and increases in incisor arcade breadth in sika-like females. Hybridisation is also causing decreases in incisor arcade breadth in red-like females. 4. There is currently no evidence that hybridisation is causing changes in the kidney fat weight or pregnancy rates of either population. 5. Increased phenotypic similarity between the two species is likely to lead to further hybridisation. The ecological consequences of this are difficult to predict.},
author = {Senn, Helen and Swanson, Graeme and Goodman, Simon and Barton, Nicholas H and Pemberton, Josephine},
journal = {Journal of Animal Ecology},
number = {2},
pages = {414 -- 425},
publisher = {Wiley-Blackwell},
title = {{Phenotypic correlates of hybridisation between red and sika deer (genus Cervus)}},
doi = {10.1111/j.1365-2656.2009.01633.x},
volume = {79},
year = {2010},
}
@article{3776,
abstract = {The prevalence of recombination in eukaryotes poses one of the most puzzling questions in biology. The most compelling general explanation is that recombination facilitates selection by breaking down the negative associations generated by random drift (i.e. Hill-Robertson interference, HRI). I classify the effects of HRI owing to: deleterious mutation, balancing selection and selective sweeps on: neutral diversity, rates of adaptation and the mutation load. These effects are mediated primarily by the density of deleterious mutations and of selective sweeps. Sequence polymorphism and divergence suggest that these rates may be high enough to cause significant interference even in genomic regions of high recombination. However, neither seems able to generate enough variance in fitness to select strongly for high rates of recombination. It is plausible that spatial and temporal fluctuations in selection generate much more fitness variance, and hence selection for recombination, than can be explained by uniformly deleterious mutations or species-wide selective sweeps.},
author = {Barton, Nicholas H},
journal = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
number = {1552},
pages = {2559 -- 2569},
publisher = {Royal Society},
title = {{Genetic linkage and natural selection}},
doi = {10.1098/rstb.2010.0106},
volume = {365},
year = {2010},
}
@article{3777,
abstract = {Under the classical view, selection depends more or less directly on mutation: standing genetic variance is maintained by a balance between selection and mutation, and adaptation is fuelled by new favourable mutations. Recombination is favoured if it breaks negative associations among selected alleles, which interfere with adaptation. Such associations may be generated by negative epistasis, or by random drift (leading to the Hill-Robertson effect). Both deterministic and stochastic explanations depend primarily on the genomic mutation rate, U. This may be large enough to explain high recombination rates in some organisms, but seems unlikely to be so in general. Random drift is a more general source of negative linkage disequilibria, and can cause selection for recombination even in large populations, through the chance loss of new favourable mutations. The rate of species-wide substitutions is much too low to drive this mechanism, but local fluctuations in selection, combined with gene flow, may suffice. These arguments are illustrated by comparing the interaction between good and bad mutations at unlinked loci under the infinitesimal model.},
author = {Barton, Nicholas H},
journal = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
number = {1544},
pages = {1281 -- 1294},
publisher = {Royal Society},
title = {{Mutation and the evolution of recombination}},
doi = {10.1098/rstb.2009.0320},
volume = {365},
year = {2010},
}
@article{3779,
abstract = {Crosses between closely related species give two contrasting results. One result is that species hybrids may be inferior to their parents, for example, being less fertile [1]. The other is that F1 hybrids may display superior performance (heterosis), for example with increased vigour [2]. Although various hypotheses have been proposed to account for these two aspects of hybridisation, their biological basis is still poorly understood [3]. To gain further insights into this issue, we analysed the role that variation in gene expression may play. We took a conserved trait, flower asymmetry in Antirrhinum, and determined the extent to which the underlying regulatory genes varied in expression among closely related species. We show that expression of both genes analysed, CYC and RAD, varies significantly between species because of cis-acting differences. By making a quantitative genotype-phenotype map, using a range of mutant alleles, we demonstrate that the species lie on a plateau in gene expression-morphology space, so that the variation has no detectable phenotypic effect. However, phenotypic differences can be revealed by shifting genotypes off the plateau through genetic crosses. Our results can be readily explained if genomes are free to evolve within an effectively neutral zone in gene expression space. The consequences of this drift will be negligible for individual loci, but when multiple loci across the genome are considered, we show that the variation may have significant effects on phenotype and fitness, causing a significant drift load. By considering these consequences for various gene-expression-fitness landscapes, we conclude that F1 hybrids might be expected to show increased performance with regard to conserved traits, such as basic physiology, but reduced performance with regard to others. Thus, our study provides a new way of explaining how various aspects of hybrid performance may arise through natural variation in gene activity.},
author = {Rosas, Ulises and Barton, Nicholas H and Copsey, Lucy and Barbier De Reuille, Pierre and Coen, Enrico},
journal = {PLoS Biology},
number = {7},
publisher = {Public Library of Science},
title = {{Cryptic variation between species and the basis of hybrid performance}},
doi = {10.1371/journal.pbio.1000429},
volume = {8},
year = {2010},
}
@inproceedings{3782,
abstract = {In cortex surface segmentation, the extracted surface is required to have a particular topology, namely, a two-sphere. We present a new method for removing topology noise of a curve or surface within the level set framework, and thus produce a cortical surface with correct topology. We define a new energy term which quantifies topology noise. We then show how to minimize this term by computing its functional derivative with respect to the level set function. This method differs from existing methods in that it is inherently continuous and not digital; and in the way that our energy directly relates to the topology of the underlying curve or surface, versus existing knot-based measures which are related in a more indirect fashion. The proposed flow is validated empirically.},
author = {Chen, Chao and Freedman, Daniel},
booktitle = {Conference proceedings MCV 2010},
location = {Beijing, China},
pages = {31 -- 42},
publisher = {Springer},
title = {{Topology noise removal for curve and surface evolution}},
doi = {10.1007/978-3-642-18421-5_4},
volume = {6533},
year = {2010},
}
@article{3783,
abstract = {MICROSATELIGHT is a Perl/Tk pipeline with a graphical user interface that facilitates several tasks when scoring microsatellites. It implements new subroutines in R and PERL and takes advantage of features provided by previously developed freeware. MICROSATELIGHT takes raw genotype data and automates the peak identification through PeakScanner. The PeakSelect subroutine assigns peaks to different microsatellite markers according to their multiplex group, fluorochrome type, and size range. After peak selection, binning of alleles can be carried out 1) automatically through AlleloBin or 2) by manual bin definition through Binator. In both cases, several features for quality checking and further binning improvement are provided. The genotype table can then be converted into input files for several population genetics programs through CREATE. Finally, Hardy–Weinberg equilibrium tests and confidence intervals for null allele frequency can be obtained through GENEPOP. MICROSATELIGHT is the only freely available public-domain software that facilitates full multiplex microsatellite scoring, from electropherogram files to user-defined text files to be used with population genetics software. MICROSATELIGHT has been created for the Windows XP operating system and has been successfully tested under Windows 7. It is available at http://sourceforge.net/projects/microsatelight/.},
author = {Palero, Ferran and González Candelas, Fernando and Pascual, Marta},
journal = {Journal of Heredity},
number = {2},
pages = {247 -- 249},
publisher = {Oxford University Press},
title = {{Microsatelight – Pipeline to expedite microsatellite analysis}},
doi = {10.1093/jhered/esq111},
volume = {102},
year = {2010},
}
@article{3785,
abstract = {Most fisheries involving spiny lobsters of the genus Palinurus have been over exploited during the last decades, so there is a rising concern about management decisions for these valuable resources. A total of 13 microsatellite DNA loci recently developed in Palinurus elephas were assayed in order to assess genetic diversity levels in every known species of the genus. Microsatellite markers gave amplifications and showed polymorphism in all species, with gene diversity values varying from 0.650 ± 0.077 SD (Palinurus barbarae) to 0.792 ± 0.051 SD (Palinurus elephas). Most importantly, when depth distribution was taken into account, shallower water species consistently showed larger historical effective population sizes than their deeper-water counterparts. This could explain why deeper-water species are more sensitive to overfishing, and would indicate that overexploitation may have a larger impact on their long-term genetic diversity.},
author = {Palero, Ferran and Abello, Pere and Macpherson, Enrique and Matthee, Conrad A. and Pascual, Marta},
journal = {Journal of Crustacean Biology},
number = {4},
pages = {658 -- 663},
publisher = {BioOne},
title = {{Genetic diversity levels in fishery-exploited spiny lobsters of the Genus Palinurus (Decapoda: Achelata)}},
doi = {10.1651/09-3192.1},
volume = {30},
year = {2010},
}
@article{3787,
abstract = {DNA samples were extracted from ethanol and formalin-fixed decapod crustacean tissue using a new method based on Tetramethylsilane (TMS)-Chelex. It is shown that neither an indigestible matrix of cross-linked protein nor soluble PCR inhibitors impede PCR success when dealing with formalin-fixed material. Instead, amplification success from formalin-fixed tissue appears to depend on the presence of unmodified DNA in the extracted sample. A staining method that facilitates the targeting of samples with a high content of unmodified DNA is provided.},
author = {Palero, Ferran and Hall, Sally and Clark, Paul and Johnston, David and Mackenzie Dodds, Jackie and Thatje, Sven},
journal = {Scientia Marina},
number = {3},
pages = {465 -- 470},
publisher = {Consejo Superior de Investigaciones Científicas},
title = {{DNA extraction from formalin-fixed tissue: new light from the deep sea}},
doi = {10.3989/scimar.2010.74n3465},
volume = {74},
year = {2010},
}
@article{3788,
abstract = {Cell sorting is a widespread phenomenon pivotal to the early development of multicellular organisms. In vitro cell sorting studies have been instrumental in revealing the cellular properties driving this process. However, these studies have as yet been limited to two-dimensional analysis of three-dimensional cell sorting events. Here we describe a method to record the sorting of primary zebrafish ectoderm and mesoderm germ layer progenitor cells in three dimensions over time, and quantitatively analyze their sorting behavior using an order parameter related to heterotypic interface length. We investigate the cell population size dependence of sorted aggregates and find that the germ layer progenitor cells engulfed in the final configuration display a relationship between total interfacial length and system size according to a simple geometrical argument, subject to a finite-size effect.},
author = {Klopper, Abigail and Krens, Gabriel and Grill, Stephan and Heisenberg, Carl-Philipp J},
journal = {The European Physical Journal E: Soft Matter and Biological Physics},
number = {2},
pages = {99 -- 103},
publisher = {Springer},
title = {{Finite-size corrections to scaling behavior in sorted cell aggregates}},
doi = {10.1140/epje/i2010-10642-y},
volume = {33},
year = {2010},
}
@article{3789,
abstract = {The development of multicellular organisms is dependent on the tight coordination between tissue growth and morphogenesis. The stereotypical orientation of cell divisions has been proposed to be a fundamental mechanism by which proliferating and growing tissues take shape. However, the actual contribution of stereotypical division orientation (SDO) to tissue morphogenesis is unclear. In zebrafish, cell divisions with stereotypical orientation have been implicated in both body-axis elongation and neural rod formation [1, 2], although there is little direct evidence for a critical function of SDO in either of these processes. Here we show that SDO is required for formation of the neural rod midline during neurulation but dispensable for elongation of the body axis during gastrulation. Our data indicate that SDO during both gastrulation and neurulation is dependent on the noncanonical Wnt receptor Frizzled 7 (Fz7) and that interfering with cell division orientation leads to severe defects in neural rod midline formation but not body-axis elongation. These findings suggest a novel function for Fz7-controlled cell division orientation in neural rod midline formation during neurulation. },
author = {Quesada-Hernández, Elena and Caneparo, Luca and Schneider, Sylvia and Winkler, Sylke and Liebling, Michael and Fraser, Scott and Heisenberg, Carl-Philipp J},
journal = {Current Biology},
number = {21},
pages = {1966 -- 1972},
publisher = {Cell Press},
title = {{Stereotypical cell division orientation controls neural rod midline formation in zebrafish}},
doi = {10.1016/j.cub.2010.10.009},
volume = {20},
year = {2010},
}
@article{3790,
abstract = {Cell shape and motility are primarily controlled by cellular mechanics. The attachment of the plasma membrane to the underlying actomyosin cortex has been proposed to be important for cellular processes involving membrane deformation. However, little is known about the actual function of membrane-to-cortex attachment (MCA) in cell protrusion formation and migration, in particular in the context of the developing embryo. Here, we use a multidisciplinary approach to study MCA in zebrafish mesoderm and endoderm (mesendoderm) germ layer progenitor cells, which migrate using a combination of different protrusion types, namely, lamellipodia, filopodia, and blebs, during zebrafish gastrulation. By interfering with the activity of molecules linking the cortex to the membrane and measuring resulting changes in MCA by atomic force microscopy, we show that reducing MCA in mesendoderm progenitors increases the proportion of cellular blebs and reduces the directionality of cell migration. We propose that MCA is a key parameter controlling the relative proportions of different cell protrusion types in mesendoderm progenitors, and thus is key in controlling directed migration during gastrulation.},
author = {Diz Muñoz, Alba and Krieg, Michael and Bergert, Martin and Ibarlucea Benitez, Itziar and Müller, Daniel and Paluch, Ewa and Heisenberg, Carl-Philipp J},
journal = {PLoS Biology},
number = {11},
publisher = {Public Library of Science},
title = {{Control of directed cell migration in vivo by membrane-to-cortex attachment}},
doi = {10.1371/journal.pbio.1000544},
volume = {8},
year = {2010},
}
@inproceedings{3793,
abstract = {Recent progress in per-pixel object class labeling of natural images can be attributed to the use of multiple types of image features and sound statistical learning approaches. Within the latter, Conditional Random Fields (CRF) are prominently used for their ability to represent interactions between random variables. Despite their popularity in computer vision, parameter learning for CRFs has remained difficult, popular approaches being cross-validation and piecewise training.
In this work, we propose a simple yet expressive tree-structured CRF based on a recent hierarchical image segmentation method. Our model combines and weights multiple image features within a hierarchical representation and allows simple and efficient globally-optimal learning of $\approx 10^5$ parameters. The tractability of our model allows us to pose and answer some of the open questions regarding parameter learning applying to CRF-based approaches. The key findings for learning CRF models are, from the obvious to the surprising, i) multiple image features always help, ii) the limiting dimension with respect to current models is the amount of training data, iii) piecewise training is competitive, iv) current methods for max-margin training fail for models with many parameters.
},
author = {Nowozin, Sebastian and Gehler, Peter and Lampert, Christoph},
location = {Heraklion, Crete, Greece},
pages = {98 -- 111},
publisher = {Springer},
title = {{On parameter learning in CRF-based approaches to object class image segmentation}},
doi = {10.1007/978-3-642-15567-3_8},
volume = {6316},
year = {2010},
}
@inproceedings{3794,
abstract = {We study the problem of multimodal dimensionality reduction assuming that data samples can be missing at training time, and not all data modalities may be present at application time. Maximum covariance analysis, as a generalization of PCA, has many desirable properties, but its application to practical problems is limited by its need for perfectly paired data. We overcome this limitation by a latent variable approach that allows working with weakly paired data and is still able to efficiently process large datasets using standard numerical routines. The resulting weakly paired maximum covariance analysis often finds better representations than alternative methods, as we show in two exemplary tasks: texture discrimination and transfer learning.},
author = {Lampert, Christoph and Krömer, Oliver},
location = {Heraklion, Crete, Greece},
pages = {566 -- 579},
publisher = {Springer},
title = {{Weakly-paired maximum covariance analysis for multimodal dimensionality reduction and transfer learning}},
doi = {10.1007/978-3-642-15552-9_41},
volume = {6312},
year = {2010},
}
@incollection{3795,
abstract = {The (apparent) contour of a smooth mapping from a 2-manifold to the plane, f: M → R2 , is the set of critical values, that is, the image of the points at which the gradients of the two component functions are linearly dependent. Assuming M is compact and orientable and measuring difference with the erosion distance, we prove that the contour is stable.},
author = {Edelsbrunner, Herbert and Morozov, Dmitriy and Patel, Amit},
booktitle = {Topological Data Analysis and Visualization: Theory, Algorithms and Applications},
pages = {27 -- 42},
publisher = {Springer},
title = {{The stability of the apparent contour of an orientable 2-manifold}},
doi = {10.1007/978-3-642-15014-2_3},
year = {2010},
}
@article{3832,
abstract = {A recent paper by von Engelhardt et al. identifies a novel auxiliary subunit of native AMPARs, termed CKAMP44. Unlike other auxiliary subunits, CKAMP44 accelerates desensitization and prolongs recovery from desensitization. CKAMP44 is highly expressed in hippocampal dentate gyrus granule cells and decreases the paired-pulse ratio at perforant path input synapses. Thus, both principal and auxiliary AMPAR subunits control the time course of signaling at glutamatergic synapses.},
author = {Guzmán, José and Jonas, Peter M},
journal = {Neuron},
number = {1},
pages = {8 -- 10},
publisher = {Elsevier},
title = {{Beyond TARPs: The growing list of auxiliary AMPAR subunits}},
doi = {10.1016/j.neuron.2010.04.003},
volume = {66},
year = {2010},
}
@article{3833,
author = {Jonas, Peter M and Hefft, Stefan},
journal = {The European Journal of Neuroscience},
number = {7},
pages = {1194 -- 1195},
publisher = {Wiley-Blackwell},
title = {{GABA release at terminals of CCK-interneurons: synchrony, asynchrony and modulation by cannabinoid receptors (commentary on Ali \& Todorova)}},
doi = {10.1111/j.1460-9568.2010.07189.x},
volume = {31},
year = {2010},
}
@article{3834,
abstract = {Background
The chemical master equation (CME) is a system of ordinary differential equations that describes the evolution of a network of chemical reactions as a stochastic process. Its solution yields the probability density vector of the system at each point in time. Solving the CME numerically is in many cases computationally expensive or even infeasible as the number of reachable states can be very large or infinite. We introduce the sliding window method, which computes an approximate solution of the CME by performing a sequence of local analysis steps. In each step, only a manageable subset of states is considered, representing a "window" into the state space. In subsequent steps, the window follows the direction in which the probability mass moves, until the time period of interest has elapsed. We construct the window based on a deterministic approximation of the future behavior of the system by estimating upper and lower bounds on the populations of the chemical species.
Results
In order to show the effectiveness of our approach, we apply it to several examples previously described in the literature. The experimental results show that the proposed method speeds up the analysis considerably, compared to a global analysis, while still providing high accuracy.
Conclusions
The sliding window method is a novel approach to address the performance problems of numerical algorithms for the solution of the chemical master equation. The method efficiently approximates the probability distributions at the time points of interest for a variety of chemically reacting systems, including systems for which no upper bound on the population sizes of the chemical species is known a priori.},
author = {Wolf, Verena and Goel, Rushil and Mateescu, Maria and Henzinger, Thomas A},
journal = {BMC Systems Biology},
number = {42},
pages = {1 -- 19},
publisher = {BioMed Central},
title = {{Solving the chemical master equation using sliding windows}},
doi = {10.1186/1752-0509-4-42},
volume = {4},
year = {2010},
}
@inproceedings{3838,
abstract = {We present a numerical approximation technique for the analysis of continuous-time Markov chains that describe net- works of biochemical reactions and play an important role in the stochastic modeling of biological systems. Our approach is based on the construction of a stochastic hybrid model in which certain discrete random variables of the original Markov chain are approximated by continuous deterministic variables. We compute the solution of the stochastic hybrid model using a numerical algorithm that discretizes time and in each step performs a mutual update of the transient prob- ability distribution of the discrete stochastic variables and the values of the continuous deterministic variables. We im- plemented the algorithm and we demonstrate its usefulness and efficiency on several case studies from systems biology.},
author = {Henzinger, Thomas A and Mateescu, Maria and Mikeev, Linar and Wolf, Verena},
location = {Trento, Italy},
pages = {55 -- 65},
publisher = {ACM},
title = {{Hybrid numerical solution of the chemical master equation}},
doi = {10.1145/1839764.1839772},
year = {2010},
}
@inproceedings{3839,
abstract = {We present a loop property generation method for loops iterating over multi-dimensional arrays. When used on matrices, our method is able to infer their shapes (also called types), such as upper-triangular, diagonal, etc. To gen- erate loop properties, we first transform a nested loop iterating over a multi- dimensional array into an equivalent collection of unnested loops. Then, we in- fer quantified loop invariants for each unnested loop using a generalization of a recurrence-based invariant generation technique. These loop invariants give us conditions on matrices from which we can derive matrix types automatically us- ing theorem provers. Invariant generation is implemented in the software package Aligator and types are derived by theorem provers and SMT solvers, including Vampire and Z3. When run on the Java matrix package JAMA, our tool was able to infer automatically all matrix types describing the matrix shapes guaranteed by JAMA’s API.},
author = {Henzinger, Thomas A and Hottelier, Thibaud and Kovács, Laura and Voronkov, Andrei},
location = {Madrid, Spain},
pages = {163 -- 179},
publisher = {Springer},
title = {{Invariant and type inference for matrices}},
doi = {10.1007/978-3-642-11319-2_14},
volume = {5944},
year = {2010},
}
@inproceedings{3840,
abstract = {Classical formalizations of systems and properties are boolean: given a system and a property, the property is either true or false of the system. Correspondingly, classical methods for system analysis determine the truth value of a property, preferably giving a proof if the property is true, and a counterexample if the property is false; classical methods for system synthesis construct a system for which a property is true; classical methods for system transformation, composition, and abstraction aim to preserve the truth of properties. The boolean view is prevalent even if the system, the property, or both refer to numerical quantities, such as the times or probabilities of events. For example, a timed automaton either satisfies or violates a formula of a real-time logic; a stochastic process either satisfies or violates a formula of a probabilistic logic. The classical black-and-white view partitions the world into "correct" and "incorrect" systems, offering few nuances. In reality, of several systems that satisfy a property in the boolean sense, often some are more desirable than others, and of the many systems that violate a property, usually some are less objectionable than others. For instance, among the systems that satisfy the response property that every request be granted, we may prefer systems that grant requests quickly (the quicker, the better), or we may prefer systems that issue few unnecessary grants (the fewer, the better); and among the systems that violate the response property, we may prefer systems that serve many initial requests (the more, the better), or we may prefer systems that serve many requests in the long run (the greater the fraction of served to unserved requests, the better). 
Formally, while a boolean notion of correctness is given by a preorder on systems and properties, a quantitative notion of correctness is defined by a directed metric on systems and properties, where the distance between a system and a property provides a measure of "fit" or "desirability." There are many ways how such distances can be defined. In a linear-time framework, one assigns numerical values to individual behaviors before assigning values to systems and properties, which are sets of behaviors. For example, the value of a single behavior may be a discounted value, which is largely determined by a prefix of the behavior, e.g., by the number of requests that are granted before the first request that is not granted; or a limit value, which is independent of any finite prefix. A limit value may be an average, such as the average response time over an infinite sequence of requests and grants, or a supremum, such as the worst-case response time. Similarly, the value of a set of behaviors may be an extremum or an average across the values of all behaviors in the set: in this way one can measure the worst of all possible average-case response times, or the average of all possible worst-case response times, etc. Accordingly, the distance between two sets of behaviors may be defined as the worst or average difference between the values of corresponding behaviors. In summary, we propagate replacing boolean specifications for the correctness of systems with quantitative measures for the desirability of systems. In quantitative analysis, the aim is to compute the distance between a system and a property (or between two systems, or two properties); in quantitative synthesis, the objective is to construct a system that has minimal distance from a given property. Multiple quantitative measures can be prioritized (e.g., combined lexicographically into a single measure) or studied along the Pareto curve. 
Quantitative transformations, compositions, and abstractions of systems are useful if they allow us to bound the induced change in distance from a property. We present some initial results in some of these directions. We also give some potential applications, which not only generalize traditional correctness concerns in the functional, timed, and probabilistic domains, but also capture such system measures as resource use, performance, cost, reliability, and robustness.},
author = {Henzinger, Thomas A},
location = {Madrid, Spain},
number = {1},
pages = {157 -- 158},
publisher = {ACM},
title = {{From boolean to quantitative notions of correctness}},
doi = {10.1145/1706299.1706319},
volume = {45},
year = {2010},
}
@article{3842,
abstract = {Within systems biology there is an increasing interest in the stochastic behavior of biochemical reaction networks. An appropriate stochastic description is provided by the chemical master equation, which represents a continuous-time Markov chain (CTMC). The uniformization technique is an efficient method to compute probability distributions of a CTMC if the number of states is manageable. However, the size of a CTMC that represents a biochemical reaction network is usually far beyond what is feasible. In this paper we present an on-the-fly variant of uniformization, where we improve the original algorithm at the cost of a small approximation error. By means of several examples, we show that our approach is particularly well-suited for biochemical reaction networks.},
author = {Didier, Frédéric and Henzinger, Thomas A and Mateescu, Maria and Wolf, Verena},
journal = {IET Systems Biology},
number = {6},
pages = {441 -- 452},
publisher = {Institution of Engineering and Technology},
title = {{Fast adaptive uniformization of the chemical master equation}},
doi = {10.1049/iet-syb.2010.0005},
volume = {4},
year = {2010},
}
@inproceedings{3845,
abstract = {This paper presents Aligators, a tool for the generation of universally quantified array invariants. Aligators leverages recurrence solving and algebraic techniques to carry out inductive reasoning over array content. The Aligators’ loop extraction module allows treatment of multi-path loops by exploiting their commutativity and serializability properties. Our experience in applying Aligators on a collection of loops from open source software projects indicates the applicability of recurrence and algebraic solving techniques for reasoning about arrays.},
author = {Henzinger, Thomas A and Hottelier, Thibaud and Kovács, Laura and Rybalchenko, Andrey},
location = {Yogyakarta, Indonesia},
pages = {348 -- 356},
publisher = {Springer},
title = {{Aligators for arrays}},
doi = {10.1007/978-3-642-16242-8_25},
volume = {6397},
year = {2010},
}
@inproceedings{3847,
abstract = {The importance of stochasticity within biological systems has been shown repeatedly during the last years and has raised the need for efficient stochastic tools. We present SABRE, a tool for stochastic analysis of biochemical reaction networks. SABRE implements fast adaptive uniformization (FAU), a direct numerical approximation algorithm for computing transient solutions of biochemical reaction networks. Biochemical reactions networks represent biological systems studied at a molecular level and these reactions can be modeled as transitions of a Markov chain. SABRE accepts as input the formalism of guarded commands, which it interprets either as continuous-time or as discrete-time Markov chains. Besides operating in a stochastic mode, SABRE may also perform a deterministic analysis by directly computing a mean-field approximation of the system under study. We illustrate the different functionalities of SABRE by means of biological case studies.},
author = {Didier, Frédéric and Henzinger, Thomas A and Mateescu, Maria and Wolf, Verena},
location = {Williamsburg, USA},
pages = {193 -- 194},
publisher = {IEEE},
title = {{SABRE: A tool for the stochastic analysis of biochemical reaction networks}},
doi = {10.1109/QEST.2010.33},
year = {2010},
}
@inproceedings{3848,
abstract = {We define the robustness of a level set homology class of a function f: X → R as the magnitude of a perturbation necessary to kill the class. Casting this notion into a group theoretic framework, we compute the robustness for each class, using a connection to extended persistent homology. The special case X = R^3 has ramifications in medical imaging and scientific visualization.},
author = {Bendich, Paul and Edelsbrunner, Herbert and Morozov, Dmitriy and Patel, Amit},
location = {Liverpool, UK},
pages = {1 -- 10},
publisher = {Springer},
title = {{The robustness of level sets}},
doi = {10.1007/978-3-642-15775-2_1},
volume = {6346},
year = {2010},
}
@inproceedings{3849,
abstract = {Using ideas from persistent homology, the robustness of a level set of a real-valued function is defined in terms of the magnitude of the perturbation necessary to kill the classes. Prior work has shown that the homology and robustness information can be read off the extended persistence diagram of the function. This paper extends these results to a non-uniform error model in which perturbations vary in their magnitude across the domain.},
author = {Bendich, Paul and Edelsbrunner, Herbert and Kerber, Michael and Patel, Amit},
location = {Brno, Czech Republic},
pages = {12 -- 23},
publisher = {Springer},
title = {{Persistent homology under non-uniform error}},
doi = {10.1007/978-3-642-15155-2_2},
volume = {6281},
year = {2010},
}
@inproceedings{3850,
abstract = {Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance ε in Hausdorff distance, as the Minkowski sum of another polygonal shape with a disk of fixed radius? If it does, we also seek a preferably simple solution shape P; P’s offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give a decision algorithm for fixed radius in O(n log n) time that handles any polygonal shape. For convex shapes, the complexity drops to O(n), which is also the time required to compute a solution shape P with at most one more vertex than a vertex-minimal one.},
author = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
location = {Dortmund, Germany},
pages = {12 -- 23},
publisher = {TU Dortmund},
title = {{Polygonal reconstruction from approximate offsets}},
year = {2010},
}
@inproceedings{3851,
abstract = {Energy parity games are infinite two-player turn-based games played on weighted graphs. The objective of the game combines a (qualitative) parity condition with the (quantitative) requirement that the sum of the weights (i.e., the level of energy in the game) must remain positive. Beside their own interest in the design and synthesis of resource-constrained omega-regular specifications, energy parity games provide one of the simplest model of games with combined qualitative and quantitative objective. Our main results are as follows: (a) exponential memory is sufficient and may be necessary for winning strategies in energy parity games; (b) the problem of deciding the winner in energy parity games can be solved in NP ∩ coNP; and (c) we give an algorithm to solve energy parity by reduction to energy games. We also show that the problem of deciding the winner in energy parity games is polynomially equivalent to the problem of deciding the winner in mean-payoff parity games, which can thus be solved in NP ∩ coNP. As a consequence we also obtain a conceptually simple algorithm to solve mean-payoff parity games.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
location = {Bordeaux, France},
pages = {599 -- 610},
publisher = {Springer},
title = {{Energy parity games}},
doi = {10.1007/978-3-642-14162-1_50},
volume = {6199},
year = {2010},
}
@inproceedings{3852,
abstract = {We introduce two-level discounted games played by two players on a perfect-information stochastic game graph. The upper level game is a discounted game and the lower level game is an undiscounted reachability game. Two-level games model hierarchical and sequential decision making under uncertainty across different time scales. We show the existence of pure memoryless optimal strategies for both players and an ordered field property for such games. We show that if there is only one player (Markov decision processes), then the values can be computed in polynomial time. It follows that whether the value of a player is equal to a given rational constant in two-level discounted games can be decided in NP intersected coNP. We also give an alternate strategy improvement algorithm to compute the value. },
author = {Chatterjee, Krishnendu and Majumdar, Rupak},
location = {Minori, Italy},
pages = {22 -- 29},
publisher = {EPTCS},
title = {{Discounting in games across time scales}},
doi = {10.4204/EPTCS.25.6},
volume = {25},
year = {2010},
}
@inproceedings{3853,
abstract = {Quantitative languages are an extension of boolean languages that assign to each word a real number. Mean-payoff automata are finite automata with numerical weights on transitions that assign to each infinite path the long-run average of the transition weights. When the mode of branching of the automaton is deterministic, nondeterministic, or alternating, the corresponding class of quantitative languages is not robust as it is not closed under the pointwise operations of max, min, sum, and numerical complement. Nondeterministic and alternating mean-payoff automata are not decidable either, as the quantitative generalization of the problems of universality and language inclusion is undecidable. We introduce a new class of quantitative languages, defined by mean-payoff automaton expressions, which is robust and decidable: it is closed under the four pointwise operations, and we show that all decision problems are decidable for this class. Mean-payoff automaton expressions subsume deterministic mean-payoff automata, and we show that they have expressive power incomparable to nondeterministic and alternating mean-payoff automata. We also present for the first time an algorithm to compute distance between two quantitative languages, and in our case the quantitative languages are given as mean-payoff automaton expressions.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Edelsbrunner, Herbert and Henzinger, Thomas A and Rannou, Philippe},
location = {Paris, France},
pages = {269 -- 283},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Mean-payoff automaton expressions}},
doi = {10.1007/978-3-642-15375-4_19},
volume = {6269},
year = {2010},
}
@inproceedings{3854,
abstract = {Graph games of infinite length provide a natural model for open reactive systems: one player (Eve) represents the controller and the other player (Adam) represents the environment. The evolution of the system depends on the decisions of both players. The specification for the system is usually given as an ω-regular language L over paths and Eve’s goal is to ensure that the play belongs to L irrespective of Adam’s behaviour. The classical notion of winning strategies fails to capture several interesting scenarios. For example, strong fairness (Streett) conditions are specified by a number of request-grant pairs and require every pair that is requested infinitely often to be granted infinitely often: Eve might win just by preventing Adam from making any new request, but a “better” strategy would allow Adam to make as many requests as possible and still ensure fairness. To address such questions, we introduce the notion of obliging games, where Eve has to ensure a strong condition Φ, while always allowing Adam to satisfy a weak condition Ψ. We present a linear time reduction of obliging games with two Muller conditions Φ and Ψ to classical Muller games. We consider obliging Streett games and show they are co-NP complete, and show a natural quantitative optimisation problem for obliging Streett games is in FNP. We also show how obliging games can provide new and interesting semantics for multi-player games.},
author = {Chatterjee, Krishnendu and Horn, Florian and Löding, Christof},
location = {Paris, France},
pages = {284 -- 296},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Obliging games}},
doi = {10.1007/978-3-642-15375-4_20},
volume = {6269},
year = {2010},
}