@inproceedings{3782,
abstract = {In cortex surface segmentation, the extracted surface is required to have a particular topology, namely, a two-sphere. We present a new method for removing topology noise of a curve or surface within the level set framework, and thus produce a cortical surface with correct topology. We define a new energy term which quantifies topology noise. We then show how to minimize this term by computing its functional derivative with respect to the level set function. This method differs from existing methods in that it is inherently continuous and not digital; and in the way that our energy directly relates to the topology of the underlying curve or surface, versus existing knot-based measures which are related in a more indirect fashion. The proposed flow is validated empirically.},
author = {Chen, Chao and Freedman, Daniel},
booktitle = {Conference proceedings MCV 2010},
location = {Beijing, China},
pages = {31 -- 42},
publisher = {Springer},
title = {{Topology noise removal for curve and surface evolution}},
doi = {10.1007/978-3-642-18421-5_4},
volume = {6533},
year = {2010},
}
@article{3787,
abstract = {DNA samples were extracted from ethanol and formalin-fixed decapod crustacean tissue using a new method based on Tetramethylsilane (TMS)-Chelex. It is shown that neither an indigestible matrix of cross-linked protein nor soluble PCR inhibitors impede PCR success when dealing with formalin-fixed material. Instead, amplification success from formalin-fixed tissue appears to depend on the presence of unmodified DNA in the extracted sample. A staining method that facilitates the targeting of samples with a high content of unmodified DNA is provided.},
author = {Palero, Ferran and Hall, Sally and Clark, Paul and Johnston, David and Mackenzie Dodds, Jackie and Thatje, Sven},
journal = {Scientia Marina},
number = {3},
pages = {465 -- 470},
publisher = {Consejo Superior de Investigaciones Científicas},
title = {{DNA extraction from formalin-fixed tissue: new light from the deep sea}},
doi = {10.3989/scimar.2010.74n3465},
volume = {74},
year = {2010},
}
@inproceedings{3794,
abstract = {We study the problem of multimodal dimensionality reduction assuming that data samples can be missing at training time, and not all data modalities may be present at application time. Maximum covariance analysis, as a generalization of PCA, has many desirable properties, but its application to practical problems is limited by its need for perfectly paired data. We overcome this limitation by a latent variable approach that allows working with weakly paired data and is still able to efficiently process large datasets using standard numerical routines. The resulting weakly paired maximum covariance analysis often finds better representations than alternative methods, as we show in two exemplary tasks: texture discrimination and transfer learning.},
author = {Lampert, Christoph and Krömer, Oliver},
location = {Heraklion, Crete, Greece},
pages = {566 -- 579},
publisher = {Springer},
title = {{Weakly-paired maximum covariance analysis for multimodal dimensionality reduction and transfer learning}},
doi = {10.1007/978-3-642-15552-9_41},
volume = {6312},
year = {2010},
}
@article{3718,
abstract = {Long-term depression (LTD) is a form of synaptic plasticity that may contribute to information storage in the central nervous system. Here we report that LTD can be elicited in layer 5 pyramidal neurons of the rat prefrontal cortex by pairing low frequency stimulation with a modest postsynaptic depolarization. The induction of LTD required the activation of both metabotropic glutamate receptors of the mGlu1 subtype and voltage-sensitive Ca(2+) channels (VSCCs) of the T/R, P/Q and N types, leading to the stimulation of intracellular inositol trisphosphate (IP3) receptors by IP3 and Ca(2+). The subsequent release of Ca(2+) from intracellular stores activated the protein phosphatase cascade involving calcineurin and protein phosphatase 1. The activation of purinergic P2Y(1) receptors blocked LTD. This effect was prevented by P2Y(1) receptor antagonists and was absent in mice lacking P2Y(1) but not P2Y(2) receptors. We also found that activation of P2Y(1) receptors inhibits Ca(2+) transients via VSCCs in the apical dendrites and spines of pyramidal neurons. In addition, we show that the release of ATP under hypoxia is able to inhibit LTD by acting on postsynaptic P2Y(1) receptors. In conclusion, these data suggest that the reduction of Ca(2+) influx via VSCCs caused by the activation of P2Y(1) receptors by ATP is the possible mechanism for the inhibition of LTD in prefrontal cortex.},
author = {Guzmán, José and Schmidt, Hartmut and Franke, Heike and Krügel, Ute and Eilers, Jens and Illes, Peter and Gerevich, Zoltan},
journal = {Neuropharmacology},
number = {6},
pages = {406 -- 415},
publisher = {Elsevier},
title = {{P2Y1 receptors inhibit long-term depression in the prefrontal cortex.}},
doi = {10.1016/j.neuropharm.2010.05.013},
volume = {59},
year = {2010},
}
@article{3833,
author = {Jonas, Peter M and Hefft, Stefan},
journal = {The European Journal of Neuroscience},
number = {7},
pages = {1194 -- 1195},
publisher = {Wiley-Blackwell},
title = {{GABA release at terminals of CCK-interneurons: synchrony, asynchrony and modulation by cannabinoid receptors (commentary on Ali \& Todorova)}},
doi = {10.1111/j.1460-9568.2010.07189.x},
volume = {31},
year = {2010},
}
@inproceedings{3838,
abstract = {We present a numerical approximation technique for the analysis of continuous-time Markov chains that describe net- works of biochemical reactions and play an important role in the stochastic modeling of biological systems. Our approach is based on the construction of a stochastic hybrid model in which certain discrete random variables of the original Markov chain are approximated by continuous deterministic variables. We compute the solution of the stochastic hybrid model using a numerical algorithm that discretizes time and in each step performs a mutual update of the transient prob- ability distribution of the discrete stochastic variables and the values of the continuous deterministic variables. We im- plemented the algorithm and we demonstrate its usefulness and efficiency on several case studies from systems biology.},
author = {Henzinger, Thomas A and Mateescu, Maria and Mikeev, Linar and Wolf, Verena},
location = {Trento, Italy},
pages = {55 -- 65},
publisher = {ACM},
title = {{Hybrid numerical solution of the chemical master equation}},
doi = {10.1145/1839764.1839772},
year = {2010},
}
@inproceedings{3840,
abstract = {Classical formalizations of systems and properties are boolean: given a system and a property, the property is either true or false of the system. Correspondingly, classical methods for system analysis determine the truth value of a property, preferably giving a proof if the property is true, and a counterexample if the property is false; classical methods for system synthesis construct a system for which a property is true; classical methods for system transformation, composition, and abstraction aim to preserve the truth of properties. The boolean view is prevalent even if the system, the property, or both refer to numerical quantities, such as the times or probabilities of events. For example, a timed automaton either satisfies or violates a formula of a real-time logic; a stochastic process either satisfies or violates a formula of a probabilistic logic. The classical black-and-white view partitions the world into "correct" and "incorrect" systems, offering few nuances. In reality, of several systems that satisfy a property in the boolean sense, often some are more desirable than others, and of the many systems that violate a property, usually some are less objectionable than others. For instance, among the systems that satisfy the response property that every request be granted, we may prefer systems that grant requests quickly (the quicker, the better), or we may prefer systems that issue few unnecessary grants (the fewer, the better); and among the systems that violate the response property, we may prefer systems that serve many initial requests (the more, the better), or we may prefer systems that serve many requests in the long run (the greater the fraction of served to unserved requests, the better). 
Formally, while a boolean notion of correctness is given by a preorder on systems and properties, a quantitative notion of correctness is defined by a directed metric on systems and properties, where the distance between a system and a property provides a measure of "fit" or "desirability." There are many ways how such distances can be defined. In a linear-time framework, one assigns numerical values to individual behaviors before assigning values to systems and properties, which are sets of behaviors. For example, the value of a single behavior may be a discounted value, which is largely determined by a prefix of the behavior, e.g., by the number of requests that are granted before the first request that is not granted; or a limit value, which is independent of any finite prefix. A limit value may be an average, such as the average response time over an infinite sequence of requests and grants, or a supremum, such as the worst-case response time. Similarly, the value of a set of behaviors may be an extremum or an average across the values of all behaviors in the set: in this way one can measure the worst of all possible average-case response times, or the average of all possible worst-case response times, etc. Accordingly, the distance between two sets of behaviors may be defined as the worst or average difference between the values of corresponding behaviors. In summary, we propagate replacing boolean specifications for the correctness of systems with quantitative measures for the desirability of systems. In quantitative analysis, the aim is to compute the distance between a system and a property (or between two systems, or two properties); in quantitative synthesis, the objective is to construct a system that has minimal distance from a given property. Multiple quantitative measures can be prioritized (e.g., combined lexicographically into a single measure) or studied along the Pareto curve. 
Quantitative transformations, compositions, and abstractions of systems are useful if they allow us to bound the induced change in distance from a property. We present some initial results in some of these directions. We also give some potential applications, which not only generalize traditional correctness concerns in the functional, timed, and probabilistic domains, but also capture such system measures as resource use, performance, cost, reliability, and robustness.},
author = {Henzinger, Thomas A},
location = {Madrid, Spain},
number = {1},
pages = {157 -- 158},
publisher = {ACM},
title = {{From boolean to quantitative notions of correctness}},
doi = {10.1145/1706299.1706319},
volume = {45},
year = {2010},
}
@inproceedings{3852,
abstract = {We introduce two-level discounted games played by two players on a perfect-information stochastic game graph. The upper level game is a discounted game and the lower level game is an undiscounted reachability game. Two-level games model hierarchical and sequential decision making under uncertainty across different time scales. We show the existence of pure memoryless optimal strategies for both players and an ordered field property for such games. We show that if there is only one player (Markov decision processes), then the values can be computed in polynomial time. It follows that whether the value of a player is equal to a given rational constant in two-level discounted games can be decided in NP intersected coNP. We also give an alternate strategy improvement algorithm to compute the value. },
author = {Chatterjee, Krishnendu and Majumdar, Ritankar},
location = {Minori, Italy},
pages = {22 -- 29},
publisher = {EPTCS},
title = {{Discounting in games across time scales}},
doi = {10.4204/EPTCS.25.6},
volume = {25},
year = {2010},
}
@inproceedings{3845,
abstract = {This paper presents Aligators, a tool for the generation of universally quantified array invariants. Aligators leverages recurrence solving and algebraic techniques to carry out inductive reasoning over array content. The Aligators’ loop extraction module allows treatment of multi-path loops by exploiting their commutativity and serializability properties. Our experience in applying Aligators on a collection of loops from open source software projects indicates the applicability of recurrence and algebraic solving techniques for reasoning about arrays.},
author = {Henzinger, Thomas A and Hottelier, Thibaud and Kovács, Laura and Rybalchenko, Andrey},
location = {Yogyakarta, Indonesia},
pages = {348 -- 356},
publisher = {Springer},
title = {{Aligators for arrays}},
doi = {10.1007/978-3-642-16242-8_25},
volume = {6397},
year = {2010},
}
@article{4134,
abstract = {All species are restricted in their distribution. Currently, ecological models can only explain such limits if patches vary in quality, leading to asymmetrical dispersal, or if genetic variation is too low at the margins for adaptation. However, population genetic models suggest that the increase in genetic variance resulting from dispersal should allow adaptation to almost any ecological gradient. Clearly therefore, these models miss something that prevents evolution in natural populations. We developed an individual-based simulation to explore stochastic effects in these models. At high carrying capacities, our simulations largely agree with deterministic predictions. However, when carrying capacity is low, the population fails to establish for a wide range of parameter values where adaptation was expected from previous models. Stochastic or transient effects appear critical around the boundaries in parameter space between simulation behaviours. Dispersal, gradient steepness, and population density emerge as key factors determining adaptation on an ecological gradient. },
author = {Bridle, Jon and Polechova, Jitka and Kawata, Masakado and Butlin, Roger},
journal = {Ecology Letters},
number = {4},
pages = {485 -- 494},
publisher = {Wiley-Blackwell},
title = {{Why is adaptation prevented at ecological margins? New insights from individual-based simulations}},
doi = {10.1111/j.1461-0248.2010.01442.x},
volume = {13},
year = {2010},
}
@inproceedings{4362,
abstract = {Software transactional memories (STMs) promise simple and efficient concurrent programming. Several correctness properties have been proposed for STMs. Based on a bounded conflict graph algorithm for verifying correctness of STMs, we develop TRACER, a tool for runtime verification of STM implementations. The novelty of TRACER lies in the way it combines coarse and precise runtime analyses to guarantee sound and complete verification in an efficient manner. We implement TRACER in the TL2 STM implementation. We evaluate the performance of TRACER on STAMP benchmarks. While a precise runtime verification technique based on conflict graphs results in an average slowdown of 60x, the two-level approach of TRACER performs complete verification with an average slowdown of around 25x across different benchmarks.},
author = {Singh, Vasu},
editor = {Sokolsky, Oleg and Rosu, Grigore and Tilmann, Nikolai and Barringer, Howard and Falcone, Ylies and Finkbeiner, Bernd and Havelund, Klaus and Lee, Insup and Pace, Gordon},
location = {St. Julians, Malta},
pages = {421 -- 435},
publisher = {Springer},
title = {{Runtime verification for software transactional memories}},
doi = {10.1007/978-3-642-16612-9_32},
volume = {6418},
year = {2010},
}
@inproceedings{4381,
abstract = {Cloud computing aims to give users virtually unlimited pay-per-use computing resources without the burden of managing the underlying infrastructure. We claim that, in order to realize the full potential of cloud computing, the user must be presented with a pricing model that offers flexibility at the requirements level, such as a choice between different degrees of execution speed and the cloud provider must be presented with a programming model that offers flexibility at the execution level, such as a choice between different scheduling policies. In such a flexible framework, with each job, the user purchases a virtual computer with the desired speed and cost characteristics, and the cloud provider can optimize the utilization of resources across a stream of jobs from different users. We designed a flexible framework to test our hypothesis, which is called FlexPRICE (Flexible Provisioning of Resources in a Cloud Environment) and works as follows. A user presents a job to the cloud. The cloud finds different schedules to execute the job and presents a set of quotes to the user in terms of price and duration for the execution. The user then chooses a particular quote and the cloud is obliged to execute the job according to the chosen quote. FlexPRICE thus hides the complexity of the actual scheduling decisions from the user, but still provides enough flexibility to meet the users actual demands. We implemented FlexPRICE in a simulator called PRICES that allows us to experiment with our framework. We observe that FlexPRICE provides a wide range of execution options-from fast and expensive to slow and cheap-- for the whole spectrum of data-intensive and computation-intensive jobs. We also observe that the set of quotes computed by FlexPRICE do not vary as the number of simultaneous jobs increases.},
author = {Henzinger, Thomas A and Tomar, Anmol and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
location = {Miami, USA},
pages = {83 -- 90},
publisher = {IEEE},
title = {{FlexPRICE: Flexible provisioning of resources in a cloud environment}},
doi = {10.1109/CLOUD.2010.71},
year = {2010},
}
@inproceedings{489,
abstract = {Graph games of infinite length are a natural model for open reactive processes: one player represents the controller, trying to ensure a given specification, and the other represents a hostile environment. The evolution of the system depends on the decisions of both players, supplemented by chance. In this work, we focus on the notion of randomised strategy. More specifically, we show that three natural definitions may lead to very different results: in the most general cases, an almost-surely winning situation may become almost-surely losing if the player is only allowed to use a weaker notion of strategy. In more reasonable settings, translations exist, but they require infinite memory, even in simple cases. Finally, some traditional problems becomes undecidable for the strongest type of strategies.},
author = {Cristau, Julien and David, Claire and Horn, Florian},
booktitle = {Proceedings of GandALF 2010},
location = {Minori, Amalfi Coast, Italy},
pages = {30 -- 39},
publisher = {Open Publishing Association},
title = {{How do we remember the past in randomised strategies?}},
doi = {10.4204/EPTCS.25.7},
volume = {25},
year = {2010},
}
@inproceedings{3857,
abstract = {We consider probabilistic automata on infinite words with acceptance defined by safety, reachability, Büchi, coBüchi, and limit-average conditions. We consider quantitative and qualitative decision problems. We present extensions and adaptations of proofs for probabilistic finite automata and present an almost complete characterization of the decidability and undecidability frontier of the quantitative and qualitative decision problems for probabilistic automata on infinite words.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A},
location = {Singapore, Singapore},
pages = {1 -- 16},
publisher = {Springer},
title = {{Probabilistic Automata on infinite words: decidability and undecidability results}},
doi = {10.1007/978-3-642-15643-4_1},
volume = {6252},
year = {2010},
}
@misc{5391,
abstract = {Concurrent data structures with fine-grained synchronization are notoriously difficult to implement correctly. The difficulty of reasoning about these implementations does not stem from the number of variables or the program size, but rather from the large number of possible interleavings. These implementations are therefore prime candidates for model checking. We introduce an algorithm for verifying linearizability of singly-linked heap-based concurrent data structures. We consider a model consisting of an unbounded heap where each node consists an element from an unbounded data domain, with a restricted set of operations for testing and updating pointers and data elements. Our main result is that linearizability is decidable for programs that invoke a fixed number of methods, possibly in parallel. This decidable fragment covers many of the common implementation techniques — fine-grained locking, lazy synchronization, and lock-free synchronization. We also show how the technique can be used to verify optimistic implementations with the help of programmer annotations. We developed a verification tool CoLT and evaluated it on a representative sample of Java implementations of the concurrent set data structure. The tool verified linearizability of a number of implementations, found a known error in a lock-free imple- mentation and proved that the corrected version is linearizable.},
author = {Cerny, Pavol and Radhakrishna, Arjun and Zufferey, Damien and Chaudhuri, Swarat and Alur, Rajeev},
issn = {2664-1690},
pages = {27},
publisher = {IST Austria},
title = {{Model checking of linearizability of concurrent list implementations}},
doi = {10.15479/AT:IST-2010-0001},
year = {2010},
}
@inproceedings{3864,
abstract = {Often one has a preference order among the different systems that satisfy a given specification. Under a probabilistic assumption about the possible inputs, such a preference order is naturally expressed by a weighted automaton, which assigns to each word a value, such that a system is preferred if it generates a higher expected value. We solve the following optimal-synthesis problem: given an omega-regular specification, a Markov chain that describes the distribution of inputs, and a weighted automaton that measures how well a system satisfies the given specification tinder the given input assumption, synthesize a system that optimizes the measured value. For safety specifications and measures that are defined by mean-payoff automata, the optimal-synthesis problem amounts to finding a strategy in a Markov decision process (MDP) that is optimal for a long-run average reward objective, which can be done in polynomial time. For general omega-regular specifications, the solution rests on a new, polynomial-time algorithm for computing optimal strategies in MDPs with mean-payoff parity objectives. We present some experimental results showing optimal systems that were automatically generated in this way.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Jobstmann, Barbara and Singh, Rohit},
location = {Edinburgh, United Kingdom},
pages = {380 -- 395},
publisher = {Springer},
title = {{Measuring and synthesizing systems in probabilistic environments}},
doi = {10.1007/978-3-642-14295-6_34},
volume = {6174},
year = {2010},
}
@inproceedings{4393,
abstract = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of “fit” or “desirability.” We extend the simulation preorder to the quantitative setting, by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the implementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
location = {Paris, France},
pages = {235 -- 268},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Simulation distances}},
doi = {10.1007/978-3-642-15375-4_18},
volume = {6269},
year = {2010},
}
@misc{5389,
abstract = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of “fit” or “desirability.” We extend the simulation preorder to the quantitative setting, by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the im- plementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
issn = {2664-1690},
pages = {24},
publisher = {IST Austria},
title = {{Simulation distances}},
doi = {10.15479/AT:IST-2010-0003},
year = {2010},
}
@article{3303,
abstract = {Biological traits result in part from interactions between different genetic loci. This can lead to sign epistasis, in which a beneficial adaptation involves a combination of individually deleterious or neutral mutations; in this case, a population must cross a “fitness valley” to adapt. Recombination can assist this process by combining mutations from different individuals or retard it by breaking up the adaptive combination. Here, we analyze the simplest fitness valley, in which an adaptation requires one mutation at each of two loci to provide a fitness benefit. We present a theoretical analysis of the effect of recombination on the valley-crossing process across the full spectrum of possible parameter regimes. We find that low recombination rates can speed up valley crossing relative to the asexual case, while higher recombination rates slow down valley crossing, with the transition between the two regimes occurring when the recombination rate between the loci is approximately equal to the selective advantage provided by the adaptation. In large populations, if the recombination rate is high and selection against single mutants is substantial, the time to cross the valley grows exponentially with population size, effectively meaning that the population cannot acquire the adaptation. Recombination at the optimal (low) rate can reduce the valley-crossing time by up to several orders of magnitude relative to that in an asexual population. },
author = {Weissman, Daniel and Feldman, Marcus and Fisher, Daniel},
journal = {Genetics},
number = {4},
pages = {1389 -- 1410},
publisher = {Genetics Society of America},
title = {{The rate of fitness-valley crossing in sexual populations}},
doi = {10.1534/genetics.110.123240},
volume = {186},
year = {2010},
}
@inproceedings{3719,
abstract = {The induction of a signaling pathway is characterized by transient complex formation and mutual posttranslational modification of proteins. To faithfully capture this combinatorial process in a math- ematical model is an important challenge in systems biology. Exploiting the limited context on which most binding and modification events are conditioned, attempts have been made to reduce the com- binatorial complexity by quotienting the reachable set of molecular species, into species aggregates while preserving the deterministic semantics of the thermodynamic limit. Recently we proposed a quotienting that also preserves the stochastic semantics and that is complete in the sense that the semantics of individual species can be recovered from the aggregate semantics. In this paper we prove that this quotienting yields a sufficient condition for weak lumpability and that it gives rise to a backward Markov bisimulation between the original and aggregated transition system. We illustrate the framework on a case study of the EGF/insulin receptor crosstalk.},
author = {Feret, Jérôme and Henzinger, Thomas A and Koeppl, Heinz and Petrov, Tatjana},
location = {Jena, Germany},
pages = {142 -- 161},
publisher = {Open Publishing Association},
title = {{Lumpability abstractions of rule-based systems}},
volume = {40},
year = {2010},
}