@article{4237,
abstract = {The growth function of populations is central in biomathematics. The main dogma is the existence of density-dependence mechanisms, which can be modelled with distinct functional forms that depend on the size of the Population. One important class of regulatory functions is the theta-logistic, which generalizes the logistic equation. Using this model as a motivation, this paper introduces a simple dynamical reformulation that generalizes many growth functions. The reformulation consists of two equations, one for population size, and one for the growth rate. Furthermore, the model shows that although population is density-dependent, the dynamics of the growth rate does not depend either on population size, nor on the carrying capacity. Actually, the growth equation is uncoupled from the population size equation, and the model has only two parameters, a Malthusian parameter rho and a competition coefficient theta. Distinct sign combinations of these parameters reproduce not only the family of theta-logistics, but also the van Bertalanffy, Gompertz and Potential Growth equations, among other possibilities. It is also shown that, except for two critical points, there is a general size-scaling relation that includes those appearing in the most important allometric theories, including the recently proposed Metabolic Theory of Ecology. With this model, several issues of general interest are discussed such as the growth of animal population, extinctions, cell growth and allometry, and the effect of environment over a population. (c) 2005 Elsevier Ltd. All rights reserved.},
author = {de Vladar, Harold},
journal = {Journal of Theoretical Biology},
number = {2},
pages = {245--256},
publisher = {Elsevier},
title = {{Density-dependence as a size-independent regulatory mechanism}},
internal-id = {3802},
volume = {238},
year = {2006},
}
@article{4250,
abstract = {A recent analysis has shown that divergence between human and chimpanzee varies greatly across the genome. Although this is consistent with ‘hybridisation’ between the diverging human and chimp lineages, such observations can be explained more simply by the null model of allopatric speciation.},
author = {Barton, Nicholas},
journal = {Current Biology},
number = {16},
pages = {647--650},
publisher = {Cell Press},
title = {{Evolutionary Biology: How did the human species form?}},
doi = {10.1016/j.cub.2006.07.032},
volume = {16},
year = {2006},
}
@article{4345,
abstract = {Der Artikel beschäftigt sich mit dem Konzept der Bibliothek 2.0 (bzw. Library 2.0). Er skizziert anhand einiger Beispiele die Entwicklung zum Web 2.0 und beschreibt, wie Web 2.0-Technologien und -Anwendungen in Bibliotheken eingesetzt werden. Im Mittelpunkt stehen Social-Tagging-Systeme, benutzerorientierte Erweiterungen von Bibliothekskatalogen und Dokumentenservern sowie der Einsatz von Weblogs an Bibliotheken. Ferner werden neue Anforderungen an Bibliothekare diskutiert.},
author = {Danowski, Patrick and Heller, Lambert},
journal = {Bibliotheksdienst},
number = {11},
pages = {1250--1271},
publisher = {Zentral- und Landesbibliothek Berlin},
title = {{Bibliothek 2.0 - Die Bibliothek der Zukunft?}},
internal-id = {424},
volume = {40},
year = {2006},
}
@article{4351,
abstract = {BACKGROUND: Character mapping on phylogenies has played an important, if not critical role, in our understanding of molecular, morphological, and behavioral evolution. Until very recently we have relied on parsimony to infer character changes. Parsimony has a number of serious limitations that are drawbacks to our understanding. Recent statistical methods have been developed that free us from these limitations enabling us to overcome the problems of parsimony by accommodating uncertainty in evolutionary time, ancestral states, and the phylogeny. RESULTS: SIMMAP has been developed to implement stochastic character mapping that is useful to both molecular evolutionists, systematists, and bioinformaticians. Researchers can address questions about positive selection, patterns of amino acid substitution, character association, and patterns of morphological evolution. CONCLUSION: Stochastic character mapping, as implemented in the SIMMAP software, enables users to address questions that require mapping characters onto phylogenies using a probabilistic approach that does not rely on parsimony. Analyses can be performed using a fully Bayesian approach that is not reliant on considering a single topology, set of substitution model parameters, or reconstruction of ancestral states. Uncertainty in these quantities is accommodated by using MCMC samples from their respective posterior distributions.},
author = {Bollback, Jonathan},
journal = {BMC Bioinformatics},
publisher = {BioMed Central},
title = {{SIMMAP: stochastic character mapping of discrete traits on phylogenies}},
doi = {10.1186/1471-2105-7-88},
volume = {7},
year = {2006},
}
@article{4352,
abstract = {Anopheles darlingi is the primary malaria vector in Latin America, and is especially important in Amazonian Brazil. Historically, control efforts have been focused on indoor house spraying using a variety of insecticides, but since the mid-1990s there has been a shift to patient treatment and focal insecticide fogging. Anopheles darlingi was believed to have been significantly reduced in a gold-mining community, Peixoto de Azevedo (in Mato Grosso State), in the early 1990s by insecticide use during a severe malaria epidemic. In contrast, although An. darlingi was eradicated from some districts of the city of Belem (the capital of Para State) in 1968 to reduce malaria, populations around the water protection area in the eastern district were treated only briefly. To investigate the population structure of An. darlingi including evidence for a population bottleneck in Peixoto, we analyzed eight microsatellite loci of 256 individuals from seven locations in Brazil: three in Amapa State, three in Para State, and one in Mato Grosso State. Allelic diversity and mean expected heterozygosity were high for all populations (mean number alleles/locus and H(E) were 13.5 and 0.834, respectively) and did not differ significantly between locations. Significant heterozygote deficits were associated with linkage disequilibrium, most likely due to either the Wahlund effect or selection. We found no evidence for a population bottleneck in Peixoto, possibly because the reduction was not extreme enough to be detected. Overall estimates of long-term N(e) varied from 92.4 individuals under the linkage disequilibrium model to infinity under the heterozygote excess model. Fixation indices and analysis of molecular variance demonstrated significant differentiation between locations north and south of the Amazon River, suggesting a degree of genetic isolation between them, attributed to isolation by distance.},
author = {Conn, Jan E and Vineis, Joseph H and Bollback, Jonathan and Onyabe, David Y and Wilkerson, Richard C and Povoa, Marinete M},
journal = {The American Journal of Tropical Medicine and Hygiene},
number = {5},
pages = {798--806},
publisher = {American Society of Tropical Medicine and Hygiene},
title = {{Population structure of the malaria vector Anopheles darlingi in a malaria-endemic region of eastern Amazonian Brazil}},
volume = {74},
year = {2006},
}
@inproceedings{4359,
author = {Wies, Thomas and Kuncak, Viktor and Lam, Patrick and Podelski, Andreas and Rinard, Martin},
pages = {157--173},
publisher = {Springer},
title = {{Field Constraint Analysis}},
internal-id = {1551},
year = {2006},
}
@inproceedings{4373,
author = {Maler, Oded and Nickovic, Dejan and Pnueli, Amir},
pages = {2--16},
publisher = {Springer},
title = {{Real Time Temporal Logic: Past, Present, Future}},
internal-id = {1571},
year = {2006},
}
@inproceedings{4374,
author = {Maler, Oded and Nickovic, Dejan and Pnueli, Amir},
pages = {274--289},
publisher = {Springer},
title = {{From MITL to Timed Automata}},
internal-id = {1570},
year = {2006},
}
@article{4248,
abstract = {In finite populations, genetic drift generates interference between selected loci, causing advantageous alleles to be found more often on different chromosomes than on the same chromosome, which reduces the rate of adaptation. This “Hill–Robertson effect” generates indirect selection to increase recombination rates. We present a new method to quantify the strength of this selection. Our model represents a new beneficial allele (A) entering a population as a single copy, while another beneficial allele (B) is sweeping at another locus. A third locus affects the recombination rate between selected loci. Using a branching process model, we calculate the probability distribution of the number of copies of A on the different genetic backgrounds, after it is established but while it is still rare. Then, we use a deterministic model to express the change in frequency of the recombination modifier, due to hitchhiking, as A goes to fixation. We show that this method can give good estimates of selection for recombination. Moreover, it shows that recombination is selected through two different effects: it increases the fixation probability of new alleles, and it accelerates selective sweeps. The relative importance of these two effects depends on the relative times of occurrence of the beneficial alleles.},
author = {Roze, Denis and Barton, Nicholas},
journal = {Genetics},
number = {3},
pages = {1793--1811},
publisher = {Genetics Society of America},
title = {{The Hill-Robertson effect and the evolution of recombination}},
doi = {10.1534/genetics.106.058586},
volume = {173},
year = {2006},
}
@inproceedings{4401,
author = {Alur, Rajeev and Cerny, Pavol and Zdancewic, Steve},
pages = {107--118},
publisher = {Springer},
title = {{Preserving Secrecy Under Refinement}},
internal-id = {1543},
year = {2006},
}
@inproceedings{4406,
abstract = {We propose and evaluate a new algorithm for checking the universality of nondeterministic finite automata. In contrast to the standard algorithm, which uses the subset construction to explicitly determinize the automaton, we keep the determinization step implicit. Our algorithm computes the least fixed point of a monotone function on the lattice of antichains of state sets. We evaluate the performance of our algorithm experimentally using the random automaton model recently proposed by Tabakov and Vardi. We show that on the difficult instances of this probabilistic model, the antichain algorithm outperforms the standard one by several orders of magnitude. We also show how variations of the antichain method can be used for solving the language-inclusion problem for nondeterministic finite automata, and the emptiness problem for alternating finite automata.},
author = {De Wulf, Martin and Doyen, Laurent and Henzinger, Thomas and Raskin, Jean-François},
pages = {17--30},
publisher = {Springer},
title = {{Antichains: A new algorithm for checking universality of finite automata}},
doi = {10.1007/11817963_5},
volume = {4144},
year = {2006},
}
@inproceedings{4431,
abstract = {We summarize some current trends in embedded systems design and point out some of their characteristics, such as the chasm between analytical and computational models, and the gap between safety-critical and best-effort engineering practices. We call for a coherent scientific foundation for embedded systems design, and we discuss a few key demands on such a foundation: the need for encompassing several manifestations of heterogeneity, and the need for constructivity in design. We believe that the development of a satisfactory Embedded Systems Design Science provides a timely challenge and opportunity for reinvigorating computer science.},
author = {Henzinger, Thomas and Sifakis, Joseph},
pages = {1--15},
publisher = {Springer},
title = {{The embedded systems design challenge}},
doi = {10.1007/11813040_1},
volume = {4085},
year = {2006},
}
@inproceedings{4432,
abstract = {We add freeze quantifiers to the game logic ATL in order to specify real-time objectives for games played on timed structures. We define the semantics of the resulting logic TATL by restricting the players to physically meaningful strategies, which do not prevent time from diverging. We show that TATL can be model checked over timed automaton games. We also specify timed optimization problems for physically meaningful strategies, and we show that for timed automaton games, the optimal answers can be approximated to within any degree of precision.},
author = {Henzinger, Thomas and Prabhu, Vinayak S},
pages = {1--17},
publisher = {Springer},
title = {{Timed alternating-time temporal logic}},
doi = {10.1007/11867340_1},
volume = {4202},
year = {2006},
}
@inproceedings{4436,
abstract = {We present an assume-guarantee interface algebra for real-time components. In our formalism a component implements a set of task sequences that share a resource. A component interface consists of an arrival rate function and a latency for each task sequence, and a capacity function for the shared resource. The interface specifies that the component guarantees certain task latencies depending on assumptions about task arrival rates and allocated resource capacities. Our algebra defines compatibility and refinement relations on interfaces. Interface compatibility can be checked on partial designs, even when some component interfaces are yet unknown. In this case interface composition computes as new assumptions the weakest constraints on the unknown components that are necessary to satisfy the specified guarantees. Interface refinement is defined in a way that ensures that compatible interfaces can be refined and implemented independently. Our algebra thus formalizes an interface-based design methodology that supports both the incremental addition of new components and the independent stepwise refinement of existing components. We demonstrate the flexibility and efficiency of the framework through simulation experiments.},
author = {Henzinger, Thomas and Matic, Slobodan},
pages = {253--266},
publisher = {IEEE},
title = {{An interface algebra for real-time components}},
doi = {10.1109/RTAS.2006.11},
year = {2006},
}
@inproceedings{4437,
abstract = {The synthesis of reactive systems requires the solution of two-player games on graphs with ω-regular objectives. When the objective is specified by a linear temporal logic formula or nondeterministic Büchi automaton, then previous algorithms for solving the game require the construction of an equivalent deterministic automaton. However, determinization for automata on infinite words is extremely complicated, and current implementations fail to produce deterministic automata even for relatively small inputs. We show how to construct, from a given nondeterministic Büchi automaton, an equivalent nondeterministic parity automaton that is good for solving games with objective . The main insight is that a nondeterministic automaton is good for solving games if it fairly simulates the equivalent deterministic automaton. In this way, we omit the determinization step in game solving and reactive synthesis. The fact that our automata are nondeterministic makes them surprisingly simple, amenable to symbolic implementation, and allows an incremental search for winning strategies.},
author = {Henzinger, Thomas and Piterman, Nir},
pages = {395--410},
publisher = {Springer},
title = {{Solving games without determinization}},
doi = {10.1007/11874683_26},
volume = {4207},
year = {2006},
}
@article{4451,
abstract = {One source of complexity in the μ-calculus is its ability to specify an unbounded number of switches between universal (AX) and existential (EX) branching modes. We therefore study the problems of satisfiability, validity, model checking, and implication for the universal and existential fragments of the μ-calculus, in which only one branching mode is allowed. The universal fragment is rich enough to express most specifications of interest, and therefore improved algorithms are of practical importance. We show that while the satisfiability and validity problems become indeed simpler for the existential and universal fragments, this is, unfortunately, not the case for model checking and implication. We also show the corresponding results for the alternation-free fragment of the μ-calculus, where no alternations between least and greatest fixed points are allowed. Our results imply that efforts to find a polynomial-time model-checking algorithm for the μ-calculus can be replaced by efforts to find such an algorithm for the universal or existential fragment.},
author = {Henzinger, Thomas and Kupferman, Orna and Majumdar, Ritankar S},
journal = {Theoretical Computer Science},
number = {2},
pages = {173--186},
publisher = {Elsevier},
title = {{On the universal and existential fragments of the mu-calculus}},
doi = {10.1016/j.tcs.2005.11.015},
volume = {354},
year = {2006},
}
@inproceedings{4523,
abstract = {We consider the problem if a given program satisfies a specified safety property. Interesting programs have infinite state spaces, with inputs ranging over infinite domains, and for these programs the property checking problem is undecidable. Two broad approaches to property checking are testing and verification. Testing tries to find inputs and executions which demonstrate violations of the property. Verification tries to construct a formal proof which shows that all executions of the program satisfy the property. Testing works best when errors are easy to find, but it is often difficult to achieve sufficient coverage for correct programs. On the other hand, verification methods are most successful when proofs are easy to find, but they are often inefficient at discovering errors. We propose a new algorithm, Synergy, which combines testing and verification. Synergy unifies several ideas from the literature, including counterexample-guided model checking, directed testing, and partition refinement.This paper presents a description of the Synergy algorithm, its theoretical properties, a comparison with related algorithms, and a prototype implementation called Yogi.},
author = {Gulavani, Bhargav S and Henzinger, Thomas and Kannan, Yamini and Nori, Aditya V and Rajamani, Sriram K},
pages = {117--127},
publisher = {ACM},
title = {{Synergy: A new algorithm for property checking}},
doi = {10.1145/1181775.1181790},
year = {2006},
}
@inproceedings{4526,
abstract = {We designed and implemented a new programming language called Hierarchical Timing Language (HTL) for hard realtime systems. Critical timing constraints are specified within the language,and ensured by the compiler. Programs in HTL are extensible in two dimensions without changing their timing behavior: new program modules can be added, and individual program tasks can be refined. The mechanism supporting time invariance under parallel composition is that different program modules communicate at specified instances of time. Time invariance under refinement is achieved by conservative scheduling of the top level. HTL is a coordination language, in that individual tasks can be implemented in "foreign" languages. As a case study, we present a distributed HTL implementation of an automotive steer-by-wire controller.},
author = {Ghosal, Arkadeb and Henzinger, Thomas and Iercan, Daniel and Kirsch, Christoph M and Sangiovanni-Vincentelli, Alberto},
pages = {132--141},
publisher = {ACM},
title = {{A hierarchical coordination language for interacting real-time tasks}},
doi = {10.1145/1176887.1176907},
year = {2006},
}
@inproceedings{4528,
abstract = {Computational modeling of biological systems is becoming increasingly common as scientists attempt to understand biological phenomena in their full complexity. Here we distinguish between two types of biological models mathematical and computational - according to their different representations of biological phenomena and their diverse potential. We call the approach of constructing computational models of biological systems executable biology, as it focuses on the design of executable computer algorithms that mimic biological phenomena. We give an overview of the main modeling efforts in this direction, and discuss some of the new challenges that executable biology poses for computer science and biology. We argue that for executable biology to reach its full potential as a mainstream biological technique, formal and algorithmic approaches must be integrated into biological research, driving biology towards a more precise engineering discipline.},
author = {Fisher, Jasmin and Henzinger, Thomas},
pages = {1675--1682},
publisher = {IEEE},
title = {{Executable biology}},
doi = {10.1109/WSC.2006.322942},
year = {2006},
}
@inproceedings{4538,
abstract = {A stochastic graph game is played by two players on a game graph with probabilistic transitions. We consider stochastic graph games with ω-regular winning conditions specified as parity objectives. These games lie in NP ∩ coNP. We present a strategy improvement algorithm for stochastic parity games; this is the first non-brute-force algorithm for solving these games. From the strategy improvement algorithm we obtain a randomized subexponential-time algorithm to solve such games.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas},
pages = {512--523},
publisher = {Springer},
title = {{Strategy improvement and randomized subexponential algorithms for stochastic parity games}},
doi = {10.1007/11672142_42},
volume = {3884},
year = {2006},
}
@inproceedings{4539,
abstract = {Games on graphs with ω-regular objectives provide a model for the control and synthesis of reactive systems. Every ω-regular objective can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens “eventually.” Two main strengths of the classical, infinite-limit formulation of liveness are robustness (independence from the granularity of transitions) and simplicity (abstraction of complicated time bounds). However, the classical liveness formulation suffers from the drawback that the time until something good happens may be unbounded. A stronger formulation of liveness, so-called finitary liveness, overcomes this drawback, while still retaining robustness and simplicity. Finitary liveness requires that there exists an unknown, fixed bound b such that something good happens within b transitions. While for one-shot liveness (reachability) objectives, classical and finitary liveness coincide, for repeated liveness (Büchi) objectives, the finitary formulation is strictly stronger. In this work we study games with finitary parity and Streett (fairness) objectives. We prove the determinacy of these games, present algorithms for solving these games, and characterize the memory requirements of winning strategies. Our algorithms can be used, for example, for synthesizing controllers that do not let the response time of a system increase without bound.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas},
pages = {257--271},
publisher = {Springer},
title = {{Finitary winning in omega-regular games}},
doi = {10.1007/11691372_17},
volume = {3920},
year = {2006},
}
@unpublished{573,
abstract = {Mitchison and Jozsa recently suggested that the "chained-Zeno" counterfactual computation protocol recently proposed by Hosten et al. is counterfactual for only one output of the computer. This claim was based on the existing abstract algebraic definition of counterfactual computation, and indeed according to this definition, their argument is correct. However, a more general definition (physically adequate) for counterfactual computation is implicitly assumed by Hosten et. al. Here we explain in detail why the protocol is counterfactual and how the "history tracking" method of the existing description inadequately represents the physics underlying the protocol. Consequently, we propose a modified definition of counterfactual computation. Finally, we comment on one of the most interesting aspects of the error-correcting protocol. },
author = {Hosten, Onur and Rakher, Matthew and Barreiro, Julio and Peters, Nicholas and Kwiat, Paul},
pages = {12},
publisher = {ArXiv},
note = {arXiv preprint},
title = {{Counterfactual computation revisited}},
year = {2006},
}
@unpublished{574,
abstract = {Vaidman, in a recent article adopts the method of 'quantum weak measurements in pre- and postselected ensembles' to ascertain whether or not the chained-Zeno counterfactual computation scheme proposed by Hosten et al. is counterfactual; which has been the topic of a debate on the definition of counterfactuality. We disagree with his conclusion, which brings up some interesting aspects of quantum weak measurements and some concerns about the way they are interpreted. },
author = {Hosten, Onur and Kwiat, Paul},
pages = {2},
publisher = {ArXiv},
note = {arXiv preprint},
title = {{Weak measurements and counterfactual computation}},
year = {2006},
}
@inproceedings{577,
abstract = {Visible light photon counters (VLPCs) and solid-state photomultipliers (SSPMs) are high-efficiency single-photon detectors which have multi-photon counting capability. While both the VLPCs and the SSPMs have inferred internal quantum efficiencies above 93%, the actual measured values for both the detectors were in fact limited to less than 88%, attributed to in-coupling losses. We are currently improving this overall detection efficiency via a) custom anti-reflection coating the detectors and the in-coupling fibers, b) implementing a novel cryogenic design to reduce transmission losses and, c) using low-noise electronics to obtain a better signal-to-noise ratio.},
author = {Rangarajan, Radhika and Altepeter, Joseph B and Jeffrey, Evan R and Stoutimore, Micah J and Peters, Nicholas A and Hosten, Onur and Kwiat, Paul G},
publisher = {SPIE},
title = {{High-efficiency single-photon detectors}},
doi = {10.1117/12.686117},
volume = {6372},
year = {2006},
}
@inproceedings{578,
abstract = {A source of single photons allows secure quantum key distribution, in addition, to being a critical resource for linear optics quantum computing. We describe our progress on deterministically creating single photons from spontaneous parametric downconversion, an extension of the Pittman, Jacobs and Franson scheme [Phys. Rev A, v66, 042303 (2002)]. Their idea was to conditionally prepare single photons by measuring one member of a spontaneously emitted photon pair and storing the remaining conditionally prepared photon until a predetermined time, when it would be "deterministically" released from storage. Our approach attempts to improve upon this by recycling the pump pulse in order to decrease the possibility of multiple-pair generation, while maintaining a high probability of producing a single pair. Many of the challenges we discuss are central to other quantum information technologies, including the need for low-loss optical storage, switching and detection, and fast feed-forward control.},
author = {Peters, Nicholas A and Arnold, Keith J and VanDevender, Aaron P and Jeffrey, Evan R and Rangarajan, Radhika and Hosten, Onur and Barreiro, Julio T and Altepeter, Joseph B and Kwiat, Paul G},
publisher = {SPIE},
title = {{Towards a quasi-deterministic single-photon source}},
doi = {10.1117/12.684702},
volume = {6305},
year = {2006},
}
@article{579,
abstract = {The logic underlying the coherent nature of quantum information processing often deviates from intuitive reasoning, leading to surprising effects. Counterfactual computation constitutes a striking example: the potential outcome of a quantum computation can be inferred, even if the computer is not run 1. Relying on similar arguments to interaction-free measurements 2 (or quantum interrogation3), counterfactual computation is accomplished by putting the computer in a superposition of 'running' and 'not running' states, and then interfering the two histories. Conditional on the as-yet-unknown outcome of the computation, it is sometimes possible to counterfactually infer information about the solution. Here we demonstrate counterfactual computation, implementing Grover's search algorithm with an all-optical approach4. It was believed that the overall probability of such counterfactual inference is intrinsically limited1,5, so that it could not perform better on average than random guesses. However, using a novel 'chained' version of the quantum Zeno effect6, we show how to boost the counterfactual inference probability to unity, thereby beating the random guessing limit. Our methods are general and apply to any physical system, as illustrated by a discussion of trapped-ion systems. Finally, we briefly show that, in certain circumstances, counterfactual computation can eliminate errors induced by decoherence. },
author = {Hosten, Onur and Rakher, Matthew T and Barreiro, Julio T and Peters, Nicholas A and Kwiat, Paul G},
journal = {Nature},
number = {7079},
pages = {949--952},
publisher = {Nature Publishing Group},
title = {{Counterfactual quantum computation through quantum interrogation}},
doi = {10.1038/nature04523},
volume = {439},
year = {2006},
}
@inproceedings{583,
abstract = {Visible light photon counters (VLPCs) and solid-state photomultipliers (SSPMs) facilitate efficient single-photon detection. We are attempting to improve their efficiency, previously limited to < 88% by coupling losses, via anti-reflection coatings, better electronics and cryogenics.},
author = {Rangarajan, Radhika and Peters, Nicholas A and Hosten, Onur and Altepeter, Joseph B and Jeffrey, Evan R and Kwiat, Paul G},
publisher = {IEEE},
title = {{Improved single-photon detection}},
doi = {10.1109/CLEO.2006.4628641},
year = {2006},
}
@article{6151,
author = {Salecker, Iris and Häusser, Michael and de Bono, Mario},
issn = {1469-221X},
journal = {EMBO reports},
number = {6},
pages = {585--589},
publisher = {Wiley},
title = {{On the axonal road to circuit function and behaviour: Workshop on the assembly and function of neuronal circuits}},
doi = {10.1038/sj.embor.7400713},
volume = {7},
year = {2006},
}
@article{6152,
author = {Rogers, Candida and Persson, Annelie and Cheung, Benny and de Bono, Mario},
issn = {0960-9822},
journal = {Current Biology},
number = {7},
pages = {649--659},
publisher = {Elsevier},
title = {{Behavioral motifs and neural pathways coordinating O2 responses and aggregation in C. elegans}},
doi = {10.1016/j.cub.2006.03.023},
volume = {16},
year = {2006},
}
@inproceedings{7326,
abstract = {Often the properties of a single cell are considered as representative for a complete polymer electrolyte fuel cell stack or even a fuel cell system. In some cases this comes close, however, in many real cases differences on several scales become important. Cell interaction phenomena in fuel cell stacks that arise from inequalities between adjacent cells are investigated in detail experimentally. For that, a specialized 2-cell stack with advanced localized diagnostics was developed. The results show that inequalities propagate by electrical coupling, inhomogeneous cell polarization and inducing in-plane current in the common bipolar plate. The effects of the different loss-mechanisms are analyzed and quantified. },
author = {Büchi, Felix N. and Freunberger, Stefan Alexander and Santis, Marco},
booktitle = {ECS Transactions},
location = {Cancun, Mexico},
number = {1},
pages = {963--968},
publisher = {ECS},
title = {{What is learned beyond the scale of single cells?}},
doi = {10.1149/1.2356215},
volume = {3},
year = {2006},
}
@article{7327,
abstract = {Propagation of performance changes to adjacent cells in polymer electrolyte fuel cell stacks is studied by means of voltage monitoring and local current density measurements in peripheral cells of the stack. A technical fuel cell stack has been modified by implementing two independent reactant and coolant supplies in order to deliberately change the performance of one cell (anomalous cell) and study the coupling phenomena to adjacent cells (coupling cells), while keeping the working conditions of the later cell-group unaltered.
Two anomalies are studied: (i) air starvation and (ii) thermal anomaly, in a single anomalous cell in the stack and their coupling to adjacent cells. The results have shown that anomalies inducing considerable changes in the local current density of the anomalous cell (such as air starvation) propagate to adjacent cells affecting their performance. The propagation of local current density changes takes place via the common bipolar plate due to its finite thickness and in-plane conductivity. Consequently, anomalies which do not strongly influence the local current density distribution (such as a thermal anomaly under the studied working conditions) do not propagate to adjacent cells.},
author = {Santis, Marco and Freunberger, Stefan Alexander and Papra, Matthias and Wokaun, Alexander and Büchi, Felix N.},
issn = {0378-7753},
journal = {Journal of Power Sources},
number = {2},
pages = {1076--1083},
publisher = {Elsevier},
title = {{Experimental investigation of coupling phenomena in polymer electrolyte fuel cell stacks}},
doi = {10.1016/j.jpowsour.2006.06.007},
volume = {161},
year = {2006},
}
@article{7328,
abstract = {An experimental technique for measuring the current density distribution with a resolution smaller than the channel/rib scale of the flow field in polymer electrolyte fuel cells (PEFCs) is presented. The electron conductors in a plane perpendicular to the channel direction are considered as two-dimensional resistors. Hence, the current density is obtained from the solution of Laplace's equation with the potentials at current collector and reaction layer as boundary conditions. Using ohmic drop for calculating the local current, detailed knowledge of all resistances involved is of prime importance. In particular, the contact resistance between the gas diffusion layer (GDL) and flow field rib, as well as GDL bulk conductivity, are strongly dependent on clamping pressure. They represent a substantial amount of the total ohmic drop and therefore require careful consideration. The detailed experimental setup as well as the concise procedure for quantitative data evaluation is described. Finally, the method is applied successfully to a cell operated on pure oxygen and air up to high current densities. The results show that electrical and ionic resistances seem to govern the current distribution at low current regimes, whereas mass transport limitations locally hamper the current production at high loads.},
author = {Freunberger, Stefan Alexander and Reum, Mathias and Evertz, Jörg and Wokaun, Alexander and Büchi, Felix N.},
issn = {0013-4651},
journal = {Journal of The Electrochemical Society},
number = {11},
publisher = {The Electrochemical Society},
title = {{Measuring the current distribution in PEFCs with sub-millimeter resolution}},
doi = {10.1149/1.2345591},
volume = {153},
year = {2006},
}
@article{7329,
abstract = {A novel measurement principle for measuring the current distribution in polymer electrolyte fuel cells (PEFCs) is introduced. It allows, in contrast to all other known techniques, for the first time for a resolution smaller than the channel/rib scale of the flow field in PEFCs. The current density is obtained by considering the electron conductors in the cell as a two-dimensional resistor with the voltage drop caused by the current. The method was applied to a cell operated on oxygen up to high current densities. The results show that the ohmic resistances govern the current distribution in the low current regime, whereas mass transport limitations hamper the current production under the land at high loads.},
author = {Freunberger, Stefan Alexander and Reum, Mathias and Wokaun, Alexander and Büchi, Felix N.},
issn = {1388-2481},
journal = {Electrochemistry Communications},
number = {9},
pages = {1435--1438},
publisher = {Elsevier},
title = {{Expanding current distribution measurement in PEFCs to sub-millimeter resolution}},
doi = {10.1016/j.elecom.2006.05.032},
volume = {8},
year = {2006},
}
@article{7330,
abstract = {Polymer electrolyte fuel cells (PE fuel cells) working with air at low stoichiometries (<2.0) and standard electrochemical components show a high degree of inhomogeneity in the current density distribution over the active area. An inhomogeneous current density distribution leads to a non-uniform utilization of the active area, which could negatively affect the time of life of the cells. Furthermore, it is also believed to lower cell performance. In this work, the homogenization of the current density, realized by means of tailored cathodes with along-the-air-channel redistributed catalyst loadings, is investigated. The air stoichiometry range for which a homogenization of the current density is achieved depends upon the gradient with which the catalyst is redistributed along the air channel. A gentle increasing catalyst loading profile homogenizes the current density at relatively higher air stoichiometries, while a steeper profile is suited better for lower air stoichiometries. The results show that a homogenization of the current density by means of redistributed catalyst loading has negative effects on cell performance. Model calculations corroborate the experimental findings on homogenization of the current density and deliver an explanation for the decrease in cell performance.},
author = {Santis, Marco and Freunberger, Stefan Alexander and Reiner, A. and Büchi, Felix N.},
issn = {0013-4686},
journal = {Electrochimica Acta},
number = {25},
pages = {5383--5393},
publisher = {Elsevier},
title = {{Homogenization of the current density in polymer electrolyte fuel cells by in-plane cathode catalyst gradients}},
doi = {10.1016/j.electacta.2006.02.008},
volume = {51},
year = {2006},
}
@article{7331,
abstract = {A previously developed mathematical model for water management and current density distribution in a polymer electrolyte fuel cell (PEFCs) is employed to investigate the effects of cooling strategies on cell performance. The model describes a two-dimensional slice through the cell along the channels and through the entire cell sandwich including the coolant channels and the bipolar plate. Arbitrary flow arrangements of fuel, oxidant, and coolant stream directions can be described. Due to the serious impact of temperature on all processes in the PEFC, both the relative direction of the coolant stream to the gas streams and its mass flow turns out to significantly affect the cell performance. Besides influencing the electrochemical reaction and all kinds of mass transfer temperature, variations predominantly alter the local membrane hydration distribution and subseqently its conductivity.},
author = {Freunberger, Stefan Alexander and Wokaun, Alexander and Büchi, Felix N.},
issn = {0013-4651},
journal = {Journal of The Electrochemical Society},
number = {5},
publisher = {The Electrochemical Society},
title = {{In-plane effects in large-scale PEFCs: II. The influence of cooling strategy on cell performance}},
doi = {10.1149/1.2185282},
volume = {153},
year = {2006},
}
@article{7332,
abstract = {A quasi-two-dimensional, along-the-channel mass and heat-transfer model for a proton exchange membrane fuel cell (PEFC) is described and validated against experimental current distribution data. The model is formulated in a dimensional manner, i.e., local transport phenomena are treated one-dimensional in through-plane direction and coupled in-plane by convective transport in the gas and coolant channels. Thus, a two-dimensional slice running through the repetitive unit of a cell from the anode channel via membrane-electrode assembly (MEA) and cathode channel to the coolant channel and from inlet to outlet is modeled. The aim of the work is to elucidate the influence of operating conditions such as feed gas humidities and stoichiometric ratios on the along-the-channel current density distribution and to identify the distinct underlying voltage loss mechanisms. Furthermore, a complicated technical flow field is modeled by a combination of co- and counterflow subdomains and compared with experimental current densities.},
author = {Freunberger, Stefan Alexander and Santis, Marco and Schneider, Ingo A. and Wokaun, Alexander and Büchi, Felix N.},
issn = {0013-4651},
journal = {Journal of The Electrochemical Society},
number = {2},
publisher = {The Electrochemical Society},
title = {{In-plane effects in large-scale PEMFCs}},
doi = {10.1149/1.2150150},
volume = {153},
year = {2006},
}
@article{8488,
abstract = {We demonstrate for different protein samples that three-dimensional HNCO and HNCA correlation spectra may be recorded in a few minutes acquisition time using the band-selective excitation short-transient sequences presented here. This opens new perspectives for the NMR structural investigation of unstable protein samples and real-time site-resolved studies of protein kinetics.},
author = {Schanda, Paul and Van Melckebeke, Hélène and Brutscher, Bernhard},
issn = {0002-7863},
journal = {Journal of the American Chemical Society},
keywords = {Colloid and Surface Chemistry, Biochemistry, General Chemistry, Catalysis},
number = {28},
pages = {9042--9043},
publisher = {American Chemical Society},
title = {{Speeding up three-dimensional protein NMR experiments to a few minutes}},
doi = {10.1021/ja062025p},
volume = {128},
year = {2006},
}
@article{8489,
abstract = {Structure elucidation of proteins by either NMR or X‐ray crystallography often requires the screening of a large number of samples for promising protein constructs and optimum solution conditions. For large‐scale screening of protein samples in solution, robust methods are needed that allow a rapid assessment of the folding of a polypeptide under diverse sample conditions. Here we present HET‐SOFAST NMR, a highly sensitive new method for semi‐quantitative characterization of the structural compactness and heterogeneity of polypeptide chains in solution. On the basis of one‐dimensional 1H HET‐SOFAST NMR data, obtained on well‐folded, molten globular, partially‐ and completely unfolded proteins, we define empirical thresholds that can be used as quantitative benchmarks for protein compactness. For 15N‐enriched protein samples, two‐dimensional 1H‐15N HET‐SOFAST correlation spectra provide site‐specific information about the structural heterogeneity along the polypeptide chain.},
author = {Schanda, Paul and Forge, Vincent and Brutscher, Bernhard},
issn = {0749-1581},
journal = {Magnetic Resonance in Chemistry},
number = {S1},
pages = {S177--S184},
publisher = {Wiley},
title = {{HET-SOFAST NMR for fast detection of structural compactness and heterogeneity along polypeptide chains}},
doi = {10.1002/mrc.1825},
volume = {44},
year = {2006},
}
@article{8490,
abstract = {We demonstrate the feasibility of recording 1H–15N correlation spectra of proteins in only one second of acquisition time. The experiment combines recently proposed SOFAST-HMQC with Hadamard-type 15N frequency encoding. This allows site-resolved real-time NMR studies of kinetic processes in proteins with an increased time resolution. The sensitivity of the experiment is sufficient to be applicable to a wide range of molecular systems available at millimolar concentration on a high magnetic field spectrometer.},
author = {Schanda, Paul and Brutscher, Bernhard},
issn = {1090-7807},
journal = {Journal of Magnetic Resonance},
keywords = {Nuclear and High Energy Physics, Biophysics, Biochemistry, Condensed Matter Physics},
number = {2},
pages = {334--339},
publisher = {Elsevier},
title = {{Hadamard frequency-encoded SOFAST-HMQC for ultrafast two-dimensional protein NMR}},
doi = {10.1016/j.jmr.2005.10.007},
volume = {178},
year = {2006},
}
@article{8513,
author = {Kaloshin, Vadim and Saprykina, Maria},
issn = {1553-5231},
journal = {Discrete \& Continuous Dynamical Systems - A},
number = {2},
pages = {611--640},
publisher = {American Institute of Mathematical Sciences (AIMS)},
title = {{Generic 3-dimensional volume-preserving diffeomorphisms with superexponential growth of number of periodic orbits}},
doi = {10.3934/dcds.2006.15.611},
volume = {15},
year = {2006},
}
@article{8514,
abstract = {We study the extent to which the Hausdorff dimension of a compact subset of an infinite-dimensional Banach space is affected by a typical mapping into a finite-dimensional space. It is possible that the dimension drops under all such mappings, but the amount by which it typically drops is controlled by the ‘thickness exponent’ of the set, which was defined by Hunt and Kaloshin (Nonlinearity12 (1999), 1263–1275). More precisely, let $X$ be a compact subset of a Banach space $B$ with thickness exponent $\tau$ and Hausdorff dimension $d$. Let $M$ be any subspace of the (locally) Lipschitz functions from $B$ to $\mathbb{R}^{m}$ that contains the space of bounded linear functions. We prove that for almost every (in the sense of prevalence) function $f \in M$, the Hausdorff dimension of $f(X)$ is at least $\min\{ m, d / (1 + \tau) \}$. We also prove an analogous result for a certain part of the dimension spectra of Borel probability measures supported on $X$. The factor $1 / (1 + \tau)$ can be improved to $1 / (1 + \tau / 2)$ if $B$ is a Hilbert space. Since dimension cannot increase under a (locally) Lipschitz function, these theorems become dimension preservation results when $\tau = 0$. We conjecture that many of the attractors associated with the evolution equations of mathematical physics have thickness exponent zero. We also discuss the sharpness of our results in the case $\tau > 0$.},
author = {Ott, William and Hunt, Brian and Kaloshin, Vadim},
issn = {0143-3857},
journal = {Ergodic Theory and Dynamical Systems},
number = {3},
pages = {869--891},
publisher = {Cambridge University Press},
title = {{The effect of projections on fractal sets and measures in Banach spaces}},
doi = {10.1017/s0143385705000714},
volume = {26},
year = {2006},
}
@inproceedings{8515,
abstract = {We consider the evolution of a set carried by a space periodic incompressible stochastic flow in a Euclidean space. We
report on three main results obtained in [8, 9, 10] concerning long time behaviour for a typical realization of the stochastic flow. First, at time t most of the particles are at a distance of order √t away from the origin. Moreover, we prove a Central Limit Theorem for the evolution of a measure carried by the flow, which holds for almost every realization of the flow. Second, we show the existence of a zero measure full Hausdorff dimension set of points, which
escape to infinity at a linear rate. Third, in the 2-dimensional case, we study the set of points visited by the original set by time t. Such a set, when scaled down by the factor of t, has a limiting non random shape.},
author = {Kaloshin, Vadim and Dolgopyat, D. and Koralov, L.},
booktitle = {XIVth International Congress on Mathematical Physics},
isbn = {9789812562012},
location = {Lisbon, Portugal},
pages = {290--295},
publisher = {World Scientific},
title = {{Long time behaviour of periodic stochastic flows}},
doi = {10.1142/9789812704016_0026},
year = {2006},
}
@article{854,
abstract = {Phylogenetic relationships between the extinct woolly mammoth (Mammuthus primigenius), and the Asian (Elephas maximus) and African savanna (Loxodonta africana) elephants remain unresolved. Here, we report the sequence of the complete mitochondrial genome (16,842 base pairs) of a woolly mammoth extracted from permafrost-preserved remains from the Pleistocene epoch - the oldest mitochondrial genome sequence determined to date. We demonstrate that well-preserved mitochondrial genome fragments, as long as ∼1,600-1700 base pairs, can be retrieved from pre-Holocene remains of an extinct species. Phylogenetic reconstruction of the Elephantinae clade suggests that M. primigenius and E. maximus are sister species that diverged soon after their common ancestor split from the L. africana lineage. Low nucleotide diversity found between independently determined mitochondrial genomic sequences of woolly mammoths separated geographically and in time suggests that north-eastern Siberia was occupied by a relatively homogeneous population of M. primigenius throughout the late Pleistocene.},
author = {Rogaev, Evgeny I and Moliaka, Yuri K and Malyarchuk, Boris A and Kondrashov, Fyodor and Derenko, Miroslava V and Chumakov, Ilya M and Grigorenko, Anastasia P},
journal = {PLoS Biology},
number = {3},
pages = {0403--0410},
publisher = {Public Library of Science},
title = {{Complete mitochondrial genome and phylogeny of pleistocene mammoth Mammuthus primigenius}},
doi = {10.1371/journal.pbio.0040073},
volume = {4},
year = {2006},
}
@article{873,
abstract = {New genes commonly appear through complete or partial duplications of pre-existing genes. Duplications of long DNA segments are constantly produced by rare mutations, may become fixed in a population by selection or random drift, and are subject to divergent evolution of the paralogous sequences after fixation, although gene conversion can impede this process. New data shed some light on each of these processes. Mutations which involve duplications can occur through at least two different mechanisms, backward strand slippage during DNA replication and unequal crossing-over. The background rate of duplication of a complete gene in humans is 10-9-10-10 per generation, although many genes located within hot-spots of large-scale mutation are duplicated much more often. Many gene duplications affect fitness strongly, and are responsible, through gene dosage effects, for a number of genetic diseases. However, high levels of intrapopulation polymorphism caused by presence or absence of long, gene-containing DNA segments imply that some duplications are not under strong selection. The polymorphism to fixation ratios appear to be approximately the same for gene duplications and for presumably selectively neutral nucleotide substitutions, which, according to the McDonald-Kreitman test, is consistent with selective neutrality of duplications. However, this pattern can also be due to negative selection against most of segregating duplications and positive selection for at least some duplications which become fixed. Patterns in post-fixation evolution of duplicated genes do not easily reveal the causes of fixations. Many gene duplications which became fixed recently in a variety of organisms were positively selected because the increased expression of the corresponding genes was beneficial. The effects of gene dosage provide a unified framework for studying all phases of the life history of a gene duplication. 
Application of well-known methods of evolutionary genetics to accumulating data on new, polymorphic, and fixed duplication will enhance our understanding of the role of natural selection in the evolution by gene duplication.},
author = {Kondrashov, Fyodor and Kondrashov, Alexey S},
journal = {Journal of Theoretical Biology},
number = {2},
pages = {141--151},
publisher = {Elsevier},
title = {{Role of selection in fixation of gene duplications}},
doi = {10.1016/j.jtbi.2005.08.033},
volume = {239},
year = {2006},
}
@article{868,
abstract = {Background: The glyoxylate cycle is thought to be present in bacteria, protists, plants, fungi, and nematodes, but not in other Metazoa. However, activity of the glyoxylate cycle enzymes, malate synthase (MS) and isocitrate lyase (ICL), in animal tissues has been reported. In order to clarify the status of the MS and ICL genes in animals and get an insight into their evolution, we undertook a comparative-genomic study. Results: Using sequence similarity searches, we identified MS genes in arthropods, echinoderms, and vertebrates, including platypus and opossum, but not in the numerous sequenced genomes of placental mammals. The regions of the placental mammals' genomes expected to code for malate synthase, as determined by comparison of the gene orders in vertebrate genomes, show clear similarity to the opossum MS sequence but contain stop codons, indicating that the MS gene became a pseudogene in placental mammals. By contrast, the ICL gene is undetectable in animals other than the nematodes that possess a bifunctional, fused ICL-MS gene. Examination of phylogenetic trees of MS and ICL suggests multiple horizontal gene transfer events that probably went in both directions between several bacterial and eukaryotic lineages. The strongest evidence was obtained for the acquisition of the bifunctional ICL-MS gene from an as yet unknown bacterial source with the corresponding operonic organization by the common ancestor of the nematodes. Conclusion: The distribution of the MS and ICL genes in animals suggests that either they encode alternative enzymes of the glyoxylate cycle that are not orthologous to the known MS and ICL or the animal MS acquired a new function that remains to be characterized. Regardless of the ultimate solution to this conundrum, the genes for the glyoxylate cycle enzymes present a remarkable variety of evolutionary events including unusual horizontal gene transfer from bacteria to animals.},
author = {Kondrashov, Fyodor and Koonin, Eugene V and Morgunov, Igor G and Finogenova, Tatiana V and Kondrashova, Marie N},
journal = {Biology Direct},
publisher = {BioMed Central},
title = {{Evolution of glyoxylate cycle enzymes in Metazoa: Evidence of multiple horizontal transfer events and pseudogene formation}},
doi = {10.1186/1745-6150-1-31},
volume = {1},
year = {2006},
}
@article{869,
abstract = {The impact of synonymous nucleotide substitutions on fitness in mammals remains controversial. Despite some indications of selective constraint, synonymous sites are often assumed to be neutral, and the rate of their evolution is used as a proxy for mutation rate. We subdivide all sites into four classes in terms of the mutable CpG context, nonCpG, postC, preG, and postCpreG, and compare four-fold synonymous sites and intron sites residing outside transposable elements. The distribution of the rate of evolution across all synonymous sites is trimodal. Rate of evolution at nonCpG synonymous sites, not preceded by C and not followed by G, is ∼10% below that at such intron sites. In contrast, rate of evolution at postCpreG synonymous sites is ∼30% above that at such intron sites. Finally, synonymous and intron postC and preG sites evolve at similar rates. The relationship between the levels of polymorphism at the corresponding synonymous and intron sites is very similar to that between their rates of evolution. Within every class, synonymous sites are occupied by G or C much more often than intron sites, whose nucleotide composition is consistent with neutral mutation-drift equilibrium. These patterns suggest that synonymous sites are under weak selection in favor of G and C, with the average coefficient s∼0.25/Ne∼10-5, where Ne is the effective population size. Such selection decelerates evolution and reduces variability at sites with symmetric mutation, but has the opposite effects at sites where the favored nucleotides are more mutable. The amino-acid composition of proteins dictates that many synonymous sites are CpGprone, which causes them, on average, to evolve faster and to be more polymorphic than intron sites. An average genotype carries ∼107 suboptimal nucleotides at synonymous sites, implying synergistic epistasis in selection against them.},
author = {Kondrashov, Fyodor and Ogurtsov, Aleksey Yu and Kondrashov, Alexey S},
journal = {Journal of Theoretical Biology},
number = {4},
pages = {616--626},
publisher = {Elsevier},
title = {{Selection in favor of nucleotides G and C diversifies evolution rates and levels of polymorphism at mammalian synonymous sites}},
doi = {10.1016/j.jtbi.2005.10.020},
volume = {240},
year = {2006},
}
@article{903,
abstract = {Background: Carcinogenesis typically involves multiple somatic mutations in caretaker (DNA repair) and gatekeeper (tumor suppressors and oncogenes) genes. Analysis of mutation spectra of the tumor suppressor that is most commonly mutated in human cancers, p53, unexpectedly suggested that somatic evolution of the p53 gene during tumorigenesis is dominated by positive selection for gain of function. This conclusion is supported by accumulating experimental evidence of evolution of new functions of p53 in tumors. These findings prompted a genome-wide analysis of possible positive selection during tumor evolution. Methods: A comprehensive analysis of probable somatic mutations in the sequences of Expressed Sequence Tags (ESTs) from malignant tumors and normal tissues was performed in order to access the prevalence of positive selection in cancer evolution. For each EST, the numbers of synonymous and non-synonymous substitutions were calculated. In order to identify genes with a signature of positive selection in cancers, these numbers were compared to: i) expected numbers and ii) the numbers for the respective genes in the ESTs from normal tissues. Results: We identified 112 genes with a signature of positive selection in cancers, i.e., a significantly elevated ratio of non-synonymous to synonymous substitutions, in tumors as compared to 37 such genes in an approximately equal-sized EST collection from normal tissues. A substantial fraction of the tumor-specific positive-selection candidates have experimentally demonstrated or strongly predicted links to cancer. Conclusion: The results of EST analysis should be interpreted with extreme caution given the noise introduced by sequencing errors and undetected polymorphisms. Furthermore, an inherent limitation of EST analysis is that multiple mutations amenable to statistical analysis can be detected only in relatively highly expressed genes. 
Nevertheless, the present results suggest that positive selection might affect a substantial number of genes during tumorigenic somatic evolution.},
author = {Babenko, Vladimir N and Basu, Malay K and Kondrashov, Fyodor and Rogozin, Igor B and Koonin, Eugene V},
journal = {BMC Cancer},
publisher = {BioMed Central},
title = {{Signs of positive selection of somatic mutations in human cancers detected by EST sequence analysis}},
doi = {10.1186/1471-2407-6-36},
volume = {6},
year = {2006},
}