@article{4253, abstract = {We consider a single genetic locus which carries two alleles, labelled P and Q. This locus experiences selection and mutation. It is linked to a second neutral locus with recombination rate r. If r = 0, this reduces to the study of a single selected locus. Assuming a Moran model for the population dynamics, we pass to a diffusion approximation and, assuming that the allele frequencies at the selected locus have reached stationarity, establish the joint generating function for the genealogy of a sample from the population and the frequency of the P allele. In essence this is the joint generating function for a coalescent and the random background in which it evolves. We use this to characterize, for the diffusion approximation, the probability of identity in state at the neutral locus of a sample of two individuals (whose type at the selected locus is known) as solutions to a system of ordinary differential equations. The only subtlety is to find the boundary conditions for this system. Finally, numerical examples are presented that illustrate the accuracy and predictions of the diffusion approximation. In particular, a comparison is made between this approach and one in which the frequencies at the selected locus are estimated by their value in the absence of fluctuations and a classical structured coalescent model is used.}, author = {Nicholas Barton and Etheridge, Alison M and Sturm, Anja K}, journal = {Annals of Applied Probability}, number = {2}, pages = {754 -- 785}, publisher = {Institute of Mathematical Statistics}, title = {{Coalescence in a Random Background}}, volume = {14}, year = {2004}, } @misc{3142, abstract = {Assembly of neuronal circuits is controlled by the sequential acquisition of neuronal subpopulation-specific identities at progressive developmental steps. Whereas neuronal features involved in initial phases of differentiation are already established at cell-cycle exit, recent findings, based mainly on work in the peripheral nervous system, suggest that the timely integration of signals encountered en route to targets and from the target region itself is essential to control late steps in connectivity. As neurons project towards their targets they require target-derived signals to establish mature axonal projections and acquire neuronal traits such as the expression of distinct combinations of neurotransmitters. Recent evidence presented in this review shows that this principle, of a signaling interplay between target-derived signals and neuronal cell bodies, is often mediated through transcriptional events and is evolutionarily conserved.}, author = {Simon Hippenmeyer and Kramer, Ina and Arber, Silvia}, booktitle = {Trends in Neurosciences}, number = {8}, pages = {482 -- 488}, publisher = {Elsevier}, title = {{Control of neuronal phenotype: What targets tell the cell bodies}}, doi = {10.1016/j.tins.2004.05.012}, volume = {27}, year = {2004}, } @article{3178, abstract = {Minimum cut/maximum flow algorithms on graphs have emerged as an increasingly useful tool for exact or approximate energy minimization in low-level vision. The combinatorial optimization literature provides many min-cut/max-flow algorithms with different polynomial time complexity. Their practical efficiency, however, has to date been studied mainly outside the scope of computer vision. The goal of this paper is to provide an experimental comparison of the efficiency of min-cut/max-flow algorithms for applications in vision. 
We compare the running times of several standard algorithms, as well as a new algorithm that we have recently developed. The algorithms we study include both Goldberg-Tarjan style "push-relabel" methods and algorithms based on Ford-Fulkerson style "augmenting paths." We benchmark these algorithms on a number of typical graphs in the contexts of image restoration, stereo, and segmentation. In many cases, our new algorithm works several times faster than any of the other methods, making near real-time performance possible. An implementation of our max-flow/min-cut algorithm is available upon request for research purposes.}, author = {Boykov, Yuri and Vladimir Kolmogorov}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, number = {9}, pages = {1124 -- 1137}, publisher = {IEEE}, title = {{An experimental comparison of min-cut/max-flow algorithms for energy minimization in vision}}, doi = {10.1109/TPAMI.2004.60}, volume = {26}, year = {2004}, } @article{3173, abstract = {In the last few years, several new algorithms based on graph cuts have been developed to solve energy minimization problems in computer vision. Each of these techniques constructs a graph such that the minimum cut on the graph also minimizes the energy. Yet, because these graph constructions are complex and highly specific to a particular energy function, graph cuts have seen limited application to date. In this paper, we give a characterization of the energy functions that can be minimized by graph cuts. Our results are restricted to functions of binary variables. However, our work generalizes many previous constructions and is easily applicable to vision problems that involve large numbers of labels, such as stereo, motion, image restoration, and scene reconstruction. We give a precise characterization of what energy functions can be minimized using graph cuts, among the energy functions that can be written as a sum of terms containing three or fewer binary variables. We also provide a general-purpose construction to minimize such an energy function. Finally, we give a necessary condition for any energy function of binary variables to be minimized by graph cuts. Researchers who are considering the use of graph cuts to optimize a particular energy function can use our results to determine if this is possible and then follow our construction to create the appropriate graph. A software implementation is freely available.}, author = {Vladimir Kolmogorov and Zabih, Ramin}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, number = {2}, pages = {147 -- 159}, publisher = {IEEE}, title = {{What energy functions can be minimized via graph cuts?}}, doi = {10.1109/TPAMI.2004.1262177}, volume = {26}, year = {2004}, } @article{3172, abstract = {The simultaneous multiple volume (SMV) approach in navigator-gated MRI allows the use of the whole motion range or the entire scan time for the reconstruction of final images by simultaneously acquiring different image volumes at different motion states. The motion tolerance range for each volume is kept small, thus SMV substantially increases the scan efficiency of navigator methods while maintaining the effectiveness of motion suppression. This article reports a general implementation of the SMV approach using a multiprocessor scheduling algorithm. Each motion state is regarded as a processor and each volume is regarded as a job. 
An efficient scheduling that completes all jobs in minimal time is maintained even when the motion pattern changes. Initial experiments demonstrated that SMV significantly increased the scan efficiency of navigator-gated MRI.}, author = {Vladimir Kolmogorov and Nguyen, Thành D and Nuval, Anthony and Spincemaille, Pascal and Prince, Martin R and Zabih, Ramin and Wang, Yusu}, journal = {Magnetic Resonance in Medicine}, number = {2}, pages = {362 -- 367}, publisher = {Wiley-Blackwell}, title = {{Multiprocessor scheduling implementation of the simultaneous multiple volume (SMV) navigator method}}, doi = {10.1002/mrm.20162}, volume = {52}, year = {2004}, } @inproceedings{3177, abstract = {Feature space clustering is a popular approach to image segmentation, in which a feature vector of local properties (such as intensity, texture or motion) is computed at each pixel. The feature space is then clustered, and each pixel is labeled with the cluster that contains its feature vector. A major limitation of this approach is that feature space clusters generally lack spatial coherence (i.e., they do not correspond to a compact grouping of pixels). In this paper, we propose a segmentation algorithm that operates simultaneously in feature space and in image space. We define an energy function over both a set of clusters and a labeling of pixels with clusters. In our framework, a pixel is labeled with a single cluster (rather than, for example, a distribution over clusters). Our energy function penalizes clusters that are a poor fit to the data in feature space, and also penalizes clusters whose pixels lack spatial coherence. The energy function can be efficiently minimized using graph cuts. Our algorithm can incorporate both parametric and non-parametric clustering methods. It can be applied to many optimization-based clustering methods, including k-means and k-medians, and can handle models which are very close in feature space. Preliminary results are presented on segmenting real and synthetic images, using both parametric and non-parametric clustering.}, author = {Zabih, Ramin and Vladimir Kolmogorov}, pages = {437 -- 444}, publisher = {IEEE}, title = {{Spatially coherent clustering using graph cuts}}, doi = {10.1109/CVPR.2004.1315196}, volume = {2}, year = {2004}, } @inproceedings{3179, abstract = {The problem of efficient, interactive foreground/background segmentation in still images is of great practical importance in image editing. Classical image segmentation tools use either texture (colour) information, e.g. Magic Wand, or edge (contrast) information, e.g. Intelligent Scissors. Recently, an approach based on optimization by graph-cut has been developed which successfully combines both types of information. In this paper we extend the graph-cut approach in three respects. First, we have developed a more powerful, iterative version of the optimisation. Secondly, the power of the iterative algorithm is used to simplify substantially the user interaction needed for a given quality of result. Thirdly, a robust algorithm for "border matting" has been developed to estimate simultaneously the alpha-matte around an object boundary and the colours of foreground pixels. 
We show that for moderately difficult examples the proposed method outperforms competitive tools.}, author = {Rother, Carsten and Vladimir Kolmogorov and Blake, Andrew}, number = {3}, pages = {309 -- 314}, publisher = {ACM}, title = {{"GrabCut" - Interactive foreground extraction using iterated graph cuts }}, doi = {10.1145/1015706.1015720}, volume = {23}, year = {2004}, } @article{3420, abstract = {Single-molecule force-spectroscopy was employed to unfold and refold single sodium-proton antiporters (NhaA) of Escherichia coli from membrane patches. Although transmembrane α-helices and extracellular polypeptide loops exhibited sufficient stability to individually establish potential barriers against unfolding, two helices predominantly unfolded pairwise, thereby acting as one structural unit. Many of the potential barriers were detected unfolding NhaA either from the C-terminal or the N-terminal end. It was found that some molecular interactions stabilizing secondary structural elements were directional, while others were not. Additionally, some interactions appeared to occur between the secondary structural elements. After unfolding ten of the 12 helices, the extracted polypeptide was allowed to refold back into the membrane. After five seconds, the refolded polypeptide established all secondary structure elements of the native protein. One helical pair showed a characteristic spring like “snap in” into its folded conformation, while the refolding process of other helices was not detected in particular. Additionally, individual helices required characteristic periods of time to fold. Correlating these results with the primary structure of NhaA allowed us to obtain the first insights into how potential barriers establish and determine the folding kinetics of the secondary structure elements.}, author = {Kedrov, Alexej and Ziegler, Christine and Harald Janovjak and Kühlbrandt, Werner and Mueller, Daniel J}, journal = {Journal of Molecular Biology}, number = {5}, pages = {1143 -- 1152}, publisher = {Elsevier}, title = {{Controlled unfolding and refolding of a single sodium/proton antiporter using atomic force microscopy}}, doi = {10.1016/j.jmb.2004.05.026}, volume = {340}, year = {2004}, } @article{3419, abstract = {The folding and stability of transmembrane proteins is a fundamental and unsolved biological problem. Here, single bacteriorhodopsin molecules were mechanically unfolded from native purple membranes using atomic force microscopy and force spectroscopy. The energy landscape of individual transmembrane α helices and polypeptide loops was mapped by monitoring the pulling speed dependence of the unfolding forces and applying Monte Carlo simulations. Single helices formed independently stable units stabilized by a single potential barrier. Mechanical unfolding of the helices was triggered by 3.9–7.7 Å extension, while natural unfolding rates were of the order of 10−3 s−1. Besides acting as individually stable units, helices associated pairwise, establishing a collective potential barrier. The unfolding pathways of individual proteins reflect distinct pulling speed-dependent unfolding routes in their energy landscapes. 
These observations support the two-stage model of membrane protein folding in which α helices insert into the membrane as stable units and then assemble into the functional protein.}, author = {Harald Janovjak and Struckmeier, Jens and Hubain, Maurice and Kessler, Max and Kedrov, Alexej and Mueller, Daniel J}, journal = {Structure}, number = {5}, pages = {871 -- 879}, publisher = {Cell Press}, title = {{Probing the energy landscape of the membrane protein bacteriorhodopsin}}, doi = {10.1016/j.str.2004.03.016}, volume = {12}, year = {2004}, } @inbook{3575, abstract = {The Jacobi set of two Morse functions defined on a common d-manifold is the set of critical points of the restrictions of one function to the level sets of the other function. Equivalently, it is the set of points where the gradients of the functions are parallel. For a generic pair of Morse functions, the Jacobi set is a smoothly embedded 1-manifold. We give a polynomial-time algorithm that computes the piecewise linear analog of the Jacobi set for functions specified at the vertices of a triangulation, and we generalize all results to more than two but at most d Morse functions.}, author = {Herbert Edelsbrunner and Harer, John}, booktitle = {Foundations of Computational Mathematics}, pages = {37 -- 57}, publisher = {Springer}, title = {{Jacobi sets of multiple Morse functions}}, doi = {10.1017/CBO9781139106962.003}, volume = {312}, year = {2004}, } @inbook{3574, author = {Herbert Edelsbrunner}, booktitle = {Handbook of Discrete and Computational Geometry}, pages = {1395 -- 1412}, publisher = {CRC Press}, title = {{Biological applications of computational topology}}, year = {2004}, } @misc{3595, abstract = {Genome sizes vary enormously. This variation in DNA content correlates with effective population size, suggesting that deleterious additions to the genome can accumulate in small populations. On this view, the increased complexity of biological functions associated with large genomes partly reflects evolutionary degeneration.}, author = {Charlesworth, Brian and Nicholas Barton}, booktitle = {Current Biology}, number = {6}, pages = {R233 -- R235}, publisher = {Cell Press}, title = {{Genome size: Does bigger mean worse?}}, doi = {10.1016/j.cub.2004.02.054}, volume = {14}, year = {2004}, } @article{3614, abstract = {We analyze the changes in the mean and variance components of a quantitative trait caused by changes in allele frequencies, concentrating on the effects of genetic drift. We use a general representation of epistasis and dominance that allows an arbitrary relation between genotype and phenotype for any number of diallelic loci. We assume initial and final Hardy-Weinberg and linkage equilibrium in our analyses of drift-induced changes. Random drift generates transient linkage disequilibria that cause correlations between allele frequency fluctuations at different loci. However, we show that these have negligible effects, at least for interactions among small numbers of loci. Our analyses are based on diffusion approximations that summarize the effects of drift in terms of F, the inbreeding coefficient, interpreted as the expected proportional decrease in heterozygosity at each locus. For haploids, the variance of the trait mean after a population bottleneck is var(Δz̄) = Σ_{k=1}^{n} F^k VA(k), where n is the number of loci contributing to the trait variance, VA(1)=VA is the additive genetic variance, and VA(k) is the kth-order additive epistatic variance. 
The expected additive genetic variance after the bottleneck, denoted (V*A), is closely related to var(Δz̄): (V*A) = (1 – F) Σ_{k=1}^{n} k F^{k–1} VA(k). Thus, epistasis inflates the expected additive variance above VA(1 – F), the expectation under additivity. For haploids (and diploids without dominance), the expected value of every variance component is inflated by the existence of higher order interactions (e.g., third-order epistasis inflates (V*AA)). This is not true in general with diploidy, because dominance alone can reduce (V*A) below VA(1 – F) (e.g., when dominant alleles are rare). Without dominance, diploidy produces simple expressions: var(Δz̄) = Σ_{k=1}^{n} (2F)^k VA(k) and (V*A) = (1 – F) Σ_{k=1}^{n} k (2F)^{k–1} VA(k). With dominance (and even without epistasis), var(Δz̄) and (V*A) no longer depend solely on the variance components in the base population. For small F, the expected additive variance simplifies to (V*A) ≈ (1 – F) VA + 4F VAA + 2F VD + 2F CAD, where CAD is a sum of two terms describing covariances between additive effects and dominance and additive × dominance interactions. Whether population bottlenecks lead to expected increases in additive variance depends primarily on the ratio of nonadditive to additive genetic variance in the base population, but dominance precludes simple predictions based solely on variance components. We illustrate these results using a model in which genotypic values are drawn at random, allowing extreme and erratic epistatic interactions. Although our analyses clarify the conditions under which drift is expected to increase VA, we question the evolutionary importance of such increases.}, author = {Nicholas Barton and Turelli, Michael}, journal = {Evolution; International Journal of Organic Evolution}, number = {10}, pages = {2111 -- 2132}, publisher = {Wiley-Blackwell}, title = {{Effects of allele frequency changes on variance components under a general model of epistasis}}, doi = {10.1111/j.0014-3820.2004.tb01591.x}, volume = {58}, year = {2004}, } @article{3615, abstract = {We investigate three alternative selection-based scenarios proposed to maintain polygenic variation: pleiotropic balancing selection, G x E interactions (with spatial or temporal variation in allelic effects), and sex-dependent allelic effects. Each analysis assumes an additive polygenic trait with n diallelic loci under stabilizing selection. We allow loci to have different effects and consider equilibria at which the population mean departs from the stabilizing-selection optimum. Under weak selection, each model produces essentially identical, approximate allele-frequency dynamics. Variation is maintained under pleiotropic balancing selection only at loci for which the strength of balancing selection exceeds the effective strength of stabilizing selection. In addition, for all models, polymorphism requires that the population mean be close enough to the optimum that directional selection does not overwhelm balancing selection. This balance allows many simultaneously stable equilibria, and we explore their properties numerically. Both spatial and temporal G x E can maintain variation at loci for which the coefficient of variation (across environments) of the effect of a substitution exceeds a critical value greater than one. The critical value depends on the correlation between substitution effects at different loci. For large positive correlations (e.g., ρ²ij > 3/4), even extreme fluctuations in allelic effects cannot maintain variation. 
Surprisingly, this constraint on correlations implies that sex-dependent allelic effects cannot maintain polygenic variation. We present numerical results that support our analytical approximations and discuss our results in connection to relevant data and alternative variance-maintaining mechanisms.}, author = {Turelli, Michael and Nicholas Barton}, journal = {Genetics}, number = {2}, pages = {1053 -- 1079}, publisher = {Genetics Society of America}, title = {{Polygenic variation maintained by balancing selection: pleiotropy, sex-dependent allelic effects and GxE interactions}}, doi = {10.1534/genetics.166.2.1053}, volume = {166}, year = {2004}, } @article{3807, abstract = {The time course of Mg(2+) block and unblock of NMDA receptors (NMDARs) determines the extent they are activated by depolarization. Here, we directly measure the rate of NMDAR channel opening in response to depolarizations at different times after brief (1 ms) and sustained (4.6 s) applications of glutamate to nucleated patches from neocortical pyramidal neurons. The kinetics of Mg(2+) unblock were found to be non-instantaneous and complex, consisting of a prominent fast component (time constant approximately 100 micros) and slower components (time constants 4 and approximately 300 ms), the relative amplitudes of which depended on the timing of the depolarizing pulse. Fitting a kinetic model to these data indicated that Mg(2+) not only blocks the NMDAR channel, but reduces both the open probability and affinity for glutamate, while enhancing desensitization. These effects slow the rate of NMDAR channel opening in response to depolarization in a time-dependent manner such that the slower components of Mg(2+) unblock are enhanced during depolarizations at later times after glutamate application. One physiological consequence of this is that brief depolarizations occurring earlier in time after glutamate application are better able to open NMDAR channels. This finding has important implications for spike-timing-dependent synaptic plasticity (STDP), where the precise (millisecond) timing of action potentials relative to synaptic inputs determines the magnitude and sign of changes in synaptic strength. Indeed, we find that STDP timing curves of NMDAR channel activation elicited by realistic dendritic action potential waveforms are narrower than expected assuming instantaneous Mg(2+) unblock, indicating that slow Mg(2+) unblock of NMDAR channels makes the STDP timing window more precise.}, author = {Kampa, Bjorn M and Clements, John and Peter Jonas and Stuart, Greg J}, journal = {Journal of Physiology}, number = {Pt 2}, pages = {337 -- 45}, publisher = {Wiley-Blackwell}, title = {{Kinetics of Mg(2+) unblock of NMDA receptors: implications for spike-timing dependent synaptic plasticity}}, doi = {10.1113/jphysiol.2003.058842 }, volume = {556}, year = {2004}, } @article{3809, abstract = {Neural stem cells in various regions of the vertebrate brain continuously generate neurons throughout life. In the mammalian hippocampus, a region important for spatial and episodic memory, thousands of new granule cells are produced per day, with the exact number depending on environmental conditions and physical exercise. The survival of these neurons is improved by learning and conversely learning may be promoted by neurogenesis. Although it has been suggested that newly generated neurons may have specific properties to facilitate learning, the cellular and synaptic mechanisms of plasticity in these neurons are largely unknown. 
Here we show that young granule cells in the adult hippocampus differ substantially from mature granule cells in both active and passive membrane properties. In young neurons, T-type Ca2+ channels can generate isolated Ca2+ spikes and boost fast Na+ action potentials, contributing to the induction of synaptic plasticity. Associative long-term potentiation can be induced more easily in young neurons than in mature neurons under identical conditions. Thus, newly generated neurons express unique mechanisms to facilitate synaptic plasticity, which may be important for the formation of new memories.}, author = {Schmidt-Hieber, Christoph and Peter Jonas and Bischofberger, Josef}, journal = {Nature}, number = {6988}, pages = {184 -- 7}, publisher = {Nature Publishing Group}, title = {{Enhanced synaptic plasticity in newly generated granule cells of the adult hippocampus}}, doi = {10.1038/nature02553}, volume = {429}, year = {2004}, } @article{3805, abstract = {The operation of neuronal networks crucially depends on a fast time course of signaling in inhibitory interneurons. Synapses that excite interneurons generate fast currents, owing to the expression of glutamate receptors of specific subunit composition. Interneurons generate brief action potentials in response to transient synaptic activation and discharge repetitively at very high frequencies during sustained stimulation. The ability to generate short-duration action potentials at high frequencies depends on the expression of specific voltage-gated K+ channels. Factors facilitating fast action potential initiation following synaptic excitation include depolarized interneuron resting potential, subthreshold conductances and active dendrites. Finally, GABA release at interneuron output synapses is rapid and highly synchronized, leading to a faster inhibition in postsynaptic interneurons than in principal cells. Thus, the expression of distinct transmitter receptors and voltage-gated ion channels ensures that interneurons operate with high speed and temporal precision.}, author = {Peter Jonas and Bischofberger, Josef and Fricker, Desdemona and Miles, Richard}, journal = {Trends in Neurosciences}, number = {1}, pages = {30 -- 40}, publisher = {Elsevier}, title = {{Interneuron Diversity series: Fast in, fast out--temporal and spatial signal processing in hippocampal interneurons}}, doi = {doi:10.1016/j.tins.2003.10.010}, volume = {27}, year = {2004}, } @article{3918, abstract = {Wingless (ergatoid) males of the tramp ant Cardiocondyla minutior attack and kill their young ergatoid rivals and thus attempt to monopolize mating with female sexuals reared in the colony. Because of the different strength of local mate competition in colonies with one or several reproductive queens, we expected the production of new ergatoid males to vary with queen number. Sex ratios were mostly female-biased, but in contrast to the sympatric species C. obscurior (Cremer and Heinze, 2002) neither the percentage of ergatoid males nor of female sexuals among the first 20 sexuals produced varied considerably with queen number. As in C. obscurior, experimental colony fragmentation led to the production of winged males, whereas in unfragmented control colonies only ergatoid males eclosed.}, author = {Heinze, Jürgen and Böttcher, A. 
and Cremer, Sylvia}, journal = {Insectes Sociaux}, number = {3}, pages = {275 -- 278}, publisher = {Springer}, title = {{Production of winged and wingless males in the ant, Cardiocondyla minutior}}, doi = {10.1007/s00040-004-0740-6}, volume = {51}, year = {2004}, } @inproceedings{3988, abstract = {We give an algorithm that locally improves the fit between two proteins modeled as space-filling diagrams. The algorithm defines the fit in purely geometric terms and improves by applying a rigid motion to one of the two proteins. Our implementation of the algorithm takes between three and ten seconds and converges with high likelihood to the correct docked configuration, provided it starts at a position away from the correct one by at most 18 degrees of rotation and at most 3.0Angstrom of translation. The speed and convergence radius make this an attractive algorithm to use in combination with a coarse sampling of the six-dimensional space of rigid motions.}, author = {Choi, Vicky and Agarwal, Pankaj K and Herbert Edelsbrunner and Rudolph, Johannes}, pages = {218 -- 229}, publisher = {Springer}, title = {{Local search heuristic for rigid protein docking}}, doi = {10.1007/978-3-540-30219-3_19}, volume = {3240}, year = {2004}, } @article{3986, abstract = {The motion of a biomolecule greatly depends on the engulfing solution, which is mostly water. Instead of representing individual water molecules, it is desirable to develop implicit solvent models that nevertheless accurately represent the contribution of the solvent interaction to the motion. In such models, hydrophobicity is expressed as a weighted sum of atomic surface areas. The derivatives of these weighted areas contribute to the force that drives the motion. In this paper we give formulas for the weighted and unweighted area derivatives of a molecule modeled as a space-filling diagram made up of balls in motion. Other than the radii and the centers of the balls, the formulas are given in terms of the sizes of circular arcs of the boundary and edges of the power diagram. We also give inclusion-exclusion formulas for these sizes.}, author = {Bryant, Robert and Herbert Edelsbrunner and Koehl, Patrice and Levitt, Michael}, journal = {Discrete & Computational Geometry}, number = {3}, pages = {293 -- 308}, publisher = {Springer}, title = {{The area derivative of a space-filling diagram}}, doi = {10.1007/s00454-004-1099-1}, volume = {32}, year = {2004}, } @article{3984, abstract = {We combine topological and geometric methods to construct a multiresolution representation for a function over a two-dimensional domain. In a preprocessing stage, we create the Morse-Smale complex of the function and progressively simplify its topology by cancelling pairs of critical points. Based on a simple notion of dependency among these cancellations, we construct a hierarchical data structure supporting traversal and reconstruction operations similarly to traditional geometry-based representations. 
We use this data structure to extract topologically valid approximations that satisfy error bounds provided at runtime.}, author = {Bremer, Peer-Timo and Herbert Edelsbrunner and Hamann, Bernd and Pascucci, Valerio}, journal = {IEEE Transactions on Visualization and Computer Graphics}, number = {4}, pages = {385 -- 396}, publisher = {IEEE}, title = {{A topological hierarchy for functions on triangulated surfaces}}, doi = {10.1109/TVCG.2004.3}, volume = {10}, year = {2004}, } @article{3987, abstract = {We consider scientific data sets that describe density functions over three-dimensional geometric domains. Such data sets are often large and coarsened representations are needed for visualization and analysis. Assuming a tetrahedral mesh representation, we construct such representations with a simplification algorithm that combines three goals: the approximation of the function, the preservation of the mesh topology, and the improvement of the mesh quality. The third goal is achieved with a novel extension of the well-known quadric error metric. We perform a number of computational experiments to understand the effect of mesh quality improvement on the density map approximation. In addition, we study the effect of geometric simplification on the topological features of the function by monitoring its critical points.}, author = {Natarajan, Vijay and Herbert Edelsbrunner}, journal = {IEEE Transactions on Visualization and Computer Graphics}, number = {5}, pages = {587 -- 597}, publisher = {IEEE}, title = {{Simplification of three-dimensional density maps}}, doi = {10.1109/TVCG.2004.32}, volume = {10}, year = {2004}, } @article{3985, abstract = {Given a Morse function f over a 2-manifold with or without boundary, the Reeb graph is obtained by contracting the connected components of the level sets to points. We prove tight upper and lower bounds on the number of loops in the Reeb graph that depend on the genus, the number of boundary components, and whether or not the 2-manifold is orientable. We also give an algorithm that constructs the Reeb graph in time O(n log n), where n is the number of edges in the triangulation used to represent the 2-manifold and the Morse function.}, author = {Cole-McLaughlin, Kree and Herbert Edelsbrunner and Harer, John and Natarajan, Vijay and Pascucci, Valerio}, journal = {Discrete & Computational Geometry}, number = {2}, pages = {231 -- 244}, publisher = {Springer}, title = {{Loops in Reeb graphs of 2-manifolds}}, doi = {10.1007/s00454-004-1122-6}, volume = {32}, year = {2004}, } @inproceedings{3989, abstract = {We introduce local and global comparison measures for a collection of k ≤ d real-valued smooth functions on a common d-dimensional Riemannian manifold. For k = d = 2 we relate the measures to the set of critical points of one function restricted to the level sets of the other. The definition of the measures extends to piecewise linear functions for which they are easy to compute. 
The computation of the measures forms the centerpiece of a software tool which we use to study scientific datasets.}, author = {Herbert Edelsbrunner and Harer, John and Natarajan, Vijay and Pascucci, Valerio}, pages = {275 -- 280}, publisher = {IEEE}, title = {{Local and global comparison of continuous functions}}, doi = {10.1109/VISUAL.2004.68}, year = {2004}, } @article{4172, abstract = {During vertebrate gastrulation, a relatively limited number of blastodermal cells undergoes a stereotypical set of cellular movements that leads to formation of the three germ layers: ectoderm, mesoderm and endoderm. Gastrulation, therefore, provides a unique developmental system in which to study cell movements in vivo in a fairly simple cellular context. Recent advances have been made in elucidating the cellular and molecular mechanisms that underlie cell movements during zebrafish gastrulation. These findings can be compared with observations made in other model systems to identify potential general mechanisms of cell migration during development.}, author = {Montero, Juan and Heisenberg, Carl-Philipp J}, journal = {Trends in Cell Biology}, number = {11}, pages = {620 -- 627}, publisher = {Cell Press}, title = {{Gastrulation dynamics: cells move into focus}}, doi = {10.1016/j.tcb.2004.09.008}, volume = {14}, year = {2004}, } @article{4238, abstract = {The dynamical basis of tumoral growth has been controversial. Many models have been proposed to explain cancer development. The descriptions employ exponential, potential, logistic or Gompertzian growth laws. Some of these models are concerned with the interaction between cancer and the immunological system. Among other properties, these models are concerned with the microscopic behavior of tumors and the emergence of cancer. We propose a modification of a previous model by Stepanova, which describes the specific immunological response against cancer. The modification consists of the substitution of a Gompertzian law for the exponential rate used for tumoral growth. This modification is motivated by the numerous works confirming that Gompertz's equation correctly describes solid tumor growth. The modified model predicts that near zero, tumors always tend to grow. Immunological contraposition never suffices to induce a complete regression of the tumor. Instead, a stable microscopic equilibrium between cancer and immunological activity can be attained. In other words, our model predicts that the theory of immune surveillance is plausible. A macroscopic equilibrium in which the system develops cancer is also possible. In this case, immunological activity is depleted. This is consistent with the phenomenon of cancer tolerance. Both equilibrium points can coexist or can exist without the other. In all cases the fixed point at zero tumor size is unstable. Since immunity cannot induce a complete tumor regression, a therapy is required. We include constant-dose therapies and show that they are insufficient. Final levels of immunocompetent cells and tumoral cells are finite, thus post-treatment regrowth of the tumor is certain. We also evaluate late-intensification therapies which are successful. They induce an asymptotic regression to zero tumor size. Immune response is also suppressed by the therapy, and thus plays a negligible role in the remission. We conclude that treatment evaluation should be successful without taking into account immunological effects. (C) 2003 Elsevier Ltd. 
All rights reserved.}, author = {de Vladar, Harold and González, J.}, journal = {Journal of Theoretical Biology}, number = {3}, pages = {335 -- 348}, publisher = {Elsevier}, title = {{Dynamic response of cancer under the influence of immunological activity and therapy}}, doi = {3801}, volume = {227}, year = {2004}, } @inbook{4230, author = {Harold Vladar and Cipriani, Roberto and Scharifker, Benjamin and Bubis, Jose}, booktitle = {Life in the Universe From the Miller Experiment to the Search for Life on Other Worlds}, editor = {Hanslmeier,A. and Kempe,S. and Seckbach,J.}, pages = {83 -- 87}, publisher = {Springer}, title = {{A mechanism for the prebiotic emergence of proteins}}, year = {2004}, } @phdthesis{4236, author = {de Vladar, Harold}, publisher = {Centro de estudios avazados, IVIC}, title = {{Métodos no lineales y sus aplicaciones en dinámicas aleatorias de poblaciones celulares}}, doi = {3810}, year = {2004}, } @inproceedings{4372, author = {Maler, Oded and Dejan Nickovic}, pages = {152 -- 166}, publisher = {Springer}, title = {{Monitoring Temporal Properties of Continuous Signals}}, doi = {1572}, year = {2004}, } @phdthesis{4424, abstract = {The enormous cost and ubiquity of software errors necessitates the need for techniques and tools that can precisely analyze large systems and prove that they meet given specifications, or if they don't, return counterexample behaviors showing how the system fails. Recent advances in model checking, decision procedures, program analysis and type systems, and a shift of focus to partial specifications common to several systems (e.g., memory safety and race freedom) have resulted in several practical verification methods. However, these methods are either precise or they are scalable, depending on whether they track the values of variables or only a fixed small set of dataflow facts (e.g., types), and are usually insufficient for precisely verifying large programs. We describe a new technique called Lazy Abstraction (LA) which achieves both precision and scalability by localizing the use of precise information. LA automatically builds, explores and refines a single abstract model of the program in a way that different parts of the model exhibit different degrees of precision, namely just enough to verify the desired property. The algorithm automatically mines the information required by partitioning mechanical proofs of unsatisfiability of spurious counterexamples into Craig Interpolants. For multithreaded systems, we give a new technique based on analyzing the behavior of a single thread executing in a context which is an abstraction of the other (arbitrarily many) threads. We define novel context models and show how to automatically infer them and analyze the full system (thread + context) using LA. LA is implemented in BLAST. We have run BLAST on Windows and Linux Device Drivers to verify API conformance properties, and have used it to find (or guarantee the absence of) data races in multithreaded Networked Embedded Systems (NESC) applications. BLAST is able to prove the absence of races in several cases where earlier methods, which depend on lock-based synchronization, fail.}, author = {Jhala, Ranjit}, pages = {1 -- 165}, publisher = {University of California, Berkeley}, title = {{Program verification by lazy abstraction}}, year = {2004}, } @inproceedings{4445, abstract = {We present a type system for E code, which is an assembly language that manages the release, interaction, and termination of real-time tasks. 
E code specifies a deadline for each task, and the type system ensures that the deadlines are path-insensitive. We show that typed E programs allow, for given worst-case execution times of tasks, a simple schedulability analysis. Moreover, the real-time programming language Giotto can be compiled into typed E code. This shows that typed E code identifies an easily schedulable yet expressive class of real-time programs. We have extended the Giotto compiler to generate typed E code, and enabled the run-time system for E code to perform a type and schedulability check before executing the code.}, author = {Thomas Henzinger and Kirsch, Christoph M}, pages = {104 -- 113}, publisher = {ACM}, title = {{A typed assembly language for real-time programs}}, doi = {10.1145/1017753.1017774}, year = {2004}, } @inproceedings{4458, abstract = {The success of model checking for large programs depends crucially on the ability to efficiently construct parsimonious abstractions. A predicate abstraction is parsimonious if at each control location, it specifies only relationships between current values of variables, and only those which are required for proving correctness. Previous methods for automatically refining predicate abstractions until sufficient precision is obtained do not systematically construct parsimonious abstractions: predicates usually contain symbolic variables, and are added heuristically and often uniformly to many or all control locations at once. We use Craig interpolation to efficiently construct, from a given abstract error trace which cannot be concretized, a parsimonious abstraction that removes the trace. At each location of the trace, we infer the relevant predicates as an interpolant between the two formulas that define the past and the future segment of the trace. Each interpolant is a relationship between current values of program variables, and is relevant only at that particular program location. It can be found by a linear scan of the proof of infeasibility of the trace. We develop our method for programs with arithmetic and pointer expressions, and call-by-value function calls. For function calls, Craig interpolation offers a systematic way of generating relevant predicates that contain only the local variables of the function and the values of the formal parameters when the function was called. We have extended our model checker Blast with predicate discovery by Craig interpolation, and applied it successfully to C programs with more than 130,000 lines of code, which was not possible with approaches that build less parsimonious abstractions.}, author = {Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S and McMillan, Kenneth L}, pages = {232 -- 244}, publisher = {ACM}, title = {{Abstractions from proofs}}, doi = {10.1145/964001.964021}, year = {2004}, } @inbook{4461, abstract = {One of the central axioms of extreme programming is the disciplined use of regression testing during stepwise software development. Due to recent progress in software model checking, it has become possible to supplement this process with automatic checks for behavioral safety properties of programs, such as conformance with locking idioms and other programming protocols and patterns. For efficiency reasons, all checks must be incremental, i.e., they must reuse partial results from previous checks in order to avoid all unnecessary repetition of expensive verification tasks. 
We show that the lazy-abstraction algorithm, and its implementation in Blast, can be extended to support the fully automatic and incremental checking of temporal safety properties during software development.}, author = {Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S and Sanvido, Marco A}, booktitle = {Verification: Theory and Practice}, pages = {332 -- 358}, publisher = {Springer}, title = {{Extreme model checking}}, doi = {10.1007/978-3-540-39910-0_16}, volume = {2772}, year = {2004}, } @inproceedings{4459, abstract = {Software model checking has been successful for sequential programs, where predicate abstraction offers suitable models, and counterexample-guided abstraction refinement permits the automatic inference of models. When checking concurrent programs, we need to abstract threads as well as the contexts in which they execute. Stateless context models, such as predicates on global variables, prove insufficient for showing the absence of race conditions in many examples. We therefore use richer context models, which combine (1) predicates for abstracting data state, (2) control flow quotients for abstracting control state, and (3) counters for abstracting an unbounded number of threads. We infer suitable context models automatically by a combination of counterexample-guided abstraction refinement, bisimulation minimization, circular assume-guarantee reasoning, and parametric reasoning about an unbounded number of threads. This algorithm, called CIRC, has been implemented in BLAST and succeeds in checking many examples of NESC code for data races. In particular, BLAST proves the absence of races in several cases where previous race checkers give false positives.}, author = {Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S}, pages = {1 -- 13}, publisher = {ACM}, title = {{Race checking by context inference}}, doi = {10.1145/996841.996844}, year = {2004}, } @inproceedings{4525, abstract = {We present a new high-level programming language, called xGiotto, for programming applications with hard real-time constraints. Like its predecessor, xGiotto is based on the LET (logical execution time) assumption: the programmer specifies when the outputs of a task become available, and the compiler checks if the specification can be implemented on a given platform. However, while the predecessor language Giotto was purely time-triggered, xGiotto also accommodates asynchronous events. Indeed, through a mechanism called event scoping, events are the main structuring principle of the new language. The xGiotto compiler and run-time system implement event scoping through a tree-based event filter. The compiler also checks programs for determinism (absence of race conditions).}, author = {Ghosal, Arkadeb and Thomas Henzinger and Kirsch, Christoph M and Sanvido, Marco A}, pages = {167 -- 170}, publisher = {Springer}, title = {{Event-driven programming with logical execution times}}, doi = {10.1007/978-3-540-24743-2_24}, volume = {2993}, year = {2004}, } @inproceedings{4555, abstract = {Strategies in repeated games can be classified as to whether or not they use memory and/or randomization. We consider Markov decision processes and 2-player graph games, both of the deterministic and probabilistic varieties. We characterize when memory and/or randomization are required for winning with respect to various classes of ω-regular objectives, noting particularly when the use of memory can be traded for the use of randomization. 
In particular, we show that Markov decision processes allow randomized memoryless optimal strategies for all Müller objectives. Furthermore, we show that 2-player probabilistic graph games allow randomized memoryless strategies for winning with probability 1 those Müller objectives which are upward-closed. Upward-closure means that if a set α of infinitely repeating vertices is winning, then all supersets of α are also winning.}, author = {Krishnendu Chatterjee and de Alfaro, Luca and Thomas Henzinger}, pages = {206 -- 217}, publisher = {IEEE}, title = {{Trading memory for randomness}}, doi = {10.1109/QEST.2004.10051}, year = {2004}, } @inproceedings{4558, abstract = {We study perfect-information stochastic parity games. These are two-player nonterminating games which are played on a graph with turn-based probabilistic transitions. A play results in an infinite path and the conflicting goals of the two players are ω-regular path properties, formalized as parity winning conditions. The qualitative solution of such a game amounts to computing the set of vertices from which a player has a strategy to win with probability 1 (or with positive probability). The quantitative solution amounts to computing the value of the game in every vertex, i.e., the highest probability with which a player can guarantee satisfaction of his own objective in a play that starts from the vertex. For the important special case of one-player stochastic parity games (parity Markov decision processes) we give polynomial-time algorithms both for the qualitative and the quantitative solution. The running time of the qualitative solution is O(d · m^{3/2}) for graphs with m edges and d priorities. The quantitative solution is based on a linear-programming formulation. For the two-player case, we establish the existence of optimal pure memoryless strategies. This has several important ramifications. First, it implies that the values of the games are rational. This is in contrast to the concurrent stochastic parity games of de Alfaro et al.; there, values are in general algebraic numbers, optimal strategies do not exist, and ε-optimal strategies have to be mixed and with infinite memory. Second, the existence of optimal pure memoryless strategies together with the polynomial-time solution for the one-player case implies that the quantitative two-player stochastic parity game problem is in NP ∩ co-NP. This generalizes a result of Condon for stochastic games with reachability objectives. It also constitutes an exponential improvement over the best previous algorithm, which is based on a doubly exponential procedure of de Alfaro and Majumdar for concurrent stochastic parity games and provides only ε-approximations of the values.}, author = {Krishnendu Chatterjee and Jurdziński, Marcin and Thomas Henzinger}, pages = {121 -- 130}, publisher = {SIAM}, title = {{Quantitative stochastic parity games}}, year = {2004}, } @article{4556, abstract = {We study the problem of determining stack boundedness and the exact maximum stack size for three classes of interrupt-driven programs. Interrupt-driven programs are used in many real-time applications that require responsive interrupt handling. In order to ensure responsiveness, programmers often enable interrupt processing in the body of lower-priority interrupt handlers. In such programs a programming error can allow interrupt handlers to be interrupted in a cyclic fashion to lead to an unbounded stack, causing the system to crash. 
For a restricted class of interrupt-driven programs, we show that there is a polynomial-time procedure to check stack boundedness, while determining the exact maximum stack size is PSPACE-complete. For a larger class of programs, the two problems are both PSPACE-complete, and for the largest class of programs we consider, the two problems are PSPACE-hard and can be solved in exponential time. While the complexities are high, our algorithms are exponential only in the number of handlers, and polynomial in the size of the program.}, author = {Krishnendu Chatterjee and Ma, Di and Majumdar, Ritankar S and Zhao, Tian and Thomas Henzinger and Palsberg, Jens}, journal = {Information and Computation}, number = {2}, pages = {144 -- 174}, publisher = {Elsevier}, title = {{Stack size analysis for interrupt-driven programs}}, doi = {10.1016/j.ic.2004.06.001}, volume = {194}, year = {2004}, } @inproceedings{4578, abstract = {BLAST is an automatic verification tool for checking temporal safety properties of C programs. Blast is based on lazy predicate abstraction driven by interpolation-based predicate discovery. In this paper, we present the Blast specification language. The language specifies program properties at two levels of precision. At the lower level, monitor automata are used to specify temporal safety properties of program executions (traces). At the higher level, relational reachability queries over program locations are used to combine lower-level trace properties. The two-level specification language can be used to break down a verification task into several independent calls of the model-checking engine. In this way, each call to the model checker may have to analyze only part of the program, or part of the specification, and may thus succeed in a reduction of the number of predicates needed for the analysis. In addition, the two-level specification language provides a means for structuring and maintaining specifications. }, author = {Beyer, Dirk and Chlipala, Adam J and Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S}, pages = {2 -- 18}, publisher = {Springer}, title = {{The BLAST query language for software verification}}, doi = {10.1007/978-3-540-27864-1_2}, volume = {3148}, year = {2004}, } @inproceedings{4577, abstract = {While model checking has been successful in uncovering subtle bugs in code, its adoption in software engineering practice has been hampered by the absence of a simple interface to the programmer in an integrated development environment. We describe an integration of the software model checker BLAST into the Eclipse development environment. We provide a verification interface for practical solutions for some typical program analysis problems - assertion checking, reachability analysis, dead code analysis, and test generation - directly on the source code. The analysis is completely automatic, and assumes no knowledge of model checking or formal notation. Moreover, the interface supports incremental program verification to support incremental design and evolution of code.}, author = {Beyer, Dirk and Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S}, pages = {251 -- 255}, publisher = {IEEE}, title = {{An eclipse plug-in for model checking}}, doi = {10.1109/WPC.2004.1311069 }, year = {2004}, } @inproceedings{4581, abstract = {We have extended the software model checker BLAST to automatically generate test suites that guarantee full coverage with respect to a given predicate. 
More precisely, given a C program and a target predicate p, BLAST determines the set L of program locations which program execution can reach with p true, and automatically generates a set of test vectors that exhibit the truth of p at all locations in L. We have used BLAST to generate test suites and to detect dead code in C programs with up to 30 K lines of code. The analysis and test vector generation is fully automatic (no user intervention) and exact (no false positives).}, author = {Beyer, Dirk and Chlipala, Adam J and Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S}, pages = {326 -- 335}, publisher = {IEEE}, title = {{Generating tests from counterexamples}}, doi = {10.1109/ICSE.2004.1317455}, year = {2004}, } @inproceedings{4629, abstract = {Temporal logic is two-valued: a property is either true or false. When applied to the analysis of stochastic systems, or systems with imprecise formal models, temporal logic is therefore fragile: even small changes in the model can lead to opposite truth values for a specification. We present a generalization of the branching-time logic Ctl which achieves robustness with respect to model perturbations by giving a quantitative interpretation to predicates and logical operators, and by discounting the importance of events according to how late they occur. In every state, the value of a formula is a real number in the interval [0,1], where 1 corresponds to truth and 0 to falsehood. The boolean operators and and or are replaced by min and max, the path quantifiers ∃ and ∀ determine sup and inf over all paths from a given state, and the temporal operators and □ specify sup and inf over a given path; a new operator averages all values along a path. Furthermore, all path operators are discounted by a parameter that can be chosen to give more weight to states that are closer to the beginning of the path. We interpret the resulting logic Dctl over transition systems, Markov chains, and Markov decision processes. We present two semantics for Dctl: a path semantics, inspired by the standard interpretation of state and path formulas in CTL, and a fixpoint semantics, inspired by the μ-calculus evaluation of CTL formulas. We show that, while these semantics coincide for CTL, they differ for Dctl, and we provide model-checking algorithms for both semantics.}, author = {de Alfaro, Luca and Faella, Marco and Thomas Henzinger and Majumdar, Ritankar S and Stoelinga, Mariëlle}, pages = {77 -- 92}, publisher = {Springer}, title = {{Model checking discounted temporal properties}}, doi = {10.1007/978-3-540-24730-2_6}, volume = {2988}, year = {2004}, } @article{6155, abstract = {The genome of the nematode Caenorhabditis elegans encodes seven soluble guanylate cyclases (sGCs) [1]. In mammals, sGCs function as α/β heterodimers activated by gaseous ligands binding to a haem prosthetic group 2, 3. The principal activator is nitric oxide, which acts through sGCs to regulate diverse cellular events. In C. elegans the function of sGCs is mysterious: the worm genome does not appear to encode nitric oxide synthase, and all C. elegans sGC subunits are more closely related to mammalian β than α subunits [1]. Here, we show that two of the seven C. elegans sGCs, GCY-35 and GCY-36, promote aggregation behavior. gcy-35 and gcy-36 are expressed in a small number of neurons. These include the body cavity neurons AQR, PQR, and URX, which are directly exposed to the blood equivalent of C. elegans and regulate aggregation behavior [4]. 
We show that GCY-35 and GCY-36 act as α-like and β-like sGC subunits and that their function in the URX sensory neurons is sufficient for strong nematode aggregation. Neither GCY-35 nor GCY-36 is absolutely required for C. elegans to aggregate. Instead, these molecules may transduce one of several pathways that induce C. elegans to aggregate or may modulate aggregation by responding to cues in C. elegans body fluid.}, author = {Cheung, Benny H.H and Arellano-Carbajal, Fausto and Rybicki, Irene and de Bono, Mario}, issn = {0960-9822}, journal = {Current Biology}, number = {12}, pages = {1105--1111}, publisher = {Elsevier}, title = {{Soluble guanylate cyclases act in neurons exposed to the body fluid to promote C. elegans aggregation behavior}}, doi = {10.1016/j.cub.2004.06.027}, volume = {14}, year = {2004}, } @article{7334, abstract = {Fundamental and phenomenological models for cells, stacks, and complete systems of PEFC and SOFC are reviewed and their predictive power is assessed by comparing model simulations against experiments. Computationally efficient models suited for engineering design include the (1+1) dimensionality approach, which decouples the membrane in-plane and through-plane processes, and the volume-averaged method (VAM) that considers only the lumped effect of pre-selected system components. The former model was shown to capture the measured lateral current density inhomogeneities in a PEFC and the latter was used for the optimization of commercial SOFC systems. State Space Modeling (SSM) was used to identify the main reaction pathways in SOFC and, in conjunction with the implementation of geometrically well-defined electrodes, has opened a new direction for the understanding of electrochemical reactions. Furthermore, SSM has advanced the understanding of the CO-poisoning-induced anode impedance in PEFC. Detailed numerical models such as the Lattice Boltzmann (LB) method for transport in porous media and the full 3-D Computational Fluid Dynamics (CFD) Navier-Stokes simulations are addressed. These models contain all components of the relevant physics and they can improve the understanding of the related phenomena, a necessary condition for the development of both appropriate simplified models as well as reliable technologies. Within the LB framework, a technique for the characterization and computer reconstruction of the porous electrode structure was developed using advanced pattern recognition algorithms. In CFD modeling, 3-D simulations were used to investigate SOFC with internal methane steam reforming and have exemplified the significance of porous and novel fractal channel distributors for the fuel and oxidant delivery, as well as for the cooling of PEFC. As importantly, the novel concept of functionally designed, fractal-shaped fuel cells has been put forth, showing promise of significant performance improvements over conventional rectangular-shaped units. Thermo-economic modeling for the optimization of PEFC is finally addressed.}, author = {Mantzaras, John and Freunberger, Stefan Alexander and Büchi, Felix N. and Roos, Markus and Brandstätter, Wilhelm and Prestat, Michel and Gauckler, Ludwig J. and Andreaus, Bernhard and Hajbolouri, Faegheh and Senn, Stephan M. and Poulikakos, Dimos and Chaniotis, Andreas K.
and Larrain, Diego and Autissier, Nordahl and Maréchal, François}, issn = {0009-4293}, journal = {CHIMIA International Journal for Chemistry}, number = {12}, pages = {857--868}, publisher = {Swiss Chemical Society}, title = {{Fuel cell modeling and simulations}}, doi = {10.2533/000942904777677029}, volume = {58}, year = {2004}, } @article{7333, abstract = {The analysis of the complete H2/air polymer electrolyte fuel cell system shows that process air humidification is one of the biggest obstacles for a high performance portable system in the kW range. Therefore, a new concept, with passive process air humidification integrated into the stack, has been developed. Humidification in each cell makes the process independent of the number of cells and the operation mode, thus making the concept fully scalable. Without external humidification the system is simpler, smaller, and cheaper. The humidification of the process air is achieved by transfer of product water from the exhaust air, through part of the membrane, to the dry intake air. Tests have shown that cells using the concept of internal humidification and operated with dry air at 70 °C have almost the same performance as when operated with external humidification. A 42-cell stack with this internal humidification concept was built and integrated into a portable 1 kW power generator system.}, author = {Santis, M. and Schmid, D. and Ruge, M. and Freunberger, Stefan Alexander and Büchi, F.N.}, issn = {1615-6846}, journal = {Fuel Cells}, number = {3}, pages = {214--218}, publisher = {Wiley}, title = {{Modular stack-internal air humidification concept-verification in a 1 kW stack}}, doi = {10.1002/fuce.200400028}, volume = {4}, year = {2004}, } @article{864, abstract = {We present a method for prediction of functional sites in a set of aligned protein sequences. The method selects sites which are both well conserved and clustered together in space, as inferred from the 3D structures of proteins included in the alignment. We tested the method using 86 alignments from the NCBI CDD database, where the sites of experimentally determined ligand and/or macromolecular interactions are annotated. In agreement with earlier investigations, we found that functional site predictions are most successful when overall background sequence conservation is low, such that sites under evolutionary constraint become apparent. In addition, we found that averaging of conservation values across spatially clustered sites improves predictions under certain conditions: that is, when overall conservation is relatively high and when the site in question involves a large macromolecular binding interface. Under these conditions it is better to look for clusters of conserved sites than to look for particular conserved sites.}, author = {Panchenko, Anna R and Fyodor Kondrashov and Bryant, Stephen H}, journal = {Protein Science}, number = {4}, pages = {884 -- 892}, publisher = {Wiley-Blackwell}, title = {{Prediction of functional sites by analysis of sequence and structure conservation}}, doi = {10.1110/ps.03465504}, volume = {13}, year = {2004}, } @article{870, abstract = {Only a fraction of eukaryotic genes affect the phenotype drastically. We compared 18 parameters in 1273 human morbid genes, known to cause diseases, and in the remaining 16 580 unambiguous human genes.
Morbid genes evolve more slowly, have wider phylogenetic distributions, are more similar to essential genes of Drosophila melanogaster, code for longer proteins containing more alanine and glycine and less histidine, lysine and methionine, possess larger numbers of longer introns with more accurate splicing signals and have higher and broader expressions. These differences make it possible to classify as non-morbid 34% of human genes with unknown morbidity, when only 5% of known morbid genes are incorrectly classified as non-morbid. This classification can help to identify disease-causing genes among multiple candidates.}, author = {Fyodor Kondrashov and Ogurtsov, Aleksey Yu and Kondrashov, Alexey S}, journal = {Nucleic Acids Research}, number = {5}, pages = {1731 -- 1737}, publisher = {Oxford University Press}, title = {{Bioinformatical assay of human gene morbidity}}, doi = {10.1093/nar/gkh330}, volume = {32}, year = {2004}, } @article{875, abstract = {The dominance of wild-type alleles and the concomitant recessivity of deleterious mutant alleles might have evolved by natural selection or could be a by-product of the molecular and physiological mechanisms of gene action. We compared the properties of human haplosufficient genes, whose wild-type alleles are dominant over loss-of-function alleles, with haploinsufficient (recessive wild-type) genes, which produce an abnormal phenotype when heterozygous for a loss-of-function allele. The fraction of haplosufficient genes is the highest among the genes that encode enzymes, which is best compatible with the physiological theory. Haploinsufficient genes, on average, have more paralogs than haplosufficient genes, supporting the idea that gene dosage could be important for the initial fixation of duplications. Thus, haplo(in)sufficiency of a gene and its propensity for duplication might have a common evolutionary basis.}, author = {Fyodor Kondrashov and Koonin, Eugene V}, journal = {Trends in Genetics}, number = {7}, pages = {287 -- 291}, publisher = {Elsevier}, title = {{A common framework for understanding the origin of genetic dominance and evolutionary fates of gene duplications}}, doi = {10.1016/j.tig.2004.05.001}, volume = {20}, year = {2004}, } @article{889, abstract = {The function of protein and RNA molecules depends on complex epistatic interactions between sites. Therefore, the deleterious effect of a mutation can be suppressed by a compensatory second-site substitution. In relating a list of 86 pathogenic mutations in human tRNAs encoded by mitochondrial genes to the sequences of their mammalian orthologs, we noted that 52 pathogenic mutations were present in normal tRNAs of one or several nonhuman mammals. We found at least five mechanisms of compensation for 32 pathogenic mutations that destroyed a Watson-Crick pair in one of the four tRNA stems: restoration of the affected Watson-Crick interaction (25 cases), strengthening of another pair (4 cases), creation of a new pair (8 cases), changes of multiple interactions in the affected stem (11 cases) and changes involving the interaction between the loop and stem structures (3 cases). A pathogenic mutation and its compensating substitution are fixed in a lineage in rapid succession, and often a compensatory interaction evolves convergently in different clades.
At least 10%, and perhaps as many as 50%, of all nucleotide substitutions in evolving mammalian tRNAs participate in such interactions, indicating that the evolution of tRNAs proceeds along highly epistatic fitness ridges.}, author = {Kern, Andrew D and Fyodor Kondrashov}, journal = {Nature Genetics}, number = {11}, pages = {1207 -- 1212}, publisher = {Nature Publishing Group}, title = {{Mechanisms and convergence of compensatory evolution in mammalian mitochondrial tRNAs}}, doi = {10.1038/ng1451}, volume = {36}, year = {2004}, } @article{9493, abstract = {In a number of organisms, transgenes containing transcribed inverted repeats (IRs) that produce hairpin RNA can trigger RNA-mediated silencing, which is associated with 21-24 nucleotide small interfering RNAs (siRNAs). In plants, IR-driven RNA silencing also causes extensive cytosine methylation of homologous DNA in both the transgene "trigger" and any other homologous DNA sequences--"targets". Endogenous genomic sequences, including transposable elements and repeated elements, are also subject to RNA-mediated silencing. The RNA silencing gene ARGONAUTE4 (AGO4) is required for maintenance of DNA methylation at several endogenous loci and for the establishment of methylation at the FWA gene. Here, we show that mutation of AGO4 substantially reduces the maintenance of DNA methylation triggered by IR transgenes, but AGO4 loss-of-function does not block the initiation of DNA methylation by IRs. AGO4 primarily affects non-CG methylation of the target sequences, while the IR trigger sequences lose methylation in all sequence contexts. Finally, we find that AGO4 and the DRM methyltransferase genes are required for maintenance of siRNAs at a subset of endogenous sequences, but AGO4 is not required for the accumulation of IR-induced siRNAs or a number of endogenous siRNAs, suggesting that AGO4 may function downstream of siRNA production.}, author = {Zilberman, Daniel and Cao, Xiaofeng and Johansen, Lisa K. and Xie, Zhixin and Carrington, James C. and Jacobsen, Steven E.}, issn = {1879-0445}, journal = {Current Biology}, number = {13}, pages = {1214--1220}, publisher = {Elsevier}, title = {{Role of Arabidopsis ARGONAUTE4 in RNA-directed DNA methylation triggered by inverted repeats}}, doi = {10.1016/j.cub.2004.06.055}, volume = {14}, year = {2004}, }