@phdthesis{4236,
author = {de Vladar, Harold},
school = {Centro de Estudios Avanzados, IVIC},
title = {{Métodos no lineales y sus aplicaciones en dinámicas aleatorias de poblaciones celulares}},
internal-id = {3810},
year = {2004},
}
@article{4238,
abstract = {The dynamical basis of tumoral growth has been controversial. Many models have been proposed to explain cancer development. The descriptions employ exponential, potential, logistic or Gompertzian growth laws. Some of these models are concerned with the interaction between cancer and the immunological system. Among other properties, these models are concerned with the microscopic behavior of tumors and the emergence of cancer. We propose a modification of a previous model by Stepanova, which describes the specific immunological response against cancer. The modification consists of the substitution of a Gompertzian law for the exponential rate used for tumoral growth. This modification is motivated by the numerous works confirming that Gompertz's equation correctly describes solid tumor growth. The modified model predicts that near zero, tumors always tend to grow. Immunological contraposition never suffices to induce a complete regression of the tumor. Instead, a stable microscopic equilibrium between cancer and immunological activity can be attained. In other words, our model predicts that the theory of immune surveillance is plausible. A macroscopic equilibrium in which the system develops cancer is also possible. In this case, immunological activity is depleted. This is consistent with the phenomena of cancer tolerance. Both equilibrium points can coexist or can exist without the other. In all cases the fixed point at zero tumor size is unstable. Since immunity cannot induce a complete tumor regression, a therapy is required. We include constant-dose therapies and show that they are insufficient. Final levels of immunocompetent cells and tumoral cells are finite, thus post-treatment regrowth of the tumor is certain. We also evaluate late-intensification therapies which are successful. They induce an asymptotic regression to zero tumor size. Immune response is also suppressed by the therapy, and thus plays a negligible role in the remission. 
We conclude that treatment evaluation should be successful without taking into account immunological effects. (C) 2003 Elsevier Ltd. All rights reserved.},
author = {de Vladar, Harold and González, J.},
journal = {Journal of Theoretical Biology},
number = {3},
pages = {335 -- 348},
publisher = {Elsevier},
title = {{Dynamic response of cancer under the influence of immunological activity and therapy}},
internal-id = {3801},
volume = {227},
year = {2004},
}
@incollection{4239,
author = {de Vladar, Harold and Cipriani, Roberto and Scharifker, Benjamin and Bubis, Jose},
booktitle = {Life in the Universe: From the Miller Experiment to the Search for Life on Other Worlds},
editor = {Seckbach, J. and Chela-Flores, J. and Owen, T. and Raulin, F.},
pages = {83 -- 87},
publisher = {Springer},
title = {{A Mechanism for the Prebiotic Emergence of Proteins}},
internal-id = {3807},
volume = {7},
year = {2004},
}
@article{4253,
abstract = {We consider a single genetic locus which carries two alleles, labelled P and Q. This locus experiences selection and mutation. It is linked to a second neutral locus with recombination rate r. If r = 0, this reduces to the study of a single selected locus. Assuming a Moran model for the population dynamics, we pass to a diffusion approximation and, assuming that the allele frequencies at the selected locus have reached stationarity, establish the joint generating function for the genealogy of a sample from the population and the frequency of the P allele. In essence this is the joint generating function for a coalescent and the random background in which it evolves. We use this to characterize, for the diffusion approximation, the probability of identity in state at the neutral locus of a sample of two individuals (whose type at the selected locus is known) as solutions to a system of ordinary differential equations. The only subtlety is to find the boundary conditions for this system. Finally, numerical examples are presented that illustrate the accuracy and predictions of the diffusion approximation. In particular, a comparison is made between this approach and one in which the frequencies at the selected locus are estimated by their value in the absence of fluctuations and a classical structured coalescent model is used.},
author = {Nicholas Barton and Etheridge, Alison M and Sturm, Anja K},
journal = {Annals of Applied Probability},
number = {2},
pages = {754 -- 785},
publisher = {Institute of Mathematical Statistics},
title = {{Coalescence in a Random Background}},
volume = {14},
year = {2004},
}
@inproceedings{4372,
author = {Maler, Oded and Dejan Nickovic},
booktitle = {Formal Techniques, Modelling and Analysis of Timed and Fault-Tolerant Systems (FORMATS/FTRTFT)},
pages = {152 -- 166},
publisher = {Springer},
title = {{Monitoring Temporal Properties of Continuous Signals}},
internal-id = {1572},
year = {2004},
}
@phdthesis{4424,
abstract = {The enormous cost and ubiquity of software errors necessitates the need for techniques and tools that can precisely analyze large systems and prove that they meet given specifications, or if they don't, return counterexample behaviors showing how the system fails. Recent advances in model checking, decision procedures, program analysis and type systems, and a shift of focus to partial specifications common to several systems (e.g., memory safety and race freedom) have resulted in several practical verification methods. However, these methods are either precise or they are scalable, depending on whether they track the values of variables or only a fixed small set of dataflow facts (e.g., types), and are usually insufficient for precisely verifying large programs.
We describe a new technique called Lazy Abstraction (LA) which achieves both precision and scalability by localizing the use of precise information. LA automatically builds, explores and refines a single abstract model of the program in a way that different parts of the model exhibit different degrees of precision, namely just enough to verify the desired property. The algorithm automatically mines the information required by partitioning mechanical proofs of unsatisfiability of spurious counterexamples into Craig Interpolants. For multithreaded systems, we give a new technique based on analyzing the behavior of a single thread executing in a context which is an abstraction of the other (arbitrarily many) threads. We define novel context models and show how to automatically infer them and analyze the full system (thread + context) using LA.
LA is implemented in BLAST. We have run BLAST on Windows and Linux Device Drivers to verify API conformance properties, and have used it to find (or guarantee the absence of) data races in multithreaded Networked Embedded Systems (NESC) applications. BLAST is able to prove the absence of races in several cases where earlier methods, which depend on lock-based synchronization, fail.},
author = {Jhala, Ranjit},
pages = {1 -- 165},
school = {University of California, Berkeley},
title = {{Program verification by lazy abstraction}},
year = {2004},
}
@inproceedings{4445,
abstract = {We present a type system for E code, which is an assembly language that manages the release, interaction, and termination of real-time tasks. E code specifies a deadline for each task, and the type system ensures that the deadlines are path-insensitive. We show that typed E programs allow, for given worst-case execution times of tasks, a simple schedulability analysis. Moreover, the real-time programming language Giotto can be compiled into typed E~code. This shows that typed E~code identifies an easily schedulable yet expressive class of real-time programs. We have extended the Giotto compiler to generate typed E code, and enabled the run-time system for E code to perform a type and schedulability check before executing the code.},
author = {Thomas Henzinger and Kirsch, Christoph M},
booktitle = {Proceedings of the ACM International Conference on Embedded Software (EMSOFT)},
pages = {104 -- 113},
publisher = {ACM},
title = {{A typed assembly language for real-time programs}},
doi = {10.1145/1017753.1017774},
year = {2004},
}
@inproceedings{4458,
abstract = {The success of model checking for large programs depends crucially on the ability to efficiently construct parsimonious abstractions. A predicate abstraction is parsimonious if at each control location, it specifies only relationships between current values of variables, and only those which are required for proving correctness. Previous methods for automatically refining predicate abstractions until sufficient precision is obtained do not systematically construct parsimonious abstractions: predicates usually contain symbolic variables, and are added heuristically and often uniformly to many or all control locations at once. We use Craig interpolation to efficiently construct, from a given abstract error trace which cannot be concretized, a parsimonious abstraction that removes the trace. At each location of the trace, we infer the relevant predicates as an interpolant between the two formulas that define the past and the future segment of the trace. Each interpolant is a relationship between current values of program variables, and is relevant only at that particular program location. It can be found by a linear scan of the proof of infeasibility of the trace. We develop our method for programs with arithmetic and pointer expressions, and call-by-value function calls. For function calls, Craig interpolation offers a systematic way of generating relevant predicates that contain only the local variables of the function and the values of the formal parameters when the function was called. We have extended our model checker Blast with predicate discovery by Craig interpolation, and applied it successfully to C programs with more than 130,000 lines of code, which was not possible with approaches that build less parsimonious abstractions.},
author = {Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S and McMillan, Kenneth L},
booktitle = {Proceedings of the ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages (POPL)},
pages = {232 -- 244},
publisher = {ACM},
title = {{Abstractions from proofs}},
doi = {10.1145/964001.964021},
year = {2004},
}
@inproceedings{4459,
abstract = {Software model checking has been successful for sequential programs, where predicate abstraction offers suitable models, and counterexample-guided abstraction refinement permits the automatic inference of models. When checking concurrent programs, we need to abstract threads as well as the contexts in which they execute. Stateless context models, such as predicates on global variables, prove insufficient for showing the absence of race conditions in many examples. We therefore use richer context models, which combine (1) predicates for abstracting data state, (2) control flow quotients for abstracting control state, and (3) counters for abstracting an unbounded number of threads. We infer suitable context models automatically by a combination of counterexample-guided abstraction refinement, bisimulation minimization, circular assume-guarantee reasoning, and parametric reasoning about an unbounded number of threads. This algorithm, called CIRC, has been implemented in BLAST and succeeds in checking many examples of NESC code for data races. In particular, BLAST proves the absence of races in several cases where previous race checkers give false positives.},
author = {Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S},
booktitle = {Proceedings of the ACM SIGPLAN Conference on Programming Language Design and Implementation (PLDI)},
pages = {1 -- 13},
publisher = {ACM},
title = {{Race checking by context inference}},
doi = {10.1145/996841.996844},
year = {2004},
}
@incollection{4461,
abstract = {One of the central axioms of extreme programming is the disciplined use of regression testing during stepwise software development. Due to recent progress in software model checking, it has become possible to supplement this process with automatic checks for behavioral safety properties of programs, such as conformance with locking idioms and other programming protocols and patterns. For efficiency reasons, all checks must be incremental, i.e., they must reuse partial results from previous checks in order to avoid all unnecessary repetition of expensive verification tasks. We show that the lazy-abstraction algorithm, and its implementation in Blast, can be extended to support the fully automatic and incremental checking of temporal safety properties during software development.},
author = {Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S and Sanvido, Marco A},
booktitle = {Verification: Theory and Practice},
pages = {332 -- 358},
publisher = {Springer},
title = {{Extreme model checking}},
doi = {10.1007/978-3-540-39910-0_16},
volume = {2772},
year = {2004},
}
@inproceedings{4525,
abstract = {We present a new high-level programming language, called xGiotto, for programming applications with hard real-time constraints. Like its predecessor, xGiotto is based on the LET (logical execution time) assumption: the programmer specifies when the outputs of a task become available, and the compiler checks if the specification can be implemented on a given platform. However, while the predecessor language Giotto was purely time-triggered, xGiotto accommodates also asynchronous events. Indeed, through a mechanism called event scoping, events are the main structuring principle of the new language. The xGiotto compiler and run-time system implement event scoping through a tree-based event filter. The compiler also checks programs for determinism (absence of race conditions).},
author = {Ghosal, Arkadeb and Thomas Henzinger and Kirsch, Christoph M and Sanvido, Marco A},
booktitle = {Hybrid Systems: Computation and Control (HSCC)},
pages = {167 -- 170},
publisher = {Springer},
title = {{Event-driven programming with logical execution times}},
doi = {10.1007/978-3-540-24743-2_24},
volume = {2993},
year = {2004},
}
@inproceedings{4555,
abstract = {Strategies in repeated games can be classified as to whether or not they use memory and/or randomization. We consider Markov decision processes and 2-player graph games, both of the deterministic and probabilistic varieties. We characterize when memory and/or randomization are required for winning with respect to various classes of ω-regular objectives, noting particularly when the use of memory can be traded for the use of randomization. In particular, we show that Markov decision processes allow randomized memoryless optimal strategies for all Müller objectives. Furthermore, we show that 2-player probabilistic graph games allow randomized memoryless strategies for winning with probability 1 those Müller objectives which are upward-closed. Upward-closure means that if a set α of infinitely repeating vertices is winning, then all supersets of α are also winning.},
author = {Krishnendu Chatterjee and de Alfaro, Luca and Thomas Henzinger},
booktitle = {Proceedings of the International Conference on the Quantitative Evaluation of Systems (QEST)},
pages = {206 -- 217},
publisher = {IEEE},
title = {{Trading memory for randomness}},
doi = {10.1109/QEST.2004.10051},
year = {2004},
}
@article{4556,
abstract = {We study the problem of determining stack boundedness and the exact maximum stack size for three classes of interrupt-driven programs. Interrupt-driven programs are used in many real-time applications that require responsive interrupt handling. In order to ensure responsiveness, programmers often enable interrupt processing in the body of lower-priority interrupt handlers. In such programs a programming error can allow interrupt handlers to be interrupted in a cyclic fashion to lead to an unbounded stack, causing the system to crash. For a restricted class of interrupt-driven programs, we show that there is a polynomial-time procedure to check stack boundedness, while determining the exact maximum stack size is PSPACE-complete. For a larger class of programs, the two problems are both PSPACE-complete, and for the largest class of programs we consider, the two problems are PSPACE-hard and can be solved in exponential time. While the complexities are high, our algorithms are exponential only in the number of handlers, and polynomial in the size of the program.},
author = {Krishnendu Chatterjee and Ma, Di and Majumdar, Ritankar S and Zhao, Tian and Thomas Henzinger and Palsberg, Jens},
journal = {Information and Computation},
number = {2},
pages = {144 -- 174},
publisher = {Elsevier},
title = {{Stack size analysis for interrupt-driven programs}},
doi = {10.1016/j.ic.2004.06.001},
volume = {194},
year = {2004},
}
@inproceedings{4558,
abstract = {We study perfect-information stochastic parity games. These are two-player nonterminating games which are played on a graph with turn-based probabilistic transitions. A play results in an infinite path and the conflicting goals of the two players are ω-regular path properties, formalized as parity winning conditions. The qualitative solution of such a game amounts to computing the set of vertices from which a player has a strategy to win with probability 1 (or with positive probability). The quantitative solution amounts to computing the value of the game in every vertex, i.e., the highest probability with which a player can guarantee satisfaction of his own objective in a play that starts from the vertex. For the important special case of one-player stochastic parity games (parity Markov decision processes) we give polynomial-time algorithms both for the qualitative and the quantitative solution. The running time of the qualitative solution is O(d · m^{3/2}) for graphs with m edges and d priorities. The quantitative solution is based on a linear-programming formulation. For the two-player case, we establish the existence of optimal pure memoryless strategies. This has several important ramifications. First, it implies that the values of the games are rational. This is in contrast to the concurrent stochastic parity games of de Alfaro et al.; there, values are in general algebraic numbers, optimal strategies do not exist, and ε-optimal strategies have to be mixed and with infinite memory. Second, the existence of optimal pure memoryless strategies together with the polynomial-time solution for one-player case implies that the quantitative two-player stochastic parity game problem is in NP ∩ co-NP. This generalizes a result of Condon for stochastic games with reachability objectives. 
It also constitutes an exponential improvement over the best previous algorithm, which is based on a doubly exponential procedure of de Alfaro and Majumdar for concurrent stochastic parity games and provides only ε-approximations of the values.},
author = {Krishnendu Chatterjee and Jurdziński, Marcin and Thomas Henzinger},
booktitle = {Proceedings of the ACM-SIAM Symposium on Discrete Algorithms (SODA)},
pages = {121 -- 130},
publisher = {SIAM},
title = {{Quantitative stochastic parity games}},
year = {2004},
}
@inproceedings{4577,
abstract = {While model checking has been successful in uncovering subtle bugs in code, its adoption in software engineering practice has been hampered by the absence of a simple interface to the programmer in an integrated development environment. We describe an integration of the software model checker BLAST into the Eclipse development environment. We provide a verification interface for practical solutions for some typical program analysis problems - assertion checking, reachability analysis, dead code analysis, and test generation - directly on the source code. The analysis is completely automatic, and assumes no knowledge of model checking or formal notation. Moreover, the interface supports incremental program verification to support incremental design and evolution of code.},
author = {Beyer, Dirk and Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S},
booktitle = {Proceedings of the IEEE International Workshop on Program Comprehension (IWPC)},
pages = {251 -- 255},
publisher = {IEEE},
title = {{An eclipse plug-in for model checking}},
doi = {10.1109/WPC.2004.1311069},
year = {2004},
}
@inproceedings{4578,
abstract = {BLAST is an automatic verification tool for checking temporal safety properties of C programs. Blast is based on lazy predicate abstraction driven by interpolation-based predicate discovery. In this paper, we present the Blast specification language. The language specifies program properties at two levels of precision. At the lower level, monitor automata are used to specify temporal safety properties of program executions (traces). At the higher level, relational reachability queries over program locations are used to combine lower-level trace properties. The two-level specification language can be used to break down a verification task into several independent calls of the model-checking engine. In this way, each call to the model checker may have to analyze only part of the program, or part of the specification, and may thus succeed in a reduction of the number of predicates needed for the analysis. In addition, the two-level specification language provides a means for structuring and maintaining specifications. },
author = {Beyer, Dirk and Chlipala, Adam J and Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S},
booktitle = {Proceedings of the International Static Analysis Symposium (SAS)},
pages = {2 -- 18},
publisher = {Springer},
title = {{The BLAST query language for software verification}},
doi = {10.1007/978-3-540-27864-1_2},
volume = {3148},
year = {2004},
}
@inproceedings{4581,
abstract = {We have extended the software model checker BLAST to automatically generate test suites that guarantee full coverage with respect to a given predicate. More precisely, given a C program and a target predicate p, BLAST determines the set L of program locations which program execution can reach with p true, and automatically generates a set of test vectors that exhibit the truth of p at all locations in L. We have used BLAST to generate test suites and to detect dead code in C programs with up to 30 K lines of code. The analysis and test vector generation is fully automatic (no user intervention) and exact (no false positives).},
author = {Beyer, Dirk and Chlipala, Adam J and Thomas Henzinger and Jhala, Ranjit and Majumdar, Ritankar S},
booktitle = {Proceedings of the International Conference on Software Engineering (ICSE)},
pages = {326 -- 335},
publisher = {IEEE},
title = {{Generating tests from counterexamples}},
doi = {10.1109/ICSE.2004.1317455},
year = {2004},
}
@article{2997,
abstract = {Polar transport-dependent local accumulation of auxin provides positional cues for multiple plant patterning processes. This directional auxin flow depends on the polar subcellular localization of the PIN auxin efflux regulators. Overexpression of the PINOID protein kinase induces a basal-to-apical shift in PIN localization, resulting in the loss of auxin gradients and strong defects in embryo and seedling roots. Conversely, pid loss of function induces an apical-to-basal shift in PIN1 polar targeting at the inflorescence apex, accompanied by defective organogenesis. Our results show that a PINOID-dependent binary switch controls PIN polarity and mediates changes in auxin flow to create local gradients for patterning processes.},
author = {Jirí Friml and Yang, Xiong and Michniewicz, Marta and Weijers, Dolf and Quint, Ab and Tietz, Olaf and Benjamins, René and Ouwerkerk, Pieter B and Ljung, Karin and Sandberg, Göran and Hooykaas, Paul J and Palme, Klaus and Offringa, Remko},
journal = {Science},
number = {5697},
pages = {862 -- 865},
publisher = {American Association for the Advancement of Science},
title = {{A PINOID-dependent binary switch in apical-basal PIN polar targeting directs auxin efflux}},
doi = {10.1126/science.1100618},
volume = {306},
year = {2004},
}
@article{2998,
abstract = {The packaging of the genomic DNA into chromatin in the cell nucleus requires machineries that facilitate DNA-dependent processes such as transcription in the presence of repressive chromatin structures. Using co-immunoprecipitation we have identified in Arabidopsis thaliana cells the FAcilitates Chromatin Transcription (FACT) complex, consisting of the 120-kDa Spt16 and the 71-kDa SSRP1 proteins. Indirect immunofluorescence analyses revealed that both FACT subunits co-localize to nuclei of the majority of cell types in embryos, shoots and roots, whereas FACT is not present in terminally differentiated cells such as mature trichoblasts or cells of the root cap. In the nucleus, Spt16 and SSRP1 are found in the cytologically defined euchromatin of interphase cells independent of the status of DNA replication, but the proteins are not associated with heterochromatic chromocentres and condensed mitotic chromosomes. FACT can be detected by chromatin immunoprecipitation over the entire transcribed region (5′-UTR, coding sequence, 3′-UTR) of actively transcribed genes, whereas it does not occur at transcriptionally inactive heterochromatic regions and intergenic regions. FACT localizes to inducible genes only after induction of transcription, and the association of the complex with the genes correlates with the level of transcription. Collectively, these results indicate that FACT assists transcription elongation through plant chromatin.},
author = {Duroux, Meg and Houben, Andreas and Růžička, Kamil and Jirí Friml and Grasser, Klaus D},
journal = {Plant Journal},
number = {5},
pages = {660 -- 671},
publisher = {Wiley-Blackwell},
title = {{The chromatin remodelling complex FACT associates with actively transcribed regions of the Arabidopsis genome}},
doi = {10.1111/j.1365-313X.2004.02242.x},
volume = {40},
year = {2004},
}
@article{2999,
abstract = {Embryogenesis of flowering plants establishes a basic body plan with apical-basal, radial and bilateral patterns from the single-celled zygote. Arabidopsis embryogenesis exhibits a nearly invariant cell division pattern and therefore is an ideal system for studies of early plant development. However, plant embryos are difficult to access for experimental manipulation, as they develop deeply inside maternal tissues. Here we present a method for the culture of zygotic Arabidopsis embryos in vitro. The technique omits excision of the embryo by culturing the entire ovule, thus greatly facilitating the time and effort involved. It enables external manipulation of embryo development and culture from the earliest developmental stages up to maturity. Administration of various chemical treatments as well as the use of different molecular markers is demonstrated together with standard techniques for visualizing gene expression and protein localization in in vitro cultivated embryos. The presented set of techniques allows for so far unavailable molecular physiology approaches in the study of early plant development.},
author = {Sauer, Michael and Jirí Friml},
journal = {Plant Journal},
number = {5},
pages = {835 -- 843},
publisher = {Wiley-Blackwell},
title = {{In vitro culture of Arabidopsis embryos within their ovules}},
doi = {10.1111/j.1365-313X.2004.02248.x},
volume = {40},
year = {2004},
}
@article{3142,
abstract = {Assembly of neuronal circuits is controlled by the sequential acquisition of neuronal subpopulation-specific identities at progressive developmental steps. Whereas neuronal features involved in initial phases of differentiation are already established at cell-cycle exit, recent findings, based mainly on work in the peripheral nervous system, suggest that the timely integration of signals encountered en route to targets and from the target region itself is essential to control late steps in connectivity. As neurons project towards their targets they require target-derived signals to establish mature axonal projections and acquire neuronal traits such as the expression of distinct combinations of neurotransmitters. Recent evidence presented in this review shows that this principle, of a signaling interplay between target-derived signals and neuronal cell bodies, is often mediated through transcriptional events and is evolutionarily conserved.},
author = {Simon Hippenmeyer and Kramer, Ina and Arber, Silvia},
journal = {Trends in Neurosciences},
number = {8},
pages = {482 -- 488},
publisher = {Elsevier},
title = {{Control of neuronal phenotype: What targets tell the cell bodies}},
doi = {10.1016/j.tins.2004.05.012},
volume = {27},
year = {2004},
}
@article{3172,
abstract = {The simultaneous multiple volume (SMV) approach in navigator-gated MRI allows the use of the whole motion range or the entire scan time for the reconstruction of final images by simultaneously acquiring different image volumes at different motion states. The motion tolerance range for each volume is kept small, thus SMV substantially increases the scan efficiency of navigator methods while maintaining the effectiveness of motion suppression. This article reports a general implementation of the SMV approach using a multiprocessor scheduling algorithm. Each motion state is regarded as a processor and each volume is regarded as a job. An efficient scheduling that completes all jobs in minimal time is maintained even when the motion pattern changes. Initial experiments demonstrated that SMV significantly increased the scan efficiency of navigator-gated MRI.},
author = {Vladimir Kolmogorov and Nguyen, Thành D and Nuval, Anthony and Spincemaille, Pascal and Prince, Martin R and Zabih, Ramin and Wang, Yusu},
journal = {Magnetic Resonance in Medicine},
number = {2},
pages = {362 -- 367},
publisher = {Wiley-Blackwell},
title = {{Multiprocessor scheduling implementation of the simultaneous multiple volume SMV navigator method}},
doi = {10.1002/mrm.20162},
volume = {52},
year = {2004},
}
@article{3173,
abstract = {In the last few years, several new algorithms based on graph cuts have been developed to solve energy minimization problems in computer vision. Each of these techniques constructs a graph such that the minimum cut on the graph also minimizes the energy. Yet, because these graph constructions are complex and highly specific to a particular energy function, graph cuts have seen limited application to date. In this paper, we give a characterization of the energy functions that can be minimized by graph cuts. Our results are restricted to functions of binary variables. However, our work generalizes many previous constructions and is easily applicable to vision problems that involve large numbers of labels, such as stereo, motion, image restoration, and scene reconstruction. We give a precise characterization of what energy functions can be minimized using graph cuts, among the energy functions that can be written as a sum of terms containing three or fewer binary variables. We also provide a general-purpose construction to minimize such an energy function. Finally, we give a necessary condition for any energy function of binary variables to be minimized by graph cuts. Researchers who are considering the use of graph cuts to optimize a particular energy function can use our results to determine if this is possible and then follow our construction to create the appropriate graph. A software implementation is freely available.},
author = {Vladimir Kolmogorov and Zabih, Ramin},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
number = {2},
pages = {147 -- 159},
publisher = {IEEE},
title = {{What energy functions can be minimized via graph cuts?}},
doi = {10.1109/TPAMI.2004.1262177},
volume = {26},
year = {2004},
}
@inproceedings{3177,
abstract = {Feature space clustering is a popular approach to image segmentation, in which a feature vector of local properties (such as intensity, texture or motion) is computed at each pixel. The feature space is then clustered, and each pixel is labeled with the cluster that contains its feature vector. A major limitation of this approach is that feature space clusters generally lack spatial coherence (i.e., they do not correspond to a compact grouping of pixels). In this paper, we propose a segmentation algorithm that operates simultaneously in feature space and in image space. We define an energy function over both a set of clusters and a labeling of pixels with clusters. In our framework, a pixel is labeled with a single cluster (rather than, for example, a distribution over clusters). Our energy function penalizes clusters that are a poor fit to the data in feature space, and also penalizes clusters whose pixels lack spatial coherence. The energy function can be efficiently minimized using graph cuts. Our algorithm can incorporate both parametric and non-parametric clustering methods. It can be applied to many optimization-based clustering methods, including k-means and k-medians, and can handle models which are very close in feature space. Preliminary results are presented on segmenting real and synthetic images, using both parametric and non-parametric clustering.},
author = {Zabih, Ramin and Kolmogorov, Vladimir},
booktitle = {Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR 2004)},
pages = {437 -- 444},
publisher = {IEEE},
title = {{Spatially coherent clustering using graph cuts}},
doi = {10.1109/CVPR.2004.1315196},
volume = {2},
year = {2004},
}
@article{3178,
abstract = {Minimum cut/maximum flow algorithms on graphs have emerged as an increasingly useful tool for exact or approximate energy minimization in low-level vision. The combinatorial optimization literature provides many min-cut/max-flow algorithms with different polynomial time complexity. Their practical efficiency, however, has to date been studied mainly outside the scope of computer vision. The goal of this paper is to provide an experimental comparison of the efficiency of min-cut/max flow algorithms for applications in vision. We compare the running times of several standard algorithms, as well as a new algorithm that we have recently developed. The algorithms we study include both Goldberg-Tarjan style "push-relabel" methods and algorithms based on Ford-Fulkerson style "augmenting paths." We benchmark these algorithms on a number of typical graphs in the contexts of image restoration, stereo, and segmentation. In many cases, our new algorithm works several times faster than any of the other methods, making near real-time performance possible. An implementation of our max-flow/min-cut algorithm is available upon request for research purposes.},
author = {Boykov, Yuri and Kolmogorov, Vladimir},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
number = {9},
pages = {1124 -- 1137},
publisher = {IEEE},
title = {{An experimental comparison of min-cut/max-flow algorithms for energy minimization in vision}},
doi = {10.1109/TPAMI.2004.60},
volume = {26},
year = {2004},
}
@article{3179,
abstract = {The problem of efficient, interactive foreground/background segmentation in still images is of great practical importance in image editing. Classical image segmentation tools use either texture (colour) information, e.g. Magic Wand, or edge (contrast) information, e.g. Intelligent Scissors. Recently, an approach based on optimization by graph-cut has been developed which successfully combines both types of information. In this paper we extend the graph-cut approach in three respects. First, we have developed a more powerful, iterative version of the optimisation. Secondly, the power of the iterative algorithm is used to simplify substantially the user interaction needed for a given quality of result. Thirdly, a robust algorithm for "border matting" has been developed to estimate simultaneously the alpha-matte around an object boundary and the colours of foreground pixels. We show that for moderately difficult examples the proposed method outperforms competitive tools.},
author = {Rother, Carsten and Kolmogorov, Vladimir and Blake, Andrew},
journal = {ACM Transactions on Graphics},
number = {3},
pages = {309 -- 314},
publisher = {ACM},
title = {{"GrabCut" - Interactive foreground extraction using iterated graph cuts}},
doi = {10.1145/1015706.1015720},
volume = {23},
year = {2004},
}
@inproceedings{3208,
abstract = {A new technique for proving the adaptive indistinguishability of two systems, each composed of some component systems, is presented, using only the fact that corresponding component systems are non-adaptively indistinguishable. The main tool is the definition of a special monotone condition for a random system F, relative to another random system G, whose probability of occurring for a given distinguisher D is closely related to the distinguishing advantage ε of D for F and G, namely it is lower and upper bounded by ε and ε(1+ln(1/ε)), respectively.
A concrete instantiation of this result shows that the cascade of two random permutations (with the second one inverted) is indistinguishable from a uniform random permutation by adaptive distinguishers which may query the system from both sides, assuming the components’ security only against non-adaptive one-sided distinguishers.
As applications we provide some results in various fields such as almost k-wise independent probability spaces, decorrelation theory and computational indistinguishability (i.e., pseudo-randomness).},
author = {Maurer, Ueli M and Pietrzak, Krzysztof},
booktitle = {Theory of Cryptography Conference (TCC 2004)},
pages = {410 -- 427},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {{Composition of random systems: When two weak make one strong}},
doi = {10.1007/978-3-540-24638-1_23},
volume = {2951},
year = {2004},
}
@article{3419,
abstract = {The folding and stability of transmembrane proteins is a fundamental and unsolved biological problem. Here, single bacteriorhodopsin molecules were mechanically unfolded from native purple membranes using atomic force microscopy and force spectroscopy. The energy landscape of individual transmembrane α helices and polypeptide loops was mapped by monitoring the pulling speed dependence of the unfolding forces and applying Monte Carlo simulations. Single helices formed independently stable units stabilized by a single potential barrier. Mechanical unfolding of the helices was triggered by 3.9–7.7 Å extension, while natural unfolding rates were of the order of 10−3 s−1. Besides acting as individually stable units, helices associated pairwise, establishing a collective potential barrier. The unfolding pathways of individual proteins reflect distinct pulling speed-dependent unfolding routes in their energy landscapes. These observations support the two-stage model of membrane protein folding in which α helices insert into the membrane as stable units and then assemble into the functional protein.},
author = {Janovjak, Harald and Struckmeier, Jens and Hubain, Maurice and Kessler, Max and Kedrov, Alexej and Mueller, Daniel J},
journal = {Structure},
number = {5},
pages = {871 -- 879},
publisher = {Cell Press},
title = {{Probing the energy landscape of the membrane protein bacteriorhodopsin}},
doi = {10.1016/j.str.2004.03.016},
volume = {12},
year = {2004},
}
@article{3420,
abstract = {Single-molecule force-spectroscopy was employed to unfold and refold single sodium-proton antiporters (NhaA) of Escherichia coli from membrane patches. Although transmembrane α-helices and extracellular polypeptide loops exhibited sufficient stability to individually establish potential barriers against unfolding, two helices predominantly unfolded pairwise, thereby acting as one structural unit. Many of the potential barriers were detected unfolding NhaA either from the C-terminal or the N-terminal end. It was found that some molecular interactions stabilizing secondary structural elements were directional, while others were not. Additionally, some interactions appeared to occur between the secondary structural elements. After unfolding ten of the 12 helices, the extracted polypeptide was allowed to refold back into the membrane. After five seconds, the refolded polypeptide established all secondary structure elements of the native protein. One helical pair showed a characteristic spring like “snap in” into its folded conformation, while the refolding process of other helices was not detected in particular. Additionally, individual helices required characteristic periods of time to fold. Correlating these results with the primary structure of NhaA allowed us to obtain the first insights into how potential barriers establish and determine the folding kinetics of the secondary structure elements.},
author = {Kedrov, Alexej and Ziegler, Christine and Janovjak, Harald and Kühlbrandt, Werner and Mueller, Daniel J},
journal = {Journal of Molecular Biology},
number = {5},
pages = {1143 -- 1152},
publisher = {Elsevier},
title = {{Controlled unfolding and refolding of a single sodium/proton antiporter using atomic force microscopy}},
doi = {10.1016/j.jmb.2004.05.026},
volume = {340},
year = {2004},
}
@inbook{3574,
author = {Edelsbrunner, Herbert},
booktitle = {Handbook of Discrete and Computational Geometry},
pages = {1395 -- 1412},
publisher = {CRC Press},
title = {{Biological applications of computational topology}},
year = {2004},
}
@inbook{3575,
abstract = {The Jacobi set of two Morse functions defined on a common d-manifold is the set of critical points of the restrictions of one function to the level sets of the other function. Equivalently, it is the set of points where the gradients of the functions are parallel. For a generic pair of Morse functions, the Jacobi set is a smoothly embedded 1-manifold. We give a polynomial-time algorithm that computes the piecewise linear analog of the Jacobi set for functions specified at the vertices of a triangulation, and we generalize all results to more than two but at most d Morse functions.},
author = {Edelsbrunner, Herbert and Harer, John},
booktitle = {Foundations of Computational Mathematics},
pages = {37 -- 57},
publisher = {Cambridge University Press},
title = {{Jacobi sets of multiple Morse functions}},
doi = {10.1017/CBO9781139106962.003},
volume = {312},
year = {2004},
}
@inbook{3587,
author = {Ulrich, Florian and Heisenberg, Carl-Philipp J},
booktitle = {Fish development and genetics: the zebrafish and medaka models},
editor = {Korzh, Vladimir and Gong, Zhiyuan},
pages = {39 -- 86},
publisher = {World Scientific Publishing},
title = {{Gastrulation in zebrafish}},
volume = {2},
year = {2004},
}
@article{3595,
abstract = {Genome sizes vary enormously. This variation in DNA content correlates with effective population size, suggesting that deleterious additions to the genome can accumulate in small populations. On this view, the increased complexity of biological functions associated with large genomes partly reflects evolutionary degeneration.},
author = {Charlesworth, Brian and Barton, Nicholas},
journal = {Current Biology},
number = {6},
pages = {R233 -- R235},
publisher = {Cell Press},
title = {{Genome size: Does bigger mean worse?}},
doi = {10.1016/j.cub.2004.02.054},
volume = {14},
year = {2004},
}
@article{3614,
abstract = {We analyze the changes in the mean and variance components of a quantitative trait caused by changes in allele frequencies, concentrating on the effects of genetic drift. We use a general representation of epistasis and dominance that allows an arbitrary relation between genotype and phenotype for any number of diallelic loci. We assume initial and final Hardy-Weinberg and linkage equilibrium in our analyses of drift-induced changes. Random drift generates transient linkage disequilibria that cause correlations between allele frequency fluctuations at different loci. However, we show that these have negligible effects, at least for interactions among small numbers of loci. Our analyses are based on diffusion approximations that summarize the effects of drift in terms of F, the inbreeding coefficient, interpreted as the expected proportional decrease in heterozygosity at each locus. For haploids, the variance of the trait mean after a population bottleneck is var(Δz̄) =inline imagewhere n is the number of loci contributing to the trait variance, VA(1)=VA is the additive genetic variance, and VA(k) is the kth-order additive epistatic variance. The expected additive genetic variance after the bottleneck, denoted (V*A), is closely related to var(Δz̄); (V*A) (1 –F)inline imageThus, epistasis inflates the expected additive variance above VA(1 –F), the expectation under additivity. For haploids (and diploids without dominance), the expected value of every variance component is inflated by the existence of higher order interactions (e.g., third-order epistasis inflates (V*AA)). This is not true in general with diploidy, because dominance alone can reduce (V*A) below VA(1 –F) (e.g., when dominant alleles are rare). 
Without dominance, diploidy produces simple expressions: var(Δz̄)=inline image=1 (2F) kVA(k) and (V*A) = (1 –F)inline imagek(2F)k-1VA(k) With dominance (and even without epistasis), var(Δz̄)and (V*A) no longer depend solely on the variance components in the base population. For small F, the expected additive variance simplifies to (V*A)(1 –F) VA+ 4FVAA+2FVD+2FCAD, where CAD is a sum of two terms describing covariances between additive effects and dominance and additive × dominance interactions. Whether population bottlenecks lead to expected increases in additive variance depends primarily on the ratio of nonadditive to additive genetic variance in the base population, but dominance precludes simple predictions based solely on variance components. We illustrate these results using a model in which genotypic values are drawn at random, allowing extreme and erratic epistatic interactions. Although our analyses clarify the conditions under which drift is expected to increase VA, we question the evolutionary importance of such increases.},
author = {Barton, Nicholas and Turelli, Michael},
journal = {Evolution; International Journal of Organic Evolution},
number = {10},
pages = {2111 -- 2132},
publisher = {Wiley-Blackwell},
title = {{Effects of allele frequency changes on variance components under a general model of epistasis}},
doi = {10.1111/j.0014-3820.2004.tb01591.x},
volume = {58},
year = {2004},
}
@article{3615,
abstract = {We investigate three alternative selection-based scenarios proposed to maintain polygenic variation: pleiotropic balancing selection, G x E interactions (with spatial or temporal variation in allelic effects), and sex-dependent allelic effects. Each analysis assumes an additive polygenic trait with n diallelic loci under stabilizing selection. We allow loci to have different effects and consider equilibria at which the population mean departs from the stabilizing-selection optimum. Under weak selection, each model produces essentially identical, approximate allele-frequency dynamics. Variation is maintained under pleiotropic balancing selection only at loci for which the strength of balancing selection exceeds the effective strength of stabilizing selection. In addition, for all models, polymorphism requires that the population mean be close enough to the optimum that directional selection does not overwhelm balancing selection. This balance allows many simultaneously stable equilibria, and we explore their properties numerically. Both spatial and temporal G x E can maintain variation at loci for which the coefficient of variation (across environments) of the effect of a substitution exceeds a critical value greater than one. The critical value depends on the correlation between substitution effects at different loci. For large positive correlations (e.g., ρ2ij > 3/4), even extreme fluctuations in allelic effects cannot maintain variation. Surprisingly, this constraint on correlations implies that sex-dependent allelic effects cannot maintain polygenic variation. We present numerical results that support our analytical approximations and discuss our results in connection to relevant data and alternative variance-maintaining mechanisms.},
author = {Turelli, Michael and Barton, Nicholas},
journal = {Genetics},
number = {2},
pages = {1053 -- 1079},
publisher = {Genetics Society of America},
title = {{Polygenic variation maintained by balancing selection: pleiotropy, sex-dependent allelic effects and GxE interactions}},
doi = {10.1534/genetics.166.2.1053},
volume = {166},
year = {2004},
}
@article{3616,
author = {Barton, Nicholas},
journal = {Current Biology},
number = {15},
pages = {R603 -- R604},
publisher = {Cell Press},
title = {{Speciation: Why, how, where and when?}},
doi = {10.1016/j.cub.2004.07.037},
volume = {14},
year = {2004},
}
@article{3617,
abstract = {The coalescent process can describe the effects of selection at linked loci only if selection is so strong that genotype frequencies evolve deterministically. Here, we develop methods proposed by Kaplan, Darden, and Hudson to find the effects of weak selection. We show that the overall effect is given by an extension to Price's equation: the change in properties such as moments of coalescence times is equal to the covariance between those properties and the fitness of the sample of genes. The distribution of coalescence times differs substantially between allelic classes, even in the absence of selection. However, the average coalescence time between randomly chosen genes is insensitive to the current allele frequency and is affected significantly by purifying selection only if deleterious mutations are common and selection is strong (i.e., the product of population size and selection coefficient, Ns > 3). Balancing selection increases mean coalescence times, but the effect becomes large only when mutation rates between allelic classes are low and when selection is extremely strong. Our analysis supports previous simulations that show that selection has surprisingly little effect on genealogies. Moreover, small fluctuations in allele frequency due to random drift can greatly reduce any such effects. This will make it difficult to detect the action of selection from neutral variation alone.},
author = {Barton, Nicholas and Etheridge, Alison M},
journal = {Genetics},
number = {2},
pages = {1115 -- 1131},
publisher = {Genetics Society of America},
title = {{The effect of selection on genealogies}},
doi = {10.1534/genetics.166.2.1115},
volume = {166},
year = {2004},
}
@inproceedings{3688,
abstract = {Capturing images of documents using handheld digital cameras has a variety of applications in academia, research, knowledge management, retail, and office settings. The ultimate goal of such systems is to achieve image quality comparable to that currently achieved with flatbed scanners even for curved, warped, or curled pages. This can be achieved by high-accuracy 3D modeling of the page surface, followed by a "flattening" of the surface. A number of previous systems have either assumed only perspective distortions, or used techniques like structured lighting, shading, or side-imaging for obtaining 3D shape. This paper describes a system for handheld camera-based document capture using general purpose stereo vision methods followed by a new document dewarping technique. Examples of shape modeling and dewarping of book images is shown.},
author = {Ulges, Adrian and Lampert, Christoph and Breuel, Thomas M},
booktitle = {Proceedings of the 2004 ACM Symposium on Document Engineering (DocEng 2004)},
pages = {198 -- 200},
publisher = {ACM},
title = {{Document capture using stereo vision}},
doi = {10.1145/1030397.1030434},
year = {2004},
}