@article{828,
abstract = {The plant root system is essential for providing anchorage to the soil, supplying minerals and water, and synthesizing metabolites. It is a dynamic organ modulated by external cues such as environmental signals, water and nutrients availability, salinity and others. Lateral roots (LRs) are initiated from the primary root post-embryonically, after which they progress through discrete developmental stages which can be independently controlled, providing a high level of plasticity during root system formation. Within this review, main contributions are presented, from the classical forward genetic screens to the more recent high-throughput approaches, combined with computer model predictions, dissecting how LRs and thereby root system architecture is established and developed.},
author = {Cuesta, Candela and Wabnik, Krzysztof T and Benková, Eva},
journal = {Frontiers in Plant Science},
publisher = {Frontiers Research Foundation},
title = {Systems approaches to study root architecture dynamics},
doi = {10.3389/fpls.2013.00537},
volume = {4},
year = {2013},
}
@article{8462,
abstract = {The transition of proteins from their soluble functional state to amyloid fibrils and aggregates is associated with the onset of several human diseases. Protein aggregation often requires some structural reshaping and the subsequent formation of intermolecular contacts. Therefore, the study of the conformation of excited protein states and their ability to form oligomers is of primary importance for understanding the molecular basis of amyloid fibril formation. Here, we investigated the oligomerization processes that occur along the folding of the amyloidogenic human protein β2-microglobulin. The combination of real-time two-dimensional NMR data with real-time small-angle X-ray scattering measurements allowed us to derive thermodynamic and kinetic information on protein oligomerization of different conformational states populated along the folding pathways. In particular, we could demonstrate that a long-lived folding intermediate (I-state) has a higher propensity to oligomerize compared to the native state. Our data agree well with a simple five-state kinetic model that involves only monomeric and dimeric species. The dimers have an elongated shape with the dimerization interface located at the apical side of β2-microglobulin close to Pro32, the residue that has a trans conformation in the I-state and a cis conformation in the native (N) state. Our experimental data suggest that partial unfolding in the apical half of the protein close to Pro32 leads to an excited state conformation with enhanced propensity for oligomerization. This excited state becomes more populated in the transient I-state due to the destabilization of the native conformation by the trans-Pro32 configuration.},
author = {Rennella, E. and Cutuil, T. and Schanda, Paul and Ayala, I. and Gabel, F. and Forge, V. and Corazza, A. and Esposito, G. and Brutscher, B.},
issn = {0022-2836},
journal = {Journal of Molecular Biology},
keywords = {Molecular Biology},
number = {15},
pages = {2722--2736},
publisher = {Elsevier},
title = {Oligomeric states along the folding pathways of β2-microglobulin: Kinetics, thermodynamics, and structure},
doi = {10.1016/j.jmb.2013.04.028},
volume = {425},
year = {2013},
}
@inproceedings{2000,
abstract = {In this work we present a flexible tool for tumor progression, which simulates the evolutionary dynamics of cancer. Tumor progression implements a multi-type branching process where the key parameters are the fitness landscape, the mutation rate, and the average time of cell division. The fitness of a cancer cell depends on the mutations it has accumulated. The input to our tool could be any fitness landscape, mutation rate, and cell division time, and the tool produces the growth dynamics and all relevant statistics.},
author = {Reiter, Johannes and Božić, Ivana and Chatterjee, Krishnendu and Nowak, Martin},
booktitle = {Proceedings of the 25th International Conference on Computer Aided Verification},
location = {St. Petersburg, Russia},
pages = {101--106},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {{TTP}: Tool for tumor progression},
doi = {10.1007/978-3-642-39799-8_6},
volume = {8044},
year = {2013},
}
@article{2009,
abstract = {Traditional statistical methods for confidentiality protection of statistical databases do not scale well to deal with GWAS databases especially in terms of guarantees regarding protection from linkage to external information. The more recent concept of differential privacy, introduced by the cryptographic community, is an approach which provides a rigorous definition of privacy with meaningful privacy guarantees in the presence of arbitrary external information, although the guarantees may come at a serious price in terms of data utility. Building on such notions, we propose new methods to release aggregate GWAS data without compromising an individual’s privacy. We present methods for releasing differentially private minor allele frequencies, chi-square statistics and p-values. We compare these approaches on simulated data and on a GWAS study of canine hair length involving 685 dogs. We also propose a privacy-preserving method for finding genome-wide associations based on a differentially-private approach to penalized logistic regression.},
author = {Uhler, Caroline and Slavkovic, Aleksandra and Fienberg, Stephen},
journal = {Journal of Privacy and Confidentiality},
number = {1},
pages = {137--166},
publisher = {Carnegie Mellon University},
title = {Privacy-preserving data sharing for genome-wide association studies},
doi = {10.29012/jpc.v5i1.629},
volume = {5},
year = {2013},
}
@article{2010,
abstract = {Many algorithms for inferring causality rely heavily on the faithfulness assumption. The main justification for imposing this assumption is that the set of unfaithful distributions has Lebesgue measure zero, since it can be seen as a collection of hypersurfaces in a hypercube. However, due to sampling error the faithfulness condition alone is not sufficient for statistical estimation, and strong-faithfulness has been proposed and assumed to achieve uniform or high-dimensional consistency. In contrast to the plain faithfulness assumption, the set of distributions that is not strong-faithful has nonzero Lebesgue measure and in fact, can be surprisingly large as we show in this paper. We study the strong-faithfulness condition from a geometric and combinatorial point of view and give upper and lower bounds on the Lebesgue measure of strong-faithful distributions for various classes of directed acyclic graphs. Our results imply fundamental limitations for the PC-algorithm and potentially also for other algorithms based on partial correlation testing in the Gaussian case.},
author = {Uhler, Caroline and Raskutti, Garvesh and Bühlmann, Peter and Yu, Bin},
journal = {The Annals of Statistics},
number = {2},
pages = {436--463},
publisher = {Institute of Mathematical Statistics},
title = {Geometry of the faithfulness assumption in causal inference},
doi = {10.1214/12-AOS1080},
volume = {41},
year = {2013},
}
@inproceedings{2181,
abstract = {There is a trade-off between performance and correctness in implementing concurrent data structures. Better performance may be achieved at the expense of relaxing correctness, by redefining the semantics of data structures. We address such a redefinition of data structure semantics and present a systematic and formal framework for obtaining new data structures by quantitatively relaxing existing ones. We view a data structure as a sequential specification S containing all "legal" sequences over an alphabet of method calls. Relaxing the data structure corresponds to defining a distance from any sequence over the alphabet to the sequential specification: the k-relaxed sequential specification contains all sequences over the alphabet within distance k from the original specification. In contrast to other existing work, our relaxations are semantic (distance in terms of data structure states). As an instantiation of our framework, we present two simple yet generic relaxation schemes, called out-of-order and stuttering relaxation, along with several ways of computing distances. We show that the out-of-order relaxation, when further instantiated to stacks, queues, and priority queues, amounts to tolerating bounded out-of-order behavior, which cannot be captured by a purely syntactic relaxation (distance in terms of sequence manipulation, e.g. edit distance). We give concurrent implementations of relaxed data structures and demonstrate that bounded relaxations provide the means for trading correctness for performance in a controlled way. The relaxations are monotonic which further highlights the trade-off: increasing k increases the number of permitted sequences, which as we demonstrate can lead to better performance. Finally, since a relaxed stack or queue also implements a pool, we actually have new concurrent pool implementations that outperform the state-of-the-art ones.},
author = {Henzinger, Thomas A and Kirsch, Christoph and Payer, Hannes and Sezgin, Ali and Sokolova, Ana},
booktitle = {Proceedings of the 40th Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
isbn = {978-1-4503-1832-7},
location = {Rome, Italy},
pages = {317--328},
publisher = {ACM},
title = {Quantitative relaxation of concurrent data structures},
doi = {10.1145/2429069.2429109},
year = {2013},
}
@inproceedings{2182,
abstract = {We propose a general framework for abstraction with respect to quantitative properties, such as worst-case execution time, or power consumption. Our framework provides a systematic way for counter-example guided abstraction refinement for quantitative properties. The salient aspect of the framework is that it allows anytime verification, that is, verification algorithms that can be stopped at any time (for example, due to exhaustion of memory), and report approximations that improve monotonically when the algorithms are given more time. We instantiate the framework with a number of quantitative abstractions and refinement schemes, which differ in terms of how much quantitative information they keep from the original system. We introduce both state-based and trace-based quantitative abstractions, and we describe conditions that define classes of quantitative properties for which the abstractions provide over-approximations. We give algorithms for evaluating the quantitative properties on the abstract systems. We present algorithms for counter-example based refinements for quantitative properties for both state-based and segment-based abstractions. We perform a case study on worst-case execution time of executables to evaluate the anytime verification aspect and the quantitative abstractions we proposed.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
booktitle = {Proceedings of the 40th Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
location = {Rome, Italy},
pages = {115--128},
publisher = {ACM},
title = {Quantitative abstraction refinement},
doi = {10.1145/2429069.2429085},
year = {2013},
}
@inproceedings{2209,
abstract = {A straight skeleton is a well-known geometric structure, and several algorithms exist to construct the straight skeleton for a given polygon or planar straight-line graph. In this paper, we ask the reverse question: Given the straight skeleton (in form of a planar straight-line graph, with some rays to infinity), can we reconstruct a planar straight-line graph for which this was the straight skeleton? We show how to reduce this problem to the problem of finding a line that intersects a set of convex polygons. We can find these convex polygons and all such lines in $O(n \log n)$ time in the Real RAM computer model, where $n$ denotes the number of edges of the input graph. We also explain how our approach can be used for recognizing Voronoi diagrams of points, thereby completing a partial solution provided by Ash and Bolker in 1985.},
author = {Biedl, Therese and Held, Martin and Huber, Stefan},
booktitle = {Proceedings of the 10th International Symposium on Voronoi Diagrams in Science and Engineering},
location = {St. Petersburg, Russia},
pages = {37--46},
publisher = {IEEE},
title = {Recognizing straight skeletons and {Voronoi} diagrams and reconstructing their input},
doi = {10.1109/ISVD.2013.11},
year = {2013},
}
@inproceedings{2210,
abstract = {A straight skeleton is a well-known geometric structure, and several algorithms exist to construct the straight skeleton for a given polygon. In this paper, we ask the reverse question: Given the straight skeleton (in form of a tree with a drawing in the plane, but with the exact position of the leaves unspecified), can we reconstruct the polygon? We show that in most cases there exists at most one polygon; in the remaining case there is an infinite number of polygons determined by one angle that can range in an interval. We can find this (set of) polygon(s) in linear time in the Real RAM computer model.},
author = {Biedl, Therese and Held, Martin and Huber, Stefan},
booktitle = {29th European Workshop on Computational Geometry},
location = {Braunschweig, Germany},
pages = {95--98},
publisher = {TU Braunschweig},
title = {Reconstructing polygons from embedded straight skeletons},
year = {2013},
}
@inproceedings{2237,
abstract = {We describe new extensions of the Vampire theorem prover for computing tree interpolants. These extensions generalize Craig interpolation in Vampire, and can also be used to derive sequence interpolants. We evaluated our implementation on a large number of examples over the theory of linear integer arithmetic and integer-indexed arrays, with and without quantifiers. When compared to other methods, our experiments show that some examples could only be solved by our implementation.},
author = {Blanc, Régis and Gupta, Ashutosh and Kovács, Laura and Kragl, Bernhard},
booktitle = {Logic for Programming, Artificial Intelligence, and Reasoning},
location = {Stellenbosch, South Africa},
pages = {173--181},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {Tree interpolation in {Vampire}},
doi = {10.1007/978-3-642-45221-5_13},
volume = {8312},
year = {2013},
}