@article{1576,
abstract = {Gene expression is controlled primarily by interactions between transcription factor proteins (TFs) and the regulatory DNA sequence, a process that can be captured well by thermodynamic models of regulation. These models, however, neglect regulatory crosstalk: the possibility that noncognate TFs could initiate transcription, with potentially disastrous effects for the cell. Here, we estimate the importance of crosstalk, suggest that its avoidance strongly constrains equilibrium models of TF binding, and propose an alternative nonequilibrium scheme that implements kinetic proofreading to suppress erroneous initiation. This proposal is consistent with the observed covalent modifications of the transcriptional apparatus and predicts increased noise in gene expression as a trade-off for improved specificity. Using information theory, we quantify this trade-off to find when optimal proofreading architectures are favored over their equilibrium counterparts. Such architectures exhibit significant super-Poisson noise at low expression in steady state.},
author = {Cepeda Humerez, Sarah A. and Rieckh, Georg and Tka{\v{c}}ik, Ga{\v{s}}per},
journal = {Physical Review Letters},
number = {24},
pages = {248101},
publisher = {American Physical Society},
title = {{Stochastic proofreading mechanism alleviates crosstalk in transcriptional regulation}},
doi = {10.1103/PhysRevLett.115.248101},
volume = {115},
year = {2015},
}
@article{6736,
abstract = {Motivated by the significant performance gains which polar codes experience under successive cancellation list decoding, their scaling exponent is studied as a function of the list size. In particular, the error probability is fixed, and the tradeoff between the block length and back-off from capacity is analyzed. A lower bound is provided on the error probability under MAP decoding with list size L for any binary-input memoryless output-symmetric channel and for any class of linear codes such that their minimum distance is unbounded as the block length grows large. Then, it is shown that under MAP decoding, although the introduction of a list can significantly improve the involved constants, the scaling exponent itself, i.e., the speed at which capacity is approached, stays unaffected for any finite list size. In particular, this result applies to polar codes, since their minimum distance tends to infinity as the block length increases. A similar result is proved for genie-aided successive cancellation decoding when transmission takes place over the binary erasure channel, namely, the scaling exponent remains constant for any fixed number of helps from the genie. Note that since genie-aided successive cancellation decoding might be strictly worse than successive cancellation list decoding, the problem of establishing the scaling exponent of the latter remains open.},
author = {Mondelli, Marco and Hassani, Hamed and Urbanke, R{\"u}diger},
journal = {IEEE Transactions on Information Theory},
number = {9},
pages = {4838--4851},
publisher = {IEEE},
title = {{Scaling exponent of list decoders with applications to polar codes}},
doi = {10.1109/tit.2015.2453315},
volume = {61},
year = {2015},
}
@article{6737,
abstract = {This paper presents polar coding schemes for the two-user discrete memoryless broadcast channel (DM-BC) which achieve Marton's region with both common and private messages. This is the best achievable rate region known to date, and it is tight for all classes of two-user DM-BCs whose capacity regions are known. To accomplish this task, we first construct polar codes for both the superposition as well as binning strategy. By combining these two schemes, we obtain Marton's region with private messages only. Finally, we show how to handle the case of common information. The proposed coding schemes possess the usual advantages of polar codes, i.e., they have low encoding and decoding complexity and a superpolynomial decay rate of the error probability. We follow the lead of Goela, Abbe, and Gastpar, who recently introduced polar codes emulating the superposition and binning schemes. To align the polar indices, for both schemes, their solution involves some degradedness constraints that are assumed to hold between the auxiliary random variables and channel outputs. To remove these constraints, we consider the transmission of k blocks and employ a chaining construction that guarantees the proper alignment of the polarized indices. The techniques described in this paper are quite general, and they can be adopted to many other multiterminal scenarios whenever there polar indices need to be aligned.},
author = {Mondelli, Marco and Hassani, Hamed and Sason, Igal and Urbanke, R{\"u}diger},
journal = {IEEE Transactions on Information Theory},
number = {2},
pages = {783--800},
publisher = {IEEE},
title = {{Achieving Marton's region for broadcast channels using polar codes}},
doi = {10.1109/tit.2014.2368555},
volume = {61},
year = {2015},
}
@article{7070,
abstract = {Torque magnetization measurements on YBa2Cu3Oy (YBCO) at doping y=6.67 (p=0.12), in dc fields (B) up to 33 T and temperatures down to 4.5 K, show that weak diamagnetism persists above the extrapolated irreversibility field Hirr(T=0)≈24 T. The differential susceptibility dM/dB, however, is more rapidly suppressed for B≳16 T than expected from the properties of the low field superconducting state, and saturates at a low value for fields B≳24 T. In addition, torque measurements on a p=0.11 YBCO crystal in pulsed field up to 65 T and temperatures down to 8 K show similar behavior, with no additional features at higher fields. We offer two candidate scenarios to explain these observations: (a) superconductivity survives but is heavily suppressed at high field by competition with charge-density-wave (CDW) order; (b) static superconductivity disappears near 24 T and is followed by a region of fluctuating superconductivity, which causes dM/dB to saturate at high field. The diamagnetic signal observed above 50 T for the p=0.11 crystal at 40 K and below may be caused by changes in the normal state susceptibility rather than bulk or fluctuating superconductivity. There will be orbital (Landau) diamagnetism from electron pockets and possibly a reduction in spin susceptibility caused by the stronger three-dimensional ordered CDW.},
author = {Yu, Jing Fei and Ramshaw, B. J. and Kokanović, I. and Modic, Kimberly A and Harrison, N. and Day, James and Liang, Ruixing and Hardy, W. N. and Bonn, D. A. and McCollam, A. and Julian, S. R. and Cooper, J. R.},
issn = {1098-0121},
journal = {Physical Review B},
number = {18},
publisher = {American Physical Society},
title = {{Magnetization of underdoped YBa2Cu3Oy above the irreversibility field}},
doi = {10.1103/physrevb.92.180509},
volume = {92},
year = {2015},
}
@article{7739,
abstract = {Currently, there is much debate on the genetic architecture of quantitative traits in wild populations. Is trait variation influenced by many genes of small effect or by a few genes of major effect? Where is additive genetic variation located in the genome? Do the same loci cause similar phenotypic variation in different populations? Great tits (Parus major) have been studied extensively in long‐term studies across Europe and consequently are considered an ecological ‘model organism’. Recently, genomic resources have been developed for the great tit, including a custom SNP chip and genetic linkage map. In this study, we used a suite of approaches to investigate the genetic architecture of eight quantitative traits in two long‐term study populations of great tits—one in the Netherlands and the other in the United Kingdom. Overall, we found little evidence for the presence of genes of large effects in either population. Instead, traits appeared to be influenced by many genes of small effect, with conservative estimates of the number of contributing loci ranging from 31 to 310. Despite concordance between population‐specific heritabilities, we found no evidence for the presence of loci having similar effects in both populations. While population‐specific genetic architectures are possible, an undetected shared architecture cannot be rejected because of limited power to map loci of small and moderate effects. This study is one of few examples of genetic architecture analysis in replicated wild populations and highlights some of the challenges and limitations researchers will face when attempting similar molecular quantitative genetic studies in free‐living populations.},
author = {Santure, Anna W. and Poissant, Jocelyn and De Cauwer, Isabelle and van Oers, Kees and Robinson, Matthew Richard and Quinn, John L. and Groenen, Martien A. M. and Visser, Marcel E. and Sheldon, Ben C. and Slate, Jon},
issn = {0962-1083},
journal = {Molecular Ecology},
pages = {6148--6162},
publisher = {Wiley},
title = {{Replicated analysis of the genetic architecture of quantitative traits in two wild great tit populations}},
doi = {10.1111/mec.13452},
volume = {24},
year = {2015},
}
@article{7741,
abstract = {Phenotypes expressed in a social context are not only a function of the individual, but can also be shaped by the phenotypes of social partners. These social effects may play a major role in the evolution of cooperative breeding if social partners differ in the quality of care they provide and if individual carers adjust their effort in relation to that of other carers. When applying social effects models to wild study systems, it is also important to explore sources of individual plasticity that could masquerade as social effects. We studied offspring provisioning rates of parents and helpers in a wild population of long-tailed tits Aegithalos caudatus using a quantitative genetic framework to identify these social effects and partition them into genetic, permanent environment and current environment components. Controlling for other effects, individuals were consistent in their provisioning effort at a given nest, but adjusted their effort based on who was in their social group, indicating the presence of social effects. However, these social effects differed between years and social contexts, indicating a current environment effect, rather than indicating a genetic or permanent environment effect. While this study reveals the importance of examining environmental and genetic sources of social effects, the framework we present is entirely general, enabling a greater understanding of potentially important social effects within any ecological population.},
author = {Adams, Mark James and Robinson, Matthew Richard and Mannarelli, Maria-Elena and Hatchwell, Ben J.},
issn = {0962-8452},
journal = {Proceedings of the Royal Society B: Biological Sciences},
number = {1810},
publisher = {The Royal Society},
title = {{Social genetic and social environment effects on parental and helper care in a cooperatively breeding bird}},
doi = {10.1098/rspb.2015.0689},
volume = {282},
year = {2015},
}
@article{7742,
abstract = {Across-nation differences in the mean values for complex traits are common, but the reasons for these differences are unknown. Here we find that many independent loci contribute to population genetic differences in height and body mass index (BMI) in 9,416 individuals across 14 European countries. Using discovery data on over 250,000 individuals and unbiased effect size estimates from 17,500 sibling pairs, we estimate that 24% (95% credible interval (CI) = 9%, 41%) and 8% (95% CI = 4%, 16%) of the captured additive genetic variance for height and BMI, respectively, reflect population genetic differences. Population genetic divergence differed significantly from that in a null model (height, P < 3.94 × $10^{-8}$; BMI, P < 5.95 × $10^{-4}$), and we find an among-population genetic correlation for tall and slender individuals (r = −0.80, 95% CI = −0.95, −0.60), consistent with correlated selection for both phenotypes. Observed differences in height among populations reflected the predicted genetic means (r = 0.51; P < 0.001), but environmental differences across Europe masked genetic differentiation for BMI (P < 0.58).},
author = {Robinson, Matthew Richard and Hemani, Gibran and Medina-Gomez, Carolina and Mezzavilla, Massimo and Esko, Tonu and Shakhbazov, Konstantin and Powell, Joseph E and Vinkhuyzen, Anna and Berndt, Sonja I and Gustafsson, Stefan and Justice, Anne E and Kahali, Bratati and Locke, Adam E and Pers, Tune H and Vedantam, Sailaja and Wood, Andrew R and van Rheenen, Wouter and Andreassen, Ole A and Gasparini, Paolo and Metspalu, Andres and Berg, Leonard H van den and Veldink, Jan H and Rivadeneira, Fernando and Werge, Thomas M and Abecasis, Goncalo R and Boomsma, Dorret I and Chasman, Daniel I and de Geus, Eco J C and Frayling, Timothy M and Hirschhorn, Joel N and Hottenga, Jouke Jan and Ingelsson, Erik and Loos, Ruth J F and Magnusson, Patrik K E and Martin, Nicholas G and Montgomery, Grant W and North, Kari E and Pedersen, Nancy L and Spector, Timothy D and Speliotes, Elizabeth K and Goddard, Michael E and Yang, Jian and Visscher, Peter M},
issn = {1061-4036},
journal = {Nature Genetics},
number = {11},
pages = {1357--1362},
publisher = {Springer Nature},
title = {{Population genetic differentiation of height and body mass index across Europe}},
doi = {10.1038/ng.3401},
volume = {47},
year = {2015},
}
@inproceedings{776,
abstract = {High-performance concurrent priority queues are essential for applications such as task scheduling and discrete event simulation. Unfortunately, even the best performing implementations do not scale past a number of threads in the single digits. This is because of the sequential bottleneck in accessing the elements at the head of the queue in order to perform a DeleteMin operation. In this paper, we present the SprayList, a scalable priority queue with relaxed ordering semantics. Starting from a non-blocking SkipList, the main innovation behind our design is that the DeleteMin operations avoid a sequential bottleneck by "spraying" themselves onto the head of the SkipList list in a coordinated fashion. The spraying is implemented using a carefully designed random walk, so that DeleteMin returns an element among the first O(plog3p) in the list, with high probability, where p is the number of threads. We prove that the running time of a DeleteMin operation is O(log3p), with high probability, independent of the size of the list. Our experiments show that the relaxed semantics allow the data structure to scale for high thread counts, comparable to a classic unordered SkipList. Furthermore, we observe that, for reasonably parallel workloads, the scalability benefits of relaxation considerably outweigh the additional work due to out-of-order execution.},
author = {Alistarh, Dan-Adrian and Kopinsky, Justin and Li, Jerry and Shavit, Nir},
booktitle = {Proceedings of the 20th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming (PPoPP)},
pages = {11--20},
publisher = {ACM},
title = {{The SprayList: A scalable relaxed priority queue}},
doi = {10.1145/2688500.2688523},
volume = {2015-January},
year = {2015},
}
@article{7765,
abstract = {We introduce a principle unique to disordered solids wherein the contribution of any bond to one global perturbation is uncorrelated with its contribution to another. Coupled with sufficient variability in the contributions of different bonds, this “independent bond-level response” paves the way for the design of real materials with unusual and exquisitely tuned properties. To illustrate this, we choose two global perturbations: compression and shear. By applying a bond removal procedure that is both simple and experimentally relevant to remove a very small fraction of bonds, we can drive disordered spring networks to both the incompressible and completely auxetic limits of mechanical behavior.},
author = {Goodrich, Carl Peter and Liu, Andrea J. and Nagel, Sidney R.},
issn = {0031-9007},
journal = {Physical Review Letters},
number = {22},
publisher = {American Physical Society},
title = {{The principle of independent bond-level response: Tuning by pruning to exploit disorder for global behavior}},
doi = {10.1103/physrevlett.114.225501},
volume = {114},
year = {2015},
}
@article{7766,
abstract = {We study the vibrational properties near a free surface of disordered spring networks derived from jammed sphere packings. In bulk systems, without surfaces, it is well understood that such systems have a plateau in the density of vibrational modes extending down to a frequency scale ω*. This frequency is controlled by ΔZ = 〈Z〉 − 2d, the difference between the average coordination of the spheres and twice the spatial dimension, d, of the system, which vanishes at the jamming transition. In the presence of a free surface we find that there is a density of disordered vibrational modes associated with the surface that extends far below ω*. The total number of these low-frequency surface modes is controlled by ΔZ, and the profile of their decay into the bulk has two characteristic length scales, which diverge as ΔZ−1/2 and ΔZ−1 as the jamming transition is approached.},
author = {Sussman, Daniel M. and Goodrich, Carl Peter and Liu, Andrea J. and Nagel, Sidney R.},
issn = {1744-683X},
journal = {Soft Matter},
number = {14},
pages = {2745--2751},
publisher = {Royal Society of Chemistry},
title = {{Disordered surface vibrations in jammed sphere packings}},
doi = {10.1039/c4sm02905d},
volume = {11},
year = {2015},
}
@article{7767,
abstract = {We present a model of soft active particles that leads to a rich array of collective behavior found also in dense biological swarms of bacteria and other unicellular organisms. Our model uses only local interactions, such as Vicsek-type nearest-neighbor alignment, short-range repulsion, and a local boundary term. Changing the relative strength of these interactions leads to migrating swarms, rotating swarms, and jammed swarms, as well as swarms that exhibit run-and-tumble motion, alternating between migration and either rotating or jammed states. Interestingly, although a migrating swarm moves slower than an individual particle, the diffusion constant can be up to three orders of magnitude larger, suggesting that collective motion can be highly advantageous, for example, when searching for food.},
author = {van Drongelen, Ruben and Pal, Anshuman and Goodrich, Carl Peter and Idema, Timon},
issn = {1539-3755},
journal = {Physical Review E},
number = {3},
publisher = {American Physical Society},
title = {{Collective dynamics of soft active particles}},
doi = {10.1103/physreve.91.032706},
volume = {91},
year = {2015},
}
@inproceedings{777,
abstract = {In many applications, the data is of rich structure that can be represented by a hypergraph, where the data items are represented by vertices and the associations among items are represented by hyperedges. Equivalently, we are given an input bipartite graph with two types of vertices: items, and associations (which we refer to as topics). We consider the problem of partitioning the set of items into a given number of components such that the maximum number of topics covered by a component is minimized. This is a clustering problem with various applications, e.g. partitioning of a set of information objects such as documents, images, and videos, and load balancing in the context of modern computation platforms. In this paper, we focus on the streaming computation model for this problem, in which items arrive online one at a time and each item must be assigned irrevocably to a component at its arrival time. Motivated by scalability requirements, we focus on the class of streaming computation algorithms with memory limited to be at most linear in the number of components. We show that a greedy assignment strategy is able to recover a hidden co-clustering of items under a natural set of recovery conditions. We also report results of an extensive empirical evaluation, which demonstrate that this greedy strategy yields superior performance when compared with alternative approaches.},
author = {Alistarh, Dan-Adrian and Iglesias, Jennifer and Vojnović, Milan},
booktitle = {Advances in Neural Information Processing Systems},
pages = {1900--1908},
publisher = {Neural Information Processing Systems},
title = {{Streaming min-max hypergraph partitioning}},
volume = {2015-January},
year = {2015},
}
@unpublished{7779,
abstract = {The fact that a disordered material is not constrained in its properties in
the same way as a crystal presents significant and yet largely untapped
potential for novel material design. However, unlike their crystalline
counterparts, disordered solids are not well understood. One of the primary
obstacles is the lack of a theoretical framework for thinking about disorder
and its relation to mechanical properties. To this end, we study an idealized
system of frictionless athermal soft spheres that, when compressed, undergoes a
jamming phase transition with diverging length scales and clean power-law
signatures. This critical point is the cornerstone of a much larger "jamming
scenario" that has the potential to provide the essential theoretical
foundation necessary for a unified understanding of the mechanics of disordered
solids. We begin by showing that jammed sphere packings have a valid linear
regime despite the presence of "contact nonlinearities." We then investigate
the critical nature of the transition, focusing on diverging length scales and
finite-size effects. Next, we argue that jamming plays the same role for
disordered solids as the perfect crystal plays for crystalline solids. Not only
can it be considered an idealized starting point for understanding disordered
materials, but it can even influence systems that have a relatively high amount
of crystalline order. The behavior of solids can thus be thought of as existing
on a spectrum, with the perfect crystal and the jamming transition at opposing
ends. Finally, we introduce a new principle wherein the contribution of an
individual bond to one global property is independent of its contribution to
another. This principle allows the different global responses of a disordered
system to be manipulated independently and provides a great deal of flexibility
in designing materials with unique, textured and tunable properties.},
author = {Goodrich, Carl Peter},
note = {arXiv:1510.08820},
pages = {242},
title = {{Unearthing the anticrystal: Criticality in the linear response of disordered solids}},
year = {2015},
}
@inproceedings{778,
abstract = {Several Hybrid Transactional Memory (HyTM) schemes have recently been proposed to complement the fast, but best-effort nature of Hardware Transactional Memory (HTM) with a slow, reliable software backup. However, the costs of providing concurrency between hardware and software transactions in HyTM are still not well understood. In this paper, we propose a general model for HyTM implementations, which captures the ability of hardware transactions to buffer memory accesses. The model allows us to formally quantify and analyze the amount of overhead (instrumentation) caused by the potential presence of software transactions.We prove that (1) it is impossible to build a strictly serializable HyTM implementation that has both uninstrumented reads and writes, even for very weak progress guarantees, and (2) the instrumentation cost incurred by a hardware transaction in any progressive opaque HyTM is linear in the size of the transaction’s data set.We further describe two implementations which exhibit optimal instrumentation costs for two different progress conditions. In sum, this paper proposes the first formal HyTM model and captures for the first time the trade-off between the degree of hardware-software TM concurrency and the amount of instrumentation overhead.},
author = {Alistarh, Dan-Adrian and Kopinsky, Justin and Kuznetsov, Petr and Ravi, Srivatsan and Shavit, Nir},
booktitle = {Proceedings of the 29th International Symposium on Distributed Computing (DISC)},
pages = {185--199},
publisher = {Springer},
title = {{Inherent limitations of hybrid transactional memory}},
doi = {10.1007/978-3-662-48653-5_13},
volume = {9363},
year = {2015},
}
@inproceedings{779,
abstract = {The concurrent memory reclamation problem is that of devising a way for a deallocating thread to verify that no other concurrent threads hold references to a memory block being deallocated. To date, in the absence of automatic garbage collection, there is no satisfactory solution to this problem; existing tracking methods like hazard pointers, reference counters, or epoch-based techniques like RCU, are either prohibitively expensive or require significant programming expertise, to the extent that implementing them efficiently can be worthy of a publication. None of the existing techniques are automatic or even semi-automated. In this paper, we take a new approach to concurrent memory reclamation: instead of manually tracking access to memory locations as done in techniques like hazard pointers, or restricting shared accesses to specific epoch boundaries as in RCU, our algorithm, called ThreadScan, leverages operating system signaling to automatically detect which memory locations are being accessed by concurrent threads. Initial empirical evidence shows that ThreadScan scales surprisingly well and requires negligible programming effort beyond the standard use of Malloc and Free.},
author = {Alistarh, Dan-Adrian and Matveev, Alexander and Leiserson, William and Shavit, Nir},
booktitle = {Proceedings of the 27th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA)},
pages = {123--132},
publisher = {ACM},
title = {{ThreadScan: Automatic and scalable memory reclamation}},
doi = {10.1145/2755573.2755600},
volume = {2015-June},
year = {2015},
}
@unpublished{8183,
abstract = {We study conditions under which a finite simplicial complex $K$ can be mapped to $\mathbb R^d$ without higher-multiplicity intersections. An almost $r$-embedding is a map $f: K\to \mathbb R^d$ such that the images of any $r$
pairwise disjoint simplices of $K$ do not have a common point. We show that if $r$ is not a prime power and $d\geq 2r+1$, then there is a counterexample to the topological Tverberg conjecture, i.e., there is an almost $r$-embedding of
the $(d+1)(r-1)$-simplex in $\mathbb R^d$. This improves on previous constructions of counterexamples (for $d\geq 3r$) based on a series of papers by M. \"Ozaydin, M. Gromov, P. Blagojevi\'c, F. Frick, G. Ziegler, and the second and fourth present authors. The counterexamples are obtained by proving the following algebraic criterion in codimension 2: If $r\ge3$ and if $K$ is a finite $2(r-1)$-complex then there exists an almost $r$-embedding $K\to \mathbb R^{2r}$ if and only if there exists a general position PL map $f:K\to \mathbb R^{2r}$ such that the algebraic intersection number of the $f$-images of any $r$ pairwise disjoint simplices of $K$ is zero. This result can be restated in terms of cohomological obstructions or equivariant maps, and extends an analogous codimension 3 criterion by the second and fourth authors. As another application we classify ornaments $f:S^3 \sqcup S^3\sqcup S^3\to \mathbb R^5$ up to ornament
concordance. It follows from work of M. Freedman, V. Krushkal and P. Teichner that the analogous criterion for $r=2$ is false. We prove a lemma on singular higher-dimensional Borromean rings, yielding an elementary proof of the counterexample.},
author = {Avvakumov, Sergey and Mabillard, Isaac and Skopenkov, A. and Wagner, Uli},
note = {arXiv preprint},
title = {{Eliminating higher-multiplicity intersections, III. Codimension 2}},
year = {2015},
}
@article{8242,
author = {Einhorn, Lukas and Fazekas, Judit and Muhr, Martina and Schoos, Alexandra and Oida, Kumiko and Singer, Josef and Panakova, Lucia and Manzano-Szalai, Krisztina and Jensen-Jarolim, Erika},
issn = {0091-6749},
journal = {Journal of Allergy and Clinical Immunology},
number = {2},
publisher = {Elsevier},
title = {{Generation of recombinant FcεRIα of dog, cat and horse for component-resolved allergy diagnosis in veterinary patients}},
doi = {10.1016/j.jaci.2014.12.1263},
volume = {135},
year = {2015},
}
@article{1602,
abstract = {Interprocedural analysis is at the heart of numerous applications in programming languages, such as alias analysis, constant propagation, etc. Recursive state machines (RSMs) are standard models for interprocedural analysis. We consider a general framework with RSMs where the transitions are labeled from a semiring, and path properties are algebraic with semiring operations. RSMs with algebraic path properties can model interprocedural dataflow analysis problems, the shortest path problem, the most probable path problem, etc. The traditional algorithms for interprocedural analysis focus on path properties where the starting point is fixed as the entry point of a specific method. In this work, we consider possible multiple queries as required in many applications such as in alias analysis. The study of multiple queries allows us to bring in a very important algorithmic distinction between the resource usage of the one-time preprocessing vs for each individual query. The second aspect that we consider is that the control flow graphs for most programs have constant treewidth. Our main contributions are simple and implementable algorithms that supportmultiple queries for algebraic path properties for RSMs that have constant treewidth. Our theoretical results show that our algorithms have small additional one-time preprocessing, but can answer subsequent queries significantly faster as compared to the current best-known solutions for several important problems, such as interprocedural reachability and shortest path. We provide a prototype implementation for interprocedural reachability and intraprocedural shortest path that gives a significant speed-up on several benchmarks.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas and Goyal, Prateesh},
journal = {ACM SIGPLAN Notices},
location = {Mumbai, India},
number = {1},
pages = {97--109},
publisher = {ACM},
title = {{Faster algorithms for algebraic path properties in recursive state machines with constant treewidth}},
doi = {10.1145/2676726.2676979},
volume = {50},
year = {2015},
}
@article{1604,
abstract = {We consider the quantitative analysis problem for interprocedural control-flow graphs (ICFGs). The input consists of an ICFG, a positive weight function that assigns every transition a positive integer-valued number, and a labelling of the transitions (events) as good, bad, and neutral events. The weight function assigns to each transition a numerical value that represents ameasure of how good or bad an event is. The quantitative analysis problem asks whether there is a run of the ICFG where the ratio of the sum of the numerical weights of good events versus the sum of weights of bad events in the long-run is at least a given threshold (or equivalently, to compute the maximal ratio among all valid paths in the ICFG). The quantitative analysis problem for ICFGs can be solved in polynomial time, and we present an efficient and practical algorithm for the problem. We show that several problems relevant for static program analysis, such as estimating the worst-case execution time of a program or the average energy consumption of a mobile application, can be modeled in our framework. We have implemented our algorithm as a tool in the Java Soot framework. We demonstrate the effectiveness of our approach with two case studies. First, we show that our framework provides a sound approach (no false positives) for the analysis of inefficiently-used containers. Second, we show that our approach can also be used for static profiling of programs which reasons about methods that are frequently invoked. Our experimental results show that our tool scales to relatively large benchmarks, and discovers relevant and useful information that can be used to optimize performance of the programs.},
author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Velner, Yaron},
isbn = {978-1-4503-3300-9},
journal = {Proceedings of the 42nd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages (POPL)},
location = {Mumbai, India},
number = {1},
pages = {539--551},
publisher = {ACM},
title = {{Quantitative interprocedural analysis}},
doi = {10.1145/2676726.2676968},
volume = {50},
year = {2015},
}
@inproceedings{1607,
abstract = {We consider the core algorithmic problems related to verification of systems with respect to three classical quantitative properties, namely, the mean-payoff property, the ratio property, and the minimum initial credit for energy property. The algorithmic problem given a graph and a quantitative property asks to compute the optimal value (the infimum value over all traces) from every node of the graph. We consider graphs with constant treewidth, and it is well-known that the control-flow graphs of most programs have constant treewidth. Let n denote the number of nodes of a graph, m the number of edges (for constant treewidth graphs m=O(n)) and W the largest absolute value of the weights. Our main theoretical results are as follows. First, for constant treewidth graphs we present an algorithm that approximates the mean-payoff value within a multiplicative factor of ϵ in time O(n⋅log(n/ϵ)) and linear space, as compared to the classical algorithms that require quadratic time. Second, for the ratio property we present an algorithm that for constant treewidth graphs works in time O(n⋅log(|a⋅b|))=O(n⋅log(n⋅W)), when the output is ab, as compared to the previously best known algorithm with running time O(n2⋅log(n⋅W)). Third, for the minimum initial credit problem we show that (i) for general graphs the problem can be solved in O(n2⋅m) time and the associated decision problem can be solved in O(n⋅m) time, improving the previous known O(n3⋅m⋅log(n⋅W)) and O(n2⋅m) bounds, respectively; and (ii) for constant treewidth graphs we present an algorithm that requires O(n⋅logn) time, improving the previous known O(n4⋅log(n⋅W)) bound. We have implemented some of our algorithms and show that they present a significant speedup on standard benchmarks.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas},
location = {San Francisco, CA, USA},
booktitle = {Proceedings of the 27th International Conference on Computer Aided Verification (CAV)},
pages = {140--157},
publisher = {Springer},
title = {{Faster algorithms for quantitative verification in constant treewidth graphs}},
doi = {10.1007/978-3-319-21690-4_9},
volume = {9206},
year = {2015},
}