@inproceedings{782,
  abstract  = {In this work, we consider the following random process, motivated by the analysis of lock-free concurrent algorithms under high memory contention. In each round, a new scheduling step is allocated to one of n threads, according to a distribution p = (p1, p2, ..., pn), where thread i is scheduled with probability pi. When some thread first reaches a set threshold of executed steps, it registers a win, completing its current operation, and resets its step count to 1. At the same time, threads whose step count was close to the threshold also get reset because of the win, but to 0 steps, being penalized for almost winning. We are interested in two questions: how often does some thread complete an operation (system latency), and how often does a specific thread complete an operation (individual latency)? We provide asymptotically tight bounds for the system and individual latency of this general concurrency pattern, for arbitrary scheduling distributions p. Surprisingly, a simple characterization exists: in expectation, the system will complete a new operation every Θ(1/||p||_2) steps, while thread i will complete a new operation every Θ(||p||_2/p_i^2) steps. The proof is interesting in its own right, as it requires a careful analysis of how the higher norms of the vector p influence the thread step counts and latencies in this random process. Our result offers a simple connection between the scheduling distribution and the average performance of concurrent algorithms, which has several applications.},
  author    = {Alistarh, Dan and Sauerwald, Thomas and Vojnović, Milan},
  booktitle = {Proceedings of the 2015 ACM Symposium on Principles of Distributed Computing (PODC)},
  pages     = {251 -- 260},
  publisher = {ACM},
  title     = {{Lock-Free algorithms under stochastic schedulers}},
  doi       = {10.1145/2767386.2767430},
  volume    = {2015-July},
  year      = {2015},
}
@article{814,
  abstract  = {Human immunodeficiency virus type 1 (HIV-1) assembly proceeds in two stages. First, the 55 kilodalton viral Gag polyprotein assembles into a hexameric protein lattice at the plasma membrane of the infected cell, inducing budding and release of an immature particle. Second, Gag is cleaved by the viral protease, leading to internal rearrangement of the virus into the mature, infectious form. Immature and mature HIV-1 particles are heterogeneous in size and morphology, preventing high-resolution analysis of their protein arrangement in situ by conventional structural biology methods. Here we apply cryo-electron tomography and sub-tomogram averaging methods to resolve the structure of the capsid lattice within intact immature HIV-1 particles at subnanometre resolution, allowing unambiguous positioning of all α-helices. The resulting model reveals tertiary and quaternary structural interactions that mediate HIV-1 assembly. Strikingly, these interactions differ from those predicted by the current model based on in vitro-assembled arrays of Gag-derived proteins from Mason-Pfizer monkey virus. To validate this difference, we solve the structure of the capsid lattice within intact immature Mason-Pfizer monkey virus particles. Comparison with the immature HIV-1 structure reveals that retroviral capsid proteins, while having conserved tertiary structures, adopt different quaternary arrangements during virus assembly. The approach demonstrated here should be applicable to determine structures of other proteins at subnanometre resolution within heterogeneous environments.},
  author    = {Schur, Florian and Hagen, Wim J and Rumlová, Michaela and Ruml, Tomáš and Müller, Barbara and Kräusslich, Hans Georg and Briggs, John A},
  journal   = {Nature},
  number    = {7535},
  pages     = {505 -- 508},
  publisher = {Nature Publishing Group},
  title     = {{Structure of the immature HIV-1 capsid in intact virus particles at 8.8 Å resolution}},
  doi       = {10.1038/nature13838},
  volume    = {517},
  year      = {2015},
}
@article{1591,
  abstract  = {Auxin participates in a multitude of developmental processes, as well as responses to environmental cues. Compared with other plant hormones, auxin exhibits a unique property, as it undergoes directional, cell-to-cell transport facilitated by plasma membrane-localized transport proteins. Among them, a prominent role has been ascribed to the PIN family of auxin efflux facilitators. PIN proteins direct polar auxin transport on account of their asymmetric subcellular localizations. In this review, we provide an overview of the multiple developmental roles of PIN proteins, including the atypical endoplasmic reticulum-localized members of the family, and look at the family from an evolutionary perspective. Next, we cover the cell biological and molecular aspects of PIN function, in particular the establishment of their polar subcellular localization. Hormonal and environmental inputs into the regulation of PIN action are summarized as well.},
  author    = {Adamowski, Maciek and Friml, Jiří},
  journal   = {Plant Cell},
  number    = {1},
  pages     = {20 -- 32},
  publisher = {American Society of Plant Biologists},
  title     = {{PIN-dependent auxin transport: Action, regulation, and evolution}},
  doi       = {10.1105/tpc.114.134874},
  volume    = {27},
  year      = {2015},
}
@inproceedings{1510,
  abstract  = {The concept of well group in a special but important case captures homological properties of the zero set of a continuous map f from K to R^n on a compact space K that are invariant with respect to perturbations of f. The perturbations are arbitrary continuous maps within L_infty distance r from f for a given r > 0. The main drawback of the approach is that the computability of well groups was shown only when dim K = n or n = 1. Our contribution to the theory of well groups is twofold: on the one hand we improve on the computability issue, but on the other hand we present a range of examples where the well groups are incomplete invariants, that is, fail to capture certain important robust properties of the zero set. For the first part, we identify a computable subgroup of the well group that is obtained by cap product with the pullback of the orientation of R^n by f. In other words, well groups can be algorithmically approximated from below. When f is smooth and dim K < 2n-2, our approximation of the (dim K-n)th well group is exact. For the second part, we find examples of maps f, f' from K to R^n with all well groups isomorphic but whose perturbations have different zero sets. We discuss on a possible replacement of the well groups of vector valued maps by an invariant of a better descriptive power and computability status.},
  author    = {Franek, Peter and Krčál, Marek},
  booktitle = {31st International Symposium on Computational Geometry (SoCG 2015)},
  location  = {Eindhoven, Netherlands},
  pages     = {842 -- 856},
  publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title     = {{On computability and triviality of well groups}},
  doi       = {10.4230/LIPIcs.SOCG.2015.842},
  volume    = {34},
  year      = {2015},
}
@misc{5430,
  abstract  = {We consider the core algorithmic problems related to verification of systems with respect to three classical quantitative properties, namely, the mean-payoff property, the ratio property, and the minimum initial credit for energy property. The algorithmic problem given a graph and a quantitative property asks to compute the optimal value (the infimum value over all traces) from every node of the graph. We consider graphs with constant treewidth, and it is well-known that the control-flow graphs of most programs have constant treewidth. Let n denote the number of nodes of a graph, m the number of edges (for constant treewidth graphs m = O ( n ) ) and W the largest absolute value of the weights. Our main theoretical results are as follows. First, for constant treewidth graphs we present an algorithm that approximates the mean-payoff value within a multiplicative factor of ∊ in time O ( n · log( n/∊ )) and linear space, as compared to the classical algorithms that require quadratic time. Second, for the ratio property we present an algorithm that for constant treewidth graphs works in time O ( n · log( | a · b · n | )) = O ( n · log( n · W )) , when the output is a/b , as compared to the previously best known algorithm with running time O ( n 2 · log( n · W )) . Third, for the minimum initial credit problem we show that (i) for general graphs the problem can be solved in O ( n 2 · m ) time and the associated decision problem can be solved in O ( n · m ) time, improving the previous known O ( n 3 · m · log( n · W )) and O ( n 2 · m ) bounds, respectively; and (ii) for constant treewidth graphs we present an algorithm that requires O ( n · log n ) time, improving the previous known O ( n 4 · log( n · W )) bound. We have implemented some of our algorithms and show that they present a significant speedup on standard benchmarks.},
  author    = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas},
  issn      = {2664-1690},
  pages     = {31},
  publisher = {IST Austria},
  title     = {{Faster algorithms for quantitative verification in constant treewidth graphs}},
  doi       = {10.15479/AT:IST-2015-319-v1-1},
  year      = {2015},
}
@inproceedings{1541,
  abstract  = {We present XSpeed, a parallel state-space exploration algorithm for continuous systems with linear dynamics and nondeterministic inputs. The motivation of having parallel algorithms is to exploit the computational power of multi-core processors to speed-up performance. The parallelization is achieved on two fronts. First, we propose a parallel implementation of the support function algorithm by sampling functions in parallel. Second, we propose a parallel state-space exploration by slicing the time horizon and computing the reachable states in the time slices in parallel. The second method can be however applied only to a class of linear systems with invertible dynamics and fixed input. A GP-GPU implementation is also presented following a lazy evaluation strategy on support functions. The parallel algorithms are implemented in the tool XSpeed. We evaluated the performance on two benchmarks including a 28 dimension Helicopter model. Comparison with the sequential counterpart shows a maximum speed-up of almost 7× on a 6 core, 12 thread Intel Xeon CPU E5-2420 processor. Our GP-GPU implementation shows a maximum speed-up of 12× over the sequential implementation and 53× over SpaceEx (LGG scenario), the state of the art tool for reachability analysis of linear hybrid systems. Experiments illustrate that our parallel algorithm with time slicing not only speeds-up performance but also improves precision.},
  author    = {Ray, Rajarshi and Gurung, Amit and Das, Binayak and Bartocci, Ezio and Bogomolov, Sergiy and Grosu, Radu},
  booktitle = {Hardware and Software: Verification and Testing},
  location  = {Haifa, Israel},
  pages     = {3 -- 18},
  publisher = {Springer},
  title     = {{XSpeed: Accelerating reachability analysis on multi-core processors}},
  doi       = {10.1007/978-3-319-26287-1_1},
  volume    = {9434},
  year      = {2015},
}
@article{1553,
  author    = {Maiuri, Paolo and Rupprecht, Jean and Wieser, Stefan and Ruprecht, Verena and Bénichou, Olivier and Carpi, Nicolas and Coppey, Mathieu and De Beco, Simon and Gov, Nir and Heisenberg, Carl-Philipp J and Lage Crespo, Carolina and Lautenschlaeger, Franziska and Le Berre, Maël and Lennon Duménil, Ana and Raab, Matthew and Thiam, Hawa and Piel, Matthieu and Sixt, Michael K and Voituriez, Raphaël},
  title     = {{Actin flows mediate a universal coupling between cell speed and cell persistence}},
  journal   = {Cell},
  volume    = {161},
  number    = {2},
  pages     = {374 -- 386},
  publisher = {Cell Press},
  doi       = {10.1016/j.cell.2015.01.056},
  year      = {2015},
  abstract  = {Cell movement has essential functions in development, immunity, and cancer. Various cell migration patterns have been reported, but no general rule has emerged so far. Here, we show on the basis of experimental data in vitro and in vivo that cell persistence, which quantifies the straightness of trajectories, is robustly coupled to cell migration speed. We suggest that this universal coupling constitutes a generic law of cell migration, which originates in the advection of polarity cues by an actin cytoskeleton undergoing flows at the cellular scale. Our analysis relies on a theoretical model that we validate by measuring the persistence of cells upon modulation of actin flow speeds and upon optogenetic manipulation of the binding of an actin regulator to actin filaments. Beyond the quantitative prediction of the coupling, the model yields a generic phase diagram of cellular trajectories, which recapitulates the full range of observed migration patterns.},
}
@article{1539,
  author    = {Ruess, Jakob},
  title     = {{Minimal moment equations for stochastic models of biochemical reaction networks with partially finite state space}},
  journal   = {Journal of Chemical Physics},
  volume    = {143},
  number    = {24},
  publisher = {American Institute of Physics},
  doi       = {10.1063/1.4937937},
  year      = {2015},
  abstract  = {Many stochastic models of biochemical reaction networks contain some chemical species for which the number of molecules that are present in the system can only be finite (for instance due to conservation laws), but also other species that can be present in arbitrarily large amounts. The prime example of such networks are models of gene expression, which typically contain a small and finite number of possible states for the promoter but an infinite number of possible states for the amount of mRNA and protein. One of the main approaches to analyze such models is through the use of equations for the time evolution of moments of the chemical species. Recently, a new approach based on conditional moments of the species with infinite state space given all the different possible states of the finite species has been proposed. It was argued that this approach allows one to capture more details about the full underlying probability distribution with a smaller number of equations. Here, I show that the result that less moments provide more information can only stem from an unnecessarily complicated description of the system in the classical formulation. The foundation of this argument will be the derivation of moment equations that describe the complete probability distribution over the finite state space but only low-order moments over the infinite state space. I will show that the number of equations that is needed is always less than what was previously claimed and always less than the number of conditional moment equations up to the same order. To support these arguments, a symbolic algorithm is provided that can be used to derive minimal systems of unconditional moment equations for models with partially finite state space. },
}
@inproceedings{1609,
  abstract  = {The synthesis problem asks for the automatic construction of a system from its specification. In the traditional setting, the system is “constructed from scratch” rather than composed from reusable components. However, this is rare in practice, and almost every non-trivial software system relies heavily on the use of libraries of reusable components. Recently, Lustig and Vardi introduced dataflow and controlflow synthesis from libraries of reusable components. They proved that dataflow synthesis is undecidable, while controlflow synthesis is decidable. The problem of controlflow synthesis from libraries of probabilistic components was considered by Nain, Lustig and Vardi, and was shown to be decidable for qualitative analysis (that asks that the specification be satisfied with probability 1). Our main contribution for controlflow synthesis from probabilistic components is to establish better complexity bounds for the qualitative analysis problem, and to show that the more general quantitative problem is undecidable. For the qualitative analysis, we show that the problem (i) is EXPTIME-complete when the specification is given as a deterministic parity word automaton, improving the previously known 2EXPTIME upper bound; and (ii) belongs to UP ∩ coUP and is parity-games hard, when the specification is given directly as a parity condition on the components, improving the previously known EXPTIME upper bound.},
  author    = {Chatterjee, Krishnendu and Doyen, Laurent and Vardi, Moshe},
  booktitle = {Automata, Languages, and Programming (ICALP 2015)},
  location  = {Kyoto, Japan},
  pages     = {108 -- 120},
  publisher = {Springer},
  title     = {{The complexity of synthesis from probabilistic components}},
  doi       = {10.1007/978-3-662-47666-6_9},
  volume    = {9135},
  year      = {2015},
}
@inproceedings{1692,
  abstract  = {Computing an approximation of the reachable states of a hybrid system is a challenge, mainly because overapproximating the solutions of ODEs with a finite number of sets does not scale well. Using template polyhedra can greatly reduce the computational complexity, since it replaces complex operations on sets with a small number of optimization problems. However, the use of templates may make the over-approximation too conservative. Spurious transitions, which are falsely considered reachable, are particularly detrimental to performance and accuracy, and may exacerbate the state explosion problem. In this paper, we examine how spurious transitions can be avoided with minimal computational effort. To this end, detecting spurious transitions is reduced to the well-known problem of showing that two convex sets are disjoint by finding a hyperplane that separates them. We generalize this to flowpipes by considering hyperplanes that evolve with time in correspondence to the dynamics of the system. The approach is implemented in the model checker SpaceEx and demonstrated on examples.},
  author    = {Frehse, Goran and Bogomolov, Sergiy and Greitschus, Marius and Strump, Thomas and Podelski, Andreas},
  booktitle = {Proceedings of the 18th International Conference on Hybrid Systems: Computation and Control},
  isbn      = {978-1-4503-3433-4},
  location  = {Seattle, WA, United States},
  pages     = {149 -- 158},
  publisher = {ACM},
  title     = {{Eliminating spurious transitions in reachability with support functions}},
  doi       = {10.1145/2728606.2728622},
  year      = {2015},
}
@article{1697,
  abstract  = {Motion tracking is a challenge the visual system has to solve by reading out the retinal population. It is still unclear how the information from different neurons can be combined together to estimate the position of an object. Here we recorded a large population of ganglion cells in a dense patch of salamander and guinea pig retinas while displaying a bar moving diffusively. We show that the bar’s position can be reconstructed from retinal activity with a precision in the hyperacuity regime using a linear decoder acting on 100+ cells. We then took advantage of this unprecedented precision to explore the spatial structure of the retina’s population code. The classical view would have suggested that the firing rates of the cells form a moving hill of activity tracking the bar’s position. Instead, we found that most ganglion cells in the salamander fired sparsely and idiosyncratically, so that their neural image did not track the bar. Furthermore, ganglion cell activity spanned an area much larger than predicted by their receptive fields, with cells coding for motion far in their surround. As a result, population redundancy was high, and we could find multiple, disjoint subsets of neurons that encoded the trajectory with high precision. This organization allows for diverse collections of ganglion cells to represent high-accuracy motion information in a form easily read out by downstream neural circuits.},
  author    = {Marre, Olivier and Botella Soler, Vicente and Simmons, Kristina and Mora, Thierry and Tkačik, Gašper and Berry, Michael},
  journal   = {PLoS Computational Biology},
  number    = {7},
  publisher = {Public Library of Science},
  title     = {{High accuracy decoding of dynamical motion from a large retinal population}},
  doi       = {10.1371/journal.pcbi.1004304},
  volume    = {11},
  year      = {2015},
}
@article{1673,
  abstract  = {When a new mutant arises in a population, there is a probability it outcompetes the residents and fixes. The structure of the population can affect this fixation probability. Suppressing population structures reduce the difference between two competing variants, while amplifying population structures enhance the difference. Suppressors are ubiquitous and easy to construct, but amplifiers for the large population limit are more elusive and only a few examples have been discovered. Whether or not a population structure is an amplifier of selection depends on the probability distribution for the placement of the invading mutant. First, we prove that there exist only bounded amplifiers for adversarial placement — that is, for arbitrary initial conditions. Next, we show that the Star population structure, which is known to amplify for mutants placed uniformly at random, does not amplify for mutants that arise through reproduction and are therefore placed proportional to the temperatures of the vertices. Finally, we construct population structures that amplify for all mutational events that arise through reproduction, uniformly at random, or through some combination of the two.},
  author    = {Adlam, Ben and Chatterjee, Krishnendu and Nowak, Martin},
  journal   = {Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences},
  number    = {2181},
  publisher = {Royal Society of London},
  title     = {{Amplifiers of selection}},
  doi       = {10.1098/rspa.2015.0114},
  volume    = {471},
  year      = {2015},
}
@article{1630,
  abstract  = {We present a method to learn and propagate shape placements in 2D polygonal scenes from a few examples provided by a user. The placement of a shape is modeled as an oriented bounding box. Simple geometric relationships between this bounding box and nearby scene polygons define a feature set for the placement. The feature sets of all example placements are then used to learn a probabilistic model over all possible placements and scenes. With this model, we can generate a new set of placements with similar geometric relationships in any given scene. We introduce extensions that enable propagation and generation of shapes in 3D scenes, as well as the application of a learned modeling session to large scenes without additional user interaction. These concepts allow us to generate complex scenes with thousands of objects with relatively little user interaction.},
  author    = {Guerrero, Paul and Jeschke, Stefan and Wimmer, Michael and Wonka, Peter},
  journal   = {ACM Transactions on Graphics},
  location  = {Los Angeles, CA, United States},
  number    = {4},
  publisher = {ACM},
  title     = {{Learning shape placements by example}},
  doi       = {10.1145/2766933},
  volume    = {34},
  year      = {2015},
}
@article{1678,
  author    = {Inglés Prieto, Álvaro and Gschaider-Reichhart, Eva and Muellner, Markus and Nowak, Matthias and Nijman, Sebastian and Grusch, Michael and Janovjak, Harald L},
  title     = {{Light-assisted small-molecule screening against protein kinases}},
  journal   = {Nature Chemical Biology},
  volume    = {11},
  number    = {12},
  pages     = {952 -- 954},
  publisher = {Nature Publishing Group},
  doi       = {10.1038/nchembio.1933},
  year      = {2015},
  abstract  = {High-throughput live-cell screens are intricate elements of systems biology studies and drug discovery pipelines. Here, we demonstrate an optogenetics-assisted method that avoids the need for chemical activators and reporters, reduces the number of operational steps and increases information content in a cell-based small-molecule screen against human protein kinases, including an orphan receptor tyrosine kinase. This blueprint for all-optical screening can be adapted to many drug targets and cellular processes.},
}
@article{1680,
  abstract  = {We consider the satisfiability problem for modal logic over first-order definable classes of frames. We confirm the conjecture from Hemaspaandra and Schnoor [2008] that modal logic is decidable over classes definable by universal Horn formulae. We provide a full classification of Horn formulae with respect to the complexity of the corresponding satisfiability problem. It turns out, that except for the trivial case of inconsistent formulae, local satisfiability is either NP-complete or PSPACE-complete, and global satisfiability is NP-complete, PSPACE-complete, or ExpTime-complete. We also show that the finite satisfiability problem for modal logic over Horn definable classes of frames is decidable. On the negative side, we show undecidability of two related problems. First, we exhibit a simple universal three-variable formula defining the class of frames over which modal logic is undecidable. Second, we consider the satisfiability problem of bimodal logic over Horn definable classes of frames, and also present a formula leading to undecidability.},
  author    = {Michaliszyn, Jakub and Otop, Jan and Kieroński, Emanuel},
  journal   = {ACM Transactions on Computational Logic},
  number    = {1},
  publisher = {ACM},
  title     = {{On the decidability of elementary modal logics}},
  doi       = {10.1145/2817825},
  volume    = {17},
  year      = {2015},
}
@article{1856,
  author    = {Chatterjee, Krishnendu and Henzinger, Thomas A and Jobstmann, Barbara and Singh, Rohit},
  title     = {{Measuring and synthesizing systems in probabilistic environments}},
  journal   = {Journal of the ACM},
  volume    = {62},
  number    = {1},
  publisher = {ACM},
  doi       = {10.1145/2699430},
  year      = {2015},
  abstract  = {The traditional synthesis question given a specification asks for the automatic construction of a system that satisfies the specification, whereas often there exists a preference order among the different systems that satisfy the given specification. Under a probabilistic assumption about the possible inputs, such a preference order is naturally expressed by a weighted automaton, which assigns to each word a value, such that a system is preferred if it generates a higher expected value. We solve the following optimal synthesis problem: given an omega-regular specification, a Markov chain that describes the distribution of inputs, and a weighted automaton that measures how well a system satisfies the given specification under the input assumption, synthesize a system that optimizes the measured value. For safety specifications and quantitative measures that are defined by mean-payoff automata, the optimal synthesis problem reduces to finding a strategy in a Markov decision process (MDP) that is optimal for a long-run average reward objective, which can be achieved in polynomial time. For general omega-regular specifications along with mean-payoff automata, the solution rests on a new, polynomial-time algorithm for computing optimal strategies in MDPs with mean-payoff parity objectives. Our algorithm constructs optimal strategies that consist of two memoryless strategies and a counter. The counter is in general not bounded. To obtain a finite-state system, we show how to construct an ε-optimal strategy with a bounded counter, for all ε > 0. Furthermore, we show how to decide in polynomial time if it is possible to construct an optimal finite-state system (i.e., a system without a counter) for a given specification. 
We have implemented our approach and the underlying algorithms in a tool that takes qualitative and quantitative specifications and automatically constructs a system that satisfies the qualitative specification and optimizes the quantitative specification, if such a system exists. We present some experimental results showing optimal systems that were automatically generated in this way.},
}
@inproceedings{1882,
  author    = {Fahrenberg, Uli and Kretinsky, Jan and Legay, Axel and Traonouez, Louis},
  title     = {{Compositionality for quantitative specifications}},
  location  = {Bertinoro, Italy},
  volume    = {8997},
  pages     = {306 -- 324},
  publisher = {Springer},
  doi       = {10.1007/978-3-319-15317-9_19},
  year      = {2015},
  abstract  = {We provide a framework for compositional and iterative design and verification of systems with quantitative information, such as rewards, time or energy. It is based on disjunctive modal transition systems where we allow actions to bear various types of quantitative information. Throughout the design process the actions can be further refined and the information made more precise. We show how to compute the results of standard operations on the systems, including the quotient (residual), which has not been previously considered for quantitative non-deterministic systems. Our quantitative framework has close connections to the modal nu-calculus and is compositional with respect to general notions of distances between systems and the standard operations.},
}
@inproceedings{1729,
  author    = {Cerny, Pavol and Clarke, Edmund and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Samanta, Roopsha and Tarrach, Thorsten},
  title     = {{From non-preemptive to preemptive scheduling using synchronization synthesis}},
  location  = {San Francisco, CA, United States},
  volume    = {9207},
  pages     = {180 -- 197},
  publisher = {Springer},
  doi       = {10.1007/978-3-319-21668-3_11},
  year      = {2015},
  abstract  = {We present a computer-aided programming approach to concurrency. The approach allows programmers to program assuming a friendly, non-preemptive scheduler, and our synthesis procedure inserts synchronization to ensure that the final program works even with a preemptive scheduler. The correctness specification is implicit, inferred from the non-preemptive behavior. Let us consider sequences of calls that the program makes to an external interface. The specification requires that any such sequence produced under a preemptive scheduler should be included in the set of such sequences produced under a non-preemptive scheduler. The solution is based on a finitary abstraction, an algorithm for bounded language inclusion modulo an independence relation, and rules for inserting synchronization. We apply the approach to device-driver programming, where the driver threads call the software interface of the device and the API provided by the operating system. Our experiments demonstrate that our synthesis method is precise and efficient, and, since it does not require explicit specifications, is more practical than the conventional approach based on user-provided assertions.},
}
@article{1731,
  abstract  = {We consider two-player zero-sum games on graphs. These games can be classified on the basis of the information of the players and on the mode of interaction between them. On the basis of information the classification is as follows: (a) partial-observation (both players have partial view of the game); (b) one-sided complete-observation (one player has complete observation); and (c) complete-observation (both players have complete view of the game). On the basis of mode of interaction we have the following classification: (a) concurrent (both players interact simultaneously); and (b) turn-based (both players interact in turn). The two sources of randomness in these games are randomness in transition function and randomness in strategies. In general, randomized strategies are more powerful than deterministic strategies, and randomness in transitions gives more general classes of games. In this work we present a complete characterization for the classes of games where randomness is not helpful in: (a) the transition function (probabilistic transition can be simulated by deterministic transition); and (b) strategies (pure strategies are as powerful as randomized strategies). As consequence of our characterization we obtain new undecidability results for these games.},
  author    = {Chatterjee, Krishnendu and Doyen, Laurent and Gimbert, Hugo and Henzinger, Thomas A},
  journal   = {Information and Computation},
  number    = {12},
  pages     = {3 -- 16},
  publisher = {Elsevier},
  title     = {{Randomness for free}},
  doi       = {10.1016/j.ic.2015.06.003},
  volume    = {245},
  year      = {2015},
}
@inproceedings{1661,
abstract = {The computation of the winning set for one-pair Streett objectives and for k-pair Streett objectives in (standard) graphs as well as in game graphs are central problems in computer-aided verification, with application to the verification of closed systems with strong fairness conditions, the verification of open systems, checking interface compatibility, well-formedness of specifications, and the synthesis of reactive systems. We give faster algorithms for the computation of the winning set for (1) one-pair Streett objectives (aka parity-3 problem) in game graphs and (2) for k-pair Streett objectives in graphs. For both problems this represents the first improvement in asymptotic running time in 15 years.},
author = {Chatterjee, Krishnendu and Henzinger, Monika and Loitzenbauer, Veronika},
booktitle = {Proceedings - Symposium on Logic in Computer Science},
location = {Kyoto, Japan},
publisher = {IEEE},
title = {{Improved algorithms for one-pair and k-pair Streett objectives}},
doi = {10.1109/LICS.2015.34},
volume = {2015-July},
year = {2015},
}
@misc{5435,
abstract = {We consider Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) objectives.
There have been two different views: (i) the expectation semantics, where the goal is to optimize the expected mean-payoff objective, and (ii) the satisfaction semantics, where the goal is to maximize the probability of runs such that the mean-payoff value stays above a given vector.
We consider the problem where the goal is to optimize the expectation under the constraint that the satisfaction semantics is ensured, and thus consider a generalization that unifies the existing semantics. Our problem captures the notion of optimization with respect to strategies that are risk-averse (i.e., ensures certain probabilistic guarantee).
Our main results are algorithms for the decision problem which are always polynomial in the size of the MDP.
We also show that an approximation of the Pareto-curve can be computed in time polynomial in the size of the MDP, and the approximation factor, but exponential in the number of dimensions. Finally, we present a complete characterization of the strategy complexity (in terms of memory bounds and randomization) required to solve our problem.},
author = {Chatterjee, Krishnendu and Komarkova, Zuzana and Kretinsky, Jan},
issn = {2664-1690},
pages = {51},
publisher = {IST Austria},
title = {{Unifying two views on multiple mean-payoff objectives in Markov decision processes}},
doi = {10.15479/AT:IST-2015-318-v2-1},
year = {2015},
}
@article{523,
abstract = {We consider two-player games played on weighted directed graphs with mean-payoff and total-payoff objectives, two classical quantitative objectives. While for single-dimensional games the complexity and memory bounds for both objectives coincide, we show that in contrast to multi-dimensional mean-payoff games that are known to be coNP-complete, multi-dimensional total-payoff games are undecidable. We introduce conservative approximations of these objectives, where the payoff is considered over a local finite window sliding along a play, instead of the whole play. For single dimension, we show that (i) if the window size is polynomial, deciding the winner takes polynomial time, and (ii) the existence of a bounded window can be decided in NP ∩ coNP, and is at least as hard as solving mean-payoff games. For multiple dimensions, we show that (i) the problem with fixed window size is EXPTIME-complete, and (ii) there is no primitive-recursive algorithm to decide the existence of a bounded window.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Randour, Mickael and Raskin, Jean},
journal = {Information and Computation},
number = {6},
pages = {25 -- 52},
publisher = {Elsevier},
title = {{Looking at mean-payoff and total-payoff through windows}},
doi = {10.1016/j.ic.2015.03.010},
volume = {242},
year = {2015},
}
@article{1604,
abstract = {We consider the quantitative analysis problem for interprocedural control-flow graphs (ICFGs). The input consists of an ICFG, a positive weight function that assigns every transition a positive integer-valued number, and a labelling of the transitions (events) as good, bad, and neutral events. The weight function assigns to each transition a numerical value that represents a measure of how good or bad an event is. The quantitative analysis problem asks whether there is a run of the ICFG where the ratio of the sum of the numerical weights of good events versus the sum of weights of bad events in the long-run is at least a given threshold (or equivalently, to compute the maximal ratio among all valid paths in the ICFG). The quantitative analysis problem for ICFGs can be solved in polynomial time, and we present an efficient and practical algorithm for the problem. We show that several problems relevant for static program analysis, such as estimating the worst-case execution time of a program or the average energy consumption of a mobile application, can be modeled in our framework. We have implemented our algorithm as a tool in the Java Soot framework. We demonstrate the effectiveness of our approach with two case studies. First, we show that our framework provides a sound approach (no false positives) for the analysis of inefficiently-used containers. Second, we show that our approach can also be used for static profiling of programs which reasons about methods that are frequently invoked. Our experimental results show that our tool scales to relatively large benchmarks, and discovers relevant and useful information that can be used to optimize performance of the programs.},
author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Velner, Yaron},
isbn = {978-1-4503-3300-9},
journal = {Proceedings of the 42nd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
location = {Mumbai, India},
number = {1},
pages = {539 -- 551},
publisher = {ACM},
title = {{Quantitative interprocedural analysis}},
doi = {10.1145/2676726.2676968},
volume = {50},
year = {2015},
}
@article{333,
abstract = {We present a hybrid intercalation battery based on a sodium/magnesium (Na/Mg) dual salt electrolyte, metallic magnesium anode, and a cathode based on FeS2 nanocrystals (NCs). Compared to lithium or sodium, metallic magnesium anode is safer due to dendrite-free electroplating and offers extremely high volumetric (3833 mAh cm-3) and gravimetric capacities (2205 mAh g-1). Na-ion cathodes, FeS2 NCs in the present study, may serve as attractive alternatives to Mg-ion cathodes due to the higher voltage of operation and fast, highly reversible insertion of Na-ions. In this proof-of-concept study, electrochemical cycling of the Na/Mg hybrid battery was characterized by high rate capability, high Coulombic efficiency of 99.8%, and high energy density. In particular, with an average discharge voltage of ∼1.1 V and a cathodic capacity of 189 mAh g-1 at a current of 200 mA g-1, the presented Mg/FeS2 hybrid battery delivers energy densities of up to 210 Wh kg-1, comparable to commercial Li-ion batteries and approximately twice as high as state-of-the-art Mg-ion batteries based on Mo6S8 cathodes. Further significant gains in the energy density are expected from the development of Na/Mg electrolytes with a broader electrochemical stability window. Fully based on Earth-abundant elements, hybrid Na-Mg batteries are highly promising for large-scale stationary energy storage. },
author = {Walter, Marc and Kravchyk, Kostiantyn and Ibáñez, Maria and Kovalenko, Maksym},
journal = {Chemistry of Materials},
number = {21},
pages = {7452 -- 7458},
publisher = {ACS},
title = {{Efficient and inexpensive sodium magnesium hybrid battery}},
doi = {10.1021/acs.chemmater.5b03531},
volume = {27},
year = {2015},
}
@article{2006,
abstract = {The monotone secant conjecture posits a rich class of polynomial systems, all of whose solutions are real. These systems come from the Schubert calculus on flag manifolds, and the monotone secant conjecture is a compelling generalization of the Shapiro conjecture for Grassmannians (Theorem of Mukhin, Tarasov, and Varchenko). We present some theoretical evidence for this conjecture, as well as computational evidence obtained by 1.9 teraHertz-years of computing, and we discuss some of the phenomena we observed in our data. },
author = {Hein, Nicolas and Hillar, Christopher and Martin Del Campo Sanchez, Abraham and Sottile, Frank and Teitler, Zach},
journal = {Experimental Mathematics},
number = {3},
pages = {261 -- 269},
publisher = {Taylor \& Francis},
title = {{The monotone secant conjecture in the real Schubert calculus}},
doi = {10.1080/10586458.2014.980044},
volume = {24},
year = {2015},
}
@article{1832,
abstract = {Linearizability of concurrent data structures is usually proved by monolithic simulation arguments relying on the identification of the so-called linearization points. Regrettably, such proofs, whether manual or automatic, are often complicated and scale poorly to advanced non-blocking concurrency patterns, such as helping and optimistic updates. In response, we propose a more modular way of checking linearizability of concurrent queue algorithms that does not involve identifying linearization points. We reduce the task of proving linearizability with respect to the queue specification to establishing four basic properties, each of which can be proved independently by simpler arguments. As a demonstration of our approach, we verify the Herlihy and Wing queue, an algorithm that is challenging to verify by a simulation proof. },
author = {Chakraborty, Soham and Henzinger, Thomas A and Sezgin, Ali and Vafeiadis, Viktor},
journal = {Logical Methods in Computer Science},
number = {1},
publisher = {International Federation of Computational Logic},
title = {{Aspect-oriented linearizability proofs}},
doi = {10.2168/LMCS-11(1:20)2015},
volume = {11},
year = {2015},
}
@misc{5442,
abstract = {We study algorithmic questions for concurrent systems where the transitions are labeled from a complete, closed semiring, and path properties are algebraic with semiring operations. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural properties that arise in program analysis.
We consider that each component of the concurrent system is a graph with constant treewidth, and it is known that the controlflow graphs of most programs have constant treewidth. We allow for multiple possible queries, which arise naturally in demand driven dataflow analysis problems (e.g., alias analysis). The study of multiple queries allows us to consider the tradeoff between the resource usage of the \emph{one-time} preprocessing and for \emph{each individual} query. The traditional approaches construct the product graph of all components and apply the best-known graph algorithm on the product. In the traditional approach, even the answer to a single query requires the transitive closure computation (i.e., the results of all possible queries), which provides no room for tradeoff between preprocessing and query time.
Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time,
each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time. Furthermore, we establish conditional optimality results that show that the worst-case running times of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms (such as improving
the worst-case bounds for the shortest path problem in general graphs whose current best-known bound has not been improved in five decades). Finally, we provide a prototype implementation of our algorithms which significantly outperforms the existing algorithmic methods on several benchmarks.},
author = {Anonymous, 1 and Anonymous, 2 and Anonymous, 3 and Anonymous, 4},
issn = {2664-1690},
pages = {22},
publisher = {IST Austria},
title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}},
year = {2015},
}
@inproceedings{1820,
abstract = {We consider partially observable Markov decision processes (POMDPs) with a set of target states and every transition is associated with an integer cost. The optimization objective we study asks to minimize the expected total cost till the target set is reached, while ensuring that the target set is reached almost-surely (with probability 1). We show that for integer costs approximating the optimal cost is undecidable. For positive costs, our results are as follows: (i) we establish matching lower and upper bounds for the optimal cost and the bound is double exponential; (ii) we show that the problem of approximating the optimal cost is decidable and present approximation algorithms developing on the existing algorithms for POMDPs with finite-horizon objectives. While the worst-case running time of our algorithm is double exponential, we present efficient stopping criteria for the algorithm and show experimentally that it performs well in many examples.},
author = {Chatterjee, Krishnendu and Chmelik, Martin and Gupta, Raghav and Kanodia, Ayush},
booktitle = {Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence },
location = {Austin, TX, USA},
pages = {3496--3502},
publisher = {AAAI Press},
title = {{Optimal cost almost-sure reachability in POMDPs}},
volume = {5},
year = {2015},
}
@article{7739,
abstract = {Currently, there is much debate on the genetic architecture of quantitative traits in wild populations. Is trait variation influenced by many genes of small effect or by a few genes of major effect? Where is additive genetic variation located in the genome? Do the same loci cause similar phenotypic variation in different populations? Great tits (Parus major) have been studied extensively in long‐term studies across Europe and consequently are considered an ecological ‘model organism’. Recently, genomic resources have been developed for the great tit, including a custom SNP chip and genetic linkage map. In this study, we used a suite of approaches to investigate the genetic architecture of eight quantitative traits in two long‐term study populations of great tits—one in the Netherlands and the other in the United Kingdom. Overall, we found little evidence for the presence of genes of large effects in either population. Instead, traits appeared to be influenced by many genes of small effect, with conservative estimates of the number of contributing loci ranging from 31 to 310. Despite concordance between population‐specific heritabilities, we found no evidence for the presence of loci having similar effects in both populations. While population‐specific genetic architectures are possible, an undetected shared architecture cannot be rejected because of limited power to map loci of small and moderate effects. This study is one of few examples of genetic architecture analysis in replicated wild populations and highlights some of the challenges and limitations researchers will face when attempting similar molecular quantitative genetic studies in free‐living populations.},
author = {Santure, Anna W. and Poissant, Jocelyn and De Cauwer, Isabelle and van Oers, Kees and Robinson, Matthew Richard and Quinn, John L. and Groenen, Martien A. M. and Visser, Marcel E. and Sheldon, Ben C. and Slate, Jon},
issn = {0962-1083},
journal = {Molecular Ecology},
pages = {6148--6162},
publisher = {Wiley},
title = {{Replicated analysis of the genetic architecture of quantitative traits in two wild great tit populations}},
doi = {10.1111/mec.13452},
volume = {24},
year = {2015},
}
@article{7741,
abstract = {Phenotypes expressed in a social context are not only a function of the individual, but can also be shaped by the phenotypes of social partners. These social effects may play a major role in the evolution of cooperative breeding if social partners differ in the quality of care they provide and if individual carers adjust their effort in relation to that of other carers. When applying social effects models to wild study systems, it is also important to explore sources of individual plasticity that could masquerade as social effects. We studied offspring provisioning rates of parents and helpers in a wild population of long-tailed tits Aegithalos caudatus using a quantitative genetic framework to identify these social effects and partition them into genetic, permanent environment and current environment components. Controlling for other effects, individuals were consistent in their provisioning effort at a given nest, but adjusted their effort based on who was in their social group, indicating the presence of social effects. However, these social effects differed between years and social contexts, indicating a current environment effect, rather than indicating a genetic or permanent environment effect. While this study reveals the importance of examining environmental and genetic sources of social effects, the framework we present is entirely general, enabling a greater understanding of potentially important social effects within any ecological population.},
author = {Adams, Mark James and Robinson, Matthew Richard and Mannarelli, Maria-Elena and Hatchwell, Ben J.},
issn = {0962-8452},
journal = {Proceedings of the Royal Society B: Biological Sciences},
number = {1810},
publisher = {The Royal Society},
title = {{Social genetic and social environment effects on parental and helper care in a cooperatively breeding bird}},
doi = {10.1098/rspb.2015.0689},
volume = {282},
year = {2015},
}
@article{7765,
abstract = {We introduce a principle unique to disordered solids wherein the contribution of any bond to one global perturbation is uncorrelated with its contribution to another. Coupled with sufficient variability in the contributions of different bonds, this “independent bond-level response” paves the way for the design of real materials with unusual and exquisitely tuned properties. To illustrate this, we choose two global perturbations: compression and shear. By applying a bond removal procedure that is both simple and experimentally relevant to remove a very small fraction of bonds, we can drive disordered spring networks to both the incompressible and completely auxetic limits of mechanical behavior.},
author = {Goodrich, Carl Peter and Liu, Andrea J. and Nagel, Sidney R.},
issn = {0031-9007},
journal = {Physical Review Letters},
number = {22},
publisher = {American Physical Society},
title = {{The principle of independent bond-level response: Tuning by pruning to exploit disorder for global behavior}},
doi = {10.1103/physrevlett.114.225501},
volume = {114},
year = {2015},
}
@inproceedings{1647,
abstract = {Round-optimal blind signatures are notoriously hard to construct in the standard model, especially in the malicious-signer model, where blindness must hold under adversarially chosen keys. This is substantiated by several impossibility results. The only construction that can be termed theoretically efficient, by Garg and Gupta (Eurocrypt’14), requires complexity leveraging, inducing an exponential security loss. We present a construction of practically efficient round-optimal blind signatures in the standard model. It is conceptually simple and builds on the recent structure-preserving signatures on equivalence classes (SPS-EQ) from Asiacrypt’14. While the traditional notion of blindness follows from standard assumptions, we prove blindness under adversarially chosen keys under an interactive variant of DDH. However, we neither require non-uniform assumptions nor complexity leveraging. We then show how to extend our construction to partially blind signatures and to blind signatures on message vectors, which yield a construction of one-show anonymous credentials à la “anonymous credentials light” (CCS’13) in the standard model. Furthermore, we give the first SPS-EQ construction under noninteractive assumptions and show how SPS-EQ schemes imply conventional structure-preserving signatures, which allows us to apply optimality results for the latter to SPS-EQ.},
author = {Fuchsbauer, Georg and Hanser, Christian and Slamanig, Daniel},
location = {Santa Barbara, CA, United States},
pages = {233 -- 253},
publisher = {Springer},
title = {{Practical round-optimal blind signatures in the standard model}},
doi = {10.1007/978-3-662-48000-7_12},
volume = {9216},
year = {2015},
}
@article{1577,
abstract = {Contrary to the pattern seen in mammalian sex chromosomes, where most Y-linked genes have X-linked homologs, the Drosophila X and Y chromosomes appear to be unrelated. Most of the Y-linked genes have autosomal paralogs, so autosome-to-Y transposition must be the main source of Drosophila Y-linked genes. Here we show how these genes were acquired. We found a previously unidentified gene (flagrante delicto Y, FDY) that originated from a recent duplication of the autosomal gene vig2 to the Y chromosome of Drosophila melanogaster. Four contiguous genes were duplicated along with vig2, but they became pseudogenes through the accumulation of deletions and transposable element insertions, whereas FDY remained functional, acquired testis-specific expression, and now accounts for ∼20% of the vig2-like mRNA in testis. FDY is absent in the closest relatives of D. melanogaster, and DNA sequence divergence indicates that the duplication to the Y chromosome occurred ∼2 million years ago. Thus, FDY provides a snapshot of the early stages of the establishment of a Y-linked gene and demonstrates how the Drosophila Y has been accumulating autosomal genes.},
author = {Carvalho, Antonio and Vicoso, Beatriz and Russo, Claudia and Swenor, Bonnielin and Clark, Andrew},
journal = {PNAS},
number = {40},
pages = {12450 -- 12455},
publisher = {National Academy of Sciences},
title = {{Birth of a new gene on the Y chromosome of Drosophila melanogaster}},
doi = {10.1073/pnas.1516543112},
volume = {112},
year = {2015},
}
@article{1837,
abstract = {Transition to turbulence in straight pipes occurs in spite of the linear stability of the laminar Hagen-Poiseuille flow if both the amplitude of flow perturbations and the Reynolds number Re exceed a minimum threshold (subcritical transition). As the pipe curvature increases, centrifugal effects become important, modifying the basic flow as well as the most unstable linear modes. If the curvature (tube-to-coiling diameter d/D) is sufficiently large, a Hopf bifurcation (supercritical instability) is encountered before turbulence can be excited (subcritical instability). We trace the instability thresholds in the Re - d/D parameter space in the range 0.01 ≤ d/D ≤ 0.1 by means of laser-Doppler velocimetry and determine the point where the subcritical and supercritical instabilities meet. Two different experimental set-ups are used: a closed system where the pipe forms an axisymmetric torus and an open system employing a helical pipe. Implications for the measurement of friction factors in curved pipes are discussed.},
author = {Kühnen, Jakob and Braunshier, P and Schwegel, M and Kuhlmann, Hendrik and Hof, Björn},
journal = {Journal of Fluid Mechanics},
number = {5},
publisher = {Cambridge University Press},
title = {{Subcritical versus supercritical transition to turbulence in curved pipes}},
doi = {10.1017/jfm.2015.184},
volume = {770},
year = {2015},
}
@inproceedings{1659,
abstract = {The target discounted-sum problem is the following: Given a rational discount factor 0 < λ < 1 and three rational values a, b, and t, does there exist a finite or an infinite sequence w ∈ {a, b}* or w ∈ {a, b}^ω, such that Σ_{i=0}^{|w|} w(i)λ^i equals t? The problem turns out to relate to many fields of mathematics and computer science, and its decidability question is surprisingly hard to solve. We solve the finite version of the problem, and show the hardness of the infinite version, linking it to various areas and open problems in mathematics and computer science: β-expansions, discounted-sum automata, piecewise affine maps, and generalizations of the Cantor set. We provide some partial results to the infinite version, among which are solutions to its restriction to eventually-periodic sequences and to the cases that λ ≥ 1/2 or λ = 1/n, for every n ∈ ℕ. We use our results for solving some open problems on discounted-sum automata, among which are the exact-value problem for nondeterministic automata over finite words and the universality and inclusion problems for functional automata.},
author = {Boker, Udi and Henzinger, Thomas A and Otop, Jan},
booktitle = {LICS},
issn = {1043-6871},
location = {Kyoto, Japan},
pages = {750 -- 761},
publisher = {IEEE},
title = {{The target discounted-sum problem}},
doi = {10.1109/LICS.2015.74},
year = {2015},
}
@article{1851,
abstract = {We consider mating strategies for females who search for males sequentially during a season of limited length. We show that the best strategy rejects a given male type if encountered before a time-threshold but accepts him after. For frequency-independent benefits, we obtain the optimal time-thresholds explicitly for both discrete and continuous distributions of males, and allow for mistakes being made in assessing the correct male type. When the benefits are indirect (genes for the offspring) and the population is under frequency-dependent ecological selection, the benefits depend on the mating strategy of other females as well. This case is particularly relevant to speciation models that seek to explore the stability of reproductive isolation by assortative mating under frequency-dependent ecological selection. We show that the indirect benefits are to be quantified by the reproductive values of couples, and describe how the evolutionarily stable time-thresholds can be found. We conclude with an example based on the Levene model, in which we analyze the evolutionarily stable assortative mating strategies and the strength of reproductive isolation provided by them.},
author = {Priklopil, Tadeas and Kisdi, Eva and Gyllenberg, Mats},
journal = {Evolution},
number = {4},
pages = {1015 -- 1026},
publisher = {Wiley-Blackwell},
title = {{Evolutionarily stable mating decisions for sequentially searching females and the stability of reproductive isolation by assortative mating}},
doi = {10.1111/evo.12618},
volume = {69},
year = {2015},
}
@inproceedings{1628,
abstract = {We propose a method for fabricating deformable objects with spatially varying elasticity using 3D printing. Using a single, relatively stiff printer material, our method designs an assembly of smallscale microstructures that have the effect of a softer material at the object scale, with properties depending on the microstructure used in each part of the object. We build on work in the area of metamaterials, using numerical optimization to design tiled microstructures with desired properties, but with the key difference that our method designs families of related structures that can be interpolated to smoothly vary the material properties over a wide range. To create an object with spatially varying elastic properties, we tile the object's interior with microstructures drawn from these families, generating a different microstructure for each cell using an efficient algorithm to select compatible structures for neighboring cells. We show results computed for both 2D and 3D objects, validating several 2D and 3D printed structures using standard material tests as well as demonstrating various example applications.},
author = {Schumacher, Christian and Bickel, Bernd and Rys, Jan and Marschner, Steve and Daraio, Chiara and Gross, Markus},
location = {Los Angeles, CA, USA},
number = {4},
publisher = {ACM},
title = {{Microstructures to control elasticity in 3D printing}},
doi = {10.1145/2766926},
volume = {34},
year = {2015},
}
@article{120,
abstract = {Clustering of fine particles is of crucial importance in settings ranging from the early stages of planet formation to the coagulation of industrial powders and airborne pollutants. Models of such clustering typically focus on inelastic deformation and cohesion. However, even in charge-neutral particle systems comprising grains of the same dielectric material, tribocharging can generate large amounts of net positive or negative charge on individual particles, resulting in long-range electrostatic forces. The effects of such forces on cluster formation are not well understood and have so far not been studied in situ. Here we report the first observations of individual collide-and-capture events between charged submillimetre particles, including Kepler-like orbits. Charged particles can become trapped in their mutual electrostatic energy well and aggregate via multiple bounces. This enables the initiation of clustering at relative velocities much larger than the upper limit for sticking after a head-on collision, a long-standing issue known from pre-planetary dust aggregation. Moreover, Coulomb interactions together with dielectric polarization are found to stabilize characteristic molecule-like configurations, providing new insights for the modelling of clustering dynamics in a wide range of microscopic dielectric systems, such as charged polarizable ions, biomolecules and colloids.},
author = {Lee, Victor and Waitukaitis, Scott R and Miskin, Marc and Jaeger, Heinrich},
journal = {Nature Physics},
number = {9},
pages = {733 -- 737},
publisher = {Nature Publishing Group},
title = {{Direct observation of particle interactions and clustering in charged granular streams}},
doi = {10.1038/nphys3396},
volume = {11},
year = {2015},
}
@article{1314,
abstract = {We derive a posteriori estimates for the modeling error caused by the assumption of perfect incompressibility in the incompressible Navier-Stokes equation: Real fluids are never perfectly incompressible but always feature at least some low amount of compressibility. Thus, their behavior is described by the compressible Navier-Stokes equation, the pressure being a steep function of the density. We rigorously estimate the difference between an approximate solution to the incompressible Navier-Stokes equation and any weak solution to the compressible Navier-Stokes equation in the sense of Lions (without assuming any additional regularity of solutions). Heuristics and numerical results suggest that our error estimates are of optimal order in the case of "well-behaved" flows and divergence-free approximations of the velocity field. Thus, we expect our estimates to justify the idealization of fluids as perfectly incompressible also in practical situations.},
author = {Fischer, Julian L},
journal = {SIAM Journal on Numerical Analysis},
number = {5},
pages = {2178 -- 2205},
publisher = {Society for Industrial and Applied Mathematics},
title = {{A posteriori modeling error estimates for the assumption of perfect incompressibility in the Navier-Stokes equation}},
doi = {10.1137/140966654},
volume = {53},
year = {2015},
}
@article{1542,
abstract = {The theory of population genetics and evolutionary computation have been evolving separately for nearly 30 years. Many results have been independently obtained in both fields and many others are unique to its respective field. We aim to bridge this gap by developing a unifying framework for evolutionary processes that allows both evolutionary algorithms and population genetics models to be cast in the same formal framework. The framework we present here decomposes the evolutionary process into its several components in order to facilitate the identification of similarities between different models. In particular, we propose a classification of evolutionary operators based on the defining properties of the different components. We cast several commonly used operators from both fields into this common framework. Using this, we map different evolutionary and genetic algorithms to different evolutionary regimes and identify candidates with the most potential for the translation of results between the fields. This provides a unified description of evolutionary processes and represents a stepping stone towards new tools and results to both fields. },
author = {Paixao, Tiago and Badkobeh, Golnaz and Barton, Nicholas H and Çörüş, Doğan and Dang, Duc-Cuong and Friedrich, Tobias and Lehre, Per and Sudholt, Dirk and Sutton, Andrew and Trubenova, Barbora},
journal = {Journal of Theoretical Biology},
pages = {28 -- 43},
publisher = {Elsevier},
title = {{Toward a unifying framework for evolutionary processes}},
doi = {10.1016/j.jtbi.2015.07.011},
volume = {383},
year = {2015},
}
@article{1547,
abstract = {Let G be a graph on the vertex set V(G) = {x1,…,xn} with the edge set E(G), and let R = K[x1,…, xn] be the polynomial ring over a field K. Two monomial ideals are associated to G, the edge ideal I(G) generated by all monomials xixj with {xi,xj} ∈ E(G), and the vertex cover ideal IG generated by monomials ∏xi∈Cxi for all minimal vertex covers C of G. A minimal vertex cover of G is a subset C ⊂ V(G) such that each edge has at least one vertex in C and no proper subset of C has the same property. Indeed, the vertex cover ideal of G is the Alexander dual of the edge ideal of G. In this paper, for an unmixed bipartite graph G we consider the lattice of vertex covers LG and we explicitly describe the minimal free resolution of the ideal associated to LG which is exactly the vertex cover ideal of G. Then we compute depth, projective dimension, regularity and extremal Betti numbers of R/I(G) in terms of the associated lattice.},
author = {Mohammadi, Fatemeh and Moradi, Somayeh},
issn = {2234-3016},
journal = {Bulletin of the Korean Mathematical Society},
number = {3},
pages = {977 -- 986},
publisher = {Korean Mathematical Society},
title = {{Resolution of unmixed bipartite graphs}},
doi = {10.4134/BKMS.2015.52.3.977},
volume = {52},
year = {2015},
}
@article{1554,
abstract = {The visualization of hormonal signaling input and output is key to understanding how multicellular development is regulated. The plant signaling molecule auxin triggers many growth and developmental responses, but current tools lack the sensitivity or precision to visualize these. We developed a set of fluorescent reporters that allow sensitive and semiquantitative readout of auxin responses at cellular resolution in Arabidopsis thaliana. These generic tools are suitable for any transformable plant species.},
author = {Liao, Che-Yang and Smet, Wouter and Brunoud, Géraldine and Yoshida, Saiko and Vernoux, Teva and Weijers, Dolf},
journal = {Nature Methods},
number = {3},
pages = {207 -- 210},
publisher = {Nature Publishing Group},
title = {{Reporters for sensitive and quantitative measurement of auxin response}},
doi = {10.1038/nmeth.3279},
volume = {12},
year = {2015},
}
@article{1559,
abstract = {There are deep, yet largely unexplored, connections between computer science and biology. Both disciplines examine how information proliferates in time and space. Central results in computer science describe the complexity of algorithms that solve certain classes of problems. An algorithm is deemed efficient if it can solve a problem in polynomial time, which means the running time of the algorithm is a polynomial function of the length of the input. There are classes of harder problems for which the fastest possible algorithm requires exponential time. Another criterion is the space requirement of the algorithm. There is a crucial distinction between algorithms that can find a solution, verify a solution, or list several distinct solutions in given time and space. The complexity hierarchy that is generated in this way is the foundation of theoretical computer science. Precise complexity results can be notoriously difficult. The famous question whether polynomial time equals nondeterministic polynomial time (i.e., P = NP) is one of the hardest open problems in computer science and all of mathematics. Here, we consider simple processes of ecological and evolutionary spatial dynamics. The basic question is: What is the probability that a new invader (or a new mutant) will take over a resident population? We derive precise complexity results for a variety of scenarios. We therefore show that some fundamental questions in this area cannot be answered by simple equations (assuming that P is not equal to NP).},
author = {Ibsen-Jensen, Rasmus and Chatterjee, Krishnendu and Nowak, Martin},
journal = {Proceedings of the National Academy of Sciences},
number = {51},
pages = {15636 -- 15641},
publisher = {National Academy of Sciences},
title = {{Computational complexity of ecological and evolutionary spatial dynamics}},
doi = {10.1073/pnas.1511366112},
volume = {112},
year = {2015},
}
@article{1561,
abstract = {Replication-deficient recombinant adenoviruses are potent vectors for the efficient transient expression of exogenous genes in resting immune cells. However, most leukocytes are refractory to efficient adenoviral transduction as they lack expression of the coxsackie/adenovirus receptor (CAR). To circumvent this obstacle, we generated the R26/CAG-CARΔ1StopF (where R26 is ROSA26 and CAG is CMV early enhancer/chicken β actin promoter) knock-in mouse line. This strain allows monitoring of in situ Cre recombinase activity through expression of CARΔ1. Simultaneously, CARΔ1 expression permits selective and highly efficient adenoviral transduction of immune cell populations, such as mast cells or T cells, directly ex vivo in bulk cultures without prior cell purification or activation. Furthermore, we show that CARΔ1 expression dramatically improves adenoviral infection of in vitro differentiated conventional and plasmacytoid dendritic cells (DCs), basophils, mast cells, as well as Hoxb8-immortalized hematopoietic progenitor cells. This novel dual function mouse strain will hence be a valuable tool to rapidly dissect the function of specific genes in leukocyte physiology.},
author = {Heger, Klaus and Kober, Maike and Rieß, David and Drees, Christoph and De Vries, Ingrid and Bertossi, Arianna and Roers, Axel and Sixt, Michael K and Schmidt Supprian, Marc},
journal = {European Journal of Immunology},
number = {6},
pages = {1614 -- 1620},
publisher = {Wiley},
title = {{A novel Cre recombinase reporter mouse strain facilitates selective and efficient infection of primary immune cells with adenoviral vectors}},
doi = {10.1002/eji.201545457},
volume = {45},
year = {2015},
}
@article{1566,
abstract = {Deposits of misfolded proteins in the human brain are associated with the development of many neurodegenerative diseases. Recent studies show that these proteins have common traits even at the monomer level. Among them, a polyglutamine region that is present in huntingtin is known to exhibit a correlation between the length of the chain and the severity as well as the earliness of the onset of Huntington disease. Here, we apply bias exchange molecular dynamics to generate structures of polyglutamine expansions of several lengths and characterize the resulting independent conformations. We compare the properties of these conformations to those of the standard proteins, as well as to other homopolymeric tracts. We find that, similar to the previously studied polyvaline chains, the set of possible transient folds is much broader than the set of known-to-date folds, although the conformations have different structures. We show that the mechanical stability is not related to any simple geometrical characteristics of the structures. We demonstrate that long polyglutamine expansions result in higher mechanical stability than the shorter ones. They also have a longer life span and are substantially more prone to form knotted structures. The knotted region has an average length of 35 residues, similar to the typical threshold for most polyglutamine-related diseases. Similarly, changes in shape and mechanical stability appear once the total length of the peptide exceeds this threshold of 35 glutamine residues. We suggest that knotted conformers may also harm the cellular machinery and thus lead to disease.},
author = {Gómez Sicilia, Àngel and Sikora, Mateusz K and Cieplak, Marek and Carrión Vázquez, Mariano},
journal = {PLoS Computational Biology},
number = {10},
publisher = {Public Library of Science},
title = {{An exploration of the universe of polyglutamine structures}},
doi = {10.1371/journal.pcbi.1004541},
volume = {11},
year = {2015},
}
@article{1504,
abstract = {Let Q = (Q1, . . . , Qn) be a random vector drawn from the uniform distribution on the set of all n! permutations of {1, 2, . . . , n}. Let Z = (Z1, . . . , Zn), where Zj is the mean zero variance one random variable obtained by centralizing and normalizing Qj , j = 1, . . . , n. Assume that Xi , i = 1, . . . ,p are i.i.d. copies of 1/√p Z and X = Xp,n is the p × n random matrix with Xi as its ith row. Then Sn = XX^T is called the p × p Spearman's rank correlation matrix which can be regarded as a high dimensional extension of the classical nonparametric statistic Spearman's rank correlation coefficient between two independent random variables. In this paper, we establish a CLT for the linear spectral statistics of this nonparametric random matrix model in the scenario of high dimension, namely, p = p(n) and p/n→c ∈ (0,∞) as n→∞. We propose a novel evaluation scheme to estimate the core quantity in Anderson and Zeitouni's cumulant method in [Ann. Statist. 36 (2008) 2553-2576] to bypass the so-called joint cumulant summability. In addition, we raise a two-step comparison approach to obtain the explicit formulae for the mean and covariance functions in the CLT. Relying on this CLT, we then construct a distribution-free statistic to test complete independence for components of random vectors. Owing to the nonparametric property, we can use this test on generally distributed random variables including the heavy-tailed ones.},
author = {Bao, Zhigang and Lin, Liang and Pan, Guangming and Zhou, Wang},
journal = {Annals of Statistics},
number = {6},
pages = {2588 -- 2623},
publisher = {Institute of Mathematical Statistics},
title = {{Spectral statistics of large dimensional Spearman's rank correlation matrix and its application}},
doi = {10.1214/15-AOS1353},
volume = {43},
year = {2015},
}
@article{1509,
abstract = {The Auxin Binding Protein1 (ABP1) has been identified based on its ability to bind auxin with high affinity and studied for a long time as a prime candidate for the extracellular auxin receptor responsible for mediating in particular the fast non-transcriptional auxin responses. However, the contradiction between the embryo-lethal phenotypes of the originally described Arabidopsis T-DNA insertional knock-out alleles (abp1-1 and abp1-1s) and the wild type-like phenotypes of other recently described loss-of-function alleles (abp1-c1 and abp1-TD1) questions the biological importance of ABP1 and relevance of the previous genetic studies. Here we show that there is no hidden copy of the ABP1 gene in the Arabidopsis genome but the embryo-lethal phenotypes of abp1-1 and abp1-1s alleles are very similar to the knock-out phenotypes of the neighboring gene, BELAYA SMERT (BSM). Furthermore, the allelic complementation test between bsm and abp1 alleles shows that the embryo-lethality in the abp1-1 and abp1-1s alleles is caused by the off-target disruption of the BSM locus by the T-DNA insertions. This clarifies the controversy of different phenotypes among published abp1 knock-out alleles and asks for reflections on the developmental role of ABP1.},
author = {Michalko, Jaroslav and Dravecka, Marta and Bollenbach, Tobias and Friml, Jiří},
journal = {F1000Research},
publisher = {F1000 Research Ltd.},
title = {{Embryo-lethal phenotypes in early abp1 mutants are due to disruption of the neighboring BSM gene}},
doi = {10.12688/f1000research.7143.1},
volume = {4},
year = {2015},
}
@article{1530,
abstract = {In growing cells, protein synthesis and cell growth are typically not synchronous, and, thus, protein concentrations vary over the cell division cycle. We have developed a theoretical description of genetic regulatory systems in bacteria that explicitly considers the cell division cycle to investigate its impact on gene expression. We calculate the cell-to-cell variations arising from cells being at different stages in the division cycle for unregulated genes and for basic regulatory mechanisms. These variations contribute to the extrinsic noise observed in single-cell experiments, and are most significant for proteins with short lifetimes. Negative autoregulation buffers against variation of protein concentration over the division cycle, but the effect is found to be relatively weak. Stronger buffering is achieved by an increased protein lifetime. Positive autoregulation can strongly amplify such variation if the parameters are set to values that lead to resonance-like behaviour. For cooperative positive autoregulation, the concentration variation over the division cycle diminishes the parameter region of bistability and modulates the switching times between the two stable states. The same effects are seen for a two-gene mutual-repression toggle switch. By contrast, an oscillatory circuit, the repressilator, is only weakly affected by the division cycle.},
author = {Bierbaum, Veronika and Klumpp, Stefan},
journal = {Physical Biology},
number = {6},
publisher = {IOP Publishing Ltd.},
title = {{Impact of the cell division cycle on gene circuits}},
doi = {10.1088/1478-3975/12/6/066003},
volume = {12},
year = {2015},
}
@article{1573,
abstract = {We present a new, simpler proof of the unconditional uniqueness of solutions to the cubic Gross-Pitaevskii hierarchy in ℝ3. One of the main tools in our analysis is the quantum de Finetti theorem. Our uniqueness result is equivalent to the one established in the celebrated works of Erdős, Schlein, and Yau.},
author = {Chen, Thomas and Hainzl, Christian and Pavlović, Nataša and Seiringer, Robert},
journal = {Communications on Pure and Applied Mathematics},
number = {10},
pages = {1845 -- 1884},
publisher = {Wiley},
title = {{Unconditional uniqueness for the cubic Gross-Pitaevskii hierarchy via quantum de Finetti}},
doi = {10.1002/cpa.21552},
volume = {68},
year = {2015},
}
@article{1578,
abstract = {We prove that the dual of the digital Voronoi diagram constructed by flooding the plane from the data points gives a geometrically and topologically correct dual triangulation. This provides the proof of correctness for recently developed GPU algorithms that outperform traditional CPU algorithms for constructing two-dimensional Delaunay triangulations.},
author = {Cao, Thanh-Tung and Edelsbrunner, Herbert and Tan, Tiow-Seng},
journal = {Computational Geometry},
number = {7},
pages = {507 -- 519},
publisher = {Elsevier},
title = {{Triangulations from topologically correct digital Voronoi diagrams}},
doi = {10.1016/j.comgeo.2015.04.001},
volume = {48},
year = {2015},
}