@article{1677, abstract = {We consider real symmetric and complex Hermitian random matrices with the additional symmetry $h_{xy} = h_{N-y,N-x}$. The matrix elements are independent (up to the fourfold symmetry) and not necessarily identically distributed. This ensemble naturally arises as the Fourier transform of a Gaussian orthogonal ensemble. It also occurs as the flip matrix model - an approximation of the two-dimensional Anderson model at small disorder. We show that the density of states converges to the Wigner semicircle law despite the new symmetry type. We also prove the local version of the semicircle law on the optimal scale.}, author = {Alt, Johannes}, journal = {Journal of Mathematical Physics}, number = {10}, publisher = {American Institute of Physics}, title = {{The local semicircle law for random matrices with a fourfold symmetry}}, doi = {10.1063/1.4932606}, volume = {56}, year = {2015}, } @article{1678, abstract = {High-throughput live-cell screens are intricate elements of systems biology studies and drug discovery pipelines. Here, we demonstrate an optogenetics-assisted method that avoids the need for chemical activators and reporters, reduces the number of operational steps and increases information content in a cell-based small-molecule screen against human protein kinases, including an orphan receptor tyrosine kinase. This blueprint for all-optical screening can be adapted to many drug targets and cellular processes.}, author = {Inglés Prieto, Álvaro and Gschaider-Reichhart, Eva and Muellner, Markus and Nowak, Matthias and Nijman, Sebastian and Grusch, Michael and Janovjak, Harald L}, journal = {Nature Chemical Biology}, number = {12}, pages = {952 -- 954}, publisher = {Nature Publishing Group}, title = {{Light-assisted small-molecule screening against protein kinases}}, doi = {10.1038/nchembio.1933}, volume = {11}, year = {2015}, } @article{1576, abstract = {Gene expression is controlled primarily by interactions between transcription factor proteins (TFs) and the regulatory DNA sequence, a process that can be captured well by thermodynamic models of regulation. These models, however, neglect regulatory crosstalk: the possibility that noncognate TFs could initiate transcription, with potentially disastrous effects for the cell. Here, we estimate the importance of crosstalk, suggest that its avoidance strongly constrains equilibrium models of TF binding, and propose an alternative nonequilibrium scheme that implements kinetic proofreading to suppress erroneous initiation. This proposal is consistent with the observed covalent modifications of the transcriptional apparatus and predicts increased noise in gene expression as a trade-off for improved specificity. Using information theory, we quantify this trade-off to find when optimal proofreading architectures are favored over their equilibrium counterparts. Such architectures exhibit significant super-Poisson noise at low expression in steady state.}, author = {Cepeda Humerez, Sarah A and Rieckh, Georg and Tkacik, Gasper}, journal = {Physical Review Letters}, number = {24}, publisher = {American Physical Society}, title = {{Stochastic proofreading mechanism alleviates crosstalk in transcriptional regulation}}, doi = {10.1103/PhysRevLett.115.248101}, volume = {115}, year = {2015}, } @unpublished{8183, abstract = {We study conditions under which a finite simplicial complex $K$ can be mapped to $\mathbb R^d$ without higher-multiplicity intersections.
An almost $r$-embedding is a map $f: K\to \mathbb R^d$ such that the images of any $r$ pairwise disjoint simplices of $K$ do not have a common point. We show that if $r$ is not a prime power and $d\geq 2r+1$, then there is a counterexample to the topological Tverberg conjecture, i.e., there is an almost $r$-embedding of the $(d+1)(r-1)$-simplex in $\mathbb R^d$. This improves on previous constructions of counterexamples (for $d\geq 3r$) based on a series of papers by M. \"Ozaydin, M. Gromov, P. Blagojevi\'c, F. Frick, G. Ziegler, and the second and fourth present authors. The counterexamples are obtained by proving the following algebraic criterion in codimension 2: If $r\ge3$ and if $K$ is a finite $2(r-1)$-complex then there exists an almost $r$-embedding $K\to \mathbb R^{2r}$ if and only if there exists a general position PL map $f:K\to \mathbb R^{2r}$ such that the algebraic intersection number of the $f$-images of any $r$ pairwise disjoint simplices of $K$ is zero. This result can be restated in terms of cohomological obstructions or equivariant maps, and extends an analogous codimension 3 criterion by the second and fourth authors. As another application we classify ornaments $f:S^3 \sqcup S^3\sqcup S^3\to \mathbb R^5$ up to ornament concordance. It follows from work of M. Freedman, V. Krushkal and P. Teichner that the analogous criterion for $r=2$ is false. We prove a lemma on singular higher-dimensional Borromean rings, yielding an elementary proof of the counterexample.}, author = {Avvakumov, Sergey and Mabillard, Isaac and Skopenkov, A. and Wagner, Uli}, booktitle = {arXiv}, title = {{Eliminating higher-multiplicity intersections, III. Codimension 2}}, year = {2015}, } @misc{5441, abstract = {We study algorithmic questions for concurrent systems where the transitions are labeled from a complete, closed semiring, and path properties are algebraic with semiring operations. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural problems that arise in program analysis. We consider that each component of the concurrent system is a graph with constant treewidth, a property satisfied by the controlflow graphs of most programs. We allow for multiple possible queries, which arise naturally in demand driven dataflow analysis. The study of multiple queries allows us to consider the tradeoff between the resource usage of the one-time preprocessing and for each individual query. The traditional approach constructs the product graph of all components and applies the best-known graph algorithm on the product. In this approach, even the answer to a single query requires the transitive closure (i.e., the results of all possible queries), which provides no room for tradeoff between preprocessing and query time. Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time, each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time. 
Furthermore, we establish conditional optimality results showing that the worst-case running time of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms (i.e., improving the worst-case bound for the shortest path problem in general graphs). Preliminary experimental results show that our algorithms perform favorably on several benchmarks.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Goharshady, Amir and Pavlogiannis, Andreas}, issn = {2664-1690}, pages = {24}, publisher = {IST Austria}, title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}}, doi = {10.15479/AT:IST-2015-340-v1-1}, year = {2015}, } @misc{5442, abstract = {We study algorithmic questions for concurrent systems where the transitions are labeled from a complete, closed semiring, and path properties are algebraic with semiring operations. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural properties that arise in program analysis. We consider that each component of the concurrent system is a graph with constant treewidth, and it is known that the controlflow graphs of most programs have constant treewidth. We allow for multiple possible queries, which arise naturally in demand driven dataflow analysis problems (e.g., alias analysis). The study of multiple queries allows us to consider the tradeoff between the resource usage of the \emph{one-time} preprocessing and for \emph{each individual} query. The traditional approaches construct the product graph of all components and apply the best-known graph algorithm on the product. In the traditional approach, even the answer to a single query requires the transitive closure computation (i.e., the results of all possible queries), which provides no room for tradeoff between preprocessing and query time. Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time, each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time. Furthermore, we establish conditional optimality results that show that the worst-case running times of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms (such as improving the worst-case bounds for the shortest path problem in general graphs whose current best-known bound has not been improved in five decades). Finally, we provide a prototype implementation of our algorithms which significantly outperforms the existing algorithmic methods on several benchmarks.}, author = {Anonymous, 1 and Anonymous, 2 and Anonymous, 3 and Anonymous, 4}, issn = {2664-1690}, pages = {22}, publisher = {IST Austria}, title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}}, year = {2015}, } @inproceedings{1689, abstract = {We consider the problem of computing the set of initial states of a dynamical system such that there exists a control strategy to ensure that the trajectories satisfy a temporal logic specification with probability 1 (almost-surely). 
We focus on discrete-time, stochastic linear dynamics and specifications given as formulas of the Generalized Reactivity(1) fragment of Linear Temporal Logic over linear predicates in the states of the system. We propose a solution based on iterative abstraction-refinement, and turn-based 2-player probabilistic games. While the theoretical guarantee of our algorithm after any finite number of iterations is only a partial solution, we show that if our algorithm terminates, then the result is the set of satisfying initial states. Moreover, for any (partial) solution our algorithm synthesizes witness control strategies to ensure almost-sure satisfaction of the temporal logic specification. We demonstrate our approach on an illustrative case study.}, author = {Svoreňová, Mária and Kretinsky, Jan and Chmelik, Martin and Chatterjee, Krishnendu and Cěrná, Ivana and Belta, Cǎlin}, booktitle = {Proceedings of the 18th International Conference on Hybrid Systems: Computation and Control}, location = {Seattle, WA, United States}, pages = {259 -- 268}, publisher = {ACM}, title = {{Temporal logic control for stochastic linear systems using abstraction refinement of probabilistic games}}, doi = {10.1145/2728606.2728608}, year = {2015}, } @inproceedings{1729, abstract = {We present a computer-aided programming approach to concurrency. The approach allows programmers to program assuming a friendly, non-preemptive scheduler, and our synthesis procedure inserts synchronization to ensure that the final program works even with a preemptive scheduler. The correctness specification is implicit, inferred from the non-preemptive behavior. Let us consider sequences of calls that the program makes to an external interface. The specification requires that any such sequence produced under a preemptive scheduler should be included in the set of such sequences produced under a non-preemptive scheduler. The solution is based on a finitary abstraction, an algorithm for bounded language inclusion modulo an independence relation, and rules for inserting synchronization. We apply the approach to device-driver programming, where the driver threads call the software interface of the device and the API provided by the operating system. Our experiments demonstrate that our synthesis method is precise and efficient, and, since it does not require explicit specifications, is more practical than the conventional approach based on user-provided assertions.}, author = {Cerny, Pavol and Clarke, Edmund and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Samanta, Roopsha and Tarrach, Thorsten}, location = {San Francisco, CA, United States}, pages = {180 -- 197}, publisher = {Springer}, title = {{From non-preemptive to preemptive scheduling using synchronization synthesis}}, doi = {10.1007/978-3-319-21668-3_11}, volume = {9207}, year = {2015}, } @inproceedings{1835, abstract = {The behaviour of gene regulatory networks (GRNs) is typically analysed using simulation-based statistical testing-like methods. In this paper, we demonstrate that we can replace this approach by a formal verification-like method that gives higher assurance and scalability. We focus on Wagner’s weighted GRN model with varying weights, which is used in evolutionary biology. In the model, weight parameters represent the gene interaction strength that may change due to genetic mutations. For a property of interest, we synthesise the constraints over the parameter space that represent the set of GRNs satisfying the property. 
We experimentally show that our parameter synthesis procedure computes the mutational robustness of GRNs –an important problem of interest in evolutionary biology– more efficiently than the classical simulation method. We specify the property in linear temporal logics. We employ symbolic bounded model checking and SMT solving to compute the space of GRNs that satisfy the property, which amounts to synthesizing a set of linear constraints on the weights.}, author = {Giacobbe, Mirco and Guet, Calin C and Gupta, Ashutosh and Henzinger, Thomas A and Paixao, Tiago and Petrov, Tatjana}, location = {London, United Kingdom}, pages = {469 -- 483}, publisher = {Springer}, title = {{Model checking gene regulatory networks}}, doi = {10.1007/978-3-662-46681-0_47}, volume = {9035}, year = {2015}, } @article{1509, abstract = {The Auxin Binding Protein1 (ABP1) has been identified based on its ability to bind auxin with high affinity and studied for a long time as a prime candidate for the extracellular auxin receptor responsible for mediating in particular the fast non-transcriptional auxin responses. However, the contradiction between the embryo-lethal phenotypes of the originally described Arabidopsis T-DNA insertional knock-out alleles (abp1-1 and abp1-1s) and the wild type-like phenotypes of other recently described loss-of-function alleles (abp1-c1 and abp1-TD1) questions the biological importance of ABP1 and relevance of the previous genetic studies. Here we show that there is no hidden copy of the ABP1 gene in the Arabidopsis genome but the embryo-lethal phenotypes of abp1-1 and abp1-1s alleles are very similar to the knock-out phenotypes of the neighboring gene, BELAYA SMERT (BSM). Furthermore, the allelic complementation test between bsm and abp1 alleles shows that the embryo-lethality in the abp1-1 and abp1-1s alleles is caused by the off-target disruption of the BSM locus by the T-DNA insertions. This clarifies the controversy of different phenotypes among published abp1 knock-out alleles and asks for reflections on the developmental role of ABP1.}, author = {Michalko, Jaroslav and Dravecka, Marta and Bollenbach, Tobias and Friml, Jirí}, journal = {F1000 Research }, publisher = {F1000 Research}, title = {{Embryo-lethal phenotypes in early abp1 mutants are due to disruption of the neighboring BSM gene}}, doi = {10.12688/f1000research.7143.1}, volume = {4}, year = {2015}, } @article{1681, abstract = {In many social situations, individuals endeavor to find the single best possible partner, but are constrained to evaluate the candidates in sequence. Examples include the search for mates, economic partnerships, or any other long-term ties where the choice to interact involves two parties. Surprisingly, however, previous theoretical work on mutual choice problems focuses on finding equilibrium solutions, while ignoring the evolutionary dynamics of decisions. Empirically, this may be of high importance, as some equilibrium solutions can never be reached unless the population undergoes radical changes and a sufficient number of individuals change their decisions simultaneously. To address this question, we apply a mutual choice sequential search problem in an evolutionary game-theoretical model that allows one to find solutions that are favored by evolution. As an example, we study the influence of sequential search on the evolutionary dynamics of cooperation. 
For this, we focus on the classic snowdrift game and the prisoner’s dilemma game.}, author = {Priklopil, Tadeas and Chatterjee, Krishnendu}, issn = {2073-4336}, journal = {Games}, number = {4}, pages = {413 -- 437}, publisher = {MDPI}, title = {{Evolution of decisions in population games with sequentially searching individuals}}, doi = {10.3390/g6040413}, volume = {6}, year = {2015}, } @article{1655, abstract = {Quantifying behaviors of robots which were generated autonomously from task-independent objective functions is an important prerequisite for objective comparisons of algorithms and movements of animals. The temporal sequence of such a behavior can be considered as a time series and hence complexity measures developed for time series are natural candidates for its quantification. The predictive information and the excess entropy are such complexity measures. They measure the amount of information the past contains about the future and thus quantify the nonrandom structure in the temporal sequence. However, when using these measures for systems with continuous states one has to deal with the fact that their values will depend on the resolution with which the system's states are observed. For deterministic systems both measures will diverge with increasing resolution. We therefore propose a new decomposition of the excess entropy into resolution-dependent and resolution-independent parts and discuss how they depend on the dimensionality of the dynamics, correlations and the noise level. For the practical estimation we propose to use estimates based on the correlation integral instead of the direct estimation of the mutual information based on next-neighbor statistics because the latter allows less control of the scale dependencies. Using our algorithm we are able to show how autonomous learning generates behavior of increasing complexity with increasing learning duration.}, author = {Martius, Georg S and Olbrich, Eckehard}, journal = {Entropy}, number = {10}, pages = {7266 -- 7297}, publisher = {MDPI}, title = {{Quantifying emergent behavior of autonomous robots}}, doi = {10.3390/e17107266}, volume = {17}, year = {2015}, } @article{1834, abstract = {A large body of evidence has demonstrated that volatile anesthetics affect hippocampal neurogenesis and neurocognitive functions, and most studies showed impairment at anesthetic doses. Here, we investigated the effect of low-dose (1.8%) sevoflurane on hippocampal neurogenesis and dentate gyrus-dependent learning. Neonatal rats at postnatal day 4 to 6 (P4-6) were treated with 1.8% sevoflurane for 6 hours. Neurogenesis was quantified by bromodeoxyuridine labeling and electrophysiology recording. Four and seven weeks after treatment, the Morris water maze and contextual-fear discrimination learning tests were performed to determine the influence on spatial learning and pattern separation. A 6-hour treatment with 1.8% sevoflurane promoted hippocampal neurogenesis and increased the survival of newborn cells and the proportion of immature granular cells in the dentate gyrus of neonatal rats. Sevoflurane-treated rats performed better during the training days of the Morris water maze test and in the contextual-fear discrimination learning test.
These results suggest that a subanesthetic dose of sevoflurane promotes hippocampal neurogenesis in neonatal rats and facilitates their performance in dentate gyrus-dependent learning tasks.}, author = {Chen, Chong and Wang, Chao and Zhao, Xuan and Zhou, Tao and Xu, Dao and Wang, Zhi and Wang, Ying}, journal = {ASN Neuro}, number = {2}, publisher = {SAGE Publications}, title = {{Low-dose sevoflurane promotes hippocampal neurogenesis and facilitates the development of dentate gyrus-dependent learning in neonatal rats}}, doi = {10.1177/1759091415575845}, volume = {7}, year = {2015}, } @article{1635, abstract = {We calculate a Ricci curvature lower bound for some classical examples of random walks, namely, a chain on a slice of the n-dimensional discrete cube (the so-called Bernoulli-Laplace model) and the random transposition shuffle of the symmetric group of permutations on n letters.}, author = {Erbar, Matthias and Maas, Jan and Tetali, Prasad}, journal = {Annales de la faculté des sciences de Toulouse}, number = {4}, pages = {781 -- 800}, publisher = {Faculté des sciences de Toulouse}, title = {{Discrete Ricci curvature bounds for Bernoulli-Laplace and random transposition models}}, doi = {10.5802/afst.1464}, volume = {24}, year = {2015}, } @article{14303, abstract = {Scaffolded DNA origami enables the fabrication of a variety of complex nanostructures that promise utility in diverse fields of application, ranging from biosensing over advanced therapeutics to metamaterials. The broad applicability of DNA origami as a material beyond the level of proof-of-concept studies critically depends, among other factors, on the availability of large amounts of pure single-stranded scaffold DNA. Here, we present a method for the efficient production of M13 bacteriophage-derived genomic DNA using high-cell-density fermentation of Escherichia coli in stirred-tank bioreactors. We achieve phage titers of up to 1.6 × 10^14 plaque-forming units per mL. Downstream processing yields up to 410 mg of high-quality single-stranded DNA per one liter reaction volume, thus upgrading DNA origami-based nanotechnology from the milligram to the gram scale.}, author = {Kick, B and Praetorius, Florian M and Dietz, H and Weuster-Botz, D}, issn = {1530-6992}, journal = {Nano Letters}, number = {7}, pages = {4672--4676}, publisher = {ACS Publications}, title = {{Efficient production of single-stranded phage DNA as scaffolds for DNA origami}}, doi = {10.1021/acs.nanolett.5b01461}, volume = {15}, year = {2015}, } @inproceedings{1603, abstract = {For deterministic systems, a counterexample to a property can simply be an error trace, whereas counterexamples in probabilistic systems are necessarily more complex. For instance, a set of erroneous traces with a sufficient cumulative probability mass can be used. Since these objects are too large to understand and manipulate, compact representations such as subchains have been considered. In the case of probabilistic systems with non-determinism, the situation is even more complex. While a subchain for a given strategy (or scheduler, resolving non-determinism) is a straightforward choice, we take a different approach. Instead, we focus on the strategy itself, and extract the most important decisions it makes, and present its succinct representation. The key tools we employ to achieve this are (1) introducing a concept of importance of a state w.r.t. the strategy, and (2) learning using decision trees. There are three main consequent advantages of our approach.
Firstly, it exploits the quantitative information on states, stressing the more important decisions. Secondly, it leads to a greater variability and degree of freedom in representing the strategies. Thirdly, the representation uses a self-explanatory data structure. In summary, our approach produces more succinct and more explainable strategies, as opposed to e.g. binary decision diagrams. Finally, our experimental results show that we can extract several rules describing the strategy even for very large systems that do not fit in memory, and based on the rules explain the erroneous behaviour.}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Chmelik, Martin and Fellner, Andreas and Kretinsky, Jan}, location = {San Francisco, CA, United States}, pages = {158 -- 177}, publisher = {Springer}, title = {{Counterexample explanation by learning small strategies in Markov decision processes}}, doi = {10.1007/978-3-319-21690-4_10}, volume = {9206}, year = {2015}, } @misc{5549, abstract = {This repository contains the experimental part of the CAV 2015 publication Counterexample Explanation by Learning Small Strategies in Markov Decision Processes. We extended the probabilistic model checker PRISM to represent strategies of Markov Decision Processes as Decision Trees. The archive contains a java executable version of the extended tool (prism_dectree.jar) together with a few examples of the PRISM benchmark library. To execute the program, please have a look at the README.txt, which provides instructions and further information on the archive. The archive contains scripts that (if run often enough) reproduces the data presented in the publication.}, author = {Fellner, Andreas}, keywords = {Markov Decision Process, Decision Tree, Probabilistic Verification, Counterexample Explanation}, publisher = {Institute of Science and Technology Austria}, title = {{Experimental part of CAV 2015 publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes}}, doi = {10.15479/AT:ISTA:28}, year = {2015}, } @inproceedings{1512, abstract = {We show that very weak topological assumptions are enough to ensure the existence of a Helly-type theorem. More precisely, we show that for any non-negative integers b and d there exists an integer h(b,d) such that the following holds. If F is a finite family of subsets of R^d such that the ith reduced Betti number (with Z_2 coefficients in singular homology) of the intersection of any proper subfamily G of F is at most b for every non-negative integer i less or equal to (d-1)/2, then F has Helly number at most h(b,d). These topological conditions are sharp: not controlling any of these first Betti numbers allow for families with unbounded Helly number. Our proofs combine homological non-embeddability results with a Ramsey-based approach to build, given an arbitrary simplicial complex K, some well-behaved chain map from C_*(K) to C_*(R^d). 
Both techniques are of independent interest.}, author = {Goaoc, Xavier and Paták, Pavel and Patakova, Zuzana and Tancer, Martin and Wagner, Uli}, location = {Eindhoven, Netherlands}, pages = {507 -- 521}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Bounding Helly numbers via Betti numbers}}, doi = {10.4230/LIPIcs.SOCG.2015.507}, volume = {34}, year = {2015}, } @article{271, abstract = {We show that a non-singular integral form of degree d is soluble non-trivially over the integers if and only if it is soluble non-trivially over the reals and the p-adic numbers, provided that the form has at least (d-\sqrt{d}/2)2^d variables. This improves on a longstanding result of Birch.}, author = {Browning, Timothy D and Prendiville, Sean}, issn = {0075-4102}, journal = {Journal fur die Reine und Angewandte Mathematik}, number = {731}, pages = {203 -- 234}, publisher = {Walter de Gruyter}, title = {{Improvements in Birch's theorem on forms in many variables}}, doi = {10.1515/crelle-2014-0122}, volume = {2017}, year = {2015}, } @inproceedings{1675, abstract = {Proofs of work (PoW) have been suggested by Dwork and Naor (Crypto’92) as protection to a shared resource. The basic idea is to ask the service requestor to dedicate some non-trivial amount of computational work to every request. The original applications included prevention of spam and protection against denial of service attacks. More recently, PoWs have been used to prevent double spending in the Bitcoin digital currency system. In this work, we put forward an alternative concept for PoWs - so-called proofs of space (PoS), where a service requestor must dedicate a significant amount of disk space as opposed to computation. We construct secure PoS schemes in the random oracle model (with one additional mild assumption required for the proof to go through), using graphs with high “pebbling complexity” and Merkle hash-trees. We discuss some applications, including follow-up work where a decentralized digital currency scheme called Spacecoin is constructed that uses PoS (instead of wasteful PoW like in Bitcoin) to prevent double spending. The main technical contribution of this work is the construction of (directed, loop-free) graphs on N vertices with in-degree O(log logN) such that even if one places Θ(N) pebbles on the nodes of the graph, there’s a constant fraction of nodes that needs Θ(N) steps to be pebbled (where in every step one can put a pebble on a node if all its parents have a pebble).}, author = {Dziembowski, Stefan and Faust, Sebastian and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z}, booktitle = {35th Annual Cryptology Conference}, isbn = {9783662479995}, issn = {0302-9743}, location = {Santa Barbara, CA, United States}, pages = {585 -- 605}, publisher = {Springer}, title = {{Proofs of space}}, doi = {10.1007/978-3-662-48000-7_29}, volume = {9216}, year = {2015}, } @article{15160, abstract = {The circadian clock orchestrates global changes in transcriptional regulation on a daily basis via the bHLH-PAS transcription factor CLOCK:BMAL1. Pathways driven by other bHLH-PAS transcription factors have a homologous repressor that modulates activity on a tissue-specific basis, but none have been identified for CLOCK:BMAL1. We show here that the cancer/testis antigen PASD1 fulfills this role to suppress circadian rhythms. PASD1 is evolutionarily related to CLOCK and interacts with the CLOCK:BMAL1 complex to repress transcriptional activation. 
Expression of PASD1 is restricted to germline tissues in healthy individuals but can be induced in cells of somatic origin upon oncogenic transformation. Reducing PASD1 in human cancer cells significantly increases the amplitude of transcriptional oscillations to generate more robust circadian rhythms. Our results describe a function for a germline-specific protein in regulation of the circadian clock and provide a molecular link from oncogenic transformation to suppression of circadian rhythms.}, author = {Michael, Alicia Kathleen and Harvey, Stacy L. and Sammons, Patrick J. and Anderson, Amanda P. and Kopalle, Hema M. and Banham, Alison H. and Partch, Carrie L.}, issn = {1097-2765}, journal = {Molecular Cell}, keywords = {Cell Biology, Molecular Biology}, number = {5}, pages = {743--754}, publisher = {Elsevier}, title = {{Cancer/Testis antigen PASD1 silences the circadian clock}}, doi = {10.1016/j.molcel.2015.03.031}, volume = {58}, year = {2015}, } @article{15159, abstract = {It is widely recognized that BMAL1 is an essential subunit of the primary transcription factor that drives rhythmic circadian transcription in the nucleus. In a surprising turn, Lipton et al. now show that BMAL1 rhythmically interacts with translational machinery in the cytosol to stimulate protein synthesis in response to mTOR signaling.}, author = {Michael, Alicia Kathleen and Asimgil, Hande and Partch, Carrie L.}, issn = {0968-0004}, journal = {Trends in Biochemical Sciences}, keywords = {Molecular Biology, Biochemistry}, number = {9}, pages = {489--490}, publisher = {Elsevier}, title = {{Cytosolic BMAL1 moonlights as a translation factor}}, doi = {10.1016/j.tibs.2015.07.006}, volume = {40}, year = {2015}, } @article{1619, abstract = {The emergence of drug resistant pathogens is a serious public health problem. It is a long-standing goal to predict rates of resistance evolution and design optimal treatment strategies accordingly. To this end, it is crucial to reveal the underlying causes of drug-specific differences in the evolutionary dynamics leading to resistance. However, it remains largely unknown why the rates of resistance evolution via spontaneous mutations and the diversity of mutational paths vary substantially between drugs. Here we comprehensively quantify the distribution of fitness effects (DFE) of mutations, a key determinant of evolutionary dynamics, in the presence of eight antibiotics representing the main modes of action. Using precise high-throughput fitness measurements for genome-wide Escherichia coli gene deletion strains, we find that the width of the DFE varies dramatically between antibiotics and, contrary to conventional wisdom, for some drugs the DFE width is lower than in the absence of stress. We show that this previously underappreciated divergence in DFE width among antibiotics is largely caused by their distinct drug-specific dose-response characteristics. Unlike the DFE, the magnitude of the changes in tolerated drug concentration resulting from genome-wide mutations is similar for most drugs but exceptionally small for the antibiotic nitrofurantoin, i.e., mutations generally have considerably smaller resistance effects for nitrofurantoin than for other drugs. A population genetics model predicts that resistance evolution for drugs with this property is severely limited and confined to reproducible mutational paths. We tested this prediction in laboratory evolution experiments using the “morbidostat”, a device for evolving bacteria in well-controlled drug environments. 
Nitrofurantoin resistance indeed evolved extremely slowly via reproducible mutations—an almost paradoxical behavior since this drug causes DNA damage and increases the mutation rate. Overall, we identified novel quantitative characteristics of the evolutionary landscape that provide the conceptual foundation for predicting the dynamics of drug resistance evolution.}, author = {Chevereau, Guillaume and Dravecka, Marta and Batur, Tugce and Guvenek, Aysegul and Ayhan, Dilay and Toprak, Erdal and Bollenbach, Mark Tobias}, journal = {PLoS Biology}, number = {11}, publisher = {Public Library of Science}, title = {{Quantifying the determinants of evolutionary dynamics leading to drug resistance}}, doi = {10.1371/journal.pbio.1002299}, volume = {13}, year = {2015}, } @article{10382, abstract = {Protein oligomers have been implicated as toxic agents in a wide range of amyloid-related diseases. However, it has remained unsolved whether the oligomers are a necessary step in the formation of amyloid fibrils or just a dangerous byproduct. Analogously, it has not been resolved if the amyloid nucleation process is a classical one-step nucleation process or a two-step process involving prenucleation clusters. We use coarse-grained computer simulations to study the effect of nonspecific attractions between peptides on the primary nucleation process underlying amyloid fibrillization. We find that, for peptides that do not attract, the classical one-step nucleation mechanism is possible but only at nonphysiologically high peptide concentrations. At low peptide concentrations, which mimic the physiologically relevant regime, attractive interpeptide interactions are essential for fibril formation. Nucleation then inevitably takes place through a two-step mechanism involving prefibrillar oligomers. We show that oligomers not only help peptides meet each other but also, create an environment that facilitates the conversion of monomers into the β-sheet–rich form characteristic of fibrils. Nucleation typically does not proceed through the most prevalent oligomers but through an oligomer size that is only observed in rare fluctuations, which is why such aggregates might be hard to capture experimentally. Finally, we find that the nucleation of amyloid fibrils cannot be described by classical nucleation theory: in the two-step mechanism, the critical nucleus size increases with increases in both concentration and interpeptide interactions, which is in direct contrast with predictions from classical nucleation theory.}, author = {Šarić, Anđela and Chebaro, Yassmine C. and Knowles, Tuomas P. J. and Frenkel, Daan}, issn = {1091-6490}, journal = {Proceedings of the National Academy of Sciences}, keywords = {multidisciplinary}, number = {50}, pages = {17869--17874}, publisher = {National Academy of Sciences}, title = {{Crucial role of nonspecific interactions in amyloid nucleation}}, doi = {10.1073/pnas.1410159111}, volume = {111}, year = {2014}, } @article{10383, abstract = {We use numerical simulations to compute the equation of state of a suspension of spherical self-propelled nanoparticles in two and three dimensions. We study in detail the effect of excluded volume interactions and confinement as a function of the system's temperature, concentration, and strength of the propulsion. We find a striking nonmonotonic dependence of the pressure on the temperature and provide simple scaling arguments to predict and explain the occurrence of such anomalous behavior. 
We explicitly show how our results have important implications for the effective forces on passive components suspended in a bath of active particles.}, author = {Mallory, S. A. and Šarić, Anđela and Valeriani, C. and Cacciuto, A.}, issn = {1550-2376}, journal = {Physical Review E}, number = {5}, publisher = {American Physical Society}, title = {{Anomalous thermomechanical properties of a self-propelled colloidal fluid}}, doi = {10.1103/physreve.89.052303}, volume = {89}, year = {2014}, } @article{1058, abstract = {Diffraction-unlimited far-field super-resolution fluorescence (nanoscopy) methods typically rely on transiently transferring fluorophores between two states, whereby this transfer is usually laid out as a switch. However, depending on whether this is induced in a spatially controlled manner using a pattern of light (coordinate-targeted) or stochastically on a single-molecule basis, specific requirements on the fluorophores are imposed. Therefore, the fluorophores are usually utilized just for one class of methods only. In this study we demonstrate that the reversibly switchable fluorescent protein Dreiklang enables live-cell recordings in both spatially controlled and stochastic modes. We show that the Dreiklang chromophore entails three different light-induced switching mechanisms, namely a reversible photochemical one, off-switching by stimulated emission, and a reversible transfer to a long-lived dark state from the S1 state, all of which can be utilized to overcome the diffraction barrier. We also find that for the single-molecule- based stochastic GSDIM approach (ground-state depletion followed by individual molecule return), Dreiklang provides a larger number of on-off localization events as compared to its progenitor Citrine. Altogether, Dreiklang is a versatile probe for essentially all popular forms of live-cell fluorescence nanoscopy.}, author = {Jensen, Nickels and Danzl, Johann G and Willig, Katrin and Lavoie Cardinal, Flavie and Brakemann, Tanja and Hell, Stefan and Jakobs, Stefan}, journal = {ChemPhysChem}, number = {4}, pages = {756 -- 762}, publisher = {Wiley-Blackwell}, title = {{Coordinate-targeted and coordinate-stochastic super-resolution microscopy with the reversibly switchable fluorescent protein dreiklang}}, doi = {10.1002/cphc.201301034}, volume = {15}, year = {2014}, } @article{10815, abstract = {In the last several decades, developmental biology has clarified the molecular mechanisms of embryogenesis and organogenesis. In particular, it has demonstrated that the “tool-kit genes” essential for regulating developmental processes are not only highly conserved among species, but are also used as systems at various times and places in an organism to control distinct developmental events. Therefore, mutations in many of these tool-kit genes may cause congenital diseases involving morphological abnormalities. This link between genes and abnormal morphological phenotypes underscores the importance of understanding how cells behave and contribute to morphogenesis as a result of gene function. Recent improvements in live imaging and in quantitative analyses of cellular dynamics will advance our understanding of the cellular pathogenesis of congenital diseases associated with aberrant morphologies. 
In these studies, it is critical to select an appropriate model organism for the particular phenomenon of interest.}, author = {Hashimoto, Masakazu and Morita, Hitoshi and Ueno, Naoto}, issn = {0914-3505}, journal = {Congenital Anomalies}, keywords = {Developmental Biology, Embryology, General Medicine, Pediatrics, Perinatology, and Child Health}, number = {1}, pages = {1--7}, publisher = {Wiley}, title = {{Molecular and cellular mechanisms of development underlying congenital diseases}}, doi = {10.1111/cga.12039}, volume = {54}, year = {2014}, } @book{10811, abstract = {Auxin is an important signaling compound in plants and vital for plant development and growth. The present book, Auxin and its Role in Plant Development, provides the reader with detailed and comprehensive insight into the functioning of the molecule on the whole and specifically in plant development. In the first part, the functioning, metabolism and signaling pathways of auxin in plants are explained, the second part depicts the specific role of auxin in plant development and the third part describes the interaction and functioning of the signaling compound upon stimuli of the environment. Each chapter is written by international experts in the respective field and designed for scientists and researchers in plant biology, plant development and cell biology to summarize the recent progress in understanding the role of auxin and suggest future perspectives for auxin research.}, editor = {Zažímalová, Eva and Petrášek, Jan and Benková, Eva}, isbn = {9783709115251}, pages = {444}, publisher = {Springer Nature}, title = {{Auxin and Its Role in Plant Development}}, doi = {10.1007/978-3-7091-1526-8}, year = {2014}, } @inproceedings{10884, abstract = {We revisit the parameterized model checking problem for token-passing systems and specifications in indexed CTL*\X. Emerson and Namjoshi (1995, 2003) have shown that parameterized model checking of indexed CTL*\X in uni-directional token rings can be reduced to checking rings up to some cutoff size. Clarke et al. (2004) have shown a similar result for general topologies and indexed LTL\X, provided processes cannot choose the directions for sending or receiving the token. We unify and substantially extend these results by systematically exploring fragments of indexed CTL*\X with respect to general topologies. For each fragment we establish whether a cutoff exists, and for some concrete topologies, such as rings, cliques and stars, we infer small cutoffs. Finally, we show that the problem becomes undecidable, and thus no cutoffs exist, if processes are allowed to choose the directions in which they send or from which they receive the token.}, author = {Aminof, Benjamin and Jacobs, Swen and Khalimov, Ayrat and Rubin, Sasha}, booktitle = {Verification, Model Checking, and Abstract Interpretation}, isbn = {9783642540127}, issn = {1611-3349}, location = {San Diego, CA, United States}, pages = {262--281}, publisher = {Springer Nature}, title = {{Parameterized model checking of token-passing systems}}, doi = {10.1007/978-3-642-54013-4_15}, volume = {8318}, year = {2014}, } @inbook{10893, abstract = {Saddle periodic orbits are an essential and stable part of the topological skeleton of a 3D vector field. Nevertheless, there is currently no efficient algorithm to robustly extract these features. In this chapter, we present a novel technique to extract saddle periodic orbits.
Exploiting the analytic properties of such an orbit, we propose a scalar measure based on the finite-time Lyapunov exponent (FTLE) that indicates its presence. Using persistent homology, we can then extract the robust cycles of this field. These cycles thereby represent the saddle periodic orbits of the given vector field. We discuss the different existing FTLE approximation schemes regarding their applicability to this specific problem and propose an adapted version of FTLE called Normalized Velocity Separation. Finally, we evaluate our method using simple analytic vector field data.}, author = {Kasten, Jens and Reininghaus, Jan and Reich, Wieland and Scheuermann, Gerik}, booktitle = {Topological Methods in Data Analysis and Visualization III }, editor = {Bremer, Peer-Timo and Hotz, Ingrid and Pascucci, Valerio and Peikert, Ronald}, isbn = {9783319040981}, issn = {2197-666X}, pages = {55--69}, publisher = {Springer}, title = {{Toward the extraction of saddle periodic orbits}}, doi = {10.1007/978-3-319-04099-8_4}, volume = {1}, year = {2014}, } @article{11080, abstract = {The spindle assembly checkpoint prevents separation of sister chromatids until each kinetochore is attached to the mitotic spindle. Rodriguez-Bravo et al. report that the nuclear pore complex scaffolds spindle assembly checkpoint signaling in interphase, providing a store of inhibitory signals that limits the speed of the subsequent mitosis.}, author = {Buchwalter, Abigail and HETZER, Martin W}, issn = {0092-8674}, journal = {Cell}, keywords = {General Biochemistry, Genetics and Molecular Biology}, number = {5}, pages = {868--869}, publisher = {Elsevier}, title = {{Nuclear pores set the speed limit for mitosis}}, doi = {10.1016/j.cell.2014.02.004}, volume = {156}, year = {2014}, } @article{11082, abstract = {The nuclear pore complex (NPC) plays a critical role in gene expression by mediating import of transcription regulators into the nucleus and export of RNA transcripts to the cytoplasm. Emerging evidence suggests that in addition to mediating transport, a subset of nucleoporins (Nups) engage in transcriptional activation and elongation at genomic loci that are not associated with NPCs. The underlying mechanism and regulation of Nup mobility on and off nuclear pores remain unclear. Here we show that Nup50 is a mobile Nup with a pronounced presence both at the NPC and in the nucleoplasm that can move between these different localizations. Strikingly, the dynamic behavior of Nup50 in both locations is dependent on active transcription by RNA polymerase II and requires the N-terminal half of the protein, which contains importin α– and Nup153-binding domains. However, Nup50 dynamics are independent of importin α, Nup153, and Nup98, even though the latter two proteins also exhibit transcription-dependent mobility. Of interest, depletion of Nup50 from C2C12 myoblasts does not affect cell proliferation but inhibits differentiation into myotubes. Taken together, our results suggest a transport-independent role for Nup50 in chromatin biology that occurs away from the NPC.}, author = {Buchwalter, Abigail L. 
and Liang, Yun and HETZER, Martin W}, issn = {1059-1524}, journal = {Molecular Biology of the Cell}, keywords = {Cell Biology, Molecular Biology}, number = {16}, pages = {2472--2484}, publisher = {American Society for Cell Biology}, title = {{Nup50 is required for cell differentiation and exhibits transcription-dependent dynamics}}, doi = {10.1091/mbc.e14-04-0865}, volume = {25}, year = {2014}, } @article{11081, abstract = {In eukaryotic cells the nuclear genome is enclosed by the nuclear envelope (NE). In metazoans, the NE breaks down in mitosis and it has been assumed that the physical barrier separating nucleoplasm and cytoplasm remains intact during the rest of the cell cycle and cell differentiation. However, recent studies suggest that nonmitotic NE remodeling plays a critical role in development, virus infection, laminopathies, and cancer. Although the mechanisms underlying these NE restructuring events are currently being defined, one common theme is activation of protein kinase C family members in the interphase nucleus to disrupt the nuclear lamina, demonstrating the importance of the lamina in maintaining nuclear integrity.}, author = {Hatch, Emily and HETZER, Martin W}, issn = {1540-8140}, journal = {Journal of Cell Biology}, keywords = {Cell Biology}, number = {2}, pages = {133--141}, publisher = {Rockefeller University Press}, title = {{Breaching the nuclear envelope in development and disease}}, doi = {10.1083/jcb.201402003}, volume = {205}, year = {2014}, } @article{11583, abstract = {Candidate galaxies at redshifts of z ∼ 10 are now being found in extremely deep surveys, probing very small areas. As a consequence, candidates are very faint, making spectroscopic confirmation practically impossible. In order to overcome such limitations, we have undertaken the CF-HiZELS survey, which is a large-area, medium-depth near-infrared narrow-band survey targeted at z = 8.8 Lyman α (Lyα) emitters (LAEs) and covering 10 deg^2 in part of the SSA22 field with the Canada–France–Hawaii Telescope (CFHT). We surveyed a comoving volume of 4.7 × 10^6 Mpc^3 to a Lyα luminosity limit of 6.3 × 10^43 erg s^-1. We look for Lyα candidates by applying the following criteria: (i) clear emission-line source, (ii) no optical detections (ugriz from CFHTLS), (iii) no visible detection in the optical stack (ugriz > 27), (iv) visually checked reliable NB_J and J detections and (v) J − K ≤ 0. We compute photometric redshifts and remove a significant amount of dusty lower redshift line-emitters at z ∼ 1.4 or 2.2. A total of 13 Lyα candidates were found, of which two are marked as strong candidates, but the majority have very weak constraints on their spectral energy distributions. Using follow-up observations with SINFONI/VLT, we are able to exclude the most robust candidates as LAEs. We put a strong constraint on the Lyα luminosity function at z ∼ 9 and make realistic predictions for ongoing and future surveys. Our results show that surveys for the highest redshift LAEs are susceptible to multiple contaminations and that spectroscopic follow-up is absolutely necessary.}, author = {Matthee, Jorryt J and Sobral, David and Swinbank, A. M. and Smail, Ian and Best, P. N.
and Kim, Jae-Woo and Franx, Marijn and Milvang-Jensen, Bo and Fynbo, Johan}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: high-redshift, cosmology: observations, dark ages, reionization, first stars}, number = {3}, pages = {2375--2387}, publisher = {Oxford University Press}, title = {{A 10 deg^2 Lyman α survey at z=8.8 with spectroscopic follow-up: Strong constraints on the luminosity function and implications for other surveys}}, doi = {10.1093/mnras/stu392}, volume = {440}, year = {2014}, } @article{11582, abstract = {We have observed a sample of typical z ∼ 1 star-forming galaxies, selected from the HiZELS survey, with the new K-band Multi-Object Spectrograph (KMOS) near-infrared, multi-integral field unit instrument on the Very Large Telescope (VLT), in order to obtain their dynamics and metallicity gradients. The majority of our galaxies have a metallicity gradient consistent with being flat or negative (i.e. higher metallicity cores than outskirts). Intriguingly, we find a trend between metallicity gradient and specific star formation rate (sSFR), such that galaxies with a high sSFR tend to have relatively metal poor centres, a result which is strengthened when combined with data sets from the literature. This result appears to explain the discrepancies reported between different high-redshift studies and varying claims for evolution. From a galaxy evolution perspective, the trend we see would mean that a galaxy's sSFR is governed by the amount of metal-poor gas that can be funnelled into its core, triggered either by merging or through efficient accretion. In fact, merging may play a significant role as it is the starburst galaxies at all epochs, which have the more positive metallicity gradients. Our results may help to explain the origin of the fundamental metallicity relation, in which galaxies at a fixed mass are observed to have lower metallicities at higher star formation rates, especially if the metallicity is measured in an aperture encompassing only the central regions of the galaxy. Finally, we note that this study demonstrates the power of KMOS as an efficient instrument for large-scale resolved galaxy surveys.}, author = {Stott, John P. and Sobral, David and Swinbank, A. M. and Smail, Ian and Bower, Richard and Best, Philip N. and Sharples, Ray M. and Geach, James E. and Matthee, Jorryt J}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: abundances, galaxies: evolution, galaxies: kinematics and dynamics}, number = {3}, pages = {2695--2704}, publisher = {Oxford University Press}, title = {{A relationship between specific star formation rate and metallicity gradient within z ∼ 1 galaxies from KMOS-HiZELS}}, doi = {10.1093/mnras/stu1343}, volume = {443}, year = {2014}, } @article{11750, abstract = {We report on the magnetic properties of a hot-pressed FeSb2 sample. We find a significant increase in the magnetic susceptibility in our sample when compared with the values previously reported for the polycrystalline sample. The pronounced Curie tail at low temperature corresponds to 0.2% of Fe2+ impurities per mole. In the intrinsic conductivity region, the susceptibility due to free carriers shows thermally activated behavior and is consistent with the data reported for single crystal FeSb2.
Based on our data and analysis, while the enhanced magnetic susceptibility in our sample comes mainly from a small amount of unreacted Fe, the contribution from the enhanced carrier density due to lattice and strain defects arising from the ball milling process is also significant. Existence of an unreacted Fe phase is evidenced by small coercivity values of ~100 observed at 50 and 300 K.}, author = {Pokharel, Mani and Zhao, Huaizhou and Modic, Kimberly A and Ren, Zhifeng and Opeil, Cyril}, issn = {1941-0069}, journal = {IEEE Transactions on Magnetics}, number = {5}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Magnetic properties of hot-pressed FeSb2}}, doi = {10.1109/TMAG.2013.2292607}, volume = {50}, year = {2014}, } @inproceedings{11789, abstract = {We study a weighted online bipartite matching problem: G(V 1, V 2, E) is a weighted bipartite graph where V 1 is known beforehand and the vertices of V 2 arrive online. The goal is to match vertices of V 2 as they arrive to vertices in V 1, so as to maximize the sum of weights of edges in the matching. If assignments to V 1 cannot be changed, no bounded competitive ratio is achievable. We study the weighted online matching problem with free disposal, where vertices in V 1 can be assigned multiple times, but only get credit for the maximum weight edge assigned to them over the course of the algorithm. For this problem, the greedy algorithm is 0.5-competitive and determining whether a better competitive ratio is achievable is a well known open problem. We identify an interesting special case where the edge weights are decomposable as the product of two factors, one corresponding to each end point of the edge. This is analogous to the well studied related machines model in the scheduling literature, although the objective functions are different. For this case of decomposable edge weights, we design a 0.5664 competitive randomized algorithm in complete bipartite graphs. We show that such instances with decomposable weights are non-trivial by establishing upper bounds of 0.618 for deterministic and 0.8 for randomized algorithms. A tight competitive ratio of 1 − 1/e ≈ 0.632 was known previously for both the 0-1 case as well as the case where edge weights depend on the offline vertices only, but for these cases, reassignments cannot change the quality of the solution. Beating 0.5 for weighted matching where reassignments are necessary has been a significant challenge. We thus give the first online algorithm with competitive ratio strictly better than 0.5 for a non-trivial case of weighted matching with free disposal.}, author = {Charikar, Moses and Henzinger, Monika H and Nguyễn, Huy L.}, booktitle = {22nd Annual European Symposium on Algorithms}, isbn = {978-366244776-5}, issn = {0302-9743}, location = {Wroclaw, Poland}, pages = {260 -- 271}, publisher = {Springer Nature}, title = {{Online bipartite matching with decomposable weights}}, doi = {10.1007/978-3-662-44777-2_22}, volume = {8737}, year = {2014}, } @inproceedings{11790, abstract = {Assume a seller wants to sell a digital product in a social network where a buyer’s valuation of the item has positive network externalities from her neighbors that already have the item. The goal of the seller is to maximize his revenue. Previous work on this problem [7] studies the case where clients are offered the item in sequence and have to pay personalized prices. 
This is highly infeasible in large-scale networks such as the Facebook graph: (1) Offering items to the clients one after the other consumes a large amount of time, and (2) price-discrimination of clients could appear unfair to them and result in negative client reaction or could conflict with legal requirements. We study a setting dealing with these issues. Specifically, the item is offered in parallel to multiple clients at the same time and at the same price. This is called a round. We show that with O(log n) rounds, where n is the number of clients, a constant factor of the revenue with price discrimination can be achieved and that this is not possible with o(log n) rounds. Moreover, we show that it is APX-hard to maximize the revenue and we give constant-factor approximation algorithms for various further settings of limited price discrimination.}, author = {Cigler, Luděk and Dvořák, Wolfgang and Henzinger, Monika H and Starnberger, Martin}, booktitle = {10th International Conference of Web and Internet Economics}, issn = {0302-9743}, location = {Beijing, China}, pages = {44 -- 57}, publisher = {Springer Nature}, title = {{Limiting price discrimination when selling products with positive network externalities}}, doi = {10.1007/978-3-319-13129-0_4}, volume = {8877}, year = {2014}, } @article{118, abstract = {While the penetration of objects into granular media is well-studied, there is little understanding of how objects settle in gravities, g_eff, different from that of Earth - a scenario potentially relevant to the geomorphology of planets and asteroids and also to their exploration using man-made devices. By conducting experiments in an accelerating frame, we explore g_eff ranging from 0.4 g to 1.2 g. Surprisingly, we find that the rest depth is independent of g_eff and also that the time required for the object to come to rest scales like g_eff^{-1/2}. With discrete element modeling simulations, we reproduce the experimental results and extend the range of g_eff to objects as small as asteroids and as large as Jupiter. Our results shed light on the initial stage of sedimentation into dry granular media across a range of celestial bodies and also have implications for the design of man-made, extraterrestrial vehicles and structures. Key Points: the settling depth in granular media is independent of gravity; the settling time scales like g^{-1/2}; layering driven by granular sedimentation should be similar.}, author = {Altshuler, Ernesto and Torres, H and González-Pita, A and Sánchez-Colina, G and Pérez Penichet, Carlos and Waitukaitis, Scott R and Hidalgo, Raúl}, journal = {Geophysical Research Letters}, number = {9}, pages = {3032 -- 3037}, publisher = {Wiley-Blackwell}, title = {{Settling into dry granular media in different gravities}}, doi = {10.1002/2014GL059229}, volume = {41}, year = {2014}, } @inproceedings{11855, abstract = {The decremental single-source shortest paths (SSSP) problem concerns maintaining the distances from a given source node s to every node in an n-node m-edge graph G undergoing edge deletions. While its static counterpart can be easily solved in near-linear time, this decremental problem is much more challenging even in the undirected unweighted case. In this case, the classic O(mn) total update time of Even and Shiloach (JACM 1981) has been the fastest known algorithm for three decades.
With the loss of a (1 + ε)-approximation factor, the running time was recently improved to O(n^{2+o(1)}) by Bernstein and Roditty (SODA 2011), and more recently to O(n^{1.8+o(1)} + m^{1+o(1)}) by Henzinger, Krinninger, and Nanongkai (SODA 2014). In this paper, we finally bring the running time of this case down to near-linear: We give a (1 + ε)-approximation algorithm with O(m^{1+o(1)}) total update time, thus obtaining near-linear time. Moreover, we obtain O(m^{1+o(1)} log W) time for the weighted case, where the edge weights are integers from 1 to W. The only prior work on weighted graphs in o(mn log W) time is the O(mn^{0.986} log W)-time algorithm by Henzinger, Krinninger, and Nanongkai (STOC 2014) which works for the general weighted directed case. In contrast to the previous results which rely on maintaining a sparse emulator, our algorithm relies on maintaining a so-called sparse (d, ε)-hop set introduced by Cohen (JACM 2000) in the PRAM literature. A (d, ε)-hop set of a graph G = (V, E) is a set E' of weighted edges such that the distance between any pair of nodes in G can be (1 + ε)-approximated by their d-hop distance (given by a path containing at most d edges) on G'=(V, E∪E'). Our algorithm can maintain an (n^{o(1)}, ε)-hop set of near-linear size in near-linear time under edge deletions. It is the first of its kind to the best of our knowledge. To maintain the distances on this hop set, we develop a monotone bounded-hop Even-Shiloach tree. It results from extending and combining the monotone Even-Shiloach tree of Henzinger, Krinninger, and Nanongkai (FOCS 2013) with the bounded-hop SSSP technique of Bernstein (STOC 2013). These two new tools might be of independent interest.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {55th Annual Symposium on Foundations of Computer Science}, issn = {0272-5428}, location = {Philadelphia, PA, United States}, pages = {146--155}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Decremental single-source shortest paths on undirected graphs in near-linear total update time}}, doi = {10.1109/focs.2014.24}, year = {2014}, } @inproceedings{11870, abstract = {We consider dynamic algorithms for maintaining Single-Source Reachability (SSR) and approximate Single-Source Shortest Paths (SSSP) on n-node m-edge directed graphs under edge deletions (decremental algorithms). The previous fastest algorithm for SSR and SSSP goes back three decades to Even and Shiloach (JACM 1981); it has O(1) query time and O(mn) total update time (i.e., linear amortized update time if all edges are deleted). This algorithm serves as a building block for several other dynamic algorithms. The question whether its total update time can be improved is a major, long standing, open problem. In this paper, we answer this question affirmatively. We obtain a randomized algorithm which, in a simplified form, achieves an Õ(mn^{0.984}) expected total update time for SSR and (1 + ε)-approximate SSSP, where Õ(·) hides poly log n. We also extend our algorithm to achieve roughly the same running time for Strongly Connected Components (SCC), improving the algorithm of Roditty and Zwick (FOCS 2002), and an algorithm that improves the Õ(mn log W)-time algorithm of Bernstein (STOC 2013) for approximating SSSP on weighted directed graphs, where the edge weights are integers from 1 to W.
All our algorithms have constant query time in the worst case.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {46th Annual ACM Symposium on Theory of Computing}, isbn = {978-145032710-7}, issn = {0737-8017}, location = {New York, NY, United States}, publisher = {Association for Computing Machinery}, title = {{Sublinear-time decremental algorithms for single-source reachability and shortest paths on directed graphs}}, doi = {10.1145/2591796.2591869}, year = {2014}, } @inproceedings{11876, abstract = {We study dynamic (1 + ε)-approximation algorithms for the single-source shortest paths problem in an unweighted undirected n-node m-edge graph under edge deletions. The fastest algorithm for this problem is an algorithm with O(n^{2+o(1)}) total update time and constant query time by Bernstein and Roditty (SODA 2011). In this paper, we improve the total update time to O(n^{1.8+o(1)} + m^{1+o(1)}) while keeping the query time constant. This running time is essentially tight when m = Ω(n^{1.8}) since we need Ω(m) time even in the static setting. For smaller values of m, the running time of our algorithm is subquadratic, and is the first that breaks through the quadratic time barrier. In obtaining this result, we develop a fast algorithm for what we call the center cover data structure. We also make non-trivial extensions to our previous techniques called lazy-update and monotone Even-Shiloach trees (ICALP 2013 and FOCS 2013). As by-products of our new techniques, we obtain two new results for the decremental all-pairs shortest-paths problem. Our first result is the first approximation algorithm whose total update time is faster than Õ(mn) for all values of m. Our second result is a new trade-off between the total update time and the additive approximation guarantee.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {25th Annual ACM-SIAM Symposium on Discrete Algorithms}, isbn = {978-1-61197-338-9}, location = {Portland, OR, United States}, pages = {1053--1072}, publisher = {Society for Industrial and Applied Mathematics}, title = {{A subquadratic-time algorithm for decremental single-source shortest paths}}, doi = {10.1137/1.9781611973402.79}, year = {2014}, } @inproceedings{11875, abstract = {We present the first deterministic data structures for maintaining approximate minimum vertex cover and maximum matching in a fully dynamic graph in o(√m) time per update. In particular, for minimum vertex cover we provide deterministic data structures for maintaining a (2 + ε)-approximation in O(log n/ε^2) amortized time per update. For maximum matching, we show how to maintain a (3 + ε)-approximation in O(m^{1/3}/ε^2) amortized time per update, and a (4 + ε)-approximation in O(m^{1/3}/ε^2) worst-case time per update.
Our data structure for fully dynamic minimum vertex cover is essentially near-optimal and settles an open problem by Onak and Rubinfeld [13].}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Italiano, Giuseppe F.}, booktitle = {26th Annual ACM-SIAM Symposium on Discrete Algorithms}, isbn = {978-1-61197-374-7}, location = {San Diego, CA, United States}, pages = {785--804}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Deterministic fully dynamic data structures for vertex cover and matching}}, doi = {10.1137/1.9781611973730.54}, year = {2014}, } @article{119, abstract = {Observations of flowing granular matter have suggested that same-material tribocharging depends on particle size, typically rendering large grains positive and small ones negative. Models assuming the transfer of trapped electrons can account for this trend, but have not been validated. Tracking individual grains in an electric field, we show quantitatively that charge is transferred based on size between materially identical grains. However, the surface density of trapped electrons, measured independently by thermoluminescence techniques, is orders of magnitude too small to account for the scale of charge transferred. This reveals that trapped electrons are not a necessary ingredient for same-material tribocharging.}, author = {Waitukaitis, Scott R and Lee, Victor and Pierson, James and Forman, Steven and Jaeger, Heinrich}, journal = {Physical Review Letters}, number = {21}, publisher = {American Physical Society}, title = {{Size-dependent same-material tribocharging in insulating grains}}, doi = {10.1103/PhysRevLett.112.218001}, volume = {112}, year = {2014}, } @article{11968, abstract = {Membrane phospholipids typically contain fatty acids (FAs) of 16 and 18 carbon atoms. This particular chain length is evolutionarily highly conserved and presumably provides maximum stability and dynamic properties to biological membranes in response to nutritional or environmental cues. Here, we show that the relative proportion of C16 versus C18 FAs is regulated by the activity of acetyl-CoA carboxylase (Acc1), the first and rate-limiting enzyme of FA de novo synthesis. Acc1 activity is attenuated by AMPK/Snf1-dependent phosphorylation, which is required to maintain an appropriate acyl-chain length distribution. Moreover, we find that the transcriptional repressor Opi1 preferentially binds to C16 over C18 phosphatidic acid (PA) species: thus, C16-chain containing PA sequesters Opi1 more effectively to the ER, enabling AMPK/Snf1 control of PA acyl-chain length to determine the degree of derepression of Opi1 target genes. These findings reveal an unexpected regulatory link between the major energy-sensing kinase, membrane lipid composition, and transcription.}, author = {Hofbauer, Harald F. and Schopf, Florian H. and Schleifer, Hannes and Knittelfelder, Oskar L. and Pieber, Bartholomäus and Rechberger, Gerald N. and Wolinski, Heimo and Gaspar, Maria L. and Kappe, C. Oliver and Stadlmann, Johannes and Mechtler, Karl and Zenz, Alexandra and Lohner, Karl and Tehlivets, Oksana and Henry, Susan A.
and Kohlwein, Sepp D.}, issn = {1878-1551}, journal = {Developmental Cell}, number = {6}, pages = {729--739}, publisher = {Elsevier}, title = {{Regulation of gene expression through a transcriptional repressor that senses acyl-chain length in membrane phospholipids}}, doi = {10.1016/j.devcel.2014.04.025}, volume = {29}, year = {2014}, } @article{11967, abstract = {An experimentally easy to perform method for the generation of alumina-supported Fe3O4 nanoparticles [(6±1) nm size, 0.67 wt %] and the use of this material in hydrazine-mediated heterogeneously catalyzed reductions of nitroarenes to anilines under batch and continuous-flow conditions is presented. The bench-stable, reusable nano-Fe3O4@Al2O3 catalyst can selectively reduce functionalized nitroarenes at 1 mol % catalyst loading by using a 20 mol % excess of hydrazine hydrate in an elevated temperature regime (150 °C, reaction time 2–6 min in batch). For continuous-flow processing, the catalyst material is packed into dedicated cartridges and used in a commercially available high-temperature/-pressure flow device. In continuous mode, reaction times can be reduced to less than 1 min at 150 °C (30 bar back pressure) in a highly intensified process. The nano-Fe3O4@Al2O3 catalyst demonstrated stable reduction of nitrobenzene (0.5 M in MeOH) for more than 10 h on stream at a productivity of 30 mmol h⁻¹ (0.72 mol per day). Importantly, virtually no leaching of the catalytically active material could be observed by inductively coupled plasma MS monitoring.}, author = {Moghaddam, Mojtaba Mirhosseini and Pieber, Bartholomäus and Glasnov, Toma and Kappe, C. Oliver}, issn = {1864-564X}, journal = {ChemSusChem}, number = {11}, pages = {3122--3131}, publisher = {Wiley}, title = {{Immobilized iron oxide nanoparticles as stable and reusable catalysts for hydrazine-mediated nitro reductions in continuous flow}}, doi = {10.1002/cssc.201402455}, volume = {7}, year = {2014}, } @article{11987, abstract = {A method for the direct lithiation of terminal alkynes and heterocycles with subsequent carboxylation in a continuous flow format was developed. This method provides carboxylic acids at ambient conditions within less than five seconds with only a little excess of the organometallic base and CO2.}, author = {Pieber, Bartholomäus and Glasnov, Toma and Kappe, C. O.}, issn = {2046-2069}, journal = {RSC Advances}, number = {26}, publisher = {Royal Society of Chemistry}, title = {{Flash carboxylation: Fast lithiation–carboxylation sequence at room temperature in continuous flow}}, doi = {10.1039/c4ra01442a}, volume = {4}, year = {2014}, } @article{1309, abstract = {We show that weak solutions of the Derrida-Lebowitz-Speer-Spohn (DLSS) equation display infinite speed of support propagation. We apply our method to the case of the quantum drift-diffusion equation which augments the DLSS equation with a drift term and possibly a second-order diffusion term. The proof is accomplished using weighted entropy estimates, Hardy's inequality and a family of singular weight functions to derive a differential inequality; the differential inequality shows exponential growth of the weighted entropy, with the growth constant blowing up very fast as the singularity of the weight becomes sharper.
To the best of our knowledge, this is the first example of a nonnegativity-preserving higher-order parabolic equation displaying infinite speed of support propagation.}, author = {Fischer, Julian}, journal = {Nonlinear Differential Equations and Applications}, number = {1}, pages = {27 -- 50}, publisher = {Birkhäuser}, title = {{Infinite speed of support propagation for the Derrida-Lebowitz-Speer-Spohn equation and quantum drift-diffusion models}}, doi = {10.1007/s00030-013-0235-0}, volume = {21}, year = {2014}, } @article{1312, abstract = {We derive upper bounds on the waiting time of solutions to the thin-film equation in the regime of weak slippage n ∈ [2, 32/11). In particular, we give sufficient conditions on the initial data for instantaneous forward motion of the free boundary. For n ∈ (2, 32/11), our estimates are sharp; for n = 2, they are sharp up to a logarithmic correction term. Note that the case n = 2 corresponds - with a grain of salt - to the assumption of the Navier slip condition at the fluid-solid interface. We also obtain results in the regime of strong slippage n ∈ (1,2); however, in this regime we expect them not to be optimal. Our method is based on weighted backward entropy estimates, Hardy's inequality and singular weight functions; we deduce a differential inequality which would enforce blowup of the weighted entropy if the contact line were to remain stationary for too long.}, author = {Fischer, Julian}, journal = {Archive for Rational Mechanics and Analysis}, number = {3}, pages = {771 -- 818}, publisher = {Springer}, title = {{Upper bounds on waiting times for the thin-film equation: The case of weak slippage}}, doi = {10.1007/s00205-013-0690-0}, volume = {211}, year = {2014}, } @article{1375, abstract = {We consider directed graphs where each edge is labeled with an integer weight and study the fundamental algorithmic question of computing the value of a cycle with minimum mean weight. Our contributions are twofold: (1) First we show that the algorithmic question is reducible to the problem of a logarithmic number of min-plus matrix multiplications of n×n-matrices, where n is the number of vertices of the graph. (2) Second, when the weights are nonnegative, we present the first (1+ε)-approximation algorithm for the problem and the running time of our algorithm is Õ(n^ω log^3(nW/ε)/ε), where O(n^ω) is the time required for the classic n×n-matrix multiplication and W is the maximum value of the weights. With an additional O(log(nW/ε)) factor in space, a cycle with approximately optimal weight can be computed within the same time bound.}, author = {Chatterjee, Krishnendu and Henzinger, Monika H and Krinninger, Sebastian and Loitzenbauer, Veronika and Raskin, Michael}, journal = {Theoretical Computer Science}, number = {C}, pages = {104 -- 116}, publisher = {Elsevier}, title = {{Approximating the minimum cycle mean}}, doi = {10.1016/j.tcs.2014.06.031}, volume = {547}, year = {2014}, }
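Note on the last entry (Chatterjee et al., Approximating the minimum cycle mean): its abstract reduces the minimum cycle mean problem to a logarithmic number of min-plus matrix multiplications. The Python sketch below only illustrates the flavour of that reduction, namely the decision step "is the minimum cycle mean at most mu?" answered with ceil(log2 n) min-plus squarings. It is not the paper's algorithm: the function names min_plus_product and cycle_mean_at_most are invented here, and the naive cubic-time product stands in for the fast min-plus multiplication the paper actually relies on.

import math

def min_plus_product(A, B):
    # Naive O(n^3) (min,+) product of two n x n matrices; the paper's
    # stated bounds rely on fast min-plus multiplication instead.
    n = len(A)
    C = [[math.inf] * n for _ in range(n)]
    for i in range(n):
        for k in range(n):
            a = A[i][k]
            if a == math.inf:
                continue
            for j in range(n):
                v = a + B[k][j]
                if v < C[i][j]:
                    C[i][j] = v
    return C

def cycle_mean_at_most(W, mu):
    # Decision step (illustrative): does the digraph with adjacency
    # matrix W (W[i][j] = weight of edge (i, j), math.inf if absent)
    # contain a cycle of mean weight at most mu?  Shift every weight
    # by -mu and look for a closed walk of non-positive total weight
    # using ceil(log2 n) min-plus squarings.
    n = len(W)
    D = [[W[i][j] - mu if W[i][j] != math.inf else math.inf
          for j in range(n)] for i in range(n)]
    steps = max(1, (n - 1).bit_length())  # ceil(log2 n) for n >= 2
    for _ in range(steps):
        P = min_plus_product(D, D)
        # After t rounds, D[i][j] is the minimum weight of a walk
        # from i to j using between 1 and 2^t edges.
        D = [[min(D[i][j], P[i][j]) for j in range(n)] for i in range(n)]
    return any(D[i][i] <= 0 for i in range(n))

# Example: the 3-cycle 0 -> 1 -> 2 -> 0 with weights 2, 2, 5 has mean 3.
# inf = math.inf
# W = [[inf, 2, inf], [inf, inf, 2], [5, inf, inf]]
# cycle_mean_at_most(W, 3)    # True
# cycle_mean_at_most(W, 2.9)  # False

Under these assumptions, a (1+ε)-approximation for nonnegative weights bounded by W would follow by searching over candidate values of mu in [0, W], mirroring the O(log(nW/ε)) decision calls implicit in the running time stated in the abstract.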