@article{1677, abstract = {We consider real symmetric and complex Hermitian random matrices with the additional symmetry $h_{xy} = h_{N-y,N-x}$. The matrix elements are independent (up to the fourfold symmetry) and not necessarily identically distributed. This ensemble naturally arises as the Fourier transform of a Gaussian orthogonal ensemble. It also occurs as the flip matrix model, an approximation of the two-dimensional Anderson model at small disorder. We show that the density of states converges to the Wigner semicircle law despite the new symmetry type. We also prove the local version of the semicircle law on the optimal scale.}, author = {Alt, Johannes}, journal = {Journal of Mathematical Physics}, number = {10}, publisher = {American Institute of Physics}, title = {{The local semicircle law for random matrices with a fourfold symmetry}}, doi = {10.1063/1.4932606}, volume = {56}, year = {2015}, } @article{1678, abstract = {High-throughput live-cell screens are intricate elements of systems biology studies and drug discovery pipelines. Here, we demonstrate an optogenetics-assisted method that avoids the need for chemical activators and reporters, reduces the number of operational steps and increases information content in a cell-based small-molecule screen against human protein kinases, including an orphan receptor tyrosine kinase. This blueprint for all-optical screening can be adapted to many drug targets and cellular processes.}, author = {Inglés Prieto, Álvaro and Gschaider-Reichhart, Eva and Muellner, Markus and Nowak, Matthias and Nijman, Sebastian and Grusch, Michael and Janovjak, Harald L}, journal = {Nature Chemical Biology}, number = {12}, pages = {952 -- 954}, publisher = {Nature Publishing Group}, title = {{Light-assisted small-molecule screening against protein kinases}}, doi = {10.1038/nchembio.1933}, volume = {11}, year = {2015}, } @article{1576, abstract = {Gene expression is controlled primarily by interactions between transcription factor proteins (TFs) and the regulatory DNA sequence, a process that can be captured well by thermodynamic models of regulation. These models, however, neglect regulatory crosstalk: the possibility that noncognate TFs could initiate transcription, with potentially disastrous effects for the cell. Here, we estimate the importance of crosstalk, suggest that its avoidance strongly constrains equilibrium models of TF binding, and propose an alternative nonequilibrium scheme that implements kinetic proofreading to suppress erroneous initiation. This proposal is consistent with the observed covalent modifications of the transcriptional apparatus and predicts increased noise in gene expression as a trade-off for improved specificity. Using information theory, we quantify this trade-off to find when optimal proofreading architectures are favored over their equilibrium counterparts. Such architectures exhibit significant super-Poisson noise at low expression in steady state.}, author = {Cepeda Humerez, Sarah A and Rieckh, Georg and Tkacik, Gasper}, journal = {Physical Review Letters}, number = {24}, publisher = {American Physical Society}, title = {{Stochastic proofreading mechanism alleviates crosstalk in transcriptional regulation}}, doi = {10.1103/PhysRevLett.115.248101}, volume = {115}, year = {2015}, } @unpublished{8183, abstract = {We study conditions under which a finite simplicial complex $K$ can be mapped to $\mathbb R^d$ without higher-multiplicity intersections.
An almost $r$-embedding is a map $f: K\to \mathbb R^d$ such that the images of any $r$ pairwise disjoint simplices of $K$ do not have a common point. We show that if $r$ is not a prime power and $d\geq 2r+1$, then there is a counterexample to the topological Tverberg conjecture, i.e., there is an almost $r$-embedding of the $(d+1)(r-1)$-simplex in $\mathbb R^d$. This improves on previous constructions of counterexamples (for $d\geq 3r$) based on a series of papers by M. \"Ozaydin, M. Gromov, P. Blagojevi\'c, F. Frick, G. Ziegler, and the second and fourth present authors. The counterexamples are obtained by proving the following algebraic criterion in codimension 2: If $r\ge3$ and if $K$ is a finite $2(r-1)$-complex then there exists an almost $r$-embedding $K\to \mathbb R^{2r}$ if and only if there exists a general position PL map $f:K\to \mathbb R^{2r}$ such that the algebraic intersection number of the $f$-images of any $r$ pairwise disjoint simplices of $K$ is zero. This result can be restated in terms of cohomological obstructions or equivariant maps, and extends an analogous codimension 3 criterion by the second and fourth authors. As another application we classify ornaments $f:S^3 \sqcup S^3\sqcup S^3\to \mathbb R^5$ up to ornament concordance. It follows from work of M. Freedman, V. Krushkal and P. Teichner that the analogous criterion for $r=2$ is false. We prove a lemma on singular higher-dimensional Borromean rings, yielding an elementary proof of the counterexample.}, author = {Avvakumov, Sergey and Mabillard, Isaac and Skopenkov, A. and Wagner, Uli}, booktitle = {arXiv}, title = {{Eliminating higher-multiplicity intersections, III. Codimension 2}}, year = {2015}, } @misc{5441, abstract = {We study algorithmic questions for concurrent systems where the transitions are labeled from a complete, closed semiring, and path properties are algebraic with semiring operations. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural problems that arise in program analysis. We consider that each component of the concurrent system is a graph with constant treewidth, a property satisfied by the control-flow graphs of most programs. We allow for multiple possible queries, which arise naturally in demand-driven dataflow analysis. The study of multiple queries allows us to consider the tradeoff between the resource usage of the one-time preprocessing and that of each individual query. The traditional approach constructs the product graph of all components and applies the best-known graph algorithm on the product. In this approach, even the answer to a single query requires the transitive closure (i.e., the results of all possible queries), which provides no room for tradeoff between preprocessing and query time. Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time, each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time.
Furthermore, we establish conditional optimality results showing that the worst-case running time of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms (i.e., improving the worst-case bound for the shortest path problem in general graphs). Preliminary experimental results show that our algorithms perform favorably on several benchmarks.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Goharshady, Amir and Pavlogiannis, Andreas}, issn = {2664-1690}, pages = {24}, publisher = {IST Austria}, title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}}, doi = {10.15479/AT:IST-2015-340-v1-1}, year = {2015}, } @misc{5442, abstract = {We study algorithmic questions for concurrent systems where the transitions are labeled from a complete, closed semiring, and path properties are algebraic with semiring operations. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural properties that arise in program analysis. We consider that each component of the concurrent system is a graph with constant treewidth, and it is known that the control-flow graphs of most programs have constant treewidth. We allow for multiple possible queries, which arise naturally in demand-driven dataflow analysis problems (e.g., alias analysis). The study of multiple queries allows us to consider the tradeoff between the resource usage of the \emph{one-time} preprocessing and that of \emph{each individual} query. The traditional approaches construct the product graph of all components and apply the best-known graph algorithm on the product. In the traditional approach, even the answer to a single query requires the transitive closure computation (i.e., the results of all possible queries), which provides no room for tradeoff between preprocessing and query time. Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time, each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time. Furthermore, we establish conditional optimality results that show that the worst-case running times of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms (such as improving the worst-case bounds for the shortest path problem in general graphs, whose current best-known bound has not been improved in five decades). Finally, we provide a prototype implementation of our algorithms which significantly outperforms the existing algorithmic methods on several benchmarks.}, author = {Anonymous, 1 and Anonymous, 2 and Anonymous, 3 and Anonymous, 4}, issn = {2664-1690}, pages = {22}, publisher = {IST Austria}, title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}}, year = {2015}, } @inproceedings{1689, abstract = {We consider the problem of computing the set of initial states of a dynamical system such that there exists a control strategy to ensure that the trajectories satisfy a temporal logic specification with probability 1 (almost-surely).
We focus on discrete-time, stochastic linear dynamics and specifications given as formulas of the Generalized Reactivity(1) fragment of Linear Temporal Logic over linear predicates in the states of the system. We propose a solution based on iterative abstraction-refinement, and turn-based 2-player probabilistic games. While the theoretical guarantee of our algorithm after any finite number of iterations is only a partial solution, we show that if our algorithm terminates, then the result is the set of satisfying initial states. Moreover, for any (partial) solution our algorithm synthesizes witness control strategies to ensure almost-sure satisfaction of the temporal logic specification. We demonstrate our approach on an illustrative case study.}, author = {Svoreňová, Mária and Kretinsky, Jan and Chmelik, Martin and Chatterjee, Krishnendu and Černá, Ivana and Belta, Călin}, booktitle = {Proceedings of the 18th International Conference on Hybrid Systems: Computation and Control}, location = {Seattle, WA, United States}, pages = {259 -- 268}, publisher = {ACM}, title = {{Temporal logic control for stochastic linear systems using abstraction refinement of probabilistic games}}, doi = {10.1145/2728606.2728608}, year = {2015}, } @inproceedings{1729, abstract = {We present a computer-aided programming approach to concurrency. The approach allows programmers to program assuming a friendly, non-preemptive scheduler, and our synthesis procedure inserts synchronization to ensure that the final program works even with a preemptive scheduler. The correctness specification is implicit, inferred from the non-preemptive behavior. Let us consider sequences of calls that the program makes to an external interface. The specification requires that any such sequence produced under a preemptive scheduler should be included in the set of such sequences produced under a non-preemptive scheduler. The solution is based on a finitary abstraction, an algorithm for bounded language inclusion modulo an independence relation, and rules for inserting synchronization. We apply the approach to device-driver programming, where the driver threads call the software interface of the device and the API provided by the operating system. Our experiments demonstrate that our synthesis method is precise and efficient, and, since it does not require explicit specifications, is more practical than the conventional approach based on user-provided assertions.}, author = {Cerny, Pavol and Clarke, Edmund and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Samanta, Roopsha and Tarrach, Thorsten}, location = {San Francisco, CA, United States}, pages = {180 -- 197}, publisher = {Springer}, title = {{From non-preemptive to preemptive scheduling using synchronization synthesis}}, doi = {10.1007/978-3-319-21668-3_11}, volume = {9207}, year = {2015}, } @inproceedings{1835, abstract = {The behaviour of gene regulatory networks (GRNs) is typically analysed using simulation-based statistical testing-like methods. In this paper, we demonstrate that we can replace this approach by a formal verification-like method that gives higher assurance and scalability. We focus on Wagner’s weighted GRN model with varying weights, which is used in evolutionary biology. In the model, weight parameters represent the gene interaction strength that may change due to genetic mutations. For a property of interest, we synthesise the constraints over the parameter space that represent the set of GRNs satisfying the property.
We experimentally show that our parameter synthesis procedure computes the mutational robustness of GRNs, an important problem in evolutionary biology, more efficiently than the classical simulation method. We specify the property in linear temporal logic. We employ symbolic bounded model checking and SMT solving to compute the space of GRNs that satisfy the property, which amounts to synthesizing a set of linear constraints on the weights.}, author = {Giacobbe, Mirco and Guet, Calin C and Gupta, Ashutosh and Henzinger, Thomas A and Paixao, Tiago and Petrov, Tatjana}, location = {London, United Kingdom}, pages = {469 -- 483}, publisher = {Springer}, title = {{Model checking gene regulatory networks}}, doi = {10.1007/978-3-662-46681-0_47}, volume = {9035}, year = {2015}, } @article{1509, abstract = {The Auxin Binding Protein1 (ABP1) has been identified based on its ability to bind auxin with high affinity and studied for a long time as a prime candidate for the extracellular auxin receptor responsible for mediating in particular the fast non-transcriptional auxin responses. However, the contradiction between the embryo-lethal phenotypes of the originally described Arabidopsis T-DNA insertional knock-out alleles (abp1-1 and abp1-1s) and the wild type-like phenotypes of other recently described loss-of-function alleles (abp1-c1 and abp1-TD1) questions the biological importance of ABP1 and the relevance of the previous genetic studies. Here we show that there is no hidden copy of the ABP1 gene in the Arabidopsis genome but the embryo-lethal phenotypes of abp1-1 and abp1-1s alleles are very similar to the knock-out phenotypes of the neighboring gene, BELAYA SMERT (BSM). Furthermore, the allelic complementation test between bsm and abp1 alleles shows that the embryo-lethality in the abp1-1 and abp1-1s alleles is caused by the off-target disruption of the BSM locus by the T-DNA insertions. This clarifies the controversy of different phenotypes among published abp1 knock-out alleles and asks for reflections on the developmental role of ABP1.}, author = {Michalko, Jaroslav and Dravecka, Marta and Bollenbach, Tobias and Friml, Jirí}, journal = {F1000 Research}, publisher = {F1000 Research}, title = {{Embryo-lethal phenotypes in early abp1 mutants are due to disruption of the neighboring BSM gene}}, doi = {10.12688/f1000research.7143.1}, volume = {4}, year = {2015}, } @article{1681, abstract = {In many social situations, individuals endeavor to find the single best possible partner, but are constrained to evaluate the candidates in sequence. Examples include the search for mates, economic partnerships, or any other long-term ties where the choice to interact involves two parties. Surprisingly, however, previous theoretical work on mutual choice problems focuses on finding equilibrium solutions, while ignoring the evolutionary dynamics of decisions. Empirically, this may be of high importance, as some equilibrium solutions can never be reached unless the population undergoes radical changes and a sufficient number of individuals change their decisions simultaneously. To address this question, we apply a mutual choice sequential search problem in an evolutionary game-theoretical model that allows one to find solutions that are favored by evolution. As an example, we study the influence of sequential search on the evolutionary dynamics of cooperation.
For this, we focus on the classic snowdrift game and the prisoner’s dilemma game.}, author = {Priklopil, Tadeas and Chatterjee, Krishnendu}, issn = {2073-4336}, journal = {Games}, number = {4}, pages = {413 -- 437}, publisher = {MDPI}, title = {{Evolution of decisions in population games with sequentially searching individuals}}, doi = {10.3390/g6040413}, volume = {6}, year = {2015}, } @article{1655, abstract = {Quantifying behaviors of robots which were generated autonomously from task-independent objective functions is an important prerequisite for objective comparisons of algorithms and movements of animals. The temporal sequence of such a behavior can be considered as a time series and hence complexity measures developed for time series are natural candidates for its quantification. The predictive information and the excess entropy are such complexity measures. They measure the amount of information the past contains about the future and thus quantify the nonrandom structure in the temporal sequence. However, when using these measures for systems with continuous states one has to deal with the fact that their values will depend on the resolution with which the system's states are observed. For deterministic systems both measures will diverge with increasing resolution. We therefore propose a new decomposition of the excess entropy into resolution-dependent and resolution-independent parts and discuss how they depend on the dimensionality of the dynamics, correlations and the noise level. For the practical estimation we propose to use estimates based on the correlation integral instead of the direct estimation of the mutual information based on next neighbor statistics because the latter allows less control of the scale dependencies. Using our algorithm we are able to show how autonomous learning generates behavior of increasing complexity with increasing learning duration.}, author = {Martius, Georg S and Olbrich, Eckehard}, journal = {Entropy}, number = {10}, pages = {7266 -- 7297}, publisher = {MDPI}, title = {{Quantifying emergent behavior of autonomous robots}}, doi = {10.3390/e17107266}, volume = {17}, year = {2015}, } @article{1834, abstract = {A large body of evidence has demonstrated that volatile anesthetics affect hippocampal neurogenesis and neurocognitive functions, with most studies showing impairment at anesthetic doses. Here, we investigated the effect of low-dose (1.8%) sevoflurane on hippocampal neurogenesis and dentate gyrus-dependent learning. Neonatal rats at postnatal day 4 to 6 (P4-6) were treated with 1.8% sevoflurane for 6 hours. Neurogenesis was quantified by bromodeoxyuridine labeling and electrophysiology recording. Four and seven weeks after treatment, the Morris water maze and contextual-fear discrimination learning tests were performed to determine the influence on spatial learning and pattern separation. A 6-hour treatment with 1.8% sevoflurane promoted hippocampal neurogenesis and increased the survival of newborn cells and the proportion of immature granular cells in the dentate gyrus of neonatal rats. Sevoflurane-treated rats performed better during the training days of the Morris water maze test and in the contextual-fear discrimination learning test.
These results suggest that a subanesthetic dose of sevoflurane promotes hippocampal neurogenesis in neonatal rats and facilitates their performance in dentate gyrus-dependent learning tasks.}, author = {Chen, Chong and Wang, Chao and Zhao, Xuan and Zhou, Tao and Xu, Dao and Wang, Zhi and Wang, Ying}, journal = {ASN Neuro}, number = {2}, publisher = {SAGE Publications}, title = {{Low-dose sevoflurane promotes hippocampal neurogenesis and facilitates the development of dentate gyrus-dependent learning in neonatal rats}}, doi = {10.1177/1759091415575845}, volume = {7}, year = {2015}, } @article{1635, abstract = {We calculate a Ricci curvature lower bound for some classical examples of random walks, namely, a chain on a slice of the n-dimensional discrete cube (the so-called Bernoulli-Laplace model) and the random transposition shuffle of the symmetric group of permutations on n letters.}, author = {Erbar, Matthias and Maas, Jan and Tetali, Prasad}, journal = {Annales de la faculté des sciences de Toulouse}, number = {4}, pages = {781 -- 800}, publisher = {Faculté des sciences de Toulouse}, title = {{Discrete Ricci curvature bounds for Bernoulli-Laplace and random transposition models}}, doi = {10.5802/afst.1464}, volume = {24}, year = {2015}, } @article{14303, abstract = {Scaffolded DNA origami enables the fabrication of a variety of complex nanostructures that promise utility in diverse fields of application, ranging from biosensing through advanced therapeutics to metamaterials. The broad applicability of DNA origami as a material beyond the level of proof-of-concept studies critically depends, among other factors, on the availability of large amounts of pure single-stranded scaffold DNA. Here, we present a method for the efficient production of M13 bacteriophage-derived genomic DNA using high-cell-density fermentation of Escherichia coli in stirred-tank bioreactors. We achieve phage titers of up to $1.6 \times 10^{14}$ plaque-forming units per mL. Downstream processing yields up to 410 mg of high-quality single-stranded DNA per liter of reaction volume, thus upgrading DNA origami-based nanotechnology from the milligram to the gram scale.}, author = {Kick, B and Praetorius, Florian M and Dietz, H and Weuster-Botz, D}, issn = {1530-6992}, journal = {Nano Letters}, number = {7}, pages = {4672--4676}, publisher = {ACS Publications}, title = {{Efficient production of single-stranded phage DNA as scaffolds for DNA origami}}, doi = {10.1021/acs.nanolett.5b01461}, volume = {15}, year = {2015}, } @inproceedings{1603, abstract = {For deterministic systems, a counterexample to a property can simply be an error trace, whereas counterexamples in probabilistic systems are necessarily more complex. For instance, a set of erroneous traces with a sufficient cumulative probability mass can be used. Since such objects are too large to understand and manipulate, compact representations such as subchains have been considered. In the case of probabilistic systems with non-determinism, the situation is even more complex. While a subchain for a given strategy (or scheduler, resolving non-determinism) is a straightforward choice, we take a different approach. Instead, we focus on the strategy itself, extract the most important decisions it makes, and present its succinct representation. The key tools we employ to achieve this are (1) introducing a concept of importance of a state w.r.t. the strategy, and (2) learning using decision trees. There are three main advantages of our approach.
Firstly, it exploits the quantitative information on states, stressing the more important decisions. Secondly, it leads to a greater variability and degree of freedom in representing the strategies. Thirdly, the representation uses a self-explanatory data structure. In summary, our approach produces more succinct and more explainable strategies, as opposed to e.g. binary decision diagrams. Finally, our experimental results show that we can extract several rules describing the strategy even for very large systems that do not fit in memory, and based on the rules explain the erroneous behaviour.}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Chmelik, Martin and Fellner, Andreas and Kretinsky, Jan}, location = {San Francisco, CA, United States}, pages = {158 -- 177}, publisher = {Springer}, title = {{Counterexample explanation by learning small strategies in Markov decision processes}}, doi = {10.1007/978-3-319-21690-4_10}, volume = {9206}, year = {2015}, } @misc{5549, abstract = {This repository contains the experimental part of the CAV 2015 publication Counterexample Explanation by Learning Small Strategies in Markov Decision Processes. We extended the probabilistic model checker PRISM to represent strategies of Markov Decision Processes as Decision Trees. The archive contains a Java executable version of the extended tool (prism_dectree.jar) together with a few examples from the PRISM benchmark library. To execute the program, please have a look at the README.txt, which provides instructions and further information on the archive. The archive contains scripts that (if run often enough) reproduce the data presented in the publication.}, author = {Fellner, Andreas}, keywords = {Markov Decision Process, Decision Tree, Probabilistic Verification, Counterexample Explanation}, publisher = {Institute of Science and Technology Austria}, title = {{Experimental part of CAV 2015 publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes}}, doi = {10.15479/AT:ISTA:28}, year = {2015}, } @inproceedings{1512, abstract = {We show that very weak topological assumptions are enough to ensure the existence of a Helly-type theorem. More precisely, we show that for any non-negative integers b and d there exists an integer h(b,d) such that the following holds. If F is a finite family of subsets of R^d such that the ith reduced Betti number (with Z_2 coefficients in singular homology) of the intersection of any proper subfamily G of F is at most b for every non-negative integer i less than or equal to (d-1)/2, then F has Helly number at most h(b,d). These topological conditions are sharp: not controlling any of these first Betti numbers allows for families with unbounded Helly number. Our proofs combine homological non-embeddability results with a Ramsey-based approach to build, given an arbitrary simplicial complex K, some well-behaved chain map from C_*(K) to C_*(R^d).
Both techniques are of independent interest.}, author = {Goaoc, Xavier and Paták, Pavel and Patakova, Zuzana and Tancer, Martin and Wagner, Uli}, location = {Eindhoven, Netherlands}, pages = {507 -- 521}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Bounding Helly numbers via Betti numbers}}, doi = {10.4230/LIPIcs.SOCG.2015.507}, volume = {34}, year = {2015}, } @article{271, abstract = {We show that a non-singular integral form of degree d is soluble non-trivially over the integers if and only if it is soluble non-trivially over the reals and the p-adic numbers, provided that the form has at least $(d-\sqrt{d}/2)2^d$ variables. This improves on a longstanding result of Birch.}, author = {Browning, Timothy D and Prendiville, Sean}, issn = {0075-4102}, journal = {Journal für die reine und angewandte Mathematik}, number = {731}, pages = {203 -- 234}, publisher = {Walter de Gruyter}, title = {{Improvements in Birch's theorem on forms in many variables}}, doi = {10.1515/crelle-2014-0122}, volume = {2017}, year = {2015}, } @inproceedings{1675, abstract = {Proofs of work (PoW) have been suggested by Dwork and Naor (Crypto’92) as protection to a shared resource. The basic idea is to ask the service requestor to dedicate some non-trivial amount of computational work to every request. The original applications included prevention of spam and protection against denial of service attacks. More recently, PoWs have been used to prevent double spending in the Bitcoin digital currency system. In this work, we put forward an alternative concept for PoWs, so-called proofs of space (PoS), where a service requestor must dedicate a significant amount of disk space as opposed to computation. We construct secure PoS schemes in the random oracle model (with one additional mild assumption required for the proof to go through), using graphs with high “pebbling complexity” and Merkle hash-trees. We discuss some applications, including follow-up work where a decentralized digital currency scheme called Spacecoin is constructed that uses PoS (instead of wasteful PoW like in Bitcoin) to prevent double spending. The main technical contribution of this work is the construction of (directed, loop-free) graphs on N vertices with in-degree O(log log N) such that even if one places Θ(N) pebbles on the nodes of the graph, there’s a constant fraction of nodes that needs Θ(N) steps to be pebbled (where in every step one can put a pebble on a node if all its parents have a pebble).}, author = {Dziembowski, Stefan and Faust, Sebastian and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z}, booktitle = {35th Annual Cryptology Conference}, isbn = {9783662479995}, issn = {0302-9743}, location = {Santa Barbara, CA, United States}, pages = {585 -- 605}, publisher = {Springer}, title = {{Proofs of space}}, doi = {10.1007/978-3-662-48000-7_29}, volume = {9216}, year = {2015}, }