@inproceedings{302, abstract = {At ITCS 2013, Mahmoody, Moran and Vadhan [MMV13] introduce and construct publicly verifiable proofs of sequential work, which is a protocol for proving that one spent sequential computational work related to some statement. The original motivation for such proofs included non-interactive time-stamping and universally verifiable CPU benchmarks. A more recent application, and our main motivation, is blockchain design, where proofs of sequential work can be used – in combination with proofs of space – as a more ecological and economical substitute for the proofs of work which are currently used to secure Bitcoin and other cryptocurrencies. The construction proposed by [MMV13] is based on a hash function and can be proven secure in the random oracle model, or assuming inherently sequential hash functions, a new standard-model assumption introduced in their work. In a proof of sequential work, a prover gets a “statement” χ, a time parameter N and access to a hash function H, which for the security proof is modelled as a random oracle. Correctness requires that an honest prover can make a verifier accept while making only N queries to H, while soundness requires that any prover who makes the verifier accept must have made (almost) N sequential queries to H. Thus a solution constitutes a proof that time N has passed since χ was received. Solutions must be publicly verifiable in time at most polylogarithmic in N. The construction of [MMV13] is based on “depth-robust” graphs and, as a consequence, has rather poor concrete parameters. But the major drawback is that the prover needs not just N time but also N space to compute a proof. In this work we propose a proof of sequential work which is much simpler, more efficient and achieves much better concrete bounds. Most importantly, the space required can be as small as log(N) (but we get better soundness using slightly more memory than that). An open problem stated by [MMV13] that our construction does not solve either is achieving a “unique” proof, where even a cheating prover can only generate a single accepting proof. This property would be extremely useful for applications to blockchains.}, author = {Cohen, Bram and Pietrzak, Krzysztof Z}, location = {Tel Aviv, Israel}, pages = {451--467}, publisher = {Springer}, title = {{Simple proofs of sequential work}}, doi = {10.1007/978-3-319-78375-8_15}, volume = {10821}, year = {2018}, } @inproceedings{298, abstract = {Memory-hard functions (MHFs) are functions whose evaluation cost is dominated by memory cost. MHFs are egalitarian, in the sense that evaluating them on dedicated hardware (like FPGAs or ASICs) is not much cheaper than on off-the-shelf hardware (like x86 CPUs). MHFs have interesting cryptographic applications, most notably to password hashing and securing blockchains. Alwen and Serbinenko [STOC’15] define the cumulative memory complexity (cmc) of a function as the sum (over all time-steps) of the amount of memory required to compute the function. They advocate that a good MHF must have high cmc. Unlike previous notions, cmc takes into account that dedicated hardware might exploit amortization and parallelism. Still, cmc has been criticized as insufficient, as it fails to capture possible time-memory trade-offs: because memory cost doesn’t scale linearly, functions with the same cmc could still have very different actual hardware cost.
In this work we address this problem and introduce the notion of sustained-memory complexity, which requires that any algorithm evaluating the function must use a large amount of memory for many steps. We construct functions (in the parallel random oracle model) whose sustained-memory complexity is almost optimal: our function can be evaluated using n steps and O(n/log(n)) memory, in each step making one query to the (fixed-input-length) random oracle, while any algorithm that can make arbitrarily many parallel queries to the random oracle still needs Ω(n/log(n)) memory for Ω(n) steps. As has been done for various notions (including cmc) before, we reduce the task of constructing an MHF with high sustained-memory complexity to proving pebbling lower bounds on DAGs. Our main technical contribution is the construction of a family of DAGs on n nodes with constant indegree and high “sustained-space complexity”, meaning that any parallel black-pebbling strategy requires Ω(n/log(n)) pebbles for at least Ω(n) steps. Along the way we construct a family of maximally “depth-robust” DAGs with maximum indegree O(log(n)), improving upon the construction of Mahmoody et al. [ITCS’13], which had maximum indegree O(log^2(n) · polylog(log(n))).}, author = {Alwen, Joel F and Blocki, Jeremiah and Pietrzak, Krzysztof Z}, location = {Tel Aviv, Israel}, pages = {99--130}, publisher = {Springer}, title = {{Sustained space complexity}}, doi = {10.1007/978-3-319-78375-8_4}, volume = {10821}, year = {2018}, } @article{5980, abstract = {The problem of private set-intersection (PSI) has traditionally been treated as an instance of the more general problem of multi-party computation (MPC). Consequently, in order to argue the security of these protocols, or to compose them, one has to rely on the general theory that was developed for the purpose of MPC. The pursuit of efficient protocols, however, has resulted in designs that exploit properties pertaining to PSI. In almost all practical applications where a PSI protocol is deployed, it is expected to be executed multiple times, possibly on related inputs. In this work we initiate a dedicated study of PSI in the multi-interaction (MI) setting. In this model a server sets up the common system parameters and executes set-intersection multiple times with potentially different clients. We discuss a few attacks that arise when protocols are naïvely composed in this manner and, accordingly, craft security definitions for the MI setting and study their inter-relation. Finally, we suggest a set of protocols that are MI-secure while remaining almost as efficient as their parent, stand-alone, protocols.}, author = {Chatterjee, Sanjit and Kamath Hosdurg, Chethan and Kumar, Vikas}, journal = {Advances in Mathematics of Communications}, number = {1}, pages = {17--47}, publisher = {AIMS}, title = {{Private set-intersection with common set-up}}, doi = {10.3934/amc.2018002}, volume = {12}, year = {2018}, } @inproceedings{6941, abstract = {Bitcoin has become the most successful cryptocurrency ever deployed, and its most distinctive feature is that it is decentralized. Its underlying protocol (Nakamoto consensus) achieves this by using proof of work, which has the drawback of consuming vast amounts of energy to maintain the ledger. Moreover, Bitcoin mining dynamics have become less distributed over time. Towards addressing these issues, we propose SpaceMint, a cryptocurrency based on proofs of space instead of proofs of work. Miners in SpaceMint dedicate disk space rather than computation.
We argue that SpaceMint’s design solves or alleviates several of Bitcoin’s issues: most notably its large energy consumption. SpaceMint also rewards smaller miners fairly according to their contribution to the network, thus incentivizing more distributed participation. This paper adapts proof of space to enable its use in cryptocurrency, studies the attacks that can arise against a Bitcoin-like blockchain that uses proof of space, and proposes a new blockchain format and transaction types to address these attacks. Our prototype shows that initializing 1 TB for mining takes about a day (a one-off setup cost), and miners spend on average just a fraction of a second per block mined. Finally, we provide a game-theoretic analysis modeling SpaceMint as an extensive game (the canonical game-theoretic notion for games that take place over time) and show that this stylized game satisfies a strong equilibrium notion, thereby arguing for SpaceMint’s stability and consensus.}, author = {Park, Sunoo and Kwon, Albert and Fuchsbauer, Georg and Gazi, Peter and Alwen, Joel F and Pietrzak, Krzysztof Z}, booktitle = {22nd International Conference on Financial Cryptography and Data Security}, isbn = {978-3-662-58386-9}, issn = {1611-3349}, location = {Nieuwpoort, Curaçao}, pages = {480--499}, publisher = {Springer Nature}, title = {{SpaceMint: A cryptocurrency based on proofs of space}}, doi = {10.1007/978-3-662-58387-6_26}, volume = {10957}, year = {2018}, } @inproceedings{1175, abstract = {We study space complexity and time-space trade-offs with a focus not on peak memory usage but on overall memory consumption throughout the computation. Such a cumulative space measure was introduced for the computational model of parallel black pebbling by [Alwen and Serbinenko ’15] as a tool for obtaining results in cryptography. We consider instead the non-deterministic black-white pebble game and prove optimal cumulative space lower bounds and trade-offs, where in order to minimize pebbling time the space has to remain large during a significant fraction of the pebbling. We also initiate the study of cumulative space in proof complexity, an area where other space complexity measures have been extensively studied during the last 10–15 years. Using and extending the connection between proof complexity and pebble games in [Ben-Sasson and Nordström ’08, ’11], we obtain several strong cumulative space results for (even parallel versions of) the resolution proof system, and outline some possible future directions of study of this, in our opinion, natural and interesting space measure.}, author = {Alwen, Joel F and De Rezende, Susanna and Nordström, Jakob and Vinyals, Marc}, editor = {Papadimitriou, Christos}, issn = {1868-8969}, location = {Berkeley, CA, United States}, pages = {38:1--38:21}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Cumulative space in black-white pebbling and resolution}}, doi = {10.4230/LIPIcs.ITCS.2017.38}, volume = {67}, year = {2017}, } @inproceedings{605, abstract = {Position based cryptography (PBC), proposed in the seminal work of Chandran, Goyal, Moriarty, and Ostrovsky (SIAM J. Computing, 2014), aims at constructing cryptographic schemes in which the identity of the user is his geographic position. Chandran et al. construct PBC schemes for secure positioning and position-based key agreement in the bounded-storage model (Maurer, J. Cryptology, 1992).
Apart from bounded memory, their security proofs need a strong additional restriction on the power of the adversary: he cannot compute joint functions of his inputs. Removing this assumption is left as an open problem. We show that an answer to this question would resolve a long-standing open problem in multiparty communication complexity: finding a function that is hard to compute with low communication complexity in the simultaneous message model, but easy to compute in the fully adaptive model. On the positive side, we also show some implications in the other direction: we prove that lower bounds on the communication complexity of certain multiparty problems imply the existence of PBC primitives. Using this result we then show two attractive ways to “bypass” our hardness result: the first uses the random oracle model, the second weakens the locality requirement in the bounded-storage model to online computability. The random oracle construction is arguably one of the simplest proposed so far in this area. Our results indicate that constructing improved provably secure protocols for PBC requires a better understanding of multiparty communication complexity. This is yet another example where negative results in one area (in our case: lower bounds in multiparty communication complexity) can be used to construct secure cryptographic schemes.}, author = {Brody, Joshua and Dziembowski, Stefan and Faust, Sebastian and Pietrzak, Krzysztof Z}, editor = {Kalai, Yael and Reyzin, Leonid}, isbn = {978-3-319-70499-9}, location = {Baltimore, MD, United States}, pages = {56--81}, publisher = {Springer}, title = {{Position based cryptography and multiparty communication complexity}}, doi = {10.1007/978-3-319-70500-2_3}, volume = {10677}, year = {2017}, } @inproceedings{609, abstract = {Several cryptographic schemes and applications are based on functions that are both reasonably efficient to compute and moderately hard to invert, including client puzzles for Denial-of-Service protection, password protection via salted hashes, and recent proof-of-work blockchain systems. Despite their wide use, a definition of this concept has not yet been distilled and formalized explicitly. Instead, either the applications are proven directly based on the assumptions underlying the function, or some property of the function is proven, but the security of the application is argued only informally. The goal of this work is to provide a (universal) definition that decouples the efforts of designing new moderately hard functions and of building protocols based on them, serving as an interface between the two. On a technical level, beyond the mentioned definitions, we instantiate the model for four different notions of hardness. We extend the work of Alwen and Serbinenko (STOC 2015) by providing a general tool for proving security for the first notion of memory-hard functions that allows for provably secure applications. The tool allows us to recover all of the graph-theoretic techniques developed for proving security under the older, non-composable, notion of security used by Alwen and Serbinenko. As an application of our definition of moderately hard functions, we prove the security of two different schemes for proofs of effort (PoE).
We also formalize and instantiate the concept of a non-interactive proof of effort (niPoE), in which the proof is not bound to a particular communication context but rather to any bit-string chosen by the prover.}, author = {Alwen, Joel F and Tackmann, Björn}, editor = {Kalai, Yael and Reyzin, Leonid}, isbn = {978-3-319-70499-9}, location = {Baltimore, MD, United States}, pages = {493--526}, publisher = {Springer}, title = {{Moderately hard functions: Definition, instantiations, and applications}}, doi = {10.1007/978-3-319-70500-2_17}, volume = {10677}, year = {2017}, } @inproceedings{635, abstract = {Memory-hard functions (MHFs) are hash algorithms whose evaluation cost is dominated by memory cost. As memory, unlike computation, costs about the same across different platforms, MHFs cannot be evaluated at significantly lower cost on dedicated hardware like ASICs. MHFs have found widespread applications including password hashing, key derivation, and proofs-of-work. This paper focuses on scrypt, a simple candidate MHF designed by Percival, and described in RFC 7914. It has been used within a number of cryptocurrencies (e.g., Litecoin and Dogecoin) and has been an inspiration for Argon2d, one of the winners of the recent password-hashing competition. Despite its popularity, no rigorous lower bounds on its memory complexity were known. We prove that scrypt is optimally memory-hard, i.e., its cumulative memory complexity (cmc) in the parallel random oracle model is Ω(n^2 · w), where w and n are the output length and number of invocations of the underlying hash function, respectively. High cmc is a strong security target for MHFs introduced by Alwen and Serbinenko (STOC’15) which implies high memory cost even for adversaries who can amortize the cost over many evaluations and evaluate the underlying hash functions many times in parallel. Our proof is the first showing optimal memory-hardness for any MHF. Our result improves both quantitatively and qualitatively upon the recent work by Alwen et al. (EUROCRYPT’16), who proved a weaker lower bound of Ω(n^2 · w / log^2(n)) for a restricted class of adversaries.}, author = {Alwen, Joel F and Chen, Binyi and Pietrzak, Krzysztof Z and Reyzin, Leonid and Tessaro, Stefano}, editor = {Coron, Jean-Sébastien and Nielsen, Jesper Buus}, isbn = {978-3-319-56616-0}, location = {Paris, France}, pages = {33--62}, publisher = {Springer}, title = {{Scrypt is maximally memory hard}}, doi = {10.1007/978-3-319-56617-7_2}, volume = {10212}, year = {2017}, } @inproceedings{640, abstract = {Data-independent memory-hard functions (iMHFs) are finding a growing number of applications in security; especially in the domain of password hashing. An important property of a concrete iMHF is specified by fixing a directed acyclic graph (DAG) G_n on n nodes. The quality of that iMHF is then captured by the following two pebbling complexities of G_n: – The parallel cumulative pebbling complexity Π^∥_cc(G_n) must be as high as possible (to ensure that the amortized cost of computing the function on dedicated hardware is dominated by the cost of memory). – The sequential space-time pebbling complexity Π_st(G_n) should be as close as possible to Π^∥_cc(G_n) (to ensure that using many cores in parallel and amortizing over many instances does not give much of an advantage). In this paper we construct a family of DAGs with best possible parameters in an asymptotic sense, i.e., where Π^∥_cc(G_n) = Ω(n^2/log(n)) (which matches a known upper bound) and Π_st(G_n) is within a constant factor of Π^∥_cc(G_n).
Our analysis relies on a new connection between the pebbling complexity of a DAG and its depth-robustness (DR) – a well-studied combinatorial property. We show that high DR is sufficient for high Π^∥_cc. Alwen and Blocki (CRYPTO’16) showed that high DR is necessary, and so, together, these results fully characterize DAGs with high Π^∥_cc in terms of DR. Complementing these results, we provide new upper and lower bounds on the Π^∥_cc of several important candidate iMHFs from the literature. We give the first lower bounds on the memory hardness of the Catena and Balloon Hashing functions in a parallel model of computation, and we give the first lower bounds of any kind for (a version of) Argon2i. Finally, we describe a new class of pebbling attacks improving on those of Alwen and Blocki (CRYPTO’16). By instantiating these attacks we upper-bound the Π^∥_cc of the Password Hashing Competition winner Argon2i and one of the Balloon Hashing functions by O(n^{1.71}). We also show an upper bound of O(n^{1.625}) for the Catena functions and the two remaining Balloon Hashing functions.}, author = {Alwen, Joel F and Blocki, Jeremiah and Pietrzak, Krzysztof Z}, editor = {Coron, Jean-Sébastien and Nielsen, Jesper Buus}, isbn = {978-3-319-56616-0}, location = {Paris, France}, pages = {3--32}, publisher = {Springer}, title = {{Depth-robust graphs and their cumulative memory complexity}}, doi = {10.1007/978-3-319-56617-7_1}, volume = {10212}, year = {2017}, } @inproceedings{648, abstract = {Pseudoentropy has found many important applications in cryptography and complexity theory. In this paper we focus on a foundational problem that has not been investigated so far: by how much does pseudoentropy (the amount of entropy seen by computationally bounded attackers) differ from its information-theoretic counterpart (the amount seen by unbounded observers), given certain limits on the attacker’s computational power? We provide the following answer for HILL pseudoentropy, which exhibits a threshold behavior around a size exponential in the entropy amount: – If the attacker size (s) and advantage (ε) satisfy s (formula presented), where k is the claimed amount of pseudoentropy, then the pseudoentropy boils down to the information-theoretic smooth entropy. – If s (formula presented), then pseudoentropy could be arbitrarily bigger than the information-theoretic smooth entropy. Besides answering the posed question, we show an elegant application of our result to complexity theory, namely that it implies the classical result on the existence of functions hard to approximate (due to Pippenger). In our approach we utilize non-constructive techniques: the duality of linear programming and the probabilistic method.}, author = {Skórski, Maciej}, editor = {Jäger, Gerhard and Steila, Silvia}, isbn = {978-3-319-55910-0}, location = {Bern, Switzerland}, pages = {600--613}, publisher = {Springer}, title = {{On the complexity of breaking pseudoentropy}}, doi = {10.1007/978-3-319-55911-7_43}, volume = {10185}, year = {2017}, }