@inproceedings{9969, abstract = {Payment channel networks are a promising approach to improve the scalability of cryptocurrencies: they allow performing transactions in a peer-to-peer fashion, along multihop routes in the network, without requiring consensus on the blockchain. However, during the discovery of cost-efficient routes for the transaction, critical information may be revealed about the transacting entities. This paper initiates the study of privacy-preserving route discovery mechanisms for payment channel networks. In particular, we present LightPIR, an approach which allows a client to learn the shortest (or cheapest in terms of fees) path between two nodes without revealing any information about the endpoints of the transaction to the servers. The two main observations which allow for an efficient solution in LightPIR are that: (1) surprisingly, hub labelling algorithms – which were developed to preprocess “street network like” graphs so one can later efficiently compute shortest paths – also perform well for the graphs underlying payment channel networks, and that (2) hub labelling algorithms can be conveniently combined with private information retrieval. LightPIR relies on a simple hub labelling heuristic on top of existing hub labelling algorithms which leverages the specific topological features of cryptocurrency networks to further minimize storage and bandwidth overheads. In a case study considering the Lightning network, we show that our approach is an order of magnitude more efficient compared to a privacy-preserving baseline based on using private information retrieval on a database that stores all-pairs shortest paths.}, author = {Pietrzak, Krzysztof Z and Salem, Iosif and Schmid, Stefan and Yeo, Michelle X}, isbn = {978-1-6654-4501-6}, issn = {1861-2288}, location = {Espoo and Helsinki, Finland}, publisher = {IEEE}, title = {{LightPIR: Privacy-preserving route discovery for payment channel networks}}, doi = {10.23919/IFIPNetworking52078.2021.9472205}, year = {2021}, } @inproceedings{8322, abstract = {Reverse firewalls were introduced at Eurocrypt 2015 by Mironov and Stephens-Davidowitz, as a method for protecting cryptographic protocols against attacks on the devices of the honest parties. In a nutshell: a reverse firewall is placed outside of a device and its goal is to “sanitize” the messages sent by it, in such a way that a malicious device cannot leak its secrets to the outside world. It is typically assumed that the cryptographic devices are attacked in a “functionality-preserving way” (i.e. informally speaking, the functionality of the protocol remains unchanged under these attacks). In their paper, Mironov and Stephens-Davidowitz construct a protocol for passively-secure two-party computations with firewalls, leaving extension of this result to stronger models as an open question. In this paper, we address this problem by constructing a protocol for secure computation with firewalls that has two main advantages over the original protocol from Eurocrypt 2015. Firstly, it is a multiparty computation protocol (i.e. it works for an arbitrary number n of parties, and not just for 2). Secondly, it is secure in much stronger corruption settings, namely in the active corruption model. More precisely: we consider an adversary that can fully corrupt up to n-1 parties, while the remaining parties are corrupted in a functionality-preserving way.
Our core techniques are malleable commitments and malleable non-interactive zero-knowledge, which in particular allow us to create a novel protocol for multiparty augmented coin-tossing into the well with reverse firewalls (that is based on a protocol of Lindell from Crypto 2001).}, author = {Chakraborty, Suvradip and Dziembowski, Stefan and Nielsen, Jesper Buus}, booktitle = {Advances in Cryptology – CRYPTO 2020}, isbn = {9783030568795}, issn = {16113349}, location = {Santa Barbara, CA, United States}, pages = {732--762}, publisher = {Springer Nature}, title = {{Reverse firewalls for actively secure MPCs}}, doi = {10.1007/978-3-030-56880-1_26}, volume = {12171}, year = {2020}, } @inproceedings{8339, abstract = {Discrete Gaussian distributions over lattices are central to lattice-based cryptography, and to the computational and mathematical aspects of lattices more broadly. The literature contains a wealth of useful theorems about the behavior of discrete Gaussians under convolutions and related operations. Yet despite their structural similarities, most of these theorems are formally incomparable, and their proofs tend to be monolithic and written nearly “from scratch,” making them unnecessarily hard to verify, understand, and extend. In this work we present a modular framework for analyzing linear operations on discrete Gaussian distributions. The framework abstracts away the particulars of Gaussians, and usually reduces proofs to the choice of appropriate linear transformations and elementary linear algebra. To showcase the approach, we establish several general properties of discrete Gaussians, and show how to obtain all prior convolution theorems (along with some new ones) as straightforward corollaries. As another application, we describe a self-reduction for Learning With Errors (LWE) that uses a fixed number of samples to generate an unlimited number of additional ones (having somewhat larger error). The distinguishing features of our reduction are its simple analysis in our framework, and its exclusive use of discrete Gaussians without any loss in parameters relative to a prior mixed discrete-and-continuous approach. As a contribution of independent interest, for subgaussian random matrices we prove a singular value concentration bound with explicitly stated constants, and we give tighter heuristics for specific distributions that are commonly used for generating lattice trapdoors. These bounds yield improvements in the concrete bit-security estimates for trapdoor lattice cryptosystems.}, author = {Genise, Nicholas and Micciancio, Daniele and Peikert, Chris and Walter, Michael}, booktitle = {23rd IACR International Conference on the Practice and Theory of Public-Key Cryptography}, isbn = {9783030453732}, issn = {16113349}, location = {Edinburgh, United Kingdom}, pages = {623--651}, publisher = {Springer Nature}, title = {{Improved discrete Gaussian and subgaussian analysis for lattice cryptography}}, doi = {10.1007/978-3-030-45374-9_21}, volume = {12110}, year = {2020}, } @inproceedings{8987, abstract = {Currently several projects aim at designing and implementing protocols for privacy-preserving automated contact tracing to help fight the current pandemic. Those proposals are quite similar, and in their most basic form propose an app for mobile phones which broadcasts frequently changing pseudorandom identifiers via (low energy) Bluetooth, and at the same time, the app stores IDs broadcast by phones in its proximity.
Only if a user is tested positive do they upload either the beacons they broadcast (which is the case in decentralized proposals such as DP-3T, east and west coast PACT or Covid watch) or the beacons they received (as in PEPP-PT or ROBERT) during the last two weeks or so. Vaudenay [eprint 2020/399] observes that this basic scheme (he considers the DP-3T proposal) succumbs to relay and even replay attacks, and proposes more complex interactive schemes which prevent those attacks without giving up too many privacy aspects. Unfortunately interaction is problematic for this application for efficiency and security reasons. The countermeasures that have been suggested so far are either not practical or give up on key privacy aspects. We propose a simple non-interactive variant of the basic protocol that: (security) provably prevents replay and (if location data is available) relay attacks; (privacy) ensures that the data of all parties (even jointly) reveals no information on the location or time where encounters happened; and (efficiency) fits the broadcast message into 128 bits and uses only basic crypto (commitments and secret-key authentication). Towards this end we introduce the concept of “delayed authentication”, which is essentially a message authentication code where verification can be done in two steps: the first doesn’t require the key, and the second doesn’t require the message.}, author = {Pietrzak, Krzysztof Z}, booktitle = {Progress in Cryptology}, isbn = {9783030652760}, issn = {16113349}, location = {Bangalore, India}, pages = {3--15}, publisher = {Springer Nature}, title = {{Delayed authentication: Preventing replay and relay attacks in private contact tracing}}, doi = {10.1007/978-3-030-65277-7_1}, volume = {12578}, year = {2020}, } @inproceedings{7966, abstract = {For 1≤m≤n, we consider a natural m-out-of-n multi-instance scenario for a public-key encryption (PKE) scheme. An adversary, given n independent instances of PKE, wins if he breaks at least m out of the n instances. In this work, we are interested in the scaling factor of PKE schemes, SF, which measures how well the difficulty of breaking m out of the n instances scales in m. That is, a scaling factor SF=ℓ indicates that breaking m out of n instances is at least ℓ times more difficult than breaking one single instance. A PKE scheme with small scaling factor hence provides an ideal target for mass surveillance. In fact, the Logjam attack (CCS 2015) implicitly exploited, among other things, an almost constant scaling factor of ElGamal over finite fields (with shared group parameters). For Hashed ElGamal over elliptic curves, we use the generic group model to argue that the scaling factor depends on the scheme's granularity. In low granularity, meaning each public key contains its independent group parameter, the scheme has optimal scaling factor SF=m; in medium and high granularity, meaning all public keys share the same group parameter, the scheme still has a reasonable scaling factor SF=√m. Our findings underline that instantiating ElGamal over elliptic curves should be preferred to finite fields in a multi-instance scenario. As our main technical contribution, we derive new generic-group lower bounds of Ω(√(mp)) on the difficulty of solving both the m-out-of-n Gap Discrete Logarithm and the m-out-of-n Gap Computational Diffie-Hellman problem over groups of prime order p, extending a recent result by Yun (EUROCRYPT 2015).
We establish the lower bound by studying the hardness of a related computational problem which we call the search-by-hypersurface problem.}, author = {Auerbach, Benedikt and Giacon, Federico and Kiltz, Eike}, booktitle = {Advances in Cryptology – EUROCRYPT 2020}, isbn = {9783030457266}, issn = {1611-3349}, pages = {475--506}, publisher = {Springer Nature}, title = {{Everybody’s a target: Scalability in public-key encryption}}, doi = {10.1007/978-3-030-45727-3_16}, volume = {12107}, year = {2020}, } @phdthesis{7896, abstract = {A search problem lies in the complexity class FNP if a solution to the given instance of the problem can be verified efficiently. The complexity class TFNP consists of all search problems in FNP that are total in the sense that a solution is guaranteed to exist. TFNP contains a host of interesting problems from fields such as algorithmic game theory, computational topology, number theory and combinatorics. Since TFNP is a semantic class, it is unlikely to have a complete problem. Instead, one studies its syntactic subclasses which are defined based on the combinatorial principle used to argue totality. Of particular interest is the subclass PPAD, which contains important problems like computing a Nash equilibrium for bimatrix games and computational counterparts of several fixed-point theorems as complete problems. In the thesis, we undertake the study of average-case hardness of TFNP, and in particular its subclass PPAD. Almost nothing was known about average-case hardness of PPAD before a series of recent results showed how to achieve it using a cryptographic primitive called program obfuscation. However, it is currently not known how to construct program obfuscation from standard cryptographic assumptions. Therefore, it is desirable to relax the assumption under which average-case hardness of PPAD can be shown. In the thesis we take a step in this direction. First, we show that assuming the (average-case) hardness of a number-theoretic problem related to factoring of integers, which we call Iterated-Squaring, PPAD is hard on average in the random-oracle model. Then we strengthen this result to show that the average-case hardness of PPAD reduces to the (adaptive) soundness of the Fiat-Shamir Transform, a well-known technique used to compile a public-coin interactive protocol into a non-interactive one. As a corollary, we obtain average-case hardness for PPAD in the random-oracle model assuming the worst-case hardness of #SAT. Moreover, the above results can all be strengthened to obtain average-case hardness for the class CLS ⊆ PPAD. Our main technical contribution is constructing incrementally-verifiable procedures for computing Iterated-Squaring and #SAT. By incrementally-verifiable, we mean that every intermediate state of the computation includes a proof of its correctness, and the proof can be updated and verified in polynomial time. Previous constructions of such procedures relied on strong, non-standard assumptions. Instead, we introduce a technique called recursive proof-merging to obtain the same from weaker assumptions.}, author = {Kamath Hosdurg, Chethan}, issn = {2663-337X}, pages = {126}, publisher = {Institute of Science and Technology Austria}, title = {{On the average-case hardness of total search problems}}, doi = {10.15479/AT:ISTA:7896}, year = {2020}, } @article{5887, abstract = {Cryptographic security is usually defined as a guarantee that holds except when a bad event with negligible probability occurs, and nothing is guaranteed in that bad case.
However, in settings where such failure can happen with substantial probability, one needs to provide guarantees even for the bad case. A typical example is where a (possibly weak) password is used instead of a secure cryptographic key to protect a session, the bad event being that the adversary correctly guesses the password. In a situation with multiple such sessions, a per-session guarantee is desired: any session for which the password has not been guessed remains secure, independently of whether other sessions have been compromised. A new formalism for stating such gracefully degrading security guarantees is introduced and applied to analyze the examples of password-based message authentication and password-based encryption. While a natural per-message guarantee is achieved for authentication, the situation of password-based encryption is more delicate: a per-session confidentiality guarantee only holds against attackers for which the distribution of password-guessing effort over the sessions is known in advance. In contrast, for more general attackers without such a restriction, a strong, composable notion of security cannot be achieved.}, author = {Demay, Gregory and Gazi, Peter and Maurer, Ueli and Tackmann, Bjorn}, issn = {0926227X}, journal = {Journal of Computer Security}, number = {1}, pages = {75--111}, publisher = {IOS Press}, title = {{Per-session security: Password-based cryptography revisited}}, doi = {10.3233/JCS-181131}, volume = {27}, year = {2019}, } @inproceedings{6528, abstract = {We construct a verifiable delay function (VDF) by showing how the Rivest-Shamir-Wagner time-lock puzzle can be made publicly verifiable. Concretely, we give a statistically sound public-coin protocol to prove that a tuple (N,x,T,y) satisfies y = x^(2^T) (mod N) where the prover doesn’t know the factorization of N and its running time is dominated by solving the puzzle, that is, computing x^(2^T), which is conjectured to require T sequential squarings. To get a VDF we make this protocol non-interactive using the Fiat-Shamir heuristic. The motivation for this work comes from the Chia blockchain design, which uses a VDF as a key ingredient. For typical parameters (T ≤ 2^40, N = 2048), our proofs are of size around 10 KB, verification costs around three RSA exponentiations and computing the proof is 8000 times faster than solving the puzzle even without any parallelism.}, author = {Pietrzak, Krzysztof Z}, booktitle = {10th Innovations in Theoretical Computer Science Conference}, isbn = {978-3-95977-095-8}, issn = {1868-8969}, location = {San Diego, CA, United States}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Simple verifiable delay functions}}, doi = {10.4230/LIPICS.ITCS.2019.60}, volume = {124}, year = {2019}, } @inbook{6726, abstract = {Randomness is an essential part of any secure cryptosystem, but many constructions rely on distributions that are not uniform. This is particularly true for lattice-based cryptosystems, which more often than not make use of discrete Gaussian distributions over the integers. For practical purposes it is crucial to evaluate the impact that approximation errors have on the security of a scheme to provide the best possible trade-off between security and performance. Recent years have seen surprising results allowing the use of relatively low precision while maintaining high levels of security. A key insight in these results is that sampling a distribution with low relative error can provide very strong security guarantees.
Since floating point numbers provide guarantees on the relative approximation error, they seem a suitable tool in this setting, but it is not obvious which sampling algorithms can actually profit from them. While previous works have shown that inversion sampling can be adapted to provide a low relative error (Pöppelmann et al., CHES 2014; Prest, ASIACRYPT 2017), other works have called into question whether this is possible for other sampling techniques (Zheng et al., Eprint report 2018/309). In this work, we consider all sampling algorithms that are popular in the cryptographic setting and analyze the relationship between floating point precision and the resulting relative error. We show that all of the algorithms either natively achieve a low relative error or can be adapted to do so.}, author = {Walter, Michael}, booktitle = {Progress in Cryptology – AFRICACRYPT 2019}, editor = {Buchmann, J and Nitaj, A and Rachidi, T}, isbn = {978-3-0302-3695-3}, issn = {0302-9743}, location = {Rabat, Morocco}, pages = {157--180}, publisher = {Springer Nature}, title = {{Sampling the integers with low relative error}}, doi = {10.1007/978-3-030-23696-0_9}, volume = {11627}, year = {2019}, } @inproceedings{7136, abstract = {It is well established that the notion of min-entropy fails to satisfy the \emph{chain rule} of the form H(X,Y)=H(X|Y)+H(Y), known for Shannon Entropy. Such a property would help to analyze how min-entropy is split among smaller blocks. Problems of this kind arise for example when constructing extractors and dispersers. We show that any sequence of variables exhibits a very strong block-source structure (conditional distributions of blocks are nearly flat) when we \emph{spoil few correlated bits}. This implies, conditioned on the spoiled bits, that \emph{splitting-recombination properties} hold. In particular, we have many nice properties that min-entropy doesn’t obey in general, for example strong chain rules, “information can’t hurt” inequalities, equivalences of average and worst-case conditional entropy definitions and others. Quantitatively, for any sequence X1,…,Xt of random variables over an alphabet X we prove that, when conditioned on m=t⋅O(loglog|X|+loglog(1/ϵ)+logt) bits of auxiliary information, all conditional distributions of the form Xi|X 1 (the original distribution is only 1-wise independent). We observe that the security of PMAC will not increase in general, even if the masks are chosen from a 2-wise independent distribution, and then prove that the security increases to O(q^2/2^n), if the τi are 4-wise independent. Due to simple extension attacks, this is the best bound one can hope for, using any distribution on the masks. Whether 3-wise independence is already sufficient to get this level of security is left as an open problem.}, author = {Gazi, Peter and Pietrzak, Krzysztof Z and Rybar, Michal}, issn = {2519-173X}, journal = {IACR Transactions on Symmetric Cryptology}, number = {2}, pages = {145--161}, publisher = {Ruhr University Bochum}, title = {{The exact security of PMAC}}, doi = {10.13154/TOSC.V2016.I2.145-161}, volume = {2016}, year = {2017}, } @inproceedings{559, abstract = {Proofs of space (PoS) were suggested as a more ecological and economical alternative to proofs of work, which are currently used in blockchain designs like Bitcoin. The existing PoS are based on rather sophisticated graph pebbling lower bounds.
Much simpler and in several aspects more efficient schemes based on inverting random functions have been suggested, but they don’t give meaningful security guarantees due to existing time-memory trade-offs. In particular, Hellman showed that any permutation over a domain of size N can be inverted in time T by an algorithm that is given S bits of auxiliary information whenever S·T ≈ N. For functions Hellman gives a weaker attack with S^2·T ≈ N^2 (e.g., S = T ≈ N^(2/3)). To prove lower bounds, one considers an adversary who has access to an oracle f: [N] → [N] and can make T oracle queries. The best known lower bound is S·T ∈ Ω(N) and holds for random functions and permutations. We construct functions that provably require more time and/or space to invert. Specifically, for any constant k we construct a function [N] → [N] that cannot be inverted unless S^k·T ∈ Ω(N^k) (in particular, S = T ≈ N^(k/(k+1))). Our construction does not contradict Hellman’s time-memory trade-off, because it cannot be efficiently evaluated in forward direction. However, its entire function table can be computed in time quasilinear in N, which is sufficient for the PoS application. Our simplest construction is built from a random function oracle g: [N] × [N] → [N] and a random permutation oracle f: [N] → [N] and is defined as h(x) = g(x, x′) where f(x) = π(f(x′)) with π being any involution without a fixed point, e.g. flipping all the bits. For this function we prove that any adversary who gets S bits of auxiliary information, makes at most T oracle queries, and inverts h on an ϵ fraction of outputs must satisfy S^2·T ∈ Ω(ϵ^2 N^2).}, author = {Abusalah, Hamza M and Alwen, Joel F and Cohen, Bram and Khilko, Danylo and Pietrzak, Krzysztof Z and Reyzin, Leonid}, isbn = {978-331970696-2}, location = {Hong Kong, China}, pages = {357 -- 379}, publisher = {Springer}, title = {{Beyond Hellman’s time-memory trade-offs with applications to proofs of space}}, doi = {10.1007/978-3-319-70697-9_13}, volume = {10625}, year = {2017}, } @inproceedings{637, abstract = {For many cryptographic primitives, it is relatively easy to achieve selective security (where the adversary commits a priori to some of the choices to be made later in the attack) but appears difficult to achieve the more natural notion of adaptive security (where the adversary can make all choices on the go as the attack progresses). A series of several recent works shows how to cleverly achieve adaptive security in several such scenarios including generalized selective decryption (Panjwani, TCC ’07 and Fuchsbauer et al., CRYPTO ’15), constrained PRFs (Fuchsbauer et al., ASIACRYPT ’14), and Yao garbled circuits (Jafargholi and Wichs, TCC ’16b). Although the above works expressed vague intuition that they share a common technique, the connection was never made precise. In this work we present a new framework that connects all of these works and allows us to present them in a unified and simplified fashion. Moreover, we use the framework to derive a new result for adaptively secure secret sharing over access structures defined via monotone circuits. We envision that further applications will follow in the future. Underlying our framework is the following simple idea. It is well known that selective security, where the adversary commits to n bits of information about his future choices, automatically implies adaptive security at the cost of amplifying the adversary’s advantage by a factor of up to 2^n.
However, in some cases the proof of selective security proceeds via a sequence of hybrids, where each pair of adjacent hybrids locally only requires some smaller partial information consisting of m ≪ n bits. The partial information needed might be completely different between different pairs of hybrids, and if we look across all the hybrids we might rely on the entire n-bit commitment. Nevertheless, the above is sufficient to prove adaptive security, at the cost of amplifying the adversary’s advantage by a factor of only 2^m ≪ 2^n. In all of our examples using the above framework, the different hybrids are captured by some sort of a graph pebbling game and the amount of information that the adversary needs to commit to in each pair of hybrids is bounded by the maximum number of pebbles in play at any point in time. Therefore, coming up with better strategies for proving adaptive security translates to various pebbling strategies for different types of graphs.}, author = {Jafargholi, Zahra and Kamath Hosdurg, Chethan and Klein, Karen and Komargodski, Ilan and Pietrzak, Krzysztof Z and Wichs, Daniel}, editor = {Katz, Jonathan and Shacham, Hovav}, isbn = {978-331963687-0}, location = {Santa Barbara, CA, United States}, pages = {133 -- 163}, publisher = {Springer}, title = {{Be adaptive, avoid overcommitting}}, doi = {10.1007/978-3-319-63688-7_5}, volume = {10401}, year = {2017}, } @inproceedings{1174, abstract = {Security of cryptographic applications is typically defined by security games. The adversary, within certain resources, cannot win with probability much better than 0 (for unpredictability applications, like one-way functions) or much better than 1/2 (for indistinguishability applications, for instance encryption schemes). In so-called squared-friendly applications the winning probability of the adversary, for different values of the application's secret randomness, is not only close to 0 or 1/2 on average, but also concentrated in the sense that its second central moment is small. The class of squared-friendly applications, which contains all unpredictability applications and many indistinguishability applications, is particularly important for key derivation. Barak et al. observed that for square-friendly applications one can beat the "RT-bound", extracting secure keys with significantly smaller entropy loss. In turn Dodis and Yu showed that in squared-friendly applications one can directly use a "weak" key, which has only high entropy, as a secure key. In this paper we give sharp lower bounds on square security assuming security for "weak" keys. We show that any application which is either (a) secure with weak keys or (b) allows for entropy savings for keys derived by universal hashing, must be square-friendly. Quantitatively, our lower bounds match the positive results of Dodis and Yu and Barak et al. (TCC'13, CRYPTO'11). Hence, they can be understood as a general characterization of squared-friendly applications. While the positive results on squared-friendly applications were derived by one clever application of the Cauchy-Schwarz Inequality, for tight lower bounds we need more machinery.
In our approach we use convex optimization techniques and some theory of circular matrices.}, author = {Skórski, Maciej}, issn = {18688969}, location = {Hannover, Germany}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Lower bounds on key derivation for square-friendly applications}}, doi = {10.4230/LIPIcs.STACS.2017.57}, volume = {66}, year = {2017}, } @inproceedings{1176, abstract = {The algorithm Argon2i-B of Biryukov, Dinu and Khovratovich is currently being considered by the IRTF (Internet Research Task Force) as a new de-facto standard for password hashing. An older version (Argon2i-A) of the same algorithm was chosen as the winner of the recent Password Hashing Competition. An important competitor to Argon2i-B is the recently introduced Balloon Hashing (BH) algorithm of Corrigan-Gibbs, Boneh and Schechter. A key security desideratum for any such algorithm is that evaluating it (even using a custom device) requires a large amount of memory amortized across multiple instances. Alwen and Blocki (CRYPTO 2016) introduced a class of theoretical attacks against Argon2i-A and BH. While these attacks yield large asymptotic reductions in the amount of memory, it was not, a priori, clear whether (1) they can be extended to the newer Argon2i-B, (2) the attacks are effective on any algorithm for practical parameter ranges (e.g., 1GB of memory), and (3) they can be effectively instantiated against any algorithm under realistic hardware constraints. In this work we answer all three of these questions in the affirmative for all three algorithms. This is also the first work to analyze the security of Argon2i-B. In more detail, we extend the theoretical attacks of Alwen and Blocki (CRYPTO 2016) to the recent Argon2i-B proposal demonstrating severe asymptotic deficiencies in its security. Next we introduce several novel heuristics for improving the attack's concrete memory efficiency even when on-chip memory bandwidth is bounded. We then simulate our attacks on randomly sampled Argon2i-A, Argon2i-B and BH instances and measure the resulting memory consumption for various practical parameter ranges and for a variety of upper bounds on the amount of parallelism available to the attacker. Finally we describe, implement, and test a new heuristic for applying the Alwen-Blocki attack to functions employing a technique developed by Corrigan-Gibbs et al. for improving concrete security of memory-hard functions. We analyze the collected data and show the effects various parameters have on the memory consumption of the attack. In particular, we can draw several interesting conclusions about the level of security provided by these functions. · For the Alwen-Blocki attack to fail against practical memory parameters, Argon2i-B must be instantiated with more than 10 passes on memory - beyond the "paranoid" parameter setting in the current IRTF proposal. · The technique of Corrigan-Gibbs for improving security can also be overcome by the Alwen-Blocki attack under realistic hardware constraints.
· On a positive note, both the asymptotic and concrete security of Argon2i-B seem to improve on that of Argon2i-A.}, author = {Alwen, Joel F and Blocki, Jeremiah}, isbn = {978-150905761-0}, location = {Paris, France}, publisher = {IEEE}, title = {{Towards practical attacks on Argon2i and balloon hashing}}, doi = {10.1109/EuroSP.2017.47}, year = {2017}, } @article{1187, abstract = {We construct efficient authentication protocols and message authentication codes (MACs) whose security can be reduced to the learning parity with noise (LPN) problem. Despite a large body of work—starting with the HB protocol of Hopper and Blum in 2001—until now it was not even known how to construct an efficient authentication protocol from LPN which is secure against man-in-the-middle attacks. A MAC implies such a (two-round) protocol.}, author = {Kiltz, Eike and Pietrzak, Krzysztof Z and Venturi, Daniele and Cash, David and Jain, Abhishek}, journal = {Journal of Cryptology}, number = {4}, pages = {1238 -- 1275}, publisher = {Springer}, title = {{Efficient authentication from hard learning problems}}, doi = {10.1007/s00145-016-9247-3}, volume = {30}, year = {2017}, } @article{1177, abstract = {Boldyreva, Palacio and Warinschi introduced a multiple forking game as an extension of general forking. The notion of (multiple) forking is a useful abstraction from the actual simulation of a cryptographic scheme to the adversary in a security reduction, and is achieved through the intermediary of a so-called wrapper algorithm. Multiple forking has turned out to be a useful tool in the security argument of several cryptographic protocols. However, a reduction employing multiple forking incurs a significant degradation of (Formula presented.), where (Formula presented.) denotes the upper bound on the underlying random oracle calls and (Formula presented.), the number of forkings. In this work we take a closer look at the reasons for the degradation with a tighter security bound in mind. We nail down the exact set of conditions for success in the multiple forking game. A careful analysis of the cryptographic schemes and corresponding security reduction employing multiple forking leads to the formulation of ‘dependence’ and ‘independence’ conditions pertaining to the output of the wrapper in different rounds. Based on the (in)dependence conditions we propose a general framework of multiple forking and a General Multiple Forking Lemma. Leveraging (in)dependence to the full allows us to improve the degradation factor in the multiple forking game by a factor of (Formula presented.). By implication, the cost of a single forking involving two random oracles (augmented forking) matches that involving a single random oracle (elementary forking). Finally, we study the effect of these observations on the concrete security of existing schemes employing multiple forking.
We conclude that by careful design of the protocol (and the wrapper in the security reduction) it is possible to harness our observations to the full extent.}, author = {Kamath Hosdurg, Chethan and Chatterjee, Sanjit}, journal = {Algorithmica}, number = {4}, pages = {1321 -- 1362}, publisher = {Springer}, title = {{A closer look at multiple-forking: Leveraging (in)dependence for a tighter bound}}, doi = {10.1007/s00453-015-9997-6}, volume = {74}, year = {2016}, } @inproceedings{1179, abstract = {Computational notions of entropy have recently found many applications, including leakage-resilient cryptography, deterministic encryption or memory delegation. The two main types of results which make computational notions so useful are (1) Chain rules, which quantify by how much the computational entropy of a variable decreases if conditioned on some other variable, and (2) Transformations, which quantify to which extent one type of entropy implies another. Such chain rules and transformations typically lose a significant amount in quality of the entropy, and are the reason why applying these results one gets rather weak quantitative security bounds. In this paper we for the first time prove lower bounds in this context, showing that existing results for transformations are, unfortunately, basically optimal for non-adaptive black-box reductions (and it’s hard to imagine how non-black-box reductions or adaptivity could be useful here). A variable X has k bits of HILL entropy of quality (ϵ,s) if there exists a variable Y with k bits of min-entropy which cannot be distinguished from X with advantage ϵ by distinguishing circuits of size s. A weaker notion is Metric entropy, where we switch quantifiers, and only require that for every distinguisher of size s, such a Y exists. We first describe our result concerning transformations. By definition, HILL implies Metric without any loss in quality. Metric entropy often comes up in applications, but must be transformed to HILL for meaningful security guarantees. The best known result states that if a variable X has k bits of Metric entropy of quality (ϵ,s), then it has k bits of HILL with quality (2ϵ, s⋅ϵ^2). We show that this loss of a factor Ω(ϵ^(−2)) in circuit size is necessary. In fact, we show the stronger result that this loss is already necessary when transforming so-called deterministic real-valued Metric entropy to randomised boolean Metric entropy (both these variants of Metric entropy are implied by HILL without loss in quality). The chain rule for HILL entropy states that if X has k bits of HILL entropy of quality (ϵ,s), then for any variable Z of length m, X conditioned on Z has k−m bits of HILL entropy with quality (ϵ, s⋅ϵ^2/2^m). We show that a loss of Ω(2^m/ϵ) in circuit size is necessary here. Note that this still leaves a gap of ϵ between the known bound and our lower bound.}, author = {Pietrzak, Krzysztof Z and Skórski, Maciej}, location = {Beijing, China}, pages = {183 -- 203}, publisher = {Springer}, title = {{Pseudoentropy: Lower-bounds for chain rules and transformations}}, doi = {10.1007/978-3-662-53641-4_8}, volume = {9985}, year = {2016}, } @inproceedings{1231, abstract = {We study the time- and memory-complexities of the problem of computing labels of (multiple) randomly selected challenge-nodes in a directed acyclic graph. The w-bit label of a node is the hash of the labels of its parents, and the hash function is modeled as a random oracle. Specific instances of this problem underlie both proofs of space [Dziembowski et al.
CRYPTO’15] as well as popular memory-hard functions like scrypt. As our main tool, we introduce the new notion of a probabilistic parallel entangled pebbling game, a new type of combinatorial pebbling game on a graph, which is closely related to the labeling game on the same graph. As a first application of our framework, we prove that for scrypt, when the underlying hash function is invoked n times, the cumulative memory complexity (CMC) (a notion recently introduced by Alwen and Serbinenko (STOC’15) to capture amortized memory-hardness for parallel adversaries) is at least Ω(w · (n/log(n))^2). This bound holds for adversaries that can store many natural functions of the labels (e.g., linear combinations), but still not arbitrary functions thereof. We then introduce and study a combinatorial quantity, and show how a sufficiently small upper bound on it (which we conjecture) extends our CMC bound for scrypt to hold against arbitrary adversaries. We also show that such an upper bound solves the main open problem for proofs-of-space protocols: namely, establishing that the time complexity of computing the label of a random node in a graph on n nodes (given an initial kw-bit state) reduces tightly to the time complexity for black pebbling on the same graph (given an initial k-node pebbling).}, author = {Alwen, Joel F and Chen, Binyi and Kamath Hosdurg, Chethan and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z and Tessaro, Stefano}, location = {Vienna, Austria}, pages = {358 -- 387}, publisher = {Springer}, title = {{On the complexity of scrypt and proofs of space in the parallel random oracle model}}, doi = {10.1007/978-3-662-49896-5_13}, volume = {9666}, year = {2016}, } @inproceedings{1233, abstract = {About three decades ago it was realized that implementing private channels between parties which can be adaptively corrupted requires an encryption scheme that is secure against selective opening attacks. Whether standard (IND-CPA) security implies security against selective opening attacks has been a major open question since. The only known reduction from selective opening to IND-CPA security loses an exponential factor. A polynomial reduction is only known for the very special case where the distribution considered in the selective opening security experiment is a product distribution, i.e., the messages are sampled independently from each other. In this paper we give a reduction whose loss is quantified via the dependence graph (where message dependencies correspond to edges) of the underlying message distribution. In particular, for some concrete distributions including Markov distributions, our reduction is polynomial.}, author = {Fuchsbauer, Georg and Heuer, Felix and Kiltz, Eike and Pietrzak, Krzysztof Z}, location = {Tel Aviv, Israel}, pages = {282 -- 305}, publisher = {Springer}, title = {{Standard security does imply security against selective opening for Markov distributions}}, doi = {10.1007/978-3-662-49096-9_12}, volume = {9562}, year = {2016}, } @inproceedings{1365, abstract = {A memory-hard function (MHF) f is equipped with a space cost σ and time cost τ parameter such that repeatedly computing fσ,τ on an application-specific integrated circuit (ASIC) is not economically advantageous relative to a general-purpose computer. Technically we would like that any (generalized) circuit for evaluating an iMHF fσ,τ has area × time (AT) complexity at Θ(σ^2 ∗ τ).
A data-independent MHF (iMHF) has the added property that it can be computed with almost optimal memory and time complexity by an algorithm which accesses memory in a pattern independent of the input value. Such functions can be specified by fixing a directed acyclic graph (DAG) G on n = Θ(σ ∗ τ) nodes representing its computation graph. In this work we develop new tools for analyzing iMHFs. First we define and motivate a new complexity measure capturing the amount of energy (i.e. electricity) required to compute a function. We argue that, in practice, this measure is at least as important as the more traditional AT-complexity. Next we describe an algorithm A for repeatedly evaluating an iMHF based on an arbitrary DAG G. We upper-bound both its energy and AT complexities per instance evaluated in terms of a certain combinatorial property of G. Next we instantiate our attack for several general classes of DAGs which include those underlying many of the most important iMHF candidates in the literature. In particular, we obtain the following results which hold for all choices of parameters σ and τ (and thread-count) such that n = σ ∗ τ. - The Catena-Dragonfly function of [FLW13] has AT and energy complexities O(n^1.67). - The Catena-Butterfly function of [FLW13] has complexities O(n^1.67). - The Double-Buffer and the Linear functions of [CGBS16] both have complexities in O(n^1.67). - The Argon2i function of [BDK15] (winner of the Password Hashing Competition [PHC]) has complexities O(n^(7/4) log(n)). - The Single-Buffer function of [CGBS16] has complexities O(n^(7/4) log(n)). - Any iMHF can be computed by an algorithm with complexities O(n^2/log^(1−ε)(n)) for all ε > 0. In particular when τ = 1 this shows that the goal of constructing an iMHF with AT-complexity Θ(σ^2 ∗ τ) is unachievable. Along the way we prove a lemma upper-bounding the depth-robustness of any DAG which may prove to be of independent interest.}, author = {Alwen, Joel F and Blocki, Jeremiah}, location = {Santa Barbara, CA, USA}, pages = {241 -- 271}, publisher = {Springer}, title = {{Efficiently computing data-independent memory-hard functions}}, doi = {10.1007/978-3-662-53008-5_9}, volume = {9815}, year = {2016}, } @inproceedings{1366, abstract = {We study the problem of devising provably secure PRNGs with input based on the sponge paradigm. Such constructions are very appealing, as efficient software/hardware implementations of SHA-3 can easily be translated into a PRNG in a nearly black-box way. The only existing sponge-based construction, proposed by Bertoni et al. (CHES 2010), fails to achieve the security notion of robustness recently considered by Dodis et al. (CCS 2013), for two reasons: (1) The construction is deterministic, and thus there are high-entropy input distributions on which the construction fails to extract random bits, and (2) The construction is not forward secure, and the presented solutions aiming at restoring forward security have not been rigorously analyzed. We propose a seeded variant of Bertoni et al.’s PRNG with input which we prove secure in the sense of robustness, delivering in particular concrete security bounds. On the way, we make what we believe to be an important conceptual contribution, developing a variant of the security framework of Dodis et al. tailored to the ideal permutation model that captures PRNG security in settings where the weakly random inputs are provided from a large class of possible adversarial samplers which are also allowed to query the random permutation.
As a further application of our techniques, we also present an efficient sponge-based key-derivation function (which can be instantiated from SHA-3 in a black-box fashion), which we also prove secure when fed with samples from permutation-dependent distributions.}, author = {Gazi, Peter and Tessaro, Stefano}, location = {Vienna, Austria}, pages = {87 -- 116}, publisher = {Springer}, title = {{Provably robust sponge-based PRNGs and KDFs}}, doi = {10.1007/978-3-662-49890-3_4}, volume = {9665}, year = {2016}, } @article{1592, abstract = {A modular approach to constructing cryptographic protocols leads to simple designs but often inefficient instantiations. On the other hand, ad hoc constructions may yield efficient protocols at the cost of losing conceptual simplicity. We suggest a new design paradigm, structure-preserving cryptography, that provides a way to construct modular protocols with reasonable efficiency while retaining conceptual simplicity. A cryptographic scheme over a bilinear group is called structure-preserving if its public inputs and outputs consist of elements from the bilinear groups and their consistency can be verified by evaluating pairing-product equations. As structure-preserving schemes smoothly interoperate with each other, they are useful as building blocks in modular design of cryptographic applications. This paper introduces structure-preserving commitment and signature schemes over bilinear groups with several desirable properties. The commitment schemes include homomorphic, trapdoor and length-reducing commitments to group elements, and the structure-preserving signature schemes are the first ones that yield constant-size signatures on multiple group elements. A structure-preserving signature scheme is called automorphic if the public keys lie in the message space, which cannot be achieved by compressing inputs via a cryptographic hash function, as this would destroy the mathematical structure we are trying to preserve. Automorphic signatures can be used for building certification chains underlying privacy-preserving protocols. Among a vast number of applications of structure-preserving protocols, we present an efficient round-optimal blind-signature scheme and a group signature scheme with an efficient and concurrently secure protocol for enrolling new members.}, author = {Abe, Masayuki and Fuchsbauer, Georg and Groth, Jens and Haralambiev, Kristiyan and Ohkubo, Miyako}, journal = {Journal of Cryptology}, number = {2}, pages = {363 -- 421}, publisher = {Springer}, title = {{Structure preserving signatures and commitments to group elements}}, doi = {10.1007/s00145-014-9196-7}, volume = {29}, year = {2016}, } @inproceedings{1225, abstract = {At Crypto 2015 Fuchsbauer, Hanser and Slamanig (FHS) presented the first standard-model construction of efficient round-optimal blind signatures that does not require complexity leveraging. It is conceptually simple and builds on the primitive of structure-preserving signatures on equivalence classes (SPS-EQ). FHS prove the unforgeability of their scheme assuming EUF-CMA security of the SPS-EQ scheme and hardness of a version of the DH inversion problem. Blindness under adversarially chosen keys is proven under an interactive variant of the DDH assumption. We propose a variant of their scheme whose blindness can be proven under a non-interactive assumption, namely a variant of the bilinear DDH assumption.
We moreover prove its unforgeability assuming only unforgeability of the underlying SPS-EQ, without the additional assumptions needed for the FHS scheme.}, author = {Fuchsbauer, Georg and Hanser, Christian and Kamath Hosdurg, Chethan and Slamanig, Daniel}, location = {Amalfi, Italy}, pages = {391 -- 408}, publisher = {Springer}, title = {{Practical round-optimal blind signatures in the standard model from weaker assumptions}}, doi = {10.1007/978-3-319-44618-9_21}, volume = {9841}, year = {2016}, }