@inproceedings{9969, abstract = {Payment channel networks are a promising approach to improve the scalability of cryptocurrencies: they allow transactions to be performed in a peer-to-peer fashion, along multihop routes in the network, without requiring consensus on the blockchain. However, during the discovery of cost-efficient routes for the transaction, critical information may be revealed about the transacting entities. This paper initiates the study of privacy-preserving route discovery mechanisms for payment channel networks. In particular, we present LightPIR, an approach which allows a client to learn the shortest (or cheapest in terms of fees) path between two nodes without revealing any information about the endpoints of the transaction to the servers. The two main observations which allow for an efficient solution in LightPIR are that: (1) surprisingly, hub labelling algorithms – which were developed to preprocess “street-network-like” graphs so one can later efficiently compute shortest paths – also perform well for the graphs underlying payment channel networks, and that (2) hub labelling algorithms can be conveniently combined with private information retrieval. LightPIR relies on a simple hub labelling heuristic on top of existing hub labelling algorithms which leverages the specific topological features of cryptocurrency networks to further minimize storage and bandwidth overheads. In a case study considering the Lightning network, we show that our approach is an order of magnitude more efficient than a privacy-preserving baseline that uses private information retrieval on a database storing all-pairs shortest paths.}, author = {Pietrzak, Krzysztof Z and Salem, Iosif and Schmid, Stefan and Yeo, Michelle X}, isbn = {978-1-6654-4501-6}, issn = {1861-2288}, location = {Espoo and Helsinki, Finland}, publisher = {IEEE}, title = {{LightPIR: Privacy-preserving route discovery for payment channel networks}}, doi = {10.23919/IFIPNetworking52078.2021.9472205}, year = {2021}, } @inproceedings{8322, abstract = {Reverse firewalls were introduced at Eurocrypt 2015 by Mironov and Stephens-Davidowitz, as a method for protecting cryptographic protocols against attacks on the devices of the honest parties. In a nutshell: a reverse firewall is placed outside of a device and its goal is to “sanitize” the messages sent by it, in such a way that a malicious device cannot leak its secrets to the outside world. It is typically assumed that the cryptographic devices are attacked in a “functionality-preserving way” (i.e. informally speaking, the functionality of the protocol remains unchanged under these attacks). In their paper, Mironov and Stephens-Davidowitz construct a protocol for passively-secure two-party computation with firewalls, leaving the extension of this result to stronger models as an open question. In this paper, we address this problem by constructing a protocol for secure computation with firewalls that has two main advantages over the original protocol from Eurocrypt 2015. Firstly, it is a multiparty computation protocol (i.e. it works for an arbitrary number n of parties, and not just for 2). Secondly, it is secure in much stronger corruption settings, namely in the active corruption model. More precisely: we consider an adversary that can fully corrupt up to n−1 parties, while the remaining parties are corrupted in a functionality-preserving way.
Our core techniques are malleable commitments and malleable non-interactive zero-knowledge, which in particular allow us to create a novel protocol for multiparty augmented coin-tossing into the well with reverse firewalls (based on a protocol of Lindell from Crypto 2001).}, author = {Chakraborty, Suvradip and Dziembowski, Stefan and Nielsen, Jesper Buus}, booktitle = {Advances in Cryptology – CRYPTO 2020}, isbn = {9783030568795}, issn = {1611-3349}, location = {Santa Barbara, CA, United States}, pages = {732--762}, publisher = {Springer Nature}, title = {{Reverse firewalls for actively secure MPCs}}, doi = {10.1007/978-3-030-56880-1_26}, volume = {12171}, year = {2020}, } @inproceedings{8339, abstract = {Discrete Gaussian distributions over lattices are central to lattice-based cryptography, and to the computational and mathematical aspects of lattices more broadly. The literature contains a wealth of useful theorems about the behavior of discrete Gaussians under convolutions and related operations. Yet despite their structural similarities, most of these theorems are formally incomparable, and their proofs tend to be monolithic and written nearly “from scratch,” making them unnecessarily hard to verify, understand, and extend. In this work we present a modular framework for analyzing linear operations on discrete Gaussian distributions. The framework abstracts away the particulars of Gaussians, and usually reduces proofs to the choice of appropriate linear transformations and elementary linear algebra. To showcase the approach, we establish several general properties of discrete Gaussians, and show how to obtain all prior convolution theorems (along with some new ones) as straightforward corollaries. As another application, we describe a self-reduction for Learning With Errors (LWE) that uses a fixed number of samples to generate an unlimited number of additional ones (having somewhat larger error). The distinguishing features of our reduction are its simple analysis in our framework, and its exclusive use of discrete Gaussians without any loss in parameters relative to a prior mixed discrete-and-continuous approach. As a contribution of independent interest, for subgaussian random matrices we prove a singular value concentration bound with explicitly stated constants, and we give tighter heuristics for specific distributions that are commonly used for generating lattice trapdoors. These bounds yield improvements in the concrete bit-security estimates for trapdoor lattice cryptosystems.}, author = {Genise, Nicholas and Micciancio, Daniele and Peikert, Chris and Walter, Michael}, booktitle = {23rd IACR International Conference on the Practice and Theory of Public-Key Cryptography}, isbn = {9783030453732}, issn = {1611-3349}, location = {Edinburgh, United Kingdom}, pages = {623--651}, publisher = {Springer Nature}, title = {{Improved discrete Gaussian and subgaussian analysis for lattice cryptography}}, doi = {10.1007/978-3-030-45374-9_21}, volume = {12110}, year = {2020}, } @inproceedings{8987, abstract = {Currently, several projects aim at designing and implementing protocols for privacy-preserving automated contact tracing to help fight the current pandemic. These proposals are quite similar, and in their most basic form they propose an app for mobile phones which broadcasts frequently changing pseudorandom identifiers via (low energy) Bluetooth and, at the same time, stores the IDs broadcast by phones in its proximity.
Only if a user is tested positive do they upload either the beacons they broadcast (which is the case in decentralized proposals such as DP-3T, east and west coast PACT, or Covid Watch) or the beacons they received (as in PEPP-PT or ROBERT) during the last two weeks or so. Vaudenay [eprint 2020/399] observes that this basic scheme (he considers the DP-3T proposal) succumbs to relay and even replay attacks, and proposes more complex interactive schemes which prevent those attacks without giving up too many privacy aspects. Unfortunately, interaction is problematic for this application for efficiency and security reasons. The countermeasures that have been suggested so far are either not practical or give up on key privacy aspects. We propose a simple non-interactive variant of the basic protocol with the following properties. (Security) It provably prevents replay and (if location data is available) relay attacks. (Privacy) The data of all parties (even jointly) reveals no information on the location or time where encounters happened. (Efficiency) The broadcast message fits into 128 bits and uses only basic crypto (commitments and secret-key authentication). Towards this end we introduce the concept of “delayed authentication”, which is essentially a message authentication code whose verification can be done in two steps, where the first doesn’t require the key and the second doesn’t require the message.}, author = {Pietrzak, Krzysztof Z}, booktitle = {Progress in Cryptology – INDOCRYPT 2020}, isbn = {9783030652760}, issn = {1611-3349}, location = {Bangalore, India}, pages = {3--15}, publisher = {Springer Nature}, title = {{Delayed authentication: Preventing replay and relay attacks in private contact tracing}}, doi = {10.1007/978-3-030-65277-7_1}, volume = {12578}, year = {2020}, } @inproceedings{7966, abstract = {For 1≤m≤n, we consider a natural m-out-of-n multi-instance scenario for a public-key encryption (PKE) scheme. An adversary, given n independent instances of PKE, wins if he breaks at least m out of the n instances. In this work, we are interested in the scaling factor of PKE schemes, SF, which measures how well the difficulty of breaking m out of the n instances scales in m. That is, a scaling factor SF=ℓ indicates that breaking m out of n instances is at least ℓ times more difficult than breaking one single instance. A PKE scheme with a small scaling factor hence provides an ideal target for mass surveillance. In fact, the Logjam attack (CCS 2015) implicitly exploited, among other things, an almost constant scaling factor of ElGamal over finite fields (with shared group parameters). For Hashed ElGamal over elliptic curves, we use the generic group model to argue that the scaling factor depends on the scheme's granularity. In low granularity, meaning each public key contains its own independent group parameter, the scheme has optimal scaling factor SF=m; in medium and high granularity, meaning all public keys share the same group parameter, the scheme still has a reasonable scaling factor SF=√m. Our findings underline that ElGamal should preferably be instantiated over elliptic curves rather than finite fields in a multi-instance scenario. As our main technical contribution, we derive new generic-group lower bounds of Ω(√(mp)) on the difficulty of solving both the m-out-of-n Gap Discrete Logarithm and the m-out-of-n Gap Computational Diffie-Hellman problem over groups of prime order p, extending a recent result by Yun (EUROCRYPT 2015).
We establish the lower bound by studying the hardness of a related computational problem which we call the search-by-hypersurface problem.}, author = {Auerbach, Benedikt and Giacon, Federico and Kiltz, Eike}, booktitle = {Advances in Cryptology – EUROCRYPT 2020}, isbn = {9783030457266}, issn = {1611-3349}, pages = {475--506}, publisher = {Springer Nature}, title = {{Everybody’s a target: Scalability in public-key encryption}}, doi = {10.1007/978-3-030-45727-3_16}, volume = {12107}, year = {2020}, } @phdthesis{7896, abstract = {A search problem lies in the complexity class FNP if a solution to the given instance of the problem can be verified efficiently. The complexity class TFNP consists of all search problems in FNP that are total in the sense that a solution is guaranteed to exist. TFNP contains a host of interesting problems from fields such as algorithmic game theory, computational topology, number theory and combinatorics. Since TFNP is a semantic class, it is unlikely to have a complete problem. Instead, one studies its syntactic subclasses, which are defined based on the combinatorial principle used to argue totality. Of particular interest is the subclass PPAD, which contains, as complete problems, important problems such as computing a Nash equilibrium for bimatrix games and computational counterparts of several fixed-point theorems. In the thesis, we undertake the study of average-case hardness of TFNP, and in particular of its subclass PPAD. Almost nothing was known about average-case hardness of PPAD before a series of recent results showed how to achieve it using a cryptographic primitive called program obfuscation. However, it is currently not known how to construct program obfuscation from standard cryptographic assumptions. Therefore, it is desirable to relax the assumption under which average-case hardness of PPAD can be shown. In the thesis we take a step in this direction. First, we show that, assuming the (average-case) hardness of a number-theoretic problem related to the factoring of integers, which we call Iterated-Squaring, PPAD is hard on average in the random-oracle model. Then we strengthen this result to show that the average-case hardness of PPAD reduces to the (adaptive) soundness of the Fiat-Shamir Transform, a well-known technique used to compile a public-coin interactive protocol into a non-interactive one. As a corollary, we obtain average-case hardness for PPAD in the random-oracle model assuming the worst-case hardness of #SAT. Moreover, the above results can all be strengthened to obtain average-case hardness for the class CLS ⊆ PPAD. Our main technical contribution is constructing incrementally-verifiable procedures for computing Iterated-Squaring and #SAT. By incrementally-verifiable, we mean that every intermediate state of the computation includes a proof of its correctness, and that the proof can be updated and verified in polynomial time. Previous constructions of such procedures relied on strong, non-standard assumptions. Instead, we introduce a technique called recursive proof-merging to obtain the same from weaker assumptions.}, author = {Kamath Hosdurg, Chethan}, issn = {2663-337X}, pages = {126}, publisher = {Institute of Science and Technology Austria}, title = {{On the average-case hardness of total search problems}}, doi = {10.15479/AT:ISTA:7896}, year = {2020}, } @article{5887, abstract = {Cryptographic security is usually defined as a guarantee that holds except when a bad event with negligible probability occurs, and nothing is guaranteed in that bad case.
However, in settings where such failure can happen with substantial probability, one needs to provide guarantees even for the bad case. A typical example is where a (possibly weak) password is used instead of a secure cryptographic key to protect a session, the bad event being that the adversary correctly guesses the password. In a situation with multiple such sessions, a per-session guarantee is desired: any session for which the password has not been guessed remains secure, independently of whether other sessions have been compromised. A new formalism for stating such gracefully degrading security guarantees is introduced and applied to analyze the examples of password-based message authentication and password-based encryption. While a natural per-message guarantee is achieved for authentication, the situation of password-based encryption is more delicate: a per-session confidentiality guarantee only holds against attackers for which the distribution of password-guessing effort over the sessions is known in advance. In contrast, for more general attackers without such a restriction, a strong, composable notion of security cannot be achieved.}, author = {Demay, Gregory and Gazi, Peter and Maurer, Ueli and Tackmann, Bjorn}, issn = {0926-227X}, journal = {Journal of Computer Security}, number = {1}, pages = {75--111}, publisher = {IOS Press}, title = {{Per-session security: Password-based cryptography revisited}}, doi = {10.3233/JCS-181131}, volume = {27}, year = {2019}, } @inproceedings{6528, abstract = {We construct a verifiable delay function (VDF) by showing how the Rivest-Shamir-Wagner time-lock puzzle can be made publicly verifiable. Concretely, we give a statistically sound public-coin protocol to prove that a tuple (N,x,T,y) satisfies y = x^(2^T) (mod N), where the prover doesn’t know the factorization of N and its running time is dominated by solving the puzzle, that is, by computing x^(2^T), which is conjectured to require T sequential squarings. To get a VDF we make this protocol non-interactive using the Fiat-Shamir heuristic. The motivation for this work comes from the Chia blockchain design, which uses a VDF as a key ingredient. For typical parameters (T ≤ 2^40, N = 2048), our proofs are of size around 10 KB, verification costs around three RSA exponentiations, and computing the proof is 8000 times faster than solving the puzzle even without any parallelism.}, author = {Pietrzak, Krzysztof Z}, booktitle = {10th Innovations in Theoretical Computer Science Conference}, isbn = {978-3-95977-095-8}, issn = {1868-8969}, location = {San Diego, CA, United States}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Simple verifiable delay functions}}, doi = {10.4230/LIPICS.ITCS.2019.60}, volume = {124}, year = {2019}, } @inbook{6726, abstract = {Randomness is an essential part of any secure cryptosystem, but many constructions rely on distributions that are not uniform. This is particularly true for lattice-based cryptosystems, which more often than not make use of discrete Gaussian distributions over the integers. For practical purposes it is crucial to evaluate the impact that approximation errors have on the security of a scheme to provide the best possible trade-off between security and performance. Recent years have seen surprising results allowing the use of relatively low precision while maintaining high levels of security. A key insight in these results is that sampling a distribution with low relative error can provide very strong security guarantees.
Since floating-point numbers provide guarantees on the relative approximation error, they seem to be a suitable tool in this setting, but it is not obvious which sampling algorithms can actually profit from them. While previous works have shown that inversion sampling can be adapted to provide a low relative error (Pöppelmann et al., CHES 2014; Prest, ASIACRYPT 2017), other works have called into question whether this is possible for other sampling techniques (Zheng et al., ePrint report 2018/309). In this work, we consider all sampling algorithms that are popular in the cryptographic setting and analyze the relationship between floating-point precision and the resulting relative error. We show that all of the algorithms either natively achieve a low relative error or can be adapted to do so.}, author = {Walter, Michael}, booktitle = {Progress in Cryptology – AFRICACRYPT 2019}, editor = {Buchmann, J and Nitaj, A and Rachidi, T}, isbn = {978-3-030-23695-3}, issn = {0302-9743}, location = {Rabat, Morocco}, pages = {157--180}, publisher = {Springer Nature}, title = {{Sampling the integers with low relative error}}, doi = {10.1007/978-3-030-23696-0_9}, volume = {11627}, year = {2019}, } @inproceedings{7136, abstract = {It is well established that the notion of min-entropy fails to satisfy the \emph{chain rule} of the form H(X,Y)=H(X|Y)+H(Y), known for Shannon entropy. Such a property would help to analyze how min-entropy is split among smaller blocks. Problems of this kind arise for example when constructing extractors and dispersers. We show that any sequence of variables exhibits a very strong block-source structure (conditional distributions of blocks are nearly flat) when we \emph{spoil a few correlated bits}. This implies, conditioned on the spoiled bits, that \emph{splitting-recombination properties} hold. In particular, we have many nice properties that min-entropy doesn't obey in general, for example strong chain rules, "information can't hurt" inequalities, equivalences of average and worst-case conditional entropy definitions, and others. Quantitatively, for any sequence X_1, …, X_t of random variables over an alphabet X we prove that, when conditioned on m = t⋅O(log log|X| + log log(1/ϵ) + log t) bits of auxiliary information, all conditional distributions of the form X_i|X