@inproceedings{9466, abstract = {In this work, we apply the dynamical systems analysis of Hanrot et al. (CRYPTO’11) to a class of lattice block reduction algorithms that includes (natural variants of) slide reduction and block-Rankin reduction. This implies sharper bounds on the polynomial running times (in the query model) for these algorithms and opens the door to faster practical variants of slide reduction. We give heuristic arguments showing that such variants can indeed speed up slide reduction significantly in practice. This is confirmed by experimental evidence, which also shows that our variants are competitive with state-of-the-art reduction algorithms.}, author = {Walter, Michael}, booktitle = {Public-Key Cryptography – PKC 2021}, isbn = {9783030752446}, issn = {16113349}, location = {Virtual}, pages = {45--67}, publisher = {Springer Nature}, title = {{The convergence of slide-type reductions}}, doi = {10.1007/978-3-030-75245-3_3}, volume = {12710}, year = {2021}, } @inproceedings{9826, abstract = {Automated contact tracing aims at supporting manual contact tracing during pandemics by alerting users of encounters with infected people. There are currently many proposals for protocols (like the “decentralized” DP-3T and PACT or the “centralized” ROBERT and DESIRE) to be run on mobile phones, where the basic idea is to regularly broadcast (using low energy Bluetooth) some values, and at the same time store (a function of) incoming messages broadcasted by users in their proximity. In the existing proposals one can trigger false positives on a massive scale by an “inverse-Sybil” attack, where a large number of devices (malicious users or hacked phones) pretend to be the same user, such that later, just a single person needs to be diagnosed (and allowed to upload) to trigger an alert for all users who were in proximity to any of this large group of devices. We propose the first protocols that do not succumb to such attacks assuming the devices involved in the attack do not constantly communicate, which we observe is a necessary assumption. The high-level idea of the protocols is to derive the values to be broadcasted by a hash chain, so that two (or more) devices who want to launch an inverse-Sybil attack will not be able to connect their respective chains and thus only one of them will be able to upload. Our protocols also achieve security against replay, belated replay, and one of them even against relay attacks.}, author = {Auerbach, Benedikt and Chakraborty, Suvradip and Klein, Karen and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z and Walter, Michael and Yeo, Michelle X}, booktitle = {Topics in Cryptology – CT-RSA 2021}, isbn = {9783030755386}, issn = {16113349}, location = {Virtual Event}, pages = {399--421}, publisher = {Springer Nature}, title = {{Inverse-Sybil attacks in automated contact tracing}}, doi = {10.1007/978-3-030-75539-3_17}, volume = {12704}, year = {2021}, } @inproceedings{9825, abstract = {The dual attack has long been considered a relevant attack on lattice-based cryptographic schemes relying on the hardness of learning with errors (LWE) and its structured variants. As solving LWE corresponds to finding a nearest point on a lattice, one may naturally wonder how efficient this dual approach is for solving more general closest vector problems, such as the classical closest vector problem (CVP), the variants bounded distance decoding (BDD) and approximate CVP, and preprocessing versions of these problems.
While primal, sieving-based solutions to these problems (with preprocessing) were recently studied in a series of works on approximate Voronoi cells [Laa16b, DLdW19, Laa20, DLvW20], for the dual attack no such overview exists, especially for problems with preprocessing. With one of the take-away messages of the approximate Voronoi cell line of work being that primal attacks work well for approximate CVP(P) but scale poorly for BDD(P), one may further wonder if the dual attack suffers the same drawbacks, or if it is perhaps a better solution when trying to solve BDD(P). In this work we provide an overview of cost estimates for dual algorithms for solving these “classical” closest lattice vector problems. Heuristically we expect to solve the search version of average-case CVPP in time and space 2^(0.293d+o(d)) in the single-target model. The distinguishing version of average-case CVPP, where we wish to distinguish between random targets and targets planted at distance (say) 0.99⋅g_d from the lattice, has the same complexity in the single-target model, but can be solved in time and space 2^(0.195d+o(d)) in the multi-target setting, when given a large number of targets from either target distribution. This suggests an inequivalence between distinguishing and searching, as we do not expect a similar improvement in the multi-target setting to hold for search-CVPP. We analyze three slightly different decoders, both for distinguishing and searching, and experimentally obtain concrete cost estimates for the dual attack in dimensions 50 to 80, which confirm our heuristic assumptions, and show that the hidden order terms in the asymptotic estimates are quite small. Our main take-away message is that the dual attack appears to mirror the approximate Voronoi cell line of work – whereas using approximate Voronoi cells works well for approximate CVP(P) but scales poorly for BDD(P), the dual approach scales well for BDD(P) instances but performs poorly on approximate CVP(P).}, author = {Laarhoven, Thijs and Walter, Michael}, booktitle = {Topics in Cryptology – CT-RSA 2021}, isbn = {9783030755386}, issn = {16113349}, location = {Virtual Event}, pages = {478--502}, publisher = {Springer Nature}, title = {{Dual lattice attacks for closest vector problems (with preprocessing)}}, doi = {10.1007/978-3-030-75539-3_20}, volume = {12704}, year = {2021}, } @inproceedings{10408, abstract = {Key trees are often the best solution in terms of transmission cost and storage requirements for managing keys in a setting where a group needs to share a secret key, while being able to efficiently rotate the key material of users (in order to recover from a potential compromise, or to add or remove users). Applications include multicast encryption protocols like LKH (Logical Key Hierarchies) or group messaging like the current IETF proposal TreeKEM. A key tree is a (typically balanced) binary tree, where each node is identified with a key: leaf nodes hold users’ secret keys while the root is the shared group key. For a group of size N, each user just holds log(N) keys (the keys on the path from its leaf to the root) and its entire key material can be rotated by broadcasting 2log(N) ciphertexts (encrypting each fresh key on the path under the keys of its parents). In this work we consider the natural setting where we have many groups with partially overlapping sets of users, and ask if we can find solutions where the cost of rotating a key is better than in the trivial one where we have a separate key tree for each group.
We show that in an asymptotic setting (where the number m of groups is fixed while the number N of users grows) there exist more general key graphs whose cost converges to the cost of a single group, thus saving a factor linear in the number of groups over the trivial solution. As our asymptotic “solution” converges very slowly and performs poorly on concrete examples, we propose an algorithm that uses a natural heuristic to compute a key graph for any given group structure. Our algorithm combines two greedy algorithms, and is thus very efficient: it first converts the group structure into a “lattice graph”, which is then turned into a key graph by repeatedly applying the algorithm for constructing a Huffman code. To better understand how far our proposal is from an optimal solution, we prove lower bounds on the update cost of continuous group-key agreement and multicast encryption in a symbolic model admitting (asymmetric) encryption, pseudorandom generators, and secret sharing as building blocks.}, author = {Alwen, Joel F and Auerbach, Benedikt and Baig, Mirza Ahad and Cueto Noval, Miguel and Klein, Karen and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {Theory of Cryptography – 19th International Conference, TCC 2021}, isbn = {978-3-030-90455-5}, issn = {1611-3349}, location = {Raleigh, NC, United States}, pages = {222--253}, publisher = {Springer Nature}, title = {{Grafting key trees: Efficient key management for overlapping groups}}, doi = {10.1007/978-3-030-90456-2_8}, volume = {13044}, year = {2021}, } @inproceedings{10049, abstract = {While messaging systems with strong security guarantees are widely used in practice, designing a protocol that scales efficiently to large groups and enjoys similar security guarantees remains largely open. The two existing proposals to date are ART (Cohn-Gordon et al., CCS18) and TreeKEM (IETF, The Messaging Layer Security Protocol, draft). TreeKEM is the candidate currently considered by the IETF MLS working group, but dynamic group operations (i.e. adding and removing users) can cause efficiency issues. In this paper we formalize and analyze a variant of TreeKEM which we term Tainted TreeKEM (TTKEM for short). The basic idea underlying TTKEM was suggested by Millican (MLS mailing list, February 2018). This version is more efficient than TreeKEM for some natural distributions of group operations; we quantify this through simulations. Our second contribution is two security proofs for TTKEM which establish post-compromise and forward secrecy even against adaptive attackers. The security loss (to the underlying PKE) in the Random Oracle Model is a polynomial factor, and a quasipolynomial one in the Standard Model. Our proofs can be adapted to TreeKEM as well. Before our work, no security proof was known for any TreeKEM-like protocol establishing tight security against an adversary who can adaptively choose the sequence of operations. We are also the first to prove (or even formalize) active security where the server can arbitrarily deviate from the protocol specification.
Proving fully active security – where the users can also arbitrarily deviate – remains open.}, author = {Klein, Karen and Pascual Perez, Guillermo and Walter, Michael and Kamath Hosdurg, Chethan and Capretto, Margarita and Cueto Noval, Miguel and Markov, Ilia and Yeo, Michelle X and Alwen, Joel F and Pietrzak, Krzysztof Z}, booktitle = {2021 IEEE Symposium on Security and Privacy}, location = {San Francisco, CA, United States}, pages = {268--284}, publisher = {IEEE}, title = {{Keep the dirt: tainted TreeKEM, adaptively and actively secure continuous group key agreement}}, doi = {10.1109/sp40001.2021.00035}, year = {2021}, } @inproceedings{10410, abstract = {The security of cryptographic primitives and protocols against adversaries that are allowed to make adaptive choices (e.g., which parties to corrupt or which queries to make) is notoriously difficult to establish. A broad theoretical framework was introduced by Jafargholi et al. [Crypto’17] for this purpose. In this paper we initiate the study of lower bounds on loss in adaptive security for certain cryptographic protocols considered in the framework. We prove lower bounds that almost match the upper bounds (proven using the framework) for proxy re-encryption, prefix-constrained PRFs and generalized selective decryption, a security game that captures the security of certain group messaging and broadcast encryption schemes. Those primitives have in common that their security game involves an underlying graph that can be adaptively built by the adversary. Some of our lower bounds only apply to a restricted class of black-box reductions which we term “oblivious” (the existing upper bounds are of this restricted type), some apply to the broader but still restricted class of non-rewinding reductions, while our lower bound for proxy re-encryption applies to all black-box reductions. The fact that some of our lower bounds seem to crucially rely on obliviousness or at least a non-rewinding reduction hints to the exciting possibility that the existing upper bounds can be improved by using more sophisticated reductions. Our main conceptual contribution is a two-player multi-stage game called the Builder-Pebbler Game. We can translate bounds on the winning probabilities for various instantiations of this game into cryptographic lower bounds for the above-mentioned primitives using oracle separation techniques.}, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {Theory of Cryptography – 19th International Conference, TCC 2021}, isbn = {978-3-030-90452-4}, issn = {1611-3349}, location = {Raleigh, NC, United States}, pages = {550--581}, publisher = {Springer Nature}, title = {{The cost of adaptivity in security games on graphs}}, doi = {10.1007/978-3-030-90453-1_19}, volume = {13043}, year = {2021}, } @inproceedings{10048, abstract = {The security of cryptographic primitives and protocols against adversaries that are allowed to make adaptive choices (e.g., which parties to corrupt or which queries to make) is notoriously difficult to establish. A broad theoretical framework was introduced by Jafargholi et al. [Crypto’17] for this purpose. In this paper we initiate the study of lower bounds on loss in adaptive security for certain cryptographic protocols considered in the framework.
We prove lower bounds that almost match the upper bounds (proven using the framework) for proxy re-encryption, prefix-constrained PRFs and generalized selective decryption, a security game that captures the security of certain group messaging and broadcast encryption schemes. Those primitives have in common that their security game involves an underlying graph that can be adaptively built by the adversary. Some of our lower bounds only apply to a restricted class of black-box reductions which we term “oblivious” (the existing upper bounds are of this restricted type), some apply to the broader but still restricted class of non-rewinding reductions, while our lower bound for proxy re-encryption applies to all black-box reductions. The fact that some of our lower bounds seem to crucially rely on obliviousness or at least a non-rewinding reduction hints to the exciting possibility that the existing upper bounds can be improved by using more sophisticated reductions. Our main conceptual contribution is a two-player multi-stage game called the Builder-Pebbler Game. We can translate bounds on the winning probabilities for various instantiations of this game into cryptographic lower bounds for the above-mentioned primitives using oracle separation techniques. }, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {19th Theory of Cryptography Conference 2021}, location = {Raleigh, NC, United States}, publisher = {International Association for Cryptologic Research}, title = {{The cost of adaptivity in security games on graphs}}, year = {2021}, } @inproceedings{8339, abstract = {Discrete Gaussian distributions over lattices are central to lattice-based cryptography, and to the computational and mathematical aspects of lattices more broadly. The literature contains a wealth of useful theorems about the behavior of discrete Gaussians under convolutions and related operations. Yet despite their structural similarities, most of these theorems are formally incomparable, and their proofs tend to be monolithic and written nearly “from scratch,” making them unnecessarily hard to verify, understand, and extend. In this work we present a modular framework for analyzing linear operations on discrete Gaussian distributions. The framework abstracts away the particulars of Gaussians, and usually reduces proofs to the choice of appropriate linear transformations and elementary linear algebra. To showcase the approach, we establish several general properties of discrete Gaussians, and show how to obtain all prior convolution theorems (along with some new ones) as straightforward corollaries. As another application, we describe a self-reduction for Learning With Errors (LWE) that uses a fixed number of samples to generate an unlimited number of additional ones (having somewhat larger error). The distinguishing features of our reduction are its simple analysis in our framework, and its exclusive use of discrete Gaussians without any loss in parameters relative to a prior mixed discrete-and-continuous approach. As a contribution of independent interest, for subgaussian random matrices we prove a singular value concentration bound with explicitly stated constants, and we give tighter heuristics for specific distributions that are commonly used for generating lattice trapdoors. 
These bounds yield improvements in the concrete bit-security estimates for trapdoor lattice cryptosystems.}, author = {Genise, Nicholas and Micciancio, Daniele and Peikert, Chris and Walter, Michael}, booktitle = {23rd IACR International Conference on the Practice and Theory of Public-Key Cryptography}, isbn = {9783030453732}, issn = {16113349}, location = {Edinburgh, United Kingdom}, pages = {623--651}, publisher = {Springer Nature}, title = {{Improved discrete Gaussian and subgaussian analysis for lattice cryptography}}, doi = {10.1007/978-3-030-45374-9_21}, volume = {12110}, year = {2020}, } @inbook{6726, abstract = {Randomness is an essential part of any secure cryptosystem, but many constructions rely on distributions that are not uniform. This is particularly true for lattice-based cryptosystems, which more often than not make use of discrete Gaussian distributions over the integers. For practical purposes it is crucial to evaluate the impact that approximation errors have on the security of a scheme to provide the best possible trade-off between security and performance. Recent years have seen surprising results allowing the use of relatively low precision while maintaining high levels of security. A key insight in these results is that sampling a distribution with low relative error can provide very strong security guarantees. Since floating point numbers provide guarantees on the relative approximation error, they seem a suitable tool in this setting, but it is not obvious which sampling algorithms can actually profit from them. While previous works have shown that inversion sampling can be adapted to provide a low relative error (Pöppelmann et al., CHES 2014; Prest, ASIACRYPT 2017), other works have called into question whether this is possible for other sampling techniques (Zheng et al., ePrint report 2018/309). In this work, we consider all sampling algorithms that are popular in the cryptographic setting and analyze the relationship between floating point precision and the resulting relative error. We show that all of the algorithms either natively achieve a low relative error or can be adapted to do so.}, author = {Walter, Michael}, booktitle = {Progress in Cryptology – AFRICACRYPT 2019}, editor = {Buchmann, J and Nitaj, A and Rachidi, T}, isbn = {978-3-030-23695-3}, issn = {0302-9743}, location = {Rabat, Morocco}, pages = {157--180}, publisher = {Springer Nature}, title = {{Sampling the integers with low relative error}}, doi = {10.1007/978-3-030-23696-0_9}, volume = {11627}, year = {2019}, } @inproceedings{7411, abstract = {Proofs of sequential work (PoSW) are proof systems where a prover, upon receiving a statement χ and a time parameter T, computes a proof ϕ(χ,T) which is efficiently and publicly verifiable. The proof can be computed in T sequential steps, but not much less, even by a malicious party having large parallelism. A PoSW thus serves as a proof that T units of time have passed since χ was received. PoSW were introduced by Mahmoody, Moran and Vadhan [MMV11]; a simple and practical construction was only recently proposed by Cohen and Pietrzak [CP18]. In this work we construct a new simple PoSW in the random permutation model which is almost as simple and efficient as [CP18] but conceptually very different. Whereas the structure underlying [CP18] is a hash tree, our construction is based on skip lists and has the interesting property that computing the PoSW is a reversible computation.
The fact that the construction is reversible can potentially be used for new applications like constructing proofs of replication. We also show how to “embed” the sloth function of Lenstra and Wesolowski [LW17] into our PoSW to get a PoSW where one can additionally verify correctness of the output much more efficiently than recomputing it (though recent constructions of “verifiable delay functions” subsume most of the applications this construction was aiming at).}, author = {Abusalah, Hamza M and Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {Advances in Cryptology – EUROCRYPT 2019}, isbn = {9783030176556}, issn = {1611-3349}, location = {Darmstadt, Germany}, pages = {277--291}, publisher = {Springer International Publishing}, title = {{Reversible proofs of sequential work}}, doi = {10.1007/978-3-030-17656-3_10}, volume = {11477}, year = {2019}, } @inproceedings{300, abstract = {We introduce a formal quantitative notion of “bit security” for a general type of cryptographic games (capturing both decision and search problems), aimed at capturing the intuition that a cryptographic primitive with k-bit security is as hard to break as an ideal cryptographic function requiring a brute force attack on a k-bit key space. Our new definition matches the notion of bit security commonly used by cryptographers and cryptanalysts when studying search (e.g., key recovery) problems, where the use of the traditional definition is well established. However, it produces a quantitatively different metric in the case of decision (indistinguishability) problems, where the use of (a straightforward generalization of) the traditional definition is more problematic and leads to a number of paradoxical situations or mismatches between theoretical/provable security and practical/common sense intuition. Key to our new definition is to consider adversaries that may explicitly declare failure of the attack. We support and justify the new definition by proving a number of technical results, including tight reductions between several standard cryptographic problems, a new hybrid theorem that preserves bit security, and an application to the security analysis of indistinguishability primitives making use of (approximate) floating point numbers. This is the first result showing that (standard precision) 53-bit floating point numbers can be used to achieve 100-bit security in the context of cryptographic primitives with general indistinguishability-based security definitions. Previous results of this type applied only to search problems, or special types of decision problems.}, author = {Micciancio, Daniele and Walter, Michael}, booktitle = {Advances in Cryptology – EUROCRYPT 2018}, location = {Tel Aviv, Israel}, pages = {3--28}, publisher = {Springer}, title = {{On the bit security of cryptographic primitives}}, doi = {10.1007/978-3-319-78381-9_1}, volume = {10820}, year = {2018}, }