@article{14820, abstract = {We consider a natural problem dealing with weighted packet selection across a rechargeable link, which, e.g., finds applications in cryptocurrency networks. The capacity of a link (u, v) is determined by how much nodes u and v allocate for this link. Specifically, the input is a finite ordered sequence of packets that arrive in both directions along a link. Given (u, v) and a packet of weight x going from u to v, node u can either accept or reject the packet. If u accepts the packet, the capacity on link (u, v) decreases by x. Correspondingly, v's capacity on (u, v) increases by x. If a node rejects the packet, this will entail a cost affinely linear in the weight of the packet. A link is “rechargeable” in the sense that the total capacity of the link has to remain constant, but the allocation of capacity at the ends of the link can depend arbitrarily on the nodes' decisions. The goal is to minimise the sum of the capacity injected into the link and the cost of rejecting packets. We show that the problem is NP-hard, but can be approximated efficiently with a ratio of (1+ε)⋅(1+√3) for some arbitrary ε>0.}, author = {Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X}, issn = {0304-3975}, journal = {Theoretical Computer Science}, keywords = {General Computer Science, Theoretical Computer Science}, publisher = {Elsevier}, title = {{Weighted packet selection for rechargeable links in cryptocurrency networks: Complexity and approximation}}, doi = {10.1016/j.tcs.2023.114353}, volume = {989}, year = {2024}, } @inproceedings{15007, abstract = {Traditional blockchains grant the miner of a block full control not only over which transactions are included but also over their order. This constitutes a major flaw, discovered with the introduction of decentralized finance, that allows miners to perform MEV attacks. In this paper, we address the issue of sandwich attacks by providing a construction that takes as input a blockchain protocol and outputs a new blockchain protocol with the same security but in which sandwich attacks are not profitable. Furthermore, our protocol is fully decentralized, with no trusted third parties or heavy cryptographic primitives, and carries only a linear increase in latency and minimal computation overhead.}, author = {Alpos, Orestis and Amores-Sesar, Ignacio and Cachin, Christian and Yeo, Michelle X}, booktitle = {27th International Conference on Principles of Distributed Systems}, isbn = {9783959773089}, issn = {1868-8969}, location = {Tokyo, Japan}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Eating sandwiches: Modular and lightweight elimination of transaction reordering attacks}}, doi = {10.4230/LIPIcs.OPODIS.2023.12}, volume = {286}, year = {2024}, } @inproceedings{13143, abstract = {GIMPS and PrimeGrid are large-scale distributed projects dedicated to searching for giant prime numbers, usually of special forms like Mersenne and Proth primes. The numbers in the current search-space are millions of digits long and the participating volunteers need to run resource-consuming primality tests. Once a candidate prime N has been found, the only way for another party to independently verify the primality of N used to be by repeating the expensive primality test. To avoid the need to recompute each primality test, these projects have recently adopted certifying mechanisms that enable efficient verification of performed tests.
However, the mechanisms presently in place only detect benign errors and there is no guarantee against adversarial behavior: a malicious volunteer can mislead the project into rejecting a giant prime as being non-prime. In this paper, we propose a practical, cryptographically-sound mechanism for certifying the non-primality of Proth numbers. That is, a volunteer can – in parallel to running the primality test for N – generate, at little extra cost, an efficiently verifiable proof certifying that N is not prime. The interactive protocol has statistical soundness and can be made non-interactive using the Fiat-Shamir heuristic. Our approach is based on a cryptographic primitive called Proof of Exponentiation (PoE) which, for a group G, certifies that a tuple (x,y,T)∈G²×ℕ satisfies x^(2^T)=y (Pietrzak, ITCS 2019 and Wesolowski, J. Cryptol. 2020). In particular, we show how to adapt Pietrzak’s PoE at a moderate additional cost to make it a cryptographically-sound certificate of non-primality.}, author = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Pietrzak, Krzysztof Z}, booktitle = {Public-Key Cryptography – PKC 2023}, isbn = {9783031313677}, issn = {1611-3349}, location = {Atlanta, GA, United States}, pages = {530--553}, publisher = {Springer Nature}, title = {{Certifying giant nonprimes}}, doi = {10.1007/978-3-031-31368-4_19}, volume = {13940}, year = {2023}, } @article{12164, abstract = {A shared-memory counter is a widely-used and well-studied concurrent object. It supports two operations: an Inc operation that increases its value by 1 and a Read operation that returns its current value. Jayanti, Tan and Toueg (SIAM J. Comput. 30(2), 2000) proved a linear lower bound on the worst-case step complexity of obstruction-free implementations, from read-write registers, of a large class of shared objects that includes counters. The lower bound leaves open the question of finding counter implementations with sub-linear amortized step complexity. In this work, we address this gap. We show that n-process, wait-free and linearizable counters can be implemented from read-write registers with O(log² n) amortized step complexity. This is the first counter algorithm from read-write registers that provides sub-linear amortized step complexity in executions of arbitrary length. Since a logarithmic lower bound on the amortized step complexity of obstruction-free counter implementations exists, our upper bound is within a logarithmic factor of optimal. The worst-case step complexity of the construction remains linear, which is optimal. This is obtained thanks to a new max register construction with O(log n) amortized step complexity in executions of arbitrary length in which the value stored in the register does not grow too quickly.
We then leverage an existing counter algorithm by Aspnes, Attiya and Censor-Hillel [1] in which we “plug” our max register implementation to show that it remains linearizable while achieving O(log² n) amortized step complexity.}, author = {Baig, Mirza Ahad and Hendler, Danny and Milani, Alessia and Travers, Corentin}, issn = {1432-0452}, journal = {Distributed Computing}, keywords = {Computational Theory and Mathematics, Computer Networks and Communications, Hardware and Architecture, Theoretical Computer Science}, pages = {29--43}, publisher = {Springer Nature}, title = {{Long-lived counters with polylogarithmic amortized step complexity}}, doi = {10.1007/s00446-022-00439-5}, volume = {36}, year = {2023}, } @inproceedings{14428, abstract = {Suppose we have two hash functions h₁ and h₂, but we trust the security of only one of them. To mitigate this worry, we wish to build a hash combiner C_{h₁,h₂} which is secure so long as one of the underlying hash functions is. This question has been well-studied in the regime of collision resistance. In this case, concatenating the two hash function outputs clearly works. Unfortunately, a long series of works (Boneh and Boyen, CRYPTO’06; Pietrzak, Eurocrypt’07; Pietrzak, CRYPTO’08) showed no (noticeably) shorter combiner for collision resistance is possible. In this work, we revisit this pessimistic state of affairs, motivated by the observation that collision-resistance is insufficient for many interesting applications of cryptographic hash functions anyway. We argue the right formulation of the “hash combiner” is to build what we call random oracle (RO) combiners, utilizing stronger assumptions for stronger constructions. Indeed, we circumvent the previous lower bounds for collision resistance by constructing a simple length-preserving RO combiner C̃_{h₁,h₂}^{Z₁,Z₂}(M) = h₁(M, Z₁) ⊕ h₂(M, Z₂), where Z₁, Z₂ are random salts of appropriate length. We show that this extra randomness is necessary for RO combiners, and indeed our construction is somewhat tight with this lower bound. On the negative side, we show that one cannot generically apply the composition theorem to further replace “monolithic” hash functions h₁ and h₂ by some simpler indifferentiable construction (such as the Merkle-Damgård transformation) from smaller components, such as fixed-length compression functions. Finally, despite this issue, we directly prove collision resistance of the Merkle-Damgård variant of our combiner, where h₁ and h₂ are replaced by iterative Merkle-Damgård hashes applied to a fixed-length compression function. Thus, we can still subvert the concatenation barrier for collision-resistance combiners while utilizing practically small fixed-length components underneath.}, author = {Dodis, Yevgeniy and Ferguson, Niels and Goldin, Eli and Hall, Peter and Pietrzak, Krzysztof Z}, booktitle = {43rd Annual International Cryptology Conference}, isbn = {9783031385445}, issn = {1611-3349}, location = {Santa Barbara, CA, United States}, pages = {514--546}, publisher = {Springer Nature}, title = {{Random oracle combiners: Breaking the concatenation barrier for collision-resistance}}, doi = {10.1007/978-3-031-38545-2_17}, volume = {14082}, year = {2023}, } @inproceedings{14457, abstract = {Threshold secret sharing allows a dealer to split a secret s into n shares, such that any t shares allow for reconstructing s, but no t−1 shares reveal any information about s.
Leakage-resilient secret sharing requires that the secret remains hidden, even when an adversary additionally obtains a limited amount of leakage from every share. Benhamouda et al. (CRYPTO’18) proved that Shamir’s secret sharing scheme is one-bit leakage-resilient for reconstruction threshold t≥0.85n and conjectured that the same holds for t = c⋅n for any constant 0≤c≤1. Nielsen and Simkin (EUROCRYPT’20) showed that this is the best one can hope for by proving that Shamir’s scheme is not secure against one-bit leakage when t≤c⋅n/log(n). In this work, we strengthen the lower bound of Nielsen and Simkin. We consider noisy leakage-resilience, where a random subset of leakages is replaced by uniformly random noise. We prove a lower bound for Shamir’s secret sharing, similar to that of Nielsen and Simkin, which holds even when a constant fraction of leakages is replaced by random noise. To this end, we first prove a lower bound on the share size of any noisy-leakage-resilient sharing scheme. We then use this lower bound to show that there exist universal constants c₁, c₂, such that for sufficiently large n it holds that Shamir’s secret sharing scheme is not noisy-leakage-resilient for t≤c₁⋅n/log(n), even when a c₂ fraction of leakages are replaced by random noise.}, author = {Hoffmann, Charlotte and Simkin, Mark}, booktitle = {8th International Conference on Cryptology and Information Security in Latin America}, isbn = {9783031444685}, issn = {1611-3349}, location = {Quito, Ecuador}, pages = {215--228}, publisher = {Springer Nature}, title = {{Stronger lower bounds for leakage-resilient secret sharing}}, doi = {10.1007/978-3-031-44469-2_11}, volume = {14168}, year = {2023}, } @inproceedings{13238, abstract = {We consider a natural problem dealing with weighted packet selection across a rechargeable link, which, e.g., finds applications in cryptocurrency networks. The capacity of a link (u, v) is determined by how much nodes u and v allocate for this link. Specifically, the input is a finite ordered sequence of packets that arrive in both directions along a link. Given (u, v) and a packet of weight x going from u to v, node u can either accept or reject the packet. If u accepts the packet, the capacity on link (u, v) decreases by x. Correspondingly, v’s capacity on (u, v) increases by x. If a node rejects the packet, this will entail a cost affinely linear in the weight of the packet. A link is “rechargeable” in the sense that the total capacity of the link has to remain constant, but the allocation of capacity at the ends of the link can depend arbitrarily on the nodes’ decisions. The goal is to minimise the sum of the capacity injected into the link and the cost of rejecting packets. We show that the problem is NP-hard, but can be approximated efficiently with a ratio of (1+ε)⋅(1+√3) for some arbitrary ε>0.}, author = {Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X}, booktitle = {SIROCCO 2023: Structural Information and Communication Complexity}, isbn = {9783031327322}, issn = {1611-3349}, location = {Alcala de Henares, Spain}, pages = {576--594}, publisher = {Springer Nature}, title = {{Weighted packet selection for rechargeable links in cryptocurrency networks: Complexity and approximation}}, doi = {10.1007/978-3-031-32733-9_26}, volume = {13892}, year = {2023}, } @phdthesis{14506, abstract = {Payment channel networks are a promising approach to address the scalability bottleneck of cryptocurrencies.
Two design principles behind payment channel networks are efficiency and privacy. Payment channel networks improve efficiency by allowing users to transact in a peer-to-peer fashion along multi-hop routes in the network, avoiding the lengthy process of consensus on the blockchain. Transacting over payment channel networks also improves privacy as these transactions are not broadcast to the blockchain. Despite the influx of recent protocols built on top of payment channel networks and their analysis, a common shortcoming of many of these protocols is that they typically focus on improving either efficiency or privacy, but not both. Another limitation on the efficiency front is that existing models of the actions, costs, and utilities of users are limited or come with unrealistic assumptions. This thesis aims to address some of the shortcomings of recent protocols and algorithms on payment channel networks, particularly in their privacy and efficiency aspects. We first present a payment route discovery protocol based on hub labelling and private information retrieval that hides the route query and is also efficient. We then present a rebalancing protocol that formulates the rebalancing problem as a linear program and solves the linear program using multiparty computation so as to hide the channel balances. The rebalancing solution output by our protocol is also globally optimal. We go on to develop more realistic models of the action space, costs, and utilities of both existing and new users that want to join the network. In each of these settings, we also develop algorithms to optimise the utility of these users with good guarantees on the approximation and competitive ratios.}, author = {Yeo, Michelle X}, issn = {2663-337X}, pages = {162}, publisher = {Institute of Science and Technology Austria}, title = {{Advances in efficiency and privacy in payment channel network analysis}}, doi = {10.15479/14506}, year = {2023}, } @inproceedings{14490, abstract = {Payment channel networks (PCNs) are a promising solution to the scalability problem of cryptocurrencies. Any two users connected by a payment channel in the network can theoretically send an unbounded number of instant, costless transactions between them. Users who are not directly connected can also transact with each other in a multi-hop fashion. In this work, we study the incentive structure behind the creation of payment channel networks, particularly from the point of view of a single user that wants to join the network. We define a utility function for a new user in terms of expected revenue, expected fees, and the cost of creating channels, and then provide constant-factor approximation algorithms that optimise the utility function given a certain budget. Additionally, we take a step back from a single user to the whole network and examine the parameter spaces under which simple graph topologies form a Nash equilibrium.}, author = {Avarikioti, Zeta and Lizurej, Tomasz and Michalak, Tomasz and Yeo, Michelle X}, booktitle = {43rd International Conference on Distributed Computing Systems}, isbn = {9798350339864}, issn = {2575-8411}, location = {Hong Kong, China}, pages = {603--613}, publisher = {IEEE}, title = {{Lightning creation games}}, doi = {10.1109/ICDCS57875.2023.00037}, volume = {2023}, year = {2023}, } @inproceedings{14693, abstract = {Lucas sequences are constant-recursive integer sequences with a long history of applications in cryptography, both in the design of cryptographic schemes and in cryptanalysis.
In this work, we study the sequential hardness of computing Lucas sequences over an RSA modulus. First, we show that modular Lucas sequences are at least as sequentially hard as the classical delay function given by iterated modular squaring proposed by Rivest, Shamir, and Wagner (MIT Tech. Rep. 1996) in the context of time-lock puzzles. Moreover, there is no obvious reduction in the other direction, which suggests that the assumption of sequential hardness of modular Lucas sequences is strictly weaker than that of iterated modular squaring. In other words, the sequential hardness of modular Lucas sequences might hold even in the case of an algorithmic improvement violating the sequential hardness of iterated modular squaring. Second, we demonstrate the feasibility of constructing practically-efficient verifiable delay functions based on the sequential hardness of modular Lucas sequences. Our construction builds on the work of Pietrzak (ITCS 2019) by leveraging the intrinsic connection between the problem of computing modular Lucas sequences and exponentiation in an appropriate extension field.}, author = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Krňák, Tomáš}, booktitle = {21st International Conference on Theory of Cryptography}, isbn = {9783031486234}, issn = {1611-3349}, location = {Taipei, Taiwan}, pages = {336--362}, publisher = {Springer Nature}, title = {{(Verifiable) delay functions from Lucas sequences}}, doi = {10.1007/978-3-031-48624-1_13}, volume = {14372}, year = {2023}, } @inproceedings{14691, abstract = {Continuous Group-Key Agreement (CGKA) allows a group of users to maintain a shared key. It is the fundamental cryptographic primitive underlying group messaging schemes and related protocols, most notably TreeKEM, the underlying key agreement protocol of the Messaging Layer Security (MLS) protocol, a standard for group messaging by the IETF. CGKA works in an asynchronous setting where parties must come online only occasionally, and their messages are relayed by an untrusted server. The most expensive operation provided by CGKA is the one that allows a user to refresh their key material in order to achieve forward secrecy (old messages remain secure when a user is compromised) and post-compromise security (users can heal from compromise). One caveat of early CGKA protocols is that these update operations had to be performed sequentially, with any user wanting to update their key material having to receive and process all previous updates. Later versions of TreeKEM do allow for concurrent updates at the cost of a communication overhead per update message that is linear in the number of updating parties. This was shown to be indeed necessary for achieving PCS in just two rounds of communication [Bienstock et al., TCC’20]. The recently proposed protocol CoCoA [Alwen et al., Eurocrypt’22], however, shows that this overhead can be reduced if PCS requirements are relaxed and healing is allowed to take a logarithmic number of rounds. The natural question, thus, is whether CoCoA is optimal in this setting. In this work we answer this question, providing a lower bound on the cost (concretely, the amount of data to be uploaded to the server) for CGKA protocols that heal in an arbitrary number k of rounds, which shows that CoCoA is very close to optimal. Additionally, we extend CoCoA to heal in an arbitrary number of rounds, and propose a modification of it with a reduced communication cost for certain k.
We prove our bound in a combinatorial setting where the state of the protocol progresses in rounds, and the state of the protocol in each round is captured by a set system, each set specifying a set of users who share a secret key. We show this combinatorial model is equivalent to a symbolic model capturing building blocks including PRFs and public-key encryption, related to the one used by Bienstock et al. Our lower bound is of order k⋅n^(1+1/(k-1))/log(k), where 2≤k≤log(n) is the number of updates per user the protocol requires to heal. This generalizes the n² bound for k=2 from Bienstock et al. This bound almost matches the k⋅n^(1+2/(k-1)) or k²⋅n^(1+1/(k-1)) efficiency we get for the variants of the CoCoA protocol also introduced in this paper.}, author = {Auerbach, Benedikt and Cueto Noval, Miguel and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z}, booktitle = {21st International Conference on Theory of Cryptography}, isbn = {9783031486203}, issn = {1611-3349}, location = {Taipei, Taiwan}, pages = {271--300}, publisher = {Springer Nature}, title = {{On the cost of post-compromise security in concurrent Continuous Group-Key Agreement}}, doi = {10.1007/978-3-031-48621-0_10}, volume = {14371}, year = {2023}, } @inproceedings{14692, abstract = {The generic-group model (GGM) aims to capture algorithms working over groups of prime order that only rely on the group operation, but do not exploit any additional structure given by the concrete implementation of the group. Within this model, it is possible to prove information-theoretic lower bounds on the hardness of problems like the discrete logarithm (DL) or computational Diffie-Hellman (CDH). Thus, since its introduction, it has served as a valuable tool to assess the concrete security provided by cryptographic schemes based on such problems. A work on the related algebraic-group model (AGM) introduced a method, used by many subsequent works, to adapt GGM lower bounds for one problem to another, by means of conceptually simple reductions. In this work, we propose an alternative approach to extend GGM bounds from one problem to another. Following an idea by Yun [EC15], we show that, in the GGM, the security of a large class of problems can be reduced to that of geometric search-problems. By reducing the security of the resulting geometric search-problems to variants of the search-by-hypersurface problem, for which information-theoretic lower bounds exist, we give alternative proofs of several results that used the AGM approach. The main advantage of our approach is that our reduction from geometric search-problems also works for the GGM with preprocessing (more precisely, the bit-fixing GGM introduced by Coretti, Dodis and Guo [Crypto18]). As a consequence, this opens up the possibility of transferring preprocessing GGM bounds from one problem to another, also by means of simple reductions. Concretely, we prove novel preprocessing bounds on the hardness of the d-strong discrete logarithm, the d-strong Diffie-Hellman inversion, and multi-instance CDH problems, as well as a large class of Uber assumptions.
Additionally, our approach applies to Shoup’s GGM without additional restrictions on the query behavior of the adversary, while the recent works of Zhang, Zhou, and Katz [AC22] and Zhandry [Crypto22] highlight that this is not the case for the AGM approach.}, author = {Auerbach, Benedikt and Hoffmann, Charlotte and Pascual Perez, Guillermo}, booktitle = {21st International Conference on Theory of Cryptography}, isbn = {9783031486203}, issn = {1611-3349}, pages = {301--330}, publisher = {Springer Nature}, title = {{Generic-group lower bounds via reductions between geometric-search problems: With and without preprocessing}}, doi = {10.1007/978-3-031-48621-0_11}, volume = {14371}, year = {2023}, } @inproceedings{14736, abstract = {Payment channel networks (PCNs) are a promising technology to improve the scalability of cryptocurrencies. PCNs, however, face the challenge that the frequent usage of certain routes may deplete channels in one direction, and hence prevent further transactions. In order to reap the full potential of PCNs, recharging and rebalancing mechanisms are required to provision channels, as well as an admission control logic to decide which transactions to reject in case capacity is insufficient. This paper presents a formal model of this optimisation problem. In particular, we consider an online algorithms perspective, where transactions arrive over time in an unpredictable manner. Our main contributions are competitive online algorithms which come with provable guarantees over time. We empirically evaluate our algorithms on randomly generated transactions to compare their average performance to our theoretical bounds. We also show how this model and approach differ from related problems in classic communication networks.}, author = {Bastankhah, Mahsa and Chatterjee, Krishnendu and Maddah-Ali, Mohammad Ali and Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X}, booktitle = {27th International Conference on Financial Cryptography and Data Security}, isbn = {9783031477539}, issn = {1611-3349}, location = {Bol, Brac, Croatia}, pages = {309--325}, publisher = {Springer Nature}, title = {{R2: Boosting liquidity in payment channel networks with online admission control}}, doi = {10.1007/978-3-031-47754-6_18}, volume = {13950}, year = {2023}, } @inproceedings{11476, abstract = {Messaging platforms like Signal are widely deployed and provide strong security in an asynchronous setting. It is a challenging problem to construct a protocol with similar security guarantees that can efficiently scale to large groups. A major bottleneck is the frequent key rotation users need to perform to achieve post-compromise forward security. In current proposals – most notably in TreeKEM (which is part of the IETF’s Messaging Layer Security (MLS) protocol draft) – for users in a group of size n to rotate their keys, they must each craft a message of size log(n) to be broadcast to the group using an (untrusted) delivery server. In larger groups, having users sequentially rotate their keys requires too much bandwidth (or takes too long), so variants allowing any T≤n users to simultaneously rotate their keys in just 2 communication rounds have been suggested (e.g. “Propose and Commit” by MLS). Unfortunately, 2-round concurrent updates are either damaging or expensive (or both); i.e. they either result in future operations being more costly (e.g. via “blanking” or “tainting”) or are costly themselves, requiring Ω(T) communication for each user [Bienstock et al., TCC’20].
In this paper we propose CoCoA, a new scheme that allows for T concurrent updates that are neither damaging nor costly. That is, they add no cost to future operations yet they only require O(log²(n)) communication per user. To circumvent the [Bienstock et al.] lower bound, CoCoA increases the number of rounds needed to complete all updates from 2 up to (at most) log(n), though typically fewer rounds are needed. The key insight of our protocol is the following: in the (non-concurrent version of) TreeKEM, a delivery server which gets T concurrent update requests will approve one and reject the remaining T−1. In contrast, our server attempts to apply all of them. If more than one user requests to rotate the same key during a round, the server arbitrarily picks a winner. Surprisingly, we prove that regardless of how the server chooses the winners, all previously compromised users will recover after at most log(n) such update rounds. To keep the communication complexity low, CoCoA is a server-aided CGKA. That is, the delivery server no longer blindly forwards packets, but instead actively computes individualized packets tailored to each user. As the server is untrusted, this change requires us to develop new mechanisms ensuring robustness of the protocol.}, author = {Alwen, Joël and Auerbach, Benedikt and Cueto Noval, Miguel and Klein, Karen and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {Advances in Cryptology – EUROCRYPT 2022}, isbn = {9783031070846}, issn = {1611-3349}, location = {Trondheim, Norway}, pages = {815--844}, publisher = {Springer Nature}, title = {{CoCoA: Concurrent continuous group key agreement}}, doi = {10.1007/978-3-031-07085-3_28}, volume = {13276}, year = {2022}, } @inproceedings{12516, abstract = {The homogeneous continuous LWE (hCLWE) problem is to distinguish samples of a specific high-dimensional Gaussian mixture from standard normal samples. It was shown to be at least as hard as Learning with Errors, but no reduction in the other direction is currently known. We present four new public-key encryption schemes based on the hardness of hCLWE, with varying tradeoffs between decryption and security errors, and different discretization techniques. Our schemes yield a polynomial-time algorithm for solving hCLWE using a Statistical Zero-Knowledge oracle.}, author = {Bogdanov, Andrej and Cueto Noval, Miguel and Hoffmann, Charlotte and Rosen, Alon}, booktitle = {Theory of Cryptography}, isbn = {9783031223648}, issn = {1611-3349}, location = {Chicago, IL, United States}, pages = {565--592}, publisher = {Springer Nature}, title = {{Public-Key Encryption from Homogeneous CLWE}}, doi = {10.1007/978-3-031-22365-5_20}, volume = {13748}, year = {2022}, } @inproceedings{12167, abstract = {Payment channels effectively move the transaction load off-chain, thereby successfully addressing the inherent scalability problem most cryptocurrencies face. A major drawback of payment channels is the need to “top up” funds on-chain when a channel is depleted. Rebalancing was proposed to alleviate this issue, where parties with depleting channels move their funds along a cycle to replenish their channels off-chain. Protocols for rebalancing so far either introduce local solutions or compromise privacy. In this work, we present an opt-in rebalancing protocol that is both private and globally optimal, meaning our protocol maximizes the total amount of rebalanced funds. We study rebalancing from the framework of linear programming.
To obtain full privacy guarantees, we leverage multi-party computation in solving the linear program, which is executed by selected participants to maintain efficiency. Finally, we efficiently decompose the rebalancing solution into incentive-compatible cycles which conserve user balances when executed atomically.}, author = {Avarikioti, Georgia and Pietrzak, Krzysztof Z and Salem, Iosif and Schmid, Stefan and Tiwari, Samarth and Yeo, Michelle X}, booktitle = {Financial Cryptography and Data Security}, isbn = {9783031182822}, issn = {1611-3349}, location = {Grenada}, pages = {358--373}, publisher = {Springer Nature}, title = {{Hide & Seek: Privacy-preserving rebalancing on payment channel networks}}, doi = {10.1007/978-3-031-18283-9_17}, volume = {13411}, year = {2022}, } @inproceedings{12176, abstract = {A proof of exponentiation (PoE) in a group G of unknown order allows a prover to convince a verifier that a tuple (x,q,T,y)∈G×ℕ×ℕ×G satisfies x^(q^T)=y. This primitive has recently found exciting applications in the constructions of verifiable delay functions and succinct arguments of knowledge. The most practical PoEs only achieve soundness either under computational assumptions, i.e., they are arguments (Wesolowski, Journal of Cryptology 2020), or in groups that come with the promise of not having any small subgroups (Pietrzak, ITCS 2019). The only statistically-sound PoE in general groups of unknown order is due to Block et al. (CRYPTO 2021), and can be seen as an elaborate parallel repetition of Pietrzak’s PoE: to achieve λ bits of security, say λ=80, the number of repetitions required (and thus the blow-up in communication) is as large as λ. In this work, we propose a statistically-sound PoE for the case where the exponent q is the product of all primes up to some bound B. We show that, in this case, it suffices to run only λ/log(B) parallel instances of Pietrzak’s PoE, which reduces the concrete proof-size compared to Block et al. by an order of magnitude. Furthermore, we show that in the known applications where PoEs are used as a building block, such structured exponents are viable. Finally, we also discuss batching of our PoE, showing that many proofs (for the same G and q but different x and T) can be batched by adding only a single element to the proof per additional statement.}, author = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Klein, Karen and Pietrzak, Krzysztof Z}, booktitle = {Advances in Cryptology – CRYPTO 2022}, isbn = {9783031159787}, issn = {1611-3349}, location = {Santa Barbara, CA, United States}, pages = {370--399}, publisher = {Springer Nature}, title = {{Practical statistically-sound proofs of exponentiation in any group}}, doi = {10.1007/978-3-031-15979-4_13}, volume = {13508}, year = {2022}, } @inproceedings{9466, abstract = {In this work, we apply the dynamical systems analysis of Hanrot et al. (CRYPTO’11) to a class of lattice block reduction algorithms that includes (natural variants of) slide reduction and block-Rankin reduction. This implies sharper bounds on the polynomial running times (in the query model) for these algorithms and opens the door to faster practical variants of slide reduction. We give heuristic arguments showing that such variants can indeed speed up slide reduction significantly in practice.
This is confirmed by experimental evidence, which also shows that our variants are competitive with state-of-the-art reduction algorithms.}, author = {Walter, Michael}, booktitle = {Public-Key Cryptography – PKC 2021}, isbn = {9783030752446}, issn = {1611-3349}, location = {Virtual}, pages = {45--67}, publisher = {Springer Nature}, title = {{The convergence of slide-type reductions}}, doi = {10.1007/978-3-030-75245-3_3}, volume = {12710}, year = {2021}, } @inproceedings{9826, abstract = {Automated contact tracing aims at supporting manual contact tracing during pandemics by alerting users of encounters with infected people. There are currently many proposals for protocols (like the “decentralized” DP-3T and PACT or the “centralized” ROBERT and DESIRE) to be run on mobile phones, where the basic idea is to regularly broadcast (using low energy Bluetooth) some values, and at the same time store (a function of) incoming messages broadcast by users in their proximity. In the existing proposals, one can trigger false positives on a massive scale by an “inverse-Sybil” attack, where a large number of devices (malicious users or hacked phones) pretend to be the same user, such that later just a single person needs to be diagnosed (and allowed to upload) to trigger an alert for all users who were in proximity to any of this large group of devices. We propose the first protocols that do not succumb to such attacks, assuming the devices involved in the attack do not constantly communicate, which we observe is a necessary assumption. The high-level idea of the protocols is to derive the values to be broadcast via a hash chain, so that two (or more) devices that want to launch an inverse-Sybil attack will not be able to connect their respective chains and thus only one of them will be able to upload. Our protocols also achieve security against replay, belated replay, and one of them even against relay attacks.}, author = {Auerbach, Benedikt and Chakraborty, Suvradip and Klein, Karen and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z and Walter, Michael and Yeo, Michelle X}, booktitle = {Topics in Cryptology – CT-RSA 2021}, isbn = {9783030755386}, issn = {1611-3349}, location = {Virtual Event}, pages = {399--421}, publisher = {Springer Nature}, title = {{Inverse-Sybil attacks in automated contact tracing}}, doi = {10.1007/978-3-030-75539-3_17}, volume = {12704}, year = {2021}, } @inproceedings{9825, abstract = {The dual attack has long been considered a relevant attack on lattice-based cryptographic schemes relying on the hardness of learning with errors (LWE) and its structured variants. As solving LWE corresponds to finding a nearest point on a lattice, one may naturally wonder how efficient this dual approach is for solving more general closest vector problems, such as the classical closest vector problem (CVP), the variants bounded distance decoding (BDD) and approximate CVP, and preprocessing versions of these problems. While primal, sieving-based solutions to these problems (with preprocessing) were recently studied in a series of works on approximate Voronoi cells [Laa16b, DLdW19, Laa20, DLvW20], for the dual attack no such overview exists, especially for problems with preprocessing. With one of the take-away messages of the approximate Voronoi cell line of work being that primal attacks work well for approximate CVP(P) but scale poorly for BDD(P), one may further wonder if the dual attack suffers the same drawbacks, or if it is perhaps a better solution when trying to solve BDD(P).
In this work we provide an overview of cost estimates for dual algorithms for solving these “classical” closest lattice vector problems. Heuristically, we expect to solve the search version of average-case CVPP in time and space 2^(0.293d+o(d)) in the single-target model. The distinguishing version of average-case CVPP, where we wish to distinguish between random targets and targets planted at distance (say) 0.99⋅g_d from the lattice, has the same complexity in the single-target model, but can be solved in time and space 2^(0.195d+o(d)) in the multi-target setting, when given a large number of targets from either target distribution. This suggests an inequivalence between distinguishing and searching, as we do not expect a similar improvement in the multi-target setting to hold for search-CVPP. We analyze three slightly different decoders, both for distinguishing and searching, and experimentally obtain concrete cost estimates for the dual attack in dimensions 50 to 80, which confirm our heuristic assumptions, and show that the hidden order terms in the asymptotic estimates are quite small. Our main take-away message is that the dual attack appears to mirror the approximate Voronoi cell line of work – whereas using approximate Voronoi cells works well for approximate CVP(P) but scales poorly for BDD(P), the dual approach scales well for BDD(P) instances but performs poorly on approximate CVP(P).}, author = {Laarhoven, Thijs and Walter, Michael}, booktitle = {Topics in Cryptology – CT-RSA 2021}, isbn = {9783030755386}, issn = {1611-3349}, location = {Virtual Event}, pages = {478--502}, publisher = {Springer Nature}, title = {{Dual lattice attacks for closest vector problems (with preprocessing)}}, doi = {10.1007/978-3-030-75539-3_20}, volume = {12704}, year = {2021}, }