@inproceedings{10407, abstract = {Digital hardware Trojans are integrated circuits whose implementation differs from the specification in an arbitrary and malicious way. For example, the circuit can differ from its specified input/output behavior after some fixed number of queries (known as “time bombs”) or on some particular input (known as “cheat codes”). To detect such Trojans, countermeasures using multiparty computation (MPC) or verifiable computation (VC) have been proposed. On a high level, to realize a circuit with specification F one has more sophisticated circuits F⋄ manufactured (where F⋄ specifies an MPC or VC of F), and then embeds these F⋄’s into a master circuit which must be trusted but is relatively simple compared to F. These solutions impose a significant overhead, as F⋄ is much more complex than F, and the master circuits are not exactly trivial either. In this work, we show that in restricted settings, where F has no evolving state and is queried on independent inputs, we can achieve a relaxed security notion using very simple constructions. In particular, we do not change the specification of the circuit at all (i.e., F=F⋄). Moreover, the master circuit basically just queries a subset of its manufactured circuits and checks if they’re all the same. The security we achieve guarantees that, if the manufactured circuits are initially tested on up to T inputs, the master circuit will catch Trojans that try to deviate on significantly more than a 1/T fraction of the inputs. This bound is optimal for the type of construction considered, and we provably achieve it using a construction where 12 instantiations of F need to be embedded into the master. We also discuss an extremely simple construction with just 2 instantiations for which we conjecture that it already achieves the optimal bound.}, author = {Chakraborty, Suvradip and Dziembowski, Stefan and Gałązka, Małgorzata and Lizurej, Tomasz and Pietrzak, Krzysztof Z and Yeo, Michelle X}, booktitle = {19th International Conference on Theory of Cryptography}, isbn = {978-3-030-90452-4}, issn = {1611-3349}, location = {Raleigh, NC, United States}, pages = {397--428}, publisher = {Springer Nature}, title = {{Trojan-resilience without cryptography}}, doi = {10.1007/978-3-030-90453-1_14}, volume = {13043}, year = {2021}, } @inproceedings{10408, abstract = {Key trees are often the best solution in terms of transmission cost and storage requirements for managing keys in a setting where a group needs to share a secret key, while being able to efficiently rotate the key material of users (in order to recover from a potential compromise, or to add or remove users). Applications include multicast encryption protocols like LKH (Logical Key Hierarchies) or group messaging like the current IETF proposal TreeKEM. A key tree is a (typically balanced) binary tree, where each node is identified with a key: leaf nodes hold users’ secret keys while the root is the shared group key. For a group of size N, each user just holds log(N) keys (the keys on the path from its leaf to the root) and its entire key material can be rotated by broadcasting 2log(N) ciphertexts (encrypting each fresh key on the path under the keys of its parents). In this work we consider the natural setting where we have many groups with partially overlapping sets of users, and ask if we can find solutions where the cost of rotating a key is better than in the trivial solution, where we have a separate key tree for each group.
We show that in an asymptotic setting (where the number m of groups is fixed while the number N of users grows) there exist more general key graphs whose cost converges to the cost of a single group, thus saving a factor linear in the number of groups over the trivial solution. As our asymptotic “solution” converges very slowly and performs poorly on concrete examples, we propose an algorithm that uses a natural heuristic to compute a key graph for any given group structure. Our algorithm combines two greedy algorithms, and is thus very efficient: it first converts the group structure into a “lattice graph”, which is then turned into a key graph by repeatedly applying the algorithm for constructing a Huffman code. To better understand how far our proposal is from an optimal solution, we prove lower bounds on the update cost of continuous group-key agreement and multicast encryption in a symbolic model admitting (asymmetric) encryption, pseudorandom generators, and secret sharing as building blocks.}, author = {Alwen, Joel F and Auerbach, Benedikt and Baig, Mirza Ahad and Cueto Noval, Miguel and Klein, Karen and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {19th International Conference on Theory of Cryptography}, isbn = {978-3-030-90455-5}, issn = {1611-3349}, location = {Raleigh, NC, United States}, pages = {222--253}, publisher = {Springer Nature}, title = {{Grafting key trees: Efficient key management for overlapping groups}}, doi = {10.1007/978-3-030-90456-2_8}, volume = {13044}, year = {2021}, } @inproceedings{10409, abstract = {We show that Yao’s garbling scheme is adaptively indistinguishable for the class of Boolean circuits of size S and treewidth w with only a S^O(w) loss in security. For instance, circuits with constant treewidth are, as a result, adaptively indistinguishable with only a polynomial loss. This (partially) complements a negative result of Applebaum et al. (Crypto 2013), which showed (assuming one-way functions) that Yao’s garbling scheme cannot be adaptively simulatable. As main technical contributions, we introduce a new pebble game that abstracts out our security reduction and then present a pebbling strategy for this game where the number of pebbles used is roughly O(δ w log(S)), δ being the fan-out of the circuit. The design of the strategy relies on separators, a graph-theoretic notion with connections to circuit complexity.
}, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z}, booktitle = {19th International Conference on Theory of Cryptography}, isbn = {978-3-030-90452-4}, issn = {1611-3349}, location = {Raleigh, NC, United States}, pages = {486--517}, publisher = {Springer Nature}, title = {{On treewidth, separators and Yao’s garbling}}, doi = {10.1007/978-3-030-90453-1_17}, volume = {13043}, year = {2021}, } @inproceedings{10609, abstract = {We study multi-party computation (MPC) in the setting of subversion, where the adversary tampers with the machines of honest parties. Our goal is to construct actively secure MPC protocols where parties are corrupted adaptively by an adversary (as in the standard adaptive security setting), and in addition, honest parties’ machines are compromised. The idea of reverse firewalls (RF) was introduced at EUROCRYPT’15 by Mironov and Stephens-Davidowitz as an approach to protecting protocols against corruption of honest parties’ devices. Intuitively, an RF for a party P is an external entity that sits between P and the outside world and whose scope is to sanitize P’s incoming and outgoing messages in the face of subversion of their computer. Mironov and Stephens-Davidowitz constructed a protocol for passively-secure two-party computation. At CRYPTO’20, Chakraborty, Dziembowski and Nielsen constructed a protocol for secure computation with firewalls that improved on this result, both by extending it to the multi-party setting and by considering active security in the presence of static corruptions. In this paper, we initiate the study of RF for MPC in the adaptive setting. We put forward a definition for adaptively secure MPC in the reverse firewall setting, explore relationships among the security notions, and then construct reverse firewalls for MPC in this stronger setting of adaptive security. We also resolve the open question of Chakraborty, Dziembowski and Nielsen by removing the need for a trusted setup in constructing RF for MPC. Towards this end, we construct reverse firewalls for adaptively secure augmented coin tossing and adaptively secure zero-knowledge protocols and obtain a constant-round adaptively secure MPC protocol in the reverse firewall setting without setup. Along the way, we propose a new multi-party adaptively secure coin tossing protocol in the plain model, which is of independent interest.}, author = {Chakraborty, Suvradip and Ganesh, Chaya and Pancholi, Mahak and Sarkar, Pratik}, booktitle = {27th International Conference on the Theory and Application of Cryptology and Information Security}, isbn = {978-3-030-92074-6}, issn = {1611-3349}, location = {Virtual, Singapore}, pages = {335--364}, publisher = {Springer Nature}, title = {{Reverse firewalls for adaptively secure MPC without setup}}, doi = {10.1007/978-3-030-92075-3_12}, volume = {13091}, year = {2021}, } @inproceedings{10041, abstract = {Yao’s garbling scheme is one of the most fundamental cryptographic constructions. Lindell and Pinkas (Journal of Cryptology 2009) gave a formal proof of security in the selective setting, where the adversary chooses the challenge inputs before seeing the garbled circuit, assuming secure symmetric-key encryption (and hence one-way functions). This was followed by results, both positive and negative, concerning its security in the stronger, adaptive setting. Applebaum et al.
(Crypto 2013) showed that it cannot satisfy adaptive security as is, due to a simple incompressibility argument. Jafargholi and Wichs (TCC 2016) considered a natural adaptation of Yao’s scheme (where the output mapping is sent in the online phase, together with the garbled input) that circumvents this negative result, and proved that it is adaptively secure, at least for shallow circuits. In particular, they showed that for the class of circuits of depth δ, the loss in security is at most exponential in δ. The above results all concern the simulation-based notion of security. In this work, we show that the upper bound of Jafargholi and Wichs is basically optimal in a strong sense. As our main result, we show that there exists a family of Boolean circuits, one for each depth δ∈N, such that any black-box reduction proving the adaptive indistinguishability of the natural adaptation of Yao’s scheme from any symmetric-key encryption has to lose a factor that is exponential in √δ. Since indistinguishability is a weaker notion than simulation, our bound also applies to adaptive simulation. To establish our results, we build on the recent approach of Kamath et al. (Eprint 2021), which uses pebbling lower bounds in conjunction with oracle separations to prove fine-grained lower bounds on loss in cryptographic security.}, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Wichs, Daniel}, booktitle = {41st Annual International Cryptology Conference, Part II}, isbn = {978-3-030-84244-4}, issn = {1611-3349}, location = {Virtual}, pages = {486--515}, publisher = {Springer Nature}, title = {{Limits on the Adaptive Security of Yao’s Garbling}}, doi = {10.1007/978-3-030-84245-1_17}, volume = {12826}, year = {2021}, } @inproceedings{10049, abstract = {While messaging systems with strong security guarantees are widely used in practice, designing a protocol that scales efficiently to large groups and enjoys similar security guarantees remains largely open. The two existing proposals to date are ART (Cohn-Gordon et al., CCS 2018) and TreeKEM (IETF, The Messaging Layer Security Protocol, draft). TreeKEM is the candidate currently considered by the IETF MLS working group, but dynamic group operations (i.e. adding and removing users) can cause efficiency issues. In this paper we formalize and analyze a variant of TreeKEM which we term Tainted TreeKEM (TTKEM for short). The basic idea underlying TTKEM was suggested by Millican (MLS mailing list, February 2018). This version is more efficient than TreeKEM for some natural distributions of group operations; we quantify this through simulations. Our second contribution is two security proofs for TTKEM which establish post-compromise and forward secrecy even against adaptive attackers. The security loss (to the underlying PKE) in the Random Oracle Model is a polynomial factor, and a quasipolynomial one in the Standard Model. Our proofs can be adapted to TreeKEM as well. Before our work no security proof for any TreeKEM-like protocol establishing tight security against an adversary who can adaptively choose the sequence of operations was known. We are also the first to prove (or even formalize) active security where the server can arbitrarily deviate from the protocol specification.
Proving fully active security – where the users, too, can arbitrarily deviate – remains open.}, author = {Klein, Karen and Pascual Perez, Guillermo and Walter, Michael and Kamath Hosdurg, Chethan and Capretto, Margarita and Cueto Noval, Miguel and Markov, Ilia and Yeo, Michelle X and Alwen, Joel F and Pietrzak, Krzysztof Z}, booktitle = {2021 IEEE Symposium on Security and Privacy}, location = {San Francisco, CA, United States}, pages = {268--284}, publisher = {IEEE}, title = {{Keep the dirt: tainted TreeKEM, adaptively and actively secure continuous group key agreement}}, doi = {10.1109/sp40001.2021.00035}, year = {2021}, } @inproceedings{10044, abstract = {We show that Yao’s garbling scheme is adaptively indistinguishable for the class of Boolean circuits of size S and treewidth w with only a S^O(w) loss in security. For instance, circuits with constant treewidth are, as a result, adaptively indistinguishable with only a polynomial loss. This (partially) complements a negative result of Applebaum et al. (Crypto 2013), which showed (assuming one-way functions) that Yao’s garbling scheme cannot be adaptively simulatable. As main technical contributions, we introduce a new pebble game that abstracts out our security reduction and then present a pebbling strategy for this game where the number of pebbles used is roughly O(d w log(S)), d being the fan-out of the circuit. The design of the strategy relies on separators, a graph-theoretic notion with connections to circuit complexity.}, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z}, booktitle = {19th Theory of Cryptography Conference 2021}, location = {Raleigh, NC, United States}, publisher = {International Association for Cryptologic Research}, title = {{On treewidth, separators and Yao's garbling}}, year = {2021}, } @phdthesis{10035, abstract = {Many security definitions come in two flavors: a stronger “adaptive” flavor, where the adversary can arbitrarily make various choices during the course of the attack, and a weaker “selective” flavor where the adversary must commit to some or all of their choices a-priori. For example, in the context of identity-based encryption, selective security requires the adversary to decide on the identity of the attacked party at the very beginning of the game whereas adaptive security allows the attacker to first see the master public key and some secret keys before making this choice. Often, it appears to be much easier to achieve selective security than it is to achieve adaptive security. A series of recent works shows how to cleverly achieve adaptive security in several such scenarios including generalized selective decryption [Pan07][FJP15], constrained PRFs [FKPR14], and Yao’s garbled circuits [JW16]. Although the above works expressed vague intuition that they share a common technique, the connection was never made precise. In this work we present a new framework (published at Crypto ’17 [JKK+17a]) that connects all of these works and allows us to present them in a unified and simplified fashion. Having the framework in place, we show how to achieve adaptive security for proxy re-encryption schemes (published at PKC ’19 [FKKP19]) and provide the first adaptive security proofs for continuous group key agreement protocols (published at S&P ’21 [KPW+21]). Questioning optimality of our framework, we then show that currently used proof techniques cannot lead to significantly better security guarantees for "graph-building" games (published at TCC ’21 [KKPW21a]).
These games cover generalized selective decryption, as well as the security of prominent constructions for constrained PRFs, continuous group key agreement, and proxy re-encryption. Finally, we revisit the adaptive security of Yao’s garbled circuits and extend the analysis of Jafargholi and Wichs in two directions: While they prove adaptive security only for a modified construction with increased online complexity, we provide the first positive results for the original construction by Yao (published at TCC ’21 [KKP21a]). On the negative side, we prove that the results of Jafargholi and Wichs are essentially optimal by showing that no black-box reduction can provide a significantly better security bound (published at Crypto ’21 [KKPW21c]).}, author = {Klein, Karen}, issn = {2663-337X}, pages = {276}, publisher = {Institute of Science and Technology Austria}, title = {{On the adaptive security of graph-based games}}, doi = {10.15479/at:ista:10035}, year = {2021}, } @inproceedings{10410, abstract = {The security of cryptographic primitives and protocols against adversaries that are allowed to make adaptive choices (e.g., which parties to corrupt or which queries to make) is notoriously difficult to establish. A broad theoretical framework was introduced by Jafargholi et al. [Crypto’17] for this purpose. In this paper we initiate the study of lower bounds on the loss in adaptive security for certain cryptographic protocols considered in the framework. We prove lower bounds that almost match the upper bounds (proven using the framework) for proxy re-encryption, prefix-constrained PRFs and generalized selective decryption, a security game that captures the security of certain group messaging and broadcast encryption schemes. Those primitives have in common that their security game involves an underlying graph that can be adaptively built by the adversary. Some of our lower bounds only apply to a restricted class of black-box reductions which we term “oblivious” (the existing upper bounds are of this restricted type), some apply to the broader but still restricted class of non-rewinding reductions, while our lower bound for proxy re-encryption applies to all black-box reductions. The fact that some of our lower bounds seem to crucially rely on obliviousness or at least a non-rewinding reduction hints at the exciting possibility that the existing upper bounds can be improved by using more sophisticated reductions. Our main conceptual contribution is a two-player multi-stage game called the Builder-Pebbler Game. We can translate bounds on the winning probabilities for various instantiations of this game into cryptographic lower bounds for the above-mentioned primitives using oracle separation techniques.}, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {19th International Conference on Theory of Cryptography}, isbn = {978-3-030-90452-4}, issn = {1611-3349}, location = {Raleigh, NC, United States}, pages = {550--581}, publisher = {Springer Nature}, title = {{The cost of adaptivity in security games on graphs}}, doi = {10.1007/978-3-030-90453-1_19}, volume = {13043}, year = {2021}, } @inproceedings{10048, abstract = {The security of cryptographic primitives and protocols against adversaries that are allowed to make adaptive choices (e.g., which parties to corrupt or which queries to make) is notoriously difficult to establish. A broad theoretical framework was introduced by Jafargholi et al. [Crypto’17] for this purpose.
In this paper we initiate the study of lower bounds on the loss in adaptive security for certain cryptographic protocols considered in the framework. We prove lower bounds that almost match the upper bounds (proven using the framework) for proxy re-encryption, prefix-constrained PRFs and generalized selective decryption, a security game that captures the security of certain group messaging and broadcast encryption schemes. Those primitives have in common that their security game involves an underlying graph that can be adaptively built by the adversary. Some of our lower bounds only apply to a restricted class of black-box reductions which we term “oblivious” (the existing upper bounds are of this restricted type), some apply to the broader but still restricted class of non-rewinding reductions, while our lower bound for proxy re-encryption applies to all black-box reductions. The fact that some of our lower bounds seem to crucially rely on obliviousness or at least a non-rewinding reduction hints at the exciting possibility that the existing upper bounds can be improved by using more sophisticated reductions. Our main conceptual contribution is a two-player multi-stage game called the Builder-Pebbler Game. We can translate bounds on the winning probabilities for various instantiations of this game into cryptographic lower bounds for the above-mentioned primitives using oracle separation techniques.}, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {19th Theory of Cryptography Conference 2021}, location = {Raleigh, NC, United States}, publisher = {International Association for Cryptologic Research}, title = {{The cost of adaptivity in security games on graphs}}, year = {2021}, } @inproceedings{9969, abstract = {Payment channel networks are a promising approach to improve the scalability of cryptocurrencies: they allow transactions to be performed in a peer-to-peer fashion, along multihop routes in the network, without requiring consensus on the blockchain. However, during the discovery of cost-efficient routes for the transaction, critical information may be revealed about the transacting entities. This paper initiates the study of privacy-preserving route discovery mechanisms for payment channel networks. In particular, we present LightPIR, an approach which allows a client to learn the shortest (or cheapest in terms of fees) path between two nodes without revealing any information about the endpoints of the transaction to the servers. The two main observations which allow for an efficient solution in LightPIR are that: (1) surprisingly, hub labeling algorithms – which were developed to preprocess “street network like” graphs so one can later efficiently compute shortest paths – also perform well for the graphs underlying payment channel networks, and that (2) hub labeling algorithms can be conveniently combined with private information retrieval. LightPIR relies on a simple hub labeling heuristic on top of existing hub labeling algorithms which leverages the specific topological features of cryptocurrency networks to further minimize storage and bandwidth overheads.
In a case study considering the Lightning network, we show that our approach is an order of magnitude more efficient than a privacy-preserving baseline based on using private information retrieval on a database that stores all-pairs shortest paths.}, author = {Pietrzak, Krzysztof Z and Salem, Iosif and Schmid, Stefan and Yeo, Michelle X}, isbn = {978-1-6654-4501-6}, issn = {1861-2288}, location = {Espoo and Helsinki, Finland}, publisher = {IEEE}, title = {{LightPIR: Privacy-preserving route discovery for payment channel networks}}, doi = {10.23919/IFIPNetworking52078.2021.9472205}, year = {2021}, } @inproceedings{8322, abstract = {Reverse firewalls were introduced at Eurocrypt 2015 by Mironov and Stephens-Davidowitz, as a method for protecting cryptographic protocols against attacks on the devices of the honest parties. In a nutshell: a reverse firewall is placed outside of a device and its goal is to “sanitize” the messages sent by it, in such a way that a malicious device cannot leak its secrets to the outside world. It is typically assumed that the cryptographic devices are attacked in a “functionality-preserving way” (i.e. informally speaking, the functionality of the protocol remains unchanged under these attacks). In their paper, Mironov and Stephens-Davidowitz construct a protocol for passively-secure two-party computation with firewalls, leaving the extension of this result to stronger models as an open question. In this paper, we address this problem by constructing a protocol for secure computation with firewalls that has two main advantages over the original protocol from Eurocrypt 2015. Firstly, it is a multiparty computation protocol (i.e. it works for an arbitrary number n of parties, not just for 2). Secondly, it is secure in much stronger corruption settings, namely in the active corruption model. More precisely: we consider an adversary that can fully corrupt up to n−1 parties, while the remaining parties are corrupted in a functionality-preserving way. Our core techniques are: malleable commitments and malleable non-interactive zero-knowledge, which in particular allow us to create a novel protocol for multiparty augmented coin-tossing into the well with reverse firewalls (that is based on a protocol of Lindell from Crypto 2001).}, author = {Chakraborty, Suvradip and Dziembowski, Stefan and Nielsen, Jesper Buus}, booktitle = {Advances in Cryptology – CRYPTO 2020}, isbn = {978-3-030-56879-5}, issn = {1611-3349}, location = {Santa Barbara, CA, United States}, pages = {732--762}, publisher = {Springer Nature}, title = {{Reverse firewalls for actively secure MPCs}}, doi = {10.1007/978-3-030-56880-1_26}, volume = {12171}, year = {2020}, } @inproceedings{8339, abstract = {Discrete Gaussian distributions over lattices are central to lattice-based cryptography, and to the computational and mathematical aspects of lattices more broadly. The literature contains a wealth of useful theorems about the behavior of discrete Gaussians under convolutions and related operations. Yet despite their structural similarities, most of these theorems are formally incomparable, and their proofs tend to be monolithic and written nearly “from scratch,” making them unnecessarily hard to verify, understand, and extend. In this work we present a modular framework for analyzing linear operations on discrete Gaussian distributions. The framework abstracts away the particulars of Gaussians, and usually reduces proofs to the choice of appropriate linear transformations and elementary linear algebra.
To showcase the approach, we establish several general properties of discrete Gaussians, and show how to obtain all prior convolution theorems (along with some new ones) as straightforward corollaries. As another application, we describe a self-reduction for Learning With Errors (LWE) that uses a fixed number of samples to generate an unlimited number of additional ones (having somewhat larger error). The distinguishing features of our reduction are its simple analysis in our framework, and its exclusive use of discrete Gaussians without any loss in parameters relative to a prior mixed discrete-and-continuous approach. As a contribution of independent interest, for subgaussian random matrices we prove a singular value concentration bound with explicitly stated constants, and we give tighter heuristics for specific distributions that are commonly used for generating lattice trapdoors. These bounds yield improvements in the concrete bit-security estimates for trapdoor lattice cryptosystems.}, author = {Genise, Nicholas and Micciancio, Daniele and Peikert, Chris and Walter, Michael}, booktitle = {23rd IACR International Conference on the Practice and Theory of Public-Key Cryptography}, isbn = {978-3-030-45373-2}, issn = {1611-3349}, location = {Edinburgh, United Kingdom}, pages = {623--651}, publisher = {Springer Nature}, title = {{Improved discrete Gaussian and subgaussian analysis for lattice cryptography}}, doi = {10.1007/978-3-030-45374-9_21}, volume = {12110}, year = {2020}, } @inproceedings{8987, abstract = {Currently several projects aim at designing and implementing protocols for privacy-preserving automated contact tracing to help fight the current pandemic. Those proposals are quite similar: in their most basic form, they propose an app for mobile phones which broadcasts frequently changing pseudorandom identifiers via (low energy) Bluetooth, and at the same time, the app stores the IDs broadcast by phones in its proximity. Only if a user tests positive do they upload either the beacons they broadcast (which is the case in decentralized proposals such as DP-3T, east and west coast PACT or Covid Watch) or the beacons they received (as in PEPP-PT or ROBERT) during the last two weeks or so. Vaudenay [eprint 2020/399] observes that this basic scheme (he considers the DP-3T proposal) succumbs to relay and even replay attacks, and proposes more complex interactive schemes which prevent those attacks without giving up too many privacy aspects. Unfortunately interaction is problematic for this application for efficiency and security reasons. The countermeasures that have been suggested so far are either not practical or give up on key privacy aspects. We propose a simple non-interactive variant of the basic protocol that: (security) provably prevents replay and (if location data is available) relay attacks; (privacy) guarantees that the data of all parties (even jointly) reveals no information on the location or time where encounters happened; (efficiency) requires a broadcast message that fits into 128 bits and uses only basic crypto (commitments and secret-key authentication).
Towards this end we introduce the concept of “delayed authentication”, which basically is a message authentication code where verification can be done in two steps: the first doesn’t require the key, and the second doesn’t require the message.}, author = {Pietrzak, Krzysztof Z}, booktitle = {Progress in Cryptology – INDOCRYPT 2020}, isbn = {978-3-030-65276-0}, issn = {1611-3349}, location = {Bangalore, India}, pages = {3--15}, publisher = {Springer Nature}, title = {{Delayed authentication: Preventing replay and relay attacks in private contact tracing}}, doi = {10.1007/978-3-030-65277-7_1}, volume = {12578}, year = {2020}, } @inproceedings{7966, abstract = {For 1≤m≤n, we consider a natural m-out-of-n multi-instance scenario for a public-key encryption (PKE) scheme. An adversary, given n independent instances of PKE, wins if he breaks at least m out of the n instances. In this work, we are interested in the scaling factor of PKE schemes, SF, which measures how well the difficulty of breaking m out of the n instances scales in m. That is, a scaling factor SF=ℓ indicates that breaking m out of n instances is at least ℓ times more difficult than breaking one single instance. A PKE scheme with small scaling factor hence provides an ideal target for mass surveillance. In fact, the Logjam attack (CCS 2015) implicitly exploited, among other things, an almost constant scaling factor of ElGamal over finite fields (with shared group parameters). For Hashed ElGamal over elliptic curves, we use the generic group model to argue that the scaling factor depends on the scheme's granularity. In low granularity, meaning each public key contains its independent group parameter, the scheme has optimal scaling factor SF=m; in medium and high granularity, meaning all public keys share the same group parameter, the scheme still has a reasonable scaling factor SF=√m. Our findings underline that instantiating ElGamal over elliptic curves should be preferred to finite fields in a multi-instance scenario. As our main technical contribution, we derive new generic-group lower bounds of Ω(√(mp)) on the difficulty of solving both the m-out-of-n Gap Discrete Logarithm and the m-out-of-n Gap Computational Diffie-Hellman problem over groups of prime order p, extending a recent result by Yun (EUROCRYPT 2015). We establish the lower bound by studying the hardness of a related computational problem which we call the search-by-hypersurface problem.}, author = {Auerbach, Benedikt and Giacon, Federico and Kiltz, Eike}, booktitle = {Advances in Cryptology – EUROCRYPT 2020}, isbn = {978-3-030-45726-6}, issn = {1611-3349}, pages = {475--506}, publisher = {Springer Nature}, title = {{Everybody’s a target: Scalability in public-key encryption}}, doi = {10.1007/978-3-030-45727-3_16}, volume = {12107}, year = {2020}, } @phdthesis{7896, abstract = {A search problem lies in the complexity class FNP if a solution to the given instance of the problem can be verified efficiently. The complexity class TFNP consists of all search problems in FNP that are total in the sense that a solution is guaranteed to exist. TFNP contains a host of interesting problems from fields such as algorithmic game theory, computational topology, number theory and combinatorics. Since TFNP is a semantic class, it is unlikely to have a complete problem. Instead, one studies its syntactic subclasses which are defined based on the combinatorial principle used to argue totality.
Of particular interest is the subclass PPAD, which contains important problems like computing a Nash equilibrium for bimatrix games and computational counterparts of several fixed-point theorems as complete problems. In the thesis, we undertake the study of average-case hardness of TFNP, and in particular its subclass PPAD. Almost nothing was known about average-case hardness of PPAD before a series of recent results showed how to achieve it using a cryptographic primitive called program obfuscation. However, it is currently not known how to construct program obfuscation from standard cryptographic assumptions. Therefore, it is desirable to relax the assumption under which average-case hardness of PPAD can be shown. In the thesis we take a step in this direction. First, we show that assuming the (average-case) hardness of a number-theoretic problem related to factoring of integers, which we call Iterated-Squaring, PPAD is hard on average in the random-oracle model. Then we strengthen this result to show that the average-case hardness of PPAD reduces to the (adaptive) soundness of the Fiat-Shamir Transform, a well-known technique used to compile a public-coin interactive protocol into a non-interactive one. As a corollary, we obtain average-case hardness for PPAD in the random-oracle model assuming the worst-case hardness of #SAT. Moreover, the above results can all be strengthened to obtain average-case hardness for the class CLS ⊆ PPAD. Our main technical contribution is constructing incrementally-verifiable procedures for computing Iterated-Squaring and #SAT. By incrementally-verifiable, we mean that every intermediate state of the computation includes a proof of its correctness, and the proof can be updated and verified in polynomial time. Previous constructions of such procedures relied on strong, non-standard assumptions. Instead, we introduce a technique called recursive proof-merging to obtain the same from weaker assumptions.}, author = {Kamath Hosdurg, Chethan}, issn = {2663-337X}, pages = {126}, publisher = {Institute of Science and Technology Austria}, title = {{On the average-case hardness of total search problems}}, doi = {10.15479/AT:ISTA:7896}, year = {2020}, } @article{5887, abstract = {Cryptographic security is usually defined as a guarantee that holds except when a bad event with negligible probability occurs, and nothing is guaranteed in that bad case. However, in settings where such failure can happen with substantial probability, one needs to provide guarantees even for the bad case. A typical example is where a (possibly weak) password is used instead of a secure cryptographic key to protect a session, the bad event being that the adversary correctly guesses the password. In a situation with multiple such sessions, a per-session guarantee is desired: any session for which the password has not been guessed remains secure, independently of whether other sessions have been compromised. A new formalism for stating such gracefully degrading security guarantees is introduced and applied to analyze the examples of password-based message authentication and password-based encryption. While a natural per-message guarantee is achieved for authentication, the situation of password-based encryption is more delicate: a per-session confidentiality guarantee only holds against attackers for which the distribution of password-guessing effort over the sessions is known in advance.
In contrast, for more general attackers without such a restriction, a strong, composable notion of security cannot be achieved.}, author = {Demay, Gregory and Gazi, Peter and Maurer, Ueli and Tackmann, Bjorn}, issn = {0926-227X}, journal = {Journal of Computer Security}, number = {1}, pages = {75--111}, publisher = {IOS Press}, title = {{Per-session security: Password-based cryptography revisited}}, doi = {10.3233/JCS-181131}, volume = {27}, year = {2019}, } @inproceedings{6528, abstract = {We construct a verifiable delay function (VDF) by showing how the Rivest-Shamir-Wagner time-lock puzzle can be made publicly verifiable. Concretely, we give a statistically sound public-coin protocol to prove that a tuple (N,x,T,y) satisfies y = x^(2^T) (mod N), where the prover doesn’t know the factorization of N and its running time is dominated by solving the puzzle, that is, computing x^(2^T), which is conjectured to require T sequential squarings. To get a VDF we make this protocol non-interactive using the Fiat-Shamir heuristic. The motivation for this work comes from the Chia blockchain design, which uses a VDF as a key ingredient. For typical parameters (T ≤ 2^40, N = 2048), our proofs are of size around 10 KB, verification costs around three RSA exponentiations, and computing the proof is 8000 times faster than solving the puzzle even without any parallelism.}, author = {Pietrzak, Krzysztof Z}, booktitle = {10th Innovations in Theoretical Computer Science Conference}, isbn = {978-3-95977-095-8}, issn = {1868-8969}, location = {San Diego, CA, United States}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Simple verifiable delay functions}}, doi = {10.4230/LIPICS.ITCS.2019.60}, volume = {124}, year = {2019}, } @inbook{6726, abstract = {Randomness is an essential part of any secure cryptosystem, but many constructions rely on distributions that are not uniform. This is particularly true for lattice-based cryptosystems, which more often than not make use of discrete Gaussian distributions over the integers. For practical purposes it is crucial to evaluate the impact that approximation errors have on the security of a scheme to provide the best possible trade-off between security and performance. Recent years have seen surprising results allowing the use of relatively low precision while maintaining high levels of security. A key insight in these results is that sampling a distribution with low relative error can provide very strong security guarantees. Since floating point numbers provide guarantees on the relative approximation error, they seem a suitable tool in this setting, but it is not obvious which sampling algorithms can actually profit from them. While previous works have shown that inversion sampling can be adapted to provide a low relative error (Pöppelmann et al., CHES 2014; Prest, ASIACRYPT 2017), other works have called into question if this is possible for other sampling techniques (Zheng et al., Eprint report 2018/309). In this work, we consider all sampling algorithms that are popular in the cryptographic setting and analyze the relationship of floating point precision and the resulting relative error.
We show that all of the algorithms either natively achieve a low relative error or can be adapted to do so.}, author = {Walter, Michael}, booktitle = {Progress in Cryptology – AFRICACRYPT 2019}, editor = {Buchmann, J and Nitaj, A and Rachidi, T}, isbn = {978-3-030-23695-3}, issn = {0302-9743}, location = {Rabat, Morocco}, pages = {157--180}, publisher = {Springer Nature}, title = {{Sampling the integers with low relative error}}, doi = {10.1007/978-3-030-23696-0_9}, volume = {11627}, year = {2019}, } @inproceedings{7136, abstract = {It is well established that the notion of min-entropy fails to satisfy the \emph{chain rule} of the form H(X,Y) = H(X|Y) + H(Y), known for Shannon entropy. Such a property would help to analyze how min-entropy is split among smaller blocks. Problems of this kind arise for example when constructing extractors and dispersers. We show that any sequence of variables exhibits a very strong block-source structure (conditional distributions of blocks are nearly flat) when we \emph{spoil few correlated bits}. This implies, conditioned on the spoiled bits, that \emph{splitting-recombination properties} hold. In particular, we have many nice properties that min-entropy doesn't obey in general, for example strong chain rules, "information can't hurt" inequalities, equivalences of average and worst-case conditional entropy definitions and others. Quantitatively, for any sequence X_1,…,X_t of random variables over an alphabet X we prove that, when conditioned on m = t·O(log log|X| + log log(1/ϵ) + log t) bits of auxiliary information, all conditional distributions of the form X_i|X_{<i} (the original distribution is only 1-wise independent). We observe that the security of PMAC will not increase in general, even if the masks are chosen from a 2-wise independent distribution, and then prove that the security increases to O(q^2/2^n) if the τ_i are 4-wise independent. Due to simple extension attacks, this is the best bound one can hope for, using any distribution on the masks. Whether 3-wise independence is already sufficient to get this level of security is left as an open problem.}, author = {Gazi, Peter and Pietrzak, Krzysztof Z and Rybar, Michal}, issn = {2519-173X}, journal = {IACR Transactions on Symmetric Cryptology}, number = {2}, pages = {145--161}, publisher = {Ruhr University Bochum}, title = {{The exact security of PMAC}}, doi = {10.13154/TOSC.V2016.I2.145-161}, volume = {2016}, year = {2017}, } @inproceedings{559, abstract = {Proofs of space (PoS) were suggested as a more ecological and economical alternative to proofs of work, which are currently used in blockchain designs like Bitcoin. The existing PoS are based on rather sophisticated graph pebbling lower bounds. Much simpler and in several aspects more efficient schemes based on inverting random functions have been suggested, but they don’t give meaningful security guarantees due to existing time-memory trade-offs. In particular, Hellman showed that any permutation over a domain of size N can be inverted in time T by an algorithm that is given S bits of auxiliary information whenever S·T ≈ N. For functions Hellman gives a weaker attack with S^2·T ≈ N^2 (e.g., S = T ≈ N^(2/3)). To prove lower bounds, one considers an adversary who has access to an oracle f: [N] → [N] and can make T oracle queries. The best known lower bound is S·T ∈ Ω(N) and holds for random functions and permutations. We construct functions that provably require more time and/or space to invert.
Specifically, for any constant k we construct a function [N] → [N] that cannot be inverted unless S^k·T ∈ Ω(N^k) (in particular, S = T ≈ N^(k/(k+1))). Our construction does not contradict Hellman’s time-memory trade-off, because it cannot be efficiently evaluated in the forward direction. However, its entire function table can be computed in time quasilinear in N, which is sufficient for the PoS application. Our simplest construction is built from a random function oracle g: [N] × [N] → [N] and a random permutation oracle f: [N] → [N] and is defined as h(x) = g(x, x′) where f(x) = π(f(x′)) with π being any involution without a fixed point, e.g. flipping all the bits. For this function we prove that any adversary who gets S bits of auxiliary information, makes at most T oracle queries, and inverts h on an ϵ fraction of outputs must satisfy S^2·T ∈ Ω(ϵ^2·N^2).}, author = {Abusalah, Hamza M and Alwen, Joel F and Cohen, Bram and Khilko, Danylo and Pietrzak, Krzysztof Z and Reyzin, Leonid}, isbn = {978-3-319-70696-2}, location = {Hong Kong, China}, pages = {357--379}, publisher = {Springer}, title = {{Beyond Hellman’s time-memory trade-offs with applications to proofs of space}}, doi = {10.1007/978-3-319-70697-9_13}, volume = {10625}, year = {2017}, } @inproceedings{637, abstract = {For many cryptographic primitives, it is relatively easy to achieve selective security (where the adversary commits a-priori to some of the choices to be made later in the attack) but appears difficult to achieve the more natural notion of adaptive security (where the adversary can make all choices on the go as the attack progresses). A series of recent works shows how to cleverly achieve adaptive security in several such scenarios including generalized selective decryption (Panjwani, TCC ’07 and Fuchsbauer et al., CRYPTO ’15), constrained PRFs (Fuchsbauer et al., ASIACRYPT ’14), and Yao’s garbled circuits (Jafargholi and Wichs, TCC ’16b). Although the above works expressed vague intuition that they share a common technique, the connection was never made precise. In this work we present a new framework that connects all of these works and allows us to present them in a unified and simplified fashion. Moreover, we use the framework to derive a new result for adaptively secure secret sharing over access structures defined via monotone circuits. We envision that further applications will follow in the future. Underlying our framework is the following simple idea. It is well known that selective security, where the adversary commits to n bits of information about his future choices, automatically implies adaptive security at the cost of amplifying the adversary’s advantage by a factor of up to 2^n. However, in some cases the proof of selective security proceeds via a sequence of hybrids, where each pair of adjacent hybrids locally only requires some smaller partial information consisting of m ≪ n bits. The partial information needed might be completely different between different pairs of hybrids, and if we look across all the hybrids we might rely on the entire n-bit commitment. Nevertheless, the above is sufficient to prove adaptive security, at the cost of amplifying the adversary’s advantage by a factor of only 2^m ≪ 2^n. In all of our examples using the above framework, the different hybrids are captured by some sort of a graph pebbling game and the amount of information that the adversary needs to commit to in each pair of hybrids is bounded by the maximum number of pebbles in play at any point in time.
Therefore, coming up with better strategies for proving adaptive security translates to finding better pebbling strategies for different types of graphs.}, author = {Jafargholi, Zahra and Kamath Hosdurg, Chethan and Klein, Karen and Komargodski, Ilan and Pietrzak, Krzysztof Z and Wichs, Daniel}, editor = {Katz, Jonathan and Shacham, Hovav}, isbn = {978-3-319-63687-0}, location = {Santa Barbara, CA, United States}, pages = {133--163}, publisher = {Springer}, title = {{Be adaptive avoid overcommitting}}, doi = {10.1007/978-3-319-63688-7_5}, volume = {10401}, year = {2017}, } @inproceedings{1174, abstract = {Security of cryptographic applications is typically defined by security games. The adversary, within certain resources, cannot win with probability much better than 0 (for unpredictability applications, like one-way functions) or much better than 1/2 (indistinguishability applications, for instance encryption schemes). In so-called square-friendly applications the winning probability of the adversary, for different values of the application secret randomness, is not only close to 0 or 1/2 on average, but also concentrated in the sense that its second central moment is small. The class of square-friendly applications, which contains all unpredictability applications and many indistinguishability applications, is particularly important for key derivation. Barak et al. observed that for square-friendly applications one can beat the "RT-bound", extracting secure keys with significantly smaller entropy loss. In turn, Dodis and Yu showed that in square-friendly applications one can directly use a "weak" key, which has only high entropy, as a secure key. In this paper we give sharp lower bounds on square security assuming security for "weak" keys. We show that any application which is either (a) secure with weak keys or (b) allows for entropy savings for keys derived by universal hashing, must be square-friendly. Quantitatively, our lower bounds match the positive results of Dodis and Yu and Barak et al. (TCC '13, CRYPTO '11); hence, they can be understood as a general characterization of square-friendly applications. While the positive results on square-friendly applications were derived by one clever application of the Cauchy-Schwarz inequality, for tight lower bounds we need more machinery. In our approach we use convex optimization techniques and some theory of circulant matrices.}, author = {Skórski, Maciej}, issn = {1868-8969}, location = {Hannover, Germany}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Lower bounds on key derivation for square-friendly applications}}, doi = {10.4230/LIPIcs.STACS.2017.57}, volume = {66}, year = {2017}, }
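A note for readers of entry 6528 (Simple verifiable delay functions): the relation proved there is y = x^(2^T) (mod N), conjectured to require T sequential squarings to evaluate when the factorization of N is unknown. The following minimal Python sketch is purely illustrative and sits outside any BibTeX entry (bibtex ignores text between entries); it is not the paper's protocol, and the function name and toy parameters are hypothetical, chosen for readability rather than security.

# Sequential-squaring evaluation of the time-lock relation from entry 6528:
#   y = x^(2^T) (mod N)
# Toy parameters only; a real modulus N has around 2048 bits and T can be up to ~2^40.

def eval_timelock(x: int, T: int, N: int) -> int:
    """Compute y = x^(2^T) mod N by T sequential squarings; without the
    factorization of N, no substantially faster evaluation is known."""
    y = x % N
    for _ in range(T):
        y = (y * y) % N
    return y

if __name__ == "__main__":
    N = 101 * 103          # hypothetical, insecure toy modulus
    x, T = 5, 10
    y = eval_timelock(x, T, N)
    assert y == pow(x, 2 ** T, N)  # sanity check against built-in modular exponentiation
    print(y)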