@inproceedings{2082, abstract = {NMAC is a mode of operation which turns a fixed input-length keyed hash function f into a variable input-length function. A practical single-key variant of NMAC called HMAC is a very popular and widely deployed message authentication code (MAC). Security proofs and attacks for NMAC can typically be lifted to HMAC. NMAC was introduced by Bellare, Canetti and Krawczyk [Crypto'96], who proved it to be a secure pseudorandom function (PRF), and thus also a MAC, assuming that (1) f is a PRF and (2) the function we get when cascading f is weakly collision-resistant. Unfortunately, HMAC is typically instantiated with cryptographic hash functions like MD5 or SHA-1 for which (2) has been found to be wrong. To restore the provable guarantees for NMAC, Bellare [Crypto'06] showed its security based solely on the assumption that f is a PRF, albeit via a non-uniform reduction. - Our first contribution is a simpler and uniform proof for this fact: If f is an ε-secure PRF (against q queries) and a δ-non-adaptively secure PRF (against q queries), then NMAC^f is an (ε+ℓqδ)-secure PRF against q queries of length at most ℓ blocks each. - We then show that this ε+ℓqδ bound is basically tight. For the most interesting case where ℓqδ ≥ ε we prove this by constructing an f for which an attack with advantage ℓqδ exists. This also violates the bound O(ℓε) on the PRF-security of NMAC recently claimed by Koblitz and Menezes. - Finally, we analyze the PRF-security of a modification of NMAC called NI [An and Bellare, Crypto'99] that differs mainly by using a compression function with an additional keying input. This avoids the constant rekeying on multi-block messages in NMAC and allows for a security proof starting with the standard switch from a PRF to a random function, followed by an information-theoretic analysis. We carry out such an analysis, obtaining a tight ℓq^2/2^c bound for this step, improving over the trivial bound of ℓ^2q^2/2^c. The proof borrows combinatorial techniques originally developed for proving the security of CBC-MAC [Bellare et al., Crypto'05].}, author = {Gazi, Peter and Pietrzak, Krzysztof Z and Rybar, Michal}, editor = {Garay, Juan and Gennaro, Rosario}, location = {Santa Barbara, USA}, number = {1}, pages = {113 -- 130}, publisher = {Springer}, title = {{The exact PRF-security of NMAC and HMAC}}, doi = {10.1007/978-3-662-44371-2_7}, volume = {8616}, year = {2014}, } @inproceedings{2259, abstract = {The learning with rounding (LWR) problem, introduced by Banerjee, Peikert and Rosen at EUROCRYPT ’12, is a variant of learning with errors (LWE), where one replaces random errors with deterministic rounding. The LWR problem was shown to be as hard as LWE for a setting of parameters where the modulus and modulus-to-error ratio are super-polynomial. In this work we resolve the main open problem and give a new reduction that works for a larger range of parameters, allowing for a polynomial modulus and modulus-to-error ratio. In particular, a smaller modulus gives us greater efficiency, and a smaller modulus-to-error ratio gives us greater security, which now follows from the worst-case hardness of GapSVP with polynomial (rather than super-polynomial) approximation factors. As a tool in the reduction, we show that there is a “lossy mode” for the LWR problem, in which LWR samples only reveal partial information about the secret.
This property gives us several interesting new applications, including a proof that LWR remains secure with weakly random secrets of sufficient min-entropy, and very simple constructions of deterministic encryption, lossy trapdoor functions and reusable extractors. Our approach is inspired by a technique of Goldwasser et al. from ICS ’10, which implicitly showed the existence of a “lossy mode” for LWE. By refining this technique, we also improve on the parameters of that work, requiring only a polynomial (instead of super-polynomial) modulus and modulus-to-error ratio.}, author = {Alwen, Joel F and Krenn, Stephan and Pietrzak, Krzysztof Z and Wichs, Daniel}, location = {Santa Barbara, CA, United States}, number = {1}, pages = {57 -- 74}, publisher = {Springer}, title = {{Learning with rounding, revisited: New reduction properties and applications}}, doi = {10.1007/978-3-642-40041-4_4}, volume = {8042}, year = {2013}, } @inproceedings{2258, abstract = {In a digital signature scheme with message recovery, rather than transmitting the message m and its signature σ, a single enhanced signature τ is transmitted. The verifier is able to recover m from τ and at the same time verify its authenticity. The two most important parameters of such a scheme are its security and overhead |τ| − |m|. A simple argument shows that for any scheme with “n bits security” |τ| − |m| ≥ n, i.e., the overhead is lower bounded by the security parameter n. Currently, the best known constructions in the random oracle model are far from this lower bound, requiring an overhead of n + log q_h, where q_h is the number of queries to the random oracle. In this paper we give a construction which basically matches the n-bit lower bound. We propose a simple digital signature scheme with n + o(log q_h) bits overhead, where q_h denotes the number of random oracle queries. Our construction works in two steps. First, we propose a signature scheme with message recovery having optimal overhead in a new ideal model, the random invertible function model. Second, we show that a four-round Feistel network with random oracles as round functions is tightly “public-indifferentiable” from a random invertible function. At the core of our indifferentiability proof is an almost tight upper bound for the expected number of edges of the densest “small” subgraph of a random Cayley graph, which may be of independent interest.}, author = {Kiltz, Eike and Pietrzak, Krzysztof Z and Szegedy, Mario}, location = {Santa Barbara, CA, United States}, pages = {571 -- 588}, publisher = {Springer}, title = {{Digital signatures with minimal overhead from indifferentiable random invertible functions}}, doi = {10.1007/978-3-642-40041-4_31}, volume = {8042}, year = {2013}, } @inproceedings{2260, abstract = {Direct Anonymous Attestation (DAA) is one of the most complex cryptographic protocols deployed in practice. It allows an embedded secure processor known as a Trusted Platform Module (TPM) to attest to the configuration of its host computer without violating the owner’s privacy. DAA has been standardized by the Trusted Computing Group and ISO/IEC. The security of the DAA standard and all existing schemes is analyzed in the random-oracle model. We provide the first constructions of DAA in the standard model, that is, without relying on random oracles. Our constructions use new building blocks, including the first efficient signatures of knowledge in the standard model, which have many applications beyond DAA.
}, author = {Bernhard, David and Fuchsbauer, Georg and Ghadafi, Essam}, location = {Banff, AB, Canada}, pages = {518 -- 533}, publisher = {Springer}, title = {{Efficient signatures of knowledge and DAA in the standard model}}, doi = {10.1007/978-3-642-38980-1_33}, volume = {7954}, year = {2013}, } @inproceedings{2291, abstract = {Cryptographic access control promises to offer easily distributed trust and broader applicability, while reducing reliance on low-level online monitors. Traditional implementations of cryptographic access control rely on simple cryptographic primitives, whereas recent endeavors employ primitives with richer functionality and security guarantees. Worryingly, few of the existing cryptographic access-control schemes come with precise guarantees, the gap between the policy specification and the implementation being analyzed only informally, if at all. In this paper we begin addressing this shortcoming. Unlike prior work that targeted ad-hoc policy specification, we look at the well-established Role-Based Access Control (RBAC) model, as used in a typical file system. In short, we provide a precise syntax for a computational version of RBAC, offer rigorous definitions for cryptographic policy enforcement of a large class of RBAC security policies, and demonstrate that an implementation based on attribute-based encryption meets our security notions. We view our main contribution as being at the conceptual level. Although we work with RBAC for concreteness, our general methodology could guide future research for uses of cryptography in other access-control models.}, author = {Ferrara, Anna and Fuchsbauer, Georg and Warinschi, Bogdan}, location = {New Orleans, LA, United States}, pages = {115 -- 129}, publisher = {IEEE}, title = {{Cryptographically enforced RBAC}}, doi = {10.1109/CSF.2013.15}, year = {2013}, } @inproceedings{2940, abstract = {A chain rule for an entropy notion H(·) states that the entropy H(X) of a variable X decreases by at most ℓ if conditioned on an ℓ-bit string A, i.e., H(X|A) ≥ H(X) − ℓ. More generally, it satisfies a chain rule for conditional entropy if H(X|Y,A) ≥ H(X|Y) − ℓ. All natural information-theoretic entropy notions we are aware of (like Shannon or min-entropy) satisfy some kind of chain rule for conditional entropy. Moreover, many computational entropy notions (like Yao entropy, unpredictability entropy and several variants of HILL entropy) satisfy the chain rule for conditional entropy, though here not only does the quantity decrease by ℓ, but the quality of the entropy also decreases exponentially in ℓ. However, for the standard notion of conditional HILL entropy (the computational equivalent of min-entropy) the existence of such a rule was unknown so far. In this paper, we prove that for conditional HILL entropy no meaningful chain rule exists, assuming the existence of one-way permutations: there exist distributions X, Y, A, where A is a distribution over a single bit, but H(X|Y) ≫ H(X|Y,A), even if we simultaneously allow for a massive degradation in the quality of the entropy. The idea underlying our construction is based on a surprising connection between the chain rule for HILL entropy and deniable encryption.
}, author = {Krenn, Stephan and Pietrzak, Krzysztof Z and Wadia, Akshay}, editor = {Sahai, Amit}, location = {Tokyo, Japan}, pages = {23 -- 39}, publisher = {Springer}, title = {{A counterexample to the chain rule for conditional HILL entropy, and what deniable encryption has to do with it}}, doi = {10.1007/978-3-642-36594-2_2}, volume = {7785}, year = {2013}, } @article{502, abstract = {Blind signatures allow users to obtain signatures on messages hidden from the signer; moreover, the signer cannot link the resulting message/signature pair to the signing session. This paper presents blind signature schemes in which the number of interactions between the user and the signer is minimal and whose blind signatures are short. Our schemes are defined over bilinear groups and are proved secure in the common-reference-string model without random oracles and under standard assumptions: CDH and the decision-linear assumption. (We also give variants over asymmetric groups based on similar assumptions.) The blind signatures are Waters signatures, which consist of 2 group elements. Moreover, we instantiate partially blind signatures, where the message consists of a part hidden from the signer and a commonly known public part, and schemes achieving perfect blindness. We propose new variants of blind signatures, such as signer-friendly partially blind signatures, where the public part can be chosen by the signer without prior agreement, 3-party blind signatures, as well as blind signatures on multiple aggregated messages provided by independent sources. We also extend Waters signatures to non-binary alphabets by proving a new result on the underlying hash function.}, author = {Blazy, Olivier and Fuchsbauer, Georg and Pointcheval, David and Vergnaud, Damien}, journal = {Journal of Computer Security}, number = {5}, pages = {627 -- 661}, publisher = {IOS Press}, title = {{Short blind signatures}}, doi = {10.3233/JCS-130477}, volume = {21}, year = {2013}, } @techreport{2274, abstract = {Proofs of work (PoW) were suggested by Dwork and Naor (Crypto'92) as a means of protecting a shared resource. The basic idea is to ask the service requestor to dedicate some non-trivial amount of computational work to every request. The original applications included prevention of spam and protection against denial-of-service attacks. More recently, PoWs have been used to prevent double spending in the Bitcoin digital currency system. In this work, we put forward an alternative concept for PoWs -- so-called proofs of space (PoS), where a service requestor must dedicate a significant amount of disk space as opposed to computation. We construct secure PoS schemes in the random oracle model, using graphs with high "pebbling complexity" and Merkle hash-trees.}, author = {Dziembowski, Stefan and Faust, Sebastian and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z}, publisher = {IST Austria}, title = {{Proofs of Space}}, year = {2013}, } @inproceedings{2048, abstract = {Leakage-resilient cryptography attempts to incorporate side-channel leakage into the black-box security model and designs cryptographic schemes that are provably secure within it. Informally, a scheme is leakage-resilient if it remains secure even if an adversary learns a bounded amount of arbitrary information about the scheme's internal state. Unfortunately, most leakage-resilient schemes are unnecessarily complicated in order to achieve strong provable security guarantees. As advocated by Yu et al.
[CCS’10], this is mostly an artefact of the security proof, and in practice much simpler constructions may already suffice to protect against realistic side-channel attacks. In this paper, we show that for simpler constructions leakage resilience can indeed be obtained when we aim for relaxed security notions where the leakage functions and/or the inputs to the primitive are chosen non-adaptively. For example, we show that a three-round Feistel network instantiated with a leakage-resilient PRF yields a leakage-resilient PRP if the inputs are chosen non-adaptively. (This complements the result of Dodis and Pietrzak [CRYPTO’10], who show that if adaptive queries are allowed, a super-logarithmic number of rounds is necessary.) We also show that a minor variation of the classical GGM construction gives a leakage-resilient PRF if both the leakage functions and the inputs are chosen non-adaptively.}, author = {Faust, Sebastian and Pietrzak, Krzysztof Z and Schipper, Joachim}, booktitle = {Conference proceedings CHES 2012}, location = {Leuven, Belgium}, pages = {213 -- 232}, publisher = {Springer}, title = {{Practical leakage-resilient symmetric cryptography}}, doi = {10.1007/978-3-642-33027-8_13}, volume = {7428}, year = {2012}, } @inproceedings{2049, abstract = {We propose a new authentication protocol that is provably secure based on a ring variant of the learning parity with noise (LPN) problem. The protocol follows the design principle of the LPN-based protocol from Eurocrypt’11 (Kiltz et al.), and like it, is a two-round protocol secure against active attacks. Moreover, our protocol has small communication complexity and a very small footprint, which makes it applicable in scenarios that involve low-cost, resource-constrained devices. Performance-wise, our protocol is more efficient than previous LPN-based schemes, such as the many variants of the Hopper-Blum (HB) protocol and the aforementioned protocol from Eurocrypt’11. Our implementation results show that it is even comparable to standard challenge-and-response protocols based on the AES block-cipher. Our basic protocol is roughly 20 times slower than AES, but with the advantage of having a 10 times smaller code size. Furthermore, if a few hundred bytes of non-volatile memory are available to allow the storage of some off-line pre-computations, then the online phase of our protocol is only twice as slow as AES.}, author = {Heyse, Stefan and Kiltz, Eike and Lyubashevsky, Vadim and Paar, Christof and Pietrzak, Krzysztof Z}, booktitle = {Conference proceedings FSE 2012}, location = {Washington, DC, USA}, pages = {346 -- 365}, publisher = {Springer}, title = {{Lapin: An efficient authentication protocol based on ring-LPN}}, doi = {10.1007/978-3-642-34047-5_20}, volume = {7549}, year = {2012}, } @inproceedings{2937, abstract = {Developers building cryptography into security-sensitive applications face a daunting task. Not only must they understand the security guarantees delivered by the constructions they choose, they must also implement and combine them correctly and efficiently. Cryptographic compilers free developers from this task by turning high-level specifications of security goals into efficient implementations. Yet trusting such tools is hard, as they rely on complex mathematical machinery and claim security properties that are subtle and difficult to verify.
In this paper we present ZKCrypt, an optimizing cryptographic compiler that achieves an unprecedented level of assurance without sacrificing practicality for a comprehensive class of cryptographic protocols known as Zero-Knowledge Proofs of Knowledge. The pipeline of ZKCrypt integrates purpose-built verified compilers and verifying compilers producing formal proofs in the CertiCrypt framework. By combining the guarantees delivered by each stage, ZKCrypt provides assurance that the output implementation securely realizes the abstract proof goal given as input. We report on the main characteristics of ZKCrypt, highlight new definitions and concepts at its foundations, and illustrate its applicability through a representative example of an anonymous credential system.}, author = {Almeida, José and Barbosa, Manuel and Bangerter, Endre and Barthe, Gilles and Krenn, Stephan and Béguelin, Santiago}, booktitle = {Proceedings of the 2012 ACM conference on Computer and communications security}, location = {Raleigh, NC, USA}, pages = {488 -- 500}, publisher = {ACM}, title = {{Full proof cryptography: Verifiable compilation of efficient zero-knowledge protocols}}, doi = {10.1145/2382196.2382249}, year = {2012}, } @inproceedings{2974, abstract = {We construct a perfectly binding string commitment scheme whose security is based on the learning parity with noise (LPN) assumption, or equivalently, the hardness of decoding random linear codes. Our scheme not only allows for a simple and efficient zero-knowledge proof of knowledge for committed values (essentially a Σ-protocol), but also for such proofs showing any kind of relation amongst committed values, i.e., proving that messages m_0,...,m_u are such that m_0 = C(m_1,...,m_u) for any circuit C. To get soundness which is exponentially small in a security parameter t, and when the zero-knowledge property relies on the LPN problem with secrets of length ℓ, our 3-round protocol has communication complexity O(t|C|ℓ log ℓ) and computational complexity of O(t|C|ℓ) bit operations. The hidden constants are small, and the computation consists mostly of computing inner products of bit-vectors.}, author = {Jain, Abhishek and Krenn, Stephan and Pietrzak, Krzysztof Z and Tentes, Aris}, editor = {Wang, Xiaoyun and Sako, Kazue}, location = {Beijing, China}, pages = {663 -- 680}, publisher = {Springer}, title = {{Commitments and efficient zero knowledge proofs from learning parity with noise}}, doi = {10.1007/978-3-642-34961-4_40}, volume = {7658}, year = {2012}, } @inproceedings{3250, abstract = {The Learning Parity with Noise (LPN) problem has recently found many applications in cryptography as the hardness assumption underlying the constructions of "provably secure" cryptographic schemes like encryption or authentication protocols. Being provably secure means that the scheme comes with a proof showing that the existence of an efficient adversary against the scheme implies that the underlying hardness assumption is wrong. LPN-based schemes are appealing for theoretical and practical reasons. On the theoretical side, LPN-based schemes offer a very strong security guarantee. The LPN problem is equivalent to the problem of decoding random linear codes, a problem that has been extensively studied in the last half century. The fastest known algorithms run in exponential time and, unlike most number-theoretic problems used in cryptography, the LPN problem does not succumb to known quantum algorithms.
On the practical side, LPN-based schemes are often extremely simple and efficient in terms of code size as well as time and space requirements. This makes them prime candidates for lightweight devices like RFID tags, which are too weak to implement standard cryptographic primitives like the AES block-cipher. This talk will be a gentle introduction to provable security, using simple LPN-based schemes as examples: it starts from pseudorandom generators and symmetric-key encryption, moves on to secret-key authentication protocols, and, if time admits, touches on recent constructions of public-key identification, commitments and zero-knowledge proofs.}, author = {Pietrzak, Krzysztof Z}, location = {Špindlerův Mlýn, Czech Republic}, pages = {99 -- 114}, publisher = {Springer}, title = {{Cryptography from learning parity with noise}}, doi = {10.1007/978-3-642-27660-6_9}, volume = {7147}, year = {2012}, } @inproceedings{3282, abstract = {Traditionally, symmetric-key message authentication codes (MACs) are easily built from pseudorandom functions (PRFs). In this work we propose a wide variety of other approaches to building efficient MACs, without going through a PRF first. In particular, unlike deterministic PRF-based MACs, where each message has a unique valid tag, we give a number of probabilistic MAC constructions from various other primitives/assumptions. Our main results are summarized as follows: We show several new probabilistic MAC constructions from a variety of general assumptions, including CCA-secure encryption, Hash Proof Systems and key-homomorphic weak PRFs. By instantiating these frameworks under concrete number-theoretic assumptions, we get several schemes which are more efficient than just using a state-of-the-art PRF instantiation under the corresponding assumption. For probabilistic MACs, unlike deterministic ones, unforgeability against a chosen-message attack (uf-cma) alone does not imply security if the adversary can additionally make verification queries (uf-cmva). We give an efficient generic transformation from any uf-cma-secure MAC which is "message-hiding" into a uf-cmva-secure MAC. This resolves the main open problem of Kiltz et al. from Eurocrypt'11; by using our transformation on their constructions, we get the first efficient MACs from the LPN assumption. While all our new MAC constructions immediately give efficient actively secure, two-round symmetric-key identification schemes, we also show a very simple, three-round actively secure identification protocol from any weak PRF. In particular, the resulting protocol is much more efficient than the trivial approach of building a regular PRF from a weak PRF.}, author = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Kiltz, Eike and Wichs, Daniel}, location = {Cambridge, UK}, pages = {355 -- 374}, publisher = {Springer}, title = {{Message authentication, revisited}}, doi = {10.1007/978-3-642-29011-4_22}, volume = {7237}, year = {2012}, } @inproceedings{3280, abstract = {The (decisional) learning with errors problem (LWE) asks to distinguish "noisy" inner products of a secret vector with random vectors from uniform. The learning parity with noise problem (LPN) is the special case where the elements of the vectors are bits. In recent years, the LWE and LPN problems have found many applications in cryptography.
In this paper we introduce a (seemingly) much stronger adaptive assumption, called "subspace LWE" (SLWE), where the adversary can learn the inner product of the secret and random vectors after they were projected into an adaptively and adversarially chosen subspace. We prove that, surprisingly, the SLWE problem mapping into subspaces of dimension d is almost as hard as LWE using secrets of length d (the other direction is trivial). This result immediately implies that several existing cryptosystems whose security is based on the hardness of the LWE/LPN problems are provably secure in a much stronger sense than anticipated. As an illustrative example we show that the standard way of using LPN for symmetric CPA-secure encryption is even secure against a very powerful class of related-key attacks.}, author = {Pietrzak, Krzysztof Z}, location = {Taormina, Sicily, Italy}, pages = {548 -- 563}, publisher = {Springer}, title = {{Subspace LWE}}, doi = {10.1007/978-3-642-28914-9_31}, volume = {7194}, year = {2012}, } @inproceedings{3281, abstract = {We consider the problem of amplifying the "lossiness" of functions. We say that an oracle circuit C*: {0,1}^m → {0,1}^m amplifies relative lossiness from ℓ/n to L/m if for every function f: {0,1}^n → {0,1}^n it holds that (1) if f is injective then so is C^f, and (2) if f has image size at most 2^(n-ℓ), then C^f has image size at most 2^(m-L). The question is whether such a C* exists for L/m ≫ ℓ/n. This problem arises naturally in the context of cryptographic "lossy functions," where the relative lossiness is the key parameter. We show that for every circuit C* that makes at most t queries to f, the relative lossiness of C^f is at most L/m ≤ ℓ/n + O(log t)/n. In particular, no black-box method making a polynomial t = poly(n) number of queries can amplify relative lossiness by more than an O(log n)/n additive term. We show that this is tight by giving a simple construction (cascading with some randomization) that achieves such amplification.}, author = {Pietrzak, Krzysztof Z and Rosen, Alon and Segev, Gil}, location = {Taormina, Sicily, Italy}, pages = {458 -- 475}, publisher = {Springer}, title = {{Lossy functions do not amplify well}}, doi = {10.1007/978-3-642-28914-9_26}, volume = {7194}, year = {2012}, } @inproceedings{3279, abstract = {We show a hardness-preserving construction of a PRF from any length-doubling PRG which improves upon known constructions whenever we can put a non-trivial upper bound q on the number of queries to the PRF. Our construction requires only O(log q) invocations of the underlying PRG with each query. In comparison, the number of invocations by the best previous hardness-preserving construction (GGM using Levin's trick) is logarithmic in the hardness of the PRG. For example, starting from an exponentially secure PRG {0,1}^n → {0,1}^(2n), we get a PRF which is exponentially secure if queried at most q = exp(√n) times and where each invocation of the PRF requires Θ(√n) queries to the underlying PRG. This is much less than the Θ(n) required by known constructions.}, author = {Jain, Abhishek and Pietrzak, Krzysztof Z and Tentes, Aris}, location = {Taormina, Sicily, Italy}, pages = {369 -- 382}, publisher = {Springer}, title = {{Hardness preserving constructions of pseudorandom functions}}, doi = {10.1007/978-3-642-28914-9_21}, volume = {7194}, year = {2012}, }