@inproceedings{1647, abstract = {Round-optimal blind signatures are notoriously hard to construct in the standard model, especially in the malicious-signer model, where blindness must hold under adversarially chosen keys. This is substantiated by several impossibility results. The only construction that can be termed theoretically efficient, by Garg and Gupta (Eurocrypt’14), requires complexity leveraging, inducing an exponential security loss. We present a construction of practically efficient round-optimal blind signatures in the standard model. It is conceptually simple and builds on the recent structure-preserving signatures on equivalence classes (SPS-EQ) from Asiacrypt’14. While the traditional notion of blindness follows from standard assumptions, we prove blindness under adversarially chosen keys under an interactive variant of DDH. However, we neither require non-uniform assumptions nor complexity leveraging. We then show how to extend our construction to partially blind signatures and to blind signatures on message vectors, which yield a construction of one-show anonymous credentials à la “anonymous credentials light” (CCS’13) in the standard model. Furthermore, we give the first SPS-EQ construction under non-interactive assumptions and show how SPS-EQ schemes imply conventional structure-preserving signatures, which allows us to apply optimality results for the latter to SPS-EQ.}, author = {Fuchsbauer, Georg and Hanser, Christian and Slamanig, Daniel}, location = {Santa Barbara, CA, United States}, pages = {233 -- 253}, publisher = {Springer}, title = {{Practical round-optimal blind signatures in the standard model}}, doi = {10.1007/978-3-662-48000-7_12}, volume = {9216}, year = {2015}, } @inproceedings{1645, abstract = {Secret-key constructions are often proved secure in a model where one or more underlying components are replaced by an idealized oracle accessible to the attacker. This model gives rise to information-theoretic security analyses, and several advances have been made in this area over the last few years. This paper provides a systematic overview of what is achievable in this model, and how existing works fit into this view.}, author = {Gazi, Peter and Tessaro, Stefano}, booktitle = {2015 IEEE Information Theory Workshop}, location = {Jerusalem, Israel}, publisher = {IEEE}, title = {{Secret-key cryptography from ideal primitives: A systematic overview}}, doi = {10.1109/ITW.2015.7133163}, year = {2015}, } @inproceedings{1654, abstract = {HMAC and its variant NMAC are the most popular approaches to deriving a MAC (and more generally, a PRF) from a cryptographic hash function. Despite nearly two decades of research, their exact security still remains far from understood in many different contexts. Indeed, recent works have re-surfaced interest in {\em generic} attacks, i.e., attacks that treat the compression function of the underlying hash function as a black box. Generic security can be proved in a model where the underlying compression function is modeled as a random function -- yet, to date, the question of proving tight, non-trivial bounds on the generic security of HMAC/NMAC even as a PRF remains a challenging open question. In this paper, we ask whether a small modification to HMAC and NMAC can allow us to exactly characterize the security of the resulting constructions, while incurring only a small penalty with respect to efficiency.
To this end, we present simple variants of NMAC and HMAC, for which we prove tight bounds on the generic PRF security, expressed in terms of the numbers of construction and compression function queries necessary to break the construction. All of our constructions are obtained via a (near) {\em black-box} modification of NMAC and HMAC, which can be interpreted as an initial step of key-dependent message pre-processing. While our focus is on PRF security, a further attractive feature of our new constructions is that they clearly defeat all recent generic attacks against properties such as state recovery and universal forgery. These exploit properties of the so-called ``functional graph'' which are not directly accessible in our new constructions. }, author = {Gazi, Peter and Pietrzak, Krzysztof Z and Tessaro, Stefano}, location = {Auckland, New Zealand}, pages = {85 -- 109}, publisher = {Springer}, title = {{Generic security of NMAC and HMAC with input whitening}}, doi = {10.1007/978-3-662-48800-3_4}, volume = {9453}, year = {2015}, } @inproceedings{1650, abstract = {We consider the task of deriving a key with high HILL entropy (i.e., being computationally indistinguishable from a key with high min-entropy) from an unpredictable source. Prior to this work, the only known way to transform unpredictability into a key that was ϵ-indistinguishable from having min-entropy was via pseudorandomness, for example by Goldreich-Levin (GL) hardcore bits. This approach has the inherent limitation that from a source with k bits of unpredictability entropy one can derive a key of length (and thus HILL entropy) at most k−2log(1/ϵ) bits. In many settings, e.g. when dealing with biometric data, such a 2log(1/ϵ) bit entropy loss is not an option. Our main technical contribution is a theorem stating that in the high-entropy regime, unpredictability implies HILL entropy. Concretely, any variable K with |K|−d bits of unpredictability entropy has the same amount of so-called metric entropy (against real-valued, deterministic distinguishers), which is known to imply the same amount of HILL entropy. The loss in circuit size in this argument is exponential in the entropy gap d, and thus this result only applies for small d (i.e., where the size of distinguishers considered is exponential in d). To overcome the above restriction, we investigate whether it is possible to first “condense” unpredictability entropy and make the entropy gap small. We show that any source with k bits of unpredictability can be condensed into a source of length k with k−3 bits of unpredictability entropy. Our condenser simply “abuses” the GL construction and derives a k bit key from a source with k bits of unpredictability. The original GL theorem implies nothing when extracting that many bits, but we show that in this regime, GL still behaves like a “condenser” for unpredictability. This result comes with two caveats: (1) the loss in circuit size is exponential in k and (2) we require that the source we start with has no HILL entropy (equivalently, one can efficiently check if a guess is correct).
We leave it as an intriguing open problem to overcome these restrictions or to prove they’re inherent.}, author = {Skórski, Maciej and Golovnev, Alexander and Pietrzak, Krzysztof Z}, location = {Kyoto, Japan}, pages = {1046 -- 1057}, publisher = {Springer}, title = {{Condensed unpredictability}}, doi = {10.1007/978-3-662-47672-7_85}, volume = {9134}, year = {2015}, } @inproceedings{1651, abstract = {Cryptographic e-cash allows off-line electronic transactions between a bank, users and merchants in a secure and anonymous fashion. A plethora of e-cash constructions has been proposed in the literature; however, these traditional e-cash schemes only allow coins to be transferred once between users and merchants. Ideally, we would like users to be able to transfer coins between each other multiple times before deposit, as happens with physical cash. “Transferable” e-cash schemes are the solution to this problem. Unfortunately, the currently proposed schemes are either completely impractical or do not achieve the desirable anonymity properties without compromises, such as assuming the existence of a trusted “judge” who can trace all coins and users in the system. This paper presents the first efficient and fully anonymous transferable e-cash scheme without any trusted third parties. We start by revising the security and anonymity properties of transferable e-cash to capture issues that were previously overlooked. For our construction we use the recently proposed malleable signatures by Chase et al. to allow the secure and anonymous transfer of coins, combined with a new efficient double-spending detection mechanism. Finally, we discuss an instantiation of our construction.}, author = {Baldimtsi, Foteini and Chase, Melissa and Fuchsbauer, Georg and Kohlweiss, Markulf}, booktitle = {Public-Key Cryptography - PKC 2015}, isbn = {978-3-662-46446-5}, location = {Gaithersburg, MD, United States}, pages = {101 -- 124}, publisher = {Springer}, title = {{Anonymous transferable e-cash}}, doi = {10.1007/978-3-662-46447-2_5}, volume = {9020}, year = {2015}, } @inproceedings{1652, abstract = {We develop new theoretical tools for proving lower bounds on the (amortized) complexity of certain functions in models of parallel computation. We apply the tools to construct a class of functions with high amortized memory complexity in the parallel Random Oracle Model (pROM), a variant of the standard ROM allowing for batches of simultaneous queries. In particular we obtain a new, more robust, type of Memory-Hard Functions (MHF), a security primitive which has recently been gaining acceptance in practice as an effective means of countering brute-force attacks on security-relevant functions. Along the way we also demonstrate an important shortcoming of previous definitions of MHFs and give a new definition addressing the problem. The tools we develop represent an adaptation of the powerful pebbling paradigm (initially introduced by Hewitt and Paterson [HP70] and Cook [Coo73]) to a simple and intuitive parallel setting. We define a simple pebbling game G_p over graphs which aims to abstract parallel computation in an intuitive way. As a conceptual contribution we define a measure of pebbling complexity for graphs called cumulative complexity (CC) and show how it overcomes a crucial shortcoming (in the parallel setting) exhibited by more traditional complexity measures used in the past.
As a main technical contribution we give an explicit construction of a constant in-degree family of graphs whose CC in G_p approaches maximality to within a polylogarithmic factor for any graph of equal size (analogous to the graphs of Tarjan et al. [PTC76, LT82] for sequential pebbling games). Finally, for a given graph G and related function f_G, we derive a lower bound on the amortized memory complexity of f_G in the pROM in terms of the CC of G in the game G_p.}, author = {Alwen, Joel F and Serbinenko, Vladimir}, booktitle = {Proceedings of the 47th annual ACM symposium on Theory of computing}, location = {Portland, OR, United States}, pages = {595 -- 603}, publisher = {ACM}, title = {{High parallel complexity graphs and memory-hard functions}}, doi = {10.1145/2746539.2746622}, year = {2015}, } @inproceedings{1672, abstract = {Composable notions of incoercibility aim to forbid a coercer from using anything beyond the coerced parties’ inputs and outputs to catch them when they try to deceive him. Existing definitions are restricted to weak coercion types, and/or are not universally composable. Furthermore, they often make too strong assumptions on the knowledge of coerced parties—e.g., they assume they know the identities and/or the strategies of other coerced parties, or those of corrupted parties—which makes them unsuitable for applications of incoercibility such as e-voting, where colluding adversarial parties may attempt to coerce honest voters, e.g., by offering them money for a promised vote, and use their own view to check that the voter keeps his end of the bargain. In this work we put forward the first universally composable notion of incoercible multi-party computation, which satisfies the above intuition and does not assume collusions among coerced parties or knowledge of the corrupted set. We define natural notions of UC incoercibility corresponding to standard coercion types, i.e., receipt-freeness and resistance to full-active coercion. Importantly, our suggested notion has the unique property that it builds on top of the well-studied UC framework by Canetti instead of modifying it. This guarantees backwards compatibility, and allows us to inherit results from the rich UC literature. We then present MPC protocols which realize our notions of UC incoercibility given access to an arguably minimal setup—namely honestly generated tamper-proof hardware performing a very simple cryptographic operation—e.g., a smart card. This is, to our knowledge, the first proposed construction of an MPC protocol (for more than two parties) that is incoercibly secure and universally composable, and therefore the first construction of a universally composable receipt-free e-voting protocol.}, author = {Alwen, Joel F and Ostrovsky, Rafail and Zhou, Hongsheng and Zikas, Vassilis}, booktitle = {Advances in Cryptology - CRYPTO 2015}, isbn = {978-3-662-47999-5}, location = {Santa Barbara, CA, United States}, pages = {763 -- 780}, publisher = {Springer}, title = {{Incoercible multi-party computation and universally composable receipt-free voting}}, doi = {10.1007/978-3-662-48000-7_37}, volume = {9216}, year = {2015}, } @inproceedings{1669, abstract = {Computational notions of entropy (a.k.a. pseudoentropy) have found many applications, including leakage-resilient cryptography, deterministic encryption and memory delegation.
The most important tools to argue about pseudoentropy are chain rules, which quantify by how much (in terms of quantity and quality) the pseudoentropy of a given random variable X decreases when conditioned on some other variable Z (think for example of X as a secret key and Z as information leaked by a side-channel). In this paper we give a very simple and modular proof of the chain rule for HILL pseudoentropy, improving the best known parameters. Our version allows for increasing the acceptable length of leakage in applications by up to a constant factor compared to the best previous bounds. As a contribution of independent interest, we provide a comprehensive study of all known versions of the chain rule, comparing their worst-case strength and limitations.}, author = {Pietrzak, Krzysztof Z and Skórski, Maciej}, location = {Guadalajara, Mexico}, pages = {81 -- 98}, publisher = {Springer}, title = {{The chain rule for HILL pseudoentropy, revisited}}, doi = {10.1007/978-3-319-22174-8_5}, volume = {9230}, year = {2015}, } @inproceedings{1671, abstract = {This paper studies the concrete security of PRFs and MACs obtained by keying hash functions based on the sponge paradigm. One such hash function is KECCAK, selected as NIST’s new SHA-3 standard. In contrast to other approaches like HMAC, the exact security of keyed sponges is not well understood. Indeed, recent security analyses delivered concrete security bounds which are far from existing attacks. This paper aims to close this gap. We prove (nearly) exact bounds on the concrete PRF security of keyed sponges using a random permutation. These bounds are tight for the most relevant ranges of parameters, i.e., for messages of length (roughly) ℓ ≤ min{2^{n/4}, 2^r} blocks, where n is the state size and r is the desired output length; and for ℓ ≤ q queries (to the construction or the underlying permutation). Moreover, we also improve standard-model bounds. As an intermediate step of independent interest, we prove tight bounds on the PRF security of the truncated CBC-MAC construction, which operates as plain CBC-MAC, but only returns a prefix of the output.}, author = {Gazi, Peter and Pietrzak, Krzysztof Z and Tessaro, Stefano}, location = {Santa Barbara, CA, United States}, pages = {368 -- 387}, publisher = {Springer}, title = {{The exact PRF security of truncation: Tight bounds for keyed sponges and truncated CBC}}, doi = {10.1007/978-3-662-47989-6_18}, volume = {9215}, year = {2015}, } @inproceedings{1668, abstract = {We revisit the security (as a pseudorandom permutation) of cascading-based constructions for block-cipher key-length extension. Previous works typically considered the extreme case where the adversary is given the entire codebook of the construction, the only complexity measure being the number q_e of queries to the underlying ideal block cipher, representing the adversary’s secret-key-independent computation. Here, we initiate a systematic study of the more natural case of an adversary restricted to adaptively learning a number q_c of plaintext/ciphertext pairs that is less than the entire codebook. For any such q_c, we aim to determine the highest number of block-cipher queries q_e the adversary can issue without being able to successfully distinguish the construction (under a secret key) from a random permutation. More concretely, we show the following results for key-length extension schemes using a block cipher with n-bit blocks and κ-bit keys: Plain cascades of length ℓ = 2r+1 are secure whenever q_c q_e^r ≪ 2^{r(κ+n)}, q_c ≪ 2^κ and q_e ≪ 2^{2κ}.
The bound for r=1 also applies to two-key triple encryption (as used within Triple DES). The r-round XOR-cascade is secure as long as q_c q_e^r ≪ 2^{r(κ+n)}, matching an attack by Gaži (CRYPTO 2013). We fully characterize the security of Gaži and Tessaro’s two-call }, author = {Gazi, Peter and Lee, Jooyoung and Seurin, Yannick and Steinberger, John and Tessaro, Stefano}, location = {Istanbul, Turkey}, pages = {319 -- 341}, publisher = {Springer}, title = {{Relaxing full-codebook security: A refined analysis of key-length extension schemes}}, doi = {10.1007/978-3-662-48116-5_16}, volume = {9054}, year = {2015}, } @inproceedings{1675, abstract = {Proofs of work (PoW) have been suggested by Dwork and Naor (Crypto’92) as protection to a shared resource. The basic idea is to ask the service requestor to dedicate some non-trivial amount of computational work to every request. The original applications included prevention of spam and protection against denial-of-service attacks. More recently, PoWs have been used to prevent double spending in the Bitcoin digital currency system. In this work, we put forward an alternative concept for PoWs - so-called proofs of space (PoS), where a service requestor must dedicate a significant amount of disk space as opposed to computation. We construct secure PoS schemes in the random oracle model (with one additional mild assumption required for the proof to go through), using graphs with high “pebbling complexity” and Merkle hash-trees. We discuss some applications, including follow-up work where a decentralized digital currency scheme called Spacecoin is constructed that uses PoS (instead of wasteful PoW as in Bitcoin) to prevent double spending. The main technical contribution of this work is the construction of (directed, loop-free) graphs on N vertices with in-degree O(log log N) such that even if one places Θ(N) pebbles on the nodes of the graph, there’s a constant fraction of nodes that needs Θ(N) steps to be pebbled (where in every step one can put a pebble on a node if all its parents have a pebble).}, author = {Dziembowski, Stefan and Faust, Sebastian and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z}, booktitle = {35th Annual Cryptology Conference}, isbn = {9783662479995}, issn = {0302-9743}, location = {Santa Barbara, CA, United States}, pages = {585 -- 605}, publisher = {Springer}, title = {{Proofs of space}}, doi = {10.1007/978-3-662-48000-7_29}, volume = {9216}, year = {2015}, } @inproceedings{1643, abstract = {We extend the notion of verifiable random functions (VRF) to constrained VRFs, which generalize the concept of constrained pseudorandom functions, put forward by Boneh and Waters (Asiacrypt’13), and independently by Kiayias et al. (CCS’13) and Boyle et al. (PKC’14), who call them delegatable PRFs and functional PRFs, respectively. In a standard VRF the secret key sk allows one to evaluate a pseudorandom function at any point of its domain; in addition, it enables computation of a non-interactive proof that the function value was computed correctly. In a constrained VRF from the key sk one can derive constrained keys sk_S for subsets S of the domain, which allow computation of function values and proofs only at points in S. After formally defining constrained VRFs, we derive instantiations from the multilinear-maps-based constrained PRFs by Boneh and Waters, yielding a VRF with constrained keys for any set that can be decided by a polynomial-size circuit.
Our VRFs have the same function values as the Boneh-Waters PRFs and are proved secure under the same hardness assumption, showing that verifiability comes at no cost. Constrained (functional) VRFs were stated as an open problem by Boyle et al.}, author = {Fuchsbauer, Georg}, booktitle = {SCN 2014}, editor = {Abdalla, Michel and De Prisco, Roberto}, location = {Amalfi, Italy}, pages = {95 -- 114}, publisher = {Springer}, title = {{Constrained Verifiable Random Functions}}, doi = {10.1007/978-3-319-10879-7_7}, volume = {8642}, year = {2014}, } @inproceedings{1907, abstract = {Most cryptographic security proofs require showing that two systems are indistinguishable. A central tool in such proofs is that of a game, where winning the game means provoking a certain condition, and it is shown that the two systems considered cannot be distinguished unless this condition is provoked. Upper bounding the probability of winning such a game, i.e., provoking this condition, for an arbitrary strategy is usually hard, except in the special case where the best strategy for winning such a game is known to be non-adaptive. A sufficient criterion for ensuring the optimality of non-adaptive strategies is that of conditional equivalence to a system, a notion introduced in [1]. In this paper, we show that this criterion is not necessary to ensure the optimality of non-adaptive strategies by giving two results of independent interest: 1) the optimality of non-adaptive strategies is not preserved under parallel composition; 2) in contrast, conditional equivalence is preserved under parallel composition.}, author = {Demay, Grégory and Gazi, Peter and Maurer, Ueli and Tackmann, Björn}, booktitle = {IEEE International Symposium on Information Theory}, location = {Honolulu, USA}, publisher = {IEEE}, title = {{Optimality of non-adaptive strategies: The case of parallel games}}, doi = {10.1109/ISIT.2014.6875125}, year = {2014}, } @inproceedings{2045, abstract = {We introduce and study a new notion of enhanced chosen-ciphertext security (ECCA) for public-key encryption. Loosely speaking, in the ECCA security experiment, the decryption oracle provided to the adversary is augmented to return not only the output of the decryption algorithm on a queried ciphertext but also that of a randomness-recovery algorithm associated to the scheme. Our results mainly concern the case where the randomness-recovery algorithm is efficient. We provide constructions of ECCA-secure encryption from adaptive trapdoor functions as defined by Kiltz et al. (EUROCRYPT 2010), resulting in ECCA encryption from standard number-theoretic assumptions. We then give two applications of ECCA-secure encryption: (1) We use it as a unifying concept in showing equivalence of adaptive trapdoor functions and tag-based adaptive trapdoor functions, resolving an open question of Kiltz et al. (2) We show that ECCA-secure encryption can be used to securely realize an approach to public-key encryption with non-interactive opening (PKENO) originally suggested by Damgård and Thorbek (EUROCRYPT 2007), resulting in new and practical PKENO schemes quite different from those in prior work.
Our results demonstrate that ECCA security is of both practical and theoretical interest.}, author = {Dachman-Soled, Dana and Fuchsbauer, Georg and Mohassel, Payman and O’Neill, Adam}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, editor = {Krawczyk, Hugo}, location = {Buenos Aires, Argentina}, pages = {329 -- 344}, publisher = {Springer}, title = {{Enhanced chosen-ciphertext security and applications}}, doi = {10.1007/978-3-642-54631-0_19}, volume = {8383}, year = {2014}, } @inproceedings{2047, abstract = {Following the publication of an attack on genome-wide association studies (GWAS) data proposed by Homer et al., considerable attention has been given to developing methods for releasing GWAS data in a privacy-preserving way. Here, we develop an end-to-end differentially private method for solving regression problems with convex penalty functions and selecting the penalty parameters by cross-validation. In particular, we focus on penalized logistic regression with elastic-net regularization, a method widely used in GWAS analyses to identify disease-causing genes. We show how a differentially private procedure for penalized logistic regression with elastic-net regularization can be applied to the analysis of GWAS data and evaluate our method’s performance.}, author = {Yu, Fei and Rybar, Michal and Uhler, Caroline and Fienberg, Stephen}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, editor = {Domingo-Ferrer, Josep}, location = {Ibiza, Spain}, pages = {170 -- 184}, publisher = {Springer}, title = {{Differentially-private logistic regression for detecting multiple-SNP association in GWAS databases}}, doi = {10.1007/978-3-319-11257-2_14}, volume = {8744}, year = {2014}, } @inproceedings{2046, abstract = {We introduce policy-based signatures (PBS), where a signer can only sign messages conforming to some authority-specified policy. The main requirements are unforgeability and privacy, the latter meaning that signatures do not reveal the policy. PBS offers value along two fronts: (1) On the practical side, they allow a corporation to control what messages its employees can sign under the corporate key. (2) On the theoretical side, they unify existing work, capturing other forms of signatures as special cases or allowing them to be easily built. Our work focuses on definitions of PBS, proofs that this challenging primitive is realizable for arbitrary policies, efficient constructions for specific policies, and a few representative applications.}, author = {Bellare, Mihir and Fuchsbauer, Georg}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, editor = {Krawczyk, Hugo}, location = {Buenos Aires, Argentina}, pages = {520 -- 537}, publisher = {Springer}, title = {{Policy-based signatures}}, doi = {10.1007/978-3-642-54631-0_30}, volume = {8383}, year = {2014}, } @inproceedings{2185, abstract = {We revisit the classical problem of converting an imperfect source of randomness into a usable cryptographic key. Assume that we have some cryptographic application P that expects a uniformly random m-bit key R and ensures that the best attack (in some complexity class) against P(R) has success probability at most δ.
Our goal is to design a key-derivation function (KDF) h that converts any random source X of min-entropy k into a sufficiently "good" key h(X), guaranteeing that P(h(X)) has comparable security δ′ which is 'close' to δ. Seeded randomness extractors provide a generic way to solve this problem for all applications P, with resulting security δ′ = O(δ), provided that we start with entropy k ≥ m + 2 log (1/δ) - O(1). By a result of Radhakrishnan and Ta-Shma, this bound on k (called the "RT-bound") is also known to be tight in general. Unfortunately, in many situations the loss of 2 log (1/δ) bits of entropy is unacceptable. This motivates the study of KDFs with less entropy waste by placing some restrictions on the source X or the application P. In this work we obtain the following new positive and negative results in this regard: - Efficient samplability of the source X does not help beat the RT-bound for general applications. This resolves the SRT (samplable RT) conjecture of Dachman-Soled et al. [DGKM12] in the affirmative, and also shows that the existence of computationally-secure extractors beating the RT-bound implies the existence of one-way functions. - We continue in the line of work initiated by Barak et al. [BDK+11] and construct new information-theoretic KDFs which beat the RT-bound for large but restricted classes of applications. Specifically, we design efficient KDFs that work for all unpredictability applications P (e.g., signatures, MACs, one-way functions, etc.) and can either: (1) extract all of the entropy k = m with a very modest security loss δ′ = O(δ·log (1/δ)), or alternatively, (2) achieve essentially optimal security δ′ = O(δ) with a very modest entropy loss k ≥ m + log log (1/δ). In comparison, the best prior results from [BDK+11] for this class of applications would only guarantee δ′ = O(√δ) when k = m, and would need k ≥ m + log (1/δ) to get δ′ = O(δ). - The weaker bounds of [BDK+11] hold for a larger class of so-called "square-friendly" applications (which includes all unpredictability, but also some important indistinguishability, applications). Unfortunately, we show that these weaker bounds are tight for the larger class of applications. - We abstract out a clean, information-theoretic notion of (k, δ, δ′)-unpredictability extractors, which guarantee "induced" security δ′ for any δ-secure unpredictability application P, and characterize the parameters achievable for such unpredictability extractors. Of independent interest, we also relate this notion to the previously-known notion of (min-entropy) condensers, and improve the state-of-the-art parameters for such condensers.}, author = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Wichs, Daniel}, editor = {Nguyen, Phong and Oswald, Elisabeth}, location = {Copenhagen, Denmark}, pages = {93 -- 110}, publisher = {Springer}, title = {{Key derivation without entropy waste}}, doi = {10.1007/978-3-642-55220-5_6}, volume = {8441}, year = {2014}, } @inproceedings{2219, abstract = {Recently, Döttling et al. (ASIACRYPT 2012) proposed the first chosen-ciphertext (IND-CCA) secure public-key encryption scheme from the learning parity with noise (LPN) assumption. In this work we give an alternative scheme which is conceptually simpler and more efficient. At the core of our construction is a trapdoor technique originally proposed for lattices by Micciancio and Peikert (EUROCRYPT 2012), which we adapt to the LPN setting.
The main technical tool is a new double-trapdoor mechanism, together with a trapdoor switching lemma based on a computational variant of the leftover hash lemma.}, author = {Kiltz, Eike and Masny, Daniel and Pietrzak, Krzysztof Z}, isbn = {978-364254630-3}, pages = {1 -- 18}, publisher = {Springer}, title = {{Simple chosen-ciphertext security from low noise LPN}}, doi = {10.1007/978-3-642-54631-0_1}, volume = {8383}, year = {2014}, } @inproceedings{2236, abstract = {Consider a joint distribution (X,A), where A takes values in {0,1}^ℓ. We show that for any family F of distinguishers, there exists a simulator h such that (1) no function in F can distinguish (X,A) from (X,h(X)) with advantage ε, and (2) h is only O(2^{3ℓ} ε^{-2}) times less efficient than the functions in F. For the most interesting settings of the parameters (in particular, the cryptographic case where X has superlogarithmic min-entropy, ε > 0 is negligible and F consists of circuits of polynomial size), we can make the simulator h deterministic. As an illustrative application of our theorem, we give a new security proof for the leakage-resilient stream-cipher from Eurocrypt'09. Our proof is simpler and quantitatively much better than the original proof using the dense model theorem, giving meaningful security guarantees if instantiated with a standard blockcipher like AES. Subsequent to this work, Chung, Lui and Pass gave an interactive variant of our main theorem, and used it to investigate weak notions of Zero-Knowledge. Vadhan and Zheng give a more constructive version of our theorem using their new uniform min-max theorem.}, author = {Jetchev, Dimitar and Pietrzak, Krzysztof Z}, editor = {Lindell, Yehuda}, isbn = {978-364254241-1}, location = {San Diego, USA}, pages = {566 -- 590}, publisher = {Springer}, title = {{How to fake auxiliary input}}, doi = {10.1007/978-3-642-54242-8_24}, volume = {8349}, year = {2014}, } @article{2852, abstract = {A robust combiner for hash functions takes two candidate implementations and constructs a hash function which is secure as long as at least one of the candidates is secure. So far, hash function combiners only aim at preserving a single property such as collision-resistance or pseudorandomness. However, when hash functions are used in protocols like TLS they are often required to provide several properties simultaneously. We therefore put forward the notion of robust multi-property combiners and elaborate on different definitions for such combiners. We then propose a combiner that provably preserves (target) collision-resistance, pseudorandomness, and being a secure message authentication code. This combiner satisfies the strongest notion we propose, which requires that the combined function satisfies every security property which is satisfied by at least one of the underlying hash functions. If the underlying hash functions have output length n, the combiner has output length 2n. This basically matches a known lower bound for black-box combiners for collision-resistance only, thus the other properties can be achieved without penalizing the length of the hash values. We then propose a combiner which also preserves the property of being indifferentiable from a random oracle, slightly increasing the output length to 2n + ω(log n).
Moreover, we show how to augment our constructions in order to make them also robust for the one-wayness property, but in this case we require an a priori upper bound on the input length.}, author = {Fischlin, Marc and Lehmann, Anja and Pietrzak, Krzysztof Z}, journal = {Journal of Cryptology}, number = {3}, pages = {397 -- 428}, publisher = {Springer}, title = {{Robust multi-property combiners for hash functions}}, doi = {10.1007/s00145-013-9148-7}, volume = {27}, year = {2014}, } @inproceedings{2082, abstract = {NMAC is a mode of operation which turns a fixed input-length keyed hash function f into a variable input-length function. A practical single-key variant of NMAC called HMAC is a very popular and widely deployed message authentication code (MAC). Security proofs and attacks for NMAC can typically be lifted to HMAC. NMAC was introduced by Bellare, Canetti and Krawczyk [Crypto'96], who proved it to be a secure pseudorandom function (PRF), and thus also a MAC, assuming that (1) f is a PRF and (2) the function we get when cascading f is weakly collision-resistant. Unfortunately, HMAC is typically instantiated with cryptographic hash functions like MD5 or SHA-1 for which (2) has been found to be wrong. To restore the provable guarantees for NMAC, Bellare [Crypto'06] showed its security based solely on the assumption that f is a PRF, albeit via a non-uniform reduction. - Our first contribution is a simpler and uniform proof for this fact: If f is an ε-secure PRF (against q queries) and a δ-non-adaptively secure PRF (against q queries), then NMAC^f is an (ε+ℓqδ)-secure PRF against q queries of length at most ℓ blocks each. - We then show that this ε+ℓqδ bound is basically tight. For the most interesting case where ℓqδ ≥ ε we prove this by constructing an f for which an attack with advantage ℓqδ exists. This also violates the bound O(ℓε) on the PRF-security of NMAC recently claimed by Koblitz and Menezes. - Finally, we analyze the PRF-security of a modification of NMAC called NI [An and Bellare, Crypto'99] that differs mainly by using a compression function with an additional keying input. This avoids the constant rekeying on multi-block messages in NMAC and allows for a security proof starting with the standard switch from a PRF to a random function, followed by an information-theoretic analysis. We carry out such an analysis, obtaining a tight ℓq^2/2^c bound for this step, improving over the trivial bound of ℓ^2q^2/2^c. The proof borrows combinatorial techniques originally developed for proving the security of CBC-MAC [Bellare et al., Crypto'05].}, author = {Gazi, Peter and Pietrzak, Krzysztof Z and Rybar, Michal}, editor = {Garay, Juan and Gennaro, Rosario}, location = {Santa Barbara, USA}, number = {1}, pages = {113 -- 130}, publisher = {Springer}, title = {{The exact PRF-security of NMAC and HMAC}}, doi = {10.1007/978-3-662-44371-2_7}, volume = {8616}, year = {2014}, } @inproceedings{2259, abstract = {The learning with rounding (LWR) problem, introduced by Banerjee, Peikert and Rosen at EUROCRYPT ’12, is a variant of learning with errors (LWE), where one replaces random errors with deterministic rounding. The LWR problem was shown to be as hard as LWE for a setting of parameters where the modulus and modulus-to-error ratio are super-polynomial. In this work we resolve the main open problem and give a new reduction that works for a larger range of parameters, allowing for a polynomial modulus and modulus-to-error ratio.
In particular, a smaller modulus gives us greater efficiency, and a smaller modulus-to-error ratio gives us greater security, which now follows from the worst-case hardness of GapSVP with polynomial (rather than super-polynomial) approximation factors. As a tool in the reduction, we show that there is a “lossy mode” for the LWR problem, in which LWR samples only reveal partial information about the secret. This property gives us several interesting new applications, including a proof that LWR remains secure with weakly random secrets of sufficient min-entropy, and very simple constructions of deterministic encryption, lossy trapdoor functions and reusable extractors. Our approach is inspired by a technique of Goldwasser et al. from ICS ’10, which implicitly showed the existence of a “lossy mode” for LWE. By refining this technique, we also improve on the parameters of that work to only requiring a polynomial (instead of super-polynomial) modulus and modulus-to-error ratio.}, author = {Alwen, Joel F and Krenn, Stephan and Pietrzak, Krzysztof Z and Wichs, Daniel}, location = {Santa Barbara, CA, United States}, number = {1}, pages = {57 -- 74}, publisher = {Springer}, title = {{Learning with rounding, revisited: New reduction properties and applications}}, doi = {10.1007/978-3-642-40041-4_4}, volume = {8042}, year = {2013}, } @inproceedings{2258, abstract = {In a digital signature scheme with message recovery, rather than transmitting the message m and its signature σ, a single enhanced signature τ is transmitted. The verifier is able to recover m from τ and at the same time verify its authenticity. The two most important parameters of such a scheme are its security and overhead |τ| − |m|. A simple argument shows that for any scheme with “n bits security” |τ| − |m| ≥ n, i.e., the overhead is lower bounded by the security parameter n. Currently, the best known constructions in the random oracle model are far from this lower bound, requiring an overhead of n + log q_h, where q_h is the number of queries to the random oracle. In this paper we give a construction which basically matches the n bit lower bound. We propose a simple digital signature scheme with n + o(log q_h) bits of overhead, where q_h denotes the number of random oracle queries. Our construction works in two steps. First, we propose a signature scheme with message recovery having optimal overhead in a new ideal model, the random invertible function model. Second, we show that a four-round Feistel network with random oracles as round functions is tightly “public-indifferentiable” from a random invertible function. At the core of our indifferentiability proof is an almost tight upper bound for the expected number of edges of the densest “small” subgraph of a random Cayley graph, which may be of independent interest.}, author = {Kiltz, Eike and Pietrzak, Krzysztof Z and Szegedy, Mario}, location = {Santa Barbara, CA, United States}, pages = {571 -- 588}, publisher = {Springer}, title = {{Digital signatures with minimal overhead from indifferentiable random invertible functions}}, doi = {10.1007/978-3-642-40041-4_31}, volume = {8042}, year = {2013}, } @inproceedings{2260, abstract = {Direct Anonymous Attestation (DAA) is one of the most complex cryptographic protocols deployed in practice. It allows an embedded secure processor known as a Trusted Platform Module (TPM) to attest to the configuration of its host computer without violating the owner’s privacy. DAA has been standardized by the Trusted Computing Group and ISO/IEC.
The security of the DAA standard and all existing schemes is analyzed in the random-oracle model. We provide the first constructions of DAA in the standard model, that is, without relying on random oracles. Our constructions use new building blocks, including the first efficient signatures of knowledge in the standard model, which have many applications beyond DAA.}, author = {Bernhard, David and Fuchsbauer, Georg and Ghadafi, Essam}, location = {Banff, AB, Canada}, pages = {518 -- 533}, publisher = {Springer}, title = {{Efficient signatures of knowledge and DAA in the standard model}}, doi = {10.1007/978-3-642-38980-1_33}, volume = {7954}, year = {2013}, } @inproceedings{2291, abstract = {Cryptographic access control promises to offer easily distributed trust and broader applicability, while reducing reliance on low-level online monitors. Traditional implementations of cryptographic access control rely on simple cryptographic primitives whereas recent endeavors employ primitives with richer functionality and security guarantees. Worryingly, few of the existing cryptographic access-control schemes come with precise guarantees, the gap between the policy specification and the implementation being analyzed only informally, if at all. In this paper we begin addressing this shortcoming. Unlike prior work that targeted ad-hoc policy specification, we look at the well-established Role-Based Access Control (RBAC) model, as used in a typical file system. In short, we provide a precise syntax for a computational version of RBAC, offer rigorous definitions for cryptographic policy enforcement of a large class of RBAC security policies, and demonstrate that an implementation based on attribute-based encryption meets our security notions. We view our main contribution as being at the conceptual level. Although we work with RBAC for concreteness, our general methodology could guide future research for uses of cryptography in other access-control models.}, author = {Ferrara, Anna and Fuchsbauer, Georg and Warinschi, Bogdan}, location = {New Orleans, LA, United States}, pages = {115 -- 129}, publisher = {IEEE}, title = {{Cryptographically enforced RBAC}}, doi = {10.1109/CSF.2013.15}, year = {2013}, } @inproceedings{2940, abstract = {A chain rule for an entropy notion H(·) states that the entropy H(X) of a variable X decreases by at most ℓ if conditioned on an ℓ-bit string A, i.e., H(X|A) ≥ H(X) − ℓ. More generally, it satisfies a chain rule for conditional entropy if H(X|Y,A) ≥ H(X|Y) − ℓ. All natural information-theoretic entropy notions we are aware of (like Shannon or min-entropy) satisfy some kind of chain rule for conditional entropy. Moreover, many computational entropy notions (like Yao entropy, unpredictability entropy and several variants of HILL entropy) satisfy the chain rule for conditional entropy, though here not only the quantity decreases by ℓ, but also the quality of the entropy decreases exponentially in ℓ. However, for the standard notion of conditional HILL entropy (the computational equivalent of min-entropy) the existence of such a rule was unknown so far. In this paper, we prove that for conditional HILL entropy no meaningful chain rule exists, assuming the existence of one-way permutations: there exist distributions X, Y, A, where A is a distribution over a single bit, but H(X|Y) ≫ H(X|Y,A), even if we simultaneously allow for a massive degradation in the quality of the entropy.
The idea underlying our construction is based on a surprising connection between the chain rule for HILL entropy and deniable encryption.}, author = {Krenn, Stephan and Pietrzak, Krzysztof Z and Wadia, Akshay}, editor = {Sahai, Amit}, location = {Tokyo, Japan}, pages = {23 -- 39}, publisher = {Springer}, title = {{A counterexample to the chain rule for conditional HILL entropy, and what deniable encryption has to do with it}}, doi = {10.1007/978-3-642-36594-2_2}, volume = {7785}, year = {2013}, } @article{502, abstract = {Blind signatures allow users to obtain signatures on messages hidden from the signer; moreover, the signer cannot link the resulting message/signature pair to the signing session. This paper presents blind signature schemes, in which the number of interactions between the user and the signer is minimal and whose blind signatures are short. Our schemes are defined over bilinear groups and are proved secure in the common-reference-string model without random oracles and under standard assumptions: CDH and the decision-linear assumption. (We also give variants over asymmetric groups based on similar assumptions.) The blind signatures are Waters signatures, which consist of 2 group elements. Moreover, we instantiate partially blind signatures, where the message consists of a part hidden from the signer and a commonly known public part, and schemes achieving perfect blindness. We propose new variants of blind signatures, such as signer-friendly partially blind signatures, where the public part can be chosen by the signer without prior agreement, 3-party blind signatures, as well as blind signatures on multiple aggregated messages provided by independent sources. We also extend Waters signatures to non-binary alphabets by proving a new result on the underlying hash function.}, author = {Blazy, Olivier and Fuchsbauer, Georg and Pointcheval, David and Vergnaud, Damien}, journal = {Journal of Computer Security}, number = {5}, pages = {627 -- 661}, publisher = {IOS Press}, title = {{Short blind signatures}}, doi = {10.3233/JCS-130477}, volume = {21}, year = {2013}, } @techreport{2274, abstract = {Proofs of work (PoW) have been suggested by Dwork and Naor (Crypto'92) as protection to a shared resource. The basic idea is to ask the service requestor to dedicate some non-trivial amount of computational work to every request. The original applications included prevention of spam and protection against denial of service attacks. More recently, PoWs have been used to prevent double spending in the Bitcoin digital currency system. In this work, we put forward an alternative concept for PoWs -- so-called proofs of space (PoS), where a service requestor must dedicate a significant amount of disk space as opposed to computation. We construct secure PoS schemes in the random oracle model, using graphs with high "pebbling complexity" and Merkle hash-trees.}, author = {Dziembowski, Stefan and Faust, Sebastian and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z}, publisher = {IST Austria}, title = {{Proofs of Space}}, year = {2013}, } @inproceedings{2048, abstract = {Leakage-resilient cryptography attempts to incorporate side-channel leakage into the black-box security model and designs cryptographic schemes that are provably secure within it. Informally, a scheme is leakage-resilient if it remains secure even if an adversary learns a bounded amount of arbitrary information about the scheme's internal state.
Unfortunately, most leakage-resilient schemes are unnecessarily complicated in order to achieve strong provable security guarantees. As advocated by Yu et al. [CCS’10], this mostly is an artefact of the security proof, and in practice much simpler constructions may already suffice to protect against realistic side-channel attacks. In this paper, we show that indeed for simpler constructions leakage-resilience can be obtained when we aim for relaxed security notions where the leakage-functions and/or the inputs to the primitive are chosen non-adaptively. For example, we show that a three-round Feistel network instantiated with a leakage-resilient PRF yields a leakage-resilient PRP if the inputs are chosen non-adaptively. (This complements the result of Dodis and Pietrzak [CRYPTO’10], who show that if adaptive queries are allowed, a superlogarithmic number of rounds is necessary.) We also show that a minor variation of the classical GGM construction gives a leakage-resilient PRF if both the leakage-function and the inputs are chosen non-adaptively.}, author = {Faust, Sebastian and Pietrzak, Krzysztof Z and Schipper, Joachim}, booktitle = {Conference proceedings CHES 2012}, location = {Leuven, Belgium}, pages = {213 -- 232}, publisher = {Springer}, title = {{Practical leakage-resilient symmetric cryptography}}, doi = {10.1007/978-3-642-33027-8_13}, volume = {7428}, year = {2012}, } @inproceedings{2049, abstract = {We propose a new authentication protocol that is provably secure based on a ring variant of the learning parity with noise (LPN) problem. The protocol follows the design principle of the LPN-based protocol from Eurocrypt’11 (Kiltz et al.), and like it, is a two-round protocol secure against active attacks. Moreover, our protocol has small communication complexity and a very small footprint which makes it applicable in scenarios that involve low-cost, resource-constrained devices. Performance-wise, our protocol is more efficient than previous LPN-based schemes, such as the many variants of the Hopper-Blum (HB) protocol and the aforementioned protocol from Eurocrypt’11. Our implementation results show that it is even comparable to the standard challenge-and-response protocols based on the AES block-cipher. Our basic protocol is roughly 20 times slower than AES, but with the advantage of having 10 times smaller code size. Furthermore, if a few hundred bytes of non-volatile memory are available to allow the storage of some off-line pre-computations, then the online phase of our protocols is only twice as slow as AES.}, author = {Heyse, Stefan and Kiltz, Eike and Lyubashevsky, Vadim and Paar, Christof and Pietrzak, Krzysztof Z}, booktitle = {Conference proceedings FSE 2012}, location = {Washington, DC, USA}, pages = {346 -- 365}, publisher = {Springer}, title = {{Lapin: An efficient authentication protocol based on ring-LPN}}, doi = {10.1007/978-3-642-34047-5_20}, volume = {7549}, year = {2012}, } @inproceedings{2937, abstract = {Developers building cryptography into security-sensitive applications face a daunting task. Not only must they understand the security guarantees delivered by the constructions they choose, they must also implement and combine them correctly and efficiently. Cryptographic compilers free developers from this task by turning high-level specifications of security goals into efficient implementations. Yet, trusting such tools is hard as they rely on complex mathematical machinery and claim security properties that are subtle and difficult to verify.
In this paper we present ZKCrypt, an optimizing cryptographic compiler achieving an unprecedented level of assurance without sacrificing practicality for a comprehensive class of cryptographic protocols, known as Zero-Knowledge Proofs of Knowledge. The pipeline of ZKCrypt integrates purpose-built verified compilers and verifying compilers producing formal proofs in the CertiCrypt framework. By combining the guarantees delivered by each stage, ZKCrypt provides assurance that the output implementation securely realizes the abstract proof goal given as input. We report on the main characteristics of ZKCrypt, highlight new definitions and concepts at its foundations, and illustrate its applicability through a representative example of an anonymous credential system.}, author = {Almeida, José and Barbosa, Manuel and Bangerter, Endre and Barthe, Gilles and Krenn, Stephan and Béguelin, Santiago}, booktitle = {Proceedings of the 2012 ACM conference on Computer and communications security}, location = {Raleigh, NC, USA}, pages = {488 -- 500}, publisher = {ACM}, title = {{Full proof cryptography: Verifiable compilation of efficient zero-knowledge protocols}}, doi = {10.1145/2382196.2382249}, year = {2012}, } @inproceedings{2974, abstract = {We construct a perfectly binding string commitment scheme whose security is based on the learning parity with noise (LPN) assumption, or equivalently, the hardness of decoding random linear codes. Our scheme not only allows for a simple and efficient zero-knowledge proof of knowledge for committed values (essentially a Σ-protocol), but also for such proofs showing any kind of relation amongst committed values, i.e. proving that messages m_0,...,m_u are such that m_0 = C(m_1,...,m_u) for any circuit C. To get soundness which is exponentially small in a security parameter t, and when the zero-knowledge property relies on the LPN problem with secrets of length ℓ, our 3-round protocol has communication complexity O(t|C|ℓ log(ℓ)) and computational complexity of O(t|C|ℓ) bit operations. The hidden constants are small, and the computation consists mostly of computing inner products of bit-vectors.}, author = {Jain, Abhishek and Krenn, Stephan and Pietrzak, Krzysztof Z and Tentes, Aris}, editor = {Wang, Xiaoyun and Sako, Kazue}, location = {Beijing, China}, pages = {663 -- 680}, publisher = {Springer}, title = {{Commitments and efficient zero knowledge proofs from learning parity with noise}}, doi = {10.1007/978-3-642-34961-4_40}, volume = {7658}, year = {2012}, } @inproceedings{3250, abstract = {The Learning Parity with Noise (LPN) problem has recently found many applications in cryptography as the hardness assumption underlying the constructions of "provably secure" cryptographic schemes like encryption or authentication protocols. Being provably secure means that the scheme comes with a proof showing that the existence of an efficient adversary against the scheme implies that the underlying hardness assumption is wrong. LPN-based schemes are appealing for theoretical and practical reasons. On the theoretical side, LPN-based schemes offer a very strong security guarantee. The LPN problem is equivalent to the problem of decoding random linear codes, a problem that has been extensively studied in the last half century. The fastest known algorithms run in exponential time and unlike most number-theoretic problems used in cryptography, the LPN problem does not succumb to known quantum algorithms.
On the practical side, LPN-based schemes are often extremely simple and efficient in terms of code-size as well as time and space requirements. This makes them prime candidates for light-weight devices like RFID tags, which are too weak to implement standard cryptographic primitives like the AES block-cipher. This talk will be a gentle introduction to provable security using simple LPN-based schemes as examples. Starting from pseudorandom generators and symmetric key encryption, over secret-key authentication protocols, and, if time admits, touching on recent constructions of public-key identification, commitments and zero-knowledge proofs.}, author = {Pietrzak, Krzysztof Z}, location = {Špindlerův Mlýn, Czech Republic}, pages = {99 -- 114}, publisher = {Springer}, title = {{Cryptography from learning parity with noise}}, doi = {10.1007/978-3-642-27660-6_9}, volume = {7147}, year = {2012}, } @inproceedings{3282, abstract = {Traditionally, symmetric-key message authentication codes (MACs) are easily built from pseudorandom functions (PRFs). In this work we propose a wide variety of other approaches to building efficient MACs, without going through a PRF first. In particular, unlike deterministic PRF-based MACs, where each message has a unique valid tag, we give a number of probabilistic MAC constructions from various other primitives/assumptions. Our main results are summarized as follows: We show several new probabilistic MAC constructions from a variety of general assumptions, including CCA-secure encryption, Hash Proof Systems and key-homomorphic weak PRFs. By instantiating these frameworks under concrete number theoretic assumptions, we get several schemes which are more efficient than just using a state-of-the-art PRF instantiation under the corresponding assumption. For probabilistic MACs, unlike deterministic ones, unforgeability against a chosen message attack (uf-cma) alone does not imply security if the adversary can additionally make verification queries (uf-cmva). We give an efficient generic transformation from any uf-cma secure MAC which is "message-hiding" into a uf-cmva secure MAC. This resolves the main open problem of Kiltz et al. from Eurocrypt'11; by using our transformation on their constructions, we get the first efficient MACs from the LPN assumption. While all our new MAC constructions immediately give efficient actively secure, two-round symmetric-key identification schemes, we also show a very simple, three-round actively secure identification protocol from any weak PRF. In particular, the resulting protocol is much more efficient than the trivial approach of building a regular PRF from a weak PRF.}, author = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Kiltz, Eike and Wichs, Daniel}, location = {Cambridge, UK}, pages = {355 -- 374}, publisher = {Springer}, title = {{Message authentication, revisited}}, doi = {10.1007/978-3-642-29011-4_22}, volume = {7237}, year = {2012}, } @inproceedings{3280, abstract = {The (decisional) learning with errors problem (LWE) asks to distinguish "noisy" inner products of a secret vector with random vectors from uniform. The learning parities with noise problem (LPN) is the special case where the elements of the vectors are bits. In recent years, the LWE and LPN problems have found many applications in cryptography.
In this paper we introduce a (seemingly) much stronger adaptive assumption, called "subspace LWE" (SLWE), where the adversary can learn the inner product of the secret and random vectors after they were projected into an adaptively and adversarially chosen subspace. We prove that, surprisingly, the SLWE problem mapping into subspaces of dimension d is almost as hard as LWE using secrets of length d (the other direction is trivial). This result immediately implies that several existing cryptosystems whose security is based on the hardness of the LWE/LPN problems are provably secure in a much stronger sense than anticipated. As an illustrative example we show that the standard way of using LPN for symmetric CPA-secure encryption is even secure against a very powerful class of related-key attacks.}, author = {Pietrzak, Krzysztof Z}, location = {Taormina, Sicily, Italy}, pages = {548 -- 563}, publisher = {Springer}, title = {{Subspace LWE}}, doi = {10.1007/978-3-642-28914-9_31}, volume = {7194}, year = {2012}, } @inproceedings{3281, abstract = {We consider the problem of amplifying the "lossiness" of functions. We say that an oracle circuit C*: {0,1}^m → {0,1}^* amplifies relative lossiness from ℓ/n to L/m if for every function f: {0,1}^n → {0,1}^n it holds that (1) if f is injective then so is C^f, and (2) if f has image size of at most 2^{n−ℓ}, then C^f has image size at most 2^{m−L}. The question is whether such a C* exists for L/m ≫ ℓ/n. This problem arises naturally in the context of cryptographic "lossy functions," where the relative lossiness is the key parameter. We show that for every circuit C* that makes at most t queries to f, the relative lossiness of C^f is at most L/m ≤ ℓ/n + O(log t)/n. In particular, no black-box method making a polynomial t = poly(n) number of queries can amplify relative lossiness by more than an O(log n)/n additive term. We show that this is tight by giving a simple construction (cascading with some randomization) that achieves such amplification.}, author = {Pietrzak, Krzysztof Z and Rosen, Alon and Segev, Gil}, location = {Taormina, Sicily, Italy}, pages = {458 -- 475}, publisher = {Springer}, title = {{Lossy functions do not amplify well}}, doi = {10.1007/978-3-642-28914-9_26}, volume = {7194}, year = {2012}, } @inproceedings{3279, abstract = {We show a hardness-preserving construction of a PRF from any length-doubling PRG which improves upon known constructions whenever we can put a non-trivial upper bound q on the number of queries to the PRF. Our construction requires only O(log q) invocations of the underlying PRG with each query. In comparison, the number of invocations by the best previous hardness-preserving construction (GGM using Levin's trick) is logarithmic in the hardness of the PRG. For example, starting from an exponentially secure PRG {0,1}^n → {0,1}^{2n}, we get a PRF which is exponentially secure if queried at most q = exp(√n) times and where each invocation of the PRF requires Θ(√n) queries to the underlying PRG. This is much less than the Θ(n) required by known constructions.}, author = {Jain, Abhishek and Pietrzak, Krzysztof Z and Tentes, Aris}, location = {Taormina, Sicily, Italy}, pages = {369 -- 382}, publisher = {Springer}, title = {{Hardness preserving constructions of pseudorandom functions}}, doi = {10.1007/978-3-642-28914-9_21}, volume = {7194}, year = {2012}, }