@inproceedings{1653,
abstract = {A somewhere statistically binding (SSB) hash, introduced by Hubáček and Wichs (ITCS ’15), can be used to hash a long string x to a short digest y = H_{hk}(x) using a public hashing-key hk. Furthermore, there is a way to set up the hash key hk to make it statistically binding on some arbitrary hidden position i, meaning that: (1) the digest y completely determines the i’th bit (or symbol) of x, so that all pre-images of y have the same value in the i’th position, (2) it is computationally infeasible to distinguish the position i on which hk is statistically binding from any other position i’. Lastly, the hash should have a local opening property analogous to Merkle-tree hashing, meaning that given x and y = H_{hk}(x) it should be possible to create a short proof π that certifies the value of the i’th bit (or symbol) of x without having to provide the entire input x. A similar primitive called a positional accumulator, introduced by Koppula, Lewko and Waters (STOC ’15), further supports dynamic updates of the hashed value. These tools, which are interesting in their own right, also serve as one of the main technical components in several recent works building advanced applications from indistinguishability obfuscation (iO).
The prior constructions of SSB hashing and positional accumulators required fully homomorphic encryption (FHE) and iO respectively. In this work, we give new constructions of these tools based on well-studied number-theoretic assumptions such as DDH, Phi-Hiding and DCR, as well as a general construction from lossy/injective functions.},
author = {Okamoto, Tatsuaki and Pietrzak, Krzysztof Z and Waters, Brent and Wichs, Daniel},
location = {Auckland, New Zealand},
pages = {121 -- 145},
publisher = {Springer},
title = {{New realizations of somewhere statistically binding hashing and positional accumulators}},
doi = {10.1007/978-3-662-48797-6_6},
volume = {9452},
year = {2015},
}
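The local-opening property described in this abstract is exactly the Merkle-tree mechanism; below is a minimal, runnable sketch of it (illustrative only: a plain Merkle tree, without the SSB feature of hiding a statistically binding position i).

# Toy sketch of Merkle-tree-style local opening (illustrative only; not the
# paper's SSB construction, which additionally hides a binding position i).
import hashlib

def H(data: bytes) -> bytes:
    return hashlib.sha256(data).digest()

def build_tree(leaves):
    # Returns a list of levels; level 0 = hashed leaves, last level = root.
    level = [H(x) for x in leaves]
    levels = [level]
    while len(level) > 1:
        level = [H(level[j] + level[j + 1]) for j in range(0, len(level), 2)]
        levels.append(level)
    return levels

def open_at(levels, i):
    # Short proof pi for position i: one sibling hash per level.
    proof = []
    for level in levels[:-1]:
        proof.append(level[i ^ 1])
        i //= 2
    return proof

def verify(root, i, leaf, proof):
    h = H(leaf)
    for sib in proof:
        h = H(h + sib) if i % 2 == 0 else H(sib + h)
        i //= 2
    return h == root

leaves = [bytes([b]) for b in range(8)]          # 8 one-byte symbols
levels = build_tree(leaves)
root = levels[-1][0]
pi = open_at(levels, 5)
assert verify(root, 5, leaves[5], pi)            # certifies the 5th symbol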
@inproceedings{1225,
abstract = {At Crypto 2015 Fuchsbauer, Hanser and Slamanig (FHS) presented the first standard-model construction of efficient round-optimal blind signatures that does not require complexity leveraging. It is conceptually simple and builds on the primitive of structure-preserving signatures on equivalence classes (SPS-EQ). FHS prove the unforgeability of their scheme assuming EUF-CMA security of the SPS-EQ scheme and hardness of a version of the DH inversion problem. Blindness under adversarially chosen keys is proven under an interactive variant of the DDH assumption. We propose a variant of their scheme whose blindness can be proven under a non-interactive assumption, namely a variant of the bilinear DDH assumption. We moreover prove its unforgeability assuming only unforgeability of the underlying SPS-EQ but no additional assumptions as needed for the FHS scheme.},
author = {Fuchsbauer, Georg and Hanser, Christian and Kamath Hosdurg, Chethan and Slamanig, Daniel},
location = {Amalfi, Italy},
pages = {391 -- 408},
publisher = {Springer},
title = {{Practical round-optimal blind signatures in the standard model from weaker assumptions}},
doi = {10.1007/978-3-319-44618-9_21},
volume = {9841},
year = {2016},
}
@inproceedings{1229,
abstract = {Witness encryption (WE) was introduced by Garg et al. [GGSW13]. A WE scheme is defined for some NP language L and lets a sender encrypt messages relative to instances x. A ciphertext for x can be decrypted using w witnessing x ∈ L, but hides the message if x ∉ L. Garg et al. construct WE from multilinear maps and give another construction [GGH+13b] using indistinguishability obfuscation (iO) for circuits. Due to the reliance on such heavy tools, WE can currently hardly be implemented on powerful hardware and will unlikely be realizable on constrained devices like smart cards any time soon. We construct a WE scheme where encryption is done by simply computing a Naor-Yung ciphertext (two CPA encryptions and a NIZK proof). To achieve this, our scheme has a setup phase, which outputs public parameters containing an obfuscated circuit (only required for decryption), two encryption keys and a common reference string (used for encryption). This setup need only be run once, and the parameters can be used for arbitrarily many encryptions. Our scheme can also be turned into a functional WE scheme, where a message is encrypted w.r.t. a statement and a function f, and decryption with a witness w yields f(m, w). Our construction is inspired by the functional encryption scheme by Garg et al. and we prove (selective) security assuming iO and statistically simulation-sound NIZK. We give a construction of the latter in bilinear groups, and combining it with ElGamal encryption yields ciphertexts of size 1.3 kB at a 128-bit security level that can be computed on a smart card.},
author = {Abusalah, Hamza M and Fuchsbauer, Georg and Pietrzak, Krzysztof Z},
location = {Guildford, UK},
pages = {285 -- 303},
publisher = {Springer},
title = {{Offline witness encryption}},
doi = {10.1007/978-3-319-39555-5_16},
volume = {9696},
year = {2016},
}
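For orientation, the Naor-Yung ciphertext shape this abstract mentions is two independent CPA encryptions of the same message plus a NIZK of plaintext equality. A toy sketch with ElGamal standing in for the CPA scheme and the NIZK elided (parameters illustrative, not secure):

# Minimal sketch of the Naor-Yung ciphertext shape: two CPA (ElGamal)
# encryptions of the same message; the NIZK proof of plaintext equality
# is elided here. Toy parameters, illustrative only.
import secrets

p = 2**127 - 1          # toy prime modulus (NOT a secure choice)
g = 3

def keygen():
    sk = secrets.randbelow(p - 2) + 1
    return sk, pow(g, sk, p)

def enc(pk, m):
    r = secrets.randbelow(p - 2) + 1
    return pow(g, r, p), (m * pow(pk, r, p)) % p

def dec(sk, ct):
    c1, c2 = ct
    return (c2 * pow(pow(c1, sk, p), p - 2, p)) % p

sk1, pk1 = keygen()
sk2, pk2 = keygen()
m = 42
ct = (enc(pk1, m), enc(pk2, m))   # plus a NIZK that both encrypt the same m
assert dec(sk1, ct[0]) == dec(sk2, ct[1]) == m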
@inproceedings{1231,
abstract = {We study the time- and memory-complexities of the problem of computing labels of (multiple) randomly selected challenge nodes in a directed acyclic graph. The w-bit label of a node is the hash of the labels of its parents, and the hash function is modeled as a random oracle. Specific instances of this problem underlie both proofs of space [Dziembowski et al. CRYPTO’15] as well as popular memory-hard functions like scrypt. As our main tool, we introduce the notion of a probabilistic parallel entangled pebbling game, a new type of combinatorial pebbling game on a graph, which is closely related to the labeling game on the same graph. As a first application of our framework, we prove that for scrypt, when the underlying hash function is invoked n times, the cumulative memory complexity (CMC) (a notion recently introduced by Alwen and Serbinenko (STOC’15) to capture amortized memory-hardness for parallel adversaries) is at least Ω(w · (n/log n)^2). This bound holds for adversaries that can store many natural functions of the labels (e.g., linear combinations), but still not arbitrary functions thereof. We then introduce and study a combinatorial quantity, and show how a sufficiently small upper bound on it (which we conjecture) extends our CMC bound for scrypt to hold against arbitrary adversaries. We also show that such an upper bound solves the main open problem for proofs-of-space protocols: namely, establishing that the time complexity of computing the label of a random node in a graph on n nodes (given an initial kw-bit state) reduces tightly to the time complexity for black pebbling on the same graph (given an initial k-node pebbling).},
author = {Alwen, Joel F and Chen, Binyi and Kamath Hosdurg, Chethan and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z and Tessaro, Stefano},
location = {Vienna, Austria},
pages = {358 -- 387},
publisher = {Springer},
title = {{On the complexity of scrypt and proofs of space in the parallel random oracle model}},
doi = {10.1007/978-3-662-49896-5_13},
volume = {9666},
year = {2016},
}
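The labeling game in the first sentence of this abstract is simple to state in code; a toy version with SHA-256 standing in for the random oracle (node indexing and padding conventions are illustrative):

# Toy version of the graph-labeling game: the label of a node is the hash of
# its parents' labels (the random oracle is modeled by SHA-256 here).
import hashlib

def labels(n, parents, x: bytes):
    # parents[v] lists the parents of node v in a DAG on nodes 0..n-1,
    # assumed topologically ordered. Source nodes are labeled from input x.
    lab = []
    for v in range(n):
        data = b"".join(lab[u] for u in parents[v]) or x + bytes([v])
        lab.append(hashlib.sha256(data).digest())
    return lab

# A path graph (0 -> 1 -> ... -> 5), the structure underlying scrypt's
# first phase: each label hashes the previous one.
n = 6
parents = [[]] + [[v - 1] for v in range(1, n)]
lab = labels(n, parents, b"seed")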
@inproceedings{1233,
abstract = {About three decades ago it was realized that implementing private channels between parties which can be adaptively corrupted requires an encryption scheme that is secure against selective opening attacks. Whether standard (IND-CPA) security implies security against selective opening attacks has been a major open question since. The only known reduction from selective opening to IND-CPA security loses an exponential factor. A polynomial reduction is only known for the very special case where the distribution considered in the selective opening security experiment is a product distribution, i.e., the messages are sampled independently from each other. In this paper we give a reduction whose loss is quantified via the dependence graph (where message dependencies correspond to edges) of the underlying message distribution. In particular, for some concrete distributions including Markov distributions, our reduction is polynomial.},
author = {Fuchsbauer, Georg and Heuer, Felix and Kiltz, Eike and Pietrzak, Krzysztof Z},
location = {Tel Aviv, Israel},
pages = {282 -- 305},
publisher = {Springer},
title = {{Standard security does imply security against selective opening for Markov distributions}},
doi = {10.1007/978-3-662-49096-9_12},
volume = {9562},
year = {2016},
}
@inproceedings{1235,
abstract = {A constrained pseudorandom function (CPRF) F: K × X → Y for a family T of subsets of X is a function where for any key k ∈ K and set S ∈ T one can efficiently compute a short constrained key k_S, which allows one to evaluate F(k, ·) on all inputs x ∈ S, while the outputs on all inputs x ∉ S look random even given k_S. Abusalah et al. recently constructed the first constrained PRF for inputs of arbitrary length whose sets S are decided by Turing machines. They use their CPRF to build broadcast encryption and the first ID-based non-interactive key exchange for an unbounded number of users. Their constrained keys are obfuscated circuits and are therefore large. In this work we drastically reduce the key size and define a constrained key for a Turing machine M as a short signature on M. For this, we introduce a new signature primitive with constrained signing keys that let one only sign certain messages, while forging a signature on others is hard even when knowing the coins for key generation.},
author = {Abusalah, Hamza M and Fuchsbauer, Georg},
location = {Guildford, UK},
pages = {445 -- 463},
publisher = {Springer},
title = {{Constrained PRFs for unbounded inputs with short keys}},
doi = {10.1007/978-3-319-39555-5_24},
volume = {9696},
year = {2016},
}
@inproceedings{1236,
abstract = {A constrained pseudorandom function F: K × X → Y for a family T ⊆ 2^X of subsets of X is a function where for any key k ∈ K and set S ∈ T one can efficiently compute a constrained key k_S which allows one to evaluate F(k, ·) on all inputs x ∈ S, while even given this key, the outputs on all inputs x ∉ S look random. At Asiacrypt’13 Boneh and Waters gave a construction which supports the most general set family so far. Its keys k_C are defined for sets decided by Boolean circuits C and enable evaluation of the PRF on any x ∈ X where C(x) = 1. In their construction the PRF input length and the size of the circuits C for which constrained keys can be computed must be fixed beforehand during key generation. We construct a constrained PRF that has an unbounded input length and whose constrained keys can be defined for any set recognized by a Turing machine. The only a priori bound we make is on the description size of the machines. We prove our construction secure assuming public-coin differing-input obfuscation. As applications of our constrained PRF we build a broadcast encryption scheme where the number of potential receivers need not be fixed at setup (in particular, the length of the keys is independent of the number of parties) and the first identity-based non-interactive key exchange protocol with no bound on the number of parties that can agree on a shared key.},
author = {Abusalah, Hamza M and Fuchsbauer, Georg and Pietrzak, Krzysztof Z},
location = {San Francisco, CA, USA},
pages = {413 -- 428},
publisher = {Springer},
title = {{Constrained PRFs for unbounded inputs}},
doi = {10.1007/978-3-319-29485-8_24},
volume = {9610},
year = {2016},
}
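The Turing-machine constraints of this paper are far beyond a snippet, but the constrained-PRF interface itself is easy to illustrate with the folklore GGM prefix-constrained PRF (not this paper's construction; names and encodings are illustrative): a constrained key for a prefix p is simply the GGM tree node reached by walking p from the root.

# GGM prefix-constrained PRF sketch (illustrative only).
import hashlib

def G(seed: bytes, bit: int) -> bytes:     # length-doubling PRG, split in two
    return hashlib.sha256(bytes([bit]) + seed).digest()

def F(k: bytes, x: str) -> bytes:          # x is a bit-string like "0110"
    for b in x:
        k = G(k, int(b))
    return k

def constrain(k: bytes, prefix: str) -> bytes:
    return F(k, prefix)                    # node at 'prefix' in the GGM tree

def eval_constrained(k_p: bytes, prefix: str, x: str) -> bytes:
    assert x.startswith(prefix)            # only inputs in S = prefix·{0,1}*
    return F(k_p, x[len(prefix):])

k = b"\x00" * 32
k_p = constrain(k, "01")
assert eval_constrained(k_p, "01", "0110") == F(k, "0110")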
@inproceedings{1365,
abstract = {A memory-hard function (MHF) f is equipped with a space cost σ and time cost τ parameter such that repeatedly computing f_{σ,τ} on an application-specific integrated circuit (ASIC) is not economically advantageous relative to a general-purpose computer. Technically we would like that any (generalized) circuit for evaluating an iMHF f_{σ,τ} has area × time (AT) complexity in Θ(σ^2 · τ). A data-independent MHF (iMHF) has the added property that it can be computed with almost optimal memory and time complexity by an algorithm which accesses memory in a pattern independent of the input value. Such functions can be specified by fixing a directed acyclic graph (DAG) G on n = Θ(σ · τ) nodes representing its computation graph. In this work we develop new tools for analyzing iMHFs. First we define and motivate a new complexity measure capturing the amount of energy (i.e. electricity) required to compute a function. We argue that, in practice, this measure is at least as important as the more traditional AT-complexity. Next we describe an algorithm A for repeatedly evaluating an iMHF based on an arbitrary DAG G. We upper-bound both its energy and AT complexities per instance evaluated in terms of a certain combinatorial property of G. Next we instantiate our attack for several general classes of DAGs which include those underlying many of the most important iMHF candidates in the literature. In particular, we obtain the following results, which hold for all choices of parameters σ and τ (and thread-count) such that n = σ · τ. - The Catena-Dragonfly function of [FLW13] has AT and energy complexities O(n^{1.67}). - The Catena-Butterfly function of [FLW13] has complexities in O(n^{1.67}). - The Double-Buffer and the Linear functions of [CGBS16] both have complexities in O(n^{1.67}). - The Argon2i function of [BDK15] (winner of the Password Hashing Competition [PHC]) has complexities O(n^{7/4} log(n)). - The Single-Buffer function of [CGBS16] has complexities O(n^{7/4} log(n)). - Any iMHF can be computed by an algorithm with complexities O(n^2 / log^{1−ε}(n)) for all ε > 0. In particular, when τ = 1 this shows that the goal of constructing an iMHF with AT-complexity Θ(σ^2 · τ) is unachievable. Along the way we prove a lemma upper-bounding the depth-robustness of any DAG, which may prove to be of independent interest.},
author = {Alwen, Joel F and Blocki, Jeremiah},
location = {Santa Barbara, CA, USA},
pages = {241 -- 271},
publisher = {Springer},
title = {{Efficiently computing data-independent memory-hard functions}},
doi = {10.1007/978-3-662-53008-5_9},
volume = {9815},
year = {2016},
}
@inproceedings{1366,
abstract = {We study the problem of devising provably secure PRNGs with input based on the sponge paradigm. Such constructions are very appealing, as efficient software/hardware implementations of SHA-3 can easily be translated into a PRNG in a nearly black-box way. The only existing sponge-based construction, proposed by Bertoni et al. (CHES 2010), fails to achieve the security notion of robustness recently considered by Dodis et al. (CCS 2013), for two reasons: (1) The construction is deterministic, and thus there are high-entropy input distributions on which the construction fails to extract random bits, and (2) The construction is not forward secure, and presented solutions aiming at restoring forward security have not been rigorously analyzed. We propose a seeded variant of Bertoni et al.’s PRNG with input which we prove secure in the sense of robustness, delivering in particular concrete security bounds. On the way, we make what we believe to be an important conceptual contribution, developing a variant of the security framework of Dodis et al. tailored to the ideal permutation model that captures PRNG security in settings where the weakly random inputs are provided from a large class of possible adversarial samplers which are also allowed to query the random permutation. As a further application of our techniques, we also present an efficient sponge-based key-derivation function (which can be instantiated from SHA-3 in a black-box fashion), which we also prove secure when fed with samples from permutation-dependent distributions.},
author = {Gazi, Peter and Tessaro, Stefano},
location = {Vienna, Austria},
pages = {87 -- 116},
publisher = {Springer},
title = {{Provably robust sponge-based PRNGs and KDFs}},
doi = {10.1007/978-3-662-49890-3_4},
volume = {9665},
year = {2016},
}
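To fix the absorb/squeeze interface this abstract refers to, here is a toy rendering of a sponge-based PRNG with input. This is an illustrative sketch only (simplified padding, a 2-round Feistel standing in for the public permutation), not the construction proven robust in the paper.

# Toy sponge-based PRNG with input (illustrative only).
import hashlib

RATE, CAP = 16, 32                         # bytes; state = rate || capacity

def perm(state: bytes) -> bytes:           # a toy public permutation (Feistel)
    l, r = state[:24], state[24:]
    for _ in range(2):
        l, r = r, bytes(a ^ b for a, b in zip(l, hashlib.sha256(b"rnd" + r).digest()[:24]))
    return l + r

class SpongePRNG:
    def __init__(self, seed: bytes):
        self.state = (seed + bytes(RATE + CAP))[:RATE + CAP]
    def absorb(self, inp: bytes):          # feed weakly random input
        for i in range(0, len(inp), RATE):
            block = inp[i:i + RATE].ljust(RATE, b"\x00")
            rate = bytes(a ^ b for a, b in zip(block, self.state[:RATE]))
            self.state = perm(rate + self.state[RATE:])
    def next(self) -> bytes:
        out = self.state[:RATE]            # squeeze one block, then overwrite
        self.state = perm(bytes(RATE) + self.state[RATE:])  # the rate (forward security)
        return out

prng = SpongePRNG(b"initial seed")
prng.absorb(b"weakly random input")
block = prng.next()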
@article{1479,
abstract = {Most entropy notions H(·) like Shannon or min-entropy satisfy a chain rule stating that for random variables X, Z, and A we have H(X|Z,A) ≥ H(X|Z) − |A|. That is, by conditioning on A the entropy of X can decrease by at most the bitlength |A| of A. Such chain rules are known to hold for some computational entropy notions like Yao’s and unpredictability-entropy. For HILL entropy, the computational analogue of min-entropy, the chain rule is of special interest and has found many applications, including leakage-resilient cryptography, deterministic encryption, and memory delegation. These applications rely on restricted special cases of the chain rule. Whether the chain rule for conditional HILL entropy holds in general was an open problem for which we give a strong negative answer: we construct joint distributions (X, Z, A), where A is a distribution over a single bit, such that the HILL entropy H^{HILL}(X|Z) is large but H^{HILL}(X|Z,A) is basically zero.
Our counterexample makes only the minimal assumption that NP ⊈ P/poly. Under the stronger assumption that injective one-way functions exist, we can make all the distributions efficiently samplable.
Finally, we show that some more sophisticated cryptographic objects like lossy functions can be used to sample a distribution constituting a counterexample to the chain rule, making only a single invocation to the underlying object.},
author = {Krenn, Stephan and Pietrzak, Krzysztof Z and Wadia, Akshay and Wichs, Daniel},
journal = {Computational Complexity},
number = {3},
pages = {567 -- 605},
publisher = {Springer},
title = {{A counterexample to the chain rule for conditional HILL entropy}},
doi = {10.1007/s00037-015-0120-9},
volume = {25},
year = {2016},
}
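For contrast, the information-theoretic chain rule that fails in the HILL setting has a one-line proof; it is stated here (in the standard average-case min-entropy form, following Dodis et al.) to make the gap with the computational case explicit:

\[
2^{-\tilde H_\infty(X \mid Z, A)}
= \mathbb{E}_{z,a}\Bigl[\max_x \Pr[X = x \mid Z = z, A = a]\Bigr]
\le 2^{|A|}\,\mathbb{E}_{z}\Bigl[\max_x \Pr[X = x \mid Z = z]\Bigr]
= 2^{|A| - \tilde H_\infty(X \mid Z)},
\]

hence $\tilde H_\infty(X \mid Z, A) \ge \tilde H_\infty(X \mid Z) - |A|$. The paper shows that the HILL analogue fails badly: conditioning on a single extra bit A can collapse HILL entropy essentially to zero.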
@article{1177,
abstract = {Boldyreva, Palacio and Warinschi introduced a multiple forking game as an extension of general forking. The notion of (multiple) forking is a useful abstraction from the actual simulation of a cryptographic scheme to the adversary in a security reduction, and is achieved through the intermediary of a so-called wrapper algorithm. Multiple forking has turned out to be a useful tool in the security arguments of several cryptographic protocols. However, a reduction employing multiple forking incurs a significant degradation of (Formula presented.), where (Formula presented.) denotes the upper bound on the underlying random oracle calls and (Formula presented.) the number of forkings. In this work we take a closer look at the reasons for the degradation with a tighter security bound in mind. We nail down the exact set of conditions for success in the multiple forking game. A careful analysis of the cryptographic schemes and corresponding security reductions employing multiple forking leads to the formulation of ‘dependence’ and ‘independence’ conditions pertaining to the output of the wrapper in different rounds. Based on the (in)dependence conditions we propose a general framework of multiple forking and a General Multiple Forking Lemma. Leveraging (in)dependence to the full allows us to improve the degradation factor in the multiple forking game by a factor of (Formula presented.). By implication, the cost of a single forking involving two random oracles (augmented forking) matches that involving a single random oracle (elementary forking). Finally, we study the effect of these observations on the concrete security of existing schemes employing multiple forking. We conclude that by careful design of the protocol (and the wrapper in the security reduction) it is possible to harness our observations to the full extent.},
author = {Kamath Hosdurg, Chethan and Chatterjee, Sanjit},
journal = {Algorithmica},
number = {4},
pages = {1321 -- 1362},
publisher = {Springer},
title = {{A closer look at multiple-forking: Leveraging (in)dependence for a tighter bound}},
doi = {10.1007/s00453-015-9997-6},
volume = {74},
year = {2016},
}
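The (single) forking abstraction that the multiple-forking game generalizes is compact enough to state in code; a toy sketch (the wrapper shown is a dummy; in a real reduction it runs the adversary on the supplied coins and oracle answers):

# Basic forking experiment: rerun the wrapper on the SAME coins, replaying
# identical random-oracle answers up to the fork index and fresh ones after.
import random

Q = 10                                      # bound on random-oracle calls

def fork(wrapper):
    coins = random.random()                 # same coins for both runs
    h1 = [random.randrange(2**16) for _ in range(Q)]
    i, out1 = wrapper(coins, h1)            # first run: forking index + output
    if i is None:
        return None
    h2 = h1[:i] + [random.randrange(2**16) for _ in range(Q - i)]
    j, out2 = wrapper(coins, h2)            # rewound run: fresh answers from i on
    if j == i and h1[i] != h2[i]:
        return out1, out2                   # two related transcripts, as needed
    return None

def wrapper(coins, h):                      # dummy wrapper for illustration
    i = int(coins * Q) % Q                  # pretend query i is the crucial one
    return i, (i, h[i])

print(fork(wrapper))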
@inproceedings{1179,
abstract = {Computational notions of entropy have recently found many applications, including leakage-resilient cryptography, deterministic encryption or memory delegation. The two main types of results which make computational notions so useful are (1) chain rules, which quantify by how much the computational entropy of a variable decreases if conditioned on some other variable, and (2) transformations, which quantify to what extent one type of entropy implies another.
Such chain rules and transformations typically lose a significant amount in the quality of the entropy, and are the reason why applying these results one gets rather weak quantitative security bounds. In this paper we for the first time prove lower bounds in this context, showing that existing results for transformations are, unfortunately, basically optimal for non-adaptive black-box reductions (and it’s hard to imagine how non-black-box reductions or adaptivity could be useful here).
A variable X has k bits of HILL entropy of quality (ϵ, s) if there exists a variable Y with k bits of min-entropy which cannot be distinguished from X with advantage ϵ by distinguishing circuits of size s. A weaker notion is Metric entropy, where we switch quantifiers, and only require that for every distinguisher of size s, such a Y exists.
We first describe our result concerning transformations. By definition, HILL implies Metric without any loss in quality. Metric entropy often comes up in applications, but must be transformed to HILL for meaningful security guarantees. The best known result states that if a variable X has k bits of Metric entropy of quality (ϵ, s), then it has k bits of HILL entropy with quality (2ϵ, s · ϵ^2). We show that this loss of a factor Ω(ϵ^{-2}) in circuit size is necessary. In fact, we show the stronger result that this loss is already necessary when transforming so-called deterministic real-valued Metric entropy to randomised Boolean Metric entropy (both these variants of Metric entropy are implied by HILL without loss in quality).
The chain rule for HILL entropy states that if X has k bits of HILL entropy of quality (ϵ, s), then for any variable Z of length m, X conditioned on Z has k − m bits of HILL entropy with quality (ϵ, s · ϵ^2/2^m). We show that a loss of Ω(2^m/ϵ) in circuit size is necessary here. Note that this still leaves a gap of ϵ between the known bound and our lower bound.},
author = {Pietrzak, Krzysztof Z and Skórski, Maciej},
location = {Beijing, China},
pages = {183 -- 203},
publisher = {Springer},
title = {{Pseudoentropy: Lower-bounds for chain rules and transformations}},
doi = {10.1007/978-3-662-53641-4_8},
volume = {9985},
year = {2016},
}
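Spelled out, the quantifier switch between the two entropy notions used in this abstract (with $\mathcal{D}_s$ the class of distinguishing circuits of size $s$):

\[
\begin{aligned}
H^{\mathsf{HILL}}_{\epsilon,s}(X) \ge k
&\iff \exists\, Y\ \bigl(H_\infty(Y) \ge k\bigr)\ \forall\, D \in \mathcal{D}_s:\
\bigl|\Pr[D(X)=1]-\Pr[D(Y)=1]\bigr| \le \epsilon, \\
H^{\mathsf{Metric}}_{\epsilon,s}(X) \ge k
&\iff \forall\, D \in \mathcal{D}_s\ \exists\, Y\ \bigl(H_\infty(Y) \ge k\bigr):\
\bigl|\Pr[D(X)=1]-\Pr[D(Y)=1]\bigr| \le \epsilon.
\end{aligned}
\]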
@inproceedings{1644,
abstract = {Increasing the computational complexity of evaluating a hash function, both for the honest users as well as for an adversary, is a useful technique employed for example in password-based cryptographic schemes to impede brute-force attacks, and also in so-called proofs of work (used in protocols like Bitcoin) to show that a certain amount of computation was performed by a legitimate user. A natural approach to adjust the complexity of a hash function is to iterate it c times, for some parameter c, in the hope that any query to the scheme requires c evaluations of the underlying hash function. However, results by Dodis et al. (Crypto 2012) imply that plain iteration falls short of achieving this goal, and designing schemes which provably have such a desirable property remained an open problem. This paper formalizes explicitly what it means for a given scheme to amplify the query complexity of a hash function. In the random oracle model, the goal of a secure query-complexity amplifier (QCA) scheme is captured as transforming, in the sense of indifferentiability, a random oracle allowing R queries (for the adversary) into one provably allowing only r < R queries. Turned around, this means that making r queries to the scheme requires at least R queries to the actual random oracle. Second, a new scheme, called collision-free iteration, is proposed and proven to achieve c-fold QCA for both the honest parties and the adversary, for any fixed parameter c.},
author = {Demay, Grégory and Gazi, Peter and Maurer, Ueli and Tackmann, Björn},
location = {Lugano, Switzerland},
pages = {159 -- 180},
publisher = {Springer},
title = {{Query-complexity amplification for random oracles}},
doi = {10.1007/978-3-319-17470-9_10},
volume = {9063},
year = {2015},
}
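Plain iteration fails as a query-complexity amplifier because hash chains for different inputs can merge; the fix is to make each chain input- and round-specific. A plausible rendering of that idea (the paper's exact encoding of collision-free iteration may differ):

# Plain iteration H^c versus a collision-free variant that hashes the input
# and round index into every step, keeping chains disjoint (sketch only).
import hashlib

def H(data: bytes) -> bytes:
    return hashlib.sha256(data).digest()

def plain_iter(x: bytes, c: int) -> bytes:
    y = x
    for _ in range(c):
        y = H(y)                       # chains may merge after a collision
    return y

def collision_free_iter(x: bytes, c: int) -> bytes:
    y = H(x)
    for i in range(c):
        y = H(len(x).to_bytes(8, "big") + x + i.to_bytes(8, "big") + y)
    return y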
@inproceedings{1645,
abstract = {Secret-key constructions are often proved secure in a model where one or more underlying components are replaced by an idealized oracle accessible to the attacker. This model gives rise to information-theoretic security analyses, and several advances have been made in this area over the last few years. This paper provides a systematic overview of what is achievable in this model, and how existing works fit into this view.},
author = {Gazi, Peter and Tessaro, Stefano},
booktitle = {2015 IEEE Information Theory Workshop},
location = {Jerusalem, Israel},
publisher = {IEEE},
title = {{Secret-key cryptography from ideal primitives: A systematic overview}},
doi = {10.1109/ITW.2015.7133163},
year = {2015},
}
@inproceedings{1646,
abstract = {A pseudorandom function (PRF) is a keyed function F : K × X → Y where, for a random key k ∈ K, the function F(k, ·) is indistinguishable from a uniformly random function, given black-box access. A key-homomorphic PRF has the additional feature that for any keys k, k' and any input x, we have F(k + k', x) = F(k, x) ⊕ F(k', x) for some group operations +, ⊕ on K and Y, respectively. A constrained PRF for a family of sets S ⊆ P(X) has the property that, given any key k and set S ∈ S, one can efficiently compute a “constrained” key k_S that enables evaluation of F(k, x) on all inputs x ∈ S, while the values F(k, x) for x ∉ S remain pseudorandom even given k_S. In this paper we construct PRFs that are simultaneously constrained and key-homomorphic, where the homomorphic property holds even for constrained keys. We first show that the multilinear map-based bit-fixing and circuit-constrained PRFs of Boneh and Waters (Asiacrypt 2013) can be modified to also be key-homomorphic. We then show that the LWE-based key-homomorphic PRFs of Banerjee and Peikert (Crypto 2014) are essentially already prefix-constrained PRFs, using a (non-obvious) definition of constrained keys and associated group operation. Moreover, the constrained keys themselves are pseudorandom, and the constraining and evaluation functions can all be computed in low depth. As an application of key-homomorphic constrained PRFs, we construct a proxy re-encryption scheme with fine-grained access control. This scheme allows storing encrypted data on an untrusted server, where each file can be encrypted relative to some attributes, so that only parties whose constrained keys match the attributes can decrypt. Moreover, the server can re-key (arbitrary subsets of) the ciphertexts without learning anything about the plaintexts, thus permitting efficient and fine-grained revocation.},
author = {Banerjee, Abhishek and Fuchsbauer, Georg and Peikert, Chris and Pietrzak, Krzysztof Z and Stevens, Sophie},
location = {Warsaw, Poland},
pages = {31 -- 60},
publisher = {Springer},
title = {{Key-homomorphic constrained pseudorandom functions}},
doi = {10.1007/978-3-662-46497-7_2},
volume = {9015},
year = {2015},
}
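The key-homomorphism property defined in this abstract can be checked on the folklore random-oracle PRF F(k, x) = H(x)^k in a cyclic group, which is not one of the paper's constructions but illustrates the definition with + on keys and · on outputs:

# Key-homomorphism check on the classic random-oracle PRF F(k, x) = H(x)^k
# (toy group parameters, illustrative only).
import hashlib, secrets

p = 2**127 - 1                          # toy prime; arithmetic in Z_p^*

def Hgrp(x: bytes) -> int:              # hash into the group
    return int.from_bytes(hashlib.sha256(x).digest(), "big") % p

def F(k: int, x: bytes) -> int:
    return pow(Hgrp(x), k, p)

k1 = secrets.randbelow(p - 1)
k2 = secrets.randbelow(p - 1)
x = b"input"
# F(k1 + k2, x) = F(k1, x) * F(k2, x): '+' on keys, '*' on outputs.
assert F(k1 + k2, x) == (F(k1, x) * F(k2, x)) % p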
@inproceedings{1647,
abstract = {Round-optimal blind signatures are notoriously hard to construct in the standard model, especially in the malicious-signer model, where blindness must hold under adversarially chosen keys. This is substantiated by several impossibility results. The only construction that can be termed theoretically efficient, by Garg and Gupta (Eurocrypt’14), requires complexity leveraging, inducing an exponential security loss. We present a construction of practically efficient round-optimal blind signatures in the standard model. It is conceptually simple and builds on the recent structure-preserving signatures on equivalence classes (SPS-EQ) from Asiacrypt’14. While the traditional notion of blindness follows from standard assumptions, we prove blindness under adversarially chosen keys under an interactive variant of DDH. However, we neither require non-uniform assumptions nor complexity leveraging. We then show how to extend our construction to partially blind signatures and to blind signatures on message vectors, which yield a construction of one-show anonymous credentials à la “anonymous credentials light” (CCS’13) in the standard model. Furthermore, we give the first SPS-EQ construction under non-interactive assumptions and show how SPS-EQ schemes imply conventional structure-preserving signatures, which allows us to apply optimality results for the latter to SPS-EQ.},
author = {Fuchsbauer, Georg and Hanser, Christian and Slamanig, Daniel},
location = {Santa Barbara, CA, USA},
pages = {233 -- 253},
publisher = {Springer},
title = {{Practical round-optimal blind signatures in the standard model}},
doi = {10.1007/978-3-662-48000-7_12},
volume = {9216},
year = {2015},
}
@inproceedings{1648,
abstract = {Generalized Selective Decryption (GSD), introduced by Panjwani [TCC’07], is a game for a symmetric encryption scheme Enc that captures the difficulty of proving adaptive security of certain protocols, most notably the Logical Key Hierarchy (LKH) multicast encryption protocol. In the GSD game there are n keys k_1, ..., k_n, which the adversary may adaptively corrupt (learn); moreover, it can ask for encryptions Enc_{k_i}(k_j) of keys under other keys. The adversary’s task is to distinguish keys (which it cannot trivially compute) from random. Proving the hardness of GSD assuming only IND-CPA security of Enc is surprisingly hard. Using “complexity leveraging” loses a factor exponential in n, which makes the proof practically meaningless. We can think of the GSD game as building a graph on n vertices, where we add an edge i → j when the adversary asks for an encryption of k_j under k_i. If restricted to graphs of depth ℓ, Panjwani gave a reduction that loses only a factor exponential in ℓ (not n). To date, this is the only non-trivial result known for GSD. In this paper we give almost-polynomial reductions for large classes of graphs. Most importantly, we prove the security of the GSD game restricted to trees losing only a quasi-polynomial factor n^{3 log n + 5}. Trees are an important special case capturing real-world protocols like the LKH protocol. Our new bound improves upon Panjwani’s on some LKH variants proposed in the literature where the underlying tree is not balanced. Our proof builds on ideas from the “nested hybrids” technique recently introduced by Fuchsbauer et al. [Asiacrypt’14] for proving the adaptive security of constrained PRFs.},
author = {Fuchsbauer, Georg and Jafargholi, Zahra and Pietrzak, Krzysztof Z},
location = {Santa Barbara, CA, USA},
pages = {601 -- 620},
publisher = {Springer},
title = {{A quasipolynomial reduction for generalized selective decryption on trees}},
doi = {10.1007/978-3-662-47989-6_29},
volume = {9215},
year = {2015},
}
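The GSD game's bookkeeping is easy to render in code: two oracles and the graph they induce. A toy sketch (XOR stands in for the IND-CPA scheme; the point is the edge/corruption bookkeeping, not the cipher):

# Toy GSD game oracles: 'encrypt' adds edge i -> j for Enc_{k_i}(k_j),
# 'corrupt' reveals a key (illustrative only).
import secrets

class GSD:
    def __init__(self, n):
        self.keys = [secrets.token_bytes(16) for _ in range(n)]
        self.edges = set()
        self.corrupted = set()
    def encrypt(self, i, j):
        self.edges.add((i, j))
        pad = self.keys[i]                 # toy stand-in for Enc under k_i
        return bytes(a ^ b for a, b in zip(pad, self.keys[j]))
    def corrupt(self, i):
        self.corrupted.add(i)
        return self.keys[i]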
@inproceedings{1649,
abstract = {We extend a commitment scheme based on the learning with errors over rings (RLWE) problem, and present efficient companion zero-knowledge proofs of knowledge. Our scheme maps elements from the ring (or equivalently, n elements from Z_q) to a small constant number of ring elements.},
author = {Benhamouda, Fabrice and Krenn, Stephan and Lyubashevsky, Vadim and Pietrzak, Krzysztof Z},
location = {Vienna, Austria},
pages = {305 -- 325},
publisher = {Springer},
title = {{Efficient zero-knowledge proofs for commitments from learning with errors over rings}},
doi = {10.1007/978-3-319-24174-6_16},
volume = {9326},
year = {2015},
}
@inproceedings{1650,
abstract = {We consider the task of deriving a key with high HILL entropy (i.e., being computationally indistinguishable from a key with high min-entropy) from an unpredictable source.
Prior to this work, the only known way to transform unpredictability into a key that was ϵ-indistinguishable from having min-entropy was via pseudorandomness, for example by Goldreich-Levin (GL) hardcore bits. This approach has the inherent limitation that from a source with k bits of unpredictability entropy one can derive a key of length (and thus HILL entropy) at most k − 2 log(1/ϵ) bits. In many settings, e.g. when dealing with biometric data, such a 2 log(1/ϵ)-bit entropy loss is not an option. Our main technical contribution is a theorem stating that in the high-entropy regime, unpredictability implies HILL entropy. Concretely, any variable K with |K| − d bits of unpredictability entropy has the same amount of so-called metric entropy (against real-valued, deterministic distinguishers), which is known to imply the same amount of HILL entropy. The loss in circuit size in this argument is exponential in the entropy gap d, and thus this result only applies for small d (i.e., where the size of distinguishers considered is exponential in d).
To overcome the above restriction, we investigate whether it is possible to first “condense” unpredictability entropy and make the entropy gap small. We show that any source with k bits of unpredictability can be condensed into a source of length k with k − 3 bits of unpredictability entropy. Our condenser simply “abuses” the GL construction and derives a k-bit key from a source with k bits of unpredictability. The original GL theorem implies nothing when extracting that many bits, but we show that in this regime, GL still behaves like a “condenser” for unpredictability. This result comes with two caveats: (1) the loss in circuit size is exponential in k and (2) we require that the source we start with has no HILL entropy (equivalently, one can efficiently check if a guess is correct). We leave it as an intriguing open problem to overcome these restrictions or to prove they are inherent.},
author = {Skórski, Maciej and Golovnev, Alexander and Pietrzak, Krzysztof Z},
location = {Kyoto, Japan},
pages = {1046 -- 1057},
publisher = {Springer},
title = {{Condensed unpredictability}},
doi = {10.1007/978-3-662-47672-7_85},
volume = {9134},
year = {2015},
}
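Concretely, the GL-based derivation discussed in this abstract computes each output bit as an inner product over GF(2) with a public random seed; a minimal sketch of the standard construction (the paper's contribution is the analysis when extracting k bits, not the construction itself):

# Goldreich-Levin style key derivation: bit i is <x, r_i> mod 2.
import secrets

def inner(x: int, r: int) -> int:
    return bin(x & r).count("1") % 2       # <x, r> over GF(2)

def gl_derive(x: int, seeds) -> int:       # one derived bit per seed
    out = 0
    for r in seeds:
        out = (out << 1) | inner(x, r)
    return out

nbits, k = 64, 64                          # extract as many bits as the source has
seeds = [secrets.randbits(nbits) for _ in range(k)]
key = gl_derive(secrets.randbits(nbits), seeds)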
@inproceedings{1651,
abstract = {Cryptographic e-cash allows off-line electronic transactions between a bank, users and merchants in a secure and anonymous fashion. A plethora of e-cash constructions has been proposed in the literature; however, these traditional e-cash schemes only allow coins to be transferred once between users and merchants. Ideally, we would like users to be able to transfer coins between each other multiple times before deposit, as happens with physical cash. “Transferable” e-cash schemes are the solution to this problem. Unfortunately, the currently proposed schemes are either completely impractical or do not achieve the desirable anonymity properties without compromises, such as assuming the existence of a trusted “judge” who can trace all coins and users in the system. This paper presents the first efficient and fully anonymous transferable e-cash scheme without any trusted third parties. We start by revising the security and anonymity properties of transferable e-cash to capture issues that were previously overlooked. For our construction we use the recently proposed malleable signatures by Chase et al. to allow the secure and anonymous transfer of coins, combined with a new efficient double-spending detection mechanism. Finally, we discuss an instantiation of our construction.},
author = {Baldimtsi, Foteini and Chase, Melissa and Fuchsbauer, Georg and Kohlweiss, Markulf},
location = {Gaithersburg, MD, USA},
pages = {101 -- 124},
publisher = {Springer},
title = {{Anonymous transferable e-cash}},
doi = {10.1007/978-3-662-46447-2_5},
volume = {9020},
year = {2015},
}