@phdthesis{83, abstract = {A proof system is a protocol between a prover and a verifier over a common input in which an honest prover convinces the verifier of the validity of true statements. Motivated by the success of decentralized cryptocurrencies, exemplified by Bitcoin, the focus of this thesis is on proof systems that have found applications in sustainable alternatives to Bitcoin, such as the Spacemint and Chia cryptocurrencies. In particular, we focus on proofs of space and proofs of sequential work. Proofs of space (PoSpace) were suggested as a more ecological, economical, and egalitarian alternative to the energy-wasteful proof-of-work mining of Bitcoin. However, the state-of-the-art constructions of PoSpace are based on sophisticated graph pebbling lower bounds, and are therefore complex. Moreover, when these PoSpace are used in cryptocurrencies like Spacemint, miners can only start mining after ensuring that a commitment to their space has been added to the blockchain in a special transaction. Proofs of sequential work (PoSW) are proof systems in which a prover, upon receiving a statement x and a time parameter T, computes a proof which convinces the verifier that T time units have passed since x was received. Whereas Spacemint assumes synchrony to retain some interesting Bitcoin dynamics, Chia requires PoSW with unique proofs, i.e., PoSW in which it is hard to come up with more than one accepting proof for any true statement. In this thesis we construct simple and practically efficient PoSpace and PoSW. When our PoSpace are used in cryptocurrencies, miners can start mining on the fly, as in Bitcoin. Moreover, unlike current constructions of PoSW, which achieve either efficient verification of sequential work or faster-than-recomputing verification of the correctness of proofs, but not both at the same time, ours achieve the best of both worlds.}, author = {Abusalah, Hamza M}, issn = {2663-337X}, pages = {59}, publisher = {Institute of Science and Technology Austria}, title = {{Proof systems for sustainable decentralized cryptocurrencies}}, doi = {10.15479/AT:ISTA:TH_1046}, year = {2018}, }
@inproceedings{108, abstract = {Universal hashing has found many applications in computer science. In cryptography, the most important fact about universal families is the so-called Leftover Hash Lemma, proved by Impagliazzo, Levin and Luby. In the language of modern cryptography it states that almost universal families are good extractors. In this work we provide a somewhat surprising characterization in the opposite direction. Namely, every extractor with sufficiently good parameters yields a universal family on a noticeable fraction of its inputs. Our proof technique is based on tools from extremal graph theory applied to the 'collision graph' induced by the extractor, and may be of independent interest. We discuss possible applications to the theory of randomness extractors and non-malleable codes.}, author = {Obremski, Maciej and Skorski, Maciej}, location = {Vail, CO, USA}, publisher = {IEEE}, title = {{Inverted leftover hash lemma}}, doi = {10.1109/ISIT.2018.8437654}, volume = {2018}, year = {2018}, }
@article{107, abstract = {We introduce the notion of “non-malleable codes”, which relaxes the notions of error correction and error detection. Informally, a code is non-malleable if the message contained in a modified codeword is either the original message, or a completely unrelated value.
In contrast to error correction and error detection, non-malleability can be achieved for very rich classes of modifications. We construct an efficient code that is non-malleable with respect to modifications that affect each bit of the codeword arbitrarily (i.e., leave it untouched, flip it, or set it to either 0 or 1), but independently of the value of the other bits of the codeword. Using the probabilistic method, we also show a very strong and general statement: there exists a non-malleable code for every “small enough” family F of functions via which codewords can be modified. Although this probabilistic method argument does not directly yield efficient constructions, it gives us efficient non-malleable codes in the random-oracle model for very general classes of tampering functions, e.g., functions where every bit in the tampered codeword can depend arbitrarily on any 99% of the bits in the original codeword. As an application of non-malleable codes, we show that they provide an elegant algorithmic solution to the task of protecting functionalities implemented in hardware (e.g., signature cards) against “tampering attacks.” In such attacks, the secret state of a physical system is tampered with, in the hope that future interaction with the modified system will reveal some secret information. This problem was previously studied in the work of Gennaro et al. in 2004 under the name “algorithmic tamper-proof security” (ATP). We show that non-malleable codes can be used to achieve important improvements over the prior work. In particular, we show that any functionality can be made secure against a large class of tampering attacks, simply by encoding the secret state with a non-malleable code while it is stored in memory.}, author = {Dziembowski, Stefan and Pietrzak, Krzysztof Z and Wichs, Daniel}, journal = {Journal of the ACM}, number = {4}, publisher = {ACM}, title = {{Non-malleable codes}}, doi = {10.1145/3178432}, volume = {65}, year = {2018}, }
@inproceedings{193, abstract = {We show attacks on five data-independent memory-hard functions (iMHFs) that were submitted to the Password Hashing Competition (PHC). Informally, an MHF is a function which cannot be evaluated on dedicated hardware, like ASICs, at significantly lower hardware and/or energy cost than evaluating a single instance on a standard single-core architecture. Data-independent means the memory access pattern of the function is independent of the input; this makes iMHFs harder to construct than data-dependent ones, but the latter can be attacked by various side-channel attacks. Following [Alwen-Blocki'16], we capture the evaluation of an iMHF as a directed acyclic graph (DAG). The cumulative parallel pebbling complexity of this DAG is a measure of the hardware cost of evaluating the iMHF on an ASIC. Ideally, one would like the complexity of a DAG underlying an iMHF to be as close to quadratic in the number of nodes of the graph as possible. Instead, we show that (the DAGs underlying) the following iMHFs are far from this bound: Rig.v2, TwoCats, and Gambit each have an exponent of no more than 1.75. Moreover, we show that the iMHF modes of the PHC finalists Pomelo and Lyra2 have exponents of at most 1.83 and 1.67, respectively. To show this, we investigate a combinatorial property of each underlying DAG (called its depth-robustness).
By establishing upper bounds on this property, we are then able to apply the general technique of [Alwen-Blocki'16] for analyzing the hardware cost of an iMHF.}, author = {Alwen, Joel F and Gazi, Peter and Kamath Hosdurg, Chethan and Klein, Karen and Osang, Georg F and Pietrzak, Krzysztof Z and Reyzin, Leonid and Rolinek, Michal and Rybar, Michal}, booktitle = {Proceedings of the 2018 on Asia Conference on Computer and Communications Security}, location = {Incheon, Republic of Korea}, pages = {51 -- 65}, publisher = {ACM}, title = {{On the memory-hardness of data-independent password hashing functions}}, doi = {10.1145/3196494.3196534}, year = {2018}, }
@inproceedings{300, abstract = {We introduce a formal quantitative notion of “bit security” for a general type of cryptographic games (capturing both decision and search problems), aimed at capturing the intuition that a cryptographic primitive with k-bit security is as hard to break as an ideal cryptographic function requiring a brute-force attack on a k-bit key space. Our new definition matches the notion of bit security commonly used by cryptographers and cryptanalysts when studying search (e.g., key recovery) problems, where the use of the traditional definition is well established. However, it produces a quantitatively different metric in the case of decision (indistinguishability) problems, where the use of (a straightforward generalization of) the traditional definition is more problematic and leads to a number of paradoxical situations or mismatches between theoretical/provable security and practical/common-sense intuition. Key to our new definition is to consider adversaries that may explicitly declare failure of the attack. We support and justify the new definition by proving a number of technical results, including tight reductions between several standard cryptographic problems, a new hybrid theorem that preserves bit security, and an application to the security analysis of indistinguishability primitives making use of (approximate) floating-point numbers. This is the first result showing that (standard-precision) 53-bit floating-point numbers can be used to achieve 100-bit security in the context of cryptographic primitives with general indistinguishability-based security definitions. Previous results of this type applied only to search problems or to special types of decision problems.}, author = {Micciancio, Daniele and Walter, Michael}, location = {Tel Aviv, Israel}, pages = {3 -- 28}, publisher = {Springer}, title = {{On the bit security of cryptographic primitives}}, doi = {10.1007/978-3-319-78381-9_1}, volume = {10820}, year = {2018}, }