@inproceedings{650, abstract = {In this work we present a short and unified proof for the Strong and Weak Regularity Lemma, based on the cryptographic technique called low-complexity approximations. In short, both problems reduce to a task of finding constructively an approximation for a certain target function under a class of distinguishers (test functions), where distinguishers are combinations of simple rectangle-indicators. In our case these approximations can be learned by a simple iterative procedure, which yields a unified and simple proof, achieving an explicit bound on the partition size for any graph with density d and any approximation parameter. The novelty in our proof is: (a) a simple approach which yields both the strong and the weak variant, and (b) improvements when d = o(1). At an abstract level, our proof can be seen as a refinement and simplification of the “analytic” proof given by Lovász and Szegedy.}, author = {Skórski, Maciej}, editor = {Jäger, Gerhard and Steila, Silvia}, issn = {0302-9743}, location = {Bern, Switzerland}, pages = {586--599}, publisher = {Springer}, title = {{A cryptographic view of regularity lemmas: Simpler unified proofs and refined bounds}}, doi = {10.1007/978-3-319-55911-7_42}, volume = {10185}, year = {2017}, } @inproceedings{6527, abstract = {A memory-hard function (MHF) f_n with parameter n can be computed in sequential time and space n. Simultaneously, a high amortized parallel area-time complexity (aAT) is incurred per evaluation. In practice, MHFs are used to limit the rate at which an adversary (using a custom computational device) can evaluate a security-sensitive function that still occasionally needs to be evaluated by honest users (using an off-the-shelf general-purpose device). The most prevalent examples of such sensitive functions are Key Derivation Functions (KDFs) and password hashing algorithms, where rate limits help mitigate off-line dictionary attacks. As the honest users’ inputs to these functions are often (low-entropy) passwords, special attention is given to a class of side-channel resistant MHFs called iMHFs. Essentially all iMHFs can be viewed as some mode of operation (making n calls to some round function) given by a directed acyclic graph (DAG) with very low indegree. Recently, a combinatorial property of a DAG has been identified (called "depth-robustness") which results in good provable security for an iMHF based on that DAG. Depth-robust DAGs have also proven useful in other cryptographic applications. Unfortunately, up till now, all known very depth-robust DAGs are impractically complicated, and little is known about their exact (i.e., non-asymptotic) depth-robustness, both in theory and in practice. In this work we build and analyze (both formally and empirically) several exceedingly simple and efficient-to-navigate practical DAGs for use in iMHFs and other applications. For each DAG we: * Prove that their depth-robustness is asymptotically maximal. * Prove bounds on their exact depth-robustness that are at least 3 orders of magnitude better than known bounds for other practical iMHFs. * Implement and empirically evaluate their depth-robustness and aAT against a variety of state-of-the-art (and several new) depth-reduction and low-aAT attacks. We find that, against all attacks, the new DAGs perform significantly better in practice than Argon2i, the most widely deployed iMHF.
Along the way we also improve the best known empirical attacks on the aAT of Argon2i by implementing and testing several heuristic versions of a (hitherto purely theoretical) depth-reduction attack. Finally, we demonstrate the practicality of our constructions by modifying the Argon2i code base to use one of the new high-aAT DAGs. Experimental benchmarks on a standard off-the-shelf CPU show that the new modifications do not adversely affect the impressive throughput of Argon2i (despite seemingly enjoying significantly higher aAT).}, author = {Alwen, Joel F and Blocki, Jeremiah and Harsha, Ben}, booktitle = {Proceedings of the 2017 ACM SIGSAC Conference on Computer and Communications Security}, isbn = {9781450349468}, location = {Dallas, TX, USA}, pages = {1001--1017}, publisher = {ACM Press}, title = {{Practical graphs for optimal side-channel resistant memory-hard functions}}, doi = {10.1145/3133956.3134031}, year = {2017}, } @inproceedings{6526, abstract = {This paper studies the complexity of estimating Rényi divergences of discrete distributions: p, observed from samples, and the baseline distribution q, known a priori. Extending the results of Acharya et al. (SODA'15) on estimating Rényi entropy, we present improved estimation techniques together with upper and lower bounds on the sample complexity. We show that, contrary to estimating Rényi entropy, where a sublinear (in the alphabet size) number of samples suffices, the sample complexity depends heavily on events that occur with small probability under q, and is unbounded in general (no matter what estimation technique is used). For any divergence of integer order greater than 1, we provide upper and lower bounds on the number of samples that depend on the probabilities of p and q (the lower bounds hold for non-integer orders as well). We conclude that the worst-case sample complexity is polynomial in the alphabet size if and only if the probabilities of q are non-negligible. This gives theoretical insight into heuristics used in the applied literature to handle numerical instability, which occurs for small probabilities of q. Our result shows that such small probabilities should be handled with care, not only because of numerical issues but also because of a blow-up in the sample complexity.}, author = {Skórski, Maciej}, booktitle = {2017 IEEE International Symposium on Information Theory (ISIT)}, isbn = {9781509040964}, location = {Aachen, Germany}, publisher = {IEEE}, title = {{On the complexity of estimating Rényi divergences}}, doi = {10.1109/isit.2017.8006529}, year = {2017}, } @inproceedings{697, abstract = {De, Trevisan and Tulsiani [CRYPTO 2010] show that every distribution over n-bit strings which has constant statistical distance to uniform (e.g., the output of a pseudorandom generator mapping n-1 to n bit strings) can be distinguished from the uniform distribution with advantage epsilon by a circuit of size O(2^n epsilon^2). We generalize this result, showing that a distribution which has less than k bits of min-entropy can be distinguished from any distribution with k bits of delta-smooth min-entropy with advantage epsilon by a circuit of size O(2^k epsilon^2/delta^2). As a special case, this implies that any distribution with support of size at most 2^k (e.g., the output of a pseudoentropy generator mapping k to n bit strings) can be distinguished from any given distribution with min-entropy k+1 with advantage epsilon by a circuit of size O(2^k epsilon^2).
Our result thus shows that pseudoentropy distributions face basically the same non-uniform attacks as pseudorandom distributions.}, author = {Pietrzak, Krzysztof Z and Skórski, Maciej}, issn = {1868-8969}, location = {Warsaw, Poland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Non-uniform attacks against pseudoentropy}}, doi = {10.4230/LIPIcs.ICALP.2017.39}, volume = {80}, year = {2017}, } @inproceedings{710, abstract = {We revisit the problem of estimating the entropy of discrete distributions from independent samples, studied recently by Acharya, Orlitsky, Suresh and Tyagi (SODA 2015), improving their upper and lower bounds on the necessary sample size n. For estimating Rényi entropy of order alpha, up to constant accuracy and error probability, we show the following: * Upper bounds n = O(1) 2^{(1-1/alpha)H_alpha} for integer alpha > 1, as the worst case over distributions with Rényi entropy equal to H_alpha. * Lower bounds n = Omega(1) K^{1-1/alpha} for any real alpha > 1, with the constant being an inverse polynomial of the accuracy, as the worst case over all distributions on K elements. Our upper bounds essentially replace the alphabet size by a factor exponential in the entropy, which offers improvements especially in low- or medium-entropy regimes (interesting, for example, in anomaly detection). As for the lower bounds, our proof explicitly shows how the complexity depends on both the alphabet size and the accuracy, partially solving the open problem posed in previous works. The argument for the upper bounds derives a clean identity for the variance of the falling-power sum of a multinomial distribution. Our approach for the lower bounds utilizes convex optimization to find a distribution with possibly worse estimation performance, and may be of independent interest as a tool to work with Le Cam’s two-point method.}, author = {Obremski, Maciej and Skórski, Maciej}, issn = {1868-8969}, location = {Berkeley, USA}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Renyi entropy estimation revisited}}, doi = {10.4230/LIPIcs.APPROX-RANDOM.2017.20}, volume = {81}, year = {2017}, }
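Editorial note (outside any bibliography entry, so BibTeX ignores it): entry 710 above builds its upper bounds on the falling-power sum of multinomial counts. The Python sketch below illustrates that standard construction, an unbiased falling-power estimate of the power sum sum_i p_i^alpha followed by the plug-in Rényi entropy formula, for integer alpha > 1. It is a minimal illustration under that assumption, not the authors' code; the names estimate_renyi_entropy, falling_power and samples are hypothetical.

# Minimal sketch (assumption: the textbook falling-power construction, not the
# authors' implementation). For counts N_i of a multinomial sample of size n,
# E[N_i * (N_i - 1) * ... * (N_i - alpha + 1)] = n^(alpha) * p_i^alpha, so the
# normalized falling-power sum is an unbiased estimate of sum_i p_i^alpha.
import math
from collections import Counter


def falling_power(m, k):
    """Return m^(k) = m * (m - 1) * ... * (m - k + 1)."""
    out = 1
    for j in range(k):
        out *= (m - j)
    return out


def estimate_renyi_entropy(samples, alpha):
    """Plug-in estimate (in nats) of Renyi entropy of integer order alpha > 1."""
    assert isinstance(alpha, int) and alpha > 1
    n = len(samples)
    assert n >= alpha, "need at least alpha samples"
    counts = Counter(samples)
    # Unbiased estimate of the power sum P_alpha = sum_i p_i^alpha.
    power_sum_hat = sum(falling_power(c, alpha) for c in counts.values())
    power_sum_hat /= falling_power(n, alpha)
    if power_sum_hat <= 0:  # every symbol seen fewer than alpha times
        return float("inf")
    return math.log(power_sum_hat) / (1 - alpha)


# Example: collision entropy (alpha = 2) of a biased coin from 10^4 samples.
if __name__ == "__main__":
    import random
    data = [random.random() < 0.7 for _ in range(10_000)]
    print(estimate_renyi_entropy(data, 2))  # close to -log(0.7^2 + 0.3^2)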