@inproceedings{14888, abstract = {A face in a curve arrangement is called popular if it is bounded by the same curve multiple times. Motivated by the automatic generation of curved nonogram puzzles, we investigate possibilities to eliminate the popular faces in an arrangement by inserting a single additional curve. This turns out to be NP-hard; however, it becomes tractable when the number of popular faces is small: We present a probabilistic FPT-approach in the number of popular faces.}, author = {De Nooijer, Phoebe and Terziadis, Soeren and Weinberger, Alexandra and Masárová, Zuzana and Mchedlidze, Tamara and Löffler, Maarten and Rote, Günter}, booktitle = {31st International Symposium on Graph Drawing and Network Visualization}, isbn = {9783031492747}, issn = {1611-3349}, location = {Isola delle Femmine, Palermo, Italy}, pages = {18--33}, publisher = {Springer Nature}, title = {{Removing popular faces in curve arrangements}}, doi = {10.1007/978-3-031-49275-4_2}, volume = {14466}, year = {2024}, } @inproceedings{12854, abstract = {The main idea behind BUBAAK is to run multiple program analyses in parallel and use runtime monitoring and enforcement to observe and control their progress in real time. The analyses send information about (un)explored states of the program and discovered invariants to a monitor. The monitor processes the received data and can force an analysis to stop the search of certain program parts (which have already been analyzed by other analyses), or to make it utilize a program invariant found by another analysis. At SV-COMP 2023, the implementation of data exchange between the monitor and the analyses was not yet completed, which is why BUBAAK only ran several analyses in parallel, without any coordination. Still, BUBAAK won the meta-category FalsificationOverall and placed very well in several other (sub)-categories of the competition.}, author = {Chalupa, Marek and Henzinger, Thomas A}, booktitle = {Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783031308192}, issn = {1611-3349}, location = {Paris, France}, pages = {535--540}, publisher = {Springer Nature}, title = {{Bubaak: Runtime monitoring of program verifiers}}, doi = {10.1007/978-3-031-30820-8_32}, volume = {13994}, year = {2023}, } @inproceedings{12856, abstract = {As the complexity and criticality of software increase every year, so does the importance of run-time monitoring. Third-party monitoring, with limited knowledge of the monitored software, and best-effort monitoring, which keeps pace with the monitored software, are especially valuable, yet underexplored areas of run-time monitoring. Most existing monitoring frameworks do not support their combination because they either require access to the monitored code for instrumentation purposes or the processing of all observed events, or both. We present a middleware framework, VAMOS, for the run-time monitoring of software which is explicitly designed to support third-party and best-effort scenarios. The design goals of VAMOS are (i) efficiency (keeping pace at low overhead), (ii) flexibility (the ability to monitor black-box code through a variety of different event channels, and the connectability to monitors written in different specification languages), and (iii) ease-of-use. To achieve its goals, VAMOS combines aspects of event broker and event recognition systems with aspects of stream processing systems. 
We implemented a prototype toolchain for VAMOS and conducted experiments including a case study of monitoring for data races. The results indicate that VAMOS enables writing useful yet efficient monitors, is compatible with a variety of event sources and monitor specifications, and simplifies key aspects of setting up a monitoring system from scratch.}, author = {Chalupa, Marek and Mühlböck, Fabian and Muroya Lei, Stefanie and Henzinger, Thomas A}, booktitle = {Fundamental Approaches to Software Engineering}, isbn = {9783031308253}, issn = {1611-3349}, location = {Paris, France}, pages = {260--281}, publisher = {Springer Nature}, title = {{Vamos: Middleware for best-effort third-party monitoring}}, doi = {10.1007/978-3-031-30826-0_15}, volume = {13991}, year = {2023}, } @inproceedings{13143, abstract = {GIMPS and PrimeGrid are large-scale distributed projects dedicated to searching for giant prime numbers, usually of special forms like Mersenne and Proth primes. The numbers in the current search space are millions of digits long and the participating volunteers need to run resource-consuming primality tests. Once a candidate prime N has been found, the only way for another party to independently verify the primality of N used to be by repeating the expensive primality test. To avoid the need to recompute each primality test, these projects have recently adopted certifying mechanisms that enable efficient verification of performed tests. However, the mechanisms presently in place only detect benign errors and there is no guarantee against adversarial behavior: a malicious volunteer can mislead the project into rejecting a giant prime as non-prime. In this paper, we propose a practical, cryptographically-sound mechanism for certifying the non-primality of Proth numbers. That is, a volunteer can – in parallel to running the primality test for N – generate, at little extra cost, an efficiently verifiable proof certifying that N is not prime. The interactive protocol has statistical soundness and can be made non-interactive using the Fiat-Shamir heuristic. Our approach is based on a cryptographic primitive called Proof of Exponentiation (PoE) which, for a group G, certifies that a tuple (x, y, T) ∈ G^2 × ℕ satisfies x^(2^T) = y (Pietrzak, ITCS 2019 and Wesolowski, J. Cryptol. 2020). In particular, we show how to adapt Pietrzak’s PoE at a moderate additional cost to make it a cryptographically-sound certificate of non-primality.}, author = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Pietrzak, Krzysztof Z}, booktitle = {Public-Key Cryptography - PKC 2023}, isbn = {9783031313677}, issn = {1611-3349}, location = {Atlanta, GA, United States}, pages = {530--553}, publisher = {Springer Nature}, title = {{Certifying giant nonprimes}}, doi = {10.1007/978-3-031-31368-4_19}, volume = {13940}, year = {2023}, } @inproceedings{13142, abstract = {Reinforcement learning has received much attention for learning controllers of deterministic systems. We consider a learner-verifier framework for stochastic control systems and survey recent methods that formally guarantee a conjunction of reachability and safety properties. Given a property and a lower bound on the probability of the property being satisfied, our framework jointly learns a control policy and a formal certificate to ensure the satisfaction of the property with a desired probability threshold.
Both the control policy and the formal certificate are continuous functions from states to reals, which are learned as parameterized neural networks. While in the deterministic case, the certificates are invariants and barrier functions for safety, or Lyapunov and ranking functions for liveness, in the stochastic case the certificates are supermartingales. For certificate verification, we use interval arithmetic abstract interpretation to bound the expected values of neural network functions.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde}, booktitle = {Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783031308222}, issn = {1611-3349}, location = {Paris, France}, pages = {3--25}, publisher = {Springer Nature}, title = {{A learner-verifier framework for neural network controllers and certificates of stochastic systems}}, doi = {10.1007/978-3-031-30823-9_1}, volume = {13993}, year = {2023}, } @inproceedings{13141, abstract = {We automatically compute a new class of environment assumptions in two-player turn-based finite graph games which characterize an “adequate cooperation” needed from the environment to allow the system player to win. Given an ω-regular winning condition Φ for the system player, we compute an ω-regular assumption Ψ for the environment player, such that (i) every environment strategy compliant with Ψ allows the system to fulfill Φ (sufficiency), (ii) Ψ can be fulfilled by the environment for every strategy of the system (implementability), and (iii) Ψ does not prevent any cooperative strategy choice (permissiveness). For parity games, which are canonical representations of ω-regular games, we present a polynomial-time algorithm for the symbolic computation of adequately permissive assumptions and show that our algorithm runs faster and produces better assumptions than existing approaches—both theoretically and empirically. To the best of our knowledge, for ω-regular games, we provide the first algorithm to compute sufficient and implementable environment assumptions that are also permissive.}, author = {Anand, Ashwani and Mallik, Kaushik and Nayak, Satya Prakash and Schmuck, Anne Kathrin}, booktitle = {Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783031308192}, issn = {1611-3349}, location = {Paris, France}, pages = {211--228}, publisher = {Springer Nature}, title = {{Computing adequately permissive assumptions for synthesis}}, doi = {10.1007/978-3-031-30820-8_15}, volume = {13994}, year = {2023}, } @inproceedings{12467, abstract = {Safety and liveness are elementary concepts of computation, and the foundation of many verification paradigms. The safety-liveness classification of boolean properties characterizes whether a given property can be falsified by observing a finite prefix of an infinite computation trace (always for safety, never for liveness). In quantitative specification and verification, properties assign not truth values, but quantitative values to infinite traces (e.g., a cost, or the distance to a boolean property). We introduce quantitative safety and liveness, and we prove that our definitions induce conservative quantitative generalizations of both (1) the safety-progress hierarchy of boolean properties and (2) the safety-liveness decomposition of boolean properties. In particular, we show that every quantitative property can be written as the pointwise minimum of a quantitative safety property and a quantitative liveness property.
Consequently, like boolean properties, quantitative properties can be min-decomposed into safety and liveness parts or, alternatively, max-decomposed into co-safety and co-liveness parts. Moreover, quantitative properties can be approximated naturally. We prove that every quantitative property that has both safe and co-safe approximations can be monitored arbitrarily precisely by a monitor that uses only a finite number of states.}, author = {Henzinger, Thomas A and Mazzocchi, Nicolas Adrien and Sarac, Naci E}, booktitle = {26th International Conference on Foundations of Software Science and Computation Structures}, isbn = {9783031308284}, issn = {1611-3349}, location = {Paris, France}, pages = {349--370}, publisher = {Springer Nature}, title = {{Quantitative safety and liveness}}, doi = {10.1007/978-3-031-30829-1_17}, volume = {13992}, year = {2023}, } @inproceedings{13310, abstract = {Machine-learned systems are in widespread use for making decisions about humans, and it is important that they are fair, i.e., not biased against individuals based on sensitive attributes. We present runtime verification of algorithmic fairness for systems whose models are unknown, but are assumed to have a Markov chain structure. We introduce a specification language that can model many common algorithmic fairness properties, such as demographic parity, equal opportunity, and social burden. We build monitors that observe a long sequence of events as generated by a given system, and output, after each observation, a quantitative estimate of how fair or biased the system was on that run until that point in time. The estimate is proven to be correct modulo a variable error bound and a given confidence level, where the error bound gets tighter as the observed sequence gets longer. Our monitors are of two types, and use, respectively, frequentist and Bayesian statistical inference techniques. While the frequentist monitors compute estimates that are objectively correct with respect to the ground truth, the Bayesian monitors compute estimates that are correct subject to a given prior belief about the system’s model. Using a prototype implementation, we show how we can monitor if a bank is fair in giving loans to applicants from different social backgrounds, and if a college is fair in admitting students while maintaining a reasonable financial burden on society. Although they exhibit different theoretical complexities in certain cases, in our experiments, both frequentist and Bayesian monitors took less than a millisecond to update their verdicts after each observation.}, author = {Henzinger, Thomas A and Karimi, Mahyar and Kueffner, Konstantin and Mallik, Kaushik}, booktitle = {Computer Aided Verification}, isbn = {9783031377020}, issn = {1611-3349}, location = {Paris, France}, pages = {358--382}, publisher = {Springer Nature}, title = {{Monitoring algorithmic fairness}}, doi = {10.1007/978-3-031-37703-7_17}, volume = {13965}, year = {2023}, } @inproceedings{14259, abstract = {We provide a learning-based technique for guessing a winning strategy in a parity game originating from an LTL synthesis problem. A cheaply obtained guess can be useful in several applications. Not only can the guessed strategy be applied as best-effort in cases where the game’s huge size prohibits rigorous approaches, but it can also increase the scalability of rigorous LTL synthesis in several ways. Firstly, checking whether a guessed strategy is winning is easier than constructing one.
Secondly, even if the guess is wrong in some places, it can be fixed by strategy iteration faster than constructing one from scratch. Thirdly, the guess can be used in on-the-fly approaches to prioritize exploration in the most fruitful directions. In contrast to previous works, we (i) reflect the highly structured logical information in the game’s states, the so-called semantic labelling, coming from the recent LTL-to-automata translations, and (ii) learn to reflect it properly by learning from previously solved games, bringing the solving process closer to human-like reasoning.}, author = {Kretinsky, Jan and Meggendorfer, Tobias and Prokop, Maximilian and Rieder, Sabine}, booktitle = {35th International Conference on Computer Aided Verification}, isbn = {9783031377051}, issn = {1611-3349}, location = {Paris, France}, pages = {390--414}, publisher = {Springer Nature}, title = {{Guessing winning policies in LTL synthesis by semantic learning}}, doi = {10.1007/978-3-031-37706-8_20}, volume = {13964}, year = {2023}, } @inproceedings{14318, abstract = {Probabilistic recurrence relations (PRRs) are a standard formalism for describing the runtime of a randomized algorithm. Given a PRR and a time limit κ, we consider the tail probability Pr[T≥κ], i.e., the probability that the randomized runtime T of the PRR exceeds κ. Our focus is the formal analysis of tail bounds that aims at finding a tight asymptotic upper bound u≥Pr[T≥κ]. To address this problem, the classical and most well-known approach is the cookbook method by Karp (JACM 1994), while other approaches are mostly limited to deriving tail bounds of specific PRRs via involved custom analysis. In this work, we propose a novel approach for deriving the common exponentially-decreasing tail bounds for PRRs whose preprocessing time and random passed sizes follow a discrete or (piecewise) uniform distribution and whose recursive call is either a single procedure call or a divide-and-conquer. We first establish a theoretical approach via Markov’s inequality, and then instantiate it with a template-based algorithmic approach via a refined treatment of exponentiation. Experimental evaluation shows that our algorithmic approach is capable of deriving tail bounds that (i) are asymptotically tighter than Karp’s method, (ii) match the best-known manually-derived asymptotic tail bound for QuickSelect, and (iii) are only slightly worse (by a log log n factor) than the manually-proven optimal asymptotic tail bound for QuickSort. Moreover, our algorithmic approach handles all examples (including realistic PRRs such as QuickSort, QuickSelect, DiameterComputation, etc.) in less than 0.1 s, showing that our approach is efficient in practice.}, author = {Sun, Yican and Fu, Hongfei and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar}, booktitle = {Computer Aided Verification}, isbn = {9783031377082}, issn = {1611-3349}, location = {Paris, France}, pages = {16--39}, publisher = {Springer Nature}, title = {{Automated tail bound analysis for probabilistic recurrence relations}}, doi = {10.1007/978-3-031-37709-9_2}, volume = {13966}, year = {2023}, } @inproceedings{14317, abstract = {Markov decision processes can be viewed as transformers of probability distributions. While this view is useful from a practical standpoint to reason about trajectories of distributions, basic reachability and safety problems are known to be computationally intractable (i.e., Skolem-hard) to solve in such models.
Further, we show that even for simple examples of MDPs, strategies for safety objectives over distributions can require infinite memory and randomization. In light of this, we present a novel overapproximation approach to synthesize strategies in an MDP, such that a safety objective over the distributions is met. More precisely, we develop a new framework for template-based synthesis of certificates as affine distributional and inductive invariants for safety objectives in MDPs. We provide two algorithms within this framework. One can synthesize only memoryless strategies but has relative completeness guarantees, while the other can synthesize general strategies. The runtime complexity of both algorithms is in PSPACE. We implement these algorithms and show that they can solve several non-trivial examples.}, author = {Akshay, S. and Chatterjee, Krishnendu and Meggendorfer, Tobias and Zikelic, Dorde}, booktitle = {International Conference on Computer Aided Verification}, isbn = {9783031377082}, issn = {1611-3349}, location = {Paris, France}, pages = {86--112}, publisher = {Springer Nature}, title = {{MDPs as distribution transformers: Affine invariant synthesis for safety objectives}}, doi = {10.1007/978-3-031-37709-9_5}, volume = {13966}, year = {2023}, } @inproceedings{14410, abstract = {This paper focuses on the implementation details of the baseline methods and a recent lightweight conditional model extrapolation algorithm, LIMES [5], for streaming data under class-prior shift. LIMES achieves superior performance over the baseline methods, especially concerning the minimum-across-day accuracy, which is important for the users of the system. In this work, the key measures to facilitate reproducibility and enhance the credibility of the results are described.}, author = {Tomaszewska, Paulina and Lampert, Christoph}, booktitle = {International Workshop on Reproducible Research in Pattern Recognition}, isbn = {9783031407727}, issn = {1611-3349}, location = {Montreal, Canada}, pages = {67--73}, publisher = {Springer Nature}, title = {{On the implementation of baselines and lightweight conditional model extrapolation (LIMES) under class-prior shift}}, doi = {10.1007/978-3-031-40773-4_6}, volume = {14068}, year = {2023}, } @inproceedings{14428, abstract = {Suppose we have two hash functions h1 and h2, but we trust the security of only one of them. To mitigate this worry, we wish to build a hash combiner C_{h1,h2} which is secure so long as one of the underlying hash functions is. This question has been well-studied in the regime of collision resistance. In this case, concatenating the two hash function outputs clearly works. Unfortunately, a long series of works (Boneh and Boyen, CRYPTO’06; Pietrzak, Eurocrypt’07; Pietrzak, CRYPTO’08) showed that no (noticeably) shorter combiner for collision resistance is possible. In this work, we revisit this pessimistic state of affairs, motivated by the observation that collision-resistance is insufficient for many interesting applications of cryptographic hash functions anyway. We argue that the right formulation of the “hash combiner” is to build what we call random oracle (RO) combiners, utilizing stronger assumptions for stronger constructions. Indeed, we circumvent the previous lower bounds for collision resistance by constructing a simple length-preserving RO combiner C̃_{h1,h2}^{Z1,Z2}(M) = h1(M, Z1) ⊕ h2(M, Z2), where Z1, Z2 are random salts of appropriate length.
We show that this extra randomness is necessary for RO combiners, and indeed our construction is somewhat tight with this lower bound. On the negative side, we show that one cannot generically apply the composition theorem to further replace “monolithic” hash functions h1 and h2 by some simpler indifferentiable construction (such as the Merkle-Damgård transformation) from smaller components, such as fixed-length compression functions. Finally, despite this issue, we directly prove collision resistance of the Merkle-Damgård variant of our combiner, where h1 and h2 are replaced by iterative Merkle-Damgård hashes applied to a fixed-length compression function. Thus, we can still subvert the concatenation barrier for collision-resistance combiners while utilizing practically small fixed-length components underneath.}, author = {Dodis, Yevgeniy and Ferguson, Niels and Goldin, Eli and Hall, Peter and Pietrzak, Krzysztof Z}, booktitle = {43rd Annual International Cryptology Conference}, isbn = {9783031385445}, issn = {1611-3349}, location = {Santa Barbara, CA, United States}, pages = {514--546}, publisher = {Springer Nature}, title = {{Random oracle combiners: Breaking the concatenation barrier for collision-resistance}}, doi = {10.1007/978-3-031-38545-2_17}, volume = {14082}, year = {2023}, } @inproceedings{14457, abstract = {Threshold secret sharing allows a dealer to split a secret s into n shares, such that any t shares allow for reconstructing s, but no t-1 shares reveal any information about s. Leakage-resilient secret sharing requires that the secret remains hidden, even when an adversary additionally obtains a limited amount of leakage from every share. Benhamouda et al. (CRYPTO’18) proved that Shamir’s secret sharing scheme is one-bit leakage-resilient for reconstruction threshold t ≥ 0.85n and conjectured that the same holds for t = c·n for any constant 0 ≤ c ≤ 1. Nielsen and Simkin (EUROCRYPT’20) showed that this is the best one can hope for by proving that Shamir’s scheme is not secure against one-bit leakage when t ≤ c·n/log(n). In this work, we strengthen the lower bound of Nielsen and Simkin. We consider noisy leakage-resilience, where a random subset of leakages is replaced by uniformly random noise. We prove a lower bound for Shamir’s secret sharing, similar to that of Nielsen and Simkin, which holds even when a constant fraction of leakages is replaced by random noise. To this end, we first prove a lower bound on the share size of any noisy-leakage-resilient sharing scheme. We then use this lower bound to show that there exist universal constants c1, c2, such that for sufficiently large n it holds that Shamir’s secret sharing scheme is not noisy-leakage-resilient for t ≤ c1·n/log(n), even when a c2 fraction of leakages are replaced by random noise.}, author = {Hoffmann, Charlotte and Simkin, Mark}, booktitle = {8th International Conference on Cryptology and Information Security in Latin America}, isbn = {9783031444685}, issn = {1611-3349}, location = {Quito, Ecuador}, pages = {215--228}, publisher = {Springer Nature}, title = {{Stronger lower bounds for leakage-resilient secret sharing}}, doi = {10.1007/978-3-031-44469-2_11}, volume = {14168}, year = {2023}, } @inproceedings{14454, abstract = {As AI and machine-learned software are used increasingly for making decisions that affect humans, it is imperative that they remain fair and unbiased in their decisions.
To complement design-time bias mitigation measures, runtime verification techniques have been introduced recently to monitor the algorithmic fairness of deployed systems. Previous monitoring techniques assume full observability of the states of the (unknown) monitored system. Moreover, they can monitor only fairness properties that are specified as arithmetic expressions over the probabilities of different events. In this work, we extend fairness monitoring to systems modeled as partially observed Markov chains (POMCs), and to specifications containing arithmetic expressions over the expected values of numerical functions on event sequences. The only assumptions we make are that the underlying POMC is aperiodic and starts in the stationary distribution, and that a bound on its mixing time is known. These assumptions enable us to estimate a given property for the entire distribution of possible executions of the monitored POMC, by observing only a single execution. Our monitors observe a long run of the system and, after each new observation, output updated PAC-estimates of how fair or biased the system is. The monitors are computationally lightweight and, using a prototype implementation, we demonstrate their effectiveness on several real-world examples.}, author = {Henzinger, Thomas A and Kueffner, Konstantin and Mallik, Kaushik}, booktitle = {23rd International Conference on Runtime Verification}, isbn = {9783031442667}, issn = {1611-3349}, location = {Thessaloniki, Greece}, pages = {291--311}, publisher = {Springer Nature}, title = {{Monitoring algorithmic fairness under partial observations}}, doi = {10.1007/978-3-031-44267-4_15}, volume = {14245}, year = {2023}, } @inproceedings{14559, abstract = {We consider the problem of learning control policies in discrete-time stochastic systems which guarantee that the system stabilizes within some specified stabilization region with probability 1. Our approach is based on the novel notion of stabilizing ranking supermartingales (sRSMs) that we introduce in this work. Our sRSMs overcome the limitation of methods proposed in previous works whose applicability is restricted to systems in which the stabilizing region cannot be left once entered under any control policy. We present a learning procedure that learns a control policy together with an sRSM that formally certifies probability 1 stability, both learned as neural networks. We show that this procedure can also be adapted to formally verify that, under a given Lipschitz continuous control policy, the stochastic system stabilizes within some stabilizing region with probability 1. Our experimental evaluation shows that our learning procedure can successfully learn provably stabilizing policies in practice.}, author = {Ansaripour, Matin and Chatterjee, Krishnendu and Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde}, booktitle = {21st International Symposium on Automated Technology for Verification and Analysis}, isbn = {9783031453281}, issn = {1611-3349}, location = {Singapore, Singapore}, pages = {357--379}, publisher = {Springer Nature}, title = {{Learning provably stabilizing neural controllers for discrete-time stochastic systems}}, doi = {10.1007/978-3-031-45329-8_17}, volume = {14215}, year = {2023}, } @inproceedings{13238, abstract = {We consider a natural problem dealing with weighted packet selection across a rechargeable link, which, e.g., finds applications in cryptocurrency networks.
The capacity of a link (u, v) is determined by how much nodes u and v allocate for this link. Specifically, the input is a finite ordered sequence of packets that arrive in both directions along a link. Given (u, v) and a packet of weight x going from u to v, node u can either accept or reject the packet. If u accepts the packet, the capacity on link (u, v) decreases by x. Correspondingly, v’s capacity on (u, v) increases by x. If a node rejects the packet, this will entail a cost affinely linear in the weight of the packet. A link is “rechargeable” in the sense that the total capacity of the link has to remain constant, but the allocation of capacity at the ends of the link can depend arbitrarily on the nodes’ decisions. The goal is to minimise the sum of the capacity injected into the link and the cost of rejecting packets. We show that the problem is NP-hard, but can be approximated efficiently with a ratio of (1+ε)·(1+√3) for any ε > 0.}, author = {Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X}, booktitle = {Structural Information and Communication Complexity}, isbn = {9783031327322}, issn = {1611-3349}, location = {Alcala de Henares, Spain}, pages = {576--594}, publisher = {Springer Nature}, title = {{Weighted packet selection for rechargeable links in cryptocurrency networks: Complexity and approximation}}, doi = {10.1007/978-3-031-32733-9_26}, volume = {13892}, year = {2023}, } @inproceedings{14693, abstract = {Lucas sequences are constant-recursive integer sequences with a long history of applications in cryptography, both in the design of cryptographic schemes and in cryptanalysis. In this work, we study the sequential hardness of computing Lucas sequences over an RSA modulus. First, we show that modular Lucas sequences are at least as sequentially hard as the classical delay function given by iterated modular squaring proposed by Rivest, Shamir, and Wagner (MIT Tech. Rep. 1996) in the context of time-lock puzzles. Moreover, there is no obvious reduction in the other direction, which suggests that the assumption of sequential hardness of modular Lucas sequences is strictly weaker than that of iterated modular squaring. In other words, the sequential hardness of modular Lucas sequences might hold even in the case of an algorithmic improvement violating the sequential hardness of iterated modular squaring. Second, we demonstrate the feasibility of constructing practically-efficient verifiable delay functions based on the sequential hardness of modular Lucas sequences. Our construction builds on the work of Pietrzak (ITCS 2019) by leveraging the intrinsic connection between the problem of computing modular Lucas sequences and exponentiation in an appropriate extension field.}, author = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Krňák, Tomáš}, booktitle = {21st International Conference on Theory of Cryptography}, isbn = {9783031486234}, issn = {1611-3349}, location = {Taipei, Taiwan}, pages = {336--362}, publisher = {Springer Nature}, title = {{(Verifiable) delay functions from Lucas sequences}}, doi = {10.1007/978-3-031-48624-1_13}, volume = {14372}, year = {2023}, } @inproceedings{14691, abstract = {Continuous Group-Key Agreement (CGKA) allows a group of users to maintain a shared key.
It is the fundamental cryptographic primitive underlying group messaging schemes and related protocols, most notably TreeKEM, the underlying key agreement protocol of the Messaging Layer Security (MLS) protocol, a standard for group messaging by the IETF. CGKA works in an asynchronous setting where parties only occasionally need to come online, and their messages are relayed by an untrusted server. The most expensive operation provided by CGKA is the one that allows a user to refresh their key material in order to achieve forward secrecy (old messages are secure when a user is compromised) and post-compromise security (users can heal from compromise). One caveat of early CGKA protocols is that these update operations had to be performed sequentially, with any user wanting to update their key material having to receive and process all previous updates. Later versions of TreeKEM do allow for concurrent updates at the cost of a communication overhead per update message that is linear in the number of updating parties. This was shown by Bienstock et al. [TCC’20] to be necessary when achieving PCS in just two rounds of communication. The recently proposed protocol CoCoA [Alwen et al. Eurocrypt’22], however, shows that this overhead can be reduced if PCS requirements are relaxed, and only a logarithmic number of rounds is required. The natural question, thus, is whether CoCoA is optimal in this setting. In this work we answer this question, providing a lower bound on the cost (concretely, the amount of data to be uploaded to the server) for CGKA protocols that heal in an arbitrary number k of rounds, which shows that CoCoA is very close to optimal. Additionally, we extend CoCoA to heal in an arbitrary number of rounds, and propose a modification of it, with a reduced communication cost for certain k. We prove our bound in a combinatorial setting where the state of the protocol progresses in rounds, and the state of the protocol in each round is captured by a set system, each set specifying a set of users who share a secret key. We show that this combinatorial model is equivalent to a symbolic model capturing building blocks including PRFs and public-key encryption, related to the one used by Bienstock et al. Our lower bound is of order k·n^{1+1/(k-1)}/log(k), where 2 ≤ k ≤ log(n) is the number of updates per user the protocol requires to heal. This generalizes the n^2 bound for k = 2 of Bienstock et al. This bound almost matches the k·n^{1+2/(k-1)} or k^2·n^{1+1/(k-1)} efficiency we get for the variants of the CoCoA protocol also introduced in this paper.}, author = {Auerbach, Benedikt and Cueto Noval, Miguel and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z}, booktitle = {21st International Conference on Theory of Cryptography}, isbn = {9783031486203}, issn = {1611-3349}, location = {Taipei, Taiwan}, pages = {271--300}, publisher = {Springer Nature}, title = {{On the cost of post-compromise security in concurrent Continuous Group-Key Agreement}}, doi = {10.1007/978-3-031-48621-0_10}, volume = {14371}, year = {2023}, } @inproceedings{14692, abstract = {The generic-group model (GGM) aims to capture algorithms working over groups of prime order that only rely on the group operation, but do not exploit any additional structure given by the concrete implementation of the group. In it, it is possible to prove information-theoretic lower bounds on the hardness of problems like the discrete logarithm (DL) or computational Diffie-Hellman (CDH).
Thus, since its introduction, it has served as a valuable tool to assess the concrete security provided by cryptographic schemes based on such problems. A work on the related algebraic-group model (AGM) introduced a method, used by many subsequent works, to adapt GGM lower bounds for one problem to another, by means of conceptually simple reductions. In this work, we propose an alternative approach to extend GGM bounds from one problem to another. Following an idea by Yun [EC15], we show that, in the GGM, the security of a large class of problems can be reduced to that of geometric-search problems. By reducing the security of the resulting geometric-search problems to variants of the search-by-hypersurface problem, for which information-theoretic lower bounds exist, we give alternative proofs of several results that used the AGM approach. The main advantage of our approach is that our reduction from geometric-search problems works as well for the GGM with preprocessing (more precisely, the bit-fixing GGM introduced by Coretti, Dodis and Guo [Crypto18]). As a consequence, this opens up the possibility of transferring preprocessing GGM bounds from one problem to another, also by means of simple reductions. Concretely, we prove novel preprocessing bounds on the hardness of the d-strong discrete logarithm, the d-strong Diffie-Hellman inversion, and multi-instance CDH problems, as well as a large class of Uber assumptions. Additionally, our approach applies to Shoup’s GGM without additional restrictions on the query behavior of the adversary, while the recent works of Zhang, Zhou, and Katz [AC22] and Zhandry [Crypto22] highlight that this is not the case for the AGM approach.}, author = {Auerbach, Benedikt and Hoffmann, Charlotte and Pascual Perez, Guillermo}, booktitle = {21st International Conference on Theory of Cryptography}, isbn = {9783031486203}, issn = {1611-3349}, location = {Taipei, Taiwan}, pages = {301--330}, publisher = {Springer Nature}, title = {{Generic-group lower bounds via reductions between geometric-search problems: With and without preprocessing}}, doi = {10.1007/978-3-031-48621-0_11}, volume = {14371}, year = {2023}, }
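%% A minimal Python sketch of a Pietrzak-style proof of exponentiation (PoE),
%% the primitive behind "Certifying giant nonprimes" and "(Verifiable) delay
%% functions from Lucas sequences" above: it certifies y = x^(2^T) mod N while
%% the verifier does only O(log T) work. The SHA-256 Fiat-Shamir challenge,
%% the toy modulus, and the power-of-two T are illustrative assumptions, not
%% the papers' concrete instantiations.
import hashlib

def fiat_shamir(x, y, T, mu, N):
    # Derive the round challenge non-interactively (Fiat-Shamir heuristic).
    data = f"{x}|{y}|{T}|{mu}|{N}".encode()
    return int.from_bytes(hashlib.sha256(data).digest(), "big") % N

def prove(x, T, N):
    # Honest prover: T sequential squarings, plus one midpoint mu per round.
    y = pow(x, pow(2, T), N)
    proof = []
    while T > 1:
        half = T // 2
        mu = pow(x, pow(2, half), N)        # midpoint value x^(2^(T/2))
        proof.append(mu)
        r = fiat_shamir(x, y, T, mu, N)
        x = (pow(x, r, N) * mu) % N         # fold two half-size claims into one
        y = (pow(mu, r, N) * y) % N
        T = half
    return proof

def verify(x, y, T, proof, N):
    # Verifier: log2(T) rounds, each costing two small exponentiations.
    for mu in proof:
        r = fiat_shamir(x, y, T, mu, N)
        x = (pow(x, r, N) * mu) % N
        y = (pow(mu, r, N) * y) % N
        T //= 2
    return T == 1 and pow(x, 2, N) == y     # base claim: x^2 = y

N = 1000003 * 1000033                        # toy modulus; real ones are huge
x, T = 5, 2 ** 10                            # T must be a power of two here
y = pow(x, pow(2, T), N)
assert verify(x, y, T, prove(x, T, N), N)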
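%% A minimal sketch, in the spirit of "Monitoring algorithmic fairness" above,
%% of a frequentist monitor that outputs, after each observation, an estimate
%% of the demographic-parity gap (acceptance-rate difference between groups A
%% and B) with a Hoeffding-style error bound at confidence 1 - delta. Treating
%% observations as i.i.d. is a simplifying assumption; the papers handle
%% (partially observed) Markov-chain dynamics.
import math

class ParityMonitor:
    def __init__(self, delta=0.05):
        self.delta = delta
        self.n = {"A": 0, "B": 0}            # observations per group
        self.acc = {"A": 0, "B": 0}          # accepted decisions per group

    def observe(self, group, accepted):
        self.n[group] += 1
        self.acc[group] += int(accepted)

    def verdict(self):
        if min(self.n.values()) == 0:
            return None                      # not enough data yet
        gap = self.acc["A"] / self.n["A"] - self.acc["B"] / self.n["B"]
        # Union of two per-group Hoeffding intervals, each at level delta/2.
        eps = sum(math.sqrt(math.log(4 / self.delta) / (2 * self.n[g]))
                  for g in ("A", "B"))
        return gap, eps                      # estimate and its error bound

monitor = ParityMonitor()
for group, accepted in [("A", 1), ("B", 0), ("A", 1), ("B", 1), ("A", 0)]:
    monitor.observe(group, accepted)
print(monitor.verdict())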
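%% A minimal sketch of the salted construction from "Random oracle combiners"
%% above: C(M) = h1(M, Z1) XOR h2(M, Z2) with independent public random salts,
%% so the output stays as short as a single hash. Instantiating h1, h2 as
%% SHA-256 and SHA3-256 with 32-byte salts is an illustrative assumption; the
%% paper models h1, h2 as monolithic random oracles and analyzes the salt
%% length required.
import hashlib, os

Z1, Z2 = os.urandom(32), os.urandom(32)      # random salts, fixed at setup

def combiner(message: bytes) -> bytes:
    d1 = hashlib.sha256(message + Z1).digest()
    d2 = hashlib.sha3_256(message + Z2).digest()
    return bytes(a ^ b for a, b in zip(d1, d2))  # length-preserving XOR

print(combiner(b"hello").hex())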
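%% A minimal sketch of Shamir's t-out-of-n secret sharing over a prime field,
%% the scheme whose leakage-resilience is bounded in "Stronger lower bounds
%% for leakage-resilient secret sharing" above: the secret is the constant
%% term of a random degree-(t-1) polynomial, and share i is its value at i.
%% The field size and parameters are illustrative.
import random

p = 2**61 - 1                                # prime field modulus

def share(secret, t, n):
    # Any t shares determine the polynomial; fewer reveal nothing.
    coeffs = [secret] + [random.randrange(p) for _ in range(t - 1)]
    return [(i, sum(c * pow(i, k, p) for k, c in enumerate(coeffs)) % p)
            for i in range(1, n + 1)]

def reconstruct(shares):
    # Lagrange interpolation at x = 0 recovers the constant term.
    secret = 0
    for xi, yi in shares:
        num, den = 1, 1
        for xj, _ in shares:
            if xj != xi:
                num = num * (-xj) % p
                den = den * (xi - xj) % p
        secret = (secret + yi * num * pow(den, -1, p)) % p
    return secret

shares = share(123456789, t=3, n=5)
assert reconstruct(shares[:3]) == 123456789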
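%% A minimal sketch of computing the Lucas sequence value V_k(P, Q) mod N via
%% 2x2 matrix exponentiation; for k = 2^T over an RSA modulus N this takes T
%% sequential matrix squarings, the computation whose sequential hardness
%% underlies "(Verifiable) delay functions from Lucas sequences" above. The
%% parameters below are illustrative toys.
def lucas_v(k, P, Q, N):
    # V_0 = 2, V_1 = P, V_{j+1} = P*V_j - Q*V_{j-1}   (all mod N)
    def mat_mul(A, B):
        return [[(A[0][0]*B[0][0] + A[0][1]*B[1][0]) % N,
                 (A[0][0]*B[0][1] + A[0][1]*B[1][1]) % N],
                [(A[1][0]*B[0][0] + A[1][1]*B[1][0]) % N,
                 (A[1][0]*B[0][1] + A[1][1]*B[1][1]) % N]]
    M = [[P % N, -Q % N], [1, 0]]            # (V_{j+1}, V_j) = M * (V_j, V_{j-1})
    R = [[1, 0], [0, 1]]                     # identity; becomes M^k
    while k:
        if k & 1:
            R = mat_mul(R, M)
        M = mat_mul(M, M)
        k >>= 1
    # (V_{k+1}, V_k) = M^k * (V_1, V_0) with V_1 = P, V_0 = 2.
    return (R[1][0] * P + R[1][1] * 2) % N

assert lucas_v(2, 3, 1, 10**9) == 7          # V_2 = P^2 - 2Q = 9 - 2 = 7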