@article{13179, abstract = {Writing concurrent code that is both correct and efficient is notoriously difficult. Thus, programmers often prefer to use synchronization abstractions, which render code simpler and easier to reason about. Despite a wealth of work on this topic, there is still a gap between the rich semantics provided by synchronization abstractions in modern programming languages—specifically, fair FIFO ordering of synchronization requests and support for abortable operations—and frameworks for implementing them correctly and efficiently. Supporting such semantics is critical given the rising popularity of constructs for asynchronous programming, such as coroutines, which abort frequently and are cheaper to suspend and resume compared to native threads. This paper introduces a new framework called CancellableQueueSynchronizer (CQS), which enables simple yet efficient implementations of a wide range of fair and abortable synchronization primitives: mutexes, semaphores, barriers, count-down latches, and blocking pools. Our main contribution is algorithmic, as implementing both fairness and abortability efficiently at this level of generality is non-trivial. Importantly, all our algorithms, including the CQS framework and the primitives built on top of it, come with formal proofs in the Iris framework for Coq for many of their properties. These proofs are modular, so it is easy to show correctness for new primitives implemented on top of CQS. From a practical perspective, our implementation of CQS for native threads on the JVM improves throughput by up to two orders of magnitude over Java’s AbstractQueuedSynchronizer, the only practical abstraction offering similar semantics. Further, we successfully integrated CQS as a core component of the popular Kotlin Coroutines library, validating the framework’s practical impact and expressiveness in a real-world environment. In sum, CancellableQueueSynchronizer is the first framework to combine expressiveness with formal guarantees and solid practical performance. Our approach should be extensible to other languages and families of synchronization primitives.}, author = {Koval, Nikita and Khalanskiy, Dmitry and Alistarh, Dan-Adrian}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, publisher = {Association for Computing Machinery}, title = {{CQS: A formally-verified framework for fair and abortable synchronization}}, doi = {10.1145/3591230}, volume = {7}, year = {2023}, } @article{10153, abstract = {Gradual typing is a principled means for mixing typed and untyped code. But typed and untyped code often exhibit different programming patterns. There is already substantial research investigating gradually giving types to code exhibiting typical untyped patterns, and some research investigating gradually removing types from code exhibiting typical typed patterns. This paper investigates how to extend these established gradual-typing concepts to give formal guarantees not only about how to change types as code evolves but also about how to change such programming patterns as well. In particular, we explore mixing untyped "structural" code with typed "nominal" code in an object-oriented language. But whereas previous work only allowed "nominal" objects to be treated as "structural" objects, we also allow "structural" objects to dynamically acquire certain nominal types, namely interfaces.
We present a calculus that supports such "cross-paradigm" code migration and interoperation in a manner satisfying both the static and dynamic gradual guarantees, and demonstrate that the calculus can be implemented efficiently.}, author = {Mühlböck, Fabian and Tate, Ross}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, keywords = {gradual typing, gradual guarantee, nominal, structural, call tags}, location = {Chicago, IL, United States}, publisher = {Association for Computing Machinery}, title = {{Transitioning from structural to nominal code with efficient gradual typing}}, doi = {10.1145/3485504}, volume = {5}, year = {2021}, } @article{10191, abstract = {In this work we solve the algorithmic problem of consistency verification for the TSO and PSO memory models given a reads-from map, denoted VTSO-rf and VPSO-rf, respectively. For an execution of n events over k threads and d variables, we establish novel bounds that scale as n^(k+1) for TSO and as n^(k+1) · min(n^(k^2), 2^(k·d)) for PSO. Moreover, based on our solution to these problems, we develop an SMC algorithm under TSO and PSO that uses the RF equivalence. The algorithm is exploration-optimal, in the sense that it is guaranteed to explore each class of the RF partitioning exactly once, and spends polynomial time per class when k is bounded. Finally, we implement all our algorithms in the SMC tool Nidhugg, and perform a large number of experiments over benchmarks from existing literature. Our experimental results show that our algorithms for VTSO-rf and VPSO-rf provide significant scalability improvements over standard alternatives. Moreover, when used for SMC, the RF partitioning is often much coarser than the standard Shasha-Snir partitioning for TSO/PSO, which yields a significant speedup in the model checking task.}, author = {Bui, Truc Lam and Chatterjee, Krishnendu and Gautam, Tushar and Pavlogiannis, Andreas and Toman, Viktor}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, keywords = {safety, risk, reliability and quality, software}, number = {OOPSLA}, publisher = {Association for Computing Machinery}, title = {{The reads-from equivalence for the TSO and PSO memory models}}, doi = {10.1145/3485541}, volume = {5}, year = {2021}, } @inproceedings{8324, abstract = {The notion of program sensitivity (aka Lipschitz continuity) specifies that changes in the program input result in proportional changes to the program output. For probabilistic programs, the notion is naturally extended to expected sensitivity. A previous approach develops a relational program logic framework for proving expected sensitivity of probabilistic while loops, where the number of iterations is fixed and bounded. In this work, we consider probabilistic while loops where the number of iterations is not fixed, but randomized and depends on the initial input values. We present a sound approach for proving expected sensitivity of such programs. Our approach is martingale-based and can be automated through existing martingale-synthesis algorithms. Furthermore, our approach is compositional for sequential composition of while loops under a mild side condition. We demonstrate the effectiveness of our approach on several classical examples, including the Gambler's Ruin problem, stochastic hybrid systems, and stochastic gradient descent.
We also present experimental results showing that our automated approach can handle various probabilistic programs in the literature.}, author = {Wang, Peixin and Fu, Hongfei and Chatterjee, Krishnendu and Deng, Yuxin and Xu, Ming}, booktitle = {Proceedings of the ACM on Programming Languages}, issn = {2475-1421}, number = {POPL}, publisher = {ACM}, title = {{Proving expected sensitivity of probabilistic programs with randomized variable-dependent termination time}}, doi = {10.1145/3371093}, volume = {4}, year = {2020}, } @inproceedings{10190, abstract = {The verification of concurrent programs remains an open challenge, as thread interaction has to be accounted for, which leads to state-space explosion. Stateless model checking battles this problem by exploring traces rather than states of the program. As there are exponentially many traces, dynamic partial-order reduction (DPOR) techniques are used to partition the trace space into equivalence classes, and explore a few representatives from each class. The standard equivalence that underlies most DPOR techniques is the happens-before equivalence; however, recent works have sparked considerable interest in coarser equivalences. The efficiency of such approaches is a product of two parameters: (i) the size of the partitioning induced by the equivalence, and (ii) the time spent by the exploration algorithm in each class of the partitioning. In this work, we present a new equivalence, called value-happens-before, and show that it has two appealing features. First, value-happens-before is always at least as coarse as the happens-before equivalence, and can be even exponentially coarser. Second, the value-happens-before partitioning is efficiently explorable when the number of threads is bounded. We present an algorithm called value-centric DPOR (VCDPOR), which explores the underlying partitioning using polynomial time per class. Finally, we perform an experimental evaluation of VCDPOR on various benchmarks, and compare it against other state-of-the-art approaches. Our results show that value-happens-before typically induces a significant reduction in the size of the underlying partitioning, which leads to a considerable reduction in the running time for exploring the whole partitioning.}, author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Toman, Viktor}, booktitle = {Proceedings of the 34th ACM International Conference on Object-Oriented Programming, Systems, Languages, and Applications}, issn = {2475-1421}, keywords = {safety, risk, reliability and quality, software}, location = {Athens, Greece}, publisher = {ACM}, title = {{Value-centric dynamic partial order reduction}}, doi = {10.1145/3360550}, volume = {3}, year = {2019}, } @article{10418, abstract = {We present a new proof rule for proving almost-sure termination of probabilistic programs, including those that contain demonic non-determinism. An important question for a probabilistic program is whether the probability mass of all its diverging runs is zero, that is, that it terminates "almost surely". Proving that can be hard, and this paper presents a new method for doing so. It applies directly to the program's source code, even if the program contains demonic choice. Like others, we use variant functions (a.k.a. "super-martingales") that are real-valued and decrease randomly on each loop iteration; but our key innovation is that the amount as well as the probability of the decrease are parametric.
We prove the soundness of the new rule, indicate where its applicability goes beyond existing rules, and explain its connection to classical results on denumerable (non-demonic) Markov chains.}, author = {McIver, Annabelle and Morgan, Carroll and Kaminski, Benjamin Lucien and Katoen, Joost P}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, location = {Los Angeles, CA, United States}, number = {POPL}, publisher = {Association for Computing Machinery}, title = {{A new proof rule for almost-sure termination}}, doi = {10.1145/3158121}, volume = {2}, year = {2017}, } @article{10416, abstract = {A fundamental algorithmic problem at the heart of static analysis is Dyck reachability. The input is a graph where the edges are labeled with different types of opening and closing parentheses, and the reachability information is computed via paths whose parentheses are properly matched. We present new results for Dyck reachability problems with applications to alias analysis and data-dependence analysis. Our main contributions, which include improved upper bounds as well as lower bounds that establish optimality guarantees, are as follows: First, we consider Dyck reachability on bidirected graphs, which is the standard way of performing field-sensitive points-to analysis. Given a bidirected graph with n nodes and m edges, we present: (i) an algorithm with worst-case running time O(m + n · α(n)), where α(n) is the inverse Ackermann function, improving the previously known O(n^2) time bound; (ii) a matching lower bound that shows that our algorithm is optimal wrt worst-case complexity; and (iii) an optimal average-case upper bound of O(m) time, improving the previously known O(m · log n) bound. Second, we consider the problem of context-sensitive data-dependence analysis, where the task is to obtain analysis summaries of library code in the presence of callbacks. Our algorithm preprocesses libraries in almost linear time, after which the contribution of the library to the complexity of the client analysis is only linear, and only wrt the number of call sites. Third, we prove that combinatorial algorithms for Dyck reachability on general graphs with truly sub-cubic bounds cannot be obtained without obtaining sub-cubic combinatorial algorithms for Boolean Matrix Multiplication, which is a long-standing open problem. Thus we establish that the existing combinatorial algorithms for Dyck reachability are (conditionally) optimal for general graphs. We also show that the same hardness holds for graphs of constant treewidth. Finally, we provide a prototype implementation of our algorithms for both alias analysis and data-dependence analysis. Our experimental evaluation demonstrates that the new algorithms significantly outperform all existing methods on the two problems, over real-world benchmarks.}, author = {Chatterjee, Krishnendu and Choudhary, Bhavya and Pavlogiannis, Andreas}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, location = {Los Angeles, CA, United States}, number = {POPL}, publisher = {Association for Computing Machinery}, title = {{Optimal Dyck reachability for data-dependence and alias analysis}}, doi = {10.1145/3158118}, volume = {2}, year = {2017}, } @article{10417, abstract = {We present a new dynamic partial-order reduction method for stateless model checking of concurrent programs.
A common approach for exploring program behaviors relies on enumerating the traces of the program, without storing the visited states (aka stateless exploration). As the number of distinct traces grows exponentially, dynamic partial-order reduction (DPOR) techniques have been successfully used to partition the space of traces into equivalence classes (Mazurkiewicz partitioning), with the goal of exploring only a few representative traces from each class. We introduce a new equivalence on traces under sequential consistency semantics, which we call the observation equivalence. Two traces are observationally equivalent if every read event observes the same write event in both traces. While the traditional Mazurkiewicz equivalence is control-centric, our new definition is data-centric. We show that our observation equivalence is coarser than the Mazurkiewicz equivalence, and in many cases even exponentially coarser. We devise a DPOR exploration of the trace space, called data-centric DPOR, based on the observation equivalence.}, author = {Chalupa, Marek and Chatterjee, Krishnendu and Pavlogiannis, Andreas and Sinha, Nishant and Vaidya, Kapil}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, location = {Los Angeles, CA, United States}, number = {POPL}, publisher = {Association for Computing Machinery}, title = {{Data-centric dynamic partial order reduction}}, doi = {10.1145/3158119}, volume = {2}, year = {2017}, }