@article{14820, abstract = {We consider a natural problem dealing with weighted packet selection across a rechargeable link, which, e.g., finds applications in cryptocurrency networks. The capacity of a link (u, v) is determined by how much capacity nodes u and v allocate for this link. Specifically, the input is a finite ordered sequence of packets that arrive in both directions along a link. Given (u, v) and a packet of weight x going from u to v, node u can either accept or reject the packet. If u accepts the packet, the capacity on link (u, v) decreases by x. Correspondingly, v's capacity on the link increases by x. If a node rejects the packet, this will entail a cost affinely linear in the weight of the packet. A link is “rechargeable” in the sense that the total capacity of the link has to remain constant, but the allocation of capacity at the ends of the link can depend arbitrarily on the nodes' decisions. The goal is to minimise the sum of the capacity injected into the link and the cost of rejecting packets. We show that the problem is NP-hard, but can be approximated efficiently with a ratio of (1+ε)·(1+√3) for some arbitrary ε>0.}, author = {Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X}, issn = {0304-3975}, journal = {Theoretical Computer Science}, keywords = {General Computer Science, Theoretical Computer Science}, publisher = {Elsevier}, title = {{Weighted packet selection for rechargeable links in cryptocurrency networks: Complexity and approximation}}, doi = {10.1016/j.tcs.2023.114353}, volume = {989}, year = {2024}, } @inproceedings{15006, abstract = {Graphical games are a useful framework for modeling the interactions of (selfish) agents who are connected via an underlying topology and whose behaviors influence each other. They have wide applications ranging from computer science to economics and biology. Yet, even though an agent’s payoff only depends on the actions of their direct neighbors in graphical games, computing the Nash equilibria and making statements about the convergence time of "natural" local dynamics in particular can be highly challenging. In this work, we present a novel approach for classifying the complexity of Nash equilibria in graphical games by establishing a connection to local graph algorithms, a subfield of distributed computing. In particular, we make the observation that the equilibria of graphical games are equivalent to locally verifiable labelings (LVL) in graphs; vertex labelings which are verifiable with constant-round local algorithms. This connection allows us to derive novel lower bounds on the convergence time to equilibrium of best-response dynamics in graphical games. Since we establish that distributed convergence can sometimes be provably slow, we also introduce and give bounds on an intuitive notion of "time-constrained" inefficiency of best responses. We exemplify how our results can be used in the implementation of mechanisms that ensure convergence of best responses to a Nash equilibrium.
Our results thus also give insight into the convergence of strategy-proof algorithms for graphical games, which is still not well understood.}, author = {Hirvonen, Juho and Schmid, Laura and Chatterjee, Krishnendu and Schmid, Stefan}, booktitle = {27th International Conference on Principles of Distributed Systems}, isbn = {9783959773089}, issn = {18688969}, location = {Tokyo, Japan}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{On the convergence time in graphical games: A locality-sensitive approach}}, doi = {10.4230/LIPIcs.OPODIS.2023.11}, volume = {286}, year = {2024}, } @article{15083, abstract = {Direct reciprocity is a powerful mechanism for cooperation in social dilemmas. The very logic of reciprocity, however, seems to require that individuals are symmetric, and that everyone has the same means to influence each other’s payoffs. Yet in many applications, individuals are asymmetric. Herein, we study the effect of asymmetry in linear public good games. Individuals may differ in their endowments (their ability to contribute to a public good) and in their productivities (how effective their contributions are). Given the individuals’ productivities, we ask which allocation of endowments is optimal for cooperation. To this end, we consider two notions of optimality. The first notion focuses on the resilience of cooperation. The respective endowment distribution ensures that full cooperation is feasible even under the most adverse conditions. The second notion focuses on efficiency. The corresponding endowment distribution maximizes group welfare. Using analytical methods, we fully characterize these two endowment distributions. This analysis reveals that both optimality notions favor some endowment inequality: More productive players ought to get higher endowments. Yet the two notions disagree on how unequal endowments are supposed to be. A focus on resilience results in less inequality. With additional simulations, we show that the optimal endowment allocation needs to account for both the resilience and the efficiency of cooperation.}, author = {Hübner, Valentin and Staab, Manuel and Hilbe, Christian and Chatterjee, Krishnendu and Kleshnina, Maria}, issn = {1091-6490}, journal = {Proceedings of the National Academy of Sciences}, number = {10}, publisher = {Proceedings of the National Academy of Sciences}, title = {{Efficiency and resilience of cooperation in asymmetric social dilemmas}}, doi = {10.1073/pnas.2315558121}, volume = {121}, year = {2024}, } @misc{15108, abstract = {Computer code used in the research article "Efficiency and resilience of cooperation in asymmetric social dilemmas" (by Valentin Hübner, Manuel Staab, Christian Hilbe, Krishnendu Chatterjee, and Maria Kleshnina). We used different implementations for the case of two and three players, both described below.}, author = {Hübner, Valentin and Kleshnina, Maria}, publisher = {Zenodo}, title = {{Computer code for "Efficiency and resilience of cooperation in asymmetric social dilemmas"}}, doi = {10.5281/ZENODO.10639167}, year = {2024}, } @inproceedings{12676, abstract = {Turn-based stochastic games (aka simple stochastic games) are two-player zero-sum games played on directed graphs with probabilistic transitions. The goal of player-max is to maximize the probability to reach a target state against the adversarial player-min.
These games lie in NP ∩ coNP and are among the rare combinatorial problems that belong to this complexity class for which the existence of a polynomial-time algorithm is a major open question. While a randomized sub-exponential time algorithm exists, all known deterministic algorithms require exponential time in the worst case. An important open question has been whether faster algorithms can be obtained parametrized by the treewidth of the game graph. Even a deterministic sub-exponential time algorithm for constant-treewidth turn-based stochastic games has remained elusive. In this work, our main result is a deterministic algorithm to solve turn-based stochastic games that, given a game with n states, treewidth at most t, and the bit-complexity of the probabilistic transition function log D, has running time O((tn^2 log D)^(t log n)). In particular, our algorithm is quasi-polynomial time for games with constant or poly-logarithmic treewidth.}, author = {Chatterjee, Krishnendu and Meggendorfer, Tobias and Saona Urmeneta, Raimundo J and Svoboda, Jakub}, booktitle = {Proceedings of the 2023 Annual ACM-SIAM Symposium on Discrete Algorithms}, isbn = {9781611977554}, location = {Florence, Italy}, pages = {4590--4605}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Faster algorithm for turn-based stochastic games with bounded treewidth}}, doi = {10.1137/1.9781611977554.ch173}, year = {2023}, } @inproceedings{13142, abstract = {Reinforcement learning has received much attention for learning controllers of deterministic systems. We consider a learner-verifier framework for stochastic control systems and survey recent methods that formally guarantee a conjunction of reachability and safety properties. Given a property and a lower bound on the probability of the property being satisfied, our framework jointly learns a control policy and a formal certificate to ensure the satisfaction of the property with a desired probability threshold. Both the control policy and the formal certificate are continuous functions from states to reals, which are learned as parameterized neural networks. While in the deterministic case, the certificates are invariant and barrier functions for safety, or Lyapunov and ranking functions for liveness, in the stochastic case the certificates are supermartingales. For certificate verification, we use interval arithmetic abstract interpretation to bound the expected values of neural network functions.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde}, booktitle = {Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783031308222}, issn = {1611-3349}, location = {Paris, France}, pages = {3--25}, publisher = {Springer Nature}, title = {{A learner-verifier framework for neural network controllers and certificates of stochastic systems}}, doi = {10.1007/978-3-031-30823-9_1}, volume = {13993}, year = {2023}, } @article{12787, abstract = {Populations evolve in spatially heterogeneous environments. While a certain trait might bring a fitness advantage in some patch of the environment, a different trait might be advantageous in another patch. Here, we study the Moran birth–death process with two types of individuals in a population stretched across two patches of size N, each patch favouring one of the two types. We show that the long-term fate of such populations crucially depends on the migration rate μ between the patches.
To classify the possible fates, we use the distinction between polynomial (short) and exponential (long) timescales. We show that when μ is high, one of the two types fixates on the whole population after a number of steps that is only polynomial in N. By contrast, when μ is low, each type holds the majority in the patch where it is favoured for a number of steps that is at least exponential in N. Moreover, we precisely identify the threshold migration rate μ⋆ that separates those two scenarios, thereby exactly delineating the situations that support long-term coexistence of the two types. We also discuss the case of various cycle graphs and we present computer simulations that perfectly match our analytical results.}, author = {Svoboda, Jakub and Tkadlec, Josef and Kaveh, Kamran and Chatterjee, Krishnendu}, issn = {1471-2946}, journal = {Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences}, number = {2271}, publisher = {The Royal Society}, title = {{Coexistence times in the Moran process with environmental heterogeneity}}, doi = {10.1098/rspa.2022.0685}, volume = {479}, year = {2023}, } @article{12861, abstract = {The field of indirect reciprocity investigates how social norms can foster cooperation when individuals continuously monitor and assess each other’s social interactions. By adhering to certain social norms, cooperating individuals can improve their reputation and, in turn, receive benefits from others. Eight social norms, known as the “leading eight”, have been shown to effectively promote the evolution of cooperation as long as information is public and reliable. These norms categorize group members as either ‘good’ or ‘bad’. In this study, we examine a scenario where individuals instead assign nuanced reputation scores to each other, and only cooperate with those whose reputation exceeds a certain threshold. We find both analytically and through simulations that such quantitative assessments are error-correcting, thus facilitating cooperation in situations where information is private and unreliable. Moreover, our results identify four specific norms that are robust to such conditions, and may be relevant for helping to sustain cooperation in natural populations.}, author = {Schmid, Laura and Ekbatani, Farbod and Hilbe, Christian and Chatterjee, Krishnendu}, issn = {2041-1723}, journal = {Nature Communications}, publisher = {Springer Nature}, title = {{Quantitative assessment can stabilize indirect reciprocity under imperfect information}}, doi = {10.1038/s41467-023-37817-x}, volume = {14}, year = {2023}, } @inproceedings{14242, abstract = {We study the problem of training and certifying adversarially robust quantized neural networks (QNNs). Quantization is a technique for making neural networks more efficient by running them using low-bit integer arithmetic and is therefore commonly adopted in industry. Recent work has shown that floating-point neural networks that have been verified to be robust can become vulnerable to adversarial attacks after quantization, and certification of the quantized representation is necessary to guarantee robustness. In this work, we present quantization-aware interval bound propagation (QA-IBP), a novel method for training robust QNNs. Inspired by advances in robust learning of non-quantized networks, our training algorithm computes the gradient of an abstract representation of the actual network. Unlike existing approaches, our method can handle the discrete semantics of QNNs.
Based on QA-IBP, we also develop a complete verification procedure for verifying the adversarial robustness of QNNs, which is guaranteed to terminate and produce a correct answer. Compared to existing approaches, the key advantage of our verification procedure is that it runs entirely on GPU or other accelerator devices. We demonstrate experimentally that our approach significantly outperforms existing methods and establish the new state-of-the-art for training and certifying the robustness of QNNs.}, author = {Lechner, Mathias and Zikelic, Dorde and Chatterjee, Krishnendu and Henzinger, Thomas A and Rus, Daniela}, booktitle = {Proceedings of the 37th AAAI Conference on Artificial Intelligence}, isbn = {9781577358800}, location = {Washington, DC, United States}, number = {12}, pages = {14964--14973}, publisher = {Association for the Advancement of Artificial Intelligence}, title = {{Quantization-aware interval bound propagation for training certifiably robust quantized neural networks}}, doi = {10.1609/aaai.v37i12.26747}, volume = {37}, year = {2023}, } @inproceedings{14243, abstract = {Two-player zero-sum "graph games" are central in logic, verification, and multi-agent systems. The game proceeds by placing a token on a vertex of a graph, and allowing the players to move it to produce an infinite path, which determines the winner or payoff of the game. Traditionally, the players alternate turns in moving the token. In "bidding games", however, the players have budgets and in each turn, an auction (bidding) determines which player moves the token. So far, bidding games have only been studied as full-information games. In this work we initiate the study of partial-information bidding games: we study bidding games in which a player's initial budget is drawn from a known probability distribution. We show that while for some bidding mechanisms and objectives, it is straightforward to adapt the results from the full-information setting to the partial-information setting, for others, the analysis is significantly more challenging, requires new techniques, and gives rise to interesting results. Specifically, we study games with "mean-payoff" objectives in combination with "poorman" bidding. We construct optimal strategies for a partially-informed player who plays against a fully-informed adversary. We show that, somewhat surprisingly, the "value" under pure strategies does not necessarily exist in such games.}, author = {Avni, Guy and Jecker, Ismael R and Zikelic, Dorde}, booktitle = {Proceedings of the 37th AAAI Conference on Artificial Intelligence}, isbn = {9781577358800}, location = {Washington, DC, United States}, number = {5}, pages = {5464--5471}, title = {{Bidding graph games with partially-observable budgets}}, doi = {10.1609/aaai.v37i5.25679}, volume = {37}, year = {2023}, }