@phdthesis{12891, abstract = {The tight spatiotemporal coordination of signaling activity determining embryo patterning and the physical processes driving embryo morphogenesis renders embryonic development robust, such that key developmental processes can unfold relatively normally even outside of the full embryonic context. For instance, embryonic stem cell cultures can recapitulate the hallmarks of gastrulation, i.e. break symmetry leading to germ layer formation and morphogenesis, in a very reduced environment. This leads to questions on the specific contributions of embryo-specific features, such as the presence of extraembryonic tissues, which are inherently involved in gastrulation in the full embryonic context. To address this, we established zebrafish embryonic explants without the extraembryonic yolk cell, an important player as a signaling source and for morphogenesis during gastrulation, as a model of ex vivo development. We found that dorsal-marginal determinants are required and sufficient in these explants to form and pattern all three germ layers. However, formation of tissues that require the highest Nodal-signaling levels is variable, demonstrating a contribution of extraembryonic tissues to reaching peak Nodal signaling levels. Blastoderm explants also undergo gastrulation-like axis elongation. We found that this elongation movement shows hallmarks of oriented mesendoderm cell intercalations typically associated with dorsal tissues in the intact embryo. These are disrupted by uniform upregulation of BMP signaling activity and concomitant explant ventralization, suggesting that tight spatial control of BMP signaling is a prerequisite for explant morphogenesis. This control is achieved by Nodal signaling, which is critical for effectively downregulating BMP signaling in the mesendoderm, highlighting that Nodal signaling is not only directly required for mesendoderm cell fate specification and morphogenesis, but also for maintaining low levels of BMP signaling at the dorsal side. Collectively, we provide insights into the capacity and organization of signaling and morphogenetic domains to recapitulate features of zebrafish gastrulation outside of the full embryonic context.}, author = {Schauer, Alexandra}, issn = {2663 - 337X}, pages = {190}, publisher = {Institute of Science and Technology Austria}, title = {{Mesendoderm formation in zebrafish gastrulation: The role of extraembryonic tissues}}, doi = {10.15479/at:ista:12891}, year = {2023}, } @inproceedings{14085, abstract = {We show a (1+ϵ)-approximation algorithm for maintaining maximum s-t flow under m edge insertions in m^{1/2+o(1)} ϵ^{−1/2} amortized update time for directed, unweighted graphs. This constitutes the first sublinear dynamic maximum flow algorithm in general sparse graphs with an arbitrarily good approximation guarantee.}, author = {Goranci, Gramoz and Henzinger, Monika H}, booktitle = {50th International Colloquium on Automata, Languages, and Programming}, isbn = {9783959772785}, issn = {1868-8969}, location = {Paderborn, Germany}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Efficient data structures for incremental exact and approximate maximum flow}}, doi = {10.4230/LIPIcs.ICALP.2023.69}, volume = {261}, year = {2023}, } @inproceedings{14084, abstract = {A central problem in computational statistics is to convert a procedure for sampling combinatorial objects into a procedure for counting those objects, and vice versa.
We will consider sampling problems that come from Gibbs distributions, which are families of probability distributions over a discrete space Ω with probability mass function of the form μ^Ω_β(ω) ∝ e^{β H(ω)} for β in an interval [β_min, β_max] and H(ω) ∈ {0} ∪ [1, n]. The partition function is the normalization factor Z(β) = ∑_{ω ∈ Ω} e^{β H(ω)}, and the log partition ratio is defined as q = log(Z(β_max)/Z(β_min)). We develop a number of algorithms to estimate the counts c_x using roughly Õ(q/ε²) samples for general Gibbs distributions and Õ(n²/ε²) samples for integer-valued distributions (ignoring some second-order terms and parameters). We show this is optimal up to logarithmic factors. We illustrate with improved algorithms for counting connected subgraphs and perfect matchings in a graph.}, author = {Harris, David G. and Kolmogorov, Vladimir}, booktitle = {50th International Colloquium on Automata, Languages, and Programming}, isbn = {9783959772785}, issn = {1868-8969}, location = {Paderborn, Germany}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Parameter estimation for Gibbs distributions}}, doi = {10.4230/LIPIcs.ICALP.2023.72}, volume = {261}, year = {2023}, } @inproceedings{14086, abstract = {The maximization of submodular functions has found widespread application in areas such as machine learning, combinatorial optimization, and economics, where practitioners often wish to enforce various constraints; the matroid constraint has been investigated extensively due to its algorithmic properties and expressive power. Though tight approximation algorithms for general matroid constraints exist in theory, the running times of such algorithms typically scale quadratically, and are not practical for truly large-scale settings. Recent progress has focused on fast algorithms for important classes of matroids given in explicit form. Currently, nearly-linear time algorithms only exist for graphic and partition matroids [Alina Ene and Huy L. Nguyen, 2019]. In this work, we develop algorithms for monotone submodular maximization constrained by graphic, transversal, or laminar matroids in time near-linear in the size of their representation. Our algorithms achieve an optimal approximation of 1-1/e-ε and both generalize and accelerate the results of Ene and Nguyen [Alina Ene and Huy L. Nguyen, 2019]. In fact, the running time of our algorithm cannot be improved within the fast continuous greedy framework of Badanidiyuru and Vondrák [Ashwinkumar Badanidiyuru and Jan Vondrák, 2014]. To achieve near-linear running time, we make use of dynamic data structures that maintain bases with approximate maximum cardinality and weight under certain element updates. These data structures need to support a weight decrease operation and a novel Freeze operation that allows the algorithm to freeze elements (i.e., force them to be contained) in its basis regardless of future data structure operations. For the laminar matroid, we present a new dynamic data structure using the top tree interface of Alstrup, Holm, de Lichtenberg, and Thorup [Stephen Alstrup et al., 2005] that maintains the maximum weight basis under insertions and deletions of elements in O(log n) time. This data structure needs to support certain subtree query and path update operations that are performed on every insertion and deletion and that are non-trivial to handle in conjunction.
For the transversal matroid the Freeze operation corresponds to requiring the data structure to keep a certain set S of vertices matched, a property that we call S-stability. While there is a large body of work on dynamic matching algorithms, none are S-stable and maintain an approximate maximum weight matching under vertex updates. We give the first such algorithm for bipartite graphs with total running time linear (up to log factors) in the number of edges.}, author = {Henzinger, Monika H and Liu, Paul and Vondrák, Jan and Zheng, Da Wei}, booktitle = {50th International Colloquium on Automata, Languages, and Programming}, isbn = {9783959772785}, issn = {18688969}, location = {Paderborn, Germany}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Faster submodular maximization for several classes of matroids}}, doi = {10.4230/LIPIcs.ICALP.2023.74}, volume = {261}, year = {2023}, } @inproceedings{14083, abstract = {In this work we consider the list-decodability and list-recoverability of arbitrary q-ary codes, for all integer values of q ≥ 2. A code is called (p,L)_q-list-decodable if every radius pn Hamming ball contains less than L codewords; (p,𝓁,L)_q-list-recoverability is a generalization where we place radius pn Hamming balls on every point of a combinatorial rectangle with side length 𝓁 and again stipulate that there be less than L codewords. Our main contribution is to precisely calculate the maximum value of p for which there exist infinite families of positive rate (p,𝓁,L)_q-list-recoverable codes, the quantity we call the zero-rate threshold. Denoting this value by p_*, we in fact show that codes correcting a p_*+ε fraction of errors must have size O_ε(1), i.e., independent of n. Such a result is typically referred to as a "Plotkin bound." To complement this, a standard random code with expurgation construction shows that there exist positive rate codes correcting a p_*-ε fraction of errors. We also follow a classical proof template (typically attributed to Elias and Bassalygo) to derive from the zero-rate threshold other tradeoffs between rate and decoding radius for list-decoding and list-recovery. Technically, proving the Plotkin bound boils down to demonstrating the Schur convexity of a certain function defined on the q-simplex as well as the convexity of a univariate function derived from it. We remark that an earlier argument claimed similar results for q-ary list-decoding; however, we point out that this earlier proof is flawed.}, author = {Resch, Nicolas and Yuan, Chen and Zhang, Yihan}, booktitle = {50th International Colloquium on Automata, Languages, and Programming}, isbn = {9783959772785}, issn = {1868-8969}, location = {Paderborn, Germany}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery}}, doi = {10.4230/LIPIcs.ICALP.2023.99}, volume = {261}, year = {2023}, } @article{12697, abstract = {Models for same-material contact electrification in granular media often rely on a local charge-driving parameter whose spatial variations lead to a stochastic origin for charge exchange. Measuring the charge transfer from individual granular spheres after contacts with substrates of the same material, we find instead a “global” charging behavior, coherent over the sample’s whole surface. 
Cleaning and baking samples fully resets charging magnitude and direction, which indicates the underlying global parameter is not intrinsic to the material, but acquired from its history. Charging behavior is randomly and irreversibly affected by changes in relative humidity, hinting at a mechanism where adsorbates, in particular water, are fundamental to the charge-transfer process.}, author = {Grosjean, Galien M and Waitukaitis, Scott R}, issn = {1079-7114}, journal = {Physical Review Letters}, keywords = {General Physics, Electrostatics, Triboelectricity, Soft Matter, Acoustic Levitation, Granular Materials}, number = {9}, publisher = {American Physical Society}, title = {{Single-collision statistics reveal a global mechanism driven by sample history for contact electrification in granular media}}, doi = {10.1103/physrevlett.130.098202}, volume = {130}, year = {2023}, } @phdthesis{13175, abstract = {About 100 years ago, we discovered that our universe is inherently noisy, that is, measuring any physical quantity with a precision beyond a certain point is not possible because of an omnipresent inherent noise. We call this the quantum noise. Certain physical processes allow this quantum noise to get correlated in conjugate physical variables. These quantum correlations can be used to go beyond the potential of our inherently noisy universe and obtain a quantum advantage over classical applications. Quantum noise being inherent also means that, at the fundamental level, physical quantities are not well defined and, therefore, objects can stay in multiple states at the same time. For example, the position of a particle not being well defined means that the particle is in multiple positions at the same time. About 4 decades ago, we started exploring the possibility of using objects which can be in multiple states at the same time to increase the dimensionality in computation. Thus, the field of quantum computing was born. We discovered that quantum entanglement, a property closely related to quantum correlations, can be used to speed up the computation of certain problems, such as the factorisation of large numbers, faster than any known classical algorithm. Thus began the pursuit to make quantum computers a reality. To date, we have explored quantum control over many physical systems including photons, spins, atoms, ions and even simple circuits made up of superconducting material. However, there persists one ubiquitous theme. The more readily a system interacts with an external field or matter, the more easily we can control it. But this also means that such a system can easily interact with a noisy environment and quickly lose its coherence. Consequently, systems such as electron spins need to be protected from the environment to ensure the longevity of their coherence. Other systems, like nuclear spins, are naturally protected as they do not interact easily with the environment. But, for the same reason, it is harder to interact with such systems. After decades of experimentation with various systems, we are convinced that no one type of quantum system would be best for all quantum applications. We would need hybrid systems which are all interconnected - much like the current internet where all sorts of devices can talk to each other - but now for quantum devices. A quantum internet. Optical photons are the best contenders to carry information for the quantum internet.
They can carry quantum information cheaply and without much loss - the same reasons that have made them the backbone of our current internet. Following this direction, many systems, like trapped ions, have already demonstrated successful quantum links over large distances using optical photons. However, some of the most promising contenders for quantum computing that are based on microwave frequencies have been left behind. This is because high-energy optical photons can adversely affect fragile low-energy microwave systems. In this thesis, we present substantial progress on this missing quantum link between microwave and optics using electrooptical nonlinearities in lithium niobate. The nonlinearities are enhanced by using resonant cavities for all the involved modes, leading to the observation of strong direct coupling between optical and microwave frequencies. With this strong coupling, we are not only able to achieve almost 100\% internal conversion efficiency with low added noise, thus presenting a quantum-enabled transducer, but are also able to observe novel effects such as cooling of a microwave mode using optics. The strong coupling regime also leads to the direct observation of dynamical backaction effects between microwave and optical frequencies, which are studied in detail here. Finally, we also report the first observation of microwave-optics entanglement in the form of a two-mode squeezed vacuum state squeezed 0.7 dB below the vacuum level. With this new bridge between microwave and optics, microwave-based quantum technologies can finally be part of a quantum network based on optical photons - putting us one step closer to a future with a quantum internet. }, author = {Sahu, Rishabh}, isbn = {978-3-99078-030-5}, issn = {2663 - 337X}, keywords = {quantum optics, electrooptics, quantum networks, quantum communication, transduction}, pages = {202}, publisher = {Institute of Science and Technology Austria}, title = {{Cavity quantum electrooptics}}, doi = {10.15479/at:ista:13175}, year = {2023}, } @phdthesis{12900, abstract = {About 100 years ago, we discovered that our universe is inherently noisy, that is, measuring any physical quantity with a precision beyond a certain point is not possible because of an omnipresent inherent noise. We call this the quantum noise. Certain physical processes allow this quantum noise to get correlated in conjugate physical variables. These quantum correlations can be used to go beyond the potential of our inherently noisy universe and obtain a quantum advantage over classical applications. Quantum noise being inherent also means that, at the fundamental level, physical quantities are not well defined and, therefore, objects can stay in multiple states at the same time. For example, the position of a particle not being well defined means that the particle is in multiple positions at the same time. About 4 decades ago, we started exploring the possibility of using objects which can be in multiple states at the same time to increase the dimensionality in computation. Thus, the field of quantum computing was born. We discovered that quantum entanglement, a property closely related to quantum correlations, can be used to speed up the computation of certain problems, such as the factorisation of large numbers, faster than any known classical algorithm. Thus began the pursuit to make quantum computers a reality.
To date, we have explored quantum control over many physical systems including photons, spins, atoms, ions and even simple circuits made up of superconducting material. However, there persists one ubiquitous theme. The more readily a system interacts with an external field or matter, the more easily we can control it. But this also means that such a system can easily interact with a noisy environment and quickly lose its coherence. Consequently, systems such as electron spins need to be protected from the environment to ensure the longevity of their coherence. Other systems, like nuclear spins, are naturally protected as they do not interact easily with the environment. But, for the same reason, it is harder to interact with such systems. After decades of experimentation with various systems, we are convinced that no one type of quantum system would be best for all quantum applications. We would need hybrid systems which are all interconnected - much like the current internet where all sorts of devices can talk to each other - but now for quantum devices. A quantum internet. Optical photons are the best contenders to carry information for the quantum internet. They can carry quantum information cheaply and without much loss - the same reasons that have made them the backbone of our current internet. Following this direction, many systems, like trapped ions, have already demonstrated successful quantum links over large distances using optical photons. However, some of the most promising contenders for quantum computing that are based on microwave frequencies have been left behind. This is because high-energy optical photons can adversely affect fragile low-energy microwave systems. In this thesis, we present substantial progress on this missing quantum link between microwave and optics using electrooptical nonlinearities in lithium niobate. The nonlinearities are enhanced by using resonant cavities for all the involved modes, leading to the observation of strong direct coupling between optical and microwave frequencies. With this strong coupling, we are not only able to achieve almost 100\% internal conversion efficiency with low added noise, thus presenting a quantum-enabled transducer, but are also able to observe novel effects such as cooling of a microwave mode using optics. The strong coupling regime also leads to the direct observation of dynamical backaction effects between microwave and optical frequencies, which are studied in detail here. Finally, we also report the first observation of microwave-optics entanglement in the form of a two-mode squeezed vacuum state squeezed 0.7 dB below the vacuum level. With this new bridge between microwave and optics, microwave-based quantum technologies can finally be part of a quantum network based on optical photons - putting us one step closer to a future with a quantum internet. }, author = {Sahu, Rishabh}, isbn = {978-3-99078-030-5}, issn = {2663 - 337X}, keywords = {quantum optics, electrooptics, quantum networks, quantum communication, transduction}, pages = {190}, publisher = {Institute of Science and Technology Austria}, title = {{Cavity quantum electrooptics}}, doi = {10.15479/at:ista:12900}, year = {2023}, } @inproceedings{14242, abstract = {We study the problem of training and certifying adversarially robust quantized neural networks (QNNs). Quantization is a technique for making neural networks more efficient by running them using low-bit integer arithmetic and is therefore commonly adopted in industry.
Recent work has shown that floating-point neural networks that have been verified to be robust can become vulnerable to adversarial attacks after quantization, and certification of the quantized representation is necessary to guarantee robustness. In this work, we present quantization-aware interval bound propagation (QA-IBP), a novel method for training robust QNNs. Inspired by advances in robust learning of non-quantized networks, our training algorithm computes the gradient of an abstract representation of the actual network. Unlike existing approaches, our method can handle the discrete semantics of QNNs. Based on QA-IBP, we also develop a complete verification procedure for verifying the adversarial robustness of QNNs, which is guaranteed to terminate and produce a correct answer. Compared to existing approaches, the key advantage of our verification procedure is that it runs entirely on GPU or other accelerator devices. We demonstrate experimentally that our approach significantly outperforms existing methods and establish the new state-of-the-art for training and certifying the robustness of QNNs.}, author = {Lechner, Mathias and Zikelic, Dorde and Chatterjee, Krishnendu and Henzinger, Thomas A and Rus, Daniela}, booktitle = {Proceedings of the 37th AAAI Conference on Artificial Intelligence}, isbn = {9781577358800}, location = {Washington, DC, United States}, number = {12}, pages = {14964--14973}, publisher = {Association for the Advancement of Artificial Intelligence}, title = {{Quantization-aware interval bound propagation for training certifiably robust quantized neural networks}}, doi = {10.1609/aaai.v37i12.26747}, volume = {37}, year = {2023}, } @inproceedings{14243, abstract = {Two-player zero-sum "graph games" are central in logic, verification, and multi-agent systems. The game proceeds by placing a token on a vertex of a graph, and allowing the players to move it to produce an infinite path, which determines the winner or payoff of the game. Traditionally, the players alternate turns in moving the token. In "bidding games", however, the players have budgets and in each turn, an auction (bidding) determines which player moves the token. So far, bidding games have only been studied as full-information games. In this work we initiate the study of partial-information bidding games: we study bidding games in which a player's initial budget is drawn from a known probability distribution. We show that while for some bidding mechanisms and objectives, it is straightforward to adapt the results from the full-information setting to the partial-information setting, for others, the analysis is significantly more challenging, requires new techniques, and gives rise to interesting results. Specifically, we study games with "mean-payoff" objectives in combination with "poorman" bidding. We construct optimal strategies for a partially-informed player who plays against a fully-informed adversary. 
We show that, somewhat surprisingly, the "value" under pure strategies does not necessarily exist in such games.}, author = {Avni, Guy and Jecker, Ismael R and Zikelic, Dorde}, booktitle = {Proceedings of the 37th AAAI Conference on Artificial Intelligence}, isbn = {9781577358800}, location = {Washington, DC, United States}, number = {5}, pages = {5464--5471}, title = {{Bidding graph games with partially-observable budgets}}, doi = {10.1609/aaai.v37i5.25679}, volume = {37}, year = {2023}, } @inproceedings{14241, abstract = {We present a technique to optimize the reflectivity of a surface while preserving its overall shape. The naïve optimization of the mesh vertices using the gradients of reflectivity simulations results in undesirable distortion. In contrast, our robust formulation optimizes the surface normal as an independent variable that bridges the reflectivity term with differential rendering, and the regularization term with as-rigid-as-possible elastic energy. We further adaptively subdivide the input mesh to improve the convergence. Consequently, our method can minimize the retroreflectivity of a wide range of input shapes, resulting in sharply creased shapes ubiquitous among stealth aircraft and Sci-Fi vehicles. Furthermore, by changing the reward for the direction of the outgoing light directions, our method can be applied to other reflectivity design tasks, such as the optimization of architectural walls to concentrate light in a specific region. We have tested the proposed method using light-transport simulations and real-world 3D-printed objects.}, author = {Tojo, Kenji and Shamir, Ariel and Bickel, Bernd and Umetani, Nobuyuki}, booktitle = {SIGGRAPH 2023 Conference Proceedings}, isbn = {9798400701597}, location = {Los Angeles, CA, United States}, publisher = {Association for Computing Machinery}, title = {{Stealth shaper: Reflectivity optimization as surface stylization}}, doi = {10.1145/3588432.3591542}, year = {2023}, } @article{12562, abstract = {Presynaptic inputs determine the pattern of activation of postsynaptic neurons in a neural circuit. Molecular and genetic pathways that regulate the selective formation of subsets of presynaptic inputs are largely unknown, despite significant understanding of the general process of synaptogenesis. In this study, we have begun to identify such factors using the spinal monosynaptic stretch reflex circuit as a model system. In this neuronal circuit, Ia proprioceptive afferents establish monosynaptic connections with spinal motor neurons that project to the same muscle (termed homonymous connections) or muscles with related or synergistic function. However, monosynaptic connections are not formed with motor neurons innervating muscles with antagonistic functions. The ETS transcription factor ER81 (also known as ETV1) is expressed by all proprioceptive afferents, but only a small set of motor neuron pools in the lumbar spinal cord of the mouse. Here we use conditional mouse genetic techniques to eliminate Er81 expression selectively from motor neurons. We find that ablation of Er81 in motor neurons reduces synaptic inputs from proprioceptive afferents conveying information from homonymous and synergistic muscles, with no change observed in the connectivity pattern from antagonistic proprioceptive afferents. 
In summary, these findings suggest a role for ER81 in defined motor neuron pools to control the assembly of specific presynaptic inputs and thereby influence the profile of activation of these motor neurons.}, author = {Ladle, David R. and Hippenmeyer, Simon}, issn = {1522-1598}, journal = {Journal of Neurophysiology}, keywords = {Physiology, General Neuroscience}, number = {3}, pages = {501--512}, publisher = {American Physiological Society}, title = {{Loss of ETV1/ER81 in motor neurons leads to reduced monosynaptic inputs from proprioceptive sensory neurons}}, doi = {10.1152/jn.00172.2022}, volume = {129}, year = {2023}, } @inproceedings{13310, abstract = {Machine-learned systems are in widespread use for making decisions about humans, and it is important that they are fair, i.e., not biased against individuals based on sensitive attributes. We present runtime verification of algorithmic fairness for systems whose models are unknown, but are assumed to have a Markov chain structure. We introduce a specification language that can model many common algorithmic fairness properties, such as demographic parity, equal opportunity, and social burden. We build monitors that observe a long sequence of events as generated by a given system, and output, after each observation, a quantitative estimate of how fair or biased the system was on that run until that point in time. The estimate is proven to be correct modulo a variable error bound and a given confidence level, where the error bound gets tighter as the observed sequence gets longer. Our monitors are of two types, and use, respectively, frequentist and Bayesian statistical inference techniques. While the frequentist monitors compute estimates that are objectively correct with respect to the ground truth, the Bayesian monitors compute estimates that are correct subject to a given prior belief about the system’s model. Using a prototype implementation, we show how we can monitor if a bank is fair in giving loans to applicants from different social backgrounds, and if a college is fair in admitting students while maintaining a reasonable financial burden on the society. Although they exhibit different theoretical complexities in certain cases, in our experiments, both frequentist and Bayesian monitors took less than a millisecond to update their verdicts after each observation.}, author = {Henzinger, Thomas A and Karimi, Mahyar and Kueffner, Konstantin and Mallik, Kaushik}, booktitle = {Computer Aided Verification}, isbn = {9783031377020}, issn = {1611-3349}, location = {Paris, France}, pages = {358–382}, publisher = {Springer Nature}, title = {{Monitoring algorithmic fairness}}, doi = {10.1007/978-3-031-37703-7_17}, volume = {13965}, year = {2023}, } @article{12205, abstract = {Background: This study seeks to evaluate the impact of breast cancer (BRCA) gene status on tumor dissemination pattern, surgical outcome and survival in a multicenter cohort of paired primary ovarian cancer (pOC) and recurrent ovarian cancer (rOC). Patients and Methods: Medical records and follow-up data from 190 patients were gathered retrospectively. All patients had surgery at pOC and at least one further rOC surgery at four European high-volume centers. Patients were divided into one cohort with confirmed mutation for BRCA1 and/or BRCA2 (BRCAmut) and a second cohort with BRCA wild type or unknown (BRCAwt). Patterns of tumor presentation, surgical outcome and survival data were analyzed between the two groups. 
Results: Patients with BRCAmut disease were on average 4 years younger and had significantly more tumor involvement upon diagnosis. Patients with BRCAmut disease showed higher debulking rates at all stages. Multivariate analysis showed that only patient age had significant predictive value for complete tumor resection in pOC. At rOC, however, only BRCAmut status significantly correlated with optimal debulking. Patients with BRCAmut disease showed significantly prolonged overall survival (OS) by 24.3 months. Progression-free survival (PFS) was prolonged in the BRCAmut group at all stages as well, reaching statistical significance during recurrence. Conclusions: Patients with BRCAmut disease showed a more aggressive course of disease with earlier onset and more extensive tumor dissemination at pOC. However, surgical outcome and OS were significantly better in patients with BRCAmut disease compared with patients with BRCAwt disease. We therefore propose to consider BRCAmut status in regard to patient selection for cytoreductive surgery, especially in rOC.}, author = {Glajzer, Jacek and Castillo-Tong, Dan Cacsire and Richter, Rolf and Vergote, Ignace and Kulbe, Hagen and Vanderstichele, Adriaan and Ruscito, Ilary and Trillsch, Fabian and Mustea, Alexander and Kreuzinger, Caroline and Gourley, Charlie and Gabra, Hani and Taube, Eliane T. and Dorigo, Oliver and Horst, David and Keunecke, Carlotta and Baum, Joanna and Angelotti, Timothy and Sehouli, Jalid and Braicu, Elena Ioana}, issn = {1534-4681}, journal = {Annals of Surgical Oncology}, keywords = {Oncology, Surgery}, pages = {35--45}, publisher = {Springer Nature}, title = {{Impact of BRCA mutation status on tumor dissemination pattern, surgical outcome and patient survival in primary and recurrent high-grade serous ovarian cancer: A multicenter retrospective study by the Ovarian Cancer Therapy-Innovative Models Prolong Survival (OCTIPS) consortium}}, doi = {10.1245/s10434-022-12459-3}, volume = {30}, year = {2023}, } @article{12115, author = {Glajzer, Jacek and Castillo-Tong, Dan Cacsire and Richter, Rolf and Vergote, Ignace and Kulbe, Hagen and Vanderstichele, Adriaan and Ruscito, Ilary and Trillsch, Fabian and Mustea, Alexander and Kreuzinger, Caroline and Gourley, Charlie and Gabra, Hani and Taube, Eliane T. and Dorigo, Oliver and Horst, David and Keunecke, Carlotta and Baum, Joanna and Angelotti, Timothy and Sehouli, Jalid and Braicu, Elena Ioana}, issn = {1534-4681}, journal = {Annals of Surgical Oncology}, keywords = {Oncology, Surgery}, pages = {46--47}, publisher = {Springer Nature}, title = {{ASO Visual Abstract: Impact of BRCA mutation status on tumor dissemination pattern, surgical outcome, and patient survival in primary and recurrent high-grade serous ovarian cancer (HGSOC). A multicenter, retrospective study of the ovarian cancer therapy—innovative models prolong survival (OCTIPS) consortium}}, doi = {10.1245/s10434-022-12681-z}, volume = {30}, year = {2023}, } @article{14253, abstract = {Junctions between the endoplasmic reticulum (ER) and the plasma membrane (PM) are specialized membrane contacts ubiquitous in eukaryotic cells. Concentration of intracellular signaling machinery near ER-PM junctions allows these domains to serve critical roles in lipid and Ca2+ signaling and homeostasis. Subcellular compartmentalization of protein kinase A (PKA) signaling also regulates essential cellular functions, however, no specific association between PKA and ER-PM junctional domains is known. 
Here, we show that in brain neurons type I PKA is directed to Kv2.1 channel-dependent ER-PM junctional domains via SPHKAP, a type I PKA-specific anchoring protein. SPHKAP association with type I PKA regulatory subunit RI and ER-resident VAP proteins results in the concentration of type I PKA between stacked ER cisternae associated with ER-PM junctions. This ER-associated PKA signalosome enables reciprocal regulation between PKA and Ca2+ signaling machinery to support Ca2+ influx and excitation-transcription coupling. These data reveal that neuronal ER-PM junctions support a receptor-independent form of PKA signaling driven by membrane depolarization and intracellular Ca2+, allowing conversion of information encoded in electrical signals into biochemical changes universally recognized throughout the cell.}, author = {Vierra, Nicholas C. and Ribeiro-Silva, Luisa and Kirmiz, Michael and Van Der List, Deborah and Bhandari, Pradeep and Mack, Olivia A. and Carroll, James and Le Monnier, Elodie and Aicher, Sue A. and Shigemoto, Ryuichi and Trimmer, James S.}, issn = {2041-1723}, journal = {Nature Communications}, publisher = {Springer Nature}, title = {{Neuronal ER-plasma membrane junctions couple excitation to Ca2+-activated PKA signaling}}, doi = {10.1038/s41467-023-40930-6}, volume = {14}, year = {2023}, } @inproceedings{14259, abstract = {We provide a learning-based technique for guessing a winning strategy in a parity game originating from an LTL synthesis problem. A cheaply obtained guess can be useful in several applications. Not only can the guessed strategy be applied as best-effort in cases where the game’s huge size prohibits rigorous approaches, but it can also increase the scalability of rigorous LTL synthesis in several ways. Firstly, checking whether a guessed strategy is winning is easier than constructing one. Secondly, even if the guess is wrong in some places, it can be fixed by strategy iteration faster than constructing one from scratch. Thirdly, the guess can be used in on-the-fly approaches to prioritize exploration in the most fruitful directions. In contrast to previous works, we (i) reflect the highly structured logical information in game’s states, the so-called semantic labelling, coming from the recent LTL-to-automata translations, and (ii) learn to reflect it properly by learning from previously solved games, bringing the solving process closer to human-like reasoning.}, author = {Kretinsky, Jan and Meggendorfer, Tobias and Prokop, Maximilian and Rieder, Sabine}, booktitle = {35th International Conference on Computer Aided Verification }, isbn = {9783031377051}, issn = {1611-3349}, location = {Paris, France}, pages = {390--414}, publisher = {Springer Nature}, title = {{Guessing winning policies in LTL synthesis by semantic learning}}, doi = {10.1007/978-3-031-37706-8_20}, volume = {13964}, year = {2023}, } @inproceedings{14105, abstract = {Despite their recent success, deep neural networks continue to perform poorly when they encounter distribution shifts at test time. Many recently proposed approaches try to counter this by aligning the model to the new distribution prior to inference. With no labels available this requires unsupervised objectives to adapt the model on the observed test data. In this paper, we propose Test-Time SelfTraining (TeST): a technique that takes as input a model trained on some source data and a novel data distribution at test time, and learns invariant and robust representations using a student-teacher framework. 
We find that models adapted using TeST significantly improve over baseline test-time adaptation algorithms. TeST achieves performance competitive with modern domain adaptation algorithms [4, 43], while having access to 5-10x less data at the time of adaptation. We thoroughly evaluate a variety of baselines on two tasks, object detection and image segmentation, and find that TeST sets the new state-of-the-art for test-time domain adaptation algorithms. }, author = {Sinha, Samarth and Gehler, Peter and Locatello, Francesco and Schiele, Bernt}, booktitle = {2023 IEEE/CVF Winter Conference on Applications of Computer Vision}, isbn = {9781665493475}, issn = {2642-9381}, location = {Waikoloa, HI, United States}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{TeST: Test-time Self-Training under distribution shift}}, doi = {10.1109/wacv56688.2023.00278}, year = {2023}, } @article{14256, abstract = {Context. Space asteroseismology is revolutionizing our knowledge of the internal structure and dynamics of stars. A breakthrough is ongoing with the recent discoveries of signatures of strong magnetic fields in the core of red giant stars. The key signature for such a detection is the asymmetry these fields induce in the frequency splittings of observed dipolar mixed gravito-acoustic modes. Aims. We investigate the ability of the observed asymmetries of the frequency splittings of dipolar mixed modes to constrain the geometrical properties of deep magnetic fields. Methods. We used the powerful analytical Racah-Wigner algebra used in quantum mechanics to characterize the geometrical couplings of dipolar mixed oscillation modes with various realistically plausible topologies of fossil magnetic fields. We also computed the induced perturbation of their frequencies. Results. First, in the case of an oblique magnetic dipole, we provide the exact analytical expression of the asymmetry as a function of the angle between the rotation and magnetic axes. Its value provides a direct measure of this angle. Second, considering a combination of axisymmetric dipolar and quadrupolar fields, we show how the asymmetry is blind to the unraveling of the relative strength and sign of each component. Finally, in the case of a given multipole, we show that a negative asymmetry is a signature of non-axisymmetric topologies. Conclusions. Asymmetries of dipolar mixed modes provide a key bit of information on the geometrical topology of deep fossil magnetic fields, but this is insufficient on its own. Asteroseismic constraints should therefore be combined with spectropolarimetric observations and numerical simulations, which aim to predict the more probable stable large-scale geometries.}, author = {Mathis, S. and Bugnet, Lisa Annabelle}, issn = {1432-0746}, journal = {Astronomy and Astrophysics}, publisher = {EDP Sciences}, title = {{Asymmetries of frequency splittings of dipolar mixed modes: A window on the topology of deep magnetic fields}}, doi = {10.1051/0004-6361/202346832}, volume = {676}, year = {2023}, } @article{14261, abstract = {In this work, a generalized, adapted Numerov implementation capable of determining band structures of periodic quantum systems is outlined. Based on the input potential, the presented approach numerically solves the Schrödinger equation in position space at each momentum space point.
Thus, in addition to the band structure, the method inherently provides information about the state functions and probability densities in position space at each momentum space point considered. The generalized, adapted Numerov framework provided reliable estimates for a variety of increasingly complex test suites in one, two, and three dimensions. The accuracy of the proposed methodology was benchmarked against results obtained for the analytically solvable Kronig-Penney model. Furthermore, the presented numerical solver was applied to a model potential representing a 2D optical lattice being a challenging application relevant, for example, in the field of quantum computing.}, author = {Gamper, Jakob and Kluibenschedl, Florian and Weiss, Alexander K.H. and Hofer, Thomas S.}, issn = {1948-7185}, journal = {Journal of Physical Chemistry Letters}, number = {33}, pages = {7395--7403}, publisher = {American Chemical Society}, title = {{Accessing position space wave functions in band structure calculations of periodic systems - a generalized, adapted numerov implementation for one-, two-, and three-dimensional quantum problems}}, doi = {10.1021/acs.jpclett.3c01707}, volume = {14}, year = {2023}, } @inproceedings{14208, abstract = {This paper focuses on over-parameterized deep neural networks (DNNs) with ReLU activation functions and proves that when the data distribution is well-separated, DNNs can achieve Bayes-optimal test error for classification while obtaining (nearly) zero-training error under the lazy training regime. For this purpose, we unify three interrelated concepts of overparameterization, benign overfitting, and the Lipschitz constant of DNNs. Our results indicate that interpolating with smoother functions leads to better generalization. Furthermore, we investigate the special case where interpolating smooth ground-truth functions is performed by DNNs under the Neural Tangent Kernel (NTK) regime for generalization. Our result demonstrates that the generalization error converges to a constant order that only depends on label noise and initialization noise, which theoretically verifies benign overfitting. Our analysis provides a tight lower bound on the normalized margin under non-smooth activation functions, as well as the minimum eigenvalue of NTK under high-dimensional settings, which has its own interest in learning theory.}, author = {Zhu, Zhenyu and Liu, Fanghui and Chrysos, Grigorios G and Locatello, Francesco and Cevher, Volkan}, booktitle = {Proceedings of the 40th International Conference on Machine Learning}, location = {Honolulu, Hawaii, United States}, pages = {43105--43128}, publisher = {ML Research Press}, title = {{Benign overfitting in deep neural networks under lazy training}}, volume = {202}, year = {2023}, } @unpublished{14209, abstract = {Diffusion models excel at generating photorealistic images from text-queries. Naturally, many approaches have been proposed to use these generative abilities to augment training datasets for downstream tasks, such as classification. However, diffusion models are themselves trained on large noisily supervised, but nonetheless, annotated datasets. It is an open question whether the generalization capabilities of diffusion models beyond using the additional data of the pre-training process for augmentation lead to improved downstream performance. We perform a systematic evaluation of existing methods to generate images from diffusion models and study new extensions to assess their benefit for data augmentation. 
While we find that personalizing diffusion models towards the target data outperforms simpler prompting strategies, we also show that using the training data of the diffusion model alone, via a simple nearest neighbor retrieval procedure, leads to even stronger downstream performance. Overall, our study probes the limitations of diffusion models for data augmentation but also highlights its potential in generating new training data to improve performance on simple downstream vision tasks.}, author = {Burg, Max F. and Wenzel, Florian and Zietlow, Dominik and Horn, Max and Makansi, Osama and Locatello, Francesco and Russell, Chris}, booktitle = {arXiv}, title = {{A data augmentation perspective on diffusion models and retrieval}}, doi = {10.48550/arXiv.2304.10253}, year = {2023}, } @inproceedings{14211, abstract = {Causal discovery methods are intrinsically constrained by the set of assumptions needed to ensure structure identifiability. Moreover additional restrictions are often imposed in order to simplify the inference task: this is the case for the Gaussian noise assumption on additive non-linear models, which is common to many causal discovery approaches. In this paper we show the shortcomings of inference under this hypothesis, analyzing the risk of edge inversion under violation of Gaussianity of the noise terms. Then, we propose a novel method for inferring the topological ordering of the variables in the causal graph, from data generated according to an additive non-linear model with a generic noise distribution. This leads to NoGAM (Not only Gaussian Additive noise Models), a causal discovery algorithm with a minimal set of assumptions and state of the art performance, experimentally benchmarked on synthetic data.}, author = {Montagna, Francesco and Noceti, Nicoletta and Rosasco, Lorenzo and Zhang, Kun and Locatello, Francesco}, booktitle = {2nd Conference on Causal Learning and Reasoning}, location = {Tübingen, Germany}, title = {{Causal discovery with score matching on additive models with arbitrary noise}}, year = {2023}, } @inproceedings{14212, abstract = {This paper demonstrates how to discover the whole causal graph from the second derivative of the log-likelihood in observational non-linear additive Gaussian noise models. Leveraging scalable machine learning approaches to approximate the score function ∇logp(X), we extend the work of Rolland et al. (2022) that only recovers the topological order from the score and requires an expensive pruning step removing spurious edges among those admitted by the ordering. Our analysis leads to DAS (acronym for Discovery At Scale), a practical algorithm that reduces the complexity of the pruning by a factor proportional to the graph size. In practice, DAS achieves competitive accuracy with current state-of-the-art while being over an order of magnitude faster. Overall, our approach enables principled and scalable causal discovery, significantly lowering the compute bar.}, author = {Montagna, Francesco and Noceti, Nicoletta and Rosasco, Lorenzo and Zhang, Kun and Locatello, Francesco}, booktitle = {2nd Conference on Causal Learning and Reasoning}, location = {Tübingen, Germany}, title = {{Scalable causal discovery with score matching}}, year = {2023}, } @inproceedings{14214, abstract = {Recent years have seen a surge of interest in learning high-level causal representations from low-level image pairs under interventions. Yet, existing efforts are largely limited to simple synthetic settings that are far away from real-world problems. 
In this paper, we present Causal Triplet, a causal representation learning benchmark featuring not only visually more complex scenes, but also two crucial desiderata commonly overlooked in previous works: (i) an actionable counterfactual setting, where only certain object-level variables allow for counterfactual observations whereas others do not; (ii) an interventional downstream task with an emphasis on out-of-distribution robustness from the independent causal mechanisms principle. Through extensive experiments, we find that models built with the knowledge of disentangled or object-centric representations significantly outperform their distributed counterparts. However, recent causal representation learning methods still struggle to identify such latent structures, indicating substantial challenges and opportunities for future work.}, author = {Liu, Yuejiang and Alahi, Alexandre and Russell, Chris and Horn, Max and Zietlow, Dominik and Schölkopf, Bernhard and Locatello, Francesco}, booktitle = {2nd Conference on Causal Learning and Reasoning}, location = {Tübingen, Germany}, title = {{Causal triplet: An open challenge for intervention-centric causal representation learning}}, year = {2023}, } @inproceedings{14217, abstract = {Neural networks embed the geometric structure of a data manifold lying in a high-dimensional space into latent representations. Ideally, the distribution of the data points in the latent space should depend only on the task, the data, the loss, and other architecture-specific constraints. However, factors such as the random weights initialization, training hyperparameters, or other sources of randomness in the training phase may induce incoherent latent spaces that hinder any form of reuse. Nevertheless, we empirically observe that, under the same data and modeling choices, the angles between the encodings within distinct latent spaces do not change. In this work, we propose the latent similarity between each sample and a fixed set of anchors as an alternative data representation, demonstrating that it can enforce the desired invariances without any additional training. We show how neural architectures can leverage these relative representations to guarantee, in practice, invariance to latent isometries and rescalings, effectively enabling latent space communication: from zero-shot model stitching to latent space comparison between diverse settings. We extensively validate the generalization capability of our approach on different datasets, spanning various modalities (images, text, graphs), tasks (e.g., classification, reconstruction) and architectures (e.g., CNNs, GCNs, transformers).}, author = {Moschella, Luca and Maiorca, Valentino and Fumero, Marco and Norelli, Antonio and Locatello, Francesco and Rodolà, Emanuele}, booktitle = {The 11th International Conference on Learning Representations}, location = {Kigali, Rwanda}, title = {{Relative representations enable zero-shot latent space communication}}, year = {2023}, } @inproceedings{14222, abstract = {Learning generative object models from unlabelled videos is a long standing problem and required for causal scene modeling. We decompose this problem into three easier subtasks, and provide candidate solutions for each of them. Inspired by the Common Fate Principle of Gestalt Psychology, we first extract (noisy) masks of moving objects via unsupervised motion segmentation. Second, generative models are trained on the masks of the background and the moving objects, respectively. 
Third, background and foreground models are combined in a conditional "dead leaves" scene model to sample novel scene configurations where occlusions and depth layering arise naturally. To evaluate the individual stages, we introduce the Fishbowl dataset positioned between complex real-world scenes and common object-centric benchmarks of simplistic objects. We show that our approach allows learning generative models that generalize beyond the occlusions present in the input videos, and represent scenes in a modular fashion that allows sampling plausible scenes outside the training distribution by permitting, for instance, object numbers or densities not observed in the training set.}, author = {Tangemann, Matthias and Schneider, Steffen and Kügelgen, Julius von and Locatello, Francesco and Gehler, Peter and Brox, Thomas and Kümmerer, Matthias and Bethge, Matthias and Schölkopf, Bernhard}, booktitle = {2nd Conference on Causal Learning and Reasoning}, location = {Tübingen, Germany}, title = {{Unsupervised object learning via common fate}}, year = {2023}, } @inproceedings{14218, abstract = {Humans naturally decompose their environment into entities at the appropriate level of abstraction to act in the world. Allowing machine learning algorithms to derive this decomposition in an unsupervised way has become an important line of research. However, current methods are restricted to simulated data or require additional information in the form of motion or depth in order to successfully discover objects. In this work, we overcome this limitation by showing that reconstructing features from models trained in a self-supervised manner is a sufficient training signal for object-centric representations to arise in a fully unsupervised way. Our approach, DINOSAUR, significantly out-performs existing image-based object-centric learning models on simulated data and is the first unsupervised object-centric model that scales to real-world datasets such as COCO and PASCAL VOC. DINOSAUR is conceptually simple and shows competitive performance compared to more involved pipelines from the computer vision literature.}, author = {Seitzer, Maximilian and Horn, Max and Zadaianchuk, Andrii and Zietlow, Dominik and Xiao, Tianjun and Carl-Johann Simon-Gabriel, Carl-Johann Simon-Gabriel and He, Tong and Zhang, Zheng and Schölkopf, Bernhard and Brox, Thomas and Locatello, Francesco}, booktitle = {The 11th International Conference on Learning Representations}, location = {Kigali, Rwanda}, title = {{Bridging the gap to real-world object-centric learning}}, year = {2023}, } @inproceedings{14219, abstract = {In this paper, we show that recent advances in self-supervised feature learning enable unsupervised object discovery and semantic segmentation with a performance that matches the state of the field on supervised semantic segmentation 10 years ago. We propose a methodology based on unsupervised saliency masks and self-supervised feature clustering to kickstart object discovery followed by training a semantic segmentation network on pseudo-labels to bootstrap the system on images with multiple objects. 
We present results on PASCAL VOC that go far beyond the current state of the art (50.0 mIoU), and we report for the first time results on MS COCO for the whole set of 81 classes: our method discovers 34 categories with more than $20\%$ IoU, while obtaining an average IoU of 19.6 for all 81 categories.}, author = {Zadaianchuk, Andrii and Kleindessner, Matthaeus and Zhu, Yi and Locatello, Francesco and Brox, Thomas}, booktitle = {The 11th International Conference on Learning Representations}, location = {Kigali, Rwanda}, title = {{Unsupervised semantic segmentation with self-supervised object-centric representations}}, year = {2023}, } @unpublished{14333, abstract = {As causal ground truth is incredibly rare, causal discovery algorithms are commonly only evaluated on simulated data. This is concerning, given that simulations reflect common preconceptions about generating processes regarding noise distributions, model classes, and more. In this work, we propose a novel method for falsifying the output of a causal discovery algorithm in the absence of ground truth. Our key insight is that while statistical learning seeks stability across subsets of data points, causal learning should seek stability across subsets of variables. Motivated by this insight, our method relies on a notion of compatibility between causal graphs learned on different subsets of variables. We prove that detecting incompatibilities can falsify wrongly inferred causal relations due to violation of assumptions or errors from finite sample effects. Although passing such compatibility tests is only a necessary criterion for good performance, we argue that it provides strong evidence for the causal models whenever compatibility entails strong implications for the joint distribution. We also demonstrate experimentally that detection of incompatibilities can aid in causal model selection.}, author = {Faller, Philipp M. and Vankadara, Leena Chennuru and Mastakouri, Atalanti A. and Locatello, Francesco and Janzing, Dominik}, booktitle = {arXiv}, title = {{Self-compatibility: Evaluating causal discovery without ground truth}}, doi = {10.48550/arXiv.2307.09552}, year = {2023}, } @article{14277, abstract = {Living tissues are characterized by an intrinsically mechanochemical interplay of active physical forces and complex biochemical signaling pathways. Either feature alone can give rise to complex emergent phenomena, for example, mechanically driven glassy dynamics and rigidity transitions, or chemically driven reaction-diffusion instabilities. An important question is how to quantitatively assess the contribution of these different cues to the large-scale dynamics of biological materials. We address this in Madin-Darby canine kidney (MDCK) monolayers, considering both mechanochemical feedback between extracellular signal-regulated kinase (ERK) signaling activity and cellular density as well as a mechanically active tissue rheology via a self-propelled vertex model. We show that the relative strength of active migration forces to mechanochemical couplings controls a transition from a uniform active glass to periodic spatiotemporal waves. We parametrize the model from published experimental data sets on MDCK monolayers and use it to make new predictions on the correlation functions of cellular dynamics and the dynamics of topological defects associated with the oscillatory phase of cells. 
Interestingly, MDCK monolayers are best described by an intermediary parameter region in which both mechanochemical couplings and noisy active propulsion have a strong influence on the dynamics. Finally, we study how tissue rheology and ERK waves produce feedback on one another and uncover a mechanism via which tissue fluidity can be controlled by mechanochemical waves at both the local and global levels.}, author = {Boocock, Daniel R and Hirashima, Tsuyoshi and Hannezo, Edouard B}, issn = {2835-8279}, journal = {PRX Life}, number = {1}, publisher = {American Physical Society}, title = {{Interplay between mechanochemical patterning and glassy dynamics in cellular monolayers}}, doi = {10.1103/prxlife.1.013001}, volume = {1}, year = {2023}, } @article{14314, abstract = {The execution of cognitive functions requires coordinated circuit activity across different brain areas that involves the associated firing of neuronal assemblies. Here, we tested the circuit mechanism behind assembly interactions between the hippocampus and the medial prefrontal cortex (mPFC) of adult rats by recording neuronal populations during a rule-switching task. We identified functionally coupled CA1-mPFC cells that synchronized their activity beyond that expected from common spatial coding or oscillatory firing. When such cell pairs fired together, the mPFC cell strongly phase locked to CA1 theta oscillations and maintained consistent theta firing phases, independent of the theta timing of their CA1 counterpart. These functionally connected CA1-mPFC cells formed interconnected assemblies. While firing together with their CA1 assembly partners, mPFC cells fired along specific theta sequences. Our results suggest that upregulated theta oscillatory firing of mPFC cells can signal transient interactions with specific CA1 assemblies, thus enabling distributed computations.}, author = {Nardin, Michele and Käfer, Karola and Stella, Federico and Csicsvari, Jozsef L}, issn = {2211-1247}, journal = {Cell Reports}, number = {9}, publisher = {Elsevier}, title = {{Theta oscillations as a substrate for medial prefrontal-hippocampal assembly interactions}}, doi = {10.1016/j.celrep.2023.113015}, volume = {42}, year = {2023}, } @article{14315, abstract = {During apoptosis, caspases degrade 8 out of ~30 nucleoporins to irreversibly demolish the nuclear pore complex. However, for poorly understood reasons, caspases are also activated during cell differentiation. Here, we show that sublethal activation of caspases during myogenesis results in the transient proteolysis of four peripheral Nups and one transmembrane Nup. ‘Trimmed’ NPCs become nuclear export-defective, and we identified in an unbiased manner several classes of cytoplasmic, plasma membrane, and mitochondrial proteins that rapidly accumulate in the nucleus. NPC trimming by non-apoptotic caspases was also observed in neurogenesis and endoplasmic reticulum stress. Our results suggest that caspases can reversibly modulate nuclear transport activity, which allows them to function as agents of cell differentiation and adaptation at sublethal levels.}, author = {Cho, Ukrae H. and Hetzer, Martin W}, issn = {2050-084X}, journal = {eLife}, publisher = {eLife Sciences Publications}, title = {{Caspase-mediated nuclear pore complex trimming in cell differentiation and endoplasmic reticulum stress}}, doi = {10.7554/eLife.89066}, volume = {12}, year = {2023}, } @article{14319, abstract = {We study multigraphs whose edge-sets are the union of three perfect matchings, M1, M2, and M3. 
Given such a graph G and any a1, a2, a3 ∈ ℕ with a1 + a2 + a3 ≤ n - 2, we show there exists a matching M of G with |M ∩ Mi| = ai for each i ∈ {1, 2, 3}. The bound n - 2 in the theorem is best possible in general. We conjecture however that if G is bipartite, the same result holds with n - 2 replaced by n - 1. We give a construction that shows such a result would be tight. We also make a conjecture generalising the Ryser-Brualdi-Stein conjecture with colour multiplicities.}, author = {Anastos, Michael and Fabian, David and Müyesser, Alp and Szabó, Tibor}, issn = {1077-8926}, journal = {Electronic Journal of Combinatorics}, number = {3}, publisher = {Electronic Journal of Combinatorics}, title = {{Splitting matchings and the Ryser-Brualdi-Stein conjecture for multisets}}, doi = {10.37236/11714}, volume = {30}, year = {2023}, } @inproceedings{14318, abstract = {Probabilistic recurrence relations (PRRs) are a standard formalism for describing the runtime of a randomized algorithm. Given a PRR and a time limit κ, we consider the tail probability Pr[T≥κ], i.e., the probability that the randomized runtime T of the PRR exceeds κ. Our focus is the formal analysis of tail bounds that aims at finding a tight asymptotic upper bound u≥Pr[T≥κ]. To address this problem, the classical and most well-known approach is the cookbook method by Karp (JACM 1994), while other approaches are mostly limited to deriving tail bounds of specific PRRs via involved custom analysis. In this work, we propose a novel approach for deriving the common exponentially-decreasing tail bounds for PRRs whose preprocessing time and random passed sizes observe discrete or (piecewise) uniform distribution and whose recursive call is either a single procedure call or a divide-and-conquer. We first establish a theoretical approach via Markov’s inequality, and then instantiate the theoretical approach with a template-based algorithmic approach via a refined treatment of exponentiation. Experimental evaluation shows that our algorithmic approach is capable of deriving tail bounds that are (i) asymptotically tighter than Karp’s method, (ii) match the best-known manually-derived asymptotic tail bound for QuickSelect, and (iii) is only slightly worse (with a log log n factor) than the manually-proven optimal asymptotic tail bound for QuickSort. Moreover, our algorithmic approach handles all examples (including realistic PRRs such as QuickSort, QuickSelect, DiameterComputation, etc.) in less than 0.1 s, showing that our approach is efficient in practice.}, author = {Sun, Yican and Fu, Hongfei and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar}, booktitle = {Computer Aided Verification}, isbn = {9783031377082}, issn = {1611-3349}, location = {Paris, France}, pages = {16--39}, publisher = {Springer Nature}, title = {{Automated tail bound analysis for probabilistic recurrence relations}}, doi = {10.1007/978-3-031-37709-9_2}, volume = {13966}, year = {2023}, } @inproceedings{14317, abstract = {Markov decision processes can be viewed as transformers of probability distributions. While this view is useful from a practical standpoint to reason about trajectories of distributions, basic reachability and safety problems are known to be computationally intractable (i.e., Skolem-hard) to solve in such models. Further, we show that even for simple examples of MDPs, strategies for safety objectives over distributions can require infinite memory and randomization.
In light of this, we present a novel overapproximation approach to synthesize strategies in an MDP, such that a safety objective over the distributions is met. More precisely, we develop a new framework for template-based synthesis of certificates as affine distributional and inductive invariants for safety objectives in MDPs. We provide two algorithms within this framework. One can only synthesize memoryless strategies, but has relative completeness guarantees, while the other can synthesize general strategies. The runtime complexity of both algorithms is in PSPACE. We implement these algorithms and show that they can solve several non-trivial examples.}, author = {Akshay, S. and Chatterjee, Krishnendu and Meggendorfer, Tobias and Zikelic, Dorde}, booktitle = {International Conference on Computer Aided Verification}, isbn = {9783031377082}, issn = {1611-3349}, location = {Paris, France}, pages = {86--112}, publisher = {Springer Nature}, title = {{MDPs as distribution transformers: Affine invariant synthesis for safety objectives}}, doi = {10.1007/978-3-031-37709-9_5}, volume = {13966}, year = {2023}, } @article{14316, abstract = {Clathrin-mediated vesicle trafficking plays central roles in post-Golgi transport. In yeast (Saccharomyces cerevisiae), the AP-1 complex and GGA adaptors are predicted to generate distinct transport vesicles at the trans-Golgi network (TGN), and the epsin-related proteins Ent3p and Ent5p (collectively Ent3p/5p) act as accessories for these adaptors. Recently, we showed that vesicle transport from the TGN is crucial for yeast Rab5 (Vps21p)-mediated endosome formation, and that Ent3p/5p are crucial for this process, whereas AP-1 and GGA adaptors are dispensable. However, these observations were incompatible with previous studies showing that these adaptors are required for Ent3p/5p recruitment to the TGN, and thus the overall mechanism responsible for regulation of Vps21p activity remains ambiguous. Here, we investigated the functional relationships between clathrin adaptors in post-Golgi-mediated Vps21p activation. We show that AP-1 disruption in the ent3Δ5Δ mutant impaired transport of the Vps21p guanine nucleotide exchange factor Vps9p to the Vps21p compartment and severely reduced Vps21p activity. Additionally, GGA adaptors, the phosphatidylinositol-4-kinase Pik1p and Rab11 GTPases Ypt31p and Ypt32p were found to have partially overlapping functions for recruitment of AP-1 and Ent3p/5p to the TGN. These findings suggest a distinct role of clathrin adaptors for Vps21p activation in the TGN–endosome trafficking pathway.}, author = {Nagano, Makoto and Aoshima, Kaito and Shimamura, Hiroki and Siekhaus, Daria E and Toshima, Junko Y. and Toshima, Jiro}, issn = {1477-9137}, journal = {Journal of Cell Science}, number = {17}, publisher = {The Company of Biologists}, title = {{Distinct role of TGN-resident clathrin adaptors for Vps21p activation in the TGN-endosome trafficking pathway}}, doi = {10.1242/jcs.261448}, volume = {136}, year = {2023}, } @article{14320, abstract = {The development of two-dimensional materials has resulted in a diverse range of novel, high-quality compounds with increasing complexity. A key requirement for a comprehensive quantitative theory is the accurate determination of these materials’ band structure parameters. However, this task is challenging due to the intricate band structures and the indirect nature of experimental probes.
In this work, we introduce a general framework to derive band structure parameters from experimental data using deep neural networks. We applied our method to the penetration field capacitance measurement of trilayer graphene, an effective probe of its density of states. First, we demonstrate that a trained deep network gives accurate predictions for the penetration field capacitance as a function of tight-binding parameters. Next, we use the fast and accurate predictions from the trained network to automatically determine tight-binding parameters directly from experimental data, with extracted parameters being in good agreement with values in the literature. We conclude by discussing potential applications of our method to other materials and experimental techniques beyond penetration field capacitance.}, author = {Henderson, Paul M and Ghazaryan, Areg and Zibrov, Alexander A. and Young, Andrea F. and Serbyn, Maksym}, issn = {2469-9969}, journal = {Physical Review B}, number = {12}, publisher = {American Physical Society}, title = {{Deep learning extraction of band structure parameters from density of states: A case study on trilayer graphene}}, doi = {10.1103/physrevb.108.125411}, volume = {108}, year = {2023}, } @phdthesis{12732, abstract = {Nonergodic systems, whose out-of-equilibrium dynamics fail to thermalize, provide a fascinating research direction both for fundamental reasons and for application in state of the art quantum devices. Going beyond the description of statistical mechanics, ergodicity breaking yields a new paradigm in quantum many-body physics, introducing novel phases of matter with no counterpart at equilibrium. In this Thesis, we address different open questions in the field, focusing on disorder-induced many-body localization (MBL) and on weak ergodicity breaking in kinetically constrained models. In particular, we contribute to the debate about transport in kinetically constrained models, studying the effect of $U(1)$ conservation and inversion-symmetry breaking in a family of quantum East models. Using tensor network techniques, we analyze the dynamics of large MBL systems beyond the limit of exact numerical methods. In this setting, we approach the debated topic of the coexistence of localized and thermal eigenstates separated by energy thresholds known as many-body mobility edges. Inspired by recent experiments, our work further investigates the localization of a small bath induced by the coupling to a large localized chain, the so-called MBL proximity effect. In the first Chapter, we introduce a family of particle-conserving kinetically constrained models, inspired by the quantum East model. The system we study features strong inversion-symmetry breaking, due to the nature of the correlated hopping. We show that these models host so-called quantum Hilbert space fragmentation, consisting of disconnected subsectors in an entangled basis, and further provide an analytical description of this phenomenon. We further probe its effect on dynamics of simple product states, showing revivals in fidelity and local observables. The study of dynamics within the largest subsector reveals an anomalous transient superdiffusive behavior crossing over to slow logarithmic dynamics at later times. This work suggests that particle conserving constrained models with inversion-symmetry breaking realize new universality classes of dynamics and invite their further theoretical and experimental studies.
Next, we use kinetic constraints and disorder to design a model with many-body mobility edges in particle density. This feature allows us to study the dynamics of localized and thermal states in large systems beyond the limitations of previous studies. The time-evolution shows typical signatures of localization at small densities, replaced by thermal behavior at larger densities. Our results provide evidence in favor of the stability of many-body mobility edges, which was recently challenged by a theoretical argument. To support our findings, we probe the mechanism proposed as a cause of delocalization in many-body localized systems with mobility edges, suggesting its ineffectiveness in the model studied. In the last Chapter of this Thesis, we address the topic of the many-body localization proximity effect. We study a model inspired by recent experiments, featuring an Anderson localized chain coupled to a small bath of free hard-core bosons. The interaction among the two particle species results in non-trivial dynamics, which we probe using tensor network techniques. Our simulations show convincing evidence of the many-body localization proximity effect when the bath is composed of a single free particle and interactions are strong. We further observe an anomalous entanglement dynamics, which we explain through a phenomenological theory. Finally, we extract highly excited eigenstates of large systems, providing supplementary evidence in favor of our findings.}, author = {Brighi, Pietro}, issn = {2663-337X}, pages = {158}, publisher = {Institute of Science and Technology Austria}, title = {{Ergodicity breaking in disordered and kinetically constrained quantum many-body systems}}, doi = {10.15479/at:ista:12732}, year = {2023}, } @article{14334, abstract = {Quantum kinetically constrained models have recently attracted significant attention due to their anomalous dynamics and thermalization. In this work, we introduce a hitherto unexplored family of kinetically constrained models featuring conserved particle number and strong inversion-symmetry breaking due to facilitated hopping. We demonstrate that these models provide a generic example of so-called quantum Hilbert space fragmentation, that is manifested in disconnected sectors in the Hilbert space that are not apparent in the computational basis. Quantum Hilbert space fragmentation leads to an exponential in system size number of eigenstates with exactly zero entanglement entropy across several bipartite cuts. These eigenstates can be probed dynamically using quenches from simple initial product states. In addition, we study the particle spreading under unitary dynamics launched from the domain wall state, and find faster than diffusive dynamics at high particle densities, that crosses over into logarithmically slow relaxation at smaller densities. Using a classically simulable cellular automaton, we reproduce the logarithmic dynamics observed in the quantum case.
Our work suggests that particle conserving constrained models with inversion symmetry breaking realize so far unexplored dynamical behavior and invite their further theoretical and experimental studies.}, author = {Brighi, Pietro and Ljubotina, Marko and Serbyn, Maksym}, issn = {2542-4653}, journal = {SciPost Physics}, keywords = {General Physics and Astronomy}, number = {3}, publisher = {SciPost Foundation}, title = {{Hilbert space fragmentation and slow dynamics in particle-conserving quantum East models}}, doi = {10.21468/scipostphys.15.3.093}, volume = {15}, year = {2023}, } @article{14321, abstract = {We demonstrate the possibility of a coupling between the magnetization direction of a ferromagnet and the tilting angle of adsorbed achiral molecules. To illustrate the mechanism of the coupling, we analyze a minimal Stoner model that includes Rashba spin–orbit coupling due to the electric field on the surface of the ferromagnet. The proposed mechanism allows us to study magnetic anisotropy of the system with an extended Stoner–Wohlfarth model and argue that adsorbed achiral molecules can change magnetocrystalline anisotropy of the substrate. Our research aims to motivate further experimental studies of the current-free chirality induced spin selectivity effect involving both enantiomers.}, author = {Al Hyder, Ragheed and Cappellaro, Alberto and Lemeshko, Mikhail and Volosniev, Artem}, issn = {1089-7690}, journal = {The Journal of Chemical Physics}, keywords = {Physical and Theoretical Chemistry, General Physics and Astronomy}, number = {10}, publisher = {AIP Publishing}, title = {{Achiral dipoles on a ferromagnet can affect its magnetization direction}}, doi = {10.1063/5.0165806}, volume = {159}, year = {2023}, } @article{14342, abstract = {We propose a simple method to measure nonlinear Kerr refractive index in mid-infrared frequency range that avoids using sophisticated infrared detectors. Our approach is based on using a near-infrared probe beam which interacts with a mid-IR beam via wavelength-non-degenerate cross-phase modulation (XPM). By carefully measuring XPM-induced spectral modifications in the probe beam and comparing the experimental data with simulation results, we extract the value for the non-degenerate Kerr index. Finally, in order to obtain the value of degenerate mid-IR Kerr index, we use the well-established two-band formalism of Sheik-Bahae et al., which is shown to become particularly simple in the limit of low frequencies. The proposed technique is complementary to the conventional techniques, such as z-scan, and has the advantage of not requiring any mid-infrared detectors.}, author = {Lorenc, Dusan and Alpichshev, Zhanybek}, issn = {0003-6951}, journal = {Applied Physics Letters}, number = {9}, publisher = {AIP Publishing}, title = {{Mid-infrared Kerr index evaluation via cross-phase modulation with a near-infrared probe beam}}, doi = {10.1063/5.0161713}, volume = {123}, year = {2023}, } @article{14341, abstract = {Flows through pipes and channels are, in practice, almost always turbulent, and the multiscale eddying motion is responsible for a major part of the encountered friction losses and pumping costs1. Conversely, for pulsatile flows, in particular for aortic blood flow, turbulence levels remain low despite relatively large peak velocities. For aortic blood flow, high turbulence levels are intolerable as they would damage the shear-sensitive endothelial cell layer2,3,4,5. 
Here we show that turbulence in ordinary pipe flow is diminished if the flow is driven in a pulsatile mode that incorporates all the key features of the cardiac waveform. At Reynolds numbers comparable to those of aortic blood flow, turbulence is largely inhibited, whereas at much higher speeds, the turbulent drag is reduced by more than 25%. This specific operation mode is more efficient when compared with steady driving, which is the present situation for virtually all fluid transport processes ranging from heating circuits to water, gas and oil pipelines.}, author = {Scarselli, Davide and Lopez Alonso, Jose M and Varshney, Atul and Hof, Björn}, issn = {1476-4687}, journal = {Nature}, number = {7977}, pages = {71--74}, publisher = {Springer Nature}, title = {{Turbulence suppression by cardiac-cycle-inspired driving of pipe flow}}, doi = {10.1038/s41586-023-06399-5}, volume = {621}, year = {2023}, } @inproceedings{13120, abstract = {We formalized general (i.e., type-0) grammars using the Lean 3 proof assistant. We defined basic notions of rewrite rules and of words derived by a grammar, and used grammars to show closure of the class of type-0 languages under four operations: union, reversal, concatenation, and the Kleene star. The literature mostly focuses on Turing machine arguments, which are possibly more difficult to formalize. For the Kleene star, we could not follow the literature and came up with our own grammar-based construction.}, author = {Dvorak, Martin and Blanchette, Jasmin}, booktitle = {14th International Conference on Interactive Theorem Proving}, isbn = {9783959772846}, issn = {1868-8969}, location = {Bialystok, Poland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Closure properties of general grammars - formally verified}}, doi = {10.4230/LIPIcs.ITP.2023.15}, volume = {268}, year = {2023}, } @article{13969, abstract = {Bundling crossings is a strategy which can enhance the readability of graph drawings. In this paper we consider good drawings, i.e., we require that any two edges have at most one common point which can be a common vertex or a crossing. Our main result is that there is a polynomial-time algorithm to compute an 8-approximation of the bundled crossing number of a good drawing with no toothed hole. In general the number of toothed holes has to be added to the 8-approximation. In the special case of circular drawings the approximation factor is 8; this improves upon the 10-approximation of Fink et al. [14]. Our approach also works with the same approximation factor for families of pseudosegments, i.e., curves intersecting at most once. We also show how to compute a 9/2-approximation when the intersection graph of the pseudosegments is bipartite and has no toothed hole.}, author = {Arroyo Guevara, Alan M and Felsner, Stefan}, issn = {1526-1719}, journal = {Journal of Graph Algorithms and Applications}, number = {6}, pages = {433--457}, publisher = {Brown University}, title = {{Approximating the bundled crossing number}}, doi = {10.7155/jgaa.00629}, volume = {27}, year = {2023}, } @inproceedings{14344, abstract = {We study the Hamilton cycle problem with input a random graph G ~ G(n,p) in two different settings. In the first one, G is given to us in the form of randomly ordered adjacency lists while in the second one, we are given the adjacency matrix of G. In each of the two settings we derive a deterministic algorithm that w.h.p.
either finds a Hamilton cycle or returns a certificate that such a cycle does not exist for p = p(n) ≥ 0. The running times of our algorithms are O(n) and O(n/p) respectively, each being best possible in its own setting.}, author = {Anastos, Michael}, booktitle = {Proceedings of the Annual ACM-SIAM Symposium on Discrete Algorithms}, isbn = {9781611977554}, location = {Florence, Italy}, pages = {2286--2323}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Fast algorithms for solving the Hamilton cycle problem with high probability}}, doi = {10.1137/1.9781611977554.ch88}, volume = {2023}, year = {2023}, } @article{12710, abstract = {Surface curvature both emerges from, and influences the behavior of, living objects at length scales ranging from cell membranes to single cells to tissues and organs. The relevance of surface curvature in biology is supported by numerous experimental and theoretical investigations in recent years. In this review, first, a brief introduction to the key ideas of surface curvature in the context of biological systems is given and the challenges that arise when measuring surface curvature are discussed. Giving an overview of the emergence of curvature in biological systems, its significance at different length scales becomes apparent. On the other hand, summarizing current findings also shows that both single cells and entire cell sheets, tissues or organisms respond to curvature by modulating their shape and their migration behavior. Finally, the interplay between the distribution of morphogens or micro-organisms and the emergence of curvature across length scales is addressed with examples demonstrating these key mechanistic principles of morphogenesis. Overall, this review highlights that curved interfaces are not merely a passive by-product of the chemical, biological, and mechanical processes but that curvature acts also as a signal that co-determines these processes.}, author = {Schamberger, Barbara and Ziege, Ricardo and Anselme, Karine and Ben Amar, Martine and Bykowski, Michał and Castro, André P.G. and Cipitria, Amaia and Coles, Rhoslyn A. and Dimova, Rumiana and Eder, Michaela and Ehrig, Sebastian and Escudero, Luis M. and Evans, Myfanwy E. and Fernandes, Paulo R. and Fratzl, Peter and Geris, Liesbet and Gierlinger, Notburga and Hannezo, Edouard B and Iglič, Aleš and Kirkensgaard, Jacob J.K. and Kollmannsberger, Philip and Kowalewska, Łucja and Kurniawan, Nicholas A. and Papantoniou, Ioannis and Pieuchot, Laurent and Pires, Tiago H.V. and Renner, Lars D. and Sageman-Furnas, Andrew O. and Schröder-Turk, Gerd E. and Sengupta, Anupam and Sharma, Vikas R. and Tagua, Antonio and Tomba, Caterina and Trepat, Xavier and Waters, Sarah L. and Yeo, Edwina F. and Roschger, Andreas and Bidan, Cécile M. and Dunlop, John W.C.}, issn = {1521-4095}, journal = {Advanced Materials}, number = {13}, publisher = {Wiley}, title = {{Curvature in biological systems: Its quantification, emergence, and implications across the scales}}, doi = {10.1002/adma.202206110}, volume = {35}, year = {2023}, } @article{13340, abstract = {Photoisomerization of azobenzenes from their stable E isomer to the metastable Z state is the basis of numerous applications of these molecules. However, this reaction typically requires ultraviolet light, which limits applicability. In this study, we introduce disequilibration by sensitization under confinement (DESC), a supramolecular approach to induce the E-to-Z isomerization by using light of a desired color, including red.
DESC relies on a combination of a macrocyclic host and a photosensitizer, which act together to selectively bind and sensitize E-azobenzenes for isomerization. The Z isomer lacks strong affinity for and is expelled from the host, which can then convert additional E-azobenzenes to the Z state. In this way, the host–photosensitizer complex converts photon energy into chemical energy in the form of out-of-equilibrium photostationary states, including ones that cannot be accessed through direct photoexcitation.}, author = {Gemen, Julius and Church, Jonathan R. and Ruoko, Tero-Petri and Durandin, Nikita and Białek, Michał J. and Weissenfels, Maren and Feller, Moran and Kazes, Miri and Borin, Veniamin A. and Odaybat, Magdalena and Kalepu, Rishir and Diskin-Posner, Yael and Oron, Dan and Fuchter, Matthew J. and Priimagi, Arri and Schapiro, Igor and Klajn, Rafal}, issn = {1095-9203}, journal = {Science}, number = {6664}, pages = {1357--1363}, publisher = {American Association for the Advancement of Science}, title = {{Disequilibrating azoarenes by visible-light sensitization under confinement}}, doi = {10.1126/science.adh9059}, volume = {381}, year = {2023}, } @article{12705, abstract = {The elasticity of disordered and polydisperse polymer networks is a fundamental problem of soft matter physics that is still open. Here, we self-assemble polymer networks via simulations of a mixture of bivalent and tri- or tetravalent patchy particles, which result in an exponential strand length distribution analogous to that of experimental randomly cross-linked systems. After assembly, the network connectivity and topology are frozen and the resulting system is characterized. We find that the fractal structure of the network depends on the number density at which the assembly has been carried out, but that systems with the same mean valence and same assembly density have the same structural properties. Moreover, we compute the long-time limit of the mean-squared displacement, also known as the (squared) localization length, of the cross-links and of the middle monomers of the strands, showing that the dynamics of long strands is well described by the tube model. Finally, we find a relation connecting these two localization lengths at high density and connect the cross-link localization length to the shear modulus of the system.}, author = {Sorichetti, Valerio and Ninarello, Andrea and Ruiz-Franco, José and Hugouvieux, Virginie and Zaccarelli, Emanuela and Micheletti, Cristian and Kob, Walter and Rovigatti, Lorenzo}, issn = {1089-7690}, journal = {Journal of Chemical Physics}, number = {7}, publisher = {American Institute of Physics}, title = {{Structure and elasticity of model disordered, polydisperse, and defect-free polymer networks}}, doi = {10.1063/5.0134271}, volume = {158}, year = {2023}, } @article{12738, abstract = {We study turn-based stochastic zero-sum games with lexicographic preferences over objectives. Stochastic games are standard models in control, verification, and synthesis of stochastic reactive systems that exhibit both randomness as well as controllable and adversarial non-determinism. Lexicographic order allows one to consider multiple objectives with a strict preference order. To the best of our knowledge, stochastic games with lexicographic objectives have not been studied before. For a mixture of reachability and safety objectives, we show that deterministic lexicographically optimal strategies exist and memory is only required to remember the already satisfied and violated objectives. 
For a constant number of objectives, we show that the relevant decision problem is in NP∩coNP, matching the current known bound for single objectives; and in general the decision problem is PSPACE-hard and can be solved in NEXPTIME∩coNEXPTIME. We present an algorithm that computes the lexicographically optimal strategies via a reduction to the computation of optimal strategies in a sequence of single-objectives games. For omega-regular objectives, we restrict our analysis to one-player games, also known as Markov decision processes. We show that lexicographically optimal strategies exist and need either randomization or finite memory. We present an algorithm that solves the relevant decision problem in polynomial time. We have implemented our algorithms and report experimental results on various case studies.}, author = {Chatterjee, Krishnendu and Katoen, Joost P and Mohr, Stefanie and Weininger, Maximilian and Winkler, Tobias}, issn = {1572-8102}, journal = {Formal Methods in System Design}, publisher = {Springer Nature}, title = {{Stochastic games with lexicographic objectives}}, doi = {10.1007/s10703-023-00411-4}, year = {2023}, }