@article{7900, abstract = {Hartree–Fock theory has been justified as a mean-field approximation for fermionic systems. However, it suffers from some defects in predicting physical properties, making necessary a theory of quantum correlations. Recently, bosonization of many-body correlations has been rigorously justified as an upper bound on the correlation energy at high density with weak interactions. We review the bosonic approximation, deriving an effective Hamiltonian. We then show that for systems with Coulomb interaction this effective theory predicts collective excitations (plasmons) in accordance with the random phase approximation of Bohm and Pines, and with experimental observation.}, author = {Benedikter, Niels P}, issn = {1793-6659}, journal = {Reviews in Mathematical Physics}, number = {1}, publisher = {World Scientific}, title = {{Bosonic collective excitations in Fermi gases}}, doi = {10.1142/s0129055x20600090}, volume = {33}, year = {2021}, } @article{10852, abstract = { We review old and new results on the Fröhlich polaron model. The discussion includes the validity of the (classical) Pekar approximation in the strong coupling limit, quantum corrections to this limit, as well as the divergence of the effective polaron mass.}, author = {Seiringer, Robert}, issn = {1793-6659}, journal = {Reviews in Mathematical Physics}, keywords = {Mathematical Physics, Statistical and Nonlinear Physics}, number = {01}, publisher = {World Scientific Publishing}, title = {{The polaron at strong coupling}}, doi = {10.1142/s0129055x20600120}, volume = {33}, year = {2021}, } @phdthesis{9056, abstract = {In this thesis we study persistence of multi-covers of Euclidean balls and the geometric structures underlying their computation, in particular Delaunay mosaics and Voronoi tessellations. The k-fold cover for some discrete input point set consists of the space where at least k balls of radius r around the input points overlap. 
Persistence is a notion that captures, in some sense, the topology of the shape underlying the input. While persistence is usually computed for the union of balls, the k-fold cover is of interest as it captures local density, and thus might approximate the shape of the input better if the input data is noisy. To compute persistence of these k-fold covers, we need a discretization that is provided by higher-order Delaunay mosaics. We present and implement a simple and efficient algorithm for the computation of higher-order Delaunay mosaics, and use it to give experimental results for their combinatorial properties. The algorithm makes use of a new geometric structure, the rhomboid tiling. It contains the higher-order Delaunay mosaics as slices, and by introducing a filtration function on the tiling, we also obtain higher-order α-shapes as slices. These allow us to compute persistence of the multi-covers for varying radius r; the computation for varying k is less straight-foward and involves the rhomboid tiling directly. We apply our algorithms to experimental sphere packings to shed light on their structural properties. Finally, inspired by periodic structures in packings and materials, we propose and implement an algorithm for periodic Delaunay triangulations to be integrated into the Computational Geometry Algorithms Library (CGAL), and discuss the implications on persistence for periodic data sets.}, author = {Osang, Georg F}, issn = {2663-337X}, pages = {134}, publisher = {Institute of Science and Technology Austria}, title = {{Multi-cover persistence and Delaunay mosaics}}, doi = {10.15479/AT:ISTA:9056}, year = {2021}, } @phdthesis{9022, abstract = {In the first part of the thesis we consider Hermitian random matrices. Firstly, we consider sample covariance matrices XX∗ with X having independent identically distributed (i.i.d.) centred entries. 
We prove a Central Limit Theorem for differences of linear statistics of XX∗ and its minor after removing the first column of X. Secondly, we consider Wigner-type matrices and prove that the eigenvalue statistics near cusp singularities of the limiting density of states are universal and that they form a Pearcey process. Since the limiting eigenvalue distribution admits only square root (edge) and cubic root (cusp) singularities, this concludes the third and last remaining case of the Wigner-Dyson-Mehta universality conjecture. The main technical ingredients are an optimal local law at the cusp, and the proof of the fast relaxation to equilibrium of the Dyson Brownian motion in the cusp regime. In the second part we consider non-Hermitian matrices X with centred i.i.d. entries. We normalise the entries of X to have variance N −1. It is well known that the empirical eigenvalue density converges to the uniform distribution on the unit disk (circular law). In the first project, we prove universality of the local eigenvalue statistics close to the edge of the spectrum. This is the non-Hermitian analogue of the TracyWidom universality at the Hermitian edge. Technically we analyse the evolution of the spectral distribution of X along the Ornstein-Uhlenbeck flow for very long time (up to t = +∞). In the second project, we consider linear statistics of eigenvalues for macroscopic test functions f in the Sobolev space H2+ϵ and prove their convergence to the projection of the Gaussian Free Field on the unit disk. We prove this result for non-Hermitian matrices with real or complex entries. The main technical ingredients are: (i) local law for products of two resolvents at different spectral parameters, (ii) analysis of correlated Dyson Brownian motions. In the third and final part we discuss the mathematically rigorous application of supersymmetric techniques (SUSY ) to give a lower tail estimate of the lowest singular value of X − z, with z ∈ C. 
More precisely, we use superbosonisation formula to give an integral representation of the resolvent of (X − z)(X − z)∗ which reduces to two and three contour integrals in the complex and real case, respectively. The rigorous analysis of these integrals is quite challenging since simple saddle point analysis cannot be applied (the main contribution comes from a non-trivial manifold). Our result improves classical smoothing inequalities in the regime |z| ≈ 1; this result is essential to prove edge universality for i.i.d. non-Hermitian matrices.}, author = {Cipolloni, Giorgio}, issn = {2663-337X}, pages = {380}, publisher = {Institute of Science and Technology Austria}, title = {{Fluctuations in the spectrum of random matrices}}, doi = {10.15479/AT:ISTA:9022}, year = {2021}, } @inproceedings{9416, abstract = {We study the inductive bias of two-layer ReLU networks trained by gradient flow. We identify a class of easy-to-learn (`orthogonally separable') datasets, and characterise the solution that ReLU networks trained on such datasets converge to. Irrespective of network width, the solution turns out to be a combination of two max-margin classifiers: one corresponding to the positive data subset and one corresponding to the negative data subset. The proof is based on the recently introduced concept of extremal sectors, for which we prove a number of properties in the context of orthogonal separability. In particular, we prove stationarity of activation patterns from some time onwards, which enables a reduction of the ReLU network to an ensemble of linear subnetworks.}, author = {Bui Thi Mai, Phuong and Lampert, Christoph}, booktitle = {9th International Conference on Learning Representations}, location = {Virtual}, title = {{The inductive bias of ReLU networks on orthogonally separable data}}, year = {2021}, } @article{9225, abstract = {The Landau–Pekar equations describe the dynamics of a strongly coupled polaron. 
Here, we provide a class of initial data for which the associated effective Hamiltonian has a uniform spectral gap for all times. For such initial data, this allows us to extend the results on the adiabatic theorem for the Landau–Pekar equations and their derivation from the Fröhlich model obtained in previous works to larger times.}, author = {Feliciangeli, Dario and Rademacher, Simone Anna Elvira and Seiringer, Robert}, issn = {15730530}, journal = {Letters in Mathematical Physics}, publisher = {Springer Nature}, title = {{Persistence of the spectral gap for the Landau–Pekar equations}}, doi = {10.1007/s11005-020-01350-5}, volume = {111}, year = {2021}, } @unpublished{9787, abstract = {We investigate the Fröhlich polaron model on a three-dimensional torus, and give a proof of the second-order quantum corrections to its ground-state energy in the strong-coupling limit. Compared to previous work in the confined case, the translational symmetry (and its breaking in the Pekar approximation) makes the analysis substantially more challenging.}, author = {Feliciangeli, Dario and Seiringer, Robert}, booktitle = {arXiv}, title = {{The strongly coupled polaron on the torus: Quantum corrections to the Pekar asymptotics}}, year = {2021}, } @inproceedings{9987, abstract = {Stateless model checking (SMC) is one of the standard approaches to the verification of concurrent programs. As scheduling non-determinism creates exponentially large spaces of thread interleavings, SMC attempts to partition this space into equivalence classes and explore only a few representatives from each class. The efficiency of this approach depends on two factors: (a) the coarseness of the partitioning, and (b) the time to generate representatives in each class. For this reason, the search for coarse partitionings that are efficiently explorable is an active research challenge. In this work we present RVF-SMC , a new SMC algorithm that uses a novel reads-value-from (RVF) partitioning. 
Intuitively, two interleavings are deemed equivalent if they agree on the value obtained in each read event, and read events induce consistent causal orderings between them. The RVF partitioning is provably coarser than recent approaches based on Mazurkiewicz and “reads-from” partitionings. Our experimental evaluation reveals that RVF is quite often a very effective equivalence, as the underlying partitioning is exponentially coarser than other approaches. Moreover, RVF-SMC generates representatives very efficiently, as the reduction in the partitioning is often met with significant speed-ups in the model checking task.}, author = {Agarwal, Pratyush and Chatterjee, Krishnendu and Pathak, Shreya and Pavlogiannis, Andreas and Toman, Viktor}, booktitle = {33rd International Conference on Computer-Aided Verification }, isbn = {978-3-030-81684-1}, issn = {1611-3349}, location = {Virtual}, pages = {341--366}, publisher = {Springer Nature}, title = {{Stateless model checking under a reads-value-from equivalence}}, doi = {10.1007/978-3-030-81685-8_16}, volume = {12759 }, year = {2021}, } @phdthesis{10007, abstract = {The present thesis is concerned with the derivation of weak-strong uniqueness principles for curvature driven interface evolution problems not satisfying a comparison principle. The specific examples being treated are two-phase Navier-Stokes flow with surface tension, modeling the evolution of two incompressible, viscous and immiscible fluids separated by a sharp interface, and multiphase mean curvature flow, which serves as an idealized model for the motion of grain boundaries in an annealing polycrystalline material. Our main results - obtained in joint works with Julian Fischer, Tim Laux and Theresa M. Simon - state that prior to the formation of geometric singularities due to topology changes, the weak solution concept of Abels (Interfaces Free Bound. 
9, 2007) to two-phase Navier-Stokes flow with surface tension and the weak solution concept of Laux and Otto (Calc. Var. Partial Differential Equations 55, 2016) to multiphase mean curvature flow (for networks in R^2 or double bubbles in R^3) represents the unique solution to these interface evolution problems within the class of classical solutions, respectively. To the best of the author's knowledge, for interface evolution problems not admitting a geometric comparison principle the derivation of a weak-strong uniqueness principle represented an open problem, so that the works contained in the present thesis constitute the first positive results in this direction. The key ingredient of our approach consists of the introduction of a novel concept of relative entropies for a class of curvature driven interface evolution problems, for which the associated energy contains an interfacial contribution being proportional to the surface area of the evolving (network of) interface(s). The interfacial part of the relative entropy gives sufficient control on the interface error between a weak and a classical solution, and its time evolution can be computed, at least in principle, for any energy dissipating weak solution concept. A resulting stability estimate for the relative entropy essentially entails the above mentioned weak-strong uniqueness principles. 
The present thesis contains a detailed introduction to our relative entropy approach, which in particular highlights potential applications to other problems in curvature driven interface evolution not treated in this thesis.}, author = {Hensel, Sebastian}, issn = {2663-337X}, pages = {300}, publisher = {Institute of Science and Technology Austria}, title = {{Curvature driven interface evolution: Uniqueness properties of weak solution concepts}}, doi = {10.15479/at:ista:10007}, year = {2021}, } @article{10191, abstract = {In this work we solve the algorithmic problem of consistency verification for the TSO and PSO memory models given a reads-from map, denoted VTSO-rf and VPSO-rf, respectively. For an execution of n events over k threads and d variables, we establish novel bounds that scale as nk+1 for TSO and as nk+1· min(nk2, 2k· d) for PSO. Moreover, based on our solution to these problems, we develop an SMC algorithm under TSO and PSO that uses the RF equivalence. The algorithm is exploration-optimal, in the sense that it is guaranteed to explore each class of the RF partitioning exactly once, and spends polynomial time per class when k is bounded. Finally, we implement all our algorithms in the SMC tool Nidhugg, and perform a large number of experiments over benchmarks from existing literature. Our experimental results show that our algorithms for VTSO-rf and VPSO-rf provide significant scalability improvements over standard alternatives. Moreover, when used for SMC, the RF partitioning is often much coarser than the standard Shasha-Snir partitioning for TSO/PSO, which yields a significant speedup in the model checking task. 
}, author = {Bui, Truc Lam and Chatterjee, Krishnendu and Gautam, Tushar and Pavlogiannis, Andreas and Toman, Viktor}, issn = {2475-1421}, journal = {Proceedings of the ACM on Programming Languages}, keywords = {safety, risk, reliability and quality, software}, number = {OOPSLA}, publisher = {Association for Computing Machinery}, title = {{The reads-from equivalence for the TSO and PSO memory models}}, doi = {10.1145/3485541}, volume = {5}, year = {2021}, } @unpublished{10013, abstract = {We derive a weak-strong uniqueness principle for BV solutions to multiphase mean curvature flow of triple line clusters in three dimensions. Our proof is based on the explicit construction of a gradient-flow calibration in the sense of the recent work of Fischer et al. [arXiv:2003.05478] for any such cluster. This extends the two-dimensional construction to the three-dimensional case of surfaces meeting along triple junctions.}, author = {Hensel, Sebastian and Laux, Tim}, booktitle = {arXiv}, title = {{Weak-strong uniqueness for the mean curvature flow of double bubbles}}, doi = {10.48550/arXiv.2108.01733}, year = {2021}, } @article{9928, abstract = {There are two elementary superconducting qubit types that derive directly from the quantum harmonic oscillator. In one, the inductor is replaced by a nonlinear Josephson junction to realize the widely used charge qubits with a compact phase variable and a discrete charge wave function. In the other, the junction is added in parallel, which gives rise to an extended phase variable, continuous wave functions, and a rich energy-level structure due to the loop topology. While the corresponding rf superconducting quantum interference device Hamiltonian was introduced as a quadratic quasi-one-dimensional potential approximation to describe the fluxonium qubit implemented with long Josephson-junction arrays, in this work we implement it directly using a linear superinductor formed by a single uninterrupted aluminum wire. 
We present a large variety of qubits, all stemming from the same circuit but with drastically different characteristic energy scales. This includes flux and fluxonium qubits but also the recently introduced quasicharge qubit with strongly enhanced zero-point phase fluctuations and a heavily suppressed flux dispersion. The use of a geometric inductor results in high reproducibility of the inductive energy as guaranteed by top-down lithography—a key ingredient for intrinsically protected superconducting qubits.}, author = {Peruzzo, Matilda and Hassani, Farid and Szep, Gregory and Trioni, Andrea and Redchenko, Elena and Zemlicka, Martin and Fink, Johannes M}, issn = {2691-3399}, journal = {PRX Quantum}, keywords = {quantum physics, mesoscale and nanoscale physics}, number = {4}, pages = {040341}, publisher = {American Physical Society}, title = {{Geometric superinductance qubits: Controlling phase delocalization across a single Josephson junction}}, doi = {10.1103/PRXQuantum.2.040341}, volume = {2}, year = {2021}, } @phdthesis{10030, abstract = {This PhD thesis is primarily focused on the study of discrete transport problems, introduced for the first time in the seminal works of Maas [Maa11] and Mielke [Mie11] on finite state Markov chains and reaction-diffusion equations, respectively. More in detail, my research focuses on the study of transport costs on graphs, in particular the convergence and the stability of such problems in the discrete-to-continuum limit. This thesis also includes some results concerning non-commutative optimal transport. The first chapter of this thesis consists of a general introduction to the optimal transport problems, both in the discrete, the continuous, and the non-commutative setting. 
Chapters 2 and 3 present the content of two works, obtained in collaboration with Peter Gladbach, Eva Kopfer, and Jan Maas, where we have been able to show the convergence of discrete transport costs on periodic graphs to suitable continuous ones, which can be described by means of a homogenisation result. We first focus on the particular case of quadratic costs on the real line and then extending the result to more general costs in arbitrary dimension. Our results are the first complete characterisation of limits of transport costs on periodic graphs in arbitrary dimension which do not rely on any additional symmetry. In Chapter 4 we turn our attention to one of the intriguing connection between evolution equations and optimal transport, represented by the theory of gradient flows. We show that discrete gradient flow structures associated to a finite volume approximation of a certain class of diffusive equations (Fokker–Planck) is stable in the limit of vanishing meshes, reproving the convergence of the scheme via the method of evolutionary Γ-convergence and exploiting a more variational point of view on the problem. This is based on a collaboration with Dominik Forkert and Jan Maas. Chapter 5 represents a change of perspective, moving away from the discrete world and reaching the non-commutative one. As in the discrete case, we discuss how classical tools coming from the commutative optimal transport can be translated into the setting of density matrices. In particular, in this final chapter we present a non-commutative version of the Schrödinger problem (or entropic regularised optimal transport problem) and discuss existence and characterisation of minimisers, a duality result, and present a non-commutative version of the well-known Sinkhorn algorithm to compute the above mentioned optimisers. This is based on a joint work with Dario Feliciangeli and Augusto Gerolin. 
Finally, Appendix A and B contain some additional material and discussions, with particular attention to Harnack inequalities and the regularity of flows on discrete spaces.}, author = {Portinale, Lorenzo}, issn = {2663-337X}, publisher = {Institute of Science and Technology Austria}, title = {{Discrete-to-continuum limits of transport problems and gradient flows in the space of measures}}, doi = {10.15479/at:ista:10030}, year = {2021}, } @phdthesis{9920, abstract = {This work is concerned with two fascinating circuit quantum electrodynamics components, the Josephson junction and the geometric superinductor, and the interesting experiments that can be done by combining the two. The Josephson junction has revolutionized the field of superconducting circuits as a non-linear dissipation-less circuit element and is used in almost all superconducting qubit implementations since the 90s. On the other hand, the superinductor is a relatively new circuit element introduced as a key component of the fluxonium qubit in 2009. This is an inductor with characteristic impedance larger than the resistance quantum and self-resonance frequency in the GHz regime. The combination of these two elements can occur in two fundamental ways: in parallel and in series. When connected in parallel the two create the fluxonium qubit, a loop with large inductance and a rich energy spectrum reliant on quantum tunneling. On the other hand placing the two elements in series aids with the measurement of the IV curve of a single Josephson junction in a high impedance environment. In this limit theory predicts that the junction will behave as its dual element: the phase-slip junction. While the Josephson junction acts as a non-linear inductor the phase-slip junction has the behavior of a non-linear capacitance and can be used to measure new Josephson junction phenomena, namely Coulomb blockade of Cooper pairs and phase-locked Bloch oscillations. 
The latter experiment allows for a direct link between frequency and current which is an elusive connection in quantum metrology. This work introduces the geometric superinductor, a superconducting circuit element where the high inductance is due to the geometry rather than the material properties of the superconductor, realized from a highly miniaturized superconducting planar coil. These structures will be described and characterized as resonators and qubit inductors and progress towards the measurement of phase-locked Bloch oscillations will be presented.}, author = {Peruzzo, Matilda}, isbn = {978-3-99078-013-8}, issn = {2663-337X}, keywords = {quantum computing, superinductor, quantum metrology}, pages = {149}, publisher = {Institute of Science and Technology Austria}, title = {{Geometric superinductors and their applications in circuit quantum electrodynamics}}, doi = {10.15479/at:ista:9920}, year = {2021}, } @inproceedings{10432, abstract = {One key element behind the recent progress of machine learning has been the ability to train machine learning models in large-scale distributed shared-memory and message-passing environments. Most of these models are trained employing variants of stochastic gradient descent (SGD) based optimization, but most methods involve some type of consistency relaxation relative to sequential SGD, to mitigate its large communication or synchronization costs at scale. In this paper, we introduce a general consistency condition covering communication-reduced and asynchronous distributed SGD implementations. Our framework, called elastic consistency, decouples the system-specific aspects of the implementation from the SGD convergence requirements, giving a general way to obtain convergence bounds for a wide variety of distributed SGD methods used in practice. 
Elastic consistency can be used to re-derive or improve several previous convergence bounds in message-passing and shared-memory settings, but also to analyze new models and distribution schemes. As a direct application, we propose and analyze a new synchronization-avoiding scheduling scheme for distributed SGD, and show that it can be used to efficiently train deep convolutional models for image classification.}, author = {Nadiradze, Giorgi and Markov, Ilia and Chatterjee, Bapi and Kungurtsev, Vyacheslav and Alistarh, Dan-Adrian}, booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence}, location = {Virtual}, number = {10}, pages = {9037--9045}, title = {{Elastic consistency: A practical consistency model for distributed stochastic gradient descent}}, volume = {35}, year = {2021}, } @inproceedings{10041, abstract = {Yao’s garbling scheme is one of the most fundamental cryptographic constructions. Lindell and Pinkas (Journal of Cryptograhy 2009) gave a formal proof of security in the selective setting where the adversary chooses the challenge inputs before seeing the garbled circuit assuming secure symmetric-key encryption (and hence one-way functions). This was followed by results, both positive and negative, concerning its security in the, stronger, adaptive setting. Applebaum et al. (Crypto 2013) showed that it cannot satisfy adaptive security as is, due to a simple incompressibility argument. Jafargholi and Wichs (TCC 2017) considered a natural adaptation of Yao’s scheme (where the output mapping is sent in the online phase, together with the garbled input) that circumvents this negative result, and proved that it is adaptively secure, at least for shallow circuits. In particular, they showed that for the class of circuits of depth δ , the loss in security is at most exponential in δ . The above results all concern the simulation-based notion of security. 
In this work, we show that the upper bound of Jafargholi and Wichs is basically optimal in a strong sense. As our main result, we show that there exists a family of Boolean circuits, one for each depth δ∈N , such that any black-box reduction proving the adaptive indistinguishability of the natural adaptation of Yao’s scheme from any symmetric-key encryption has to lose a factor that is exponential in δ√ . Since indistinguishability is a weaker notion than simulation, our bound also applies to adaptive simulation. To establish our results, we build on the recent approach of Kamath et al. (Eprint 2021), which uses pebbling lower bounds in conjunction with oracle separations to prove fine-grained lower bounds on loss in cryptographic security.}, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Wichs, Daniel}, booktitle = {41st Annual International Cryptology Conference, Part II }, isbn = {978-3-030-84244-4}, issn = {1611-3349}, location = {Virtual}, pages = {486--515}, publisher = {Springer Nature}, title = {{Limits on the Adaptive Security of Yao’s Garbling}}, doi = {10.1007/978-3-030-84245-1_17}, volume = {12826}, year = {2021}, } @inproceedings{10049, abstract = {While messaging systems with strong security guarantees are widely used in practice, designing a protocol that scales efficiently to large groups and enjoys similar security guarantees remains largely open. The two existing proposals to date are ART (Cohn-Gordon et al., CCS18) and TreeKEM (IETF, The Messaging Layer Security Protocol, draft). TreeKEM is the currently considered candidate by the IETF MLS working group, but dynamic group operations (i.e. adding and removing users) can cause efficiency issues. In this paper we formalize and analyze a variant of TreeKEM which we term Tainted TreeKEM (TTKEM for short). The basic idea underlying TTKEM was suggested by Millican (MLS mailing list, February 2018). 
This version is more efficient than TreeKEM for some natural distributions of group operations, we quantify this through simulations.Our second contribution is two security proofs for TTKEM which establish post compromise and forward secrecy even against adaptive attackers. The security loss (to the underlying PKE) in the Random Oracle Model is a polynomial factor, and a quasipolynomial one in the Standard Model. Our proofs can be adapted to TreeKEM as well. Before our work no security proof for any TreeKEM-like protocol establishing tight security against an adversary who can adaptively choose the sequence of operations was known. We also are the first to prove (or even formalize) active security where the server can arbitrarily deviate from the protocol specification. Proving fully active security – where also the users can arbitrarily deviate – remains open.}, author = {Klein, Karen and Pascual Perez, Guillermo and Walter, Michael and Kamath Hosdurg, Chethan and Capretto, Margarita and Cueto Noval, Miguel and Markov, Ilia and Yeo, Michelle X and Alwen, Joel F and Pietrzak, Krzysztof Z}, booktitle = {2021 IEEE Symposium on Security and Privacy }, location = {San Francisco, CA, United States}, pages = {268--284}, publisher = {IEEE}, title = {{Keep the dirt: tainted TreeKEM, adaptively and actively secure continuous group key agreement}}, doi = {10.1109/sp40001.2021.00035}, year = {2021}, } @inproceedings{10044, abstract = {We show that Yao’s garbling scheme is adaptively indistinguishable for the class of Boolean circuits of size S and treewidth w with only a S^O(w) loss in security. For instance, circuits with constant treewidth are as a result adaptively indistinguishable with only a polynomial loss. This (partially) complements a negative result of Applebaum et al. (Crypto 2013), which showed (assuming one-way functions) that Yao’s garbling scheme cannot be adaptively simulatable. 
As main technical contributions, we introduce a new pebble game that abstracts out our security reduction and then present a pebbling strategy for this game where the number of pebbles used is roughly O(d w log(S)), d being the fan-out of the circuit. The design of the strategy relies on separators, a graph-theoretic notion with connections to circuit complexity.}, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z}, booktitle = {19th Theory of Cryptography Conference 2021}, location = {Raleigh, NC, United States}, publisher = {International Association for Cryptologic Research}, title = {{On treewidth, separators and Yao's garbling}}, year = {2021}, } @phdthesis{10422, abstract = {Those who aim to devise new materials with desirable properties usually examine present methods first. However, they will find out that some approaches can exist only conceptually without high chances to become practically useful. It seems that a numerical technique called automatic differentiation together with increasing supply of computational accelerators will soon shift many methods of the material design from the category ”unimaginable” to the category ”expensive but possible”. Approach we suggest is not an exception. Our overall goal is to have an efficient and generalizable approach allowing to solve inverse design problems. In this thesis we scratch its surface. We consider jammed systems of identical particles. And ask ourselves how the shape of those particles (or the parameters codifying it) may affect mechanical properties of the system. An indispensable part of reaching the answer is an appropriate particle parametrization. We come up with a simple, yet generalizable and purposeful scheme for it. Using our generalizable shape parameterization, we simulate the formation of a solid composed of pentagonal-like particles and measure anisotropy in the resulting elastic response. 
Through automatic differentiation techniques, we directly connect the shape parameters with the elastic response. Interestingly, for our system we find that less isotropic particles lead to a more isotropic elastic response. Together with other results known about our method it seems that it can be successfully generalized for different inverse design problems.}, author = {Piankov, Anton}, issn = {2791-4585}, publisher = {Institute of Science and Technology Austria}, title = {{Towards designer materials using customizable particle shape}}, doi = {10.15479/at:ista:10422}, year = {2021}, } @unpublished{10803, abstract = {Given the abundance of applications of ranking in recent years, addressing fairness concerns around automated ranking systems becomes necessary for increasing the trust among end-users. Previous work on fair ranking has mostly focused on application-specific fairness notions, often tailored to online advertising, and it rarely considers learning as part of the process. In this work, we show how to transfer numerous fairness notions from binary classification to a learning to rank setting. Our formalism allows us to design methods for incorporating fairness objectives with provable generalization guarantees. An extensive experimental evaluation shows that our method can improve ranking fairness substantially with no or only little loss of model quality.}, author = {Konstantinov, Nikola H and Lampert, Christoph}, booktitle = {arXiv}, title = {{Fairness through regularization for learning to rank}}, doi = {10.48550/arXiv.2102.05996}, year = {2021}, } @unpublished{10762, abstract = {Methods inspired from machine learning have recently attracted great interest in the computational study of quantum many-particle systems. So far, however, it has proven challenging to deal with microscopic models in which the total number of particles is not conserved. 
To address this issue, we propose a new variant of neural network states, which we term neural coherent states. Taking the Fröhlich impurity model as a case study, we show that neural coherent states can learn the ground state of non-additive systems very well. In particular, we observe substantial improvement over the standard coherent state estimates in the most challenging intermediate coupling regime. Our approach is generic and does not assume specific details of the system, suggesting wide applications.}, author = {Rzadkowski, Wojciech and Lemeshko, Mikhail and Mentink, Johan H.}, booktitle = {arXiv}, eprint = {2105.15193}, eprinttype = {arXiv}, title = {{Artificial neural network states for non-additive systems}}, doi = {10.48550/arXiv.2105.15193}, year = {2021}, } @phdthesis{9418, abstract = {Deep learning is best known for its empirical success across a wide range of applications spanning computer vision, natural language processing and speech. Of equal significance, though perhaps less known, are its ramifications for learning theory: deep networks have been observed to perform surprisingly well in the high-capacity regime, aka the overfitting or underspecified regime. Classically, this regime on the far right of the bias-variance curve is associated with poor generalisation; however, recent experiments with deep networks challenge this view. This thesis is devoted to investigating various aspects of underspecification in deep learning. First, we argue that deep learning models are underspecified on two levels: a) any given training dataset can be fit by many different functions, and b) any given function can be expressed by many different parameter configurations. We refer to the second kind of underspecification as parameterisation redundancy and we precisely characterise its extent. Second, we characterise the implicit criteria (the inductive bias) that guide learning in the underspecified regime. 
Specifically, we consider a nonlinear but tractable classification setting, and show that given the choice, neural networks learn classifiers with a large margin. Third, we consider learning scenarios where the inductive bias is not by itself sufficient to deal with underspecification. We then study different ways of ‘tightening the specification’: i) In the setting of representation learning with variational autoencoders, we propose a hand-crafted regulariser based on mutual information. ii) In the setting of binary classification, we consider soft-label (real-valued) supervision. We derive a generalisation bound for linear networks supervised in this way and verify that soft labels facilitate fast learning. Finally, we explore an application of soft-label supervision to the training of multi-exit models.}, author = {Bui Thi Mai, Phuong}, issn = {2663-337X}, pages = {125}, publisher = {Institute of Science and Technology Austria}, title = {{Underspecification in deep learning}}, doi = {10.15479/AT:ISTA:9418}, year = {2021}, } @inproceedings{14177, abstract = {The focus of disentanglement approaches has been on identifying independent factors of variation in data. However, the causal variables underlying real-world observations are often not statistically independent. In this work, we bridge the gap to real-world scenarios by analyzing the behavior of the most prominent disentanglement approaches on correlated data in a large-scale empirical study (including 4260 models). We show and quantify that systematically induced correlations in the dataset are being learned and reflected in the latent representations, which has implications for downstream applications of disentanglement such as fairness. 
We also demonstrate how to resolve these latent correlations, either using weak supervision during training or by post-hoc correcting a pre-trained model with a small number of labels.}, author = {Träuble, Frederik and Creager, Elliot and Kilbertus, Niki and Locatello, Francesco and Dittadi, Andrea and Goyal, Anirudh and Schölkopf, Bernhard and Bauer, Stefan}, booktitle = {Proceedings of the 38th International Conference on Machine Learning}, location = {Virtual}, pages = {10401--10412}, publisher = {ML Research Press}, title = {{On disentangled representations learned from correlated data}}, volume = {139}, year = {2021}, } @inproceedings{14176, abstract = {Intensive care units (ICU) are increasingly looking towards machine learning for methods to provide online monitoring of critically ill patients. In machine learning, online monitoring is often formulated as a supervised learning problem. Recently, contrastive learning approaches have demonstrated promising improvements over competitive supervised benchmarks. These methods rely on well-understood data augmentation techniques developed for image data which do not apply to online monitoring. In this work, we overcome this limitation by supplementing time-series data augmentation techniques with a novel contrastive learning objective which we call neighborhood contrastive learning (NCL). Our objective explicitly groups together contiguous time segments from each patient while maintaining state-specific information. 
Our experiments demonstrate a marked improvement over existing work applying contrastive methods to medical time-series.}, author = {Yèche, Hugo and Dresdner, Gideon and Locatello, Francesco and Hüser, Matthias and Rätsch, Gunnar}, booktitle = {Proceedings of 38th International Conference on Machine Learning}, location = {Virtual}, pages = {11964--11974}, publisher = {ML Research Press}, title = {{Neighborhood contrastive learning applied to online patient monitoring}}, volume = {139}, year = {2021}, } @inproceedings{14182, abstract = {When machine learning systems meet real world applications, accuracy is only one of several requirements. In this paper, we assay a complementary perspective originating from the increasing availability of pre-trained and regularly improving state-of-the-art models. While new improved models develop at a fast pace, downstream tasks vary more slowly or stay constant. Assume that we have a large unlabelled data set for which we want to maintain accurate predictions. Whenever a new and presumably better ML models becomes available, we encounter two problems: (i) given a limited budget, which data points should be re-evaluated using the new model?; and (ii) if the new predictions differ from the current ones, should we update? Problem (i) is about compute cost, which matters for very large data sets and models. Problem (ii) is about maintaining consistency of the predictions, which can be highly relevant for downstream applications; our demand is to avoid negative flips, i.e., changing correct to incorrect predictions. In this paper, we formalize the Prediction Update Problem and present an efficient probabilistic approach as answer to the above questions. 
In extensive experiments on standard classification benchmark data sets, we show that our method outperforms alternative strategies along key metrics for backward-compatible prediction updates.}, author = {Träuble, Frederik and von Kügelgen, Julius and Kleindessner, Matthäus and Locatello, Francesco and Schölkopf, Bernhard and Gehler, Peter}, booktitle = {35th Conference on Neural Information Processing Systems}, isbn = {9781713845393}, location = {Virtual}, pages = {116--128}, title = {{Backward-compatible prediction updates: A probabilistic approach}}, volume = {34}, year = {2021}, } @inproceedings{14181, abstract = {Variational Inference makes a trade-off between the capacity of the variational family and the tractability of finding an approximate posterior distribution. Instead, Boosting Variational Inference allows practitioners to obtain increasingly good posterior approximations by spending more compute. The main obstacle to widespread adoption of Boosting Variational Inference is the amount of resources necessary to improve over a strong Variational Inference baseline. In our work, we trace this limitation back to the global curvature of the KL-divergence. We characterize how the global curvature impacts time and memory consumption, address the problem with the notion of local curvature, and provide a novel approximate backtracking algorithm for estimating local curvature. 
We give new theoretical convergence rates for our algorithms and provide experimental validation on synthetic and real-world datasets.}, author = {Dresdner, Gideon and Shekhar, Saurav and Pedregosa, Fabian and Locatello, Francesco and Rätsch, Gunnar}, booktitle = {Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence}, location = {Montreal, Canada}, pages = {2337--2343}, publisher = {International Joint Conferences on Artificial Intelligence}, title = {{Boosting variational inference with locally adaptive step-sizes}}, doi = {10.24963/ijcai.2021/322}, year = {2021}, } @inproceedings{14179, abstract = {Self-supervised representation learning has shown remarkable success in a number of domains. A common practice is to perform data augmentation via hand-crafted transformations intended to leave the semantics of the data invariant. We seek to understand the empirical success of this approach from a theoretical perspective. We formulate the augmentation process as a latent variable model by postulating a partition of the latent representation into a content component, which is assumed invariant to augmentation, and a style component, which is allowed to change. Unlike prior work on disentanglement and independent component analysis, we allow for both nontrivial statistical and causal dependencies in the latent space. We study the identifiability of the latent representation based on pairs of views of the observations and prove sufficient conditions that allow us to identify the invariant content partition up to an invertible mapping in both generative and discriminative settings. We find numerical simulations with dependent latent variables are consistent with our theory. 
Lastly, we introduce Causal3DIdent, a dataset of high-dimensional, visually complex images with rich causal dependencies, which we use to study the effect of data augmentations performed in practice.}, author = {von Kügelgen, Julius and Sharma, Yash and Gresele, Luigi and Brendel, Wieland and Schölkopf, Bernhard and Besserve, Michel and Locatello, Francesco}, booktitle = {Advances in Neural Information Processing Systems}, isbn = {9781713845393}, location = {Virtual}, pages = {16451--16467}, title = {{Self-supervised learning with data augmentations provably isolates content from style}}, volume = {34}, year = {2021}, } @inproceedings{14180, abstract = {Modern neural network architectures can leverage large amounts of data to generalize well within the training distribution. However, they are less capable of systematic generalization to data drawn from unseen but related distributions, a feat that is hypothesized to require compositional reasoning and reuse of knowledge. In this work, we present Neural Interpreters, an architecture that factorizes inference in a self-attention network as a system of modules, which we call \emph{functions}. Inputs to the model are routed through a sequence of functions in a way that is end-to-end learned. The proposed architecture can flexibly compose computation along width and depth, and lends itself well to capacity extension after training. To demonstrate the versatility of Neural Interpreters, we evaluate it in two distinct settings: image classification and visual abstract reasoning on Raven Progressive Matrices. In the former, we show that Neural Interpreters perform on par with the vision transformer using fewer parameters, while being transferrable to a new task in a sample efficient manner. In the latter, we find that Neural Interpreters are competitive with respect to the state-of-the-art in terms of systematic generalization. 
}, author = {Rahaman, Nasim and Gondal, Muhammad Waleed and Joshi, Shruti and Gehler, Peter and Bengio, Yoshua and Locatello, Francesco and Schölkopf, Bernhard}, booktitle = {Advances in Neural Information Processing Systems}, isbn = {9781713845393}, location = {Virtual}, pages = {10985--10998}, title = {{Dynamic inference with neural interpreters}}, volume = {34}, year = {2021}, } @article{14117, abstract = {The two fields of machine learning and graphical causality arose and are developed separately. However, there is, now, cross-pollination and increasing interest in both fields to benefit from the advances of the other. In this article, we review fundamental concepts of causal inference and relate them to crucial open problems of machine learning, including transfer and generalization, thereby assaying how causality can contribute to modern machine learning research. This also applies in the opposite direction: we note that most work in causality starts from the premise that the causal variables are given. A central problem for AI and causality is, thus, causal representation learning, that is, the discovery of high-level causal variables from low-level observations. 
Finally, we delineate some implications of causality for machine learning and propose key research areas at the intersection of both communities.}, author = {Schölkopf, Bernhard and Locatello, Francesco and Bauer, Stefan and Ke, Nan Rosemary and Kalchbrenner, Nal and Goyal, Anirudh and Bengio, Yoshua}, issn = {1558-2256}, journal = {Proceedings of the IEEE}, keywords = {Electrical and Electronic Engineering}, number = {5}, pages = {612--634}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Toward causal representation learning}}, doi = {10.1109/jproc.2021.3058954}, volume = {109}, year = {2021}, } @inproceedings{14178, abstract = {Learning meaningful representations that disentangle the underlying structure of the data generating process is considered to be of key importance in machine learning. While disentangled representations were found to be useful for diverse tasks such as abstract reasoning and fair classification, their scalability and real-world impact remain questionable. We introduce a new high-resolution dataset with 1M simulated images and over 1,800 annotated real-world images of the same setup. In contrast to previous work, this new dataset exhibits correlations, a complex underlying structure, and allows to evaluate transfer to unseen simulated and real-world settings where the encoder i) remains in distribution or ii) is out of distribution. We propose new architectures in order to scale disentangled representation learning to realistic high-resolution settings and conduct a large-scale empirical study of disentangled representations on this dataset. 
We observe that disentanglement is a good predictor for out-of-distribution (OOD) task performance.}, author = {Dittadi, Andrea and Träuble, Frederik and Locatello, Francesco and Wüthrich, Manuel and Agrawal, Vaibhav and Winther, Ole and Bauer, Stefan and Schölkopf, Bernhard}, booktitle = {The Ninth International Conference on Learning Representations}, location = {Virtual}, title = {{On the transfer of disentangled representations in realistic settings}}, year = {2021}, } @unpublished{14221, abstract = {The world is structured in countless ways. It may be prudent to enforce corresponding structural properties to a learning algorithm's solution, such as incorporating prior beliefs, natural constraints, or causal structures. Doing so may translate to faster, more accurate, and more flexible models, which may directly relate to real-world impact. In this dissertation, we consider two different research areas that concern structuring a learning algorithm's solution: when the structure is known and when it has to be discovered.}, author = {Locatello, Francesco}, booktitle = {arXiv}, title = {{Enforcing and discovering structure in machine learning}}, doi = {10.48550/arXiv.2111.13693}, year = {2021}, } @unpublished{14278, abstract = {The Birkhoff conjecture says that the boundary of a strictly convex integrable billiard table is necessarily an ellipse. In this article, we consider a stronger notion of integrability, namely, integrability close to the boundary, and prove a local version of this conjecture: a small perturbation of almost every ellipse that preserves integrability near the boundary, is itself an ellipse. 
We apply this result to study local spectral rigidity of ellipses using the connection between the wave trace of the Laplacian and the dynamics near the boundary and establish rigidity for almost all of them.}, author = {Koval, Illya}, booktitle = {arXiv}, title = {{Local strong Birkhoff conjecture and local spectral rigidity of almost every ellipse}}, doi = {10.48550/ARXIV.2111.12171}, year = {2021}, } @phdthesis{10199, abstract = {The design and verification of concurrent systems remains an open challenge due to the non-determinism that arises from the inter-process communication. In particular, concurrent programs are notoriously difficult both to be written correctly and to be analyzed formally, as complex thread interaction has to be accounted for. The difficulties are further exacerbated when concurrent programs get executed on modern-day hardware, which contains various buffering and caching mechanisms for efficiency reasons. This causes further subtle non-determinism, which can often produce very unintuitive behavior of the concurrent programs. Model checking is at the forefront of tackling the verification problem, where the task is to decide, given as input a concurrent system and a desired property, whether the system satisfies the property. The inherent state-space explosion problem in model checking of concurrent systems causes naïve explicit methods not to scale, thus more inventive methods are required. One such method is stateless model checking (SMC), which explores in memory-efficient manner the program executions rather than the states of the program. State-of-the-art SMC is typically coupled with partial order reduction (POR) techniques, which argue that certain executions provably produce identical system behavior, thus limiting the amount of executions one needs to explore in order to cover all possible behaviors. 
Another method to tackle the state-space explosion is symbolic model checking, where the considered techniques operate on a succinct implicit representation of the input system rather than explicitly accessing the system. In this thesis we present new techniques for verification of concurrent systems. We present several novel POR methods for SMC of concurrent programs under various models of semantics, some of which account for write-buffering mechanisms. Additionally, we present novel algorithms for symbolic model checking of finite-state concurrent systems, where the desired property of the systems is to ensure a formally defined notion of fairness.}, author = {Toman, Viktor}, issn = {2663-337X}, keywords = {concurrency, verification, model checking}, pages = {166}, publisher = {Institute of Science and Technology Austria}, title = {{Improved verification techniques for concurrent systems}}, doi = {10.15479/at:ista:10199}, year = {2021}, } @article{8429, abstract = {We develop a Bayesian model (BayesRR-RC) that provides robust SNP-heritability estimation, an alternative to marker discovery, and accurate genomic prediction, taking 22 seconds per iteration to estimate 8.4 million SNP-effects and 78 SNP-heritability parameters in the UK Biobank. We find that only ≤10% of the genetic variation captured for height, body mass index, cardiovascular disease, and type 2 diabetes is attributable to proximal regulatory regions within 10kb upstream of genes, while 12-25% is attributed to coding regions, 32–44% to introns, and 22-28% to distal 10-500kb upstream regions. Up to 24% of all cis and coding regions of each chromosome are associated with each trait, with over 3,100 independent exonic and intronic regions and over 5,400 independent regulatory regions having ≥95% probability of contributing ≥0.001% to the genetic variance of these four traits. 
Our open-source software (GMRM) provides a scalable alternative to current approaches for biobank data.}, author = {Patxot, Marion and Trejo Banos, Daniel and Kousathanas, Athanasios and Orliac, Etienne J and Ojavee, Sven E and Moser, Gerhard and Sidorenko, Julia and Kutalik, Zoltan and Magi, Reedik and Visscher, Peter M and Ronnegard, Lars and Robinson, Matthew Richard}, issn = {2041-1723}, journal = {Nature Communications}, number = {1}, publisher = {Springer Nature}, title = {{Probabilistic inference of the genetic architecture underlying functional enrichment of complex traits}}, doi = {10.1038/s41467-021-27258-9}, volume = {12}, year = {2021}, } @inproceedings{10854, abstract = {Consider a distributed task where the communication network is fixed but the local inputs given to the nodes of the distributed system may change over time. In this work, we explore the following question: if some of the local inputs change, can an existing solution be updated efficiently, in a dynamic and distributed manner? To address this question, we define the batch dynamic CONGEST model in which we are given a bandwidth-limited communication network and a dynamic edge labelling defines the problem input. The task is to maintain a solution to a graph problem on the labelled graph under batch changes. We investigate, when a batch of alpha edge label changes arrive, - how much time as a function of alpha we need to update an existing solution, and - how much information the nodes have to keep in local memory between batches in order to update the solution quickly. Our work lays the foundations for the theory of input-dynamic distributed network algorithms. We give a general picture of the complexity landscape in this model, design both universal algorithms and algorithms for concrete problems, and present a general framework for lower bounds. 
The diverse time complexity of our model spans from constant time, through time polynomial in alpha, and to alpha time, which we show to be enough for any task.}, author = {Foerster, Klaus-Tycho and Korhonen, Janne and Paz, Ami and Rybicki, Joel and Schmid, Stefan}, booktitle = {Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems}, isbn = {9781450380720}, location = {Virtual, Online}, pages = {71--72}, publisher = {Association for Computing Machinery}, title = {{Input-dynamic distributed algorithms for communication networks}}, doi = {10.1145/3410220.3453923}, year = {2021}, } @article{10855, abstract = {Consider a distributed task where the communication network is fixed but the local inputs given to the nodes of the distributed system may change over time. In this work, we explore the following question: if some of the local inputs change, can an existing solution be updated efficiently, in a dynamic and distributed manner? To address this question, we define the batch dynamic CONGEST model in which we are given a bandwidth-limited communication network and a dynamic edge labelling defines the problem input. The task is to maintain a solution to a graph problem on the labeled graph under batch changes. We investigate, when a batch of α edge label changes arrive, - how much time as a function of α we need to update an existing solution, and - how much information the nodes have to keep in local memory between batches in order to update the solution quickly. Our work lays the foundations for the theory of input-dynamic distributed network algorithms. We give a general picture of the complexity landscape in this model, design both universal algorithms and algorithms for concrete problems, and present a general framework for lower bounds. 
In particular, we derive non-trivial upper bounds for two selected, contrasting problems: maintaining a minimum spanning tree and detecting cliques.}, author = {Foerster, Klaus-Tycho and Korhonen, Janne and Paz, Ami and Rybicki, Joel and Schmid, Stefan}, issn = {2476-1249}, journal = {Proceedings of the ACM on Measurement and Analysis of Computing Systems}, keywords = {Computer Networks and Communications, Hardware and Architecture, Safety, Risk, Reliability and Quality, Computer Science (miscellaneous)}, number = {1}, pages = {1--33}, publisher = {Association for Computing Machinery}, title = {{Input-dynamic distributed algorithms for communication networks}}, doi = {10.1145/3447384}, volume = {5}, year = {2021}, } @article{9293, abstract = {We consider planning problems for graphs, Markov Decision Processes (MDPs), and games on graphs in an explicit state space. While graphs represent the most basic planning model, MDPs represent interaction with nature and games on graphs represent interaction with an adversarial environment. We consider two planning problems with k different target sets: (a) the coverage problem asks whether there is a plan for each individual target set; and (b) the sequential target reachability problem asks whether the targets can be reached in a given sequence. For the coverage problem, we present a linear-time algorithm for graphs, and quadratic conditional lower bound for MDPs and games on graphs. For the sequential target problem, we present a linear-time algorithm for graphs, a sub-quadratic algorithm for MDPs, and a quadratic conditional lower bound for games on graphs. 
Our results with conditional lower bounds, based on the boolean matrix multiplication (BMM) conjecture and strong exponential time hypothesis (SETH), establish (i) model-separation results showing that for the coverage problem MDPs and games on graphs are harder than graphs, and for the sequential reachability problem games on graphs are harder than MDPs and graphs; and (ii) problem-separation results showing that for MDPs the coverage problem is harder than the sequential target problem.}, author = {Chatterjee, Krishnendu and Dvořák, Wolfgang and Henzinger, Monika H and Svozil, Alexander}, issn = {0004-3702}, journal = {Artificial Intelligence}, number = {8}, publisher = {Elsevier}, title = {{Algorithms and conditional lower bounds for planning problems}}, doi = {10.1016/j.artint.2021.103499}, volume = {297}, year = {2021}, } @misc{13063, abstract = {We develop a Bayesian model (BayesRR-RC) that provides robust SNP-heritability estimation, an alternative to marker discovery, and accurate genomic prediction, taking 22 seconds per iteration to estimate 8.4 million SNP-effects and 78 SNP-heritability parameters in the UK Biobank. We find that only $\leq$ 10\% of the genetic variation captured for height, body mass index, cardiovascular disease, and type 2 diabetes is attributable to proximal regulatory regions within 10kb upstream of genes, while 12-25% is attributed to coding regions, 32-44% to introns, and 22-28% to distal 10-500kb upstream regions. Up to 24% of all cis and coding regions of each chromosome are associated with each trait, with over 3,100 independent exonic and intronic regions and over 5,400 independent regulatory regions having >95% probability of contributing >0.001% to the genetic variance of these four traits. 
Our open-source software (GMRM) provides a scalable alternative to current approaches for biobank data.}, author = {Robinson, Matthew Richard}, publisher = {Dryad}, title = {{Probabilistic inference of the genetic architecture of functional enrichment of complex traits}}, doi = {10.5061/dryad.sqv9s4n51}, year = {2021}, } @article{9304, abstract = {The high processing cost, poor mechanical properties and moderate performance of Bi2Te3–based alloys used in thermoelectric devices limit the cost-effectiveness of this energy conversion technology. Towards solving these current challenges, in the present work, we detail a low temperature solution-based approach to produce Bi2Te3-Cu2-xTe nanocomposites with improved thermoelectric performance. Our approach consists in combining proper ratios of colloidal nanoparticles and to consolidate the resulting mixture into nanocomposites using a hot press. The transport properties of the nanocomposites are characterized and compared with those of pure Bi2Te3 nanomaterials obtained following the same procedure. In contrast with most previous works, the presence of Cu2-xTe nanodomains does not result in a significant reduction of the lattice thermal conductivity of the reference Bi2Te3 nanomaterial, which is already very low. However, the introduction of Cu2-xTe yields a nearly threefold increase of the power factor associated to a simultaneous increase of the Seebeck coefficient and electrical conductivity at temperatures above 400 K. Taking into account the band alignment of the two materials, we rationalize this increase by considering that Cu2-xTe nanostructures, with a relatively low electron affinity, are able to inject electrons into Bi2Te3, enhancing in this way its electrical conductivity. 
The simultaneous increase of the Seebeck coefficient is related to the energy filtering of charge carriers at energy barriers within Bi2Te3 domains associated with the accumulation of electrons in regions nearby a Cu2-xTe/Bi2Te3 heterojunction. Overall, with the incorporation of a proper amount of Cu2-xTe nanoparticles, we demonstrate a 250% improvement of the thermoelectric figure of merit of Bi2Te3.}, author = {Zhang, Yu and Xing, Congcong and Liu, Yu and Li, Mengyao and Xiao, Ke and Guardia, Pablo and Lee, Seungho and Han, Xu and Moghaddam, Ahmad and Roa, Joan J and Arbiol, Jordi and Ibáñez, Maria and Pan, Kai and Prato, Mirko and Xie, Ying and Cabot, Andreu}, issn = {1385-8947}, journal = {Chemical Engineering Journal}, number = {8}, publisher = {Elsevier}, title = {{Influence of copper telluride nanodomains on the transport properties of n-type bismuth telluride}}, doi = {10.1016/j.cej.2021.129374}, volume = {418}, year = {2021}, } @article{9793, abstract = {Astrocytes extensively infiltrate the neuropil to regulate critical aspects of synaptic development and function. This process is regulated by transcellular interactions between astrocytes and neurons via cell adhesion molecules. How astrocytes coordinate developmental processes among one another to parse out the synaptic neuropil and form non-overlapping territories is unknown. Here we identify a molecular mechanism regulating astrocyte-astrocyte interactions during development to coordinate astrocyte morphogenesis and gap junction coupling. We show that hepaCAM, a disease-linked, astrocyte-enriched cell adhesion molecule, regulates astrocyte competition for territory and morphological complexity in the developing mouse cortex. Furthermore, conditional deletion of Hepacam from developing astrocytes significantly impairs gap junction coupling between astrocytes and disrupts the balance between synaptic excitation and inhibition. 
Mutations in HEPACAM cause megalencephalic leukoencephalopathy with subcortical cysts in humans. Therefore, our findings suggest that disruption of astrocyte self-organization mechanisms could be an underlying cause of neural pathology.}, author = {Baldwin, Katherine T. and Tan, Christabel X. and Strader, Samuel T. and Jiang, Changyu and Savage, Justin T. and Elorza-Vidal, Xabier and Contreras, Ximena and Rülicke, Thomas and Hippenmeyer, Simon and Estévez, Raúl and Ji, Ru-Rong and Eroglu, Cagla}, issn = {1097-4199}, journal = {Neuron}, number = {15}, pages = {2427--2442.e10}, publisher = {Elsevier}, title = {{HepaCAM controls astrocyte self-organization and coupling}}, doi = {10.1016/j.neuron.2021.05.025}, volume = {109}, year = {2021}, } @article{9305, abstract = {Copper chalcogenides are outstanding thermoelectric materials for applications in the medium-high temperature range. Among different chalcogenides, while Cu2−xSe is characterized by higher thermoelectric figures of merit, Cu2−xS provides advantages in terms of low cost and element abundance. In the present work, we investigate the effect of different dopants to enhance the Cu2−xS performance and also its thermal stability. Among the tested options, Pb-doped Cu2−xS shows the highest improvement in stability against sulfur volatilization. Additionally, Pb incorporation allows tuning charge carrier concentration, which enables a significant improvement of the power factor. 
We demonstrate here that the introduction of an optimal additive amount of just 0.3% results in a threefold increase of the power factor in the middle-temperature range (500–800 K) and a record dimensionless thermoelectric figure of merit above 2 at 880 K.}, author = {Zhang, Yu and Xing, Congcong and Liu, Yu and Spadaro, Maria Chiara and Wang, Xiang and Li, Mengyao and Xiao, Ke and Zhang, Ting and Guardia, Pablo and Lim, Khak Ho and Moghaddam, Ahmad Ostovari and Llorca, Jordi and Arbiol, Jordi and Ibáñez, Maria and Cabot, Andreu}, issn = {2211-2855}, journal = {Nano Energy}, number = {7}, publisher = {Elsevier}, title = {{Doping-mediated stabilization of copper vacancies to promote thermoelectric properties of Cu2-xS}}, doi = {10.1016/j.nanoen.2021.105991}, volume = {85}, year = {2021}, } @article{9212, abstract = {Plant fitness is largely dependent on the root, the underground organ, which, besides its anchoring function, supplies the plant body with water and all nutrients necessary for growth and development. To exploit the soil effectively, roots must constantly integrate environmental signals and react through adjustment of growth and development. Important components of the root management strategy involve a rapid modulation of the root growth kinetics and growth direction, as well as an increase of the root system radius through formation of lateral roots (LRs). At the molecular level, such a fascinating growth and developmental flexibility of root organ requires regulatory networks that guarantee stability of the developmental program but also allows integration of various environmental inputs. The plant hormone auxin is one of the principal endogenous regulators of root system architecture by controlling primary root growth and formation of LR. 
In this review, we discuss recent progress in understanding molecular networks where auxin is one of the main players shaping the root system and acting as mediator between endogenous cues and environmental factors.}, author = {Cavallari, Nicola and Artner, Christina and Benková, Eva}, issn = {1943-0264}, journal = {Cold Spring Harbor Perspectives in Biology}, number = {7}, publisher = {Cold Spring Harbor Laboratory Press}, title = {{Auxin-regulated lateral root organogenesis}}, doi = {10.1101/cshperspect.a039941}, volume = {13}, year = {2021}, } @article{9953, abstract = {Chronic psychological stress is one of the most important triggers and environmental risk factors for neuropsychiatric disorders. Chronic stress can influence all organs via the secretion of stress hormones, including glucocorticoids by the adrenal glands, which coordinate the stress response across the body. In the brain, glucocorticoid receptors (GR) are expressed by various cell types including microglia, which are its resident immune cells regulating stress-induced inflammatory processes. To study the roles of microglial GR under normal homeostatic conditions and following chronic stress, we generated a mouse model in which the GR gene is depleted in microglia specifically at adulthood to prevent developmental confounds. We first confirmed that microglia were depleted in GR in our model in males and females among the cingulate cortex and the hippocampus, both stress-sensitive brain regions. Then, cohorts of microglial-GR depleted and wild-type (WT) adult female mice were housed for 3 weeks in a standard or stressful condition, using a chronic unpredictable mild stress (CUMS) paradigm. CUMS induced stress-related behavior in both microglial-GR depleted and WT animals as demonstrated by a decrease of both saccharine preference and progressive ratio breakpoint. 
Nevertheless, the hippocampal microglial and neural mechanisms underlying the adaptation to stress occurred differently between the two genotypes. Upon CUMS exposure, microglial morphology was altered in the WT controls, without any apparent effect in microglial-GR depleted mice. Furthermore, in the standard environment condition, GR depleted-microglia showed increased expression of pro-inflammatory genes, and genes involved in microglial homeostatic functions (such as Trem2, Cx3cr1 and Mertk). On the contrary, in CUMS condition, GR depleted-microglia showed reduced expression levels of pro-inflammatory genes and increased neuroprotective as well as anti-inflammatory genes compared to WT-microglia. Moreover, in microglial-GR depleted mice, but not in WT mice, CUMS led to a significant reduction of CA1 long-term potentiation and paired-pulse ratio. Lastly, differences in adult hippocampal neurogenesis were observed between the genotypes during normal homeostatic conditions, with microglial-GR deficiency increasing the formation of newborn neurons in the dentate gyrus subgranular zone independently from stress exposure. Together, these findings indicate that, although the deletion of microglial GR did not prevent the animal’s ability to respond to stress, it contributed to modulating hippocampal functions in both standard and stressful conditions, notably by shaping the microglial response to chronic stress.}, author = {Picard, Katherine and Bisht, Kanchan and Poggini, Silvia and Garofalo, Stefano and Golia, Maria Teresa and Basilico, Bernadette and Abdallah, Fatima and Ciano Albanese, Naomi and Amrein, Irmgard and Vernoux, Nathalie and Sharma, Kaushik and Hui, Chin Wai and 
Savage, Julie C. and Limatola, Cristina and Ragozzino, Davide and Maggi, Laura and Branchi, Igor and Tremblay, Marie Ève}, issn = {0889-1591}, journal = {Brain, Behavior, and Immunity}, pages = {423--439}, publisher = {Elsevier}, title = {{Microglial-glucocorticoid receptor depletion alters the response of hippocampal microglia and neurons in a chronic unpredictable mild stress paradigm in female mice}}, doi = {10.1016/j.bbi.2021.07.022}, volume = {97}, year = {2021}, } @article{10327, abstract = {Composite materials offer numerous advantages in a wide range of applications, including thermoelectrics. Here, semiconductor–metal composites are produced by just blending nanoparticles of a sulfide semiconductor obtained in aqueous solution and at room temperature with a metallic Cu powder. The obtained blend is annealed in a reducing atmosphere and afterward consolidated into dense polycrystalline pellets through spark plasma sintering (SPS). We observe that, during the annealing process, the presence of metallic copper activates a partial reduction of the PbS, resulting in the formation of PbS–Pb–CuxS composites. The presence of metallic lead during the SPS process habilitates the liquid-phase sintering of the composite. Besides, by comparing the transport properties of PbS, the PbS–Pb–CuxS composites, and PbS–CuxS composites obtained by blending PbS and CuxS nanoparticles, we demonstrate that the presence of metallic lead decisively contributes to a strong increase of the charge carrier concentration through spillover of charge carriers enabled by the low work function of lead. The increase in charge carrier concentration translates into much higher electrical conductivities and moderately lower Seebeck coefficients. These properties translate into power factors up to 2.1 mW m–1 K–2 at ambient temperature, well above those of PbS and PbS + CuxS. 
Additionally, the presence of multiple phases in the final composite results in a notable decrease in the lattice thermal conductivity. Overall, the introduction of metallic copper in the initial blend results in a significant improvement of the thermoelectric performance of PbS, reaching a dimensionless thermoelectric figure of merit ZT = 1.1 at 750 K, which represents about a 400% increase over bare PbS. Besides, an average ZTave = 0.72 in the temperature range 320–773 K is demonstrated.}, author = {Li, Mengyao and Liu, Yu and Zhang, Yu and Han, Xu and Xiao, Ke and Nabahat, Mehran and Arbiol, Jordi and Llorca, Jordi and Ibáñez, Maria and Cabot, Andreu}, issn = {1944-8252}, journal = {ACS Applied Materials and Interfaces}, keywords = {CuxS, PbS, energy conversion, nanocomposite, nanoparticle, solution synthesis, thermoelectric}, number = {43}, pages = {51373--51382}, publisher = {American Chemical Society}, title = {{PbS–Pb–CuxS composites for thermoelectric application}}, doi = {10.1021/acsami.1c15609}, volume = {13}, year = {2021}, } @article{9235, abstract = {Cu2–xS has become one of the most promising thermoelectric materials for application in the middle-high temperature range. Its advantages include the abundance, low cost, and safety of its elements and a high performance at relatively elevated temperatures. However, stability issues limit its operation current and temperature, thus calling for the optimization of the material performance in the middle temperature range. Here, we present a synthetic protocol for large scale production of covellite CuS nanoparticles at ambient temperature and atmosphere, and using water as a solvent. The crystal phase and stoichiometry of the particles are afterward tuned through an annealing process at a moderate temperature under inert or reducing atmosphere. 
While annealing under argon results in Cu1.8S nanopowder with a rhombohedral crystal phase, annealing in an atmosphere containing hydrogen leads to tetragonal Cu1.96S. High temperature X-ray diffraction analysis shows the material annealed in argon to transform to the cubic phase at ca. 400 K, while the material annealed in the presence of hydrogen undergoes two phase transitions, first to hexagonal and then to the cubic structure. The annealing atmosphere, temperature, and time allow adjustment of the density of copper vacancies and thus tuning of the charge carrier concentration and material transport properties. In this direction, the material annealed under Ar is characterized by higher electrical conductivities but lower Seebeck coefficients than the material annealed in the presence of hydrogen. By optimizing the charge carrier concentration through the annealing time, Cu2–xS with record figures of merit in the middle temperature range, up to 1.41 at 710 K, is obtained. We finally demonstrate that this strategy, based on a low-cost and scalable solution synthesis process, is also suitable for the production of high performance Cu2–xS layers using high throughput and cost-effective printing technologies.}, author = {Li, Mengyao and Liu, Yu and Zhang, Yu and Han, Xu and Zhang, Ting and Zuo, Yong and Xie, Chenyang and Xiao, Ke and Arbiol, Jordi and Llorca, Jordi and Ibáñez, Maria and Liu, Junfeng and Cabot, Andreu}, issn = {1936-086X}, journal = {ACS Nano}, keywords = {General Engineering, General Physics and Astronomy, General Materials Science}, number = {3}, pages = {4967--4978}, publisher = {American Chemical Society}, title = {{Effect of the annealing atmosphere on crystal phase and thermoelectric properties of copper sulfide}}, doi = {10.1021/acsnano.0c09866}, volume = {15}, year = {2021}, } @article{10204, abstract = {Two common representations of close packings of identical spheres consisting of hexagonal layers, called Barlow stackings, appear 
abundantly in minerals and metals. These motifs, however, occupy an identical portion of space and bear identical first-order topological signatures as measured by persistent homology. Here we present a novel method based on k-fold covers that unambiguously distinguishes between these patterns. Moreover, our approach provides topological evidence that the FCC motif is the more stable of the two in the context of evolving experimental sphere packings during the transition from disordered to an ordered state. We conclude that our approach can be generalised to distinguish between various Barlow stackings manifested in minerals and metals.}, author = {Osang, Georg F and Edelsbrunner, Herbert and Saadatfar, Mohammad}, issn = {1744-6848}, journal = {Soft Matter}, number = {40}, pages = {9107--9115}, publisher = {Royal Society of Chemistry }, title = {{Topological signatures and stability of hexagonal close packing and Barlow stackings}}, doi = {10.1039/d1sm00774b}, volume = {17}, year = {2021}, } @inproceedings{9605, abstract = {Given a finite set A ⊂ ℝ^d, let Cov_{r,k} denote the set of all points within distance r to at least k points of A. Allowing r and k to vary, we obtain a 2-parameter family of spaces that grow larger when r increases or k decreases, called the multicover bifiltration. Motivated by the problem of computing the homology of this bifiltration, we introduce two closely related combinatorial bifiltrations, one polyhedral and the other simplicial, which are both topologically equivalent to the multicover bifiltration and far smaller than a Čech-based model considered in prior work of Sheehy. Our polyhedral construction is a bifiltration of the rhomboid tiling of Edelsbrunner and Osang, and can be efficiently computed using a variant of an algorithm given by these authors as well. Using an implementation for dimension 2 and 3, we provide experimental results. 
Our simplicial construction is useful for understanding the polyhedral construction and proving its correctness. }, author = {Corbet, René and Kerber, Michael and Lesnick, Michael and Osang, Georg F}, booktitle = {Leibniz International Proceedings in Informatics}, isbn = {978-3-95977-184-9}, issn = {1868-8969}, location = {Online}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Computing the multicover bifiltration}}, doi = {10.4230/LIPIcs.SoCG.2021.27}, volume = {189}, year = {2021}, } @inproceedings{9441, abstract = {Isomanifolds are the generalization of isosurfaces to arbitrary dimension and codimension, i.e. submanifolds of ℝ^d defined as the zero set of some multivariate multivalued smooth function f: ℝ^d → ℝ^{d-n}, where n is the intrinsic dimension of the manifold. A natural way to approximate a smooth isomanifold M is to consider its Piecewise-Linear (PL) approximation M̂ based on a triangulation 𝒯 of the ambient space ℝ^d. In this paper, we describe a simple algorithm to trace isomanifolds from a given starting point. The algorithm works for arbitrary dimensions n and d, and any precision D. Our main result is that, when f (or M) has bounded complexity, the complexity of the algorithm is polynomial in d and δ = 1/D (and unavoidably exponential in n). Since it is known that for δ = Ω (d^{2.5}), M̂ is O(D²)-close and isotopic to M, our algorithm produces a faithful PL-approximation of isomanifolds of bounded complexity in time polynomial in d. Combining this algorithm with dimensionality reduction techniques, the dependency on d in the size of M̂ can be completely removed with high probability. We also show that the algorithm can handle isomanifolds with boundary and, more generally, isostratifolds. The algorithm for isomanifolds with boundary has been implemented and experimental results are reported, showing that it is practical and can handle cases that are far ahead of the state-of-the-art. 
}, author = {Boissonnat, Jean-Daniel and Kachanovich, Siargey and Wintraecken, Mathijs}, booktitle = {37th International Symposium on Computational Geometry (SoCG 2021)}, isbn = {978-3-95977-184-9}, issn = {1868-8969}, location = {Virtual}, pages = {17:1--17:16}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Tracing isomanifolds in Rd in time polynomial in d using Coxeter-Freudenthal-Kuhn triangulations}}, doi = {10.4230/LIPIcs.SoCG.2021.17}, volume = {189}, year = {2021}, } @article{9393, abstract = {We consider the core algorithmic problems related to verification of systems with respect to three classical quantitative properties, namely, the mean-payoff, the ratio, and the minimum initial credit for energy property. The algorithmic problem given a graph and a quantitative property asks to compute the optimal value (the infimum value over all traces) from every node of the graph. We consider graphs with bounded treewidth—a class that contains the control flow graphs of most programs. Let n denote the number of nodes of a graph, m the number of edges (for bounded treewidth 𝑚=𝑂(𝑛)) and W the largest absolute value of the weights. Our main theoretical results are as follows. First, for the minimum initial credit problem we show that (1) for general graphs the problem can be solved in 𝑂(𝑛2⋅𝑚) time and the associated decision problem in 𝑂(𝑛⋅𝑚) time, improving the previous known 𝑂(𝑛3⋅𝑚⋅log(𝑛⋅𝑊)) and 𝑂(𝑛2⋅𝑚) bounds, respectively; and (2) for bounded treewidth graphs we present an algorithm that requires 𝑂(𝑛⋅log𝑛) time. Second, for bounded treewidth graphs we present an algorithm that approximates the mean-payoff value within a factor of 1+𝜖 in time 𝑂(𝑛⋅log(𝑛/𝜖)) as compared to the classical exact algorithms on general graphs that require quadratic time. 
Third, for the ratio property we present an algorithm that for bounded treewidth graphs works in time 𝑂(𝑛⋅log(|𝑎⋅𝑏|))=𝑂(𝑛⋅log(𝑛⋅𝑊)), when the output is 𝑎𝑏, as compared to the previously best known algorithm on general graphs with running time 𝑂(𝑛2⋅log(𝑛⋅𝑊)). We have implemented some of our algorithms and show that they present a significant speedup on standard benchmarks.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas}, issn = {1572-8102}, journal = {Formal Methods in System Design}, pages = {401--428}, publisher = {Springer}, title = {{Faster algorithms for quantitative verification in bounded treewidth graphs}}, doi = {10.1007/s10703-021-00373-5}, volume = {57}, year = {2021}, } @article{10365, abstract = {The early development of many organisms involves the folding of cell monolayers, but this behaviour is difficult to reproduce in vitro; therefore, both mechanistic causes and effects of local curvature remain unclear. Here we study epithelial cell monolayers on corrugated hydrogels engineered into wavy patterns, examining how concave and convex curvatures affect cellular and nuclear shape. We find that substrate curvature affects monolayer thickness, which is larger in valleys than crests. We show that this feature generically arises in a vertex model, leading to the hypothesis that cells may sense curvature by modifying the thickness of the tissue. We find that local curvature also affects nuclear morphology and positioning, which we explain by extending the vertex model to take into account membrane–nucleus interactions, encoding thickness modulation in changes to nuclear deformation and position. We propose that curvature governs the spatial distribution of yes-associated proteins via nuclear shape and density changes. We show that curvature also induces significant variations in lamins, chromatin condensation and cell proliferation rate in folded epithelial tissues. 
Together, this work identifies active cell mechanics and nuclear mechanoadaptation as the key players of the mechanistic regulation of epithelia to substrate curvature.}, author = {Luciano, Marine and Xue, Shi-lei and De Vos, Winnok H. and Redondo-Morata, Lorena and Surin, Mathieu and Lafont, Frank and Hannezo, Edouard B and Gabriele, Sylvain}, issn = {1745-2481}, journal = {Nature Physics}, number = {12}, pages = {1382--1390}, publisher = {Springer Nature}, title = {{Cell monolayers sense curvature by exploiting active mechanics and nuclear mechanoadaptation}}, doi = {10.1038/s41567-021-01374-1}, volume = {17}, year = {2021}, }