@article{12706, abstract = {Allometric settings of population dynamics models are appealing due to their parsimonious nature and broad utility when studying system-level effects. Here, we parameterise the size-scaled Rosenzweig-MacArthur differential equations to eliminate prey-mass dependency, facilitating an in-depth analytic study of the equations which incorporates scaling parameters’ contributions to coexistence. We define the functional response term to match empirical findings, and examine situations where metabolic theory derivations and observation diverge. The dynamical properties of the Rosenzweig-MacArthur system, encompassing the distribution of size-abundance equilibria, the scaling of period and amplitude of population cycling, and relationships between predator and prey abundances, are consistent with empirical observation. Our parameterisation is an accurate minimal model across more than 15 orders of magnitude in mass.}, author = {Mckerral, Jody C. and Kleshnina, Maria and Ejov, Vladimir and Bartle, Louise and Mitchell, James G. and Filar, Jerzy A.}, issn = {1932-6203}, journal = {PLoS One}, number = {2}, pages = {e0279838}, publisher = {Public Library of Science}, title = {{Empirical parameterisation and dynamical analysis of the allometric Rosenzweig-MacArthur equations}}, doi = {10.1371/journal.pone.0279838}, volume = {18}, year = {2023}, } @article{13202, abstract = {Phosphatidylinositol-4,5-bisphosphate (PI(4,5)P2) plays an essential role in neuronal activities through interaction with various proteins involved in signaling at membranes. However, the distribution pattern of PI(4,5)P2 and its association with these proteins on neuronal cell membranes remain elusive. In this study, we established a method for visualizing PI(4,5)P2 by SDS-digested freeze-fracture replica labeling (SDS-FRL) to investigate the quantitative nanoscale distribution of PI(4,5)P2 in cryo-fixed brain. We demonstrate that PI(4,5)P2 forms tiny clusters with a mean size of ∼1000 nm² rather than being randomly distributed in cerebellar neuronal membranes of male C57BL/6J mice. These clusters show preferential accumulation in specific membrane compartments of different cell types, in particular in Purkinje cell (PC) spines and granule cell (GC) presynaptic active zones. Furthermore, we revealed an extensive association of PI(4,5)P2 with CaV2.1 and GIRK3 across different membrane compartments, whereas its association with mGluR1α was compartment specific. These results suggest that our SDS-FRL method provides valuable insights into the physiological functions of PI(4,5)P2 in neurons.}, author = {Eguchi, Kohgaku and Le Monnier, Elodie and Shigemoto, Ryuichi}, issn = {1529-2401}, journal = {The Journal of Neuroscience}, number = {23}, pages = {4197--4216}, publisher = {Society for Neuroscience}, title = {{Nanoscale phosphoinositide distribution on cell membranes of mouse cerebellar neurons}}, doi = {10.1523/JNEUROSCI.1514-22.2023}, volume = {43}, year = {2023}, } @article{12916, abstract = {We apply a variant of the square-sieve to produce an upper bound for the number of rational points of bounded height on a family of surfaces that admit a fibration over P^1 whose general fibre is a hyperelliptic curve. The implied constant does not depend on the coefficients of the polynomial defining the surface.}, author = {Bonolis, Dante and Browning, Timothy D}, issn = {2036-2145}, journal = {Annali della Scuola Normale Superiore di Pisa - Classe di Scienze}, number = {1}, pages = {173--204}, publisher = {Scuola Normale Superiore - Edizioni della Normale}, title = {{Uniform bounds for rational points on hyperelliptic fibrations}}, doi = {10.2422/2036-2145.202010_018}, volume = {24}, year = {2023}, }
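For reference beside entry 12706 above: the classical, non-allometric Rosenzweig-MacArthur system, of which that paper studies a size-scaled parameterisation. The notation below is the common textbook one (prey N, predator P, prey growth rate r, carrying capacity K, attack rate a, handling time h, conversion efficiency e, predator mortality m), not necessarily the authors' own symbols.

\begin{align*}
  \frac{dN}{dt} &= rN\left(1 - \frac{N}{K}\right) - \frac{aNP}{1 + ahN},\\
  \frac{dP}{dt} &= \frac{e\,aNP}{1 + ahN} - mP.
\end{align*}

The Holling type II functional response aNP/(1 + ahN) is the term the paper pins to empirical findings.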
@phdthesis{14422, abstract = {Animals exhibit a remarkable ability to learn and remember new behaviors, skills, and associations throughout their lifetime. These capabilities are made possible thanks to a variety of changes in the brain throughout adulthood, grouped under the term "plasticity". Changes in the connections between neurons in the brain, the synapses, were shown to be crucial for the formation, selection, and consolidation of memories from past experiences. These ongoing changes of synapses across time are called synaptic plasticity. Understanding how a myriad of biochemical processes operating at individual synapses can somehow work in concert to give rise to meaningful changes in behavior is a fascinating problem and an active area of research. However, the experimental search for the precise plasticity mechanisms at play in the brain is daunting, as it is difficult to control and observe synapses during learning. Theoretical approaches have thus been the default method to probe the plasticity-behavior connection. Such studies attempt to extract unifying principles across synapses and model all observed synaptic changes using plasticity rules: equations that govern the evolution of synaptic strengths across time in neuronal network models. These rules can use many relevant quantities to determine the magnitude of synaptic changes, such as the precise timings of pre- and postsynaptic action potentials, recent neuronal activity levels, and the state of neighboring synapses. However, analytical studies rely heavily on human intuition and are forced to make simplifying assumptions about plasticity rules. In this thesis, we aim to assist and augment human intuition in this search for plasticity rules. We explore whether a numerical approach could automatically discover the plasticity rules that elicit desired behaviors in large networks of interconnected neurons. This approach is dubbed meta-learning synaptic plasticity: learning plasticity rules which themselves will make neuronal networks learn how to solve a desired task. We first write all the potential plasticity mechanisms to consider as a single expression with adjustable parameters. We then optimize these plasticity parameters using evolutionary strategies or Bayesian inference on tasks known to involve synaptic plasticity, such as familiarity detection and network stabilization. We show that these automated approaches are powerful tools, able to complement established analytical methods. By comprehensively screening plasticity rules at all synapse types in realistic, spiking neuronal network models, we discover entire sets of degenerate plausible plasticity rules that reliably elicit memory-related behaviors. Our approaches allow for more robust experimental predictions, by abstracting out the idiosyncrasies of individual plasticity rules, and provide fresh insights into synaptic plasticity in spiking network models.}, author = {Confavreux, Basile J}, issn = {2663-337X}, pages = {148}, publisher = {Institute of Science and Technology Austria}, title = {{Synapseek: Meta-learning synaptic plasticity rules}}, doi = {10.15479/at:ista:14422}, year = {2023}, }
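A minimal sketch of the meta-learning loop described in the thesis above (14422), under illustrative assumptions: a plasticity rule is written as one parameterised expression over pre- and postsynaptic activity and the current weight, and its parameters are tuned by a simple evolutionary strategy against a task loss. The toy rate network, the stabilization-like target, and all names here are placeholders, not the thesis's Synapseek code, which uses spiking networks and richer rule spaces.

import numpy as np

rng = np.random.default_rng(0)

def plasticity_rule(theta, pre, post, w):
    # Parameterised rule: dw = eta * (a*post*pre + b*pre + c*post + d*w)
    eta, a, b, c, d = theta
    return eta * (a * np.outer(post, pre) + b * pre[None, :]
                  + c * post[:, None] + d * w)

def task_loss(theta, steps=50):
    # Toy "network stabilization" task: the learned rule should drive
    # output rates toward a fixed target level from random inputs.
    w = rng.normal(0.0, 0.1, size=(5, 10))
    loss = 0.0
    for _ in range(steps):
        pre = rng.random(10)
        post = np.tanh(w @ pre)
        w = w + plasticity_rule(theta, pre, post, w)
        loss += np.mean((post - 0.5) ** 2)
    return loss / steps

# (mu, lambda) evolutionary strategy over the five rule parameters.
theta, sigma = np.zeros(5), 0.1
for generation in range(200):
    candidates = theta + sigma * rng.normal(size=(32, 5))
    losses = np.array([task_loss(c) for c in candidates])
    theta = candidates[np.argsort(losses)[:8]].mean(axis=0)  # elite mean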
@phdthesis{14374, abstract = {Superconductivity has many important applications, ranging from levitating trains and qubits to MRI scanners. The phenomenon is successfully modeled by Bardeen-Cooper-Schrieffer (BCS) theory. From a mathematical perspective, BCS theory has been studied extensively for systems without boundary. However, little is known in the presence of boundaries. With the help of numerical methods, physicists observed that the critical temperature may increase in the presence of a boundary. The goal of this thesis is to understand the influence of boundaries on the critical temperature in BCS theory and to give a first rigorous justification of these observations. On the way, we also study two-body Schrödinger operators on domains with boundaries and prove additional results for superconductors without boundary. BCS theory is based on a non-linear functional, whose minimizer indicates whether the system is superconducting or in the normal, non-superconducting state. By considering the Hessian of the BCS functional at the normal state, one can analyze whether the normal state is possibly a minimum of the BCS functional and estimate the critical temperature. The Hessian turns out to be a linear operator resembling a Schrödinger operator for two interacting particles, but with a more complicated kinetic energy. As a first step, we study the two-body Schrödinger operator in the presence of boundaries. For Neumann boundary conditions, we prove that the addition of a boundary can create new eigenvalues, which correspond to the two particles forming a bound state close to the boundary. Second, we need to understand superconductivity in the translation invariant setting. While in three dimensions this has been studied extensively, there is no mathematical literature for the one- and two-dimensional cases. In dimensions one and two, we compute the weak coupling asymptotics of the critical temperature and the energy gap in the translation invariant setting. We also prove that their ratio is independent of the microscopic details of the model in the weak coupling limit; this property is referred to as universality. In the third part, we study the critical temperature of superconductors in the presence of boundaries. We start by considering the one-dimensional case of a half-line with a contact interaction. Then, we generalize the results to generic interactions and half-spaces in one, two and three dimensions. Finally, we compare the critical temperature of a quarter space in two dimensions to the critical temperatures of a half-space and of the full space.}, author = {Roos, Barbara}, issn = {2663-337X}, pages = {206}, publisher = {Institute of Science and Technology Austria}, title = {{Boundary superconductivity in BCS theory}}, doi = {10.15479/at:ista:14374}, year = {2023}, }
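Background for the thesis above (14374) and the related article that follows (13207): in the translation-invariant setting, the BCS critical temperature is characterised by a linear criterion, sketched below in the standard form from the mathematical literature (chemical potential μ, two-body interaction V, temperature T). This is general orientation, not a result specific to either entry; the boundary effects these works study perturb exactly this picture.

\[
  K_T \;=\; \frac{-\Delta - \mu}{\tanh\left(\frac{-\Delta - \mu}{2T}\right)},
  \qquad
  \inf \mathrm{spec}\,\bigl(K_{T_c} + V\bigr) \;=\; 0,
\]

with a negative eigenvalue of K_T + V for T < T_c signalling the superconducting phase; the operator K_T + V is the "Hessian at the normal state" referred to in the thesis abstract.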
@article{13207, abstract = {We consider the linear BCS equation, determining the BCS critical temperature, in the presence of a boundary, where Dirichlet boundary conditions are imposed. In the one-dimensional case with point interactions, we prove that the critical temperature is strictly larger than the bulk value, at least at weak coupling. In particular, the Cooper-pair wave function localizes near the boundary, an effect that cannot be modeled by effective Neumann boundary conditions on the order parameter as often imposed in Ginzburg-Landau theory. We also show that the relative shift in critical temperature vanishes if the coupling constant either goes to zero or to infinity.}, author = {Hainzl, Christian and Roos, Barbara and Seiringer, Robert}, issn = {1664-0403}, journal = {Journal of Spectral Theory}, number = {4}, pages = {1507--1540}, publisher = {EMS Press}, title = {{Boundary superconductivity in the BCS model}}, doi = {10.4171/JST/439}, volume = {12}, year = {2023}, } @article{14452, abstract = {The classical infinitesimal model is a simple and robust model for the inheritance of quantitative traits. In this model, a quantitative trait is expressed as the sum of a genetic and an environmental component; the genetic component of offspring traits within a family follows a normal distribution around the average of the parents’ trait values, with a variance that is independent of the parental traits. In previous work, we showed that when trait values are determined by the sum of a large number of additive Mendelian factors, each of small effect, one can justify the infinitesimal model as a limit of Mendelian inheritance. In this paper, we show that this result extends to include dominance. We define the model in terms of classical quantities of quantitative genetics, before justifying it as a limit of Mendelian inheritance as the number, M, of underlying loci tends to infinity. As in the additive case, the multivariate normal distribution of trait values across the pedigree can be expressed in terms of variance components in an ancestral population and probabilities of identity by descent determined by the pedigree. Now, with just first-order dominance effects, we require two-, three-, and four-way identities. We also show that, even if we condition on parental trait values, the “shared” and “residual” components of trait values within each family will be asymptotically normally distributed as the number of loci tends to infinity, with an error of order 1/√M. We illustrate our results with some numerical examples.}, author = {Barton, Nicholas H and Etheridge, Alison M. and Véber, Amandine}, issn = {1943-2631}, journal = {Genetics}, number = {2}, publisher = {Oxford Academic}, title = {{The infinitesimal model with dominance}}, doi = {10.1093/genetics/iyad133}, volume = {225}, year = {2023}, }
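A compact statement of the additive infinitesimal model that both neighbouring entries (14452 above, 12949 below) build on, in standard quantitative-genetics notation rather than either paper's own: conditional on the parents, the genetic component of an offspring's trait is normal about the midparent value, with a segregation variance independent of the parental values; in the simplest non-inbred, purely additive case this variance is half the ancestral genetic variance V_0.

\[
  z_{\mathrm{offspring}} \mid z_{\mathrm{mother}}, z_{\mathrm{father}}
  \;\sim\; \mathcal{N}\!\left(\frac{z_{\mathrm{mother}} + z_{\mathrm{father}}}{2},\; \frac{V_0}{2}\right),
\]

with corrections of order 1/√M when the trait is the sum of M Mendelian loci, and with additional variance components (the two-, three- and four-way identities mentioned above) once dominance is included.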
@misc{12949, abstract = {The classical infinitesimal model is a simple and robust model for the inheritance of quantitative traits. In this model, a quantitative trait is expressed as the sum of a genetic and a non-genetic (environmental) component; the genetic component of offspring traits within a family follows a normal distribution around the average of the parents’ trait values, with a variance that is independent of the trait values of the parents. Although the trait distribution across the whole population can be far from normal, the trait distributions within families are normally distributed, with a variance-covariance matrix that is determined entirely by that in the ancestral population and the probabilities of identity determined by the pedigree. Moreover, conditioning on some of the trait values within the pedigree has predictable effects on the mean and variance within and between families. In previous work, Barton et al. (2017), we showed that when trait values are determined by the sum of a large number of Mendelian factors, each of small effect, one can justify the infinitesimal model as a limit of Mendelian inheritance. It was also shown that under some forms of epistasis, trait values within a family are still normally distributed.}, author = {Barton, Nicholas H}, keywords = {Quantitative genetics, infinitesimal model}, publisher = {Institute of Science and Technology Austria}, title = {{The infinitesimal model with dominance}}, doi = {10.15479/AT:ISTA:12949}, year = {2023}, } @inproceedings{14461, abstract = {Communication-reduction techniques are a popular way to improve scalability in data-parallel training of deep neural networks (DNNs). The recent emergence of large language models such as GPT has created the need for new approaches to exploit data-parallelism. Among these, fully-sharded data parallel (FSDP) training is highly popular, yet it still encounters scalability bottlenecks. One reason is that applying compression techniques to FSDP is challenging: as the vast majority of the communication involves the model’s weights, direct compression alters convergence and leads to accuracy loss. We present QSDP, a variant of FSDP which supports both gradient and weight quantization with theoretical guarantees, is simple to implement, and has essentially no overheads. To derive QSDP, we prove that a natural modification of SGD achieves convergence even when we only maintain quantized weights, and thus the domain over which we train consists of quantized points and is, therefore, highly non-convex. We validate this approach by training GPT-family models with up to 1.3 billion parameters on a multi-node cluster. Experiments show that QSDP preserves model accuracy, while completely removing the communication bottlenecks of FSDP, providing end-to-end speedups of up to 2.2x.}, author = {Markov, Ilia and Vladu, Adrian and Guo, Qi and Alistarh, Dan-Adrian}, booktitle = {Proceedings of the 40th International Conference on Machine Learning}, issn = {2640-3498}, location = {Honolulu, HI, United States}, pages = {24020--24044}, publisher = {ML Research Press}, title = {{Quantized distributed training of large models with convergence guarantees}}, volume = {202}, year = {2023}, }
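A toy sketch of the core convergence idea in the QSDP entry above (14461): keep every SGD iterate on a quantization grid by rounding stochastically, hence without bias, after each step. Everything here (the grid step, the least-squares problem, the learning rate) is an illustrative assumption; the paper itself quantizes the weight and gradient communication of FSDP at GPT scale.

import numpy as np

rng = np.random.default_rng(0)

def stochastic_round(x, step=2**-8):
    # Round to the grid {k * step}, up or down with probabilities chosen
    # so that the rounding is unbiased: E[round(x)] = x.
    lo = np.floor(x / step) * step
    p_up = (x - lo) / step
    return lo + step * (rng.random(x.shape) < p_up)

# Least-squares toy problem; the iterate stays quantized after every step,
# so optimization runs over a discrete (highly non-convex) domain.
X = rng.normal(size=(256, 16))
y = X @ rng.normal(size=16)
w = stochastic_round(np.zeros(16))
for _ in range(500):
    grad = X.T @ (X @ w - y) / len(X)
    w = stochastic_round(w - 0.05 * grad)  # project back onto the grid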
@inproceedings{14462, abstract = {We study fine-grained error bounds for differentially private algorithms for counting under continual observation. Our main insight is that the matrix mechanism, when using lower-triangular matrices, can be used in the continual observation model. More specifically, we give an explicit factorization for the counting matrix M_count and upper bound the error explicitly. We also give a fine-grained analysis, specifying the exact constant in the upper bound. Our analysis is based on upper and lower bounds on the completely bounded norm (cb-norm) of M_count. Along the way, we improve the best-known bound on the cb-norm of M_count for a large range of its dimension, which had stood for 28 years (Mathias, SIAM Journal on Matrix Analysis and Applications, 1993). Furthermore, we are the first to give concrete error bounds for various problems under continual observation, such as binary counting, maintaining a histogram, releasing an approximately cut-preserving synthetic graph, many graph-based statistics, and substring and episode counting. Finally, we note that our result can be used to get a fine-grained error bound for non-interactive local learning and the first lower bounds on the additive error for (ϵ,δ)-differentially-private counting under continual observation. Subsequent to this work, Henzinger et al. (SODA, 2023) showed that our factorization also achieves a fine-grained bound on the mean-squared error.}, author = {Fichtenberger, Hendrik and Henzinger, Monika H and Upadhyay, Jalaj}, booktitle = {Proceedings of the 40th International Conference on Machine Learning}, issn = {2640-3498}, location = {Honolulu, HI, United States}, pages = {10072--10092}, publisher = {ML Research Press}, title = {{Constant matters: Fine-grained error bound on differentially private continual observation}}, volume = {202}, year = {2023}, } @inproceedings{14459, abstract = {Autoencoders are a popular model in many branches of machine learning and lossy data compression. However, their fundamental limits, the performance of gradient methods, and the features learnt during optimization remain poorly understood, even in the two-layer setting. In fact, earlier work has considered either linear autoencoders or specific training regimes (leading to vanishing or diverging compression rates). Our paper addresses this gap by focusing on non-linear two-layer autoencoders trained in the challenging proportional regime, in which the input dimension scales linearly with the size of the representation. Our results characterize the minimizers of the population risk, and show that such minimizers are achieved by gradient methods; their structure is also unveiled, thus leading to a concise description of the features obtained via training. For the special case of a sign activation function, our analysis establishes the fundamental limits for the lossy compression of Gaussian sources via (shallow) autoencoders. Finally, while the results are proved for Gaussian data, numerical simulations on standard datasets display the universality of the theoretical predictions.}, author = {Shevchenko, Aleksandr and Kögler, Kevin and Hassani, Hamed and Mondelli, Marco}, booktitle = {Proceedings of the 40th International Conference on Machine Learning}, issn = {2640-3498}, location = {Honolulu, HI, United States}, pages = {31151--31209}, publisher = {ML Research Press}, title = {{Fundamental limits of two-layer autoencoders, and achieving them with gradient methods}}, volume = {202}, year = {2023}, } @inproceedings{14460, abstract = {We provide an efficient implementation of the backpropagation algorithm, specialized to the case where the weights of the neural network being trained are sparse. Our algorithm is general, as it applies to arbitrary (unstructured) sparsity and common layer types (e.g., convolutional or linear). We provide a fast vectorized implementation on commodity CPUs, and show that it can yield speedups in end-to-end runtime experiments, both in transfer learning using already-sparsified networks and in training sparse networks from scratch.
Thus, our results provide the first support for sparse training on commodity hardware.}, author = {Nikdan, Mahdi and Pegolotti, Tommaso and Iofinova, Eugenia B and Kurtic, Eldar and Alistarh, Dan-Adrian}, booktitle = {Proceedings of the 40th International Conference on Machine Learning}, issn = {2640-3498}, location = {Honolulu, HI, United States}, pages = {26215--26227}, publisher = {ML Research Press}, title = {{SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge}}, volume = {202}, year = {2023}, } @inproceedings{14457, abstract = {Threshold secret sharing allows a dealer to split a secret s into n shares, such that any t shares allow for reconstructing s, but no t-1 shares reveal any information about s. Leakage-resilient secret sharing requires that the secret remains hidden even when an adversary additionally obtains a limited amount of leakage from every share. Benhamouda et al. (CRYPTO’18) proved that Shamir’s secret sharing scheme is one-bit leakage-resilient for reconstruction threshold t ≥ 0.85n and conjectured that the same holds for t = c·n for any constant 0 ≤ c ≤ 1. Nielsen and Simkin (EUROCRYPT’20) showed that this is the best one can hope for by proving that Shamir’s scheme is not secure against one-bit leakage when t ≤ c·n/log(n). In this work, we strengthen the lower bound of Nielsen and Simkin. We consider noisy leakage-resilience, where a random subset of leakages is replaced by uniformly random noise. We prove a lower bound for Shamir’s secret sharing, similar to that of Nielsen and Simkin, which holds even when a constant fraction of leakages is replaced by random noise. To this end, we first prove a lower bound on the share size of any noisy-leakage-resilient sharing scheme. We then use this lower bound to show that there exist universal constants c1, c2, such that for sufficiently large n it holds that Shamir’s secret sharing scheme is not noisy-leakage-resilient for t ≤ c1·n/log(n), even when a c2 fraction of leakages are replaced by random noise.}, author = {Hoffmann, Charlotte and Simkin, Mark}, booktitle = {8th International Conference on Cryptology and Information Security in Latin America}, isbn = {9783031444685}, issn = {1611-3349}, location = {Quito, Ecuador}, pages = {215--228}, publisher = {Springer Nature}, title = {{Stronger lower bounds for leakage-resilient secret sharing}}, doi = {10.1007/978-3-031-44469-2_11}, volume = {14168}, year = {2023}, }
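For concreteness beside the entry above (14457): a minimal sketch of the underlying t-of-n Shamir scheme over a prime field. This is the textbook construction, not the paper's contribution; the prime and the parameters below are arbitrary illustrative choices.

import random

P = 2**61 - 1  # a Mersenne prime; any prime exceeding the secret works

def share(secret, t, n):
    # Random polynomial of degree t-1 with constant term = secret;
    # share i is its evaluation at x = i. Any t-1 shares look uniform.
    coeffs = [secret] + [random.randrange(P) for _ in range(t - 1)]
    return [(i, sum(c * pow(i, k, P) for k, c in enumerate(coeffs)) % P)
            for i in range(1, n + 1)]

def reconstruct(shares):
    # Lagrange interpolation at x = 0 recovers the constant term.
    secret = 0
    for x_i, y_i in shares:
        num = den = 1
        for x_j, _ in shares:
            if x_j != x_i:
                num = num * (-x_j) % P
                den = den * (x_i - x_j) % P
        secret = (secret + y_i * num * pow(den, P - 2, P)) % P
    return secret

shares = share(123456789, t=3, n=5)
assert reconstruct(shares[:3]) == 123456789  # any 3 of the 5 shares suffice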
@inproceedings{14458, abstract = {We show for the first time that large-scale generative pretrained transformer (GPT) family models can be pruned to at least 50% sparsity in one-shot, without any retraining, at minimal loss of accuracy. This is achieved via a new pruning method called SparseGPT, specifically designed to work efficiently and accurately on massive GPT-family models. We can execute SparseGPT on the largest available open-source models, OPT-175B and BLOOM-176B, in under 4.5 hours, and can reach 60% unstructured sparsity with negligible increase in perplexity: remarkably, more than 100 billion weights from these models can be ignored at inference time. SparseGPT generalizes to semi-structured (2:4 and 4:8) patterns, and is compatible with weight quantization approaches. The code is available at: https://github.com/IST-DASLab/sparsegpt.}, author = {Frantar, Elias and Alistarh, Dan-Adrian}, booktitle = {Proceedings of the 40th International Conference on Machine Learning}, issn = {2640-3498}, location = {Honolulu, HI, United States}, pages = {10323--10337}, publisher = {ML Research Press}, title = {{SparseGPT: Massive language models can be accurately pruned in one-shot}}, volume = {202}, year = {2023}, }
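Next to the SparseGPT entry above (14458), a sketch of the simplest one-shot pruning baseline for contrast: zero the smallest-magnitude fraction of a layer's weights, with no retraining. SparseGPT itself goes further, using approximate second-order information to update the weights that remain; this snippet only illustrates the one-shot, layer-wise sparsification format.

import numpy as np

def magnitude_prune(w, sparsity=0.5):
    # One-shot, layer-wise magnitude pruning: zero the smallest |w| entries.
    k = int(w.size * sparsity)
    threshold = np.partition(np.abs(w).ravel(), k)[k]
    return np.where(np.abs(w) >= threshold, w, 0.0)

w = np.random.default_rng(0).normal(size=(768, 768))
w_sparse = magnitude_prune(w, sparsity=0.5)
print("sparsity:", (w_sparse == 0).mean())  # ~0.5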
@article{14451, abstract = {We investigate the potential of Multi-Objective Deep Reinforcement Learning for stock and cryptocurrency single-asset trading: in particular, we consider a Multi-Objective algorithm which generalizes the reward functions and discount factor (i.e., these components are not specified a priori, but incorporated in the learning process). Firstly, using several important assets (BTCUSD, ETHUSDT, XRPUSDT, AAPL, SPY, NIFTY50), we verify the reward generalization property of the proposed Multi-Objective algorithm, and provide preliminary statistical evidence showing increased predictive stability over the corresponding Single-Objective strategy. Secondly, we show that the Multi-Objective algorithm has a clear edge over the corresponding Single-Objective strategy when the reward mechanism is sparse (i.e., when non-null feedback is infrequent over time). Finally, we discuss the generalization properties with respect to the discount factor. The entirety of our code is provided in open-source format.}, author = {Cornalba, Federico and Disselkamp, Constantin and Scassola, Davide and Helf, Christopher}, issn = {1433-3058}, journal = {Neural Computing and Applications}, publisher = {Springer Nature}, title = {{Multi-objective reward generalization: improving performance of Deep Reinforcement Learning for applications in single-asset trading}}, doi = {10.1007/s00521-023-09033-7}, year = {2023}, } @article{14442, abstract = {In the presence of an obstacle, active particles condense into a surface “wetting” layer due to persistent motion. If the obstacle is asymmetric, a rectification current arises in addition to wetting. Asymmetric geometries are therefore commonly used to concentrate microorganisms such as bacteria and sperm. However, most studies neglect the fact that biological active matter is diverse, composed of individuals with distinct self-propulsions. Using simulations, we study a mixture of “fast” and “slow” active Brownian disks in two dimensions interacting with large half-disk obstacles. With this prototypical obstacle geometry, we analyze how the stationary collective behavior depends on the degree of self-propulsion “diversity,” defined as proportional to the difference between the self-propulsion speeds, while keeping the average self-propulsion speed fixed. A wetting layer rich in fast particles arises. The rectification current is amplified by speed diversity due to a superlinear dependence of rectification on self-propulsion speed, which arises from cooperative effects. Thus, the total rectification current cannot be obtained from an effective one-component active fluid with the same average self-propulsion speed, highlighting the importance of considering diversity in active matter.}, author = {Rojas Vega, Mauricio Nicolas and De Castro, Pablo and Soto, Rodrigo}, issn = {1292-895X}, journal = {The European Physical Journal E}, number = {10}, publisher = {Springer Nature}, title = {{Mixtures of self-propelled particles interacting with asymmetric obstacles}}, doi = {10.1140/epje/s10189-023-00354-y}, volume = {46}, year = {2023}, } @article{14444, abstract = {We prove several results about substructures in Latin squares. First, we explain how to adapt our recent work on high-girth Steiner triple systems to the setting of Latin squares, resolving a conjecture of Linial that there exist Latin squares with arbitrarily high girth. As a consequence, we see that the number of order-n Latin squares with no intercalate (i.e., no 2×2 Latin subsquare) is at least (e^(−9/4)n − o(n))^(n^2). Equivalently, P[N = 0] ≥ e^(−n^2/4 − o(n^2)) = e^(−(1+o(1))E[N]), where N is the number of intercalates in a uniformly random order-n Latin square. In fact, extending recent work of Kwan, Sah, and Sawhney, we resolve the general large-deviation problem for intercalates in random Latin squares, up to constant factors in the exponent: for any constant 0 < δ ≤ 1 we have P[N ≤ (1−δ)E[N]] = exp(−Θ(n^2)) and for any constant δ > 0 we have P[N ≥ (1+δ)E[N]] = exp(−Θ(n^(4/3) log n)). Finally, as an application of some new general tools for studying substructures in random Latin squares, we show that in almost all order-n Latin squares, the number of cuboctahedra (i.e., the number of pairs of possibly degenerate 2×2 submatrices with the same arrangement of symbols) is of order n^4, which is the minimum possible. As observed by Gowers and Long, this number can be interpreted as measuring “how associative” the quasigroup associated with the Latin square is.}, author = {Kwan, Matthew Alan and Sah, Ashwin and Sawhney, Mehtaab and Simkin, Michael}, issn = {1565-8511}, journal = {Israel Journal of Mathematics}, number = {2}, pages = {363--416}, publisher = {Springer Nature}, title = {{Substructures in Latin squares}}, doi = {10.1007/s11856-023-2513-9}, volume = {256}, year = {2023}, } @inproceedings{14454, abstract = {As AI and machine-learned software are used increasingly for making decisions that affect humans, it is imperative that they remain fair and unbiased in their decisions. To complement design-time bias mitigation measures, runtime verification techniques have been introduced recently to monitor the algorithmic fairness of deployed systems. Previous monitoring techniques assume full observability of the states of the (unknown) monitored system. Moreover, they can monitor only fairness properties that are specified as arithmetic expressions over the probabilities of different events. In this work, we extend fairness monitoring to systems modeled as partially observed Markov chains (POMC), and to specifications containing arithmetic expressions over the expected values of numerical functions on event sequences. The only assumptions we make are that the underlying POMC is aperiodic and starts in the stationary distribution, with a bound on its mixing time being known. These assumptions enable us to estimate a given property for the entire distribution of possible executions of the monitored POMC, by observing only a single execution.
Our monitors observe a long run of the system and, after each new observation, output updated PAC-estimates of how fair or biased the system is. The monitors are computationally lightweight and, using a prototype implementation, we demonstrate their effectiveness on several real-world examples.}, author = {Henzinger, Thomas A and Kueffner, Konstantin and Mallik, Kaushik}, booktitle = {23rd International Conference on Runtime Verification}, isbn = {9783031442667}, issn = {1611-3349}, location = {Thessaloniki, Greece}, pages = {291--311}, publisher = {Springer Nature}, title = {{Monitoring algorithmic fairness under partial observations}}, doi = {10.1007/978-3-031-44267-4_15}, volume = {14245}, year = {2023}, } @article{14446, abstract = {Recent work has paid close attention to the first principle of Granger causality, according to which cause precedes effect. In this context, the question may arise whether the detected direction of causality also reverses after the time reversal of unidirectionally coupled data. Recently, it has been shown that for unidirectionally causally connected autoregressive (AR) processes X → Y, after time reversal of the data, the opposite causal direction Y → X is indeed detected, although typically as part of the bidirectional X ↔ Y link. As we argue here, the answer is different when the measured data are not from AR processes but from linked deterministic systems. When the goal is the usual forward data analysis, cross-mapping-like approaches correctly detect X → Y, while Granger causality-like approaches, which should not be used for deterministic time series, report causal independence between X and Y. The results of backward causal analysis depend on the predictability of the reversed data. Unlike AR processes, observables from deterministic dynamical systems, even complex nonlinear ones, can be predicted well forward, while backward predictions can be difficult (notably when the time reversal of a function leads to one-to-many relations). To address this problem, we propose an approach based on models that provide multiple candidate predictions for the target, combined with a loss function that considers only the best candidate. The resulting good forward and backward predictability supports the view that for unidirectionally causally linked deterministic dynamical systems X → Y, the same link can be expected to be detected both before and after time reversal.}, author = {Jakubík, Jozef and Bui Thi Mai, Phuong and Chvosteková, Martina and Krakovská, Anna}, issn = {1335-8871}, journal = {Measurement Science Review}, number = {4}, pages = {175--183}, publisher = {Sciendo}, title = {{Against the flow of time with multi-output models}}, doi = {10.2478/msr-2023-0023}, volume = {23}, year = {2023}, } @article{14443, abstract = {Importance: Climate change, pollution, urbanization, socioeconomic inequality, and the psychosocial effects of the COVID-19 pandemic have caused massive changes in environmental conditions that affect brain health during the life span, both on a population level and on the level of the individual. How these environmental factors influence the brain, behavior, and mental illness is not well known. Observations: A research strategy is described that enables population neuroscience to contribute to the identification of brain mechanisms underlying environment-related mental illness, by leveraging innovative enrichment tools for data federation, geospatial observation, climate and pollution measures, digital health, and novel data integration techniques.
This strategy can inform innovative treatments that target causal cognitive and molecular mechanisms of mental illness related to the environment. An example is presented: the environMENTAL Project, which leverages federated cohort data from over 1.5 million European citizens and patients, enriched with deep phenotyping data from large-scale behavioral neuroimaging cohorts, to identify brain mechanisms related to environmental adversity underlying symptoms of depression, anxiety, stress, and substance misuse. Conclusions and Relevance: This research will lead to the development of objective biomarkers and evidence-based interventions that will significantly improve outcomes of environment-related mental illness.}, author = {Schumann, Gunter and Andreassen, Ole A. and Banaschewski, Tobias and Calhoun, Vince D. and Clinton, Nicholas and Desrivieres, Sylvane and Brandlistuen, Ragnhild Eek and Feng, Jianfeng and Hese, Soeren and Hitchen, Esther and Hoffmann, Per and Jia, Tianye and Jirsa, Viktor and Marquand, Andre F. and Nees, Frauke and Nöthen, Markus M. and Novarino, Gaia and Polemiti, Elli and Ralser, Markus and Rapp, Michael and Schepanski, Kerstin and Schikowski, Tamara and Slater, Mel and Sommer, Peter and Stahl, Bernd Carsten and Thompson, Paul M. and Twardziok, Sven and Van Der Meer, Dennis and Walter, Henrik and Westlye, Lars}, issn = {2168-6238}, journal = {JAMA Psychiatry}, number = {10}, pages = {1066--1074}, publisher = {American Medical Association}, title = {{Addressing global environmental challenges to mental health using population neuroscience: A review}}, doi = {10.1001/jamapsychiatry.2023.2996}, volume = {80}, year = {2023}, }