[{"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"ista":"Shevchenko A, Kögler K, Hassani H, Mondelli M. 2023. Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 31151–31209.","chicago":"Shevchenko, Aleksandr, Kevin Kögler, Hamed Hassani, and Marco Mondelli. “Fundamental Limits of Two-Layer Autoencoders, and Achieving Them with Gradient Methods.” In Proceedings of the 40th International Conference on Machine Learning, 202:31151–209. ML Research Press, 2023.","ieee":"A. Shevchenko, K. Kögler, H. Hassani, and M. Mondelli, “Fundamental limits of two-layer autoencoders, and achieving them with gradient methods,” in Proceedings of the 40th International Conference on Machine Learning, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 31151–31209.","short":"A. Shevchenko, K. Kögler, H. Hassani, M. Mondelli, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 31151–31209.","ama":"Shevchenko A, Kögler K, Hassani H, Mondelli M. Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. In: Proceedings of the 40th International Conference on Machine Learning. Vol 202. ML Research Press; 2023:31151-31209.","apa":"Shevchenko, A., Kögler, K., Hassani, H., & Mondelli, M. (2023). Fundamental limits of two-layer autoencoders, and achieving them with gradient methods. In Proceedings of the 40th International Conference on Machine Learning (Vol. 202, pp. 31151–31209). Honolulu, Hawaii, HI, United States: ML Research Press.","mla":"Shevchenko, Aleksandr, et al. “Fundamental Limits of Two-Layer Autoencoders, and Achieving Them with Gradient Methods.” Proceedings of the 40th International Conference on Machine Learning, vol. 202, ML Research Press, 2023, pp. 
31151–209."},"title":"Fundamental limits of two-layer autoencoders, and achieving them with gradient methods","author":[{"id":"F2B06EC2-C99E-11E9-89F0-752EE6697425","first_name":"Aleksandr","full_name":"Shevchenko, Aleksandr","last_name":"Shevchenko"},{"id":"94ec913c-dc85-11ea-9058-e5051ab2428b","first_name":"Kevin","full_name":"Kögler, Kevin","last_name":"Kögler"},{"full_name":"Hassani, Hamed","last_name":"Hassani","first_name":"Hamed"},{"id":"27EB676C-8706-11E9-9510-7717E6697425","first_name":"Marco","last_name":"Mondelli","orcid":"0000-0002-3242-7020","full_name":"Mondelli, Marco"}],"external_id":{"arxiv":["2212.13468"]},"article_processing_charge":"No","project":[{"_id":"059876FA-7A3F-11EA-A408-12923DDC885E","name":"Prix Lopez-Loretta 2019 - Marco Mondelli"}],"day":"30","publication":"Proceedings of the 40th International Conference on Machine Learning","year":"2023","date_published":"2023-07-30T00:00:00Z","date_created":"2023-10-29T23:01:17Z","page":"31151-31209","acknowledgement":"Aleksandr Shevchenko, Kevin Kogler and Marco Mondelli are supported by the 2019 Lopez-Loreta Prize. Hamed Hassani acknowledges the support by the NSF CIF award (1910056) and the NSF Institute for CORE Emerging Methods in Data Science (EnCORE).","quality_controlled":"1","publisher":"ML Research Press","oa":1,"date_updated":"2023-10-31T08:52:28Z","department":[{"_id":"MaMo"},{"_id":"DaAl"}],"_id":"14459","status":"public","type":"conference","conference":{"name":"ICML: International Conference on Machine Learning","end_date":"2023-07-29","location":"Honolulu, Hawaii, HI, United States","start_date":"2023-07-23"},"language":[{"iso":"eng"}],"publication_identifier":{"eissn":["2640-3498"]},"publication_status":"published","volume":202,"oa_version":"Preprint","abstract":[{"text":"Autoencoders are a popular model in many branches of machine learning and lossy data compression. 
However, their fundamental limits, the performance of gradient methods and the features learnt during optimization remain poorly understood, even in the two-layer setting. In fact, earlier work has considered either linear autoencoders or specific training regimes (leading to vanishing or diverging compression rates). Our paper addresses this gap by focusing on non-linear two-layer autoencoders trained in the challenging proportional regime in which the input dimension scales linearly with the size of the representation. Our results characterize the minimizers of the population risk, and show that such minimizers are achieved by gradient methods; their structure is also unveiled, thus leading to a concise description of the features obtained via training. For the special case of a sign activation function, our analysis establishes the fundamental limits for the lossy compression of Gaussian sources via (shallow) autoencoders. Finally, while the results are proved for Gaussian data, numerical simulations on standard datasets display the universality of the theoretical predictions.","lang":"eng"}],"month":"07","intvolume":" 202","scopus_import":"1","alternative_title":["PMLR"],"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2212.13468","open_access":"1"}]},{"author":[{"full_name":"Nikdan, Mahdi","last_name":"Nikdan","id":"66374281-f394-11eb-9cf6-869147deecc0","first_name":"Mahdi"},{"full_name":"Pegolotti, Tommaso","last_name":"Pegolotti","first_name":"Tommaso"},{"first_name":"Eugenia B","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","orcid":"0000-0002-7778-3221","full_name":"Iofinova, Eugenia B","last_name":"Iofinova"},{"full_name":"Kurtic, Eldar","last_name":"Kurtic","id":"47beb3a5-07b5-11eb-9b87-b108ec578218","first_name":"Eldar"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian","last_name":"Alistarh","orcid":"0000-0003-3650-940X","full_name":"Alistarh, 
Dan-Adrian"}],"external_id":{"arxiv":["2302.04852"]},"article_processing_charge":"No","title":"SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge","citation":{"ista":"Nikdan M, Pegolotti T, Iofinova EB, Kurtic E, Alistarh D-A. 2023. SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 26215–26227.","chicago":"Nikdan, Mahdi, Tommaso Pegolotti, Eugenia B Iofinova, Eldar Kurtic, and Dan-Adrian Alistarh. “SparseProp: Efficient Sparse Backpropagation for Faster Training of Neural Networks at the Edge.” In Proceedings of the 40th International Conference on Machine Learning, 202:26215–27. ML Research Press, 2023.","ieee":"M. Nikdan, T. Pegolotti, E. B. Iofinova, E. Kurtic, and D.-A. Alistarh, “SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge,” in Proceedings of the 40th International Conference on Machine Learning, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 26215–26227.","short":"M. Nikdan, T. Pegolotti, E.B. Iofinova, E. Kurtic, D.-A. Alistarh, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 26215–26227.","apa":"Nikdan, M., Pegolotti, T., Iofinova, E. B., Kurtic, E., & Alistarh, D.-A. (2023). SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. In Proceedings of the 40th International Conference on Machine Learning (Vol. 202, pp. 26215–26227). Honolulu, Hawaii, HI, United States: ML Research Press.","ama":"Nikdan M, Pegolotti T, Iofinova EB, Kurtic E, Alistarh D-A. SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. In: Proceedings of the 40th International Conference on Machine Learning. Vol 202. 
ML Research Press; 2023:26215-26227.","mla":"Nikdan, Mahdi, et al. “SparseProp: Efficient Sparse Backpropagation for Faster Training of Neural Networks at the Edge.” Proceedings of the 40th International Conference on Machine Learning, vol. 202, ML Research Press, 2023, pp. 26215–27."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","project":[{"call_identifier":"H2020","_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning","grant_number":"805223"}],"page":"26215-26227","date_published":"2023-07-30T00:00:00Z","date_created":"2023-10-29T23:01:17Z","year":"2023","day":"30","publication":"Proceedings of the 40th International Conference on Machine Learning","quality_controlled":"1","publisher":"ML Research Press","oa":1,"acknowledgement":"We would like to thank Elias Frantar for his valuable assistance and support at the outset of this project, and the anonymous ICML and SNN reviewers for very constructive feedback. EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35. DA acknowledges generous ERC support, via Starting Grant 805223 ScaleML. ","department":[{"_id":"DaAl"}],"date_updated":"2023-10-31T09:33:51Z","type":"conference","conference":{"end_date":"2023-07-29","location":"Honolulu, Hawaii, HI, United States","start_date":"2023-07-23","name":"ICML: International Conference on Machine Learning"},"status":"public","_id":"14460","volume":202,"ec_funded":1,"publication_identifier":{"eissn":["2640-3498"]},"publication_status":"published","language":[{"iso":"eng"}],"alternative_title":["PMLR"],"scopus_import":"1","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2302.04852","open_access":"1"}],"month":"07","intvolume":" 202","abstract":[{"text":"We provide an efficient implementation of the backpropagation algorithm, specialized to the case where the weights of the neural network being trained are sparse. 
Our algorithm is general, as it applies to arbitrary (unstructured) sparsity and common layer types (e.g., convolutional or linear). We provide a fast vectorized implementation on commodity CPUs, and show that it can yield speedups in end-to-end runtime experiments, both in transfer learning using already-sparsified networks, and in training sparse networks from scratch. Thus, our results provide the first support for sparse training on commodity hardware.","lang":"eng"}],"oa_version":"Preprint"},{"author":[{"last_name":"Hoffmann","full_name":"Hoffmann, Charlotte","orcid":"0000-0003-2027-5549","id":"0f78d746-dc7d-11ea-9b2f-83f92091afe7","first_name":"Charlotte"},{"first_name":"Mark","last_name":"Simkin","full_name":"Simkin, Mark"}],"article_processing_charge":"No","title":"Stronger lower bounds for leakage-resilient secret sharing","citation":{"mla":"Hoffmann, Charlotte, and Mark Simkin. “Stronger Lower Bounds for Leakage-Resilient Secret Sharing.” 8th International Conference on Cryptology and Information Security in Latin America, vol. 14168, Springer Nature, 2023, pp. 215–28, doi:10.1007/978-3-031-44469-2_11.","ama":"Hoffmann C, Simkin M. Stronger lower bounds for leakage-resilient secret sharing. In: 8th International Conference on Cryptology and Information Security in Latin America. Vol 14168. Springer Nature; 2023:215-228. doi:10.1007/978-3-031-44469-2_11","apa":"Hoffmann, C., & Simkin, M. (2023). Stronger lower bounds for leakage-resilient secret sharing. In 8th International Conference on Cryptology and Information Security in Latin America (Vol. 14168, pp. 215–228). Quito, Ecuador: Springer Nature. https://doi.org/10.1007/978-3-031-44469-2_11","short":"C. Hoffmann, M. Simkin, in:, 8th International Conference on Cryptology and Information Security in Latin America, Springer Nature, 2023, pp. 215–228.","ieee":"C. Hoffmann and M. 
Simkin, “Stronger lower bounds for leakage-resilient secret sharing,” in 8th International Conference on Cryptology and Information Security in Latin America, Quito, Ecuador, 2023, vol. 14168, pp. 215–228.","chicago":"Hoffmann, Charlotte, and Mark Simkin. “Stronger Lower Bounds for Leakage-Resilient Secret Sharing.” In 8th International Conference on Cryptology and Information Security in Latin America, 14168:215–28. Springer Nature, 2023. https://doi.org/10.1007/978-3-031-44469-2_11.","ista":"Hoffmann C, Simkin M. 2023. Stronger lower bounds for leakage-resilient secret sharing. 8th International Conference on Cryptology and Information Security in Latin America. LATINCRYPT: Conference on Cryptology and Information Security in Latin America, LNCS, vol. 14168, 215–228."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","page":"215-228","doi":"10.1007/978-3-031-44469-2_11","date_published":"2023-10-01T00:00:00Z","date_created":"2023-10-29T23:01:16Z","year":"2023","day":"01","publication":"8th International Conference on Cryptology and Information Security in Latin America","quality_controlled":"1","publisher":"Springer Nature","oa":1,"department":[{"_id":"KrPi"}],"date_updated":"2023-10-31T11:43:12Z","type":"conference","conference":{"start_date":"2023-10-03","location":"Quito, Ecuador","end_date":"2023-10-06","name":"LATINCRYPT: Conference on Cryptology and Information Security in Latin America"},"status":"public","_id":"14457","volume":14168,"publication_identifier":{"issn":["0302-9743"],"eissn":["1611-3349"],"isbn":["9783031444685"]},"publication_status":"published","language":[{"iso":"eng"}],"alternative_title":["LNCS"],"scopus_import":"1","main_file_link":[{"open_access":"1","url":"https://eprint.iacr.org/2023/1017"}],"month":"10","intvolume":" 14168","abstract":[{"lang":"eng","text":"Threshold secret sharing allows a dealer to split a secret s into n shares, such that any t shares allow for reconstructing s, but no t-1 shares reveal any information about s. 
Leakage-resilient secret sharing requires that the secret remains hidden, even when an adversary additionally obtains a limited amount of leakage from every share. Benhamouda et al. (CRYPTO’18) proved that Shamir’s secret sharing scheme is one bit leakage-resilient for reconstruction threshold t≥0.85n and conjectured that the same holds for t = c.n for any constant 0≤c≤1. Nielsen and Simkin (EUROCRYPT’20) showed that this is the best one can hope for by proving that Shamir’s scheme is not secure against one-bit leakage when t≤c.n/log(n).\r\nIn this work, we strengthen the lower bound of Nielsen and Simkin. We consider noisy leakage-resilience, where a random subset of leakages is replaced by uniformly random noise. We prove a lower bound for Shamir’s secret sharing, similar to that of Nielsen and Simkin, which holds even when a constant fraction of leakages is replaced by random noise. To this end, we first prove a lower bound on the share size of any noisy-leakage-resilient sharing scheme. We then use this lower bound to show that there exist universal constants c1, c2, such that for sufficiently large n it holds that Shamir’s secret sharing scheme is not noisy-leakage-resilient for t≤c1.n/log(n), even when a c2 fraction of leakages are replaced by random noise."}],"oa_version":"Preprint"},{"quality_controlled":"1","publisher":"ML Research Press","oa":1,"acknowledgement":"The authors gratefully acknowledge funding from the European Research Council (ERC) under the European Union’s Horizon 2020 programme (grant agreement No. 
805223 ScaleML), as well as experimental support from Eldar Kurtic, and from the IST Austria IT department, in particular Stefano Elefante, Andrei Hornoiu, and Alois Schloegl.","date_published":"2023-07-30T00:00:00Z","date_created":"2023-10-29T23:01:16Z","page":"10323-10337","day":"30","publication":"Proceedings of the 40th International Conference on Machine Learning","year":"2023","project":[{"call_identifier":"H2020","_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223","name":"Elastic Coordination for Scalable Machine Learning"}],"title":"SparseGPT: Massive language models can be accurately pruned in one-shot","author":[{"full_name":"Frantar, Elias","last_name":"Frantar","id":"09a8f98d-ec99-11ea-ae11-c063a7b7fe5f","first_name":"Elias"},{"last_name":"Alistarh","orcid":"0000-0003-3650-940X","full_name":"Alistarh, Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian"}],"article_processing_charge":"No","external_id":{"arxiv":["2301.00774"]},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"short":"E. Frantar, D.-A. Alistarh, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 10323–10337.","ieee":"E. Frantar and D.-A. Alistarh, “SparseGPT: Massive language models can be accurately pruned in one-shot,” in Proceedings of the 40th International Conference on Machine Learning, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 10323–10337.","ama":"Frantar E, Alistarh D-A. SparseGPT: Massive language models can be accurately pruned in one-shot. In: Proceedings of the 40th International Conference on Machine Learning. Vol 202. ML Research Press; 2023:10323-10337.","apa":"Frantar, E., & Alistarh, D.-A. (2023). SparseGPT: Massive language models can be accurately pruned in one-shot. In Proceedings of the 40th International Conference on Machine Learning (Vol. 202, pp. 10323–10337). 
Honolulu, Hawaii, HI, United States: ML Research Press.","mla":"Frantar, Elias, and Dan-Adrian Alistarh. “SparseGPT: Massive Language Models Can Be Accurately Pruned in One-Shot.” Proceedings of the 40th International Conference on Machine Learning, vol. 202, ML Research Press, 2023, pp. 10323–37.","ista":"Frantar E, Alistarh D-A. 2023. SparseGPT: Massive language models can be accurately pruned in one-shot. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 10323–10337.","chicago":"Frantar, Elias, and Dan-Adrian Alistarh. “SparseGPT: Massive Language Models Can Be Accurately Pruned in One-Shot.” In Proceedings of the 40th International Conference on Machine Learning, 202:10323–37. ML Research Press, 2023."},"month":"07","intvolume":" 202","scopus_import":"1","alternative_title":["PMLR"],"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2301.00774"}],"oa_version":"Preprint","abstract":[{"text":"We show for the first time that large-scale generative pretrained transformer (GPT) family models can be pruned to at least 50% sparsity in one-shot, without any retraining, at minimal loss of accuracy. This is achieved via a new pruning method called SparseGPT, specifically designed to work efficiently and accurately on massive GPT-family models. We can execute SparseGPT on the largest available open-source models, OPT-175B and BLOOM-176B, in under 4.5 hours, and can reach 60% unstructured sparsity with negligible increase in perplexity: remarkably, more than 100 billion weights from these models can be ignored at inference time. SparseGPT generalizes to semi-structured (2:4 and 4:8) patterns, and is compatible with weight quantization approaches. 
The code is available at: https://github.com/IST-DASLab/sparsegpt.","lang":"eng"}],"acknowledged_ssus":[{"_id":"ScienComp"}],"volume":202,"ec_funded":1,"language":[{"iso":"eng"}],"publication_identifier":{"eissn":["2640-3498"]},"publication_status":"published","status":"public","type":"conference","conference":{"name":"ICML: International Conference on Machine Learning","start_date":"2023-07-23","end_date":"2023-07-29","location":"Honolulu, Hawaii, HI, United States"},"_id":"14458","department":[{"_id":"DaAl"}],"date_updated":"2023-10-31T09:59:42Z"},{"language":[{"iso":"eng"}],"publication_status":"epub_ahead","publication_identifier":{"issn":["0941-0643"],"eissn":["1433-3058"]},"ec_funded":1,"oa_version":"Published Version","abstract":[{"lang":"eng","text":"We investigate the potential of Multi-Objective, Deep Reinforcement Learning for stock and cryptocurrency single-asset trading: in particular, we consider a Multi-Objective algorithm which generalizes the reward functions and discount factor (i.e., these components are not specified a priori, but incorporated in the learning process). Firstly, using several important assets (BTCUSD, ETHUSDT, XRPUSDT, AAPL, SPY, NIFTY50), we verify the reward generalization property of the proposed Multi-Objective algorithm, and provide preliminary statistical evidence showing increased predictive stability over the corresponding Single-Objective strategy. Secondly, we show that the Multi-Objective algorithm has a clear edge over the corresponding Single-Objective strategy when the reward mechanism is sparse (i.e., when non-null feedback is infrequent over time). Finally, we discuss the generalization properties with respect to the discount factor. 
The entirety of our code is provided in open-source format."}],"month":"10","main_file_link":[{"url":"https://doi.org/10.1007/s00521-023-09033-7","open_access":"1"}],"scopus_import":"1","date_updated":"2023-10-31T10:58:28Z","department":[{"_id":"JuFi"}],"_id":"14451","status":"public","article_type":"original","type":"journal_article","publication":"Neural Computing and Applications","day":"05","year":"2023","date_created":"2023-10-22T22:01:16Z","date_published":"2023-10-05T00:00:00Z","doi":"10.1007/s00521-023-09033-7","acknowledgement":"Open access funding provided by Università degli Studi di Trieste within the CRUI-CARE Agreement. Funding was provided by Austrian Science Fund (Grant No. F65), Horizon 2020 (Grant No. 754411) and Österreichische Forschungsförderungsgesellschaft.","oa":1,"quality_controlled":"1","publisher":"Springer Nature","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"mla":"Cornalba, Federico, et al. “Multi-Objective Reward Generalization: Improving Performance of Deep Reinforcement Learning for Applications in Single-Asset Trading.” Neural Computing and Applications, Springer Nature, 2023, doi:10.1007/s00521-023-09033-7.","ama":"Cornalba F, Disselkamp C, Scassola D, Helf C. Multi-objective reward generalization: improving performance of Deep Reinforcement Learning for applications in single-asset trading. Neural Computing and Applications. 2023. doi:10.1007/s00521-023-09033-7","apa":"Cornalba, F., Disselkamp, C., Scassola, D., & Helf, C. (2023). Multi-objective reward generalization: improving performance of Deep Reinforcement Learning for applications in single-asset trading. Neural Computing and Applications. Springer Nature. https://doi.org/10.1007/s00521-023-09033-7","short":"F. Cornalba, C. Disselkamp, D. Scassola, C. Helf, Neural Computing and Applications (2023).","ieee":"F. Cornalba, C. Disselkamp, D. Scassola, and C. 
Helf, “Multi-objective reward generalization: improving performance of Deep Reinforcement Learning for applications in single-asset trading,” Neural Computing and Applications. Springer Nature, 2023.","chicago":"Cornalba, Federico, Constantin Disselkamp, Davide Scassola, and Christopher Helf. “Multi-Objective Reward Generalization: Improving Performance of Deep Reinforcement Learning for Applications in Single-Asset Trading.” Neural Computing and Applications. Springer Nature, 2023. https://doi.org/10.1007/s00521-023-09033-7.","ista":"Cornalba F, Disselkamp C, Scassola D, Helf C. 2023. Multi-objective reward generalization: improving performance of Deep Reinforcement Learning for applications in single-asset trading. Neural Computing and Applications."},"title":"Multi-objective reward generalization: improving performance of Deep Reinforcement Learning for applications in single-asset trading","article_processing_charge":"Yes (via OA deal)","external_id":{"arxiv":["2203.04579"]},"author":[{"full_name":"Cornalba, Federico","orcid":"0000-0002-6269-5149","last_name":"Cornalba","first_name":"Federico","id":"2CEB641C-A400-11E9-A717-D712E6697425"},{"first_name":"Constantin","last_name":"Disselkamp","full_name":"Disselkamp, Constantin"},{"first_name":"Davide","last_name":"Scassola","full_name":"Scassola, Davide"},{"last_name":"Helf","full_name":"Helf, Christopher","first_name":"Christopher"}],"project":[{"name":"Taming Complexity in Partial Differential Systems","grant_number":"F6504","_id":"fc31cba2-9c52-11eb-aca3-ff467d239cd2"},{"grant_number":"754411","name":"ISTplus - Postdoctoral 
Fellowships","call_identifier":"H2020","_id":"260C2330-B435-11E9-9278-68D0E5697425"}]},{"department":[{"_id":"AnSa"}],"date_updated":"2023-10-31T11:16:41Z","article_type":"original","type":"journal_article","status":"public","_id":"14442","issue":"10","volume":46,"publication_identifier":{"issn":["1292-8941"],"eissn":["1292-895X"]},"publication_status":"published","language":[{"iso":"eng"}],"scopus_import":"1","month":"10","intvolume":" 46","abstract":[{"lang":"eng","text":"In the presence of an obstacle, active particles condensate into a surface “wetting” layer due to persistent motion. If the obstacle is asymmetric, a rectification current arises in addition to wetting. Asymmetric geometries are therefore commonly used to concentrate microorganisms like bacteria and sperms. However, most studies neglect the fact that biological active matter is diverse, composed of individuals with distinct self-propulsions. Using simulations, we study a mixture of “fast” and “slow” active Brownian disks in two dimensions interacting with large half-disk obstacles. With this prototypical obstacle geometry, we analyze how the stationary collective behavior depends on the degree of self-propulsion “diversity,” defined as proportional to the difference between the self-propulsion speeds, while keeping the average self-propulsion speed fixed. A wetting layer rich in fast particles arises. The rectification current is amplified by speed diversity due to a superlinear dependence of rectification on self-propulsion speed, which arises from cooperative effects. 
Thus, the total rectification current cannot be obtained from an effective one-component active fluid with the same average self-propulsion speed, highlighting the importance of considering diversity in active matter."}],"oa_version":"None","pmid":1,"author":[{"full_name":"Rojas Vega, Mauricio Nicolas","last_name":"Rojas Vega","id":"441e7207-f91f-11ec-b67c-9e6fe3d8fd6d","first_name":"Mauricio Nicolas"},{"full_name":"De Castro, Pablo","last_name":"De Castro","first_name":"Pablo"},{"last_name":"Soto","full_name":"Soto, Rodrigo","first_name":"Rodrigo"}],"article_processing_charge":"No","external_id":{"pmid":["37819444"]},"title":"Mixtures of self-propelled particles interacting with asymmetric obstacles","citation":{"apa":"Rojas Vega, M. N., De Castro, P., & Soto, R. (2023). Mixtures of self-propelled particles interacting with asymmetric obstacles. The European Physical Journal E. Springer Nature. https://doi.org/10.1140/epje/s10189-023-00354-y","ama":"Rojas Vega MN, De Castro P, Soto R. Mixtures of self-propelled particles interacting with asymmetric obstacles. The European Physical Journal E. 2023;46(10). doi:10.1140/epje/s10189-023-00354-y","short":"M.N. Rojas Vega, P. De Castro, R. Soto, The European Physical Journal E 46 (2023).","ieee":"M. N. Rojas Vega, P. De Castro, and R. Soto, “Mixtures of self-propelled particles interacting with asymmetric obstacles,” The European Physical Journal E, vol. 46, no. 10. Springer Nature, 2023.","mla":"Rojas Vega, Mauricio Nicolas, et al. “Mixtures of Self-Propelled Particles Interacting with Asymmetric Obstacles.” The European Physical Journal E, vol. 46, no. 10, 95, Springer Nature, 2023, doi:10.1140/epje/s10189-023-00354-y.","ista":"Rojas Vega MN, De Castro P, Soto R. 2023. Mixtures of self-propelled particles interacting with asymmetric obstacles. The European Physical Journal E. 46(10), 95.","chicago":"Rojas Vega, Mauricio Nicolas, Pablo De Castro, and Rodrigo Soto. 
“Mixtures of Self-Propelled Particles Interacting with Asymmetric Obstacles.” The European Physical Journal E. Springer Nature, 2023. https://doi.org/10.1140/epje/s10189-023-00354-y."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_number":"95","date_published":"2023-10-01T00:00:00Z","doi":"10.1140/epje/s10189-023-00354-y","date_created":"2023-10-22T22:01:13Z","year":"2023","day":"01","publication":"The European Physical Journal E","quality_controlled":"1","publisher":"Springer Nature","acknowledgement":"MR-V and RS are supported by Fondecyt Grant No. 1220536 and Millennium Science Initiative Program NCN19_170D of ANID, Chile. P.d.C. was supported by Scholarships Nos. 2021/10139-2 and 2022/13872-5 and ICTP-SAIFR Grant No. 2021/14335-0, all granted by São Paulo Research Foundation (FAPESP), Brazil."},{"_id":"14444","status":"public","article_type":"original","type":"journal_article","date_updated":"2023-10-31T11:27:30Z","department":[{"_id":"MaKw"}],"oa_version":"Preprint","abstract":[{"lang":"eng","text":"We prove several results about substructures in Latin squares. First, we explain how to adapt our recent work on high-girth Steiner triple systems to the setting of Latin squares, resolving a conjecture of Linial that there exist Latin squares with arbitrarily high girth. As a consequence, we see that the number of order- n Latin squares with no intercalate (i.e., no 2×2 Latin subsquare) is at least (e−9/4n−o(n))n2. Equivalently, P[N=0]≥e−n2/4−o(n2)=e−(1+o(1))EN\r\n , where N is the number of intercalates in a uniformly random order- n Latin square. \r\nIn fact, extending recent work of Kwan, Sah, and Sawhney, we resolve the general large-deviation problem for intercalates in random Latin squares, up to constant factors in the exponent: for any constant 0<δ≤1 we have P[N≤(1−δ)EN]=exp(−Θ(n2)) and for any constant δ>0 we have P[N≥(1+δ)EN]=exp(−Θ(n4/3logn)). 
\r\nFinally, as an application of some new general tools for studying substructures in random Latin squares, we show that in almost all order- n Latin squares, the number of cuboctahedra (i.e., the number of pairs of possibly degenerate 2×2 submatrices with the same arrangement of symbols) is of order n4, which is the minimum possible. As observed by Gowers and Long, this number can be interpreted as measuring ``how associative'' the quasigroup associated with the Latin square is."}],"month":"09","intvolume":" 256","scopus_import":"1","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2202.05088"}],"language":[{"iso":"eng"}],"publication_identifier":{"issn":["0021-2172"],"eissn":["1565-8511"]},"publication_status":"published","volume":256,"issue":"2","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"mla":"Kwan, Matthew Alan, et al. “Substructures in Latin Squares.” Israel Journal of Mathematics, vol. 256, no. 2, Springer Nature, 2023, pp. 363–416, doi:10.1007/s11856-023-2513-9.","ama":"Kwan MA, Sah A, Sawhney M, Simkin M. Substructures in Latin squares. Israel Journal of Mathematics. 2023;256(2):363-416. doi:10.1007/s11856-023-2513-9","apa":"Kwan, M. A., Sah, A., Sawhney, M., & Simkin, M. (2023). Substructures in Latin squares. Israel Journal of Mathematics. Springer Nature. https://doi.org/10.1007/s11856-023-2513-9","ieee":"M. A. Kwan, A. Sah, M. Sawhney, and M. Simkin, “Substructures in Latin squares,” Israel Journal of Mathematics, vol. 256, no. 2. Springer Nature, pp. 363–416, 2023.","short":"M.A. Kwan, A. Sah, M. Sawhney, M. Simkin, Israel Journal of Mathematics 256 (2023) 363–416.","chicago":"Kwan, Matthew Alan, Ashwin Sah, Mehtaab Sawhney, and Michael Simkin. “Substructures in Latin Squares.” Israel Journal of Mathematics. Springer Nature, 2023. https://doi.org/10.1007/s11856-023-2513-9.","ista":"Kwan MA, Sah A, Sawhney M, Simkin M. 2023. Substructures in Latin squares. Israel Journal of Mathematics. 
256(2), 363–416."},"title":"Substructures in Latin squares","author":[{"id":"5fca0887-a1db-11eb-95d1-ca9d5e0453b3","first_name":"Matthew Alan","last_name":"Kwan","orcid":"0000-0002-4003-7567","full_name":"Kwan, Matthew Alan"},{"first_name":"Ashwin","last_name":"Sah","full_name":"Sah, Ashwin"},{"first_name":"Mehtaab","full_name":"Sawhney, Mehtaab","last_name":"Sawhney"},{"last_name":"Simkin","full_name":"Simkin, Michael","first_name":"Michael"}],"article_processing_charge":"Yes (in subscription journal)","external_id":{"arxiv":["2202.05088"]},"acknowledgement":"Sah and Sawhney were supported by NSF Graduate Research Fellowship Program DGE-1745302. Sah was supported by the PD Soros Fellowship. Simkin was supported by the Center of Mathematical Sciences and Applications at Harvard University.","quality_controlled":"1","publisher":"Springer Nature","oa":1,"day":"01","publication":"Israel Journal of Mathematics","year":"2023","date_published":"2023-09-01T00:00:00Z","doi":"10.1007/s11856-023-2513-9","date_created":"2023-10-22T22:01:14Z","page":"363-416"},{"volume":14245,"ec_funded":1,"language":[{"iso":"eng"}],"publication_identifier":{"eissn":["1611-3349"],"isbn":["9783031442667"],"issn":["0302-9743"]},"publication_status":"published","month":"10","intvolume":" 14245","alternative_title":["LNCS"],"scopus_import":"1","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2308.00341"}],"oa_version":"Preprint","abstract":[{"lang":"eng","text":"As AI and machine-learned software are used increasingly for making decisions that affect humans, it is imperative that they remain fair and unbiased in their decisions. To complement design-time bias mitigation measures, runtime verification techniques have been introduced recently to monitor the algorithmic fairness of deployed systems. Previous monitoring techniques assume full observability of the states of the (unknown) monitored system. 
Moreover, they can monitor only fairness properties that are specified as arithmetic expressions over the probabilities of different events. In this work, we extend fairness monitoring to systems modeled as partially observed Markov chains (POMC), and to specifications containing arithmetic expressions over the expected values of numerical functions on event sequences. The only assumptions we make are that the underlying POMC is aperiodic and starts in the stationary distribution, with a bound on its mixing time being known. These assumptions enable us to estimate a given property for the entire distribution of possible executions of the monitored POMC, by observing only a single execution. Our monitors observe a long run of the system and, after each new observation, output updated PAC-estimates of how fair or biased the system is. The monitors are computationally lightweight and, using a prototype implementation, we demonstrate their effectiveness on several real-world examples."}],"department":[{"_id":"ToHe"}],"date_updated":"2023-10-31T11:48:20Z","status":"public","type":"conference","conference":{"name":"RV: Conference on Runtime Verification","end_date":"2023-10-06","location":"Thessaloniki, Greece","start_date":"2023-10-03"},"_id":"14454","doi":"10.1007/978-3-031-44267-4_15","date_published":"2023-10-01T00:00:00Z","date_created":"2023-10-29T23:01:15Z","page":"291-311","day":"01","publication":"23rd International Conference on Runtime Verification","year":"2023","publisher":"Springer Nature","quality_controlled":"1","oa":1,"acknowledgement":"This work is supported by the European Research Council under Grant No.: ERC-2020-AdG 101020093.","title":"Monitoring algorithmic fairness under partial observations","author":[{"id":"40876CD8-F248-11E8-B48F-1D18A9856A87","first_name":"Thomas A","orcid":"0000-0002-2985-7724","full_name":"Henzinger, Thomas A","last_name":"Henzinger"},{"full_name":"Kueffner, 
Konstantin","orcid":"0000-0001-8974-2542","last_name":"Kueffner","first_name":"Konstantin","id":"8121a2d0-dc85-11ea-9058-af578f3b4515"},{"id":"0834ff3c-6d72-11ec-94e0-b5b0a4fb8598","first_name":"Kaushik","orcid":"0000-0001-9864-7475","full_name":"Mallik, Kaushik","last_name":"Mallik"}],"external_id":{"arxiv":["2308.00341"]},"article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"ista":"Henzinger TA, Kueffner K, Mallik K. 2023. Monitoring algorithmic fairness under partial observations. 23rd International Conference on Runtime Verification. RV: Conference on Runtime Verification, LNCS, vol. 14245, 291–311.","chicago":"Henzinger, Thomas A, Konstantin Kueffner, and Kaushik Mallik. “Monitoring Algorithmic Fairness under Partial Observations.” In 23rd International Conference on Runtime Verification, 14245:291–311. Springer Nature, 2023. https://doi.org/10.1007/978-3-031-44267-4_15.","ieee":"T. A. Henzinger, K. Kueffner, and K. Mallik, “Monitoring algorithmic fairness under partial observations,” in 23rd International Conference on Runtime Verification, Thessaloniki, Greece, 2023, vol. 14245, pp. 291–311.","short":"T.A. Henzinger, K. Kueffner, K. Mallik, in:, 23rd International Conference on Runtime Verification, Springer Nature, 2023, pp. 291–311.","ama":"Henzinger TA, Kueffner K, Mallik K. Monitoring algorithmic fairness under partial observations. In: 23rd International Conference on Runtime Verification. Vol 14245. Springer Nature; 2023:291-311. doi:10.1007/978-3-031-44267-4_15","apa":"Henzinger, T. A., Kueffner, K., & Mallik, K. (2023). Monitoring algorithmic fairness under partial observations. In 23rd International Conference on Runtime Verification (Vol. 14245, pp. 291–311). Thessaloniki, Greece: Springer Nature. https://doi.org/10.1007/978-3-031-44267-4_15","mla":"Henzinger, Thomas A., et al. “Monitoring Algorithmic Fairness under Partial Observations.” 23rd International Conference on Runtime Verification, vol. 
14245, Springer Nature, 2023, pp. 291–311, doi:10.1007/978-3-031-44267-4_15."},"project":[{"call_identifier":"H2020","_id":"62781420-2b32-11ec-9570-8d9b63373d4d","grant_number":"101020093","name":"Vigilant Algorithmic Monitoring of Software"}]},{"author":[{"last_name":"Jakubík","full_name":"Jakubík, Jozef","first_name":"Jozef"},{"first_name":"Phuong","id":"3EC6EE64-F248-11E8-B48F-1D18A9856A87","full_name":"Bui Thi Mai, Phuong","last_name":"Bui Thi Mai"},{"full_name":"Chvosteková, Martina","last_name":"Chvosteková","first_name":"Martina"},{"first_name":"Anna","full_name":"Krakovská, Anna","last_name":"Krakovská"}],"article_processing_charge":"Yes","title":"Against the flow of time with multi-output models","citation":{"ista":"Jakubík J, Phuong M, Chvosteková M, Krakovská A. 2023. Against the flow of time with multi-output models. Measurement Science Review. 23(4), 175–183.","chicago":"Jakubík, Jozef, Mary Phuong, Martina Chvosteková, and Anna Krakovská. “Against the Flow of Time with Multi-Output Models.” Measurement Science Review. Sciendo, 2023. https://doi.org/10.2478/msr-2023-0023.","ieee":"J. Jakubík, M. Phuong, M. Chvosteková, and A. Krakovská, “Against the flow of time with multi-output models,” Measurement Science Review, vol. 23, no. 4. Sciendo, pp. 175–183, 2023.","short":"J. Jakubík, M. Phuong, M. Chvosteková, A. Krakovská, Measurement Science Review 23 (2023) 175–183.","ama":"Jakubík J, Phuong M, Chvosteková M, Krakovská A. Against the flow of time with multi-output models. Measurement Science Review. 2023;23(4):175-183. doi:10.2478/msr-2023-0023","apa":"Jakubík, J., Phuong, M., Chvosteková, M., & Krakovská, A. (2023). Against the flow of time with multi-output models. Measurement Science Review. Sciendo. https://doi.org/10.2478/msr-2023-0023","mla":"Jakubík, Jozef, et al. “Against the Flow of Time with Multi-Output Models.” Measurement Science Review, vol. 23, no. 4, Sciendo, 2023, pp. 
175–83, doi:10.2478/msr-2023-0023."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","page":"175-183","doi":"10.2478/msr-2023-0023","date_published":"2023-08-01T00:00:00Z","date_created":"2023-10-22T22:01:15Z","has_accepted_license":"1","year":"2023","day":"01","publication":"Measurement Science Review","publisher":"Sciendo","quality_controlled":"1","oa":1,"acknowledgement":"The work was supported by the Scientific Grant Agency of the Ministry of Education of the Slovak Republic and the Slovak Academy of Sciences, projects APVV-21-0216, VEGA2-0096-21 and VEGA 2-0023-22.","department":[{"_id":"ChLa"}],"file_date_updated":"2023-10-31T12:07:23Z","date_updated":"2023-10-31T12:12:47Z","ddc":["510"],"article_type":"original","type":"journal_article","tmp":{"short":"CC BY-NC-ND (4.0)","name":"Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)","legal_code_url":"https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode","image":"/images/cc_by_nc_nd.png"},"status":"public","_id":"14446","volume":23,"issue":"4","license":"https://creativecommons.org/licenses/by-nc-nd/4.0/","publication_identifier":{"eissn":["1335-8871"]},"publication_status":"published","file":[{"content_type":"application/pdf","relation":"main_file","access_level":"open_access","success":1,"checksum":"b069cc10fa6a7c96b2bc9f728165f9e6","file_id":"14476","file_size":2639783,"date_updated":"2023-10-31T12:07:23Z","creator":"dernst","file_name":"2023_MeasurementScienceRev_Jakubik.pdf","date_created":"2023-10-31T12:07:23Z"}],"language":[{"iso":"eng"}],"scopus_import":"1","month":"08","intvolume":" 23","abstract":[{"text":"Recent work has paid close attention to the first principle of Granger causality, according to which cause precedes effect. In this context, the question may arise whether the detected direction of causality also reverses after the time reversal of unidirectionally coupled data. 
Recently, it has been shown that for unidirectionally causally connected autoregressive (AR) processes X → Y, after time reversal of data, the opposite causal direction Y → X is indeed detected, although typically as part of the bidirectional X ↔ Y link. As we argue here, the answer is different when the measured data are not from AR processes but from linked deterministic systems. When the goal is the usual forward data analysis, cross-mapping-like approaches correctly detect X → Y, while Granger causality-like approaches, which should not be used for deterministic time series, detect causal independence X → Y. The results of backward causal analysis depend on the predictability of the reversed data. Unlike AR processes, observables from deterministic dynamical systems, even complex nonlinear ones, can be predicted well forward, while backward predictions can be difficult (notably when the time reversal of a function leads to one-to-many relations). To address this problem, we propose an approach based on models that provide multiple candidate predictions for the target, combined with a loss function that considers only the best candidate. The resulting good forward and backward predictability supports the view that unidirectionally causally linked deterministic dynamical systems X → Y can be expected to detect the same link both before and after time reversal.","lang":"eng"}],"oa_version":"Published Version"},{"language":[{"iso":"eng"}],"publication_status":"published","publication_identifier":{"eissn":["2168-6238"]},"volume":80,"issue":"10","oa_version":"None","pmid":1,"abstract":[{"text":"Importance Climate change, pollution, urbanization, socioeconomic inequality, and psychosocial effects of the COVID-19 pandemic have caused massive changes in environmental conditions that affect brain health during the life span, both on a population level as well as on the level of the individual. 
How these environmental factors influence the brain, behavior, and mental illness is not well known.\r\nObservations A research strategy enabling population neuroscience to contribute to identify brain mechanisms underlying environment-related mental illness by leveraging innovative enrichment tools for data federation, geospatial observation, climate and pollution measures, digital health, and novel data integration techniques is described. This strategy can inform innovative treatments that target causal cognitive and molecular mechanisms of mental illness related to the environment. An example is presented of the environMENTAL Project that is leveraging federated cohort data of over 1.5 million European citizens and patients enriched with deep phenotyping data from large-scale behavioral neuroimaging cohorts to identify brain mechanisms related to environmental adversity underlying symptoms of depression, anxiety, stress, and substance misuse.\r\nConclusions and Relevance This research will lead to the development of objective biomarkers and evidence-based interventions that will significantly improve outcomes of environment-related mental illness.","lang":"eng"}],"intvolume":" 80","month":"10","scopus_import":"1","date_updated":"2023-10-31T12:17:20Z","department":[{"_id":"GaNo"}],"_id":"14443","status":"public","article_type":"review","type":"journal_article","publication":"JAMA Psychiatry","day":"01","year":"2023","date_created":"2023-10-22T22:01:14Z","doi":"10.1001/jamapsychiatry.2023.2996","date_published":"2023-10-01T00:00:00Z","page":"1066-1074","quality_controlled":"1","publisher":"American Medical Association","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"mla":"Schumann, Gunter, et al. “Addressing Global Environmental Challenges to Mental Health Using Population Neuroscience: A Review.” JAMA Psychiatry, vol. 80, no. 10, American Medical Association, 2023, pp. 1066–74, doi:10.1001/jamapsychiatry.2023.2996.","short":"G. Schumann, O.A. 
Andreassen, T. Banaschewski, V.D. Calhoun, N. Clinton, S. Desrivieres, R.E. Brandlistuen, J. Feng, S. Hese, E. Hitchen, P. Hoffmann, T. Jia, V. Jirsa, A.F. Marquand, F. Nees, M.M. Nöthen, G. Novarino, E. Polemiti, M. Ralser, M. Rapp, K. Schepanski, T. Schikowski, M. Slater, P. Sommer, B.C. Stahl, P.M. Thompson, S. Twardziok, D. Van Der Meer, H. Walter, L. Westlye, JAMA Psychiatry 80 (2023) 1066–1074.","ieee":"G. Schumann et al., “Addressing global environmental challenges to mental health using population neuroscience: A review,” JAMA Psychiatry, vol. 80, no. 10. American Medical Association, pp. 1066–1074, 2023.","apa":"Schumann, G., Andreassen, O. A., Banaschewski, T., Calhoun, V. D., Clinton, N., Desrivieres, S., … Westlye, L. (2023). Addressing global environmental challenges to mental health using population neuroscience: A review. JAMA Psychiatry. American Medical Association. https://doi.org/10.1001/jamapsychiatry.2023.2996","ama":"Schumann G, Andreassen OA, Banaschewski T, et al. Addressing global environmental challenges to mental health using population neuroscience: A review. JAMA Psychiatry. 2023;80(10):1066-1074. doi:10.1001/jamapsychiatry.2023.2996","chicago":"Schumann, Gunter, Ole A. Andreassen, Tobias Banaschewski, Vince D. Calhoun, Nicholas Clinton, Sylvane Desrivieres, Ragnhild Eek Brandlistuen, et al. “Addressing Global Environmental Challenges to Mental Health Using Population Neuroscience: A Review.” JAMA Psychiatry. American Medical Association, 2023. https://doi.org/10.1001/jamapsychiatry.2023.2996.","ista":"Schumann G, Andreassen OA, Banaschewski T, Calhoun VD, Clinton N, Desrivieres S, Brandlistuen RE, Feng J, Hese S, Hitchen E, Hoffmann P, Jia T, Jirsa V, Marquand AF, Nees F, Nöthen MM, Novarino G, Polemiti E, Ralser M, Rapp M, Schepanski K, Schikowski T, Slater M, Sommer P, Stahl BC, Thompson PM, Twardziok S, Van Der Meer D, Walter H, Westlye L. 2023. 
Addressing global environmental challenges to mental health using population neuroscience: A review. JAMA Psychiatry. 80(10), 1066–1074."},"title":"Addressing global environmental challenges to mental health using population neuroscience: A review","article_processing_charge":"No","external_id":{"pmid":["37610741"]},"author":[{"first_name":"Gunter","full_name":"Schumann, Gunter","last_name":"Schumann"},{"last_name":"Andreassen","full_name":"Andreassen, Ole A.","first_name":"Ole A."},{"last_name":"Banaschewski","full_name":"Banaschewski, Tobias","first_name":"Tobias"},{"first_name":"Vince D.","last_name":"Calhoun","full_name":"Calhoun, Vince D."},{"full_name":"Clinton, Nicholas","last_name":"Clinton","first_name":"Nicholas"},{"full_name":"Desrivieres, Sylvane","last_name":"Desrivieres","first_name":"Sylvane"},{"first_name":"Ragnhild Eek","full_name":"Brandlistuen, Ragnhild Eek","last_name":"Brandlistuen"},{"first_name":"Jianfeng","last_name":"Feng","full_name":"Feng, Jianfeng"},{"full_name":"Hese, Soeren","last_name":"Hese","first_name":"Soeren"},{"full_name":"Hitchen, Esther","last_name":"Hitchen","first_name":"Esther"},{"last_name":"Hoffmann","full_name":"Hoffmann, Per","first_name":"Per"},{"first_name":"Tianye","last_name":"Jia","full_name":"Jia, Tianye"},{"full_name":"Jirsa, Viktor","last_name":"Jirsa","first_name":"Viktor"},{"full_name":"Marquand, Andre F.","last_name":"Marquand","first_name":"Andre F."},{"last_name":"Nees","full_name":"Nees, Frauke","first_name":"Frauke"},{"last_name":"Nöthen","full_name":"Nöthen, Markus M.","first_name":"Markus M."},{"orcid":"0000-0002-7673-7178","full_name":"Novarino, Gaia","last_name":"Novarino","first_name":"Gaia","id":"3E57A680-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Elli","last_name":"Polemiti","full_name":"Polemiti, Elli"},{"first_name":"Markus","last_name":"Ralser","full_name":"Ralser, Markus"},{"first_name":"Michael","full_name":"Rapp, Michael","last_name":"Rapp"},{"full_name":"Schepanski, 
Kerstin","last_name":"Schepanski","first_name":"Kerstin"},{"last_name":"Schikowski","full_name":"Schikowski, Tamara","first_name":"Tamara"},{"last_name":"Slater","full_name":"Slater, Mel","first_name":"Mel"},{"last_name":"Sommer","full_name":"Sommer, Peter","first_name":"Peter"},{"first_name":"Bernd Carsten","full_name":"Stahl, Bernd Carsten","last_name":"Stahl"},{"first_name":"Paul M.","full_name":"Thompson, Paul M.","last_name":"Thompson"},{"last_name":"Twardziok","full_name":"Twardziok, Sven","first_name":"Sven"},{"full_name":"Van Der Meer, Dennis","last_name":"Van Der Meer","first_name":"Dennis"},{"full_name":"Walter, Henrik","last_name":"Walter","first_name":"Henrik"},{"last_name":"Westlye","full_name":"Westlye, Lars","first_name":"Lars"}]}]