@article{11584, abstract = {Observations show that star-forming galaxies reside on a tight 3D plane between mass, gas-phase metallicity, and star formation rate (SFR), which can be explained by the interplay between metal-poor gas inflows, SFR and outflows. However, different metals are released on different time-scales, which may affect the slope of this relation. Here, we use central, star-forming galaxies with Mstar = 109.0–10.5 M⊙ from the EAGLE hydrodynamical simulation to examine 3D relations between mass, SFR, and chemical enrichment using absolute and relative C, N, O, and Fe abundances. We show that the scatter is smaller when gas-phase α-enhancement is used rather than metallicity. A similar plane also exists for stellar α-enhancement, implying that present-day specific SFRs are correlated with long time-scale star formation histories. Between z = 0 and 1, the α-enhancement plane is even more insensitive to redshift than the plane using metallicity. However, it evolves at z > 1 due to lagging iron yields. At fixed mass, galaxies with higher SFRs have star formation histories shifted towards late times, are more α-enhanced, and this α-enhancement increases with redshift as observed. 
These findings suggest that relations between physical properties inferred from observations may be affected by systematic variations in α-enhancements.}, author = {Matthee, Jorryt J and Schaye, Joop}, issn = {1745-3933}, journal = {Monthly Notices of the Royal Astronomical Society: Letters}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: abundances, galaxies: evolution, galaxies: formation, galaxies: star formation}, number = {1}, pages = {L34--L39}, publisher = {Oxford University Press}, title = {{Star-forming galaxies are predicted to lie on a fundamental plane of mass, star formation rate, and α-enhancement}}, doi = {10.1093/mnrasl/sly093}, volume = {479}, year = {2018}, } @article{11619, abstract = {We report on the confirmation and mass determination of π Men c, the first transiting planet discovered by NASA’s TESS space mission. π Men is a naked-eye (V = 5.65 mag), quiet G0 V star that was previously known to host a sub-stellar companion (π Men b) on a long-period (Porb = 2091 days), eccentric (e = 0.64) orbit. Using TESS time-series photometry, combined with Gaia data, published UCLES at AAT Doppler measurements, and archival HARPS at ESO-3.6m radial velocities, we found that π Men c is a close-in planet with an orbital period of Porb = 6.27 days, a mass of Mc = 4.52 ± 0.81 M⊕, and a radius of Rc = 2.06 ± 0.03 R⊕. Based on the planet’s orbital period and size, π Men c is a super-Earth located at, or close to, the radius gap, while its mass and bulk density suggest it may have held on to a significant atmosphere. Because of the brightness of the host star, this system is highly suitable for a wide range of further studies to characterize the planetary atmosphere and dynamical properties. 
We also performed an asteroseismic analysis of the TESS data and detected a hint of power excess consistent with the seismic values expected for this star, although this result depends on the photometric aperture used to extract the light curve. This marginal detection is expected from pre-launch simulations hinting at the asteroseismic potential of the TESS mission for longer, multi-sector observations and/or for more evolved bright stars.}, author = {Gandolfi, D. and Barragán, O. and Livingston, J. H. and Fridlund, M. and Justesen, A. B. and Redfield, S. and Fossati, L. and Mathur, S. and Grziwa, S. and Cabrera, J. and García, R. A. and Persson, C. M. and Van Eylen, V. and Hatzes, A. P. and Hidalgo, D. and Albrecht, S. and Bugnet, Lisa Annabelle and Cochran, W. D. and Csizmadia, Sz. and Deeg, H. and Eigmüller, Ph. and Endl, M. and Erikson, A. and Esposito, M. and Guenther, E. and Korth, J. and Luque, R. and Montañes Rodríguez, P. and Nespral, D. and Nowak, G. and Pätzold, M. and Prieto-Arranz, J.}, issn = {1432-0746}, journal = {Astronomy \& Astrophysics}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, planetary systems / planets and satellites, detection / planets and satellites, fundamental parameters / planets and satellites, terrestrial planets / stars, fundamental parameters}, publisher = {EDP Sciences}, title = {{TESS’s first planet: A super-Earth transiting the naked-eye star π Mensae}}, doi = {10.1051/0004-6361/201834289}, volume = {619}, year = {2018}, } @article{11618, abstract = {Asteroseismology provides global stellar parameters such as masses, radii, or surface gravities using mean global seismic parameters and effective temperature for thousands of low-mass stars (0.8 M⊙ < M < 3 M⊙). This methodology has been successfully applied to stars in which acoustic modes excited by turbulent convection are measured. 
Other methods such as the Flicker technique can also be used to determine stellar surface gravities, but only works for log g above 2.5 dex. In this work, we present a new metric called FliPer (Flicker in spectral power density, in opposition to the standard Flicker measurement which is computed in the time domain); it is able to extend the range for which reliable surface gravities can be obtained (0.1 < log g < 4.6 dex) without performing any seismic analysis for stars brighter than Kp < 14. FliPer takes into account the average variability of a star measured in the power density spectrum in a given range of frequencies. However, FliPer values calculated on several ranges of frequency are required to better characterize a star. Using a large set of asteroseismic targets it is possible to calibrate the behavior of surface gravity with FliPer through machine learning. This calibration made with a random forest regressor covers a wide range of surface gravities from main-sequence stars to subgiants and red giants, with very small uncertainties from 0.04 to 0.1 dex. FliPer values can be inserted in automatic global seismic pipelines to either give an estimation of the stellar surface gravity or to assess the quality of the seismic results by detecting any outliers in the obtained νmax values. FliPer also constrains the surface gravities of main-sequence dwarfs using only long-cadence data for which the Nyquist frequency is too low to measure the acoustic-mode properties.}, author = {Bugnet, Lisa Annabelle and García, R. A. and Davies, G. R. and Mathur, S. and Corsaro, E. and Hall, O. J. and Rendle, B. 
M.}, issn = {1432-0746}, journal = {Astronomy \& Astrophysics}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, asteroseismology / methods, data analysis / stars, oscillations}, publisher = {EDP Sciences}, title = {{FliPer: A global measure of power density to estimate surface gravities of main-sequence solar-like stars and red giants}}, doi = {10.1051/0004-6361/201833106}, volume = {620}, year = {2018}, } @article{11620, abstract = {We report the discovery and characterization of HD 89345b (K2-234b; EPIC 248777106b), a Saturn-sized planet orbiting a slightly evolved star. HD 89345 is a bright star (V = 9.3 mag) observed by the K2 mission with 1 min time sampling. It exhibits solar-like oscillations. We conducted asteroseismology to determine the parameters of the star, finding the mass and radius to be 1.12+0.04−0.01M⊙ and 1.657+0.020−0.004R⊙, respectively. The star appears to have recently left the main sequence, based on the inferred age, 9.4+0.4−1.3Gyr, and the non-detection of mixed modes. The star hosts a ‘warm Saturn’ (P = 11.8 d, Rp = 6.86 ± 0.14 R⊕). Radial-velocity follow-up observations performed with the FIbre-fed Echelle Spectrograph, HARPS, and HARPS-N spectrographs show that the planet has a mass of 35.7 ± 3.3 M⊕. The data also show that the planet’s orbit is eccentric (e ≈ 0.2). An investigation of the rotational splitting of the oscillation frequencies of the star yields no conclusive evidence on the stellar inclination angle. We further obtained Rossiter–McLaughlin observations, which result in a broad posterior of the stellar obliquity. 
The planet seems to conform to the same patterns that have been observed for other sub-Saturns regarding planet mass and multiplicity, orbital eccentricity, and stellar metallicity.}, author = {Van Eylen, V and Dai, F and Mathur, S and Gandolfi, D and Albrecht, S and Fridlund, M and García, R A and Guenther, E and Hjorth, M and Justesen, A B and Livingston, J and Lund, M N and Pérez Hernández, F and Prieto-Arranz, J and Regulo, C and Bugnet, Lisa Annabelle and Everett, M E and Hirano, T and Nespral, D and Nowak, G and Palle, E and Silva Aguirre, V and Trifonov, T and Winn, J N and Barragán, O and Beck, P G and Chaplin, W J and Cochran, W D and Csizmadia, S and Deeg, H and Endl, M and Heeren, P and Grziwa, S and Hatzes, A P and Hidalgo, D and Korth, J and Mathis, S and Montañes Rodriguez, P and Narita, N and Patzold, M and Persson, C M and Rodler, F and Smith, A M S}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, asteroseismology, planets and satellites: composition, planets and satellites: formation, planets and satellites: fundamental parameters}, number = {4}, pages = {4866--4880}, publisher = {Oxford University Press}, title = {{HD 89345: A bright oscillating star hosting a transiting warm Saturn-sized planet observed by K2}}, doi = {10.1093/mnras/sty1390}, volume = {478}, year = {2018}, } @unpublished{11631, abstract = {The recently launched NASA Transiting Exoplanet Survey Satellite (TESS) mission is going to collect lightcurves for a few hundred million of stars and we expect to increase the number of pulsating stars to analyze compared to the few thousand stars observed by the CoRoT, Kepler and K2 missions. However, most of the TESS targets have not yet been properly classified and characterized. In order to improve the analysis of the TESS data, it is crucial to determine the type of stellar pulsations in a timely manner. 
We propose an automatic method to classify stars attending to their pulsation properties, in particular, to identify solar-like pulsators among all TESS targets. It relies on the use of the global amount of power contained in the power spectrum (already known as the FliPer method) as a key parameter, along with the effective temperature, to feed into a machine learning classifier. Our study, based on TESS simulated datasets, shows that we are able to classify pulsators with a 98\% accuracy.}, author = {Bugnet, Lisa Annabelle and García, R. A. and Davies, G. R. and Mathur, S. and Hall, O. J. and Rendle, B. M.}, note = {arXiv:1811.12140}, eprinttype = {arXiv}, eprint = {1811.12140}, keywords = {asteroseismology - methods, data analysis - stars, oscillations}, title = {{FliPer: Classifying TESS pulsating stars}}, doi = {10.48550/arXiv.1811.12140}, year = {2018}, } @article{11657, abstract = {The minimum cut problem for an undirected edge-weighted graph asks us to divide its set of nodes into two blocks while minimizing the weight sum of the cut edges. Here, we introduce a linear-time algorithm to compute near-minimum cuts. Our algorithm is based on cluster contraction using label propagation and Padberg and Rinaldi’s contraction heuristics [SIAM Review, 1991]. We give both sequential and shared-memory parallel implementations of our algorithm. Extensive experiments on both real-world and generated instances show that our algorithm finds the optimal cut on nearly all instances significantly faster than other state-of-the-art exact algorithms, and our error rate is lower than that of other heuristic algorithms. In addition, our parallel algorithm runs a factor 7.5× faster on average when using 32 threads. To further speed up computations, we also give a version of our algorithm that performs random edge contractions as preprocessing. 
This version achieves a lower running time and better parallel scalability at the expense of a higher error rate.}, author = {Henzinger, Monika H and Noe, Alexander and Schulz, Christian and Strash, Darren}, issn = {1084-6654}, journal = {ACM Journal of Experimental Algorithmics}, keywords = {Theoretical Computer Science}, pages = {1--22}, publisher = {Association for Computing Machinery}, title = {{Practical minimum cut algorithms}}, doi = {10.1145/3274662}, volume = {23}, year = {2018}, } @article{11667, abstract = {The focus of classic mechanism design has been on truthful direct-revelation mechanisms. In the context of combinatorial auctions, the truthful direct-revelation mechanism that maximizes social welfare is the Vickrey-Clarke-Groves mechanism. For many valuation spaces, computing the allocation and payments of the VCG mechanism, however, is a computationally hard problem. We thus study the performance of the VCG mechanism when bidders are forced to choose bids from a subspace of the valuation space for which the VCG outcome can be computed efficiently. We prove improved upper bounds on the welfare loss for restrictions to additive bids and upper and lower bounds for restrictions to non-additive bids. 
These bounds show that increased expressiveness can give rise to additional equilibria of poorer efficiency.}, author = {Dütting, Paul and Henzinger, Monika H and Starnberger, Martin}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Theory of computation, Algorithmic game theory and mechanism design, Applied computing, Economics, Simplified mechanisms, Combinatorial auctions with item bidding, Price of anarchy}, number = {2}, publisher = {Association for Computing Machinery}, title = {{Valuation compressions in VCG-based combinatorial auctions}}, doi = {10.1145/3232860}, volume = {6}, year = {2018}, } @article{11664, abstract = {We present a deterministic incremental algorithm for exactly maintaining the size of a minimum cut with O(log3 n log log2 n) amortized time per edge insertion and O(1) query time. This result partially answers an open question posed by Thorup (2007). It also stays in sharp contrast to a polynomial conditional lower bound for the fully dynamic weighted minimum cut problem. Our algorithm is obtained by combining a sparsification technique of Kawarabayashi and Thorup (2015) or its recent improvement by Henzinger, Rao, and Wang (2017), and an exact incremental algorithm of Henzinger (1997). We also study space-efficient incremental algorithms for the minimum cut problem. Concretely, we show that there exists an O(nlog n/ε2) space Monte Carlo algorithm that can process a stream of edge insertions starting from an empty graph, and with high probability, the algorithm maintains a (1+ε)-approximation to the minimum cut. 
The algorithm has O((α (n) log3 n)/ε 2) amortized update time and constant query time, where α (n) stands for the inverse of Ackermann function.}, author = {Goranci, Gramoz and Henzinger, Monika H and Thorup, Mikkel}, issn = {1549-6333}, journal = {ACM Transactions on Algorithms}, number = {2}, publisher = {Association for Computing Machinery}, title = {{Incremental exact min-cut in polylogarithmic amortized update time}}, doi = {10.1145/3174803}, volume = {14}, year = {2018}, } @article{11757, abstract = {We develop a dynamic version of the primal-dual method for optimization problems, and apply it to obtain the following results. (1) For the dynamic set-cover problem, we maintain an O ( f 2)-approximately optimal solution in O ( f · log(m + n)) amortized update time, where f is the maximum “frequency” of an element, n is the number of sets, and m is the maximum number of elements in the universe at any point in time. (2) For the dynamic b-matching problem, we maintain an O (1)-approximately optimal solution in O (log3 n) amortized update time, where n is the number of nodes in the graph.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Italiano, Giuseppe}, issn = {0890-5401}, journal = {Information and Computation}, number = {08}, pages = {219--239}, publisher = {Elsevier}, title = {{Dynamic algorithms via the primal-dual method}}, doi = {10.1016/j.ic.2018.02.005}, volume = {261}, year = {2018}, } @inproceedings{11828, abstract = {We consider the problem of dynamically maintaining (approximate) all-pairs effective resistances in separable graphs, which are those that admit an n^{c}-separator theorem for some c<1. 
We give a fully dynamic algorithm that maintains (1+epsilon)-approximations of the all-pairs effective resistances of an n-vertex graph G undergoing edge insertions and deletions with O~(sqrt{n}/epsilon^2) worst-case update time and O~(sqrt{n}/epsilon^2) worst-case query time, if G is guaranteed to be sqrt{n}-separable (i.e., it is taken from a class satisfying a sqrt{n}-separator theorem) and its separator can be computed in O~(n) time. Our algorithm is built upon a dynamic algorithm for maintaining approximate Schur complement that approximately preserves pairwise effective resistances among a set of terminals for separable graphs, which might be of independent interest. We complement our result by proving that for any two fixed vertices s and t, no incremental or decremental algorithm can maintain the s-t effective resistance for sqrt{n}-separable graphs with worst-case update time O(n^{1/2-delta}) and query time O(n^{1-delta}) for any delta>0, unless the Online Matrix Vector Multiplication (OMv) conjecture is false. We further show that for general graphs, no incremental or decremental algorithm can maintain the s-t effective resistance problem with worst-case update time O(n^{1-delta}) and query-time O(n^{2-delta}) for any delta >0, unless the OMv conjecture is false.}, author = {Goranci, Gramoz and Henzinger, Monika H and Peng, Pan}, booktitle = {26th Annual European Symposium on Algorithms}, isbn = {9783959770811}, issn = {1868-8969}, location = {Helsinki, Finland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Dynamic effective resistances and approximate schur complement on separable graphs}}, doi = {10.4230/LIPICS.ESA.2018.40}, volume = {112}, year = {2018}, } @inproceedings{11827, abstract = {We study the metric facility location problem with client insertions and deletions. 
This setting differs from the classic dynamic facility location problem, where the set of clients remains the same, but the metric space can change over time. We show a deterministic algorithm that maintains a constant factor approximation to the optimal solution in worst-case time O~(2^{O(kappa^2)}) per client insertion or deletion in metric spaces while answering queries about the cost in O(1) time, where kappa denotes the doubling dimension of the metric. For metric spaces with bounded doubling dimension, the update time is polylogarithmic in the parameters of the problem.}, author = {Goranci, Gramoz and Henzinger, Monika H and Leniowski, Dariusz}, booktitle = {26th Annual European Symposium on Algorithms}, isbn = {9783959770811}, issn = {1868-8969}, location = {Helsinki, Finland}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{A tree structure for dynamic facility location}}, doi = {10.4230/LIPICS.ESA.2018.39}, volume = {112}, year = {2018}, } @article{11768, abstract = {In the decremental single-source shortest paths (SSSP) problem, we want to maintain the distances between a given source node s and every other node in an n-node m-edge graph G undergoing edge deletions. While its static counterpart can be solved in near-linear time, this decremental problem is much more challenging even in the undirected unweighted case. In this case, the classic O(mn) total update time of Even and Shiloach [16] has been the fastest known algorithm for three decades. At the cost of a (1+ϵ)-approximation factor, the running time was recently improved to n2+o(1) by Bernstein and Roditty [9]. In this article, we bring the running time down to near-linear: We give a (1+ϵ)-approximation algorithm with m1+o(1) expected total update time, thus obtaining near-linear time. Moreover, we obtain m1+o(1) log W time for the weighted case, where the edge weights are integers from 1 to W. 
The only prior work on weighted graphs in o(mn) time is the mn0.9 + o(1)-time algorithm by Henzinger et al. [18, 19], which works for directed graphs with quasi-polynomial edge weights. The expected running time bound of our algorithm holds against an oblivious adversary. In contrast to the previous results, which rely on maintaining a sparse emulator, our algorithm relies on maintaining a so-called sparse (h, ϵ)-hop set introduced by Cohen [12] in the PRAM literature. An (h, ϵ)-hop set of a graph G=(V, E) is a set F of weighted edges such that the distance between any pair of nodes in G can be (1+ϵ)-approximated by their h-hop distance (given by a path containing at most h edges) on G′=(V, E ∪ F). Our algorithm can maintain an (no(1), ϵ)-hop set of near-linear size in near-linear time under edge deletions. It is the first of its kind to the best of our knowledge. To maintain approximate distances using this hop set, we extend the monotone Even-Shiloach tree of Henzinger et al. [20] and combine it with the bounded-hop SSSP technique of Bernstein [4, 5] and Mądry [27]. These two new tools might be of independent interest.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, issn = {1557-735X}, journal = {Journal of the ACM}, number = {6}, pages = {1--40}, publisher = {Association for Computing Machinery}, title = {{Decremental single-source shortest paths on undirected graphs in near-linear total update time}}, doi = {10.1145/3218657}, volume = {65}, year = {2018}, } @inproceedings{11872, abstract = {We design fast dynamic algorithms for proper vertex and edge colorings in a graph undergoing edge insertions and deletions. In the static setting, there are simple linear time algorithms for (Δ + 1)- vertex coloring and (2Δ – 1)-edge coloring in a graph with maximum degree Δ. It is natural to ask if we can efficiently maintain such colorings in the dynamic setting as well. We get the following three results. 
(1) We present a randomized algorithm which maintains a (Δ + 1)-vertex coloring with O(log Δ) expected amortized update time. (2) We present a deterministic algorithm which maintains a (1 + o(1))Δ-vertex coloring with O(polylog Δ) amortized update time. (3) We present a simple, deterministic algorithm which maintains a (2Δ – 1)-edge coloring with O(log Δ) worst-case update time. This improves the recent O(Δ)-edge coloring algorithm with worst-case update time [4].}, author = {Bhattacharya, Sayan and Chakrabarty, Deeparnab and Henzinger, Monika H and Nanongkai, Danupon}, booktitle = {29th Annual ACM-SIAM Symposium on Discrete Algorithms}, location = {New Orleans, LA, United States}, pages = {1--20}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Dynamic algorithms for graph coloring}}, doi = {10.1137/1.9781611975031.1}, year = {2018}, } @inproceedings{11882, abstract = {The minimum cut problem for an undirected edge-weighted graph asks us to divide its set of nodes into two blocks while minimizing the weight sum of the cut edges. Here, we introduce a linear-time algorithm to compute near-minimum cuts. Our algorithm is based on cluster contraction using label propagation and Padberg and Rinaldi's contraction heuristics [SIAM Review, 1991]. We give both sequential and shared-memory parallel implementations of our algorithm. Extensive experiments on both real-world and generated instances show that our algorithm finds the optimal cut on nearly all instances significantly faster than other state-of-the-art exact algorithms, and our error rate is lower than that of other heuristic algorithms. 
In addition, our parallel algorithm shows good scalability.}, author = {Henzinger, Monika H and Noe, Alexander and Schulz, Christian and Strash, Darren}, booktitle = {20th Workshop on Algorithm Engineering and Experiments}, location = {New Orleans, LA, United States}, pages = {48--61}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Practical minimum cut algorithms}}, doi = {10.1137/1.9781611975055.5}, year = {2018}, } @article{11890, abstract = {We present the first deterministic data structures for maintaining approximate minimum vertex cover and maximum matching in a fully dynamic graph 𝐺=(𝑉,𝐸), with |𝑉|=𝑛 and |𝐸|=𝑚, in 𝑜(𝑚‾‾√) time per update. In particular, for minimum vertex cover, we provide deterministic data structures for maintaining a (2+𝜖) approximation in 𝑂(log𝑛/𝜖2) amortized time per update. For maximum matching, we show how to maintain a (3+𝜖) approximation in 𝑂(min(𝑛√/𝜖,𝑚1/3/𝜖2) amortized time per update and a (4+𝜖) approximation in 𝑂(𝑚1/3/𝜖2) worst-case time per update. Our data structure for fully dynamic minimum vertex cover is essentially near-optimal and settles an open problem by Onak and Rubinfeld [in 42nd ACM Symposium on Theory of Computing, Cambridge, MA, ACM, 2010, pp. 457--464].}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Italiano, Giuseppe F.}, issn = {1095-7111}, journal = {SIAM Journal on Computing}, number = {3}, pages = {859--887}, publisher = {Society for Industrial & Applied Mathematics}, title = {{Deterministic fully dynamic data structures for vertex cover and matching}}, doi = {10.1137/140998925}, volume = {47}, year = {2018}, } @inproceedings{11911, abstract = {It is common knowledge that there is no single best strategy for graph clustering, which justifies a plethora of existing approaches. In this paper, we present a general memetic algorithm, VieClus, to tackle the graph clustering problem. This algorithm can be adapted to optimize different objective functions. 
A key component of our contribution are natural recombine operators that employ ensemble clusterings as well as multi-level techniques. Lastly, we combine these techniques with a scalable communication protocol, producing a system that is able to compute high-quality solutions in a short amount of time. We instantiate our scheme with local search for modularity and show that our algorithm successfully improves or reproduces all entries of the 10th DIMACS implementation challenge under consideration using a small amount of time.}, author = {Biedermann, Sonja and Henzinger, Monika H and Schulz, Christian and Schuster, Bernhard}, booktitle = {17th International Symposium on Experimental Algorithms}, isbn = {9783959770705}, issn = {1868-8969}, location = {L'Aquila, Italy}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Memetic graph clustering}}, doi = {10.4230/LIPICS.SEA.2018.3}, volume = {103}, year = {2018}, } @article{1215, abstract = {Two generalizations of Itô formula to infinite-dimensional spaces are given. The first one, in Hilbert spaces, extends the classical one by taking advantage of cancellations when they occur in examples and it is applied to the case of a group generator. The second one, based on the previous one and a limit procedure, is an Itô formula in a special class of Banach spaces having a product structure with the noise in a Hilbert component; again the key point is the extension due to a cancellation. 
This extension to Banach spaces and in particular the specific cancellation are motivated by path-dependent Itô calculus.}, author = {Flandoli, Franco and Russo, Francesco and Zanco, Giovanni A}, journal = {Journal of Theoretical Probability}, number = {2}, pages = {789--826}, publisher = {Springer}, title = {{Infinite-dimensional calculus under weak spatial regularity of the processes}}, doi = {10.1007/s10959-016-0724-2}, volume = {31}, year = {2018}, } @article{176, abstract = {For a general class of non-negative functions defined on integral ideals of number fields, upper bounds are established for their average over the values of certain principal ideals that are associated to irreducible binary forms with integer coefficients.}, author = {Browning, Timothy D and Sofos, Efthymios}, journal = {International Journal of Number Theory}, number = {3}, pages = {547--567}, publisher = {World Scientific Publishing}, title = {{Averages of arithmetic functions over principal ideals}}, doi = {10.1142/S1793042119500283}, volume = {15}, year = {2018}, } @article{178, abstract = {We give an upper bound for the number of rational points of height at most B, lying on a surface defined by a quadratic form Q. The bound shows an explicit dependence on Q. It is optimal with respect to B, and is also optimal for typical forms Q.}, author = {Browning, Timothy D and Heath-Brown, Roger}, issn = {2397-3129}, journal = {Discrete Analysis}, pages = {1--29}, publisher = {Alliance of Diamond Open Access Journals}, title = {{Counting rational points on quadric surfaces}}, doi = {10.19086/da.4375}, volume = {15}, year = {2018}, } @inproceedings{185, abstract = {We resolve in the affirmative conjectures of A. Skopenkov and Repovš (1998), and M. Skopenkov (2003) generalizing the classical Hanani-Tutte theorem to the setting of approximating maps of graphs on 2-dimensional surfaces by embeddings. 
Our proof of this result is constructive and almost immediately implies an efficient algorithm for testing whether a given piecewise linear map of a graph in a surface is approximable by an embedding. More precisely, an instance of this problem consists of (i) a graph G whose vertices are partitioned into clusters and whose inter-cluster edges are partitioned into bundles, and (ii) a region R of a 2-dimensional compact surface M given as the union of a set of pairwise disjoint discs corresponding to the clusters and a set of pairwise disjoint "pipes" corresponding to the bundles, connecting certain pairs of these discs. We are to decide whether G can be embedded inside M so that the vertices in every cluster are drawn in the corresponding disc, the edges in every bundle pass only through its corresponding pipe, and every edge crosses the boundary of each disc at most once.}, author = {Fulek, Radoslav and Kynčl, Jan}, isbn = {978-3-95977-066-8}, location = {Budapest, Hungary}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Hanani-Tutte for approximating maps of graphs}}, doi = {10.4230/LIPIcs.SoCG.2018.39}, volume = {99}, year = {2018}, } @inproceedings{188, abstract = {Smallest enclosing spheres of finite point sets are central to methods in topological data analysis. Focusing on Bregman divergences to measure dissimilarity, we prove bounds on the location of the center of a smallest enclosing sphere. 
These bounds depend on the range of radii for which Bregman balls are convex.}, author = {Edelsbrunner, Herbert and Virk, Ziga and Wagner, Hubert}, location = {Budapest, Hungary}, pages = {35:1 -- 35:13}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Smallest enclosing spheres and Chernoff points in Bregman geometry}}, doi = {10.4230/LIPIcs.SoCG.2018.35}, volume = {99}, year = {2018}, } @article{2015, abstract = {We consider the problem of learning a Bayesian network or directed acyclic graph model from observational data. A number of constraint‐based, score‐based and hybrid algorithms have been developed for this purpose. Statistical consistency guarantees of these algorithms rely on the faithfulness assumption, which has been shown to be restrictive especially for graphs with cycles in the skeleton. We here propose the sparsest permutation (SP) algorithm, showing that learning Bayesian networks is possible under strictly weaker assumptions than faithfulness. This comes at a computational price, thereby indicating a statistical‐computational trade‐off for causal inference algorithms. In the Gaussian noiseless setting, we prove that the SP algorithm boils down to finding the permutation of the variables with the sparsest Cholesky decomposition of the inverse covariance matrix, which is equivalent to ℓ0‐penalized maximum likelihood estimation. 
We end with a simulation study showing that, in line with the proven stronger consistency guarantees, the SP algorithm compares favourably to standard causal inference algorithms in terms of accuracy for a given sample size.}, author = {Raskutti, Garvesh and Uhler, Caroline}, journal = {STAT}, number = {1}, publisher = {Wiley}, title = {{Learning directed acyclic graphs based on sparsest permutations}}, doi = {10.1002/sta4.183}, volume = {7}, year = {2018}, } @article{306, abstract = {A cornerstone of statistical inference, the maximum entropy framework is being increasingly applied to construct descriptive and predictive models of biological systems, especially complex biological networks, from large experimental data sets. Both its broad applicability and the success it obtained in different contexts hinge upon its conceptual simplicity and mathematical soundness. Here we try to concisely review the basic elements of the maximum entropy principle, starting from the notion of ‘entropy’, and describe its usefulness for the analysis of biological systems. As examples, we focus specifically on the problem of reconstructing gene interaction networks from expression data and on recent work attempting to expand our system-level understanding of bacterial metabolism. 
Finally, we highlight some extensions and potential limitations of the maximum entropy approach, and point to more recent developments that are likely to play a key role in the upcoming challenges of extracting structures and information from increasingly rich, high-throughput biological data.}, author = {De Martino, Andrea and De Martino, Daniele}, journal = {Heliyon}, number = {4}, publisher = {Elsevier}, title = {{An introduction to the maximum entropy approach and its application to inference problems in biology}}, doi = {10.1016/j.heliyon.2018.e00596}, volume = {4}, year = {2018}, } @inbook{37, abstract = {Developmental processes are inherently dynamic and understanding them requires quantitative measurements of gene and protein expression levels in space and time. While live imaging is a powerful approach for obtaining such data, it is still a challenge to apply it over long periods of time to large tissues, such as the embryonic spinal cord in mouse and chick. Nevertheless, dynamics of gene expression and signaling activity patterns in this organ can be studied by collecting tissue sections at different developmental stages. In combination with immunohistochemistry, this allows for measuring the levels of multiple developmental regulators in a quantitative manner with high spatiotemporal resolution. The mean protein expression levels over time, as well as embryo-to-embryo variability can be analyzed. A key aspect of the approach is the ability to compare protein levels across different samples. This requires a number of considerations in sample preparation, imaging and data analysis. Here we present a protocol for obtaining time course data of dorsoventral expression patterns from mouse and chick neural tube in the first 3 days of neural tube development. The described workflow starts from embryo dissection and ends with a processed dataset. Software scripts for data analysis are included. 
The protocol is adaptable and instructions that allow the user to modify different steps are provided. Thus, the procedure can be altered for analysis of time-lapse images and applied to systems other than the neural tube.}, author = {Zagórski, Marcin P and Kicheva, Anna}, booktitle = {Morphogen Gradients }, isbn = {978-1-4939-8771-9}, issn = {1064-3745}, pages = {47 -- 63}, publisher = {Springer Nature}, title = {{Measuring dorsoventral pattern and morphogen signaling profiles in the growing neural tube}}, doi = {10.1007/978-1-4939-8772-6_4}, volume = {1863}, year = {2018}, } @inproceedings{325, abstract = {Probabilistic programs extend classical imperative programs with real-valued random variables and random branching. The most basic liveness property for such programs is the termination property. The qualitative (aka almost-sure) termination problem asks whether a given program program terminates with probability 1. While ranking functions provide a sound and complete method for non-probabilistic programs, the extension of them to probabilistic programs is achieved via ranking supermartingales (RSMs). Although deep theoretical results have been established about RSMs, their application to probabilistic programs with nondeterminism has been limited only to programs of restricted control-flow structure. For non-probabilistic programs, lexicographic ranking functions provide a compositional and practical approach for termination analysis of real-world programs. In this work we introduce lexicographic RSMs and show that they present a sound method for almost-sure termination of probabilistic programs with nondeterminism. We show that lexicographic RSMs provide a tool for compositional reasoning about almost-sure termination, and for probabilistic programs with linear arithmetic they can be synthesized efficiently (in polynomial time). 
We also show that with additional restrictions even asymptotic bounds on expected termination time can be obtained through lexicographic RSMs. Finally, we present experimental results on benchmarks adapted from previous work to demonstrate the effectiveness of our approach.}, author = {Agrawal, Sheshansh and Chatterjee, Krishnendu and Novotny, Petr}, location = {Los Angeles, CA, USA}, number = {POPL}, publisher = {ACM}, title = {{Lexicographic ranking supermartingales: an efficient approach to termination of probabilistic programs}}, doi = {10.1145/3158122}, volume = {2}, year = {2018}, } @article{394, abstract = {The valley pseudospin in monolayer transition metal dichalcogenides (TMDs) has been proposed as a new way to manipulate information in various optoelectronic devices. This relies on a large valley polarization that remains stable over long time scales (hundreds of nanoseconds). However, time-resolved measurements report valley lifetimes of only a few picoseconds. This has been attributed to mechanisms such as phonon-mediated intervalley scattering and a precession of the valley pseudospin through electron-hole exchange. Here we use transient spin grating to directly measure the valley depolarization lifetime in monolayer MoSe2. We find a fast valley decay rate that scales linearly with the excitation density at different temperatures. This establishes the presence of strong exciton-exciton Coulomb exchange interactions enhancing the valley depolarization. Our work highlights the microscopic processes inhibiting the efficient use of the exciton valley pseudospin in monolayer TMDs. 
}, author = {Mahmood, Fahad and Alpichshev, Zhanybek and Lee, Yi and Kong, Jing and Gedik, Nuh}, journal = {Nano Letters}, number = {1}, pages = {223 -- 228}, publisher = {American Chemical Society}, title = {{Observation of exciton-exciton interaction mediated valley depolarization in monolayer MoSe2}}, doi = {10.1021/acs.nanolett.7b03953}, volume = {18}, year = {2018}, } @article{53, abstract = {In 2013, a publication repository was implemented at IST Austria and in 2015 after a thorough preparation phase a data repository was implemented - both based on the Open Source Software EPrints. In this text, designed as field report, we will reflect on our experiences with Open Source Software in general and specifically with EPrints regarding technical aspects but also regarding their characteristics of the user community. The second part is a pleading for including the end users in the process of implementation, adaption and evaluation.}, author = {Petritsch, Barbara and Porsche, Jana}, journal = {VÖB Mitteilungen}, number = {1}, pages = {199 -- 206}, publisher = {Vereinigung Österreichischer Bibliothekarinnen und Bibliothekare}, title = {{IST PubRep and IST DataRep: the institutional repositories at IST Austria}}, doi = {10.31263/voebm.v71i1.1993}, volume = {71}, year = {2018}, } @article{536, abstract = {We consider the problem of consensus in the challenging classic model. In this model, the adversary is adaptive; it can choose which processors crash at any point during the course of the algorithm. Further, communication is via asynchronous message passing: there is no known upper bound on the time to send a message from one processor to another, and all messages and coin flips are seen by the adversary. We describe a new randomized consensus protocol with expected message complexity $O(n^2 \log^2 n)$ when fewer than n / 2 processes may fail by crashing. 
This is an almost-linear improvement over the best previously known protocol, and within logarithmic factors of a known $\Omega(n^2)$ message lower bound. The protocol further ensures that no process sends more than $O(n \log^3 n)$ messages in expectation, which is again within logarithmic factors of optimal. We also present a generalization of the algorithm to an arbitrary number of failures t, which uses expected $O(nt + t^2 \log^2 t)$ total messages. Our approach is to build a message-efficient, resilient mechanism for aggregating individual processor votes, implementing the message-passing equivalent of a weak shared coin. Roughly, in our protocol, a processor first announces its votes to small groups, then propagates them to increasingly larger groups as it generates more and more votes. To bound the number of messages that an individual process might have to send or receive, the protocol progressively increases the weight of generated votes. The main technical challenge is bounding the impact of votes that are still “in flight” (generated, but not fully propagated) on the final outcome of the shared coin, especially since such votes might have different weights. We achieve this by leveraging the structure of the algorithm, and a technical argument based on martingale concentration bounds. Overall, we show that it is possible to build an efficient message-passing implementation of a shared coin, and in the process (almost-optimally) solve the classic consensus problem in the asynchronous message-passing model.}, author = {Alistarh, Dan-Adrian and Aspnes, James and King, Valerie and Saia, Jared}, issn = {01782770}, journal = {Distributed Computing}, number = {6}, pages = {489--501}, publisher = {Springer}, title = {{Communication-efficient randomized consensus}}, doi = {10.1007/s00446-017-0315-1}, volume = {31}, year = {2018}, } @article{554, abstract = {We analyse the canonical Bogoliubov free energy functional in three dimensions at low temperatures in the dilute limit. 
We prove existence of a first-order phase transition and, in the limit $\rho^{1/3}a \to 0$, we determine the critical temperature to be $T_{\mathrm{c}} = T_{\mathrm{fc}}(1 + 1.49\,\rho^{1/3}a)$ to leading order. Here, $T_{\mathrm{fc}}$ is the critical temperature of the free Bose gas, ρ is the density of the gas and a is the scattering length of the pair-interaction potential V. We also prove asymptotic expansions for the free energy. In particular, we recover the Lee–Huang–Yang formula in the limit $\rho^{1/3}a \to 0$.}, author = {Napiórkowski, Marcin M and Reuvers, Robin and Solovej, Jan}, issn = {00103616}, journal = {Communications in Mathematical Physics}, number = {1}, pages = {347--403}, publisher = {Springer}, title = {{The Bogoliubov free energy functional II: The dilute limit}}, doi = {10.1007/s00220-017-3064-x}, volume = {360}, year = {2018}, } @inbook{562, abstract = {Primary neuronal cell culture preparations are widely used to investigate synaptic functions. This chapter describes a detailed protocol for the preparation of a neuronal cell culture in which giant calyx-type synaptic terminals are formed. This chapter also presents detailed protocols for utilizing the main technical advantages provided by such a preparation, namely, labeling and imaging of synaptic organelles and electrophysiological recordings directly from presynaptic terminals.}, author = {Dimitrov, Dimitar and Guillaud, Laurent and Eguchi, Kohgaku and Takahashi, Tomoyuki}, booktitle = {Neurotrophic Factors}, editor = {Skaper, Stephen D.}, pages = {201 -- 215}, publisher = {Springer}, title = {{Culture of mouse giant central nervous system synapses and application for imaging and electrophysiological analyses}}, doi = {10.1007/978-1-4939-7571-6_15}, volume = {1727}, year = {2018}, } @inbook{61, abstract = {We prove that there is no strongly regular graph (SRG) with parameters (460; 153; 32; 60). 
The proof is based on a recent lower bound on the number of 4-cliques in a SRG and some applications of Euclidean representation of SRGs. }, author = {Bondarenko, Andriy and Mellit, Anton and Prymak, Andriy and Radchenko, Danylo and Viazovska, Maryna}, booktitle = {Contemporary Computational Mathematics}, pages = {131 -- 134}, publisher = {Springer}, title = {{There is no strongly regular graph with parameters (460; 153; 32; 60)}}, doi = {10.1007/978-3-319-72456-0_7}, year = {2018}, } @article{6111, abstract = {Neurons develop elaborate morphologies that provide a model for understanding cellular architecture. By studying C. elegans sensory dendrites, we previously identified genes that act to promote the extension of ciliated sensory dendrites during embryogenesis. Interestingly, the nonciliated dendrite of the oxygen-sensing neuron URX is not affected by these genes, suggesting it develops through a distinct mechanism. Here, we use a visual forward genetic screen to identify mutants that affect URX dendrite morphogenesis. We find that disruption of the MAP kinase MAPK-15 or the βH-spectrin SMA-1 causes a phenotype opposite to what we had seen before: dendrites extend normally during embryogenesis but begin to overgrow as the animals reach adulthood, ultimately extending up to 150% of their normal length. SMA-1 is broadly expressed and acts non-cell-autonomously, while MAPK-15 is expressed in many sensory neurons including URX and acts cell-autonomously. MAPK-15 acts at the time of overgrowth, localizes at the dendrite ending, and requires its kinase activity, suggesting it acts locally in time and space to constrain dendrite growth. Finally, we find that the oxygen-sensing guanylate cyclase GCY-35, which normally localizes at the dendrite ending, is localized throughout the overgrown region, and that overgrowth can be suppressed by overexpressing GCY-35 or by genetically mimicking elevated cGMP signaling. 
These results suggest that overgrowth may correspond to expansion of a sensory compartment at the dendrite ending, reminiscent of the remodeling of sensory cilia or dendritic spines. Thus, in contrast to established pathways that promote dendrite growth during early development, our results reveal a distinct mechanism that constrains dendrite growth throughout the life of the animal, possibly by controlling the size of a sensory compartment at the dendrite ending.}, author = {McLachlan, Ian G. and Beets, Isabel and de Bono, Mario and Heiman, Maxwell G.}, issn = {1553-7404}, journal = {PLOS Genetics}, number = {6}, publisher = {Public Library of Science}, title = {{A neuronal MAP kinase constrains growth of a Caenorhabditis elegans sensory dendrite throughout the life of the organism}}, doi = {10.1371/journal.pgen.1007435}, volume = {14}, year = {2018}, } @article{6109, abstract = {Neuropeptides are ubiquitous modulators of behavior and physiology. They are packaged in specialized secretory organelles called dense core vesicles (DCVs) that are released upon neural stimulation. Unlike synaptic vesicles, which can be recycled and refilled close to release sites, DCVs must be replenished by de novo synthesis in the cell body. Here, we dissect DCV cell biology in vivo in a Caenorhabditis elegans sensory neuron whose tonic activity we can control using a natural stimulus. We express fluorescently tagged neuropeptides in the neuron and define parameters that describe their subcellular distribution. We measure these parameters at high and low neural activity in 187 mutants defective in proteins implicated in membrane traffic, neuroendocrine secretion, and neuronal or synaptic activity. Using unsupervised hierarchical clustering methods, we analyze these data and identify 62 groups of genes with similar mutant phenotypes. We explore the function of a subset of these groups. We recapitulate many previous findings, validating our paradigm. 
We uncover a large battery of proteins involved in recycling DCV membrane proteins, something hitherto poorly explored. We show that the unfolded protein response promotes DCV production, which may contribute to intertissue communication of stress. We also find evidence that different mechanisms of priming and exocytosis may operate at high and low neural activity. Our work provides a defined framework to study DCV biology at different neural activity levels.}, author = {Laurent, Patrick and Ch’ng, QueeLim and Jospin, Maëlle and Chen, Changchun and Lorenzo, Ramiro and de Bono, Mario}, issn = {0027-8424}, journal = {Proceedings of the National Academy of Sciences}, number = {29}, pages = {E6890--E6899}, publisher = {National Academy of Sciences}, title = {{Genetic dissection of neuropeptide cell biology at high and low activity in a defined sensory neuron}}, doi = {10.1073/pnas.1714610115}, volume = {115}, year = {2018}, } @article{6354, abstract = {Blood platelets are critical for hemostasis and thrombosis, but also play diverse roles during immune responses. We have recently reported that platelets migrate at sites of infection in vitro and in vivo. Importantly, platelets use their ability to migrate to collect and bundle fibrin (ogen)-bound bacteria accomplishing efficient intravascular bacterial trapping. Here, we describe a method that allows analyzing platelet migration in vitro, focusing on their ability to collect bacteria and trap bacteria under flow.}, author = {Fan, Shuxia and Lorenz, Michael and Massberg, Steffen and Gärtner, Florian R}, issn = {2331-8325}, journal = {Bio-Protocol}, keywords = {Platelets, Cell migration, Bacteria, Shear flow, Fibrinogen, E. 
coli}, number = {18}, publisher = {Bio-Protocol}, title = {{Platelet migration and bacterial trapping assay under flow}}, doi = {10.21769/bioprotoc.3018}, volume = {8}, year = {2018}, } @article{6368, abstract = {An optical network of superconducting quantum bits (qubits) is an appealing platform for quantum communication and distributed quantum computing, but developing a quantum-compatible link between the microwave and optical domains remains an outstanding challenge. Operating at T < 100 mK temperatures, as required for quantum electrical circuits, we demonstrate a mechanically mediated microwave–optical converter with 47% conversion efficiency, and use a classical feed-forward protocol to reduce added noise to 38 photons. The feed-forward protocol harnesses our discovery that noise emitted from the two converter output ports is strongly correlated because both outputs record thermal motion of the same mechanical mode. We also discuss a quantum feed-forward protocol that, given high system efficiencies, would allow quantum information to be transferred even when thermal phonons enter the mechanical element faster than the electro-optic conversion rate.}, author = {Higginbotham, Andrew P and Burns, P. S. and Urmey, M. D. and Peterson, R. W. and Kampel, N. S. and Brubaker, B. M. and Smith, G. and Lehnert, K. W. and Regal, C. A.}, issn = {1745-2473}, journal = {Nature Physics}, number = {10}, pages = {1038--1042}, publisher = {Springer Nature}, title = {{Harnessing electro-optic correlations in an efficient mechanical converter}}, doi = {10.1038/s41567-018-0210-0}, volume = {14}, year = {2018}, } @article{6369, abstract = {We construct a metamaterial from radio-frequency harmonic oscillators, and find two topologically distinct phases resulting from dissipation engineered into the system. These phases are distinguished by a quantized value of bulk energy transport. 
The impulse response of our circuit is measured and used to reconstruct the band structure and winding number of circuit eigenfunctions around a dark mode. Our results demonstrate that dissipative topological transport can occur in a wider class of physical systems than considered before.}, author = {Rosenthal, Eric I. and Ehrlich, Nicole K. and Rudner, Mark S. and Higginbotham, Andrew P and Lehnert, K. W.}, issn = {2469-9950}, journal = {Physical Review B}, number = {22}, publisher = {American Physical Society (APS)}, title = {{Topological phase transition measured in a dissipative metamaterial}}, doi = {10.1103/physrevb.97.220301}, volume = {97}, year = {2018}, } @misc{6459, author = {Petritsch, Barbara}, keywords = {Open Access, Publication Analysis}, location = {Graz, Austria}, publisher = {IST Austria}, title = {{Open Access at IST Austria 2009-2017}}, doi = {10.5281/zenodo.1410279}, year = {2018}, } @inproceedings{6664, abstract = {Reed-Muller (RM) and polar codes are a class of capacity-achieving channel coding schemes with the same factor graph representation. Low-complexity decoding algorithms fall short in providing a good error-correction performance for RM and polar codes. Using the symmetric group of RM and polar codes, the specific decoding algorithm can be carried out on multiple permutations of the factor graph to boost the error-correction performance. However, this approach results in high decoding complexity. In this paper, we first derive the total number of factor graph permutations on which the decoding can be performed. We further propose a successive permutation (SP) scheme which finds the permutations on the fly, thus the decoding always progresses on a single factor graph permutation. We show that SP can be used to improve the error-correction performance of RM and polar codes under successive-cancellation (SC) and SC list (SCL) decoding, while keeping the memory requirements of the decoders unaltered. 
Our results for RM and polar codes of length 128 and rate 0.5 show that when SP is used and at a target frame error rate of $10^{-4}$, up to 0.5 dB and 0.1 dB improvement can be achieved for RM and polar codes respectively.}, author = {Hashemi, Seyyed Ali and Doan, Nghia and Mondelli, Marco and Gross, Warren J.}, booktitle = {2018 IEEE 10th International Symposium on Turbo Codes \& Iterative Information Processing}, location = {Hong Kong, China}, pages = {1--5}, publisher = {IEEE}, title = {{Decoding Reed-Muller and polar codes by successive factor graph permutations}}, doi = {10.1109/istc.2018.8625281}, year = {2018}, } @inproceedings{6728, abstract = {Polar codes are a channel coding scheme for the next generation of wireless communications standard (5G). The belief propagation (BP) decoder allows for parallel decoding of polar codes, making it suitable for high throughput applications. However, the error-correction performance of polar codes under BP decoding is far from the requirements of 5G. It has been shown that the error-correction performance of BP can be improved if the decoding is performed on multiple permuted factor graphs of polar codes. However, a different BP decoding scheduling is required for each factor graph permutation which results in the design of a different decoder for each permutation. Moreover, the selection of the different factor graph permutations is at random, which prevents the decoder from achieving a desirable error correction performance with a small number of permutations. In this paper, we first show that the permutations on the factor graph can be mapped into suitable permutations on the codeword positions. As a result, we can make use of a single decoder for all the permutations. In addition, we introduce a method to construct a set of predetermined permutations which can provide the correct codeword if the decoding fails on the original permutation. 
We show that for the 5G polar code of length 1024, the error-correction performance of the proposed decoder is more than 0.25 dB better than that of the BP decoder with the same number of random permutations at the frame error rate of $10^{-4}$.}, author = {Doan, Nghia and Hashemi, Seyyed Ali and Mondelli, Marco and Gross, Warren J.}, booktitle = {2018 IEEE Global Communications Conference}, isbn = {9781538647271}, location = {Abu Dhabi, United Arab Emirates}, publisher = {IEEE}, title = {{On the decoding of polar codes on permuted factor graphs}}, doi = {10.1109/glocom.2018.8647308}, year = {2018}, } @article{6678, abstract = {We survey coding techniques that enable reliable transmission at rates that approach the capacity of an arbitrary discrete memoryless channel. In particular, we take the point of view of modern coding theory and discuss how recent advances in coding for symmetric channels help provide more efficient solutions for the asymmetric case. We consider, in more detail, three basic coding paradigms. The first one is Gallager's scheme that consists of concatenating a linear code with a non-linear mapping so that the input distribution can be appropriately shaped. We explicitly show that both polar codes and spatially coupled codes can be employed in this scenario. Furthermore, we derive a scaling law between the gap to capacity, the cardinality of the input and output alphabets, and the required size of the mapper. The second one is an integrated scheme in which the code is used both for source coding, in order to create codewords distributed according to the capacity-achieving input distribution, and for channel coding, in order to provide error protection. Such a technique has been recently introduced by Honda and Yamamoto in the context of polar codes, and we show how to apply it also to the design of sparse graph codes. 
The third paradigm is based on an idea of Böcherer and Mathar, and separates the two tasks of source coding and channel coding by a chaining construction that binds together several codewords. We present conditions for the source code and the channel code, and we describe how to combine any source code with any channel code that fulfill those conditions, in order to provide capacity-achieving schemes for asymmetric channels. In particular, we show that polar codes, spatially coupled codes, and homophonic codes are suitable as basic building blocks of the proposed coding strategy. Rather than focusing on the exact details of the schemes, the purpose of this tutorial is to present different coding techniques that can then be implemented with many variants. There is no absolute winner and, in order to understand the most suitable technique for a specific application scenario, we provide a detailed comparison that takes into account several performance metrics.}, author = {Mondelli, Marco and Hassani, Hamed and Urbanke, Rudiger }, issn = {0018-9448}, journal = {IEEE Transactions on Information Theory}, number = {5}, pages = {3371--3393}, publisher = {IEEE}, title = {{How to achieve the capacity of asymmetric channels}}, doi = {10.1109/tit.2018.2789885}, volume = {64}, year = {2018}, } @article{690, abstract = {We consider spectral properties and the edge universality of sparse random matrices, the class of random matrices that includes the adjacency matrices of the Erdős–Rényi graph model G(N, p). We prove a local law for the eigenvalue density up to the spectral edges. Under a suitable condition on the sparsity, we also prove that the rescaled extremal eigenvalues exhibit GOE Tracy–Widom fluctuations if a deterministic shift of the spectral edge due to the sparsity is included. 
For the adjacency matrix of the Erdős–Rényi graph this establishes the Tracy–Widom fluctuations of the second largest eigenvalue when p is much larger than $N^{-2/3}$ with a deterministic shift of order $(Np)^{-1}$.}, author = {Lee, Ji Oon and Schnelli, Kevin}, journal = {Probability Theory and Related Fields}, number = {1-2}, publisher = {Springer}, title = {{Local law and Tracy–Widom limit for sparse random matrices}}, doi = {10.1007/s00440-017-0787-8}, volume = {171}, year = {2018}, } @inproceedings{6675, abstract = {We present a coding paradigm that provides a new achievable rate for the primitive relay channel by combining compress-and-forward and decode-and-forward with a chaining construction. In the primitive relay channel model, the source broadcasts a message to the relay and to the destination; and the relay facilitates this communication by sending an additional message to the destination through a separate channel. Two well-known coding approaches for this setting are decode-and-forward and compress-and-forward: in the former, the relay decodes the message and sends some of the information to the destination; in the latter, the relay does not attempt to decode, but it sends a compressed description of the received sequence to the destination via Wyner-Ziv coding. In our scheme, we transmit over pairs of blocks and we use compress-and-forward for the first block and decode-and-forward for the second. In particular, in the first block, the relay does not attempt to decode and it sends only a part of the compressed description of the received sequence; in the second block, the relay decodes the message and sends this information plus the remaining part of the compressed sequence relative to the first block. As a result, we strictly outperform both compress-and-forward and decode-and-forward. 
Furthermore, this paradigm can be implemented with a low-complexity polar coding scheme that has the typical attractive features of polar codes, i.e., quasi-linear encoding/decoding complexity and super-polynomial decay of the error probability. Throughout the paper we consider as a running example the special case of the erasure relay channel and we compare the rates achievable by our proposed scheme with the existing upper and lower bounds.}, author = {Mondelli, Marco and Hassani, Hamed and Urbanke, Rudiger}, booktitle = {2018 IEEE International Symposium on Information Theory}, issn = {2157-8117}, location = {Vail, CO, United States}, pages = {351--355}, publisher = {IEEE}, title = {{A new coding paradigm for the primitive relay channel}}, doi = {10.1109/isit.2018.8437479}, year = {2018}, } @article{703, abstract = {We consider the NP-hard problem of MAP-inference for undirected discrete graphical models. We propose a polynomial time and practically efficient algorithm for finding a part of its optimal solution. Specifically, our algorithm marks some labels of the considered graphical model either as (i) optimal, meaning that they belong to all optimal solutions of the inference problem; (ii) non-optimal if they provably do not belong to any solution. With access to an exact solver of a linear programming relaxation to the MAP-inference problem, our algorithm marks the maximal possible (in a specified sense) number of labels. We also present a version of the algorithm, which has access to a suboptimal dual solver only and still can ensure the (non-)optimality for the marked labels, although the overall number of the marked labels may decrease. We propose an efficient implementation, which runs in time comparable to a single run of a suboptimal dual solver. 
Our method is well-scalable and shows state-of-the-art results on computational benchmarks from machine learning and computer vision.}, author = {Shekhovtsov, Alexander and Swoboda, Paul and Savchynskyy, Bogdan}, issn = {01628828}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, number = {7}, pages = {1668--1682}, publisher = {IEEE}, title = {{Maximum persistency via iterative relaxed inference with graphical models}}, doi = {10.1109/TPAMI.2017.2730884}, volume = {40}, year = {2018}, } @article{7063, abstract = {The high-pressure synthesis and incommensurately modulated structure are reported for the new compound Sr2Pt8−xAs, with x = 0.715 (5). The structure consists of Sr2Pt3As layers alternating with Pt-only corrugated grids. Ab initio calculations predict a metallic character with a dominant role of the Pt d electrons. The electrical resistivity (ρ) and Seebeck coefficient confirm the metallic character, but surprisingly, ρ showed a near-flat temperature dependence. This observation fits the description of the Mooij correlation for electrical resistivity in disordered metals, originally developed for statistically distributed point defects. The discussed material has a long-range crystallographic order, but the high concentration of Pt vacancies, incommensurately ordered, strongly influences the electronic conduction properties. This result extends the range of validity of the Mooij correlation to long-range ordered incommensurately modulated vacancies. Motivated by the layered structure, the resistivity anisotropy was measured in a focused-ion-beam micro-fabricated well oriented single crystal. A low resistivity anisotropy indicates that the layers are electrically coupled and conduction channels along different directions are intermixed.}, author = {Martino, Edoardo and Arakcheeva, Alla and Autès, Gabriel and Pisoni, Andrea and Bachmann, Maja D. and Modic, Kimberly A and Helm, Toni and Yazyev, Oleg V. and Moll, Philip J. W. 
and Forró, László and Katrych, Sergiy}, issn = {2052-2525}, journal = {IUCrJ}, number = {4}, pages = {470--477}, publisher = {International Union of Crystallography (IUCr)}, title = {{Sr2Pt8−xAs: A layered incommensurately modulated metal with saturated resistivity}}, doi = {10.1107/s2052252518007303}, volume = {5}, year = {2018}, } @article{7062, abstract = {Weyl fermions are a recently discovered ingredient for correlated states of electronic matter. A key difficulty has been that real materials also contain non-Weyl quasiparticles, and disentangling the experimental signatures has proven challenging. Here we use magnetic fields up to 95 T to drive the Weyl semimetal TaAs far into its quantum limit, where only the purely chiral 0th Landau levels of the Weyl fermions are occupied. We find the electrical resistivity to be nearly independent of magnetic field up to 50 T: unusual for conventional metals but consistent with the chiral anomaly for Weyl fermions. Above 50 T we observe a two-order-of-magnitude increase in resistivity, indicating that a gap opens in the chiral Landau levels. Above 80 T we observe strong ultrasonic attenuation below 2 K, suggesting a mesoscopically textured state of matter. These results point the way to inducing new correlated states of matter in the quantum limit of Weyl semimetals.}, author = {Ramshaw, B. J. and Modic, Kimberly A and Shekhter, Arkady and Zhang, Yi and Kim, Eun-Ah and Moll, Philip J. W. and Bachmann, Maja D. and Chan, M. K. and Betts, J. B. and Balakirev, F. and Migliori, A. and Ghimire, N. J. and Bauer, E. D. and Ronning, F. and McDonald, R. 
D.}, issn = {2041-1723}, journal = {Nature Communications}, number = {1}, publisher = {Springer Nature}, title = {{Quantum limit transport and destruction of the Weyl nodes in TaAs}}, doi = {10.1038/s41467-018-04542-9}, volume = {9}, year = {2018}, } @article{7059, abstract = {Unusual behavior in quantum materials commonly arises from their effective low-dimensional physics, reflecting the underlying anisotropy in the spin and charge degrees of freedom. Here we introduce the magnetotropic coefficient k = ∂2F/∂θ2, the second derivative of the free energy F with respect to the magnetic field orientation θ in the crystal. We show that the magnetotropic coefficient can be quantitatively determined from a shift in the resonant frequency of a commercially available atomic force microscopy cantilever under magnetic field. This detection method enables part per 100 million sensitivity and the ability to measure magnetic anisotropy in nanogram-scale samples, as demonstrated on the Weyl semimetal NbP. Measurement of the magnetotropic coefficient in the spin-liquid candidate RuCl3 highlights its sensitivity to anisotropic phase transitions and allows a quantitative comparison to other thermodynamic coefficients via the Ehrenfest relations.}, author = {Modic, Kimberly A and Bachmann, Maja D. and Ramshaw, B. J. and Arnold, F. and Shirer, K. R. and Estry, Amelia and Betts, J. B. and Ghimire, Nirmal J. and Bauer, E. D. and Schmidt, Marcus and Baenitz, Michael and Svanidze, E. and McDonald, Ross D. and Shekhter, Arkady and Moll, Philip J. W.}, issn = {2041-1723}, journal = {Nature Communications}, number = {1}, pages = {3975}, publisher = {Springer Nature}, title = {{Resonant torsion magnetometry in anisotropic quantum materials}}, doi = {10.1038/s41467-018-06412-w}, volume = {9}, year = {2018}, } @article{7058, abstract = {We examine recent magnetic torque measurements in two compounds, γ−Li2IrO3 and RuCl3, which have been discussed as possible realizations of the Kitaev model. 
The analysis of the reported discontinuity in torque, as an external magnetic field is rotated across the c axis in both crystals, suggests that they have a translationally invariant chiral spin order of the form ⟨Si⋅(Sj×Sk)⟩≠0 in the ground state and persisting over a very wide range of magnetic field and temperature. An extraordinary |B|B2 dependence of the torque for small fields, beside the usual B2 part, is predicted by the chiral spin order. Data for small fields are available for γ−Li2IrO3 and are found to be consistent with the prediction upon further analysis. Other experiments such as inelastic scattering and thermal Hall effect and several questions raised by the discovery of chiral spin order, including its topological consequences, are discussed.}, author = {Modic, Kimberly A and Ramshaw, B. J. and Shekhter, A. and Varma, C. M.}, issn = {2469-9969}, journal = {Physical Review B}, number = {20}, publisher = {APS}, title = {{Chiral spin order in some purported Kitaev spin-liquid compounds}}, doi = {10.1103/physrevb.98.205110}, volume = {98}, year = {2018}, } @inproceedings{7116, abstract = {Training deep learning models has received tremendous research interest recently. In particular, there has been intensive research on reducing the communication cost of training when using multiple computational devices, through reducing the precision of the underlying data representation. Naturally, such methods induce system trade-offs—lowering communication precision could de-crease communication overheads and improve scalability; but, on the other hand, it can also reduce the accuracy of training. 
In this paper, we study this trade-off space, and ask: Can low-precision communication consistently improve the end-to-end performance of training modern neural networks, with no accuracy loss? From the performance point of view, the answer to this question may appear deceptively easy: compressing communication through low precision should help when the ratio between communication and computation is high. However, this answer is less straightforward when we try to generalize this principle across various neural network architectures (e.g., AlexNet vs. ResNet), number of GPUs (e.g., 2 vs. 8 GPUs), machine configurations (e.g., EC2 instances vs. NVIDIA DGX-1), communication primitives (e.g., MPI vs. NCCL), and even different GPU architectures (e.g., Kepler vs. Pascal). Currently, it is not clear how a realistic realization of all these factors maps to the speed up provided by low-precision communication. In this paper, we conduct an empirical study to answer this question and report the insights.}, author = {Grubic, Demjan and Tam, Leo and Alistarh, Dan-Adrian and Zhang, Ce}, booktitle = {Proceedings of the 21st International Conference on Extending Database Technology}, isbn = {9783893180783}, issn = {2367-2005}, location = {Vienna, Austria}, pages = {145--156}, publisher = {OpenProceedings}, title = {{Synchronous multi-GPU training for deep learning with low-precision communications: An empirical study}}, doi = {10.5441/002/EDBT.2018.14}, year = {2018}, } @article{7126, abstract = {In the Minimum Description Length (MDL) principle, learning from the data is equivalent to an optimal coding problem. We show that the codes that achieve optimal compression in MDL are critical in a very precise sense. First, when they are taken as generative models of samples, they generate samples with broad empirical distributions and with a high value of the relevance, defined as the entropy of the empirical frequencies. 
These results are derived for different statistical models (Dirichlet model, independent and pairwise dependent spin models, and restricted Boltzmann machines). Second, MDL codes sit precisely at a second order phase transition point where the symmetry between the sampled outcomes is spontaneously broken. The order parameter controlling the phase transition is the coding cost of the samples. The phase transition is a manifestation of the optimality of MDL codes, and it arises because codes that achieve a higher compression do not exist. These results suggest a clear interpretation of the widespread occurrence of statistical criticality as a characterization of samples which are maximally informative on the underlying generative process.}, author = {Cubero, Ryan J and Marsili, Matteo and Roudi, Yasser}, issn = {1099-4300}, journal = {Entropy}, keywords = {Minimum Description Length, normalized maximum likelihood, statistical criticality, phase transitions, large deviations}, number = {10}, publisher = {MDPI}, title = {{Minimum description length codes are critical}}, doi = {10.3390/e20100755}, volume = {20}, year = {2018}, } @article{7277, abstract = {Solid alkali metal carbonates are universal passivation layer components of intercalation battery materials and common side products in metal‐O2 batteries, and are believed to form and decompose reversibly in metal‐O2/CO2 cells. In these cathodes, Li2CO3 decomposes to CO2 when exposed to potentials above 3.8 V vs. Li/Li+. However, O2 evolution, as would be expected according to the decomposition reaction 2 Li2CO3→4 Li++4 e−+2 CO2+O2, is not detected. O atoms are thus unaccounted for, which was previously ascribed to unidentified parasitic reactions. Here, we show that highly reactive singlet oxygen (1O2) forms upon oxidizing Li2CO3 in an aprotic electrolyte and therefore does not evolve as O2. 
These results have substantial implications for the long‐term cyclability of batteries: they underpin the importance of avoiding 1O2 in metal‐O2 batteries, question the possibility of a reversible metal‐O2/CO2 battery based on a carbonate discharge product, and help explain the interfacial reactivity of transition‐metal cathodes with residual Li2CO3.}, author = {Mahne, Nika and Renfrew, Sara E. and McCloskey, Bryan D. and Freunberger, Stefan Alexander}, issn = {1433-7851}, journal = {Angewandte Chemie International Edition}, number = {19}, pages = {5529--5533}, publisher = {Wiley}, title = {{Electrochemical oxidation of Lithium Carbonate generates singlet oxygen}}, doi = {10.1002/anie.201802277}, volume = {57}, year = {2018}, } @article{7287, abstract = {Passivation layers on electrode materials are ubiquitous in nonaqueous battery chemistries and strongly govern performance and lifetime. They comprise breakdown products of the electrolyte including carbonate, alkyl carbonates, alkoxides, carboxylates, and polymers. Parasitic chemistry in metal–O2 batteries forms similar products and is tied to the deviation of the O2 balance from the ideal stoichiometry during formation/decomposition of alkaline peroxides or superoxides. Accurate and integral quantification of carbonaceous species and peroxides or superoxides in battery electrodes remains, however, elusive. We present a refined procedure to quantify them accurately and sensitively by pointing out and rectifying pitfalls of previous procedures. Carbonaceous compounds are differentiated into inorganic and organic ones. We combine mass and UV–vis spectrometry to quantify evolved O2 and complexed peroxide and CO2 evolved from carbonaceous compounds by acid treatment and Fenton’s reaction. The capabilities of the method are exemplified by means of Li–O2 and Na–O2 cathodes, graphite anodes, and LiNi0.8Co0.15Al0.05O2 cathodes.}, author = {Schafzahl, Bettina and Mourad, Eléonore and Schafzahl, Lukas and Petit, Yann K. 
and Raju, Anjana R. and Thotiyl, Musthafa Ottakam and Wilkening, Martin and Slugovc, Christian and Freunberger, Stefan Alexander}, issn = {2380-8195}, journal = {ACS Energy Letters}, number = {1}, pages = {170--176}, publisher = {ACS}, title = {{Quantifying total superoxide, peroxide, and carbonaceous compounds in metal–O2 batteries and the solid electrolyte interphase}}, doi = {10.1021/acsenergylett.7b01111}, volume = {3}, year = {2018}, } @article{7285, abstract = {Hydrogelation, the self-assembly of molecules into soft, water-loaded networks, is one way to bridge the structural gap between single molecules and functional materials. The potential of hydrogels, such as those based on perylene bisimides, lies in their chemical, physical, optical, and electronic properties, which are governed by the supramolecular structure of the gel. However, the structural motifs and their precise role for long-range conductivity are yet to be explored. Here, we present a comprehensive structural picture of a perylene bisimide hydrogel, suggesting that its long-range conductivity is limited by charge transfer between electronic backbones. We reveal nanocrystalline ribbon-like structures as the electronic and structural backbone units between which charge transfer is mediated by polar solvent bridges. 
We exemplify this effect with sensing, where exposure to polar vapor enhances conductivity by 5 orders of magnitude, emphasizing the crucial role of the interplay between structural motif and surrounding medium for the rational design of devices based on nanocrystalline hydrogels.}, author = {Burian, Max and Rigodanza, Francesco and Demitri, Nicola and Đorđević, Luka and Marchesan, Silvia and Steinhartova, Tereza and Letofsky-Papst, Ilse and Khalakhan, Ivan and Mourad, Eléonore and Freunberger, Stefan Alexander and Amenitsch, Heinz and Prato, Maurizio and Syrgiannis, Zois}, issn = {1936-0851}, journal = {ACS Nano}, number = {6}, pages = {5800--5806}, publisher = {ACS}, title = {{Inter-backbone charge transfer as prerequisite for long-range conductivity in perylene bisimide hydrogels}}, doi = {10.1021/acsnano.8b01689}, volume = {12}, year = {2018}, } @inproceedings{7407, abstract = {Proofs of space (PoS) [Dziembowski et al., CRYPTO'15] are proof systems where a prover can convince a verifier that he "wastes" disk space. PoS were introduced as a more ecological and economical replacement for proofs of work which are currently used to secure blockchains like Bitcoin. In this work we investigate extensions of PoS which allow the prover to embed useful data into the dedicated space, which later can be recovered. Our first contribution is a security proof for the original PoS from CRYPTO'15 in the random oracle model (the original proof only applied to a restricted class of adversaries which can store a subset of the data an honest prover would store). When this PoS is instantiated with recent constructions of maximally depth robust graphs, our proof implies basically optimal security. As a second contribution we show three different extensions of this PoS where useful data can be embedded into the space required by the prover. Our security proof for the PoS extends (non-trivially) to these constructions. 
We discuss how some of these variants can be used as proofs of catalytic space (PoCS), a notion we put forward in this work, and which basically is a PoS where most of the space required by the prover can be used to backup useful data. Finally we discuss how one of the extensions is a candidate construction for a proof of replication (PoR), a proof system recently suggested in the Filecoin whitepaper. }, author = {Pietrzak, Krzysztof Z}, booktitle = {10th Innovations in Theoretical Computer Science Conference (ITCS 2019)}, isbn = {978-3-95977-095-8}, issn = {1868-8969}, location = {San Diego, CA, United States}, pages = {59:1--59:25}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Proofs of catalytic space}}, doi = {10.4230/LIPICS.ITCS.2019.59}, volume = {124}, year = {2018}, } @article{7717, abstract = {Background: DNA methylation levels change along with age, but few studies have examined the variation in the rate of such changes between individuals. Methods: We performed a longitudinal analysis to quantify the variation in the rate of change of DNA methylation between individuals using whole blood DNA methylation array profiles collected at 2–4 time points (N = 2894) in 954 individuals (67–90 years). Results: After stringent quality control, we identified 1507 DNA methylation CpG sites (rsCpGs) with statistically significant variation in the rate of change (random slope) of DNA methylation among individuals in a mixed linear model analysis. Genes in the vicinity of these rsCpGs were found to be enriched in Homeobox transcription factors and the Wnt signalling pathway, both of which are related to ageing processes. Furthermore, we investigated the SNP effect on the random slope. We found that 4 out of 1507 rsCpGs had one significant (P < 5 × 10−8/1507) SNP effect and 343 rsCpGs had at least one SNP effect (436 SNP-probe pairs) reaching genome-wide significance (P < 5 × 10−8). 
Ninety-five percent of the significant (P < 5 × 10−8) SNPs are on different chromosomes from their corresponding probes. Conclusions: We identified CpG sites that have variability in the rate of change of DNA methylation between individuals, and our results suggest a genetic basis of this variation. Genes around these CpG sites have been reported to be involved in the ageing process.}, author = {Zhang, Qian and Marioni, Riccardo E and Robinson, Matthew Richard and Higham, Jon and Sproul, Duncan and Wray, Naomi R and Deary, Ian J and McRae, Allan F and Visscher, Peter M}, issn = {1756-994X}, journal = {Genome Medicine}, number = {1}, publisher = {Springer Nature}, title = {{Genotype effects contribute to variation in longitudinal methylome patterns in older people}}, doi = {10.1186/s13073-018-0585-7}, volume = {10}, year = {2018}, } @article{7712, abstract = {Male pattern baldness (MPB) is a sex-limited, age-related, complex trait. We study MPB genetics in 205,327 European males from the UK Biobank. Here we show that MPB is strongly heritable and polygenic, with pedigree-heritability of 0.62 (SE = 0.03) estimated from close relatives, and SNP-heritability of 0.39 (SE = 0.01) from conventionally-unrelated males. We detect 624 near-independent genome-wide loci, contributing SNP-heritability of 0.25 (SE = 0.01), of which 26 X-chromosome loci explain 11.6%. Autosomal genetic variance is enriched for common variants and regions of lower linkage disequilibrium. We identify plausible genetic correlations between MPB and multiple sex-limited markers of earlier puberty, increased bone mineral density (rg = 0.15) and pancreatic β-cell function (rg = 0.12). Correlations with reproductive traits imply an effect on fitness, consistent with an estimated linear selection gradient of -0.018 per MPB standard deviation. Overall, we provide genetic insights into MPB: a phenotype of interest in its own right, with value as a model sex-limited, complex trait.}, author = {Yap, Chloe X. 
and Sidorenko, Julia and Wu, Yang and Kemper, Kathryn E. and Yang, Jian and Wray, Naomi R. and Robinson, Matthew Richard and Visscher, Peter M.}, issn = {2041-1723}, journal = {Nature Communications}, publisher = {Springer Nature}, title = {{Dissection of genetic variation and evidence for pleiotropy in male pattern baldness}}, doi = {10.1038/s41467-018-07862-y}, volume = {9}, year = {2018}, } @article{7716, abstract = {Genomic prediction has the potential to contribute to precision medicine. However, to date, the utility of such predictors is limited due to low accuracy for most traits. Here theory and simulation study are used to demonstrate that widespread pleiotropy among phenotypes can be utilised to improve genomic risk prediction. We show how a genetic predictor can be created as a weighted index that combines published genome-wide association study (GWAS) summary statistics across many different traits. We apply this framework to predict risk of schizophrenia and bipolar disorder in the Psychiatric Genomics consortium data, finding substantial heterogeneity in prediction accuracy increases across cohorts. For six additional phenotypes in the UK Biobank data, we find increases in prediction accuracy ranging from 0.7% for height to 47% for type 2 diabetes, when using a multi-trait predictor that combines published summary statistics from multiple traits, as compared to a predictor based only on one trait.}, author = {Maier, Robert M. and Zhu, Zhihong and Lee, Sang Hong and Trzaskowski, Maciej and Ruderfer, Douglas M. and Stahl, Eli A. and Ripke, Stephan and Wray, Naomi R. and Yang, Jian and Visscher, Peter M. 
and Robinson, Matthew Richard}, issn = {2041-1723}, journal = {Nature Communications}, publisher = {Springer Nature}, title = {{Improving genetic prediction by leveraging genetic correlations among human diseases and traits}}, doi = {10.1038/s41467-017-02769-6}, volume = {9}, year = {2018}, } @article{7714, abstract = {Health risk factors such as body mass index (BMI) and serum cholesterol are associated with many common diseases. It often remains unclear whether the risk factors are cause or consequence of disease, or whether the associations are the result of confounding. We develop and apply a method (called GSMR) that performs a multi-SNP Mendelian randomization analysis using summary-level data from genome-wide association studies to test the causal associations of BMI, waist-to-hip ratio, serum cholesterols, blood pressures, height, and years of schooling (EduYears) with common diseases (sample sizes of up to 405,072). We identify a number of causal associations including a protective effect of LDL-cholesterol against type-2 diabetes (T2D) that might explain the side effects of statins on T2D, a protective effect of EduYears against Alzheimer’s disease, and bidirectional associations with opposite effects (e.g., higher BMI increases the risk of T2D but the effect of T2D on BMI is negative).}, author = {Zhu, Zhihong and Zheng, Zhili and Zhang, Futao and Wu, Yang and Trzaskowski, Maciej and Maier, Robert and Robinson, Matthew Richard and McGrath, John J. and Visscher, Peter M. and Wray, Naomi R. and Yang, Jian}, issn = {2041-1723}, journal = {Nature Communications}, publisher = {Springer Nature}, title = {{Causal associations between risk factors and common diseases inferred from GWAS summary data}}, doi = {10.1038/s41467-017-02317-2}, volume = {9}, year = {2018}, } @article{7713, abstract = {There are mean differences in complex traits among global human populations. We hypothesize that part of the phenotypic differentiation is due to natural selection. 
To address this hypothesis, we assess the differentiation in allele frequencies of trait-associated SNPs among African, Eastern Asian, and European populations for ten complex traits using data of large sample size (up to ~405,000). We show that SNPs associated with height (P=2.46×10−5), waist-to-hip ratio (P=2.77×10−4), and schizophrenia (P=3.96×10−5) are significantly more differentiated among populations than matched “control” SNPs, suggesting that these trait-associated SNPs have undergone natural selection. We further find that SNPs associated with height (P=2.01×10−6) and schizophrenia (P=5.16×10−18) show significantly higher variance in linkage disequilibrium (LD) scores across populations than control SNPs. Our results support the hypothesis that natural selection has shaped the genetic differentiation of complex traits, such as height and schizophrenia, among worldwide populations.}, author = {Guo, Jing and Wu, Yang and Zhu, Zhihong and Zheng, Zhili and Trzaskowski, Maciej and Zeng, Jian and Robinson, Matthew Richard and Visscher, Peter M. and Yang, Jian}, issn = {2041-1723}, journal = {Nature Communications}, publisher = {Springer Nature}, title = {{Global genetic differentiation of complex traits shaped by natural selection in humans}}, doi = {10.1038/s41467-018-04191-y}, volume = {9}, year = {2018}, } @article{7721, abstract = {The availability of genome-wide genetic data on hundreds of thousands of people has led to an equally rapid growth in methodologies available to analyse these data. While the motivation for undertaking genome-wide association studies (GWAS) is identification of genetic markers associated with complex traits, once generated these data can be used for many other analyses. GWAS have demonstrated that complex traits exhibit a highly polygenic genetic architecture, often with shared genetic risk factors across traits. 
New methods to analyse data from GWAS are increasingly being used to address a diverse set of questions about the aetiology of complex traits and diseases, including psychiatric disorders. Here, we give an overview of some of these methods and present examples of how they have contributed to our understanding of psychiatric disorders. We consider: (i) estimation of the extent of genetic influence on traits, (ii) uncovering of shared genetic control between traits, (iii) predictions of genetic risk for individuals, (iv) uncovering of causal relationships between traits, (v) identifying causal single-nucleotide polymorphisms and genes or (vi) the detection of genetic heterogeneity. This classification helps organise the large number of recently developed methods, although some could be placed in more than one category. While some methods require GWAS data on individual people, others simply use GWAS summary statistics data, allowing novel well-powered analyses to be conducted at a low computational burden.}, author = {Maier, Robert M. and Visscher, Peter M. and Robinson, Matthew Richard and Wray, Naomi R.}, issn = {0033-2917}, journal = {Psychological Medicine}, number = {7}, pages = {1055--1067}, publisher = {Cambridge University Press}, title = {{Embracing polygenicity: A review of methods and tools for psychiatric genetics research}}, doi = {10.1017/s0033291717002318}, volume = {48}, year = {2018}, } @article{7754, abstract = {Creating a selective gel that filters particles based on their interactions is a major goal of nanotechnology, with far-reaching implications from drug delivery to controlling assembly pathways. However, this is particularly difficult when the particles are larger than the gel’s characteristic mesh size because such particles cannot passively pass through the gel. Thus, filtering requires the interacting particles to transiently reorganize the gel’s internal structure. 
While significant advances, e.g., in DNA engineering, have enabled the design of nano-materials with programmable interactions, it is not clear what physical principles such a designer gel could exploit to achieve selective permeability. We present an equilibrium mechanism where crosslink binding dynamics are affected by interacting particles such that particle diffusion is enhanced. In addition to revealing specific design rules for manufacturing selective gels, our results have the potential to explain the origin of selective permeability in certain biological materials, including the nuclear pore complex.}, author = {Goodrich, Carl Peter and Brenner, Michael P. and Ribbeck, Katharina}, issn = {2041-1723}, journal = {Nature Communications}, publisher = {Springer Nature}, title = {{Enhanced diffusion by binding to the crosslinks of a polymer gel}}, doi = {10.1038/s41467-018-06851-5}, volume = {9}, year = {2018}, } @unpublished{7783, abstract = {The Drosophila Genetic Reference Panel (DGRP) serves as a valuable resource to better understand the genetic landscapes underlying quantitative traits. However, such DGRP studies have so far only focused on nuclear genetic variants. To address this, we sequenced the mitochondrial genomes of >170 DGRP lines, identifying 229 variants including 21 indels and 7 frameshifts. We used our mitochondrial variation data to identify 12 genetically distinct mitochondrial haplotypes, thus revealing important population structure at the mitochondrial level. We further examined whether this population structure was reflected on the nuclear genome by screening for the presence of potential mito-nuclear genetic incompatibilities in the form of significant genotype ratio distortions (GRDs) between mitochondrial and nuclear variants. In total, we detected a remarkable 1,845 mito-nuclear GRDs, with the highest enrichment observed in a 40 kb region around the gene Sex-lethal (Sxl). 
Intriguingly, downstream phenotypic analyses did not uncover major fitness effects associated with these GRDs, suggesting that a large number of mito-nuclear GRDs may reflect population structure at the mitochondrial level rather than actual genomic incompatibilities. This is further supported by the GRD landscape showing particular large genomic regions associated with a single mitochondrial haplotype. Next, we explored the functional relevance of the detected mitochondrial haplotypes through an association analysis on a set of 259 assembled, non-correlating DGRP phenotypes. We found multiple significant associations with stress- and metabolism-related phenotypes, including food intake in males. We validated the latter observation by reciprocal swapping of mitochondrial genomes from high food intake DGRP lines to low food intake ones. In conclusion, our study uncovered important mitochondrial population structure and haplotype-specific metabolic variation in the DGRP, thus demonstrating the significance of incorporating mitochondrial haplotypes in geno-phenotype relationship studies.}, author = {Bevers, Roel P.J. and Litovchenko, Maria and Kapopoulou, Adamandia and Braman, Virginie S. and Robinson, Matthew Richard and Auwerx, Johan and Hollis, Brian and Deplancke, Bart}, booktitle = {bioRxiv}, pages = {49}, publisher = {Cold Spring Harbor Laboratory}, title = {{Extensive mitochondrial population structure and haplotype-specific phenotypic variation in the Drosophila Genetic Reference Panel}}, year = {2018}, } @inproceedings{7812, abstract = {Deep neural networks (DNNs) continue to make significant advances, solving tasks from image classification to translation or reinforcement learning. One aspect of the field receiving considerable attention is efficiently executing deep models in resource-constrained environments, such as mobile or embedded devices. 
This paper focuses on this problem, and proposes two new compression methods, which jointly leverage weight quantization and distillation of larger teacher networks into smaller student networks. The first method we propose is called quantized distillation and leverages distillation during the training process, by incorporating distillation loss, expressed with respect to the teacher, into the training of a student network whose weights are quantized to a limited set of levels. The second method, differentiable quantization, optimizes the location of quantization points through stochastic gradient descent, to better fit the behavior of the teacher model. We validate both methods through experiments on convolutional and recurrent architectures. We show that quantized shallow students can reach similar accuracy levels to full-precision teacher models, while providing order of magnitude compression, and inference speedup that is linear in the depth reduction. In sum, our results enable DNNs for resource-constrained environments to leverage architecture and accuracy advances developed on more powerful devices.}, author = {Polino, Antonio and Pascanu, Razvan and Alistarh, Dan-Adrian}, booktitle = {6th International Conference on Learning Representations}, location = {Vancouver, Canada}, title = {{Model compression via distillation and quantization}}, year = {2018}, } @article{7983, abstract = {Feste Alkalicarbonate sind universelle Bestandteile von Passivierungsschichten an Materialien für Interkalationsbatterien, übliche Nebenprodukte in Metall‐O2‐Batterien, und es wird angenommen, dass sie sich reversibel in Metall‐O2 /CO2‐Zellen bilden und zersetzen. In all diesen Kathoden zersetzt sich Li2CO3 zu CO2, sobald es Spannungen >3.8 V vs. Li/Li+ ausgesetzt wird. Beachtenswert ist, dass keine O2‐Entwicklung detektiert wird, wie gemäß der Zersetzungsreaktion 2 Li2CO3 → 4 Li+ + 4 e− + 2 CO2 + O2 zu erwarten wäre. 
Deswegen war der Verbleib eines der O‐Atome ungeklärt und wurde nicht identifizierten parasitären Reaktionen zugerechnet. Hier zeigen wir, dass hochreaktiver Singulett‐Sauerstoff (1O2) bei der Oxidation von Li2CO3 in einem aprotischen Elektrolyten gebildet und daher nicht als O2 freigesetzt wird. Diese Ergebnisse haben weitreichende Auswirkungen auf die langfristige Zyklisierbarkeit von Batterien: sie untermauern die Wichtigkeit, 1O2 in Metall‐O2‐Batterien zu verhindern, stellen die Möglichkeit einer reversiblen Metall‐O2 /CO2‐Batterie basierend auf einem Carbonat‐Entladeprodukt in Frage und helfen, Grenzflächenreaktivität von Übergangsmetallkathoden mit Li2CO3‐Resten zu erklären.}, author = {Mahne, Nika and Renfrew, Sara E. and McCloskey, Bryan D. and Freunberger, Stefan Alexander}, issn = {0044-8249}, journal = {Angewandte Chemie}, number = {19}, pages = {5627--5631}, publisher = {Wiley}, title = {{Elektrochemische Oxidation von Lithiumcarbonat generiert Singulett-Sauerstoff}}, doi = {10.1002/ange.201802277}, volume = {130}, year = {2018}, } @article{8015, abstract = {The neural code of cortical processing remains uncracked; however, it must necessarily rely on faithful signal propagation between cortical areas. In this issue of Neuron, Joglekar et al. (2018) show that strong inter-areal excitation balanced by local inhibition can enable reliable signal propagation in data-constrained network models of macaque cortex. }, author = {Stroud, Jake P. and Vogels, Tim P}, issn = {0896-6273}, journal = {Neuron}, number = {1}, pages = {8--9}, publisher = {Elsevier}, title = {{Cortical signal propagation: Balance, amplify, transmit}}, doi = {10.1016/j.neuron.2018.03.028}, volume = {98}, year = {2018}, } @article{8073, abstract = {Motor cortex (M1) exhibits a rich repertoire of neuronal activities to support the generation of complex movements. 
Although recent neuronal-network models capture many qualitative aspects of M1 dynamics, they can generate only a few distinct movements. Additionally, it is unclear how M1 efficiently controls movements over a wide range of shapes and speeds. We demonstrate that modulation of neuronal input–output gains in recurrent neuronal-network models with a fixed architecture can dramatically reorganize neuronal activity and thus downstream muscle outputs. Consistent with the observation of diffuse neuromodulatory projections to M1, a relatively small number of modulatory control units provide sufficient flexibility to adjust high-dimensional network activity using a simple reward-based learning rule. Furthermore, it is possible to assemble novel movements from previously learned primitives, and one can separately change movement speed while preserving movement shape. Our results provide a new perspective on the role of modulatory systems in controlling recurrent cortical activity.}, author = {Stroud, Jake P. and Porter, Mason A. and Hennequin, Guillaume and Vogels, Tim P}, issn = {1097-6256}, journal = {Nature Neuroscience}, number = {12}, pages = {1774--1783}, publisher = {Springer Nature}, title = {{Motor primitives in space and time via targeted gain modulation in cortical networks}}, doi = {10.1038/s41593-018-0276-0}, volume = {21}, year = {2018}, } @article{8231, author = {Fazekas-Singer, Judit and Singer, Josef and Ilieva, Kristina M. and Matz, Miroslawa and Herrmann, Ina and Spillner, Edzard and Karagiannis, Sophia N. 
and Jensen-Jarolim, Erika}, issn = {0091-6749}, journal = {Journal of Allergy and Clinical Immunology}, number = {3}, pages = {973--976.e11}, publisher = {Elsevier}, title = {{AllergoOncology: Generating a canine anticancer IgE against the epidermal growth factor receptor}}, doi = {10.1016/j.jaci.2018.04.021}, volume = {142}, year = {2018}, } @article{8234, abstract = {Molecular imaging probes such as PET-tracers have the potential to improve the accuracy of tumor characterization by directly visualizing the biochemical situation. Thus, molecular changes can be detected early before morphological manifestation. The A3 adenosine receptor (A3AR) is described to be highly expressed in colon cancer cell lines and human colorectal cancer (CRC), suggesting this receptor as a tumor marker. The aim of this preclinical study was the evaluation of FE@SUPPY as a PET-tracer for CRC using in vitro imaging and in vivo PET imaging. First, affinity and selectivity of FE@SUPPY and its metabolites were determined, proving the favorable binding profile of FE@SUPPY. The human adenocarcinoma cell line HT-29 was characterized regarding its hA3AR expression and was subsequently chosen as tumor graft. Promising results regarding the potential of FE@SUPPY as a PET-tracer for CRC imaging were obtained by autoradiography as ≥2.3-fold higher accumulation of FE@SUPPY was found in CRC tissue compared to adjacent healthy colon tissue from the same patient. Nevertheless, first in vivo studies using HT-29 xenografts showed insufficient tumor uptake due to (1) poor conservation of target expression in xenografts and (2) unfavorable pharmacokinetics of FE@SUPPY in mice. We therefore conclude that HT-29 xenografts are not adequate to visualize hA3ARs using FE@SUPPY.}, author = {Balber, T. and Singer, Judit and Berroterán-Infante, N. and Dumanic, M. and Fetty, L. and Fazekas-Singer, J. and Vraka, C. and Nics, L. and Bergmann, M. and Pallitsch, K. and Spreitzer, H. and Wadsak, W. and Hacker, M. 
and Jensen-Jarolim, E. and Viernstein, H. and Mitterhauser, M.}, issn = {1555-4309}, journal = {Contrast Media \& Molecular Imaging}, publisher = {Hindawi}, title = {{Preclinical in vitro and in vivo evaluation of [18F]FE@SUPPY for cancer PET imaging: Limitations of a xenograft model for colorectal cancer}}, doi = {10.1155/2018/1269830}, volume = {2018}, year = {2018}, } @article{8232, abstract = {Anti-epidermal growth factor receptor (EGFR) antibody therapy is used in EGFR expressing cancers including lung, colon, head and neck, and bladder cancers, however results have been modest. Near infrared photoimmunotherapy (NIR-PIT) is a highly selective tumor treatment that employs an antibody-photo-absorber conjugate which is activated by NIR light. NIR-PIT is in clinical trials in patients with recurrent head and neck cancers using cetuximab-IR700 as the conjugate. However, its use has otherwise been restricted to mouse models. This is an effort to explore larger animal models with NIR-PIT. We describe the use of a recombinant canine anti-EGFR monoclonal antibody (mAb), can225IgG, conjugated to the photo-absorber, IR700DX, in three EGFR expressing canine transitional cell carcinoma (TCC) cell lines as a prelude to possible canine clinical studies. Can225-IR700 conjugate showed specific binding and cell-specific killing after NIR-PIT on EGFR expressing cells in vitro. In the in vivo study, can225-IR700 conjugate demonstrated accumulation of the fluorescent conjugate with high tumor-to-background ratio. Tumor-bearing mice were separated into 4 groups: (1) no treatment; (2) 100 μg of can225-IR700 i.v. only; (3) NIR light exposure only; (4) 100 μg of can225-IR700 i.v., NIR light exposure. Tumor growth was significantly inhibited by NIR-PIT treatment compared with the other groups (p < 0.001), and significantly prolonged survival was achieved (p < 0.001 vs. other groups) in the treatment groups. 
In conclusion, NIR-PIT with can225-IR700 is a promising treatment for canine EGFR-expressing cancers, including invasive transitional cell carcinoma in pet dogs, that could provide a pathway to translation to humans.}, author = {Nagaya, Tadanobu and Okuyama, Shuhei and Ogata, Fusa and Maruoka, Yasuhiro and Knapp, Deborah W. and Karagiannis, Sophia N. and Fazekas-Singer, Judit and Choyke, Peter L. and LeBlanc, Amy K. and Jensen-Jarolim, Erika and Kobayashi, Hisataka}, issn = {1949-2553}, journal = {Oncotarget}, pages = {19026--19038}, publisher = {Impact Journals}, title = {{Near infrared photoimmunotherapy targeting bladder cancer with a canine anti-epidermal growth factor receptor (EGFR) antibody}}, doi = {10.18632/oncotarget.24876}, volume = {9}, year = {2018}, } @article{8233, abstract = {The M2a subtype of macrophages plays an important role in human immunoglobulin E (IgE-mediated allergies) and other Th2 type immune reactions. In contrast, very little is known about these cells in the dog. Here we describe an in vitro method to activate canine histiocytic DH82 cells and primary canine monocyte-derived macrophages (MDMs) toward the M2a macrophages using human cytokines. For a side-by-side comparison, we compared the canine cells to human MDMs, and the human monocytic cell line U937 activated towards M1 and M2a cells on the cellular and molecular level. In analogy to activated human M2a cells, canine M2a, differentiated from both DH82 and MDMs, showed an increase in CD206 surface receptor expression compared to M1. Interestingly, canine M2a, but not M1 derived from MDM, upregulated the high-affinity IgE receptor (FcεRI). Transcription levels of M2a-associated genes (IL10, CCL22, TGFβ, CD163) showed a diverse pattern between the human and dog species, whereas M1 genes (IDO1, CXCL11, IL6, TNF-α) were similarly upregulated in canine and human M1 cells (cell lines and MDMs). 
We suggest that our novel in vitro method will be suitable in comparative allergology studies focussing on macrophages.}, author = {Herrmann, Ina and Gotovina, Jelena and Fazekas-Singer, Judit and Fischer, Michael B. and Hufnagl, Karin and Bianchini, Rodolfo and Jensen-Jarolim, Erika}, issn = {0145-305X}, journal = {Developmental \& Comparative Immunology}, number = {5}, pages = {118--127}, publisher = {Elsevier}, title = {{Canine macrophages can like human macrophages be in vitro activated toward the M2a subtype relevant in allergy}}, doi = {10.1016/j.dci.2018.01.005}, volume = {82}, year = {2018}, } @article{8262, abstract = {Background: The genus Burkholderia consists of species that occupy remarkably diverse ecological niches. Its best known members are important pathogens, B. mallei and B. pseudomallei, which cause glanders and melioidosis, respectively. Burkholderia genomes are unusual due to their multichromosomal organization, generally comprised of 2-3 chromosomes. Results: We performed integrated genomic analysis of 127 Burkholderia strains. The pan-genome is open with the saturation to be reached between 86,000 and 88,000 genes. The reconstructed rearrangements indicate a strong avoidance of intra-replichore inversions that is likely caused by selection against the transfer of large groups of genes between the leading and the lagging strands. Translocated genes also tend to retain their position in the leading or the lagging strand, and this selection is stronger for large syntenies. Integrated reconstruction of chromosome rearrangements in the context of strains phylogeny reveals parallel rearrangements that may indicate inversion-based phase variation and integration of new genomic islands. In particular, we detected parallel inversions in the second chromosomes of B. pseudomallei with breakpoints formed by genes encoding membrane components of multidrug resistance complex, that may be linked to a phase variation mechanism. 
Two genomic islands, spreading horizontally between chromosomes, were detected in the B. cepacia group. Conclusions: This study demonstrates the power of integrated analysis of pan-genomes, chromosome rearrangements, and selection regimes. Non-random inversion patterns indicate selective pressure, inversions are particularly frequent in a recent pathogen B. mallei, and, together with periods of positive selection at other branches, may indicate adaptation to new niches. One such adaptation could be a possible phase variation mechanism in B. pseudomallei.}, author = {Bochkareva, Olga and Moroz, Elena V. and Davydov, Iakov I. and Gelfand, Mikhail S.}, issn = {1471-2164}, journal = {BMC Genomics}, publisher = {Springer Nature}, title = {{Genome rearrangements and selection in multi-chromosome bacteria Burkholderia spp.}}, doi = {10.1186/s12864-018-5245-1}, volume = {19}, year = {2018}, } @article{8265, abstract = {Genome rearrangements have played an important role in the evolution of Yersinia pestis from its progenitor Yersinia pseudotuberculosis. Traditional phylogenetic trees for Y. pestis based on sequence comparison have short internal branches and low bootstrap supports as only a small number of nucleotide substitutions have occurred. On the other hand, even a small number of genome rearrangements may resolve topological ambiguities in a phylogenetic tree. We reconstructed phylogenetic trees based on genome rearrangements using several popular approaches such as Maximum likelihood for Gene Order and the Bayesian model of genome rearrangements by inversions. We also reconciled phylogenetic trees for each of the three CRISPR loci to obtain an integrated scenario of the CRISPR cassette evolution. Analysis of contradictions between the obtained evolutionary trees yielded numerous parallel inversions and gain/loss events. Our data indicate that an integrated analysis of sequence-based and inversion-based trees enhances the resolution of phylogenetic reconstruction. 
In contrast, reconstructions of strain relationships based solely on CRISPR loci may not be reliable, as the history is obscured by large deletions, obliterating the order of spacer gains. Similarly, numerous parallel gene losses preclude reconstruction of phylogeny based on gene content.}, author = {Bochkareva, Olga and Dranenko, Natalia O. and Ocheredko, Elena S. and Kanevsky, German M. and Lozinsky, Yaroslav N. and Khalaycheva, Vera A. and Artamonova, Irena I. and Gelfand, Mikhail S.}, issn = {2167-8359}, journal = {PeerJ}, publisher = {PeerJ}, title = {{Genome rearrangements and phylogeny reconstruction in Yersinia pestis}}, doi = {10.7717/peerj.4545}, volume = {6}, year = {2018}, } @inproceedings{8297, abstract = {Designing a secure permissionless distributed ledger (blockchain) that performs on par with centralized payment processors, such as Visa, is a challenging task. Most existing distributed ledgers are unable to scale-out, i.e., to grow their total processing capacity with the number of validators; and those that do, compromise security or decentralization. We present OmniLedger, a novel scale-out distributed ledger that preserves long-term security under permissionless operation. It ensures security and correctness by using a bias-resistant public-randomness protocol for choosing large, statistically representative shards that process transactions, and by introducing an efficient cross-shard commit protocol that atomically handles transactions affecting multiple shards. OmniLedger also optimizes performance via parallel intra-shard transaction processing, ledger pruning via collectively-signed state blocks, and low-latency “trust-but-verify” validation for low-value transactions. 
An evaluation of our experimental prototype shows that OmniLedger’s throughput scales linearly in the number of active validators, supporting Visa-level workloads and beyond, while confirming typical transactions in under two seconds.}, author = {Kokoris Kogias, Eleftherios and Jovanovic, Philipp and Gasser, Linus and Gailly, Nicolas and Syta, Ewa and Ford, Bryan}, booktitle = {2018 IEEE Symposium on Security and Privacy}, isbn = {9781538643532}, issn = {2375-1207}, location = {San Francisco, CA, United States}, pages = {583--598}, publisher = {IEEE}, title = {{OmniLedger: A secure, scale-out, decentralized ledger via sharding}}, doi = {10.1109/sp.2018.000-5}, year = {2018}, } @unpublished{8547, abstract = {The cerebral cortex contains multiple hierarchically organized areas with distinctive cytoarchitectonical patterns, but the cellular mechanisms underlying the emergence of this diversity remain unclear. Here, we have quantitatively investigated the neuronal output of individual progenitor cells in the ventricular zone of the developing mouse neocortex using a combination of methods that together circumvent the biases and limitations of individual approaches. We found that individual cortical progenitor cells show a high degree of stochasticity and generate pyramidal cell lineages that adopt a wide range of laminar configurations. Mathematical modelling of these lineage data suggests that a small number of progenitor cell populations, each generating pyramidal cells following different stochastic developmental programs, suffice to generate the heterogenous complement of pyramidal cell lineages that collectively build the complex cytoarchitecture of the neocortex.}, author = {Llorca, Alfredo and Ciceri, Gabriele and Beattie, Robert J and Wong, Fong K. and Diana, Giovanni and Serafeimidou, Eleni and Fernández-Otero, Marian and Streicher, Carmen and Arnold, Sebastian J. 
and Meyer, Martin and Hippenmeyer, Simon and Maravall, Miguel and Marín, Oscar}, booktitle = {bioRxiv}, publisher = {Cold Spring Harbor Laboratory}, title = {{Heterogeneous progenitor cell behaviors underlie the assembly of neocortical cytoarchitecture}}, doi = {10.1101/494088}, year = {2018}, } @inbook{86, abstract = {Responsiveness—the requirement that every request to a system be eventually handled—is one of the fundamental liveness properties of a reactive system. Average response time is a quantitative measure for the responsiveness requirement used commonly in performance evaluation. We show how average response time can be computed on state-transition graphs, on Markov chains, and on game graphs. In all three cases, we give polynomial-time algorithms.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, booktitle = {Principles of Modeling}, editor = {Lohstroh, Marten and Derler, Patricia and Sirjani, Marjan}, pages = {143 -- 161}, publisher = {Springer}, title = {{Computing average response time}}, doi = {10.1007/978-3-319-95246-8_9}, volume = {10760}, year = {2018}, } @article{9062, abstract = {Self-assembly is the autonomous organization of components into patterns or structures: an essential ingredient of biology and a desired route to complex organization1. At equilibrium, the structure is encoded through specific interactions2,3,4,5,6,7,8, at an unfavourable entropic cost for the system. An alternative approach, widely used by nature, uses energy input to bypass the entropy bottleneck and develop features otherwise impossible at equilibrium9. Dissipative building blocks that inject energy locally were made available by recent advances in colloidal science10,11 but have not been used to control self-assembly. Here we show the targeted formation of self-powered microgears from active particles and their autonomous synchronization into dynamical superstructures. 
We use a photoactive component that consumes fuel, haematite, to devise phototactic microswimmers that form self-spinning microgears following spatiotemporal light patterns. The gears are coupled via their chemical clouds by diffusiophoresis12 and constitute the elementary bricks of synchronized superstructures, which autonomously regulate their dynamics. The results are quantitatively rationalized on the basis of a stochastic description of diffusio-phoretic oscillators dynamically coupled by chemical gradients. Our findings harness non-equilibrium phoretic phenomena to program interactions and direct self-assembly with fidelity and specificity. It lays the groundwork for the autonomous construction of dynamical architectures and functional micro-machinery.}, author = {Aubret, Antoine and Youssef, Mena and Sacanna, Stefano and Palacci, Jérémie A}, issn = {1745-2481}, journal = {Nature Physics}, number = {11}, pages = {1114--1118}, publisher = {Springer Nature}, title = {{Targeted assembly and synchronization of self-spinning microgears}}, doi = {10.1038/s41567-018-0227-4}, volume = {14}, year = {2018}, } @article{9229, author = {Danzl, Johann G}, issn = {2500-2295}, journal = {Opera Medica et Physiologica}, number = {S1}, pages = {11}, publisher = {Lobachevsky State University of Nizhny Novgorod}, title = {{Diffraction-unlimited optical imaging for synaptic physiology}}, doi = {10.20388/omp2018.00s1.001}, volume = {4}, year = {2018}, } @inproceedings{6005, abstract = {Network games are widely used as a model for selfish resource-allocation problems. In the classicalmodel, each player selects a path connecting her source and target vertices. The cost of traversingan edge depends on theload; namely, number of players that traverse it. Thus, it abstracts the factthat different users may use a resource at different times and for different durations, which playsan important role in determining the costs of the users in reality. 
For example, when transmitting packets in a communication network, routing traffic in a road network, or processing a task in a production system, actual sharing and congestion of resources crucially depends on time. In [13], we introduced timed network games, which add a time component to network games. Each vertex v in the network is associated with a cost function, mapping the load on v to the price that a player pays for staying in v for one time unit with this load. Each edge in the network is guarded by the time intervals in which it can be traversed, which forces the players to spend time in the vertices. In this work we significantly extend the way time can be referred to in timed network games. In the model we study, the network is equipped with clocks, and, as in timed automata, edges are guarded by constraints on the values of the clocks, and their traversal may involve a reset of some clocks. We argue that the stronger model captures many realistic networks. The addition of clocks breaks the techniques we developed in [13] and we develop new techniques in order to show that positive results on classic network games carry over to the stronger timed setting.}, author = {Avni, Guy and Guha, Shibashis and Kupferman, Orna}, issn = {1868-8969}, location = {Liverpool, United Kingdom}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Timed network games with clocks}}, doi = {10.4230/LIPICS.MFCS.2018.23}, volume = {117}, year = {2018}, } @article{9668, abstract = {Estimating the homogeneous ice nucleation rate from undercooled liquid water is crucial for understanding many important physical phenomena and technological applications, and challenging for both experiments and theory. From a theoretical point of view, difficulties arise due to the long time scales required, as well as the numerous nucleation pathways involved to form ice nuclei with different stacking disorders. 
We computed the homogeneous ice nucleation rate at a physically relevant undercooling for a single-site water model, taking into account the diffuse nature of ice–water interfaces, stacking disorders in ice nuclei, and the addition rate of particles to the critical nucleus. We disentangled and investigated the relative importance of all the terms, including interfacial free energy, entropic contributions and the kinetic prefactor, that contribute to the overall nucleation rate. Breaking down the problem into pieces not only provides physical insights into ice nucleation, but also sheds light on the long-standing discrepancy between different theoretical predictions, as well as between theoretical and experimental determinations of the nucleation rate. Moreover, we pinpoint the main shortcomings and suggest strategies to systematically improve the existing simulation methods.}, author = {Cheng, Bingqing and Dellago, Christoph and Ceriotti, Michele}, issn = {1463-9084}, journal = {Physical Chemistry Chemical Physics}, number = {45}, pages = {28732--28740}, publisher = {Royal Society of Chemistry}, title = {{Theoretical prediction of the homogeneous ice nucleation rate: Disentangling thermodynamics and kinetics}}, doi = {10.1039/c8cp04561e}, volume = {20}, year = {2018}, } @article{9687, abstract = {The Gibbs free energy is the fundamental thermodynamic potential underlying the relative stability of different states of matter under constant-pressure conditions. However, computing this quantity from atomic-scale simulations is far from trivial, so the potential energy of a system is often used as a proxy. In this paper, we use a combination of thermodynamic integration methods to accurately evaluate the Gibbs free energies associated with defects in crystals, including the vacancy formation energy in bcc iron, and the stacking fault energy in fcc nickel, iron, and cobalt. 
We quantify the importance of entropic and anharmonic effects in determining the free energies of defects at high temperatures, and show that the potential energy approximation as well as the harmonic approximation may produce inaccurate or even qualitatively wrong results. Our calculations manifest the necessity to employ accurate free energy methods such as thermodynamic integration to estimate the stability of crystallographic defects at high temperatures.}, author = {Cheng, Bingqing and Ceriotti, Michele}, issn = {2469-9969}, journal = {Physical Review B}, number = {5}, publisher = {American Physical Society}, title = {{Computing the absolute Gibbs free energy in atomistic simulations: Applications to defects in solids}}, doi = {10.1103/physrevb.97.054102}, volume = {97}, year = {2018}, } @article{315, abstract = {More than 100 years after Grigg’s influential analysis of species’ borders, the causes of limits to species’ ranges still represent a puzzle that has never been understood with clarity. The topic has become especially important recently as many scientists have become interested in the potential for species’ ranges to shift in response to climate change—and yet nearly all of those studies fail to recognise or incorporate evolutionary genetics in a way that relates to theoretical developments. I show that range margins can be understood based on just two measurable parameters: (i) the fitness cost of dispersal—a measure of environmental heterogeneity—and (ii) the strength of genetic drift, which reduces genetic diversity. Together, these two parameters define an ‘expansion threshold’: adaptation fails when genetic drift reduces genetic diversity below that required for adaptation to a heterogeneous environment. When the key parameters drop below this expansion threshold locally, a sharp range margin forms. 
When they drop below this threshold throughout the species’ range, adaptation collapses everywhere, resulting in either extinction or formation of a fragmented metapopulation. Because the effects of dispersal differ fundamentally with dimension, the second parameter—the strength of genetic drift—is qualitatively different compared to a linear habitat. In two-dimensional habitats, genetic drift becomes effectively independent of selection. It decreases with ‘neighbourhood size’—the number of individuals accessible by dispersal within one generation. Moreover, in contrast to earlier predictions, which neglected evolution of genetic variance and/or stochasticity in two dimensions, dispersal into small marginal populations aids adaptation. This is because the reduction of both genetic and demographic stochasticity has a stronger effect than the cost of dispersal through increased maladaptation. The expansion threshold thus provides a novel, theoretically justified, and testable prediction for formation of the range margin and collapse of the species’ range.}, author = {Polechova, Jitka}, issn = {15449173}, journal = {PLoS Biology}, number = {6}, publisher = {Public Library of Science}, title = {{Is the sky the limit? On the expansion threshold of a species’ range}}, doi = {10.1371/journal.pbio.2005372}, volume = {16}, year = {2018}, } @article{8422, abstract = {The Birkhoff conjecture says that the boundary of a strictly convex integrable billiard table is necessarily an ellipse. In this article, we consider a stronger notion of integrability, namely integrability close to the boundary, and prove a local version of this conjecture: a small perturbation of an ellipse of small eccentricity which preserves integrability near the boundary, is itself an ellipse. This extends the result in Avila et al. (Ann Math 184:527–558, ADK16), where integrability was assumed on a larger set. 
In particular, it shows that (local) integrability near the boundary implies global integrability. One of the crucial ideas in the proof consists in analyzing Taylor expansion of the corresponding action-angle coordinates with respect to the eccentricity parameter, deriving and studying higher order conditions for the preservation of integrable rational caustics.}, author = {Huang, Guan and Kaloshin, Vadim and Sorrentino, Alfonso}, issn = {1016-443X}, journal = {Geometric and Functional Analysis}, keywords = {Geometry and Topology, Analysis}, number = {2}, pages = {334--392}, publisher = {Springer Nature}, title = {{Nearly circular domains which are integrable close to the boundary are ellipses}}, doi = {10.1007/s00039-018-0440-4}, volume = {28}, year = {2018}, } @article{8421, abstract = {The classical Birkhoff conjecture claims that the boundary of a strictly convex integrable billiard table is necessarily an ellipse (or a circle as a special case). In this article we prove a complete local version of this conjecture: a small integrable perturbation of an ellipse must be an ellipse. This extends and completes the result in Avila-De Simoi-Kaloshin, where nearly circular domains were considered. One of the crucial ideas in the proof is to extend action-angle coordinates for elliptic billiards into complex domains (with respect to the angle), and to thoroughly analyze the nature of their complex singularities. 
As an application, we are able to prove some spectral rigidity results for elliptic domains.}, author = {Kaloshin, Vadim and Sorrentino, Alfonso}, issn = {0003-486X}, journal = {Annals of Mathematics}, keywords = {Statistics, Probability and Uncertainty, Statistics and Probability}, number = {1}, pages = {315--380}, publisher = {Annals of Mathematics, Princeton U}, title = {{On the local Birkhoff conjecture for convex billiards}}, doi = {10.4007/annals.2018.188.1.6}, volume = {188}, year = {2018}, } @article{8420, abstract = {We show that in the space of all convex billiard boundaries, the set of boundaries with rational caustics is dense. More precisely, the set of billiard boundaries with caustics of rotation number 1/q is polynomially dense in the smooth case, and exponentially dense in the analytic case.}, author = {Kaloshin, Vadim and Zhang, Ke}, issn = {0951-7715}, journal = {Nonlinearity}, keywords = {Mathematical Physics, General Physics and Astronomy, Applied Mathematics, Statistical and Nonlinear Physics}, number = {11}, pages = {5214--5234}, publisher = {IOP Publishing}, title = {{Density of convex billiards with rational caustics}}, doi = {10.1088/1361-6544/aadc12}, volume = {31}, year = {2018}, } @article{8426, abstract = {For any strictly convex planar domain $\Omega \subset \mathbb{R}^2$ with a $C^\infty$ boundary one can associate an infinite sequence of spectral invariants introduced by Marvizi–Melrose [5]. These invariants can generically be determined using the spectrum of the Dirichlet problem of the Laplace operator. A natural question asks if this collection is sufficient to determine $\Omega$ up to isometry. In this paper we give a counterexample, namely, we present two nonisometric domains $\Omega$ and $\bar{\Omega}$ with the same collection of Marvizi–Melrose invariants. Moreover, each domain has countably many periodic orbits $\{S_n\}_{n \geq 1}$ (resp. 
$\{\bar{S}_n\}_{n \geq 1}$) of period going to infinity such that $S_n$ and $\bar{S}_n$ have the same period and perimeter for each $n$.}, author = {Buhovsky, Lev and Kaloshin, Vadim}, issn = {1560-3547}, journal = {Regular and Chaotic Dynamics}, pages = {54--59}, publisher = {Springer Nature}, title = {{Nonisometric domains with the same Marvizi-Melrose invariants}}, doi = {10.1134/s1560354718010057}, volume = {23}, year = {2018}, } @article{9053, abstract = {The development of strategies to assemble microscopic machines from dissipative building blocks is essential on the route to novel active materials. We recently demonstrated the hierarchical self-assembly of phoretic microswimmers into self-spinning microgears and their synchronization by diffusiophoretic interactions [Aubret et al., Nat. Phys., 2018]. In this paper, we adopt a pedagogical approach and expose our strategy to control self-assembly and build machines using phoretic phenomena. We notably introduce Highly Inclined Laminated Optical sheets microscopy (HILO) to image and characterize anisotropic and dynamic diffusiophoretic interactions, which cannot be performed by conventional fluorescence microscopy. The dynamics of a (haematite) photocatalytic material immersed in (hydrogen peroxide) fuel under various illumination patterns is first described and quantitatively rationalized by a model of diffusiophoresis, the migration of a colloidal particle in a concentration gradient. It is further exploited to design phototactic microswimmers that direct towards the high intensity of light, as a result of the reorientation of the haematite in a light gradient. We finally show the assembly of self-spinning microgears from colloidal microswimmers and carefully characterize the interactions using HILO techniques. The results are compared with analytical and numerical predictions and agree quantitatively, stressing the important role played by concentration gradients induced by chemical activity to control and design interactions. 
Because the approach described hereby is generic, this work paves the way for the rational design of machines by controlling phoretic phenomena.}, author = {Aubret, Antoine and Palacci, Jérémie A}, issn = {1744-6848}, journal = {Soft Matter}, keywords = {General Chemistry, Condensed Matter Physics}, number = {47}, pages = {9577--9588}, publisher = {Royal Society of Chemistry}, title = {{Diffusiophoretic design of self-spinning microgears from colloidal microswimmers}}, doi = {10.1039/c8sm01760c}, volume = {14}, year = {2018}, } @article{9136, abstract = {In this study we investigate the scaling of precipitation extremes with temperature in the Mediterranean region by assessing against observations the present day and future regional climate simulations performed in the frame of the HyMeX and MED-CORDEX programs. Over the 1979–2008 period, despite differences in quantitative precipitation simulation across the various models, the change in precipitation extremes with respect to temperature is robust and consistent. The spatial variability of the temperature–precipitation extremes relationship displays a hook shape across the Mediterranean, with negative slope at high temperatures and a slope following Clausius–Clapeyron (CC)-scaling at low temperatures. The temperature at which the slope of the temperature–precipitation extreme relation sharply changes (or temperature break), ranges from about 20 °C in the western Mediterranean to <10 °C in Greece. In addition, this slope is always negative in the arid regions of the Mediterranean. The scaling of the simulated precipitation extremes is insensitive to ocean–atmosphere coupling, while it depends very weakly on the resolution at high temperatures for short precipitation accumulation times. In future climate scenario simulations covering the 2070–2100 period, the temperature break shifts to higher temperatures by a value which is on average the mean regional temperature change due to global warming. 
The slope of the simulated future temperature–precipitation extremes relationship is close to CC-scaling at temperatures below the temperature break, while at high temperatures, the negative slope is close, but somewhat flatter or steeper, than in the current climate depending on the model. Overall, models predict more intense precipitation extremes in the future. Adjusting the temperature–precipitation extremes relationship in the present climate using the CC law and the temperature shift in the future allows the recovery of the temperature–precipitation extremes relationship in the future climate. This implies negligible regional changes of relative humidity in the future despite the large warming and drying over the Mediterranean. This suggests that the Mediterranean Sea is the primary source of moisture which counteracts the drying and warming impacts on relative humidity in parts of the Mediterranean region.}, author = {Drobinski, Philippe and Silva, Nicolas Da and Panthou, Gérémy and Bastin, Sophie and Muller, Caroline J and Ahrens, Bodo and Borga, Marco and Conte, Dario and Fosser, Giorgia and Giorgi, Filippo and Güttler, Ivan and Kotroni, Vassiliki and Li, Laurent and Morin, Efrat and Önol, Bariş and Quintana-Segui, Pere and Romera, Raquel and Torma, Csaba Zsolt}, issn = {0930-7575}, journal = {Climate Dynamics}, keywords = {Atmospheric Science}, number = {3}, pages = {1237--1257}, publisher = {Springer Nature}, title = {{Scaling precipitation extremes with temperature in the Mediterranean: Past climate assessment and projection in anthropogenic scenarios}}, doi = {10.1007/s00382-016-3083-x}, volume = {51}, year = {2018}, } @article{9134, abstract = {Several studies have shown the existence of a critical latitude where the dissipation of internal tides is strongly enhanced. Internal tides are internal waves generated by barotropic tidal currents impinging rough topography at the seafloor. 
Their dissipation and concomitant diapycnal mixing are believed to be important for water masses and the large‐scale ocean circulation. The purpose of this study is to clarify the physical processes at the origin of this strong latitudinal dependence of tidal energy dissipation. We find that different mechanisms are involved equatorward and poleward of the critical latitude. Triadic resonant instabilities are responsible for the dissipation of internal tides equatorward of the critical latitude. In particular, a dominant triad involving the primary internal tide and near‐inertial waves is key. At the critical latitude, the peak of energy dissipation is explained by both increased instability growth rates, and smaller scales of secondary waves thus more prone to break and dissipate their energy. Surprisingly, poleward of the critical latitude, the generation of evanescent waves appears to be crucial. Triadic instabilities have been widely studied, but the transfer of energy to evanescent waves has received comparatively little attention. Our work suggests that the nonlinear transfer of energy from the internal tide to evanescent waves (corresponding to the 2f‐pump mechanism described by Young et al., 2008, https://doi.org/10.1017/S0022112008001742) is an efficient mechanism to dissipate internal tide energy near and poleward of the critical latitude. The theoretical results are confirmed in idealized high‐resolution numerical simulations of a barotropic M2 tide impinging sinusoidal topography in a linearly stratified fluid.}, author = {Richet, O. and Chomaz, J.-M. 
and Muller, Caroline J}, issn = {2169-9275}, journal = {Journal of Geophysical Research: Oceans}, number = {9}, pages = {6136--6155}, publisher = {American Geophysical Union}, title = {{Internal tide dissipation at topography: Triadic resonant instability equatorward and evanescent waves poleward of the critical latitude}}, doi = {10.1029/2017jc013591}, volume = {123}, year = {2018}, } @article{9135, abstract = {Idealized simulations of tropical moist convection have revealed that clouds can spontaneously clump together in a process called self-aggregation. This results in a state where a moist cloudy region with intense deep convection is surrounded by extremely dry subsiding air devoid of deep convection. Because of the idealized settings of the simulations where it was discovered, the relevance of self-aggregation to the real world is still debated. Here, we show that self-aggregation feedbacks play a leading-order role in the spontaneous genesis of tropical cyclones in cloud-resolving simulations. Those feedbacks accelerate the cyclogenesis process by a factor of 2, and the feedbacks contributing to the cyclone formation show qualitative and quantitative agreement with the self-aggregation process. Once the cyclone is formed, wind-induced surface heat exchange (WISHE) effects dominate, although we find that self-aggregation feedbacks have a small but nonnegligible contribution to the maintenance of the mature cyclone. Our results suggest that self-aggregation, and the framework developed for its study, can help shed more light into the physical processes leading to cyclogenesis and cyclone intensification. 
In particular, our results point out the importance of the longwave radiative cooling outside the cyclone.}, author = {Muller, Caroline J and Romps, David M.}, issn = {0027-8424}, journal = {Proceedings of the National Academy of Sciences}, keywords = {Multidisciplinary}, number = {12}, pages = {2930--2935}, publisher = {National Academy of Sciences}, title = {{Acceleration of tropical cyclogenesis by self-aggregation feedbacks}}, doi = {10.1073/pnas.1719967115}, volume = {115}, year = {2018}, } @article{9471, abstract = {The DEMETER (DME) DNA glycosylase catalyzes genome-wide DNA demethylation and is required for endosperm genomic imprinting and embryo viability. Targets of DME-mediated DNA demethylation reside in small, euchromatic, AT-rich transposons and at the boundaries of large transposons, but how DME interacts with these diverse chromatin states is unknown. The STRUCTURE SPECIFIC RECOGNITION PROTEIN 1 (SSRP1) subunit of the chromatin remodeler FACT (facilitates chromatin transactions), was previously shown to be involved in the DME-dependent regulation of genomic imprinting in Arabidopsis endosperm. Therefore, to investigate the interaction between DME and chromatin, we focused on the activity of the two FACT subunits, SSRP1 and SUPPRESSOR of TY16 (SPT16), during reproduction in Arabidopsis. We found that FACT colocalizes with nuclear DME in vivo, and that DME has two classes of target sites, the first being euchromatic and accessible to DME, but the second, representing over half of DME targets, requiring the action of FACT for DME-mediated DNA demethylation genome-wide. Our results show that the FACT-dependent DME targets are GC-rich heterochromatin domains with high nucleosome occupancy enriched with H3K9me2 and H3K27me1. Further, we demonstrate that heterochromatin-associated linker histone H1 specifically mediates the requirement for FACT at a subset of DME-target loci. 
Overall, our results demonstrate that FACT is required for DME targeting by facilitating its access to heterochromatin.}, author = {Frost, Jennifer M. and Kim, M. Yvonne and Park, Guen Tae and Hsieh, Ping-Hung and Nakamura, Miyuki and Lin, Samuel J. H. and Yoo, Hyunjin and Choi, Jaemyung and Ikeda, Yoko and Kinoshita, Tetsu and Choi, Yeonhee and Zilberman, Daniel and Fischer, Robert L.}, issn = {1091-6490}, journal = {Proceedings of the National Academy of Sciences}, keywords = {Multidisciplinary}, number = {20}, pages = {E4720--E4729}, publisher = {National Academy of Sciences}, title = {{FACT complex is required for DNA demethylation at heterochromatin during reproduction in Arabidopsis}}, doi = {10.1073/pnas.1713333115}, volume = {115}, year = {2018}, } @article{95, abstract = {Electrostatic charging of insulating fine particles can be responsible for numerous phenomena ranging from lightning in volcanic plumes to dust explosions. However, even basic aspects of how fine particles become charged are still unclear. Studying particle charging is challenging because it usually involves the complexities associated with many-particle collisions. To address these issues, we introduce a method based on acoustic levitation, which makes it possible to initiate sequences of repeated collisions of a single submillimeter particle with a flat plate, and to precisely measure the particle charge in situ after each collision. We show that collisional charge transfer between insulators is dependent on the hydrophobicity of the contacting surfaces. We use glass, which we modify by attaching nonpolar molecules to the particle, the plate, or both. We find that hydrophilic surfaces develop significant positive charges after contacting hydrophobic surfaces. Moreover, we demonstrate that charging between a hydrophilic and a hydrophobic surface is suppressed in an acidic environment and enhanced in a basic one. 
Application of an electric field during each collision is found to modify the charge transfer, again depending on surface hydrophobicity. We discuss these results within the context of contact charging due to ion transfer, and we show that they lend strong support to OH− ions as the charge carriers.}, author = {Lee, Victor and James, Nicole and Waitukaitis, Scott R and Jaeger, Heinrich}, journal = {Physical Review Materials}, number = {3}, publisher = {American Physical Society}, title = {{Collisional charging of individual submillimeter particles: Using ultrasonic levitation to initiate and track charge transfer}}, doi = {10.1103/PhysRevMaterials.2.035602}, volume = {2}, year = {2018}, } @article{9567, abstract = {Let P be a graph property which is preserved by removal of edges, and consider the random graph process that starts with the empty n-vertex graph and then adds edges one-by-one, each chosen uniformly at random subject to the constraint that P is not violated. These types of random processes have been the subject of extensive research over the last 20 years, having striking applications in extremal combinatorics, and leading to the discovery of important probabilistic tools. In this paper we consider the k-matching-free process, where P is the property of not containing a matching of size k. We are able to analyse the behaviour of this process for a wide range of values of k; in particular we prove that if k=o(n) or if n−2k=o(n−−√/logn) then this process is likely to terminate in a k-matching-free graph with the maximum possible number of edges, as characterised by Erdős and Gallai. 
We also show that these bounds on k are essentially best possible, and we make a first step towards understanding the behaviour of the process in the intermediate regime.}, author = {Krivelevich, Michael and Kwan, Matthew Alan and Loh, Po‐Shen and Sudakov, Benny}, issn = {1098-2418}, journal = {Random Structures and Algorithms}, number = {4}, pages = {692--716}, publisher = {Wiley}, title = {{The random k‐matching‐free process}}, doi = {10.1002/rsa.20814}, volume = {53}, year = {2018}, } @article{9565, abstract = {Let D(n,p) be the random directed graph on n vertices where each of the n(n-1) possible arcs is present independently with probability p. A celebrated result of Frieze shows that if p≥(logn+ω(1))/n then D(n,p) typically has a directed Hamilton cycle, and this is best possible. In this paper, we obtain a strengthening of this result, showing that under the same condition, the number of directed Hamilton cycles in D(n,p) is typically n!(p(1+o(1)))n. We also prove a hitting-time version of this statement, showing that in the random directed graph process, as soon as every vertex has in-/out-degrees at least 1, there are typically n!(logn/n(1+o(1)))n directed Hamilton cycles.}, author = {Ferber, Asaf and Kwan, Matthew Alan and Sudakov, Benny}, issn = {1098-2418}, journal = {Random Structures and Algorithms}, number = {4}, pages = {592--603}, publisher = {Wiley}, title = {{Counting Hamilton cycles in sparse random directed graphs}}, doi = {10.1002/rsa.20815}, volume = {53}, year = {2018}, } @article{9568, abstract = {An intercalate in a Latin square is a 2×2 Latin subsquare. Let N be the number of intercalates in a uniformly random n×n Latin square. We prove that asymptotically almost surely N≥(1−o(1))n2/4, and that EN≤(1+o(1))n2/2 (therefore asymptotically almost surely N≤fn2 for any f→∞). This significantly improves the previous best lower and upper bounds. 
We also give an upper tail bound for the number of intercalates in two fixed rows of a random Latin square. In addition, we discuss a problem of Linial and Luria on low-discrepancy Latin squares.}, author = {Kwan, Matthew Alan and Sudakov, Benny}, issn = {1098-2418}, journal = {Random Structures and Algorithms}, number = {2}, pages = {181--196}, publisher = {Wiley}, title = {{Intercalates and discrepancy in random Latin squares}}, doi = {10.1002/rsa.20742}, volume = {52}, year = {2018}, } @article{9587, abstract = {We say a family of sets is intersecting if any two of its sets intersect, and we say it is trivially intersecting if there is an element which appears in every set of the family. In this paper we study the maximum size of a non-trivially intersecting family in a natural “multi-part” setting. Here the ground set is divided into parts, and one considers families of sets whose intersection with each part is of a prescribed size. Our work is motivated by classical results in the single-part setting due to Erdős, Ko and Rado, and Hilton and Milner, and by a theorem of Frankl concerning intersecting families in this multi-part setting. In the case where the part sizes are sufficiently large we determine the maximum size of a non-trivially intersecting multi-part family, disproving a conjecture of Alon and Katona.}, author = {Kwan, Matthew Alan and Sudakov, Benny and Vieira, Pedro}, issn = {0097-3165}, journal = {Journal of Combinatorial Theory Series A}, pages = {44--60}, publisher = {Elsevier}, title = {{Non-trivially intersecting multi-part families}}, doi = {10.1016/j.jcta.2017.12.001}, volume = {156}, year = {2018}, } @article{9665, abstract = {We investigate the thermodynamics and kinetics of a hydrogen interstitial in magnetic α-iron, taking account of the quantum fluctuations of the proton as well as the anharmonicities of lattice vibrations and hydrogen hopping. 
We show that the diffusivity of hydrogen in the lattice of bcc iron deviates strongly from an Arrhenius behavior at and below room temperature. We compare a quantum transition state theory to explicit ring polymer molecular dynamics in the calculation of diffusivity. We then address the trapping of hydrogen by a vacancy as a prototype lattice defect. By a sequence of steps in a thought experiment, each involving a thermodynamic integration, we are able to separate out the binding free energy of a proton to a defect into harmonic and anharmonic, and classical and quantum contributions. We find that about 30% of a typical binding free energy of hydrogen to a lattice defect in iron is accounted for by finite temperature effects, and about half of these arise from quantum proton fluctuations. This has huge implications for the comparison between thermal desorption and permeation experiments and standard electronic structure theory. The implications are even greater for the interpretation of muon spin resonance experiments.}, author = {Cheng, Bingqing and Paxton, Anthony T. and Ceriotti, Michele}, issn = {1079-7114}, journal = {Physical Review Letters}, number = {22}, publisher = {American Physical Society}, title = {{Hydrogen diffusion and trapping in α-iron: The role of quantum and anharmonic fluctuations}}, doi = {10.1103/physrevlett.120.225901}, volume = {120}, year = {2018}, } @article{9659, abstract = {The curvature dependence of interfacial free energy, which is crucial in quantitatively predicting nucleation kinetics and the stability of bubbles and droplets, is quantified by the Tolman length δ. For solid-liquid interfaces, however, δ has never been computed directly due to various theoretical and practical challenges. 
Here we perform a direct evaluation of the Tolman length from atomistic simulations of a solid-liquid planar interface in out-of-equilibrium conditions, by first computing the surface tension from the amplitude of thermal capillary fluctuations of a localized version of the Gibbs dividing surface and by then calculating how much the surface energy changes when it is defined relative to the equimolar dividing surface. We computed δ for a model potential, and found a good agreement with the values indirectly inferred from nucleation simulations. The agreement not only validates our approach but also suggests that the nucleation free energy of the system can be perfectly described using classical nucleation theory if the Tolman length is taken into account.}, author = {Cheng, Bingqing and Ceriotti, Michele}, issn = {1089-7690}, journal = {The Journal of Chemical Physics}, number = {23}, publisher = {AIP Publishing}, title = {{Communication: Computing the Tolman length for solid-liquid interfaces}}, doi = {10.1063/1.5038396}, volume = {148}, year = {2018}, } @article{12603, abstract = {We present a field-data rich modelling analysis to reconstruct the climatic forcing, glacier response, and runoff generation from a high-elevation catchment in central Chile over the period 2000–2015 to provide insights into the differing contributions of debris-covered and debris-free glaciers under current and future changing climatic conditions. Model simulations with the physically based glacio-hydrological model TOPKAPI-ETH reveal a period of neutral or slightly positive mass balance between 2000 and 2010, followed by a transition to increasingly large annual mass losses, associated with a recent mega drought. Mass losses commence earlier, and are more severe, for a heavily debris-covered glacier, most likely due to its strong dependence on snow avalanche accumulation, which has declined in recent years. 
Catchment runoff shows a marked decreasing trend over the study period, but with high interannual variability directly linked to winter snow accumulation, and high contribution from ice melt in dry periods and drought conditions. The study demonstrates the importance of incorporating local-scale processes such as snow avalanche accumulation and spatially variable debris thickness, in understanding the responses of different glacier types to climate change. We highlight the increased dependency of runoff from high Andean catchments on the diminishing resource of glacier ice during dry years.}, author = {Burger, Flavia and Ayala, Alvaro and Farias, David and Shaw, Thomas E. and MacDonell, Shelley and Brock, Ben and McPhee, James and Pellicciotti, Francesca}, issn = {1099-1085}, journal = {Hydrological Processes}, keywords = {Water Science and Technology}, number = {2}, pages = {214--229}, publisher = {Wiley}, title = {{Interannual variability in glacier contribution to runoff from a high‐elevation Andean catchment: Understanding the role of debris cover in glacier hydrology}}, doi = {10.1002/hyp.13354}, volume = {33}, year = {2018}, } @article{12605, abstract = {Snow depth patterns over glaciers are controlled by precipitation, snow redistribution due to wind and avalanches, and the exchange of energy with the atmosphere that determines snow ablation. While many studies have advanced the understanding of ablation processes, less is known about winter snow patterns and their variability over glaciers. We analyze snow depth on Haut Glacier d'Arolla, Switzerland, in the two winter seasons 2006–2007 and 2010–2011 to (1) understand whether snow depth over an alpine glacier at the end of the accumulation season exhibits a behavior similar to the one observed on single slopes and vegetated areas; and (2) investigate the snow pattern consistency over the two accumulation seasons. 
We perform this analysis on a data set of high-resolution lidar-derived snow depth using variograms and fractal parameters. Our first main result is that snow depth patterns on the glacier exhibit a multiscale behavior, with a scale break around 20 m after which the fractal dimension increases, indicating more autocorrelated structure before the scale break than after. Second, this behavior is consistent over the two years, with fractal parameters and their spatial variability almost constant in the two seasons. We also show that snow depth patterns exhibit a distinct behavior in the glacier tongue and the upper catchment, with longer correlation distances on the tongue in the direction of the main winds, suggesting spatial distinctions that are likely induced by different processes and that should be taken into account when extrapolating snow depth from limited samples.}, author = {Clemenzi, I. and Pellicciotti, Francesca and Burlando, P.}, issn = {1944-7973}, journal = {Water Resources Research}, keywords = {Water Science and Technology}, number = {10}, pages = {7929--7945}, publisher = {American Geophysical Union}, title = {{Snow depth structure, fractal behavior, and interannual consistency over Haut Glacier d'Arolla, Switzerland}}, doi = {10.1029/2017wr021606}, volume = {54}, year = {2018}, } @article{12604, abstract = {Glaciers in the high mountains of Asia provide an important water resource for millions of people. Many of these glaciers are partially covered by rocky debris, which protects the ice from solar radiation and warm air. However, studies have found that the surface of these debris-covered glaciers is actually lowering as fast as glaciers without debris. Water ponded on the surface of the glaciers may be partially responsible, as water can absorb atmospheric energy very efficiently. However, the overall effect of these ponds has not been thoroughly assessed yet. 
We study a valley in Nepal for which we have extensive weather measurements, and we use a numerical model to calculate the energy absorbed by ponds on the surface of the glaciers over 6 months. As we have not observed each individual pond thoroughly, we run the model 5,000 times with different setups. We find that ponds are extremely important for glacier melt and absorb energy 14 times as quickly as the debris-covered ice. Although the ponds account for 1% of the glacier area covered by rocks, and only 0.3% of the total glacier area, they absorb enough energy to account for one eighth of the whole valley's ice loss.}, author = {Miles, Evan S. and Willis, Ian and Buri, Pascal and Steiner, Jakob F. and Arnold, Neil S. and Pellicciotti, Francesca}, issn = {1944-8007}, journal = {Geophysical Research Letters}, keywords = {General Earth and Planetary Sciences, Geophysics}, number = {19}, pages = {10464--10473}, publisher = {American Geophysical Union}, title = {{Surface pond energy absorption across four Himalayan Glaciers accounts for 1/8 of total catchment ice loss}}, doi = {10.1029/2018gl079678}, volume = {45}, year = {2018}, } @article{12607, abstract = {Supraglacial ice cliffs exist on debris-covered glaciers worldwide, but despite their importance as melt hot spots, their life cycle is little understood. Early field observations had advanced a hypothesis of survival of north-facing and disappearance of south-facing cliffs, which is central for predicting the contribution of cliffs to total glacier mass losses. Their role as windows of energy transfer suggests they may explain the anomalously high mass losses of debris-covered glaciers in High Mountain Asia (HMA) despite the insulating debris, currently at the center of a debated controversy. 
We use a 3D model of cliff evolution coupled to very high-resolution topographic data to demonstrate that ice cliffs facing south (in the Northern Hemisphere) disappear within a few months due to enhanced solar radiation receipts and that aspect is the key control on cliffs evolution. We reproduce continuous flattening of south-facing cliffs, a result of their vertical gradient of incoming solar radiation and sky view factor. Our results establish that only north-facing cliffs are recurrent features and thus stable contributors to the melting of debris-covered glaciers. Satellite observations and mass balance modeling confirms that few south-facing cliffs of small size exist on the glaciers of Langtang, and their contribution to the glacier volume losses is very small (∼1%). This has major implications for the mass balance of HMA debris-covered glaciers as it provides the basis for new parameterizations of cliff evolution and distribution to constrain volume losses in a region where glaciers are highly relevant as water sources for millions of people.}, author = {Buri, Pascal and Pellicciotti, Francesca}, issn = {1091-6490}, journal = {Proceedings of the National Academy of Sciences}, number = {17}, pages = {4369--4374}, publisher = {National Academy of Sciences}, title = {{Aspect controls the survival of ice cliffs on debris-covered glaciers}}, doi = {10.1073/pnas.1713892115}, volume = {115}, year = {2018}, }