@article{9530,
abstract = {Background
DNA methylation of active genes, also known as gene body methylation, is found in many animal and plant genomes. Despite this, the transcriptional and developmental role of such methylation remains poorly understood. Here, we explore the dynamic range of DNA methylation in honey bee, a model organism for gene body methylation.
Results
Our data show that CG methylation in gene bodies globally fluctuates during honey bee development. However, these changes cause no gene expression alterations. Intriguingly, despite the global alterations, tissue-specific CG methylation patterns of complete genes or exons are rare, implying robust maintenance of genic methylation during development. Additionally, we show that CG methylation maintenance fluctuates in somatic cells, while reaching maximum fidelity in sperm cells. Finally, unlike universally present CG methylation, we discovered non-CG methylation specifically in bee heads that resembles such methylation in mammalian brain tissue.
Conclusions
Based on these results, we propose that gene body CG methylation can oscillate during development if it is kept to a level adequate to preserve function. Additionally, our data suggest that heightened non-CG methylation is a conserved regulator of animal nervous systems.},
author = {Harris, Keith D. and Lloyd, James P. B. and Domb, Katherine and Zilberman, Daniel and Zemach, Assaf},
issn = {1756-8935},
journal = {Epigenetics \& Chromatin},
publisher = {Springer Nature},
title = {{DNA methylation is maintained with high fidelity in the honey bee germline and exhibits global non-functional fluctuations during somatic development}},
doi = {10.1186/s13072-019-0307-4},
volume = {12},
year = {2019},
}
@inproceedings{7479,
abstract = {Multi-exit architectures, in which a stack of processing layers is interleaved with early output layers, allow the processing of a test example to stop early and thus save computation time and/or energy. In this work, we propose a new training procedure for multi-exit architectures based on the principle of knowledge distillation. The method encourages early exits to mimic later, more accurate exits, by matching their output probabilities.
Experiments on CIFAR100 and ImageNet show that distillation-based training significantly improves the accuracy of early exits while maintaining state-of-the-art accuracy for late ones. The method is particularly beneficial when training data is limited and it allows a straightforward extension to semi-supervised learning, i.e., making use of unlabeled data at training time. Moreover, it takes only a few lines to implement and incurs almost no computational overhead at training time, and none at all at test time.},
author = {Bui Thi Mai, Phuong and Lampert, Christoph},
booktitle = {IEEE International Conference on Computer Vision},
isbn = {9781728148038},
issn = {1550-5499},
location = {Seoul, Korea},
pages = {1355--1364},
publisher = {IEEE},
title = {{Distillation-based training for multi-exit architectures}},
doi = {10.1109/ICCV.2019.00144},
volume = {2019-October},
year = {2019},
}
@article{9580,
abstract = {An r-cut of a k-uniform hypergraph H is a partition of the vertex set of H into r parts and the size of the cut is the number of edges which have a vertex in each part. A classical result of Edwards says that every m-edge graph has a 2-cut of size m/2 + Ω(√m) and this is best possible. That is, there exist cuts which exceed the expected size of a random cut by some multiple of the standard deviation. We study analogues of this and related results in hypergraphs. First, we observe that similarly to graphs, every m-edge k-uniform hypergraph has an r-cut whose size is Ω(√m) larger than the expected size of a random r-cut. Moreover, in the case where k = 3 and r = 2 this bound is best possible and is attained by Steiner triple systems. Surprisingly, for all other cases (that is, if k ≥ 4 or r ≥ 3), we show that every m-edge k-uniform hypergraph has an r-cut whose size is Ω(m^{5/9}) larger than the expected size of a random r-cut. This is a significant difference in behaviour, since the amount by which the size of the largest cut exceeds the expected size of a random cut is now considerably larger than the standard deviation.},
author = {Conlon, David and Fox, Jacob and Kwan, Matthew Alan and Sudakov, Benny},
issn = {1565-8511},
journal = {Israel Journal of Mathematics},
number = {1},
pages = {67--111},
publisher = {Springer},
title = {{Hypergraph cuts above the average}},
doi = {10.1007/s11856-019-1897-z},
volume = {233},
year = {2019},
}
@article{9586,
abstract = {Consider integers k, ℓ such that 0 ≤ ℓ ≤ (k choose 2). Given a large graph G, what is the fraction of k-vertex subsets of G which span exactly ℓ edges? When G is empty or complete, and ℓ is zero or (k choose 2), this fraction can be exactly 1. On the other hand, if ℓ is far from these extreme values, one might expect that this fraction is substantially smaller than 1. This was recently proved by Alon, Hefetz, Krivelevich, and Tyomkyn who initiated the systematic study of this question and proposed several natural conjectures.
Let ℓ* = min{ℓ, (k choose 2) − ℓ}. Our main result is that for any k and ℓ, the fraction of k-vertex subsets that span ℓ edges is at most log^{O(1)}(ℓ*/k) · √(k/ℓ*), which is best-possible up to the logarithmic factor. This improves on multiple results of Alon, Hefetz, Krivelevich, and Tyomkyn, and resolves one of their conjectures. In addition, we also make some first steps towards some analogous questions for hypergraphs.
Our proofs involve some Ramsey-type arguments, and a number of different probabilistic tools, such as polynomial anticoncentration inequalities, hypercontractivity, and a coupling trick for random variables defined on a ‘slice’ of the Boolean hypercube.},
author = {Kwan, Matthew Alan and Sudakov, Benny and Tran, Tuan},
issn = {1469-7750},
journal = {Journal of the London Mathematical Society},
number = {3},
pages = {757--777},
publisher = {Wiley},
title = {{Anticoncentration for subgraph statistics}},
doi = {10.1112/jlms.12192},
volume = {99},
year = {2019},
}
@article{9585,
abstract = {An n-vertex graph is called C-Ramsey if it has no clique or independent set of size C log n. All known constructions of Ramsey graphs involve randomness in an essential way, and there is an ongoing line of research towards showing that in fact all Ramsey graphs must obey certain “richness” properties characteristic of random graphs. More than 25 years ago, Erdős, Faudree and Sós conjectured that in any C-Ramsey graph there are Ω(n^{5/2}) induced subgraphs, no pair of which have the same numbers of vertices and edges. Improving on earlier results of Alon, Balogh, Kostochka and Samotij, in this paper we prove this conjecture.},
author = {Kwan, Matthew Alan and Sudakov, Benny},
issn = {1088-6850},
journal = {Transactions of the American Mathematical Society},
number = {8},
pages = {5571--5594},
publisher = {American Mathematical Society},
title = {{Proof of a conjecture on induced subgraphs of Ramsey graphs}},
doi = {10.1090/tran/7729},
volume = {372},
year = {2019},
}
@article{6052,
abstract = {Expansion microscopy is a relatively new approach to super-resolution imaging that uses expandable hydrogels to isotropically increase the physical distance between fluorophores in biological samples such as cell cultures or tissue slices. The classic gel recipe results in an expansion factor of ~4×, with a resolution of 60–80 nm. We have recently developed X10 microscopy, which uses a gel that achieves an expansion factor of ~10×, with a resolution of ~25 nm. Here, we provide a step-by-step protocol for X10 expansion microscopy. A typical experiment consists of seven sequential stages: (i) immunostaining, (ii) anchoring, (iii) polymerization, (iv) homogenization, (v) expansion, (vi) imaging, and (vii) validation. The protocol presented here includes recommendations for optimization, pitfalls and their solutions, and detailed guidelines that should increase reproducibility. Although our protocol focuses on X10 expansion microscopy, we detail which of these suggestions are also applicable to classic fourfold expansion microscopy. We exemplify our protocol using primary hippocampal neurons from rats, but our approach can be used with other primary cells or cultured cell lines of interest. This protocol will enable any researcher with basic experience in immunostainings and access to an epifluorescence microscope to perform super-resolution microscopy with X10. The procedure takes 3 d and requires ~5 h of actively handling the sample for labeling and expansion, and another ~3 h for imaging and analysis.},
author = {Truckenbrodt, Sven M. and Sommer, Christoph M. and Rizzoli, Silvio O. and Danzl, Johann G.},
journal = {Nature Protocols},
number = {3},
pages = {832--863},
publisher = {Nature Publishing Group},
title = {{A practical guide to optimization in X10 expansion microscopy}},
doi = {10.1038/s41596-018-0117-3},
volume = {14},
year = {2019},
}
@inproceedings{6989,
abstract = {When can a polyomino piece of paper be folded into a unit cube? Prior work studied tree-like polyominoes, but polyominoes with holes remain an intriguing open problem. We present sufficient conditions for a polyomino with hole(s) to fold into a cube, and conditions under which cube folding is impossible. In particular, we show that all but five special simple holes guarantee foldability. },
author = {Aichholzer, Oswin and Akitaya, Hugo A and Cheung, Kenneth C and Demaine, Erik D and Demaine, Martin L and Fekete, Sandor P and Kleist, Linda and Kostitsyna, Irina and Löffler, Maarten and Masárová, Zuzana and Mundilova, Klara and Schmidt, Christiane},
booktitle = {Proceedings of the 31st Canadian Conference on Computational Geometry},
location = {Edmonton, Canada},
pages = {164--170},
publisher = {Canadian Conference on Computational Geometry},
title = {{Folding polyominoes with holes into a cube}},
year = {2019},
}
@article{9677,
abstract = {Progress in the atomic-scale modeling of matter over the past decade has been tremendous. This progress has been brought about by improvements in methods for evaluating interatomic forces that work by either solving the electronic structure problem explicitly, or by computing accurate approximations of the solution and by the development of techniques that use the Born–Oppenheimer (BO) forces to move the atoms on the BO potential energy surface. As a consequence of these developments it is now possible to identify stable or metastable states, to sample configurations consistent with the appropriate thermodynamic ensemble, and to estimate the kinetics of reactions and phase transitions. All too often, however, progress is slowed down by the bottleneck associated with implementing new optimization algorithms and/or sampling techniques into the many existing electronic-structure and empirical-potential codes. To address this problem, we are thus releasing a new version of the i-PI software. This piece of software is an easily extensible framework for implementing advanced atomistic simulation techniques using interatomic potentials and forces calculated by an external driver code. While the original version of the code (Ceriotti et al., 2014) was developed with a focus on path integral molecular dynamics techniques, this second release of i-PI not only includes several new advanced path integral methods, but also offers other classes of algorithms. In other words, i-PI is moving towards becoming a universal force engine that is both modular and tightly coupled to the driver codes that evaluate the potential energy surface and its derivatives.},
author = {Kapil, Venkat and Rossi, Mariana and Marsalek, Ondrej and Petraglia, Riccardo and Litman, Yair and Spura, Thomas and Cheng, Bingqing and Cuzzocrea, Alice and Meißner, Robert H. and Wilkins, David M. and Helfrecht, Benjamin A. and Juda, Przemysław and Bienvenue, Sébastien P. and Fang, Wei and Kessler, Jan and Poltavsky, Igor and Vandenbrande, Steven and Wieme, Jelle and Corminboeuf, Clemence and Kühne, Thomas D. and Manolopoulos, David E. and Markland, Thomas E. and Richardson, Jeremy O. and Tkatchenko, Alexandre and Tribello, Gareth A. and Van Speybroeck, Veronique and Ceriotti, Michele},
issn = {0010-4655},
journal = {Computer Physics Communications},
pages = {214--223},
publisher = {Elsevier},
title = {{i-PI 2.0: A universal force engine for advanced molecular simulations}},
doi = {10.1016/j.cpc.2018.09.020},
volume = {236},
year = {2019},
}
@article{9680,
abstract = {Atomistic modeling of phase transitions, chemical reactions, or other rare events that involve overcoming high free energy barriers usually entails prohibitively long simulation times. Introducing a bias potential as a function of an appropriately chosen set of collective variables can significantly accelerate the exploration of phase space, albeit at the price of distorting the distribution of microstates. Efficient reweighting to recover the unbiased distribution can be nontrivial when employing adaptive sampling techniques such as metadynamics, variationally enhanced sampling, or parallel bias metadynamics, in which the system evolves in a quasi-equilibrium manner under a time-dependent bias. We introduce an iterative unbiasing scheme that makes efficient use of all the trajectory data and that does not require the distribution to be evaluated on a grid. The method can thus be used even when the bias has a high dimensionality. We benchmark this approach against some of the existing schemes on model systems with different complexity and dimensionality.},
author = {Giberti, F. and Cheng, Bingqing and Tribello, G. A. and Ceriotti, M.},
issn = {1549-9626},
journal = {Journal of Chemical Theory and Computation},
number = {1},
pages = {100--107},
publisher = {American Chemical Society},
title = {{Iterative unbiasing of quasi-equilibrium sampling}},
doi = {10.1021/acs.jctc.9b00907},
volume = {16},
year = {2019},
}
@article{9689,
abstract = {A central goal of computational physics and chemistry is to predict material properties by using first-principles methods based on the fundamental laws of quantum mechanics. However, the high computational costs of these methods typically prevent rigorous predictions of macroscopic quantities at finite temperatures, such as heat capacity, density, and chemical potential. Here, we enable such predictions by marrying advanced free-energy methods with data-driven machine-learning interatomic potentials. We show that, for the ubiquitous and technologically essential system of water, a first-principles thermodynamic description not only leads to excellent agreement with experiments, but also reveals the crucial role of nuclear quantum fluctuations in modulating the thermodynamic stabilities of different phases of water.},
author = {Cheng, Bingqing and Engel, Edgar A. and Behler, Jörg and Dellago, Christoph and Ceriotti, Michele},
issn = {1091-6490},
journal = {Proceedings of the National Academy of Sciences},
number = {4},
pages = {1110--1115},
publisher = {National Academy of Sciences},
title = {{Ab initio thermodynamics of liquid and solid water}},
doi = {10.1073/pnas.1815117116},
volume = {116},
year = {2019},
}