@article{996,
  abstract  = {Iodine (I$_2$) molecules embedded in He nanodroplets are aligned by a 160 ps long laser pulse. The highest degree of alignment, occurring at the peak of the pulse and quantified by $\langle\cos^2\theta_{2D}\rangle$, is measured as a function of the laser intensity. The results are well described by $\langle\cos^2\theta_{2D}\rangle$ calculated for a gas of isolated molecules each with an effective rotational constant of 0.6 times the gas-phase value, and at a temperature of 0.4 K. Theoretical analysis using the angulon quasiparticle to describe rotating molecules in superfluid helium rationalizes why the alignment mechanism is similar to that of isolated molecules with an effective rotational constant. A major advantage of molecules in He droplets is that their 0.4 K temperature leads to stronger alignment than what can generally be achieved for gas phase molecules -- here demonstrated by a direct comparison of the droplet results to measurements on a $\sim$1 K supersonic beam of isolated molecules. This point is further illustrated for more complex system by measurements on 1,4-diiodobenzene and 1,4-dibromobenzene. For all three molecular species studied the highest values of $\langle\cos^2\theta_{2D}\rangle$ achieved in He droplets exceed 0.96.},
  author    = {Shepperson, Benjamin and Chatterley, Adam and Søndergaard, Anders and Christiansen, Lars and Lemeshko, Mikhail and Stapelfeldt, Henrik},
  issn      = {0021-9606},
  journal   = {The Journal of Chemical Physics},
  number    = {1},
  publisher = {AIP},
  title     = {Strongly Aligned Molecules inside Helium Droplets in the Near-Adiabatic Regime},
  doi       = {10.1063/1.4983703},
  volume    = {147},
  year      = {2017},
}
@article{997,
  abstract  = {Recently it was shown that molecules rotating in superfluid helium can be described in terms of the angulon quasiparticles (Phys. Rev. Lett. 118, 095301 (2017)). Here we demonstrate that in the experimentally realized regime the angulon can be seen as a point charge on a 2-sphere interacting with a gauge field of a non-abelian magnetic monopole. Unlike in several other settings, the gauge fields of the angulon problem emerge in the real coordinate space, as opposed to the momentum space or some effective parameter space. Furthermore, we find a topological transition associated with making the monopole abelian, which takes place in the vicinity of the previously reported angulon instabilities. These results pave the way for studying topological phenomena in experiments on molecules trapped in superfluid helium nanodroplets, as well as on other realizations of orbital impurity problems.},
  author    = {Yakaboylu, Enderalp and Deuchert, Andreas and Lemeshko, Mikhail},
  issn      = {0031-9007},
  journal   = {Physical Review Letters},
  number    = {23},
  pages     = {235301},
  publisher = {American Physical Society},
  title     = {Emergence of Non-{Abelian} Magnetic Monopoles in a Quantum Impurity Problem},
  doi       = {10.1103/PhysRevLett.119.235301},
  volume    = {119},
  year      = {2017},
}
@inproceedings{998,
  abstract  = {A major open problem on the road to artificial intelligence is the development of incrementally learning systems that learn about more and more concepts over time from a stream of data. In this work, we introduce a new training strategy, iCaRL, that allows learning in such a class-incremental way: only the training data for a small number of classes has to be present at the same time and new classes can be added progressively. iCaRL learns strong classifiers and a data representation simultaneously. This distinguishes it from earlier works that were fundamentally limited to fixed data representations and therefore incompatible with deep learning architectures. We show by experiments on CIFAR-100 and ImageNet ILSVRC 2012 data that iCaRL can learn many classes incrementally over a long period of time where other strategies quickly fail.},
  author    = {Rebuffi, Sylvestre-Alvise and Kolesnikov, Alexander and Sperl, Georg and Lampert, Christoph},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  isbn      = {978-153860457-1},
  location  = {Honolulu, HI, United States},
  pages     = {5533--5542},
  publisher = {IEEE},
  title     = {{iCaRL}: Incremental Classifier and Representation Learning},
  doi       = {10.1109/CVPR.2017.587},
  volume    = {2017},
  year      = {2017},
}
@inproceedings{999,
  abstract  = {In multi-task learning, a learner is given a collection of prediction tasks and needs to solve all of them. In contrast to previous work, which required that annotated training data must be available for all tasks, we consider a new setting, in which for some tasks, potentially most of them, only unlabeled training data is provided. Consequently, to solve all tasks, information must be transferred between tasks with labels and tasks without labels. Focusing on an instance-based transfer method we analyze two variants of this setting: when the set of labeled tasks is fixed, and when it can be actively selected by the learner. We state and prove a generalization bound that covers both scenarios and derive from it an algorithm for making the choice of labeled tasks (in the active case) and for transferring information between the tasks in a principled way. We also illustrate the effectiveness of the algorithm on synthetic and real data.},
  author    = {Pentina, Anastasia and Lampert, Christoph},
  booktitle = {Proceedings of the 34th International Conference on Machine Learning},
  series    = {Proceedings of Machine Learning Research},
  isbn      = {978-151085514-4},
  location  = {Sydney, Australia},
  pages     = {2807--2816},
  publisher = {PMLR},
  title     = {Multi-Task Learning with Labeled and Unlabeled Tasks},
  volume    = {70},
  year      = {2017},
}
@article{392,
  abstract  = {We used femtosecond optical pump-probe spectroscopy to study the photoinduced change in reflectivity of thin films of the electron-doped cuprate La$_{2-x}$Ce$_x$CuO$_4$ (LCCO) with dopings of $x=0.08$ (underdoped) and $x=0.11$ (optimally doped). Above $T_c$, we observe fluence-dependent relaxation rates that begin at a temperature similar to the one where transport measurements first show signatures of antiferromagnetic correlations. Upon suppressing superconductivity with a magnetic field, it is found that the fluence and temperature dependence of relaxation rates are consistent with bimolecular recombination of electrons and holes across a gap ($2\Delta_{AF}$) originating from antiferromagnetic correlations which comprise the pseudogap in electron-doped cuprates. This can be used to learn about coupling between electrons and high-energy ($\omega > 2\Delta_{AF}$) excitations in these compounds and set limits on the time scales on which antiferromagnetic correlations are static.},
  author    = {Vishik, Inna and Mahmood, Fahad and Alpichshev, Zhanybek and Gedik, Nuh and Higgins, Joshua and Greene, Richard},
  journal   = {Physical Review B},
  number    = {11},
  pages     = {115125},
  publisher = {American Physical Society},
  title     = {Ultrafast Dynamics in the Presence of Antiferromagnetic Correlations in Electron-Doped Cuprate {La$_{2-x}$Ce$_x$CuO$_{4\pm\delta}$}},
  doi       = {10.1103/PhysRevB.95.115125},
  volume    = {95},
  year      = {2017},
}
@article{393,
  abstract  = {We use a three-pulse ultrafast optical spectroscopy to study the relaxation processes in a frustrated Mott insulator Na$_2$IrO$_3$. By being able to independently produce the out-of-equilibrium bound states (excitons) of doublons and holons with the first pulse and suppress the underlying antiferromagnetic order with the second one, we were able to elucidate the relaxation mechanism of quasiparticles in this system. By observing the difference in the exciton dynamics in the magnetically ordered and disordered phases we found that the mass of this quasiparticle is mostly determined by its interaction with the surrounding spins.},
  author    = {Alpichshev, Zhanybek and Sie, Edbert and Mahmood, Fahad and Cao, Gang and Gedik, Nuh},
  journal   = {Physical Review B},
  number    = {23},
  pages     = {235141},
  publisher = {American Physical Society},
  title     = {Origin of the Exciton Mass in the Frustrated {Mott} Insulator {Na$_2$IrO$_3$}},
  doi       = {10.1103/PhysRevB.96.235141},
  volume    = {96},
  year      = {2017},
}
@incollection{424,
  abstract  = {We show that very weak topological assumptions are enough to ensure the existence of a Helly-type theorem. More precisely, we show that for any non-negative integers $b$ and $d$ there exists an integer $h(b,d)$ such that the following holds. If $\mathcal{F}$ is a finite family of subsets of $\mathbb{R}^d$ such that $\beta_i(\bigcap\mathcal{G}) \le b$ for any $\mathcal{G} \subsetneq \mathcal{F}$ and every $0 \le i \le \lceil d/2 \rceil - 1$ then $\mathcal{F}$ has Helly number at most $h(b,d)$. Here $\beta_i$ denotes the reduced $\mathbb{Z}_2$-Betti numbers (with singular homology). These topological conditions are sharp: not controlling any of these $\lceil d/2 \rceil$ first Betti numbers allow for families with unbounded Helly number. Our proofs combine homological non-embeddability results with a Ramsey-based approach to build, given an arbitrary simplicial complex $K$, some well-behaved chain map $C_*(K) \to C_*(\mathbb{R}^d)$.},
  author    = {Goaoc, Xavier and Paták, Pavel and Patáková, Zuzana and Tancer, Martin and Wagner, Uli},
  booktitle = {A Journey through Discrete Mathematics: A Tribute to Jiří Matoušek},
  editor    = {Loebl, Martin and Nešetřil, Jaroslav and Thomas, Robin},
  isbn      = {978-331944479-6},
  pages     = {407--447},
  publisher = {Springer},
  title     = {Bounding {Helly} Numbers via {Betti} Numbers},
  doi       = {10.1007/978-3-319-44479-6_17},
  year      = {2017},
}
@inproceedings{431,
  abstract  = {Parallel implementations of stochastic gradient descent (SGD) have received significant research attention, thanks to its excellent scalability properties. A fundamental barrier when parallelizing SGD is the high bandwidth cost of communicating gradient updates between nodes; consequently, several lossy compression heuristics have been proposed, by which nodes only communicate quantized gradients. Although effective in practice, these heuristics do not always converge. In this paper, we propose Quantized SGD (QSGD), a family of compression schemes with convergence guarantees and good practical performance. QSGD allows the user to smoothly trade off communication bandwidth and convergence time: nodes can adjust the number of bits sent per iteration, at the cost of possibly higher variance. We show that this trade-off is inherent, in the sense that improving it past some threshold would violate information-theoretic lower bounds. QSGD guarantees convergence for convex and non-convex objectives, under asynchrony, and can be extended to stochastic variance-reduced techniques. When applied to training deep neural networks for image classification and automated speech recognition, QSGD leads to significant reductions in end-to-end training time. For instance, on 16 GPUs, we can train the ResNet-152 network to full accuracy on ImageNet 1.8$\times$ faster than the full-precision variant.},
  author    = {Alistarh, Dan-Adrian and Grubic, Demjan and Li, Jerry and Tomioka, Ryota and Vojnović, Milan},
  booktitle = {Advances in Neural Information Processing Systems 30},
  issn      = {1049-5258},
  location  = {Long Beach, CA, United States},
  pages     = {1710--1721},
  publisher = {Neural Information Processing Systems Foundation, Inc.},
  title     = {{QSGD}: Communication-Efficient {SGD} via Gradient Quantization and Encoding},
  volume    = {2017},
  year      = {2017},
}
@inproceedings{432,
  abstract  = {Recently there has been significant interest in training machine-learning models at low precision: by reducing precision, one can reduce computation and communication by one order of magnitude. We examine training at reduced precision, both from a theoretical and practical perspective, and ask: is it possible to train models at end-to-end low precision with provable guarantees? Can this lead to consistent order-of-magnitude speedups? We mainly focus on linear models, and the answer is yes for linear models. We develop a simple framework called ZipML based on one simple but novel strategy called double sampling. Our ZipML framework is able to execute training at low precision with no bias, guaranteeing convergence, whereas naive quantization would introduce significant bias. We validate our framework across a range of applications, and show that it enables an FPGA prototype that is up to 6.5$\times$ faster than an implementation using full 32-bit precision. We further develop a variance-optimal stochastic quantization strategy and show that it can make a significant difference in a variety of settings. When applied to linear models together with double sampling, we save up to another 1.7$\times$ in data movement compared with uniform quantization. When training deep networks with quantized models, we achieve higher accuracy than the state-of-the-art XNOR-Net.},
  author    = {Zhang, Hantian and Li, Jerry and Kara, Kaan and Alistarh, Dan-Adrian and Liu, Ji and Zhang, Ce},
  booktitle = {Proceedings of the 34th International Conference on Machine Learning},
  series    = {Proceedings of Machine Learning Research},
  isbn      = {978-151085514-4},
  location  = {Sydney, Australia},
  pages     = {4035--4043},
  publisher = {PMLR},
  title     = {{ZipML}: Training Linear Models with End-to-End Low Precision, and a Little Bit of Deep Learning},
  volume    = {70},
  year      = {2017},
}
@article{443,
  abstract  = {Pancreatic cancer has a five-year survival rate of $\sim$8\%, with characteristic molecular heterogeneity and restricted treatment options. Targeting metabolism has emerged as a potentially effective therapeutic strategy for cancers such as pancreatic cancer, which are driven by genetic alterations that are not tractable drug targets. Although somatic mitochondrial genome (mtDNA) mutations have been observed in various tumors types, understanding of metabolic genotype-phenotype relationships is limited.},
  author    = {Hardie, Rae and Van Dam, Ellen and Cowley, Mark and Han, Ting and Balaban, Seher and Pajic, Marina and Pinese, Mark and Iconomou, Mary and Shearer, Robert and Mckenna, Jessie and Miller, David and Waddell, Nicola and Pearson, John and Grimmond, Sean and Sazanov, Leonid A. and Biankin, Andrew and Villas Boas, Silas and Hoy, Andrew and Turner, Nigel and Saunders, Darren},
  journal   = {Cancer \& Metabolism},
  number    = {2},
  publisher = {BioMed Central},
  title     = {Mitochondrial Mutations and Metabolic Adaptation in Pancreatic Cancer},
  doi       = {10.1186/s40170-017-0164-1},
  volume    = {5},
  year      = {2017},
}