@inproceedings{10748, abstract = {The study of fluxoid states and fluxoid dynamics in mesoscopic iron-based superconducting rings is valuable for characterizing the basic properties of the superconductor, and may also provide important insight into the superconducting pairing symmetry. We report the fabrications of micron-sized rings and disks from thin films of Fe(Se, Te) grown by molecular beam epitaxy. In order to study fluxoid states in rings we developed a custom-tailored version of magnetic force microscopy (MFM). This technique has a number of qualitative advantages for working with mesoscopic superconducting samples in comparison to the conventional MFM and other imaging techniques. We observed metastable fluxoid states in rings of different sizes. Thermally activated fluxoid dynamics of these states was studied and modeled. In addition, we found different regimes of interaction between Fe(Se, Te) ring and MFM tip which are explained. Possibilities of the existence of exotic vortex states and proposals for experiments to test the symmetry of the superconducting order parameter in iron based superconductors are analyzed.}, author = {Polshyn, Hryhoriy and Zhang, Can and Naibert, Tyler and Eckstein, James and Budakian, Raffi}, booktitle = {APS March Meeting 2015}, issn = {0003-0503}, location = {San Antonio, TX, United States}, number = {1}, publisher = {American Physical Society}, title = {{Study of Fe (Se, Te) micron-sized rings by magnetic force microscopy}}, volume = {60}, year = {2015}, } @article{10794, abstract = {Mathematical models are of fundamental importance in the understanding of complex population dynamics. For instance, they can be used to predict the population evolution starting from different initial conditions or to test how a system responds to external perturbations. 
For this analysis to be meaningful in real applications, however, it is of paramount importance to choose an appropriate model structure and to infer the model parameters from measured data. While many parameter inference methods are available for models based on deterministic ordinary differential equations, the same does not hold for more detailed individual-based models. Here we consider, in particular, stochastic models in which the time evolution of the species abundances is described by a continuous-time Markov chain. These models are governed by a master equation that is typically difficult to solve. Consequently, traditional inference methods that rely on iterative evaluation of parameter likelihoods are computationally intractable. The aim of this paper is to present recent advances in parameter inference for continuous-time Markov chain models, based on a moment closure approximation of the parameter likelihood, and to investigate how these results can help in understanding, and ultimately controlling, complex systems in ecology. Specifically, we illustrate through an agricultural pest case study how parameters of a stochastic individual-based model can be identified from measured data and how the resulting model can be used to solve an optimal control problem in a stochastic setting. 
In particular, we show how the matter of determining the optimal combination of two different pest control methods can be formulated as a chance constrained optimization problem where the control action is modeled as a state reset, leading to a hybrid system formulation.}, author = {Parise, Francesca and Lygeros, John and Ruess, Jakob}, issn = {2296-665X}, journal = {Frontiers in Environmental Science}, keywords = {General Environmental Science}, publisher = {Frontiers}, title = {{Bayesian inference for stochastic individual-based models of ecological systems: a pest control simulation study}}, doi = {10.3389/fenvs.2015.00042}, volume = {3}, year = {2015}, } @inproceedings{10796, abstract = {We consider concurrent mean-payoff games, a very well-studied class of two-player (player 1 vs player 2) zero-sum games on finite-state graphs where every transition is assigned a reward between 0 and 1, and the payoff function is the long-run average of the rewards. The value is the maximal expected payoff that player 1 can guarantee against all strategies of player 2. 
We consider the computation of the set of states with value 1 under finite-memory strategies for player 1, and our main results for the problem are as follows: (1) we present a polynomial-time algorithm; (2) we show that whenever there is a finite-memory strategy, there is a stationary strategy that does not need memory at all; and (3) we present an optimal bound (which is double exponential) on the patience of stationary strategies (where patience of a distribution is the inverse of the smallest positive probability and represents a complexity measure of a stationary strategy).}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus}, booktitle = {Proceedings of the Twenty-Sixth Annual ACM-SIAM Symposium on Discrete Algorithms}, isbn = {978-161197374-7}, location = {San Diego, CA, United States}, number = {1}, pages = {1018--1029}, publisher = {SIAM}, title = {{The value 1 problem under finite-memory strategies for concurrent mean-payoff games}}, doi = {10.1137/1.9781611973730.69}, volume = {2015}, year = {2015}, } @article{1106, abstract = {Circumferential skin creases Kunze type (CSC-KT) is a specific congenital entity with an unknown genetic cause. The disease phenotype comprises characteristic circumferential skin creases accompanied by intellectual disability, a cleft palate, short stature, and dysmorphic features. Here, we report that mutations in either MAPRE2 or TUBB underlie the genetic origin of this syndrome. MAPRE2 encodes a member of the microtubule end-binding family of proteins that bind to the guanosine triphosphate cap at growing microtubule plus ends, and TUBB encodes a β-tubulin isotype that is expressed abundantly in the developing brain. Functional analyses of the TUBB mutants show multiple defects in the chaperone-dependent tubulin heterodimer folding and assembly pathway that leads to a compromised yield of native heterodimers. The TUBB mutations also have an impact on microtubule dynamics. 
For MAPRE2, we show that the mutations result in enhanced MAPRE2 binding to microtubules, implying an increased dwell time at microtubule plus ends. Further, in vivo analysis of MAPRE2 mutations in a zebrafish model of craniofacial development shows that the variants most likely perturb the patterning of branchial arches, either through excessive activity (under a recessive paradigm) or through haploinsufficiency (dominant de novo paradigm). Taken together, our data add CSC-KT to the growing list of tubulinopathies and highlight how multiple inheritance paradigms can affect dosage-sensitive biological systems so as to result in the same clinical defect.}, author = {Isrie, Mala and Breuss, Martin and Tian, Guoling and Hansen, Andi H and Cristofoli, Francesca and Morandell, Jasmin and Kupchinsky, Zachari A and Sifrim, Alejandro and Rodriguez Rodriguez, Celia and Dapena, Elena P and Doonanco, Kurston and Leonard, Norma and Tinsa, Faten and Moortgat, Stéphanie and Ulucan, Hakan and Koparir, Erkan and Karaca, Ender and Katsanis, Nicholas and Marton, Valeria and Vermeesch, Joris R and Davis, Erica E and Cowan, Nicholas J and Keays, David and Van Esch, Hilde}, journal = {The American Journal of Human Genetics}, number = {6}, pages = {790--800}, publisher = {Cell Press}, title = {{Mutations in either TUBB or MAPRE2 cause circumferential skin creases Kunze type}}, doi = {10.1016/j.ajhg.2015.10.014}, volume = {97}, year = {2015}, } @article{11079, abstract = {Aging is a major risk factor for many human diseases, and in vitro generation of human neurons is an attractive approach for modeling aging-related brain disorders. However, modeling aging in differentiated human neurons has proved challenging. We generated neurons from human donors across a broad range of ages, either by iPSC-based reprogramming and differentiation or by direct conversion into induced neurons (iNs). 
While iPSCs and derived neurons did not retain aging-associated gene signatures, iNs displayed age-specific transcriptional profiles and revealed age-associated decreases in the nuclear transport receptor RanBP17. We detected an age-dependent loss of nucleocytoplasmic compartmentalization (NCC) in donor fibroblasts and corresponding iNs and found that reduced RanBP17 impaired NCC in young cells, while iPSC rejuvenation restored NCC in aged cells. These results show that iNs retain important aging-related signatures, thus allowing modeling of the aging process in vitro, and they identify impaired NCC as an important factor in human aging.}, author = {Mertens, Jerome and Paquola, Apuã C.M. and Ku, Manching and Hatch, Emily and Böhnke, Lena and Ladjevardi, Shauheen and McGrath, Sean and Campbell, Benjamin and Lee, Hyungjun and Herdy, Joseph R. and Gonçalves, J. Tiago and Toda, Tomohisa and Kim, Yongsung and Winkler, Jürgen and Yao, Jun and HETZER, Martin W and Gage, Fred H.}, issn = {1934-5909}, journal = {Cell Stem Cell}, keywords = {Cell Biology, Genetics, Molecular Medicine}, number = {6}, pages = {705--718}, publisher = {Elsevier}, title = {{Directly reprogrammed human neurons retain aging-associated transcriptomic signatures and reveal age-related nucleocytoplasmic defects}}, doi = {10.1016/j.stem.2015.09.001}, volume = {17}, year = {2015}, } @article{11077, abstract = {Nucleoporins (Nups) are a family of proteins best known as the constituent building blocks of nuclear pore complexes (NPCs), membrane-embedded channels that mediate nuclear transport across the nuclear envelope. Recent evidence suggests that several Nups have additional roles in controlling the activation and silencing of developmental genes; however, the mechanistic details of these functions remain poorly understood. Here, we show that depletion of Nup153 in mouse embryonic stem cells (mESCs) causes the derepression of developmental genes and induction of early differentiation. 
This loss of stem cell identity is not associated with defects in the nuclear import of key pluripotency factors. Rather, Nup153 binds around the transcriptional start site (TSS) of developmental genes and mediates the recruitment of the polycomb-repressive complex 1 (PRC1) to a subset of its target loci. Our results demonstrate a chromatin-associated role of Nup153 in maintaining stem cell pluripotency by functioning in mammalian epigenetic gene silencing.}, author = {Jacinto, Filipe V. and Benner, Chris and HETZER, Martin W}, issn = {1549-5477}, journal = {Genes & Development}, keywords = {Developmental Biology, Genetics}, number = {12}, pages = {1224--1238}, publisher = {Cold Spring Harbor Laboratory}, title = {{The nucleoporin Nup153 regulates embryonic stem cell pluripotency through gene silencing}}, doi = {10.1101/gad.260919.115}, volume = {29}, year = {2015}, } @article{11078, abstract = {Aging is associated with the decline of protein, cell, and organ function. Here, we use an integrated approach to characterize gene expression, bulk translation, and cell biology in the brains and livers of young and old rats. We identify 468 differences in protein abundance between young and old animals. The majority are a consequence of altered translation output, that is, the combined effect of changes in transcript abundance and translation efficiency. In addition, we identify 130 proteins whose overall abundance remains unchanged but whose sub-cellular localization, phosphorylation state, or splice-form varies. While some protein-level differences appear to be a generic property of the rats’ chronological age, the majority are specific to one organ. These may be a consequence of the organ’s physiology or the chronological age of the cells within the tissue. Taken together, our study provides an initial view of the proteome at the molecular, sub-cellular, and organ level in young and old rats.}, author = {Ori, Alessandro and Toyama, Brandon H. and Harris, Michael S. 
and Bock, Thomas and Iskar, Murat and Bork, Peer and Ingolia, Nicholas T. and HETZER, Martin W and Beck, Martin}, issn = {2405-4712}, journal = {Cell Systems}, keywords = {Cell Biology, Histology, Pathology and Forensic Medicine}, number = {3}, pages = {224--237}, publisher = {Elsevier}, title = {{Integrated transcriptome and proteome analyses reveal organ-specific proteome deterioration in old rats}}, doi = {10.1016/j.cels.2015.08.012}, volume = {1}, year = {2015}, } @article{11075, abstract = {Previously, we identified the nucleoporin gp210/Nup210 as a critical regulator of muscle and neuronal differentiation, but how this nucleoporin exerts its function and whether it modulates nuclear pore complex (NPC) activity remain unknown. Here, we show that gp210/Nup210 mediates muscle cell differentiation in vitro via its conserved N-terminal domain that extends into the perinuclear space. Removal of the C-terminal domain, which partially mislocalizes gp210/Nup210 away from NPCs, efficiently rescues the differentiation defect caused by the knockdown of endogenous gp210/Nup210. Unexpectedly, a gp210/Nup210 mutant lacking the NPC-targeting transmembrane and C-terminal domains is sufficient for C2C12 myoblast differentiation. We demonstrate that the endoplasmic reticulum (ER) stress-specific caspase cascade is exacerbated during Nup210 depletion and that blocking ER stress-mediated apoptosis rescues differentiation of Nup210-deficient cells. Our results suggest that the role of gp210/Nup210 in cell differentiation is mediated by its large luminal domain, which can act independently of NPC association and appears to play a pivotal role in the maintenance of nuclear envelope/ER homeostasis.}, author = {Gomez-Cavazos, J. 
Sebastian and HETZER, Martin W}, issn = {1540-8140}, journal = {Journal of Cell Biology}, keywords = {Cell Biology}, number = {6}, pages = {671--681}, publisher = {Rockefeller University Press}, title = {{The nucleoporin gp210/Nup210 controls muscle differentiation by regulating nuclear envelope/ER homeostasis}}, doi = {10.1083/jcb.201410047}, volume = {208}, year = {2015}, } @article{11076, abstract = {Nuclear pore complexes (NPCs) are composed of several copies of ∼30 different proteins called nucleoporins (Nups). NPCs penetrate the nuclear envelope (NE) and regulate the nucleocytoplasmic trafficking of macromolecules. Beyond this vital role, NPC components influence genome functions in a transport-independent manner. Nups play an evolutionarily conserved role in gene expression regulation that, in metazoans, extends into the nuclear interior. Additionally, in proliferative cells, Nups play a crucial role in genome integrity maintenance and mitotic progression. Here we discuss genome-related functions of Nups and their impact on essential DNA metabolism processes such as transcription, chromosome duplication, and segregation.}, author = {Ibarra, Arkaitz and HETZER, Martin W}, issn = {1549-5477}, journal = {Genes & Development}, keywords = {Developmental Biology, Genetics}, number = {4}, pages = {337--349}, publisher = {Cold Spring Harbor Laboratory}, title = {{Nuclear pore proteins and the control of genome functions}}, doi = {10.1101/gad.256495.114}, volume = {29}, year = {2015}, } @article{11073, abstract = {Human cancer cells bear complex chromosome rearrangements that can be potential drivers of cancer development. However, the molecular mechanisms underlying these rearrangements have been unclear. Zhang et al. use a new technique combining live-cell imaging and single-cell sequencing to demonstrate that chromosomes mis-segregated to micronuclei frequently undergo chromothripsis-like rearrangements in the subsequent cell cycle.}, author = {Hatch, Emily M. 
and HETZER, Martin W}, issn = {0092-8674}, journal = {Cell}, keywords = {General Biochemistry, Genetics and Molecular Biology}, number = {7}, pages = {1502--1504}, publisher = {Elsevier}, title = {{Linking micronuclei to chromosome fragmentation}}, doi = {10.1016/j.cell.2015.06.005}, volume = {161}, year = {2015}, } @article{11074, author = {Hatch, Emily M. and HETZER, Martin W}, issn = {0960-9822}, journal = {Current Biology}, keywords = {General Agricultural and Biological Sciences, General Biochemistry, Genetics and Molecular Biology}, number = {10}, pages = {R397--R399}, publisher = {Elsevier}, title = {{Chromothripsis}}, doi = {10.1016/j.cub.2015.02.033}, volume = {25}, year = {2015}, } @article{11519, abstract = {Faint Lyα emitters become increasingly rarer toward the reionization epoch (z ∼ 6–7). However, observations from a very large (∼5 deg2) Lyα narrow-band survey at z = 6.6 show that this is not the case for the most luminous emitters, capable of ionizing their own local bubbles. Here we present follow-up observations of the two most luminous Lyα candidates in the COSMOS field: “MASOSA” and “CR7.” We used X-SHOOTER, SINFONI, and FORS2 on the Very Large Telescope, and DEIMOS on Keck, to confirm both candidates beyond any doubt. We find redshifts of z = 6.541 and z = 6.604 for “MASOSA” and “CR7,” respectively. MASOSA has a strong detection in Lyα with a line width of 386 ± 30 km s−1 (FWHM) and with very high EW0 (>200 Å), but undetected in the continuum, implying very low stellar mass and a likely young, metal-poor stellar population. “CR7,” with an observed Lyα luminosity of 1043.92±0.05 erg s−1 is the most luminous Lyα emitter ever found at z > 6 and is spatially extended (∼16 kpc). “CR7” reveals a narrow Lyα line with 266 ± 15 km s−1 FWHM, being detected in the near-infrared (NIR) (rest-frame UV; β = −2.3 ± 0.1) and in IRAC/Spitzer. 
We detect a narrow He II 1640 Å emission line (6σ, FWHM = 130 ± 30 km s−1 ) in CR7 which can explain the clear excess seen in the J-band photometry (EW0 ∼ 80 Å). We find no other emission lines from the UV to the NIR in our X-SHOOTER spectra (He II/O III] 1663 Å > 3 and He II/C III] 1908 Å > 2.5). We conclude that CR7 is best explained by a combination of a PopIII-like population, which dominates the rest-frame UV and the nebular emission, and a more normal stellar population, which presumably dominates the mass. Hubble Space Telescope/WFC3 observations show that the light is indeed spatially separated between a very blue component, coincident with Lyα and He II emission, and two red components (∼5 kpc away), which dominate the mass. Our findings are consistent with theoretical predictions of a PopIII wave, with PopIII star formation migrating away from the original sites of star formation.}, author = {Sobral, David and Matthee, Jorryt J and Darvish, Behnam and Schaerer, Daniel and Mobasher, Bahram and Röttgering, Huub and Santos, Sérgio and Hemmati, Shoubaneh}, issn = {1538-4357}, journal = {The Astrophysical Journal}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, dark ages, reionization, first stars – early universe – galaxies: evolution}, number = {2}, pages = {139}, publisher = {IOP Publishing}, title = {{Evidence for PopIII-like stellar populations in the most luminous Lyα emitters at the epoch of reionisation: Spectroscopic confirmation}}, doi = {10.1088/0004-637X/808/2/139}, volume = {808}, year = {2015}, } @article{11580, abstract = {We present results from the largest contiguous narrow-band survey in the near-infrared. We have used the wide-field infrared camera/Canada–France–Hawaii Telescope and the lowOH2 filter (1.187 ± 0.005 μm) to survey ≈10 deg2 of contiguous extragalactic sky in the SA22 field. A total of ∼6000 candidate emission-line galaxies are found. We use deep ugrizJK data to obtain robust photometric redshifts. 
We combine our data with the High-redshift(Z) Emission Line Survey (HiZELS), explore spectroscopic surveys (VVDS, VIPERS) and obtain our own spectroscopic follow-up with KMOS, FMOS and MOSFIRE to derive large samples of high-redshift emission-line selected galaxies: 3471 Hα emitters at z = 0.8, 1343 [O III] + Hβ emitters at z = 1.4 and 572 [O II] emitters at z = 2.2. We probe comoving volumes of >106 Mpc3 and find significant overdensities, including an 8.5σ (spectroscopically confirmed) overdensity of Hα emitters at z = 0.81. We derive Hα, [O III] + Hβ and [O II] luminosity functions at z = 0.8, 1.4, 2.2, respectively, and present implications for future surveys such as Euclid. Our uniquely large volumes/areas allow us to subdivide the samples in thousands of randomized combinations of areas and provide a robust empirical measurement of sample/cosmic variance. We show that surveys for star-forming/emission-line galaxies at a depth similar to ours can only overcome cosmic-variance (errors <10 per cent) if they are based on volumes >5 × 105 Mpc3; errors on L* and ϕ* due to sample (cosmic) variance on surveys probing ∼104 and ∼105 Mpc3 are typically very high: ∼300 and ∼40–60 per cent, respectively.}, author = {Sobral, D. and Matthee, Jorryt J and Best, P. N. and Smail, I. and Khostovan, A. A. and Milvang-Jensen, B. and Kim, J.-W. and Stott, J. and Calhau, J. and Nayyeri, H. 
and Mobasher, B.}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: formation, galaxies: luminosity function, mass function, cosmology: observations, early Universe, large-scale structure of Universe}, number = {3}, pages = {2303--2323}, publisher = {Oxford University Press}, title = {{CF-HiZELS, an ∼10 deg2 emission-line survey with spectroscopic follow-up: Hα, [O III] + Hβ and [O II] luminosity functions at z = 0.8, 1.4 and 2.2 }}, doi = {10.1093/mnras/stv1076}, volume = {451}, year = {2015}, } @article{11581, abstract = {Using wide-field narrow-band surveys, we provide a new measurement of the z = 6.6 Lymanα emitter (LAE) luminosity function (LF), which constraints the bright end for the first time. We use a combination of archival narrow-band NB921 data in UDS and new NB921 measurements in SA22 and COSMOS/UltraVISTA, all observed with the Subaru telescope, with a total area of ∼5 deg2. We exclude lower redshift interlopers by using broad-band optical and near-infrared photometry and also exclude three supernovae with data split over multiple epochs. Combining the UDS and COSMOS samples, we find no evolution of the bright end of the Lyα LF between z = 5.7 and 6.6, which is supported by spectroscopic follow-up, and conclude that sources with Himiko-like luminosity are not as rare as previously thought, with number densities of ∼1.5 × 10−5 Mpc−3. Combined with our wide-field SA22 measurements, our results indicate a non-Schechter-like bright end of the LF at z = 6.6 and a different evolution of observed faint and bright LAEs, overcoming cosmic variance. This differential evolution is also seen in the spectroscopic follow-up of UV-selected galaxies and is now also confirmed for LAEs, and we argue that it may be an effect of reionization. 
Using a toy model, we show that such differential evolution of the LF is expected, since brighter sources are able to ionize their surroundings earlier, such that Lyα photons are able to escape. Our targets are excellent candidates for detailed follow-up studies and provide the possibility to give a unique view on the earliest stages in the formation of galaxies and reionization process.}, author = {Matthee, Jorryt J and Sobral, David and Santos, Sérgio and Röttgering, Huub and Darvish, Behnam and Mobasher, Bahram}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics}, number = {1}, pages = {400--417}, publisher = {Oxford University Press}, title = {{Identification of the brightest Lyα emitters at z = 6.6: implications for the evolution of the luminosity function in the reionization era}}, doi = {10.1093/mnras/stv947}, volume = {451}, year = {2015}, } @article{11579, abstract = {CR7 is the brightest z = 6.6 Ly α emitter (LAE) known to date, and spectroscopic follow-up by Sobral et al. suggests that CR7 might host Population (Pop) III stars. We examine this interpretation using cosmological hydrodynamical simulations. Several simulated galaxies show the same ‘Pop III wave’ pattern observed in CR7. However, to reproduce the extreme CR7 Ly α/He II1640 line luminosities (⁠Lα/HeII⁠) a top-heavy initial mass function and a massive ( ≳ 107 M⊙) Pop III burst with age ≲ 2 Myr are required. Assuming that the observed properties of Ly α and He II emission are typical for Pop III, we predict that in the COSMOS/UDS/SA22 fields, 14 out of the 30 LAEs at z = 6.6 with Lα > 1043.3 erg s−1 should also host Pop III stars producing an observable LHeII≳1042.7ergs−1⁠. As an alternate explanation, we explore the possibility that CR7 is instead powered by accretion on to a direct collapse black hole. 
Our model predicts Lα, LHeII⁠, and X-ray luminosities that are in agreement with the observations. In any case, the observed properties of CR7 indicate that this galaxy is most likely powered by sources formed from pristine gas. We propose that further X-ray observations can distinguish between the two above scenarios.}, author = {Pallottini, A. and Ferrara, A. and Pacucci, F. and Gallerani, S. and Salvadori, S. and Schneider, R. and Schaerer, D. and Sobral, D. and Matthee, Jorryt J}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, black hole physics, stars: Population III, galaxies: high-redshift}, number = {3}, pages = {2465--2470}, publisher = {Oxford University Press}, title = {{The brightest Lyα emitter: Pop III or black hole?}}, doi = {10.1093/mnras/stv1795}, volume = {453}, year = {2015}, } @article{11668, abstract = {We study multiple keyword sponsored search auctions with budgets. Each keyword has multiple ad slots with a click-through rate. The bidders have additive valuations, which are linear in the click-through rates, and budgets, which are restricting their overall payments. Additionally, the number of slots per keyword assigned to a bidder is bounded. We show the following results: (1) We give the first mechanism for multiple keywords, where click-through rates differ among slots. Our mechanism is incentive compatible in expectation, individually rational in expectation, and Pareto optimal. (2) We study the combinatorial setting, where each bidder is only interested in a subset of the keywords. We give an incentive compatible, individually rational, Pareto-optimal, and deterministic mechanism for identical click-through rates. 
(3) We give an impossibility result for incentive compatible, individually rational, Pareto-optimal, and deterministic mechanisms for bidders with diminishing marginal valuations.}, author = {Colini-Baldeschi, Riccardo and Leonardi, Stefano and Henzinger, Monika H and Starnberger, Martin}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Algorithms, Economics, Clinching ascending auction, auctions with budgets, Sponsored search auctions}, number = {1}, publisher = {Association for Computing Machinery}, title = {{On multiple keyword sponsored search auctions with budgets}}, doi = {10.1145/2818357}, volume = {4}, year = {2015}, } @article{11669, abstract = {We study individual rational, Pareto-optimal, and incentive compatible mechanisms for auctions with heterogeneous items and budget limits. We consider settings with multiunit demand and additive valuations. For single-dimensional valuations we prove a positive result for randomized mechanisms, and a negative result for deterministic mechanisms. While the positive result allows for private budgets, the negative result is for public budgets. For multidimensional valuations and public budgets we prove an impossibility result that applies to deterministic and randomized mechanisms. Taken together this shows the power of randomization in certain settings with heterogeneous items, but it also shows its limitations.}, author = {Dütting, Paul and Henzinger, Monika H and Starnberger, Martin}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Algorithmic game theory, auction theory, Clinching auction, Pareto optimality, Budget limits}, number = {1}, publisher = {Association for Computing Machinery}, title = {{Auctions for heterogeneous items and budget limits}}, doi = {10.1145/2818351}, volume = {4}, year = {2015}, } @article{11670, abstract = {Auctions are widely used on the Web. 
Applications range from sponsored search to platforms such as eBay. In these and in many other applications the auctions in use are single-/multi-item auctions with unit demand. The main drawback of standard mechanisms for this type of auctions, such as VCG and GSP, is the limited expressiveness that they offer to the bidders. The General Auction Mechanism (GAM) of Aggarwal et al. [2009] takes a first step toward addressing the problem of limited expressiveness by computing a bidder optimal, envy-free outcome for linear utility functions with identical slopes and a single discontinuity per bidder-item pair. We show that in many practical situations this does not suffice to adequately model the preferences of the bidders, and we overcome this problem by presenting the first mechanism for piecewise linear utility functions with nonidentical slopes and multiple discontinuities. Our mechanism runs in polynomial time. Like GAM it is incentive compatible for inputs that fulfill a certain nondegeneracy assumption, but our requirement is more general than the requirement of GAM. For discontinuous utility functions that are nondegenerate as well as for continuous utility functions the outcome of our mechanism is a competitive equilibrium. We also show how our mechanism can be used to compute approximately bidder optimal, envy-free outcomes for a general class of continuous utility functions via piecewise linear approximation. 
Finally, we prove hardness results for even more expressive settings.}, author = {Dütting, Paul and Henzinger, Monika H and Weber, Ingmar}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Computational Mathematics, Marketing, Economics and Econometrics, Statistics and Probability, Computer Science (miscellaneous)}, number = {1}, publisher = {Association for Computing Machinery}, title = {{An expressive mechanism for auctions on the web}}, doi = {10.1145/2716312}, volume = {4}, year = {2015}, } @inproceedings{11774, abstract = {Combinatorial auctions (CA) are a well-studied area in algorithmic mechanism design. However, contrary to the standard model, empirical studies suggest that a bidder’s valuation often does not depend solely on the goods assigned to him. For instance, in adwords auctions an advertiser might not want his ads to be displayed next to his competitors’ ads. In this paper, we propose and analyze several natural graph-theoretic models that incorporate such negative externalities, in which bidders form a directed conflict graph with maximum out-degree Δ. We design algorithms and truthful mechanisms for social welfare maximization that attain approximation ratios depending on Δ. For CA, our results are twofold: (1) A lottery that eliminates conflicts by discarding bidders/items independent of the bids. It allows to apply any truthful 𝛼-approximation mechanism for conflict-free valuations and yields an 𝒪(𝛼Δ)-approximation mechanism. (2) For fractionally sub-additive valuations, we design a rounding algorithm via a novel combination of a semi-definite program and a linear program, resulting in a cone program; the approximation ratio is 𝒪((ΔloglogΔ)/logΔ). The ratios are almost optimal given existing hardness results. For adwords auctions, we present several algorithms for the most relevant scenario when the number of items is small. 
In particular, we design a truthful mechanism with approximation ratio 𝑜(Δ) when the number of items is only logarithmic in the number of bidders.}, author = {Cheung, Yun Kuen and Henzinger, Monika H and Hoefer, Martin and Starnberger, Martin}, booktitle = {11th International Conference on Web and Internet Economics}, isbn = {9783662489949}, issn = {0302-9743}, location = {Amsterdam, Netherlands}, pages = {230--243}, publisher = {Springer Nature}, title = {{Combinatorial auctions with conflict-based externalities}}, doi = {10.1007/978-3-662-48995-6_17}, volume = {9470}, year = {2015}, } @inproceedings{11773, abstract = {Ad exchanges are an emerging platform for trading advertisement slots on the web with billions of dollars revenue per year. Every time a user visits a web page, the publisher of that web page can ask an ad exchange to auction off the ad slots on this page to determine which advertisements are shown at which price. Due to the high volume of traffic, ad networks typically act as mediators for individual advertisers at ad exchanges. If multiple advertisers in an ad network are interested in the ad slots of the same auction, the ad network might use a “local” auction to resell the obtained ad slots among its advertisers. In this work we want to deepen the theoretical understanding of these new markets by analyzing them from the viewpoint of combinatorial auctions. Prior work studied mostly single-item auctions, while we allow the advertisers to express richer preferences over multiple items. We develop a game-theoretic model for the entanglement of the central auction at the ad exchange with the local auctions at the ad networks. We consider the incentives of all three involved parties and suggest a three-party competitive equilibrium, an extension of the Walrasian equilibrium that ensures envy-freeness for all participants. 
We show the existence of a three-party competitive equilibrium and a polynomial-time algorithm to find one for gross-substitute bidder valuations.}, author = {Ben-Zwi, Oren and Henzinger, Monika H and Loitzenbauer, Veronika}, booktitle = {11th International Conference on Web and Internet Economics}, isbn = {9783662489949}, issn = {0302-9743}, location = {Amsterdam, Netherlands}, pages = {104–117}, publisher = {Springer Nature}, title = {{Ad exchange: Envy-free auctions with mediators}}, doi = {10.1007/978-3-662-48995-6_8}, volume = {9470}, year = {2015}, } @inproceedings{11785, abstract = {Recently we presented the first algorithm for maintaining the set of nodes reachable from a source node in a directed graph that is modified by edge deletions with 𝑜(𝑚𝑛) total update time, where 𝑚 is the number of edges and 𝑛 is the number of nodes in the graph [Henzinger et al. STOC 2014]. The algorithm is a combination of several different algorithms, each for a different 𝑚 vs. 𝑛 trade-off. For the case of 𝑚=Θ(𝑛1.5) the running time is 𝑂(𝑛2.47), just barely below 𝑚𝑛=Θ(𝑛2.5). In this paper we simplify the previous algorithm using new algorithmic ideas and achieve an improved running time of 𝑂̃ (min(𝑚7/6𝑛2/3,𝑚3/4𝑛5/4+𝑜(1),𝑚2/3𝑛4/3+𝑜(1)+𝑚3/7𝑛12/7+𝑜(1))). This gives, e.g., 𝑂(𝑛2.36) for the notorious case 𝑚=Θ(𝑛1.5). We obtain the same upper bounds for the problem of maintaining the strongly connected components of a directed graph undergoing edge deletions. 
Our algorithms are correct with high probability against an oblivious adversary.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {725 -- 736}, publisher = {Springer Nature}, title = {{Improved algorithms for decremental single-source reachability on directed graphs}}, doi = {10.1007/978-3-662-47672-7_59}, volume = {9134}, year = {2015}, } @inproceedings{11787, abstract = {We present faster algorithms for computing the 2-edge and 2-vertex strongly connected components of a directed graph. While in undirected graphs the 2-edge and 2-vertex connected components can be found in linear time, in directed graphs with m edges and n vertices only rather simple O(m n)-time algorithms were known. We use a hierarchical sparsification technique to obtain algorithms that run in time 𝑂(𝑛2). For 2-edge strongly connected components our algorithm gives the first running time improvement in 20 years. Additionally we present an 𝑂(𝑚2/log𝑛)-time algorithm for 2-edge strongly connected components, and thus improve over the O(m n) running time also when 𝑚=𝑂(𝑛). 
Our approach extends to k-edge and k-vertex strongly connected components for any constant k with a running time of 𝑂(𝑛2log𝑛) for k-edge-connectivity and 𝑂(𝑛3) for k-vertex-connectivity.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Loitzenbauer, Veronika}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {713 -- 724}, publisher = {Springer Nature}, title = {{Finding 2-edge and 2-vertex strongly connected components in quadratic time}}, doi = {10.1007/978-3-662-47672-7_58}, volume = {9134}, year = {2015}, } @inproceedings{11788, abstract = {Ad exchanges are becoming an increasingly popular way to sell advertisement slots on the internet. An ad exchange is basically a spot market for ad impressions. A publisher who has already signed contracts reserving advertisement impressions on his pages can choose between assigning a new ad impression for a new page view to a contracted advertiser or to sell it at an ad exchange. This leads to an online revenue maximization problem for the publisher. Given a new impression to sell decide whether (a) to assign it to a contracted advertiser and if so to which one or (b) to sell it at the ad exchange and if so at which reserve price. 
We make no assumptions about the distribution of the advertiser valuations that participate in the ad exchange and show that there exists a simple primal-dual based online algorithm, whose lower bound for the revenue converges to 𝑅𝐴𝐷𝑋+𝑅𝐴(1−1/𝑒), where 𝑅𝐴𝐷𝑋 is the revenue that the optimum algorithm achieves from the ad exchange and 𝑅𝐴 is the revenue that the optimum algorithm achieves from the contracted advertisers.}, author = {Dvořák, Wolfgang and Henzinger, Monika H}, booktitle = {12th International Workshop of Approximation and Online Algorithms}, issn = {0302-9743}, location = {Wroclaw, Poland}, pages = {156–167}, publisher = {Springer Nature}, title = {{Online ad assignment with an ad exchange}}, doi = {10.1007/978-3-319-18263-6_14}, volume = {8952}, year = {2015}, } @inproceedings{11786, abstract = {In this paper, we develop a dynamic version of the primal-dual method for optimization problems, and apply it to obtain the following results. (1) For the dynamic set-cover problem, we maintain an 𝑂(𝑓2)-approximately optimal solution in 𝑂(𝑓⋅log(𝑚+𝑛)) amortized update time, where 𝑓 is the maximum “frequency” of an element, 𝑛 is the number of sets, and 𝑚 is the maximum number of elements in the universe at any point in time. (2) For the dynamic 𝑏-matching problem, we maintain an 𝑂(1)-approximately optimal solution in 𝑂(log3𝑛) amortized update time, where 𝑛 is the number of nodes in the graph.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Italiano, Giuseppe F.}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {206 -- 218}, publisher = {Springer Nature}, title = {{Design of dynamic algorithms via primal-dual method}}, doi = {10.1007/978-3-662-47672-7_17}, volume = {9134}, year = {2015}, } @article{11845, abstract = {Phylogenetic diversity (PD) is a measure of biodiversity based on the evolutionary history of species. 
Here, we discuss several optimization problems related to the use of PD, and the more general measure split diversity (SD), in conservation prioritization. Depending on the conservation goal and the information available about species, one can construct optimization routines that incorporate various conservation constraints. We demonstrate how this information can be used to select sets of species for conservation action. Specifically, we discuss the use of species' geographic distributions, the choice of candidates under economic pressure, and the use of predator–prey interactions between the species in a community to define viability constraints. Despite such optimization problems falling into the area of NP hard problems, it is possible to solve them in a reasonable amount of time using integer programming. We apply integer linear programming to a variety of models for conservation prioritization that incorporate the SD measure. We exemplarily show the results for two data sets: the Cape region of South Africa and a Caribbean coral reef community. Finally, we provide user-friendly software at http://www.cibiv.at/software/pda.}, author = {Chernomor, Olga and Minh, Bui Quang and Forest, Félix and Klaere, Steffen and Ingram, Travis and Henzinger, Monika H and von Haeseler, Arndt}, issn = {2041-210X}, journal = {Methods in Ecology and Evolution}, number = {1}, pages = {83--91}, publisher = {Wiley}, title = {{Split diversity in constrained conservation prioritization using integer linear programming}}, doi = {10.1111/2041-210x.12299}, volume = {6}, year = {2015}, } @inproceedings{11868, abstract = {Consider the following Online Boolean Matrix-Vector Multiplication problem: We are given an n x n matrix M and will receive n column-vectors of size n, denoted by v1, ..., vn, one by one. After seeing each vector vi, we have to output the product Mvi before we can see the next vector. 
A naive algorithm can solve this problem using O(n3) time in total, and its running time can be slightly improved to O(n3/log2 n) [Williams SODA'07]. We show that a conjecture that there is no truly subcubic (O(n3-ε)) time algorithm for this problem can be used to exhibit the underlying polynomial time hardness shared by many dynamic problems. For a number of problems, such as subgraph connectivity, Pagh's problem, d-failure connectivity, decremental single-source shortest paths, and decremental transitive closure, this conjecture implies tight hardness results. Thus, proving or disproving this conjecture will be very interesting as it will either imply several tight unconditional lower bounds or break through a common barrier that blocks progress with these problems. This conjecture might also be considered as strong evidence against any further improvement for these problems since refuting it will imply a major breakthrough for combinatorial Boolean matrix multiplication and other long-standing problems if the term "combinatorial algorithms" is interpreted as "Strassen-like algorithms" [Ballard et al. SPAA'11]. The conjecture also leads to hardness results for problems that were previously based on diverse problems and conjectures -- such as 3SUM, combinatorial Boolean matrix multiplication, triangle detection, and multiphase -- thus providing a uniform way to prove polynomial hardness results for dynamic algorithms; some of the new proofs are also simpler or even become trivial. 
The conjecture also leads to stronger and new, non-trivial, hardness results, e.g., for the fully-dynamic densest subgraph and diameter problems.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon and Saranurak, Thatchaphol}, booktitle = {47th Annual ACM Symposium on Theory of Computing}, isbn = {978-145033536-2}, issn = {0737-8017}, location = {Portland, OR, United States}, publisher = {Association for Computing Machinery}, title = {{Unifying and strengthening hardness for dynamic problems via the online matrix-vector multiplication conjecture}}, doi = {10.1145/2746539.2746609}, year = {2015}, } @inproceedings{11869, abstract = {While in many graph mining applications it is crucial to handle a stream of updates efficiently in terms of both time and space, not much was known about achieving such type of algorithm. In this paper we study this issue for a problem which lies at the core of many graph mining applications called densest subgraph problem. We develop an algorithm that achieves time- and space-efficiency for this problem simultaneously. It is one of the first of its kind for graph problems to the best of our knowledge. Given an input graph, the densest subgraph is the subgraph that maximizes the ratio between the number of edges and the number of nodes. For any ε>0, our algorithm can, with high probability, maintain a (4+ε)-approximate solution under edge insertions and deletions using ~O(n) space and ~O(1) amortized time per update; here, $n$ is the number of nodes in the graph and ~O hides the O(polylog_{1+ε} n) term. The approximation ratio can be improved to (2+ε) with more time. It can be extended to a (2+ε)-approximation sublinear-time algorithm and a distributed-streaming algorithm. Our algorithm is the first streaming algorithm that can maintain the densest subgraph in one pass. Prior to this, no algorithm could do so even in the special case of an incremental stream and even when there is no time restriction. 
The previously best algorithm in this setting required O(log n) passes [BahmaniKV12]. The space required by our algorithm is tight up to a polylogarithmic factor.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon and Tsourakakis, Charalampos}, booktitle = {47th Annual ACM Symposium on Theory of Computing}, isbn = {978-145033536-2}, issn = {0737-8017}, location = {Portland, OR, United States}, pages = {173 -- 182}, publisher = {Association for Computing Machinery}, title = {{Space- and time-efficient algorithm for maintaining dense subgraphs on one-pass dynamic streams}}, doi = {10.1145/2746539.2746592}, year = {2015}, } @inproceedings{11837, abstract = {Online social networks allow the collection of large amounts of data about the influence between users connected by a friendship-like relationship. When distributing items among agents forming a social network, this information allows us to exploit network externalities that each agent receives from his neighbors that get the same item. In this paper we consider Friends-of-Friends (2-hop) network externalities, i.e., externalities that not only depend on the neighbors that get the same item but also on neighbors of neighbors. For these externalities we study a setting where multiple different items are assigned to unit-demand agents. Specifically, we study the problem of welfare maximization under different types of externality functions. Let n be the number of agents and m be the number of items. Our contributions are the following: (1) We show that welfare maximization is APX-hard; we show that even for step functions with 2-hop (and also with 1-hop) externalities it is NP-hard to approximate social welfare better than (1-1/e). 
(2) On the positive side we present (i) an $O(\sqrt n)$-approximation algorithm for general concave externality functions, (ii) an $O(\log m)$-approximation algorithm for linear externality functions, and (iii) an $(1-1/e)\frac{1}{6}$-approximation algorithm for 2-hop step function externalities. We also improve the result from [6] for 1-hop step function externalities by giving a (1-1/e)/2-approximation algorithm.}, author = {Bhattacharya, Sayan and Dvořák, Wolfgang and Henzinger, Monika H and Starnberger, Martin}, booktitle = {32nd International Symposium on Theoretical Aspects of Computer Science}, isbn = {978-3-939897-78-1}, issn = {1868-8969}, location = {Garching, Germany}, pages = {90--102}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Welfare maximization with friends-of-friends network externalities}}, doi = {10.4230/LIPICS.STACS.2015.90}, volume = {30}, year = {2015}, } @article{11901, abstract = {We consider auctions of indivisible items to unit-demand bidders with budgets. This setting was suggested as an expressive model for single sponsored search auctions. Prior work presented mechanisms that compute bidder-optimal outcomes and are truthful for a restricted set of inputs, i.e., inputs in so-called general position. This condition is easily violated. We provide the first mechanism that is truthful in expectation for all inputs and achieves for each bidder no worse utility than the bidder-optimal outcome. 
Additionally we give a complete characterization for which inputs mechanisms that compute bidder-optimal outcomes are truthful.}, author = {Henzinger, Monika H and Loitzenbauer, Veronika}, issn = {0304-3975}, journal = {Theoretical Computer Science}, pages = {1--15}, publisher = {Elsevier}, title = {{Truthful unit-demand auctions with budgets revisited}}, doi = {10.1016/j.tcs.2015.01.033}, volume = {573}, year = {2015}, } @article{11962, abstract = {One of the rare alternative reagents for the reduction of carbon–carbon double bonds is diimide (HNNH), which can be generated in situ from hydrazine hydrate (N2H4⋅H2O) and O2. Although this selective method is extremely clean and powerful, it is rarely used, as the rate-determining oxidation of hydrazine in the absence of a catalyst is relatively slow using conventional batch protocols. A continuous high-temperature/high-pressure methodology dramatically enhances the initial oxidation step, at the same time allowing for a safe and scalable processing of the hazardous reaction mixture. Simple alkenes can be selectively reduced within 10–20 min at 100–120 °C and 20 bar O2 pressure. The development of a multi-injection reactor platform for the periodic addition of N2H4⋅H2O enables the reduction of less reactive olefins even at lower reaction temperatures. This concept was utilized for the highly selective reduction of artemisinic acid to dihydroartemisinic acid, the precursor molecule for the semisynthesis of the antimalarial drug artemisinin. The industrially relevant reduction was achieved by using four consecutive liquid feeds (of N2H4⋅H2O) and residence time units resulting in a highly selective reduction within approximately 40 min at 60 °C and 20 bar O2 pressure, providing dihydroartemisinic acid in ≥93 % yield and ≥95 % selectivity.}, author = {Pieber, Bartholomäus and Glasnov, Toma and Kappe, C. 
Oliver}, issn = {1521-3765}, journal = {Chemistry - A European Journal}, number = {11}, pages = {4368--4376}, publisher = {Wiley}, title = {{Continuous flow reduction of artemisinic acid utilizing multi-injection strategies-closing the gap towards a fully continuous synthesis of antimalarial drugs}}, doi = {10.1002/chem.201406439}, volume = {21}, year = {2015}, } @article{11977, abstract = {The development of a continuous flow multistep strategy for the synthesis of linear peptoids and their subsequent macrocyclization via Click chemistry is described. The central transformation of this process is an Ugi four-component reaction generating the peptidomimetic core structure. In order to avoid exposure to the often toxic and malodorous isocyanide building blocks, the continuous approach was telescoped by the dehydration of the corresponding formamide. In a concurrent operation, the highly energetic azide moiety required for the subsequent intramolecular copper-catalyzed azide–alkyne cycloaddition (Click reaction) was installed by nucleophilic substitution from a bromide precursor. All steps yielding to the linear core structures can be conveniently coupled without the need for purification steps resulting in a single process generating the desired peptidomimetics in good to excellent yields within a 25 min reaction time. The following macrocyclization was realized in a coil reactor made of copper without any additional additive. A careful process intensification study demonstrated that this transformation occurs quantitatively within 25 min at 140 °C. Depending on the resulting ring strain, either a dimeric or a monomeric form of the cyclic product was obtained.}, author = {Salvador, Carlos Eduardo M. and Pieber, Bartholomäus and Neu, Philipp M. and Torvisco, Ana and Kleber Z. Andrade, Carlos and Kappe, C. 
Oliver}, issn = {1520-6904}, journal = {The Journal of Organic Chemistry}, number = {9}, pages = {4590--4602}, publisher = {American Chemical Society}, title = {{A sequential Ugi multicomponent/Cu-catalyzed azide–alkyne cycloaddition approach for the continuous flow generation of cyclic peptoids}}, doi = {10.1021/acs.joc.5b00445}, volume = {80}, year = {2015}, } @inbook{11989, abstract = {In recent years, the high demand for sustainable processes resulted in the development of highly attractive oxidation protocols utilizing molecular oxygen or even air instead of more uneconomic and often toxic reagents. The application of these sustainable, gaseous oxidants in conventional batch reactors is often associated with severe safety risks and process challenges especially on larger scales. Continuous flow technology offers the possibility to minimize these safety hazards and concurrently allows working in high-temperature/high-pressure regimes to access highly efficient oxidation protocols. This review article critically discusses recent literature examples of flow methodologies for selective aerobic oxidations of organic compounds. Several technologies and reactor designs for biphasic gas/liquid as well as supercritical reaction media are presented in detail. © Springer International Publishing Switzerland 2015.}, author = {Pieber, Bartholomäus and Kappe, C. Oliver}, booktitle = {Organometallic Flow Chemistry}, editor = {Noël, Timothy}, isbn = {9783319332413}, issn = {1616-8534}, pages = {97–136}, publisher = {Springer Nature}, title = {{Aerobic oxidations in continuous flow}}, doi = {10.1007/3418_2015_133}, volume = {57}, year = {2015}, } @article{120, abstract = {Clustering of fine particles is of crucial importance in settings ranging from the early stages of planet formation to the coagulation of industrial powders and airborne pollutants. Models of such clustering typically focus on inelastic deformation and cohesion. 
However, even in charge-neutral particle systems comprising grains of the same dielectric material, tribocharging can generate large amounts of net positive or negative charge on individual particles, resulting in long-range electrostatic forces. The effects of such forces on cluster formation are not well understood and have so far not been studied in situ. Here we report the first observations of individual collide-and-capture events between charged submillimetre particles, including Kepler-like orbits. Charged particles can become trapped in their mutual electrostatic energy well and aggregate via multiple bounces. This enables the initiation of clustering at relative velocities much larger than the upper limit for sticking after a head-on collision, a long-standing issue known from pre-planetary dust aggregation. Moreover, Coulomb interactions together with dielectric polarization are found to stabilize characteristic molecule-like configurations, providing new insights for the modelling of clustering dynamics in a wide range of microscopic dielectric systems, such as charged polarizable ions, biomolecules and colloids.}, author = {Lee, Victor and Waitukaitis, Scott R and Miskin, Marc and Jaeger, Heinrich}, journal = {Nature Physics}, number = {9}, pages = {733 -- 737}, publisher = {Nature Publishing Group}, title = {{Direct observation of particle interactions and clustering in charged granular streams}}, doi = {10.1038/nphys3396}, volume = {11}, year = {2015}, } @article{121, abstract = {We show that the simplest building blocks of origami-based materials - rigid, degree-four vertices - are generically multistable. The existence of two distinct branches of folding motion emerging from the flat state suggests at least bistability, but we show how nonlinearities in the folding motions allow generic vertex geometries to have as many as five stable states. 
In special geometries with collinear folds and symmetry, more branches emerge leading to as many as six stable states. Tuning the fold energy parameters, we show how monostability is also possible. Finally, we show how to program the stability features of a single vertex into a periodic fold tessellation. The resulting metasheets provide a previously unanticipated functionality - tunable and switchable shape and size via multistability.}, author = {Waitukaitis, Scott R and Menaut, Rémi and Chen, Bryan and Van Hecke, Martin}, journal = {Physical Review Letters}, number = {5}, publisher = {American Physical Society}, title = {{Origami multistability: From single vertices to metasheets}}, doi = {10.1103/PhysRevLett.114.055503}, volume = {114}, year = {2015}, } @article{1311, abstract = {In this paper, we develop an energy method to study finite speed of propagation and waiting time phenomena for the stochastic porous media equation with linear multiplicative noise in up to three spatial dimensions. Based on a novel iteration technique and on stochastic counterparts of weighted integral estimates used in the deterministic setting, we formulate a sufficient criterion on the growth of initial data which locally guarantees a waiting time phenomenon to occur almost surely. Up to a logarithmic factor, this criterion coincides with the optimal criterion known from the deterministic setting. 
Our technique can be modified to prove finite speed of propagation as well.}, author = {Fischer, Julian and Grün, Günther}, journal = {SIAM Journal on Mathematical Analysis}, number = {1}, pages = {825 -- 854}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Finite speed of propagation and waiting times for the stochastic porous medium equation: A unifying approach}}, doi = {10.1137/140960578}, volume = {47}, year = {2015}, } @article{1314, abstract = {We derive a posteriori estimates for the modeling error caused by the assumption of perfect incompressibility in the incompressible Navier-Stokes equation: Real fluids are never perfectly incompressible but always feature at least some low amount of compressibility. Thus, their behavior is described by the compressible Navier-Stokes equation, the pressure being a steep function of the density. We rigorously estimate the difference between an approximate solution to the incompressible Navier-Stokes equation and any weak solution to the compressible Navier-Stokes equation in the sense of Lions (without assuming any additional regularity of solutions). Heuristics and numerical results suggest that our error estimates are of optimal order in the case of "well-behaved" flows and divergence-free approximations of the velocity field. Thus, we expect our estimates to justify the idealization of fluids as perfectly incompressible also in practical situations.}, author = {Fischer, Julian L}, journal = {SIAM Journal on Numerical Analysis}, number = {5}, pages = {2178 -- 2205}, publisher = {Society for Industrial and Applied Mathematics}, title = {{A posteriori modeling error estimates for the assumption of perfect incompressibility in the Navier-Stokes equation}}, doi = {10.1137/140966654}, volume = {53}, year = {2015}, } @article{1313, abstract = {We present an algorithm for the derivation of lower bounds on support propagation for a certain class of nonlinear parabolic equations. 
We proceed by combining the ideas in some recent papers by the author with the algorithmic construction of entropies due to Jüngel and Matthes, reducing the problem to a quantifier elimination problem. Due to its complexity, the quantifier elimination problem cannot be solved by present exact algorithms. However, by tackling the quantifier elimination problem numerically, in the case of the thin-film equation we are able to improve recent results by the author in the regime of strong slippage n ∈ (1, 2). For certain second-order doubly nonlinear parabolic equations, we are able to extend the known lower bounds on free boundary propagation to the case of irregular oscillatory initial data. Finally, we apply our method to a sixth-order quantum drift-diffusion equation, resulting in an upper bound on the time which it takes for the support to reach every point in the domain.}, author = {Fischer, Julian}, journal = {Interfaces and Free Boundaries}, number = {1}, pages = {1 -- 20}, publisher = {European Mathematical Society Publishing House}, title = {{Estimates on front propagation for nonlinear higher-order parabolic equations: An algorithmic approach}}, doi = {10.4171/IFB/331}, volume = {17}, year = {2015}, } @article{1316, abstract = {In the present work we introduce the notion of a renormalized solution for reaction–diffusion systems with entropy-dissipating reactions. We establish the global existence of renormalized solutions. In the case of integrable reaction terms our notion of a renormalized solution reduces to the usual notion of a weak solution. Our existence result in particular covers all reaction–diffusion systems involving a single reversible reaction with mass-action kinetics and (possibly species-dependent) Fick-law diffusion; more generally, it covers the case of systems of reversible reactions with mass-action kinetics which satisfy the detailed balance condition. 
For such equations the existence of any kind of solution in general was an open problem, thereby motivating the study of renormalized solutions.}, author = {Fischer, Julian}, journal = {Archive for Rational Mechanics and Analysis}, number = {1}, pages = {553 -- 587}, publisher = {Springer}, title = {{Global existence of renormalized solutions to entropy-dissipating reaction–diffusion systems}}, doi = {10.1007/s00205-015-0866-x}, volume = {218}, year = {2015}, } @article{1383, abstract = {In plants, vacuolar H+-ATPase (V-ATPase) activity acidifies both the trans-Golgi network/early endosome (TGN/EE) and the vacuole. This dual V-ATPase function has impeded our understanding of how the pH homeostasis within the plant TGN/EE controls exo- and endocytosis. Here, we show that the weak V-ATPase mutant deetiolated3 (det3) displayed a pH increase in the TGN/EE, but not in the vacuole, strongly impairing secretion and recycling of the brassinosteroid receptor and the cellulose synthase complexes to the plasma membrane, in contrast to mutants lacking tonoplast-localized V-ATPase activity only. The brassinosteroid insensitivity and the cellulose deficiency defects in det3 were tightly correlated with reduced Golgi and TGN/EE motility. 
Thus, our results provide strong evidence that acidification of the TGN/EE, but not of the vacuole, is indispensable for functional secretion and recycling in plants.}, author = {Yu, Luo and Scholl, Stefan and Doering, Anett and Yi, Zhang and Irani, Niloufer and Di Rubbo, Simone and Neumetzler, Lutz and Krishnamoorthy, Praveen and Van Houtte, Isabelle and Mylle, Evelien and Bischoff, Volker and Vernhettes, Samantha and Winne, Johan and Friml, Jirí and Stierhof, York and Schumacher, Karin and Persson, Staffan and Russinova, Eugenia}, journal = {Nature Plants}, number = {7}, publisher = {Nature Publishing Group}, title = {{V-ATPase activity in the TGN/EE is required for exocytosis and recycling in Arabidopsis}}, doi = {10.1038/nplants.2015.94}, volume = {1}, year = {2015}, } @inproceedings{1425, abstract = {In this work we aim at extending the theoretical foundations of lifelong learning. Previous work analyzing this scenario is based on the assumption that learning tasks are sampled i.i.d. from a task environment or limited to strongly constrained data distributions. Instead, we study two scenarios when lifelong learning is possible, even though the observed tasks do not form an i.i.d. sample: first, when they are sampled from the same environment, but possibly with dependencies, and second, when the task environment is allowed to change over time in a consistent way. In the first case we prove a PAC-Bayesian theorem that can be seen as a direct generalization of the analogous previous result for the i.i.d. case. For the second scenario we propose to learn an inductive bias in form of a transfer procedure. We present a generalization bound and show on a toy example how it can be used to identify a beneficial transfer algorithm.}, author = {Pentina, Anastasia and Lampert, Christoph}, location = {Montreal, Canada}, pages = {1540 -- 1548}, publisher = {Neural Information Processing Systems}, title = {{Lifelong learning with non-i.i.d. 
tasks}}, volume = {2015}, year = {2015}, } @inproceedings{1424, abstract = {We consider the problem of statistical computations with persistence diagrams, a summary representation of topological features in data. These diagrams encode persistent homology, a widely used invariant in topological data analysis. While several avenues towards a statistical treatment of the diagrams have been explored recently, we follow an alternative route that is motivated by the success of methods based on the embedding of probability measures into reproducing kernel Hilbert spaces. In fact, a positive definite kernel on persistence diagrams has recently been proposed, connecting persistent homology to popular kernel-based learning techniques such as support vector machines. However, important properties of that kernel enabling a principled use in the context of probability measure embeddings remain to be explored. Our contribution is to close this gap by proving universality of a variant of the original kernel, and to demonstrate its effective use in twosample hypothesis testing on synthetic as well as real-world data.}, author = {Kwitt, Roland and Huber, Stefan and Niethammer, Marc and Lin, Weili and Bauer, Ulrich}, location = {Montreal, Canada}, pages = {3070 -- 3078}, publisher = {Neural Information Processing Systems}, title = {{Statistical topological data analysis-A kernel perspective}}, volume = {28}, year = {2015}, } @inproceedings{1430, abstract = {Evolutionary algorithms (EAs) form a popular optimisation paradigm inspired by natural evolution. In recent years the field of evolutionary computation has developed a rigorous analytical theory to analyse their runtime on many illustrative problems. Here we apply this theory to a simple model of natural evolution. In the Strong Selection Weak Mutation (SSWM) evolutionary regime the time between occurrence of new mutations is much longer than the time it takes for a new beneficial mutation to take over the population. 
In this situation, the population only contains copies of one genotype and evolution can be modelled as a (1+1)-type process where the probability of accepting a new genotype (improvements or worsenings) depends on the change in fitness. We present an initial runtime analysis of SSWM, quantifying its performance for various parameters and investigating differences to the (1+1) EA. We show that SSWM can have a moderate advantage over the (1+1) EA at crossing fitness valleys and study an example where SSWM outperforms the (1+1) EA by taking advantage of information on the fitness gradient.}, author = {Paixao, Tiago and Sudholt, Dirk and Heredia, Jorge and Trubenova, Barbora}, booktitle = {Proceedings of the 2015 Annual Conference on Genetic and Evolutionary Computation}, location = {Madrid, Spain}, pages = {1455 -- 1462}, publisher = {ACM}, title = {{First steps towards a runtime comparison of natural and artificial evolution}}, doi = {10.1145/2739480.2754758}, year = {2015}, } @inproceedings{1474, abstract = {Cryptographic access control offers selective access to encrypted data via a combination of key management and functionality-rich cryptographic schemes, such as attribute-based encryption. Using this approach, publicly available meta-data may inadvertently leak information on the access policy that is enforced by cryptography, which renders cryptographic access control unusable in settings where this information is highly sensitive. We begin to address this problem by presenting rigorous definitions for policy privacy in cryptographic access control. For concreteness we set our results in the model of Role-Based Access Control (RBAC), where we identify and formalize several different flavors of privacy, however, our framework should serve as inspiration for other models of access control. Based on our insights we propose a new system which significantly improves on the privacy properties of state-of-the-art constructions. 
Our design is based on a novel type of privacy-preserving attribute-based encryption, which we introduce and show how to instantiate. We present our results in the context of a cryptographic RBAC system by Ferrara et al. (CSF'13), which uses cryptography to control read access to files, while write access is still delegated to trusted monitors. We give an extension of the construction that permits cryptographic control over write access. Our construction assumes that key management uses out-of-band channels between the policy enforcer and the users but eliminates completely the need for monitoring read/write access to the data.}, author = {Ferrara, Anna and Fuchsbauer, Georg and Liu, Bin and Warinschi, Bogdan}, location = {Verona, Italy}, pages = {46--60}, publisher = {IEEE}, title = {{Policy privacy in cryptographic access control}}, doi = {10.1109/CSF.2015.11}, year = {2015}, } @misc{1473, abstract = {In this paper we survey geometric and arithmetic techniques to study the cohomology of semiprojective hyperkähler manifolds including toric hyperkähler varieties, Nakajima quiver varieties and moduli spaces of Higgs bundles on Riemann surfaces. The resulting formulae for their Poincaré polynomials are combinatorial and representation theoretical in nature. In particular we will look at their Betti numbers and will establish some results and state some expectations on their asymptotic shape.}, author = {Hausel, Tamás and Rodríguez Villegas, Fernando}, booktitle = {Astérisque}, number = {370}, pages = {113 -- 156}, publisher = {Société Mathématique de France}, title = {{Cohomology of large semiprojective hyperkähler varieties}}, volume = {2015}, year = {2015}, } @inproceedings{1483, abstract = {Topological data analysis offers a rich source of valuable information to study vision problems. Yet, so far we lack a theoretically sound connection to popular kernel-based learning techniques, such as kernel SVMs or kernel PCA. 
In this work, we establish such a connection by designing a multi-scale kernel for persistence diagrams, a stable summary representation of topological features in data. We show that this kernel is positive definite and prove its stability with respect to the 1-Wasserstein distance. Experiments on two benchmark datasets for 3D shape classification/retrieval and texture recognition show considerable performance gains of the proposed method compared to an alternative approach that is based on the recently introduced persistence landscapes.}, author = {Reininghaus, Jan and Huber, Stefan and Bauer, Ulrich and Kwitt, Roland}, location = {Boston, MA, USA}, pages = {4741 -- 4748}, publisher = {IEEE}, title = {{A stable multi-scale kernel for topological machine learning}}, doi = {10.1109/CVPR.2015.7299106}, year = {2015}, } @inproceedings{1498, abstract = {Fault-tolerant distributed algorithms play an important role in many critical/high-availability applications. These algorithms are notoriously difficult to implement correctly, due to asynchronous communication and the occurrence of faults, such as the network dropping messages or computers crashing. Nonetheless there is surprisingly little language and verification support to build distributed systems based on fault-tolerant algorithms. In this paper, we present some of the challenges that a designer has to overcome to implement a fault-tolerant distributed system. Then we review different models that have been proposed to reason about distributed algorithms and sketch how such a model can form the basis for a domain-specific programming language. Adopting a high-level programming model can simplify the programmer's life and make the code amenable to automated verification, while still compiling to efficiently executable code. 
We conclude by summarizing the current status of an ongoing language design and implementation project that is based on this idea.}, author = {Dragoi, Cezara and Henzinger, Thomas A and Zufferey, Damien}, isbn = {978-3-939897-80-4 }, location = {Asilomar, CA, United States}, pages = {90 -- 102}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{The need for language support for fault-tolerant distributed systems}}, doi = {10.4230/LIPIcs.SNAPL.2015.90}, volume = {32}, year = {2015}, } @article{1497, abstract = {Detecting allelic biases from high-throughput sequencing data requires an approach that maximises sensitivity while minimizing false positives. Here, we present Allelome.PRO, an automated user-friendly bioinformatics pipeline, which uses high-throughput sequencing data from reciprocal crosses of two genetically distinct mouse strains to detect allele-specific expression and chromatin modifications. Allelome.PRO extends approaches used in previous studies that exclusively analyzed imprinted expression to give a complete picture of the ‘allelome’ by automatically categorising the allelic expression of all genes in a given cell type into imprinted, strain-biased, biallelic or non-informative. Allelome.PRO offers increased sensitivity to analyze lowly expressed transcripts, together with a robust false discovery rate empirically calculated from variation in the sequencing data. We used RNA-seq data from mouse embryonic fibroblasts from F1 reciprocal crosses to determine a biologically relevant allelic ratio cutoff, and define for the first time an entire allelome. Furthermore, we show that Allelome.PRO detects differential enrichment of H3K4me3 over promoters from ChIP-seq data validating the RNA-seq results. 
This approach can be easily extended to analyze histone marks of active enhancers, or transcription factor binding sites and therefore provides a powerful tool to identify candidate cis regulatory elements genome wide.}, author = {Andergassen, Daniel and Dotter, Christoph and Kulinski, Tomasz and Guenzl, Philipp and Bammer, Philipp and Barlow, Denise and Pauler, Florian and Hudson, Quanah}, journal = {Nucleic Acids Research}, number = {21}, publisher = {Oxford University Press}, title = {{Allelome.PRO, a pipeline to define allele-specific genomic features from high-throughput sequencing data}}, doi = {10.1093/nar/gkv727}, volume = {43}, year = {2015}, } @inproceedings{1499, abstract = {We consider weighted automata with both positive and negative integer weights on edges and study the problem of synchronization using adaptive strategies that may only observe whether the current weight-level is negative or nonnegative. We show that the synchronization problem is decidable in polynomial time for deterministic weighted automata.}, author = {Kretinsky, Jan and Larsen, Kim and Laursen, Simon and Srba, Jiří}, location = {Madrid, Spain}, pages = {142 -- 154}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Polynomial time decidability of weighted synchronization under partial observability}}, doi = {10.4230/LIPIcs.CONCUR.2015.142}, volume = {42}, year = {2015}, } @inproceedings{1495, abstract = {Motivated by biological questions, we study configurations of equal-sized disks in the Euclidean plane that neither pack nor cover. Measuring the quality by the probability that a random point lies in exactly one disk, we show that the regular hexagonal grid gives the maximum among lattice configurations. 
}, author = {Edelsbrunner, Herbert and Iglesias Ham, Mabel and Kurlin, Vitaliy}, booktitle = {Proceedings of the 27th Canadian Conference on Computational Geometry}, location = {Ontario, Canada}, pages = {128--135}, publisher = {Queen's University}, title = {{Relaxed disk packing}}, volume = {2015-August}, year = {2015}, } @article{1504, abstract = {Let Q = (Q1, . . . , Qn) be a random vector drawn from the uniform distribution on the set of all n! permutations of {1, 2, . . . , n}. Let Z = (Z1, . . . , Zn), where Zj is the mean zero variance one random variable obtained by centralizing and normalizing Qj , j = 1, . . . , n. Assume that Xi , i = 1, . . . , p are i.i.d. copies of 1/√p Z and X = Xp,n is the p × n random matrix with Xi as its ith row. Then Sn = XX⊤ is called the p × p Spearman's rank correlation matrix which can be regarded as a high dimensional extension of the classical nonparametric statistic Spearman's rank correlation coefficient between two independent random variables. In this paper, we establish a CLT for the linear spectral statistics of this nonparametric random matrix model in the scenario of high dimension, namely, p = p(n) and p/n→c ∈ (0,∞) as n→∞. We propose a novel evaluation scheme to estimate the core quantity in Anderson and Zeitouni's cumulant method in [Ann. Statist. 36 (2008) 2553-2576] to bypass the so-called joint cumulant summability. In addition, we raise a two-step comparison approach to obtain the explicit formulae for the mean and covariance functions in the CLT. Relying on this CLT, we then construct a distribution-free statistic to test complete independence for components of random vectors. 
Owing to the nonparametric property, we can use this test on generally distributed random variables including the heavy-tailed ones.}, author = {Bao, Zhigang and Lin, Liang and Pan, Guangming and Zhou, Wang}, journal = {Annals of Statistics}, number = {6}, pages = {2588 -- 2623}, publisher = {Institute of Mathematical Statistics}, title = {{Spectral statistics of large dimensional Spearman's rank correlation matrix and its application}}, doi = {10.1214/15-AOS1353}, volume = {43}, year = {2015}, }