@article{11564, abstract = {We study the production rate of ionizing photons of a sample of 588 Hα emitters (HAEs) and 160 Lyman-α emitters (LAEs) at z = 2.2 in the COSMOS field in order to assess the implied emissivity from galaxies, based on their ultraviolet (UV) luminosity. By exploring the rest-frame Lyman Continuum (LyC) with GALEX/NUV data, we find fesc < 2.8 (6.4) per cent through median (mean) stacking. By combining the Hα luminosity density with intergalactic medium emissivity measurements from absorption studies, we find a globally averaged 〈fesc〉 of 5.9+14.5−4.2 per cent at z = 2.2 if we assume HAEs are the only source of ionizing photons. We find similarly low values of the global 〈fesc〉 at z ≈ 3–5, also ruling out a high 〈fesc〉 at z < 5. These low escape fractions allow us to measure ξion, the number of produced ionizing photons per unit UV luminosity, and investigate how this depends on galaxy properties. We find a typical ξion ≈ 1024.77 ± 0.04 Hz erg−1 for HAEs and ξion ≈ 1025.14 ± 0.09 Hz erg−1 for LAEs. LAEs and low-mass HAEs at z = 2.2 show similar values of ξion as typically assumed in the reionization era, while the typical HAE is three times less ionizing. Due to an increasing ξion with increasing EW(Hα), ξion likely increases with redshift. 
This evolution alone is fully in line with the observed evolution of ξion between z ≈ 2 and 5, indicating a typical value of ξion ≈ 1025.4 Hz erg−1 in the reionization era.}, author = {Matthee, Jorryt J and Sobral, David and Best, Philip and Khostovan, Ali Ahmad and Oteo, Iván and Bouwens, Rychard and Röttgering, Huub}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: high-redshift, cosmology: observations, dark ages, reionization, first stars}, number = {3}, pages = {3637--3655}, publisher = {Oxford University Press}, title = {{The production and escape of Lyman-Continuum radiation from star-forming galaxies at z ∼ 2 and their redshift evolution}}, doi = {10.1093/mnras/stw2973}, volume = {465}, year = {2017}, } @article{11567, abstract = {Recently, the C III] and C IV emission lines have been observed in galaxies in the early Universe (z > 5), providing new ways to measure their redshift and study their stellar populations and active galactic nuclei (AGN). We explore the first blind C II], C III] and C IV survey (z ∼ 0.68, 1.05, 1.53, respectively) presented in Stroe et al. (2017). We derive luminosity functions (LF) and study properties of C II], C III] and C IV line emitters through comparisons to the LFs of H α and Ly α emitters, UV selected star-forming (SF) galaxies and quasars at similar redshifts. The C II] LF at z ∼ 0.68 is equally well described by a Schechter or a power-law LF, characteristic of a mixture of SF and AGN activity. The C III] LF (z ∼ 1.05) is consistent to a scaled down version of the Schechter H α and Ly α LF at their redshift, indicating a SF origin. In stark contrast, the C IV LF at z ∼ 1.53 is well fit by a power-law, quasar-like LF. We find that the brightest UV sources (MUV < −22) will universally have C III] and C IV emission. 
However, on average, C III] and C IV are not as abundant as H α or Ly α emitters at the same redshift, with cosmic average ratios of ∼0.02–0.06 to H α and ∼0.01–0.1 to intrinsic Ly α. We predict that the C III] and C IV lines can only be truly competitive in confirming high-redshift candidates when the hosts are intrinsically bright and the effective Ly α escape fraction is below 1 per cent. While C III] and C IV were proposed as good tracers of young, relatively low-metallicity galaxies typical of the early Universe, we find that, at least at z ∼ 1.5, C IV is exclusively hosted by AGN/quasars, especially at large line equivalent widths.}, author = {Stroe, Andra and Sobral, David and Matthee, Jorryt J and Calhau, João and Oteo, Ivan}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: active, galaxies: high redshift, galaxies: luminosity function, mass function, quasars: emission lines, star formation, cosmology: observations}, number = {3}, pages = {2575--2586}, publisher = {Oxford University Press}, title = {{A 1.4 deg2 blind survey for C II], C III] and C IV at z ∼ 0.7–1.5 – II. Luminosity functions and cosmic average line ratios}}, doi = {10.1093/mnras/stx1713}, volume = {471}, year = {2017}, } @article{11565, abstract = {We use the hydrodynamical EAGLE simulation to study the magnitude and origin of the scatter in the stellar mass–halo mass relation for central galaxies. We separate cause and effect by correlating stellar masses in the baryonic simulation with halo properties in a matched dark matter only (DMO) simulation. The scatter in stellar mass increases with redshift and decreases with halo mass. At z = 0.1, it declines from 0.25 dex at M200, DMO ≈ 1011 M⊙ to 0.12 dex at M200, DMO ≈ 1013 M⊙, but the trend is weak above 1012 M⊙. For M200, DMO < 1012.5 M⊙ up to 0.04 dex of the scatter is due to scatter in the halo concentration. 
At fixed halo mass, a larger stellar mass corresponds to a more concentrated halo. This is likely because higher concentrations imply earlier formation times and hence more time for accretion and star formation, and/or because feedback is less efficient in haloes with higher binding energies. The maximum circular velocity, Vmax, DMO, and binding energy are therefore more fundamental properties than halo mass, meaning that they are more accurate predictors of stellar mass, and we provide fitting formulae for their relations with stellar mass. However, concentration alone cannot explain the total scatter in the Mstar−M200,DMO relation, and it does not explain the scatter in Mstar–Vmax, DMO. Halo spin, sphericity, triaxiality, substructure and environment are also not responsible for the remaining scatter, which thus could be due to more complex halo properties or non-linear/stochastic baryonic effects.}, author = {Matthee, Jorryt J and Schaye, Joop and Crain, Robert A. and Schaller, Matthieu and Bower, Richard and Theuns, Tom}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: formation, galaxies: haloes, cosmology: theory}, number = {2}, pages = {2381--2396}, publisher = {Oxford University Press}, title = {{The origin of scatter in the stellar mass–halo mass relation of central galaxies in the EAGLE simulation}}, doi = {10.1093/mnras/stw2884}, volume = {465}, year = {2017}, } @article{11561, abstract = {We present a sample of ∼1000 emission-line galaxies at z = 0.4–4.7 from the ∼0.7deg2 High-z Emission-Line Survey in the Boötes field identified with a suite of six narrow-band filters at ≈0.4–2.1 μm. These galaxies have been selected on their Ly α (73), [O II] (285), H β/[O III] (387) or H α (362) emission line, and have been classified with optical to near-infrared colours. 
A subsample of 98 sources have reliable redshifts from multiple narrow-band (e.g. [O II]–H α) detections and/or spectroscopy. In this survey paper, we present the observations, selection and catalogues of emitters. We measure number densities of Ly α, [O II], H β/[O III] and H α and confirm strong luminosity evolution in star-forming galaxies from z ∼ 0.4 to ∼5, in agreement with previous results. To demonstrate the usefulness of dual-line emitters, we use the sample of dual [O II]–H α emitters to measure the observed [O II]/H α ratio at z = 1.47. The observed [O II]/H α ratio increases significantly from 0.40 ± 0.01 at z = 0.1 to 0.52 ± 0.05 at z = 1.47, which we attribute to either decreasing dust attenuation with redshift, or due to a bias in the (typically) fibre measurements in the local Universe that only measure the central kpc regions. At the bright end, we find that both the H α and Ly α number densities at z ≈ 2.2 deviate significantly from a Schechter form, following a power law. 
We show that this is driven entirely by an increasing X-ray/active galactic nucleus fraction with line luminosity, which reaches ≈100 per cent at line luminosities L ≳ 3 × 1044 erg s−1.}, author = {Matthee, Jorryt J and Sobral, David and Best, Philip and Smail, Ian and Bian, Fuyan and Darvish, Behnam and Röttgering, Huub and Fan, Xiaohui}, issn = {0035-8711}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: active, galaxies: evolution, galaxies: high-redshift, galaxies: luminosity function, mass function, galaxies: star formation}, number = {1}, pages = {629--649}, publisher = {Oxford University Press}, title = {{Boötes-HiZELS: An optical to near-infrared survey of emission-line galaxies at z = 0.4–4.7}}, doi = {10.1093/mnras/stx1569}, volume = {471}, year = {2017}, } @article{11572, abstract = {We present spectroscopic follow-up of candidate luminous Ly α emitters (LAEs) at z = 5.7–6.6 in the SA22 field with VLT/X-SHOOTER. We confirm two new luminous LAEs at z = 5.676 (SR6) and z = 6.532 (VR7), and also present HST follow-up of both sources. These sources have luminosities LLy α ≈ 3 × 1043 erg s−1, very high rest-frame equivalent widths of EW0 ≳ 200 Å and narrow Ly α lines (200–340 km s−1). VR7 is the most UV-luminous LAE at z > 6.5, with M1500 = −22.5, even brighter in the UV than CR7. Besides Ly α, we do not detect any other rest-frame UV lines in the spectra of SR6 and VR7, and argue that rest-frame UV lines are easier to observe in bright galaxies with low Ly α equivalent widths. We confirm that Ly α line widths increase with Ly α luminosity at z = 5.7, while there are indications that Ly α lines of faint LAEs become broader at z = 6.6, potentially due to reionization. We find a large spread of up to 3 dex in UV luminosity for >L⋆ LAEs, but find that the Ly α luminosity of the brightest LAEs is strongly related to UV luminosity at z = 6.6. 
Under basic assumptions, we find that several LAEs at z ≈ 6–7 have Ly α escape fractions ≳ 100  per cent, indicating bursty star formation histories, alternative Ly α production mechanisms, or dust attenuating Ly α emission differently than UV emission. Finally, we present a method to compute ξion, the production efficiency of ionizing photons, and find that LAEs at z ≈ 6–7 have high values of log10(ξion/Hz erg−1) ≈ 25.51 ± 0.09 that may alleviate the need for high Lyman-Continuum escape fractions required for reionization.}, author = {Matthee, Jorryt J and Sobral, David and Darvish, Behnam and Santos, Sérgio and Mobasher, Bahram and Paulino-Afonso, Ana and Röttgering, Huub and Alegre, Lara}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution – galaxies: high-redshift, dark ages, reionization, first stars, cosmology: observations}, number = {1}, pages = {772--787}, publisher = {Oxford University Press}, title = {{Spectroscopic properties of luminous Ly α emitters at z ≈ 6–7 and comparison to the Lyman-break population}}, doi = {10.1093/mnras/stx2061}, volume = {472}, year = {2017}, } @article{11573, abstract = {We present dynamical measurements from the KMOS (K-band multi-object spectrograph) Deep Survey (KDS), which comprises 77 typical star-forming galaxies at z ≃ 3.5 in the mass range 9.0 < log (M⋆/M⊙) < 10.5. These measurements constrain the internal dynamics, the intrinsic velocity dispersions (σint) and rotation velocities (VC) of galaxies in the high-redshift Universe. The mean velocity dispersion of the galaxies in our sample is σint=70.8+3.3−3.1kms−1⁠, revealing that the increasing average σint with increasing redshift, reported for z ≲ 2, continues out to z ≃ 3.5. Only 36 ± 8 per cent of our galaxies are rotation-dominated (VC/σint > 1), with the sample average VC/σint value much smaller than at lower redshift. 
After carefully selecting comparable star-forming samples at multiple epochs, we find that the rotation-dominated fraction evolves with redshift with a z−0.2 dependence. The rotation-dominated KDS galaxies show no clear offset from the local rotation velocity–stellar mass (i.e. VC–M⋆) relation, although a smaller fraction of the galaxies are on the relation due to the increase in the dispersion-dominated fraction. These observations are consistent with a simple equilibrium model picture, in which random motions are boosted in high-redshift galaxies by a combination of the increasing gas fractions, accretion efficiency, specific star formation rate and stellar feedback and which may provide significant pressure support against gravity on the galactic disc scale.}, author = {Turner, O. J. and Cirasuolo, M. and Harrison, C. M. and McLure, R. J. and Dunlop, J. S. and Swinbank, A. M. and Johnson, H. L. and Sobral, D. and Matthee, Jorryt J and Sharples, R. M.}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: high-redshift, galaxies: kinematics and dynamics}, number = {2}, pages = {1280--1320}, publisher = {Oxford University Press}, title = {{The KMOS Deep Survey (KDS) – I. Dynamical measurements of typical star-forming galaxies at z ≃ 3.5}}, doi = {10.1093/mnras/stx1366}, volume = {471}, year = {2017}, } @unpublished{11633, abstract = {Our understanding of stars through asteroseismic data analysis is limited by our ability to take advantage of the huge amount of observed stars provided by space missions such as CoRoT, Kepler , K2, and soon TESS and PLATO. Global seismic pipelines provide global stellar parameters such as mass and radius using the mean seismic parameters, as well as the effective temperature. 
These pipelines are commonly used automatically on thousands of stars observed by K2 for 3 months (and soon TESS for at least ∼ 1 month). However, pipelines are not immune from misidentifying noise peaks and stellar oscillations. Therefore, new validation techniques are required to assess the quality of these results. We present a new metric called FliPer (Flicker in Power), which takes into account the average variability at all measured time scales. The proper calibration of FliPer enables us to obtain good estimations of global stellar parameters such as surface gravity that are robust against the influence of noise peaks and hence are an excellent way to find faults in asteroseismic pipelines.}, author = {Bugnet, Lisa Annabelle and Garcia, R. A. and Davies, G. R. and Mathur, S. and Corsaro, E.}, note = {arXiv:1711.02890}, keywords = {asteroseismology, methods: data analysis, stars: oscillations}, title = {{FliPer: Checking the reliability of global seismic parameters from automatic pipelines}}, doi = {10.48550/arXiv.1711.02890}, year = {2017}, } @inproceedings{11651, abstract = {Diffusions and related random walk procedures are of central importance in many areas of machine learning, data analysis, and applied mathematics. Because they spread mass agnostically at each step in an iterative manner, they can sometimes spread mass “too aggressively,” thereby failing to find the “right” clusters. We introduce a novel Capacity Releasing Diffusion (CRD) Process, which is both faster and stays more local than the classical spectral diffusion process. As an application, we use our CRD Process to develop an improved local algorithm for graph clustering. Our local graph clustering method can find local clusters in a model of clustering where one begins the CRD Process in a cluster whose vertices are connected better internally than externally by an O(log2n) factor, where n is the number of nodes in the cluster. 
Thus, our CRD Process is the first local graph clustering algorithm that is not subject to the well-known quadratic Cheeger barrier. Our result requires a certain smoothness condition, which we expect to be an artifact of our analysis. Our empirical evaluation demonstrates improved results, in particular for realistic social graphs where there are moderately good—but not very good—clusters.}, author = {Wang, Di and Fountoulakis, Kimon and Henzinger, Monika H and Mahoney, Michael W. and Rao, Satish}, booktitle = {Proceedings of the 34th International Conference on Machine Learning}, issn = {2640-3498}, location = {Sydney, Australia}, pages = {3598--3607}, publisher = {ML Research Press}, title = {{Capacity releasing diffusion for speed and locality}}, volume = {70}, year = {2017}, } @article{11665, abstract = {We study the problem of maintaining a breadth-first spanning tree (BFS tree) in partially dynamic distributed networks modeling a sequence of either failures or additions of communication links (but not both). We present deterministic (1+ϵ)-approximation algorithms whose amortized time (over some number of link changes) is sublinear in D, the maximum diameter of the network. Our technique also leads to a deterministic (1+ϵ)-approximate incremental algorithm for single-source shortest paths in the sequential (usual RAM) model. Prior to our work, the state of the art was the classic exact algorithm of Even and Shiloach (1981), which is optimal under some assumptions (Roditty and Zwick 2011; Henzinger et al. 2015). 
Our result is the first to show that, in the incremental setting, this bound can be beaten in certain cases if some approximation is allowed.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, issn = {1549-6333}, journal = {ACM Transactions on Algorithms}, number = {4}, publisher = {Association for Computing Machinery}, title = {{Sublinear-time maintenance of breadth-first spanning trees in partially dynamic networks}}, doi = {10.1145/3146550}, volume = {13}, year = {2017}, } @article{11676, abstract = {We study the problem of maximizing a monotone submodular function with viability constraints. This problem originates from computational biology, where we are given a phylogenetic tree over a set of species and a directed graph, the so-called food web, encoding viability constraints between these species. These food webs usually have constant depth. The goal is to select a subset of k species that satisfies the viability constraints and has maximal phylogenetic diversity. As this problem is known to be NP-hard, we investigate approximation algorithms. We present the first constant factor approximation algorithm if the depth is constant. Its approximation ratio is (1−1/√e). This algorithm not only applies to phylogenetic trees with viability constraints but for arbitrary monotone submodular set functions with viability constraints. 
Second, we show that there is no (1−1/e+ϵ)-approximation algorithm for our problem setting (even for additive functions) and that there is no approximation algorithm for a slight extension of this setting.}, author = {Dvořák, Wolfgang and Henzinger, Monika H and Williamson, David P.}, issn = {1432-0541}, journal = {Algorithmica}, keywords = {Approximation algorithms, Submodular functions, Phylogenetic diversity, Viability constraints}, number = {1}, pages = {152--172}, publisher = {Springer Nature}, title = {{Maximizing a submodular function with viability constraints}}, doi = {10.1007/s00453-015-0066-y}, volume = {77}, year = {2017}, } @inproceedings{1175, abstract = {We study space complexity and time-space trade-offs with a focus not on peak memory usage but on overall memory consumption throughout the computation. Such a cumulative space measure was introduced for the computational model of parallel black pebbling by [Alwen and Serbinenko ’15] as a tool for obtaining results in cryptography. We consider instead the non- deterministic black-white pebble game and prove optimal cumulative space lower bounds and trade-offs, where in order to minimize pebbling time the space has to remain large during a significant fraction of the pebbling. We also initiate the study of cumulative space in proof complexity, an area where other space complexity measures have been extensively studied during the last 10–15 years. 
Using and extending the connection between proof complexity and pebble games in [Ben-Sasson and Nordström ’08, ’11] we obtain several strong cumulative space results for (even parallel versions of) the resolution proof system, and outline some possible future directions of study of this, in our opinion, natural and interesting space measure.}, author = {Alwen, Joel F and De Rezende, Susanna and Nordstrom, Jakob and Vinyals, Marc}, editor = {Papadimitriou, Christos}, issn = {1868-8969}, location = {Berkeley, CA, United States}, pages = {38:1--38:21}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Cumulative space in black-white pebbling and resolution}}, doi = {10.4230/LIPIcs.ITCS.2017.38}, volume = {67}, year = {2017}, } @inproceedings{11772, abstract = {A dynamic graph algorithm is a data structure that supports operations on dynamically changing graphs.}, author = {Henzinger, Monika H}, booktitle = {44th International Conference on Current Trends in Theory and Practice of Computer Science}, isbn = {9783319731162}, issn = {0302-9743}, location = {Krems, Austria}, pages = {40--44}, publisher = {Springer Nature}, title = {{The state of the art in dynamic graph algorithms}}, doi = {10.1007/978-3-319-73117-9_3}, volume = {10706}, year = {2017}, } @inproceedings{11829, abstract = {In recent years it has become popular to study dynamic problems in a sensitivity setting: Instead of allowing for an arbitrary sequence of updates, the sensitivity model only allows to apply batch updates of small size to the original input data. The sensitivity model is particularly appealing since recent strong conditional lower bounds ruled out fast algorithms for many dynamic problems, such as shortest paths, reachability, or subgraph connectivity. In this paper we prove conditional lower bounds for these and additional problems in a sensitivity setting. 
For example, we show that under the Boolean Matrix Multiplication (BMM) conjecture combinatorial algorithms cannot compute the (4/3-\varepsilon)-approximate diameter of an undirected unweighted dense graph with truly subcubic preprocessing time and truly subquadratic update/query time. This result is surprising since in the static setting it is not clear whether a reduction from BMM to diameter is possible. We further show under the BMM conjecture that many problems, such as reachability or approximate shortest paths, cannot be solved faster than by recomputation from scratch even after only one or two edge insertions. We extend our reduction from BMM to Diameter to give a reduction from All Pairs Shortest Paths to Diameter under one deletion in weighted graphs. This is intriguing, as in the static setting it is a big open problem whether Diameter is as hard as APSP. We further get a nearly tight lower bound for shortest paths after two edge deletions based on the APSP conjecture. We give more lower bounds under the Strong Exponential Time Hypothesis. Many of our lower bounds also hold for static oracle data structures where no sensitivity is required. Finally, we give the first algorithm for the (1+\varepsilon)-approximate radius, diameter, and eccentricity problems in directed or undirected unweighted graphs in case of single edges failures. The algorithm has a truly subcubic running time for graphs with a truly subquadratic number of edges; it is tight w.r.t. 
the conditional lower bounds we obtain.}, author = {Henzinger, Monika H and Lincoln, Andrea and Neumann, Stefan and Vassilevska Williams, Virginia}, booktitle = {8th Innovations in Theoretical Computer Science Conference}, isbn = {9783959770293}, issn = {1868-8969}, location = {Berkeley, CA, United States}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Conditional hardness for sensitivity problems}}, doi = {10.4230/LIPICS.ITCS.2017.26}, volume = {67}, year = {2017}, } @inproceedings{11833, abstract = {We introduce a new algorithmic framework for designing dynamic graph algorithms in minor-free graphs, by exploiting the structure of such graphs and a tool called vertex sparsification, which is a way to compress large graphs into small ones that well preserve relevant properties among a subset of vertices and has previously mainly been used in the design of approximation algorithms. Using this framework, we obtain a Monte Carlo randomized fully dynamic algorithm for (1 + epsilon)-approximating the energy of electrical flows in n-vertex planar graphs with tilde{O}(r epsilon^{-2}) worst-case update time and tilde{O}((r + n / sqrt{r}) epsilon^{-2}) worst-case query time, for any r larger than some constant. For r=n^{2/3}, this gives tilde{O}(n^{2/3} epsilon^{-2}) update time and tilde{O}(n^{2/3} epsilon^{-2}) query time. We also extend this algorithm to work for minor-free graphs with similar approximation and running time guarantees. Furthermore, we illustrate our framework on the all-pairs max flow and shortest path problems by giving corresponding dynamic algorithms in minor-free graphs with both sublinear update and query times. To the best of our knowledge, our results are the first to systematically establish such a connection between dynamic graph algorithms and vertex sparsification. 
We also present both upper bound and lower bound for maintaining the energy of electrical flows in the incremental subgraph model, where updates consist of only vertex activations, which might be of independent interest.}, author = {Goranci, Gramoz and Henzinger, Monika H and Peng, Pan}, booktitle = {25th Annual European Symposium on Algorithms}, isbn = {978-3-95977-049-1}, issn = {1868-8969}, location = {Vienna, Austria}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{The power of vertex sparsifiers in dynamic graph algorithms}}, doi = {10.4230/LIPICS.ESA.2017.45}, volume = {87}, year = {2017}, } @inproceedings{11832, abstract = {In this paper, we study the problem of opening centers to cluster a set of clients in a metric space so as to minimize the sum of the costs of the centers and of the cluster radii, in a dynamic environment where clients arrive and depart, and the solution must be updated efficiently while remaining competitive with respect to the current optimal solution. We call this dynamic sum-of-radii clustering problem. We present a data structure that maintains a solution whose cost is within a constant factor of the cost of an optimal solution in metric spaces with bounded doubling dimension and whose worst-case update time is logarithmic in the parameters of the problem.}, author = {Henzinger, Monika H and Leniowski, Dariusz and Mathieu, Claire}, booktitle = {25th Annual European Symposium on Algorithms}, isbn = {978-3-95977-049-1}, issn = {1868-8969}, location = {Vienna, Austria}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Dynamic clustering to minimize the sum of radii}}, doi = {10.4230/LIPICS.ESA.2017.48}, volume = {87}, year = {2017}, } @inproceedings{11874, abstract = {We consider the problem of maintaining an approximately maximum (fractional) matching and an approximately minimum vertex cover in a dynamic graph. 
Starting with the seminal paper by Onak and Rubinfeld [STOC 2010], this problem has received significant attention in recent years. There remains, however, a polynomial gap between the best known worst case update time and the best known amortised update time for this problem, even after allowing for randomisation. Specifically, Bernstein and Stein [ICALP 2015, SODA 2016] have the best known worst case update time. They present a deterministic data structure with approximation ratio (3/2 + ∊) and worst case update time O(m1/4/ ∊2), where m is the number of edges in the graph. In recent past, Gupta and Peng [FOCS 2013] gave a deterministic data structure with approximation ratio (1+ ∊) and worst case update time O(m1/2/ ∊2). No known randomised data structure beats the worst case update times of these two results. In contrast, the paper by Onak and Rubinfeld [STOC 2010] gave a randomised data structure with approximation ratio O(1) and amortised update time O(log2 n), where n is the number of nodes in the graph. This was later improved by Baswana, Gupta and Sen [FOCS 2011] and Solomon [FOCS 2016], leading to a randomised data structure with approximation ratio 2 and amortised update time O(1). We bridge the polynomial gap between the worst case and amortised update times for this problem, without using any randomisation. 
We present a deterministic data structure with approximation ratio (2 + ∊) and worst case update time O(log3 n), for all sufficiently small constants ∊.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon}, booktitle = {28th Annual ACM-SIAM Symposium on Discrete Algorithms}, location = {Barcelona, Spain}, pages = {470--489}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Fully dynamic approximate maximum matching and minimum vertex cover in O(log3 n) worst case update time}}, doi = {10.1137/1.9781611974782.30}, year = {2017}, } @inproceedings{11873, abstract = {We study the problem of computing a minimum cut in a simple, undirected graph and give a deterministic O(m log2 n log log2 n) time algorithm. This improves both on the best previously known deterministic running time of O(m log12 n) (Kawarabayashi and Thorup [12]) and the best previously known randomized running time of O(m log3 n) (Karger [11]) for this problem, though Karger's algorithm can be further applied to weighted graphs. Our approach is using the Kawarabayashi and Thorup graph compression technique, which repeatedly finds low-conductance cuts. To find these cuts they use a diffusion-based local algorithm. We use instead a flow-based local algorithm and suitably adjust their framework to work with our flow-based subroutine. Both flow and diffusion based methods have a long history of being applied to finding low conductance cuts. Diffusion algorithms have several variants that are naturally local while it is more complicated to make flow methods local. Some prior work has proven nice properties for local flow based algorithms with respect to improving or cleaning up low conductance cuts. Our flow subroutine, however, is the first that is both local and produces low conductance cuts. 
Thus, it may be of independent interest.}, author = {Henzinger, Monika H and Rao, Satish and Wang, Di}, booktitle = {28th Annual ACM-SIAM Symposium on Discrete Algorithms}, location = {Barcelona, Spain}, pages = {1919--1938}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Local flow partitioning for faster edge connectivity}}, doi = {10.1137/1.9781611974782.125}, year = {2017}, } @inproceedings{11831, abstract = {Graph Sparsification aims at compressing large graphs into smaller ones while (approximately) preserving important characteristics of the input graph. In this work we study Vertex Sparsifiers, i.e., sparsifiers whose goal is to reduce the number of vertices. Given a weighted graph G=(V,E), and a terminal set K with |K|=k, a quality-q vertex cut sparsifier of G is a graph H with K contained in V_H that preserves the value of minimum cuts separating any bipartition of K, up to a factor of q. We show that planar graphs with all the k terminals lying on the same face admit quality-1 vertex cut sparsifier of size O(k^2) that are also planar. Our result extends to vertex flow and distance sparsifiers. It improves the previous best known bound of O(k^2 2^(2k)) for cut and flow sparsifiers by an exponential factor, and matches an Omega(k^2) lower-bound for this class of graphs. We also study vertex reachability sparsifiers for directed graphs. Given a digraph G=(V,E) and a terminal set K, a vertex reachability sparsifier of G is a digraph H=(V_H,E_H), K contained in V_H that preserves all reachability information among terminal pairs. We introduce the notion of reachability-preserving minors, i.e., we require H to be a minor of G. Among others, for general planar digraphs, we construct reachability-preserving minors of size O(k^2 log^2 k). 
We complement our upper-bound by showing that there exists an infinite family of acyclic planar digraphs such that any reachability-preserving minor must have Omega(k^2) vertices.}, author = {Goranci, Gramoz and Henzinger, Monika H and Peng, Pan}, booktitle = {25th Annual European Symposium on Algorithms}, isbn = {978-3-95977-049-1}, issn = {1868-8969}, location = {Vienna, Austria}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Improved guarantees for vertex sparsification in planar graphs}}, doi = {10.4230/LIPICS.ESA.2017.44}, volume = {87}, year = {2017}, } @article{11903, abstract = {Online social networks allow the collection of large amounts of data about the influence between users connected by a friendship-like relationship. When distributing items among agents forming a social network, this information allows us to exploit network externalities that each agent receives from his neighbors that get the same item. In this paper we consider Friends-of-Friends (2-hop) network externalities, i.e., externalities that not only depend on the neighbors that get the same item but also on neighbors of neighbors. For these externalities we study a setting where multiple different items are assigned to unit-demand agents. Specifically, we study the problem of welfare maximization under different types of externality functions. Let n be the number of agents and m be the number of items. Our contributions are the following: (1) We show that welfare maximization is APX-hard; we show that even for step functions with 2-hop (and also with 1-hop) externalities it is NP-hard to approximate social welfare better than (1−1/e). (2) On the positive side we present (i) an O(√n)-approximation algorithm for general concave externality functions, (ii) an O(log m)-approximation algorithm for linear externality functions, and (iii) a (5/18)(1−1/e)-approximation algorithm for 2-hop step function externalities. 
We also improve the result from [7] for 1-hop step function externalities by giving a (1/2)(1−1/e)-approximation algorithm.}, author = {Bhattacharya, Sayan and Dvořák, Wolfgang and Henzinger, Monika H and Starnberger, Martin}, issn = {1433-0490}, journal = {Theory of Computing Systems}, number = {4}, pages = {948--986}, publisher = {Springer Nature}, title = {{Welfare maximization with friends-of-friends network externalities}}, doi = {10.1007/s00224-017-9759-8}, volume = {61}, year = {2017}, } @article{1191, abstract = {Variation in genotypes may be responsible for differences in dispersal rates, directional biases, and growth rates of individuals. These traits may favor certain genotypes and enhance their spatiotemporal spreading into areas occupied by the less advantageous genotypes. We study how these factors influence the speed of spreading in the case of two competing genotypes under the assumption that spatial variation of the total population is small compared to the spatial variation of the frequencies of the genotypes in the population. In that case, the dynamics of the frequency of one of the genotypes is approximately described by a generalized Fisher–Kolmogorov–Petrovskii–Piskunov (F–KPP) equation. This generalized F–KPP equation with (nonlinear) frequency-dependent diffusion and advection terms admits traveling wave solutions that characterize the invasion of the dominant genotype. Our existence results generalize the classical theory for traveling waves for the F–KPP with constant coefficients. 
Moreover, in the particular case of the quadratic (monostable) nonlinear growth–decay rate in the generalized F–KPP we study in detail the influence of the variance in diffusion and mean displacement rates of the two genotypes on the minimal wave propagation speed.}, author = {Kollár, Richard and Novak, Sebastian}, journal = {Bulletin of Mathematical Biology}, number = {3}, pages = {525--559}, publisher = {Springer}, title = {{Existence of traveling waves for the generalized F–KPP equation}}, doi = {10.1007/s11538-016-0244-3}, volume = {79}, year = {2017}, }