@article{11505, abstract = {Context. This paper presents the results obtained with the Multi-Unit Spectroscopic Explorer (MUSE) at the ESO Very Large Telescope on the faint end of the Lyman-alpha luminosity function (LF) based on deep observations of four lensing clusters. The goal of our project is to set strong constraints on the relative contribution of the Lyman-alpha emitter (LAE) population to cosmic reionization. Aims. The precise aim of the present study is to further constrain the abundance of LAEs by taking advantage of the magnification provided by lensing clusters to build a blindly selected sample of galaxies which is less biased than current blank field samples in redshift and luminosity. By construction, this sample of LAEs is complementary to those built from deep blank fields, whether observed by MUSE or by other facilities, and makes it possible to determine the shape of the LF at fainter levels, as well as its evolution with redshift. Methods. We selected a sample of 156 LAEs with redshifts between 2.9 ≤ z ≤ 6.7 and magnification-corrected luminosities in the range 39 ≲ log LLyα [erg s−1] ≲43. To properly take into account the individual differences in detection conditions between the LAEs when computing the LF, including lensing configurations, and spatial and spectral morphologies, the non-parametric 1/Vmax method was adopted. The price to pay to benefit from magnification is a reduction of the effective volume of the survey, together with a more complex analysis procedure to properly determine the effective volume Vmax for each galaxy. In this paper we present a complete procedure for the determination of the LF based on IFU detections in lensing clusters. This procedure, including some new methods for masking, effective volume integration and (individual) completeness determinations, has been fully automated when possible, and it can be easily generalized to the analysis of IFU observations in blank fields. Results. 
As a result of this analysis, the Lyman-alpha LF has been obtained in four different redshift bins: 2.9 <  z <  6.7, 2.9 <  z <  4.0, 4.0 <  z <  5.0, and 5.0 <  z <  6.7 with constraints down to log LLyα = 40.5. From our data only, no significant evolution of LF mean slope can be found. When performing a Schechter analysis also including data from the literature to complete the present sample towards the brightest luminosities, a steep faint end slope was measured varying from α = −1.69−0.08+0.08 to α = −1.87−0.12+0.12 between the lowest and the highest redshift bins. Conclusions. The contribution of the LAE population to the star formation rate density at z ∼ 6 is ≲50% depending on the luminosity limit considered, which is of the same order as the Lyman-break galaxy (LBG) contribution. The evolution of the LAE contribution with redshift depends on the assumed escape fraction of Lyman-alpha photons, and appears to slightly increase with increasing redshift when this fraction is conservatively set to one. Depending on the intersection between the LAE/LBG populations, the contribution of the observed galaxies to the ionizing flux may suffice to keep the universe ionized at z ∼ 6.}, author = {de La Vieuville, G. and Bina, D. and Pello, R. and Mahler, G. and Richard, J. and Drake, A. B. and Herenz, E. C. and Bauer, F. E. and Clément, B. and Lagattuta, D. and Laporte, N. and Martinez, J. and Patrício, V. and Wisotzki, L. and Zabl, J. and Bouwens, R. J. and Contini, T. and Garel, T. and Guiderdoni, B. and Marino, R. A. and Maseda, M. V. and Matthee, Jorryt J and Schaye, J. 
and Soucail, G.}, issn = {1432-0746}, journal = {Astronomy \& Astrophysics}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, gravitational lensing: strong / galaxies: high-redshift / dark ages, reionization, first stars / galaxies: clusters: general / galaxies: luminosity function, mass function}, publisher = {EDP Sciences}, title = {{Faint end of the z ∼ 3–7 luminosity function of Lyman-alpha emitters behind lensing clusters observed with MUSE}}, doi = {10.1051/0004-6361/201834471}, volume = {628}, year = {2019}, } @article{11507, abstract = {Lyman-α (Lyα) is intrinsically the brightest line emitted from active galaxies. While it originates from many physical processes, for star-forming galaxies the intrinsic Lyα luminosity is a direct tracer of the Lyman-continuum (LyC) radiation produced by the most massive O- and early-type B-stars (M⋆ ≳ 10 M⊙) with lifetimes of a few Myrs. As such, Lyα luminosity should be an excellent instantaneous star formation rate (SFR) indicator. However, its resonant nature and susceptibility to dust as a rest-frame UV photon makes Lyα very hard to interpret due to the uncertain Lyα escape fraction, fesc, Lyα. Here we explore results from the CAlibrating LYMan-α with Hα (CALYMHA) survey at z = 2.2, follow-up of Lyα emitters (LAEs) at z = 2.2 − 2.6 and a z ∼ 0−0.3 compilation of LAEs to directly measure fesc, Lyα with Hα. We derive a simple empirical relation that robustly retrieves fesc, Lyα as a function of Lyα rest-frame EW (EW0): fesc,Lyα = 0.0048 EW0[Å] ± 0.05 and we show that it constrains a well-defined anti-correlation between ionisation efficiency (ξion) and dust extinction in LAEs. Observed Lyα luminosities and EW0 are easy measurable quantities at high redshift, thus making our relation a practical tool to estimate intrinsic Lyα and LyC luminosities under well controlled and simple assumptions. 
Our results allow observed Lyα luminosities to be used to compute SFRs for LAEs at z ∼ 0−2.6 within ±0.2 dex of the Hα dust corrected SFRs. We apply our empirical SFR(Lyα,EW0) calibration to several sources at z ≥ 2.6 to find that star-forming LAEs have SFRs typically ranging from 0.1 to 20 M⊙ yr−1 and that our calibration might be even applicable for the most luminous LAEs within the epoch of re-ionisation. Our results imply high ionisation efficiencies (log10[ξion/Hz erg−1] = 25.4−25.6) and low dust content in LAEs across cosmic time, and will be easily tested with future observations with JWST which can obtain Hα and Hβ measurements for high-redshift LAEs.}, author = {Sobral, David and Matthee, Jorryt J}, issn = {1432-0746}, journal = {Astronomy \& Astrophysics}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: high-redshift / galaxies: star formation / galaxies: statistics / galaxies: evolution / galaxies: formation / galaxies: ISM}, publisher = {EDP Sciences}, title = {{Predicting Lyα escape fractions with a simple observable: Lyα in emission as an empirically calibrated star formation rate indicator}}, doi = {10.1051/0004-6361/201833075}, volume = {623}, year = {2019}, } @article{11514, abstract = {We discuss the nature and physical properties of gas-mass selected galaxies in the ALMA spectroscopic survey (ASPECS) of the Hubble Ultra Deep Field (HUDF). We capitalize on the deep optical integral-field spectroscopy from the Multi Unit Spectroscopic Explorer (MUSE) HUDF Survey and multiwavelength data to uniquely associate all 16 line emitters, detected in the ALMA data without preselection, with rotational transitions of carbon monoxide (CO). We identify 10 as CO(2–1) at 1 < z < 2, 5 as CO(3–2) at 2 < z < 3, and 1 as CO(4–3) at z = 3.6. Using the MUSE data as a prior, we identify two additional CO(2–1) emitters, increasing the total sample size to 18. 
We infer metallicities consistent with (super-)solar for the CO-detected galaxies at z ≤ 1.5, motivating our choice of a Galactic conversion factor between CO luminosity and molecular gas mass for these galaxies. Using deep Chandra imaging of the HUDF, we determine an X-ray AGN fraction of 20% and 60% among the CO emitters at z ∼ 1.4 and z ∼ 2.6, respectively. Being a CO-flux-limited survey, ASPECS-LP detects molecular gas in galaxies on, above, and below the main sequence (MS) at z ∼ 1.4. For stellar masses ≥1010 (1010.5) ${M}_{\odot }$, we detect about 40% (50%) of all galaxies in the HUDF at 1 < z < 2 (2 < z < 3). The combination of ALMA and MUSE integral-field spectroscopy thus enables an unprecedented view of MS galaxies during the peak of galaxy formation.}, author = {Boogaard, Leindert A. and Decarli, Roberto and González-López, Jorge and van der Werf, Paul and Walter, Fabian and Bouwens, Rychard and Aravena, Manuel and Carilli, Chris and Bauer, Franz Erik and Brinchmann, Jarle and Contini, Thierry and Cox, Pierre and da Cunha, Elisabete and Daddi, Emanuele and Díaz-Santos, Tanio and Hodge, Jacqueline and Inami, Hanae and Ivison, Rob and Maseda, Michael and Matthee, Jorryt J and Oesch, Pascal and Popping, Gergö and Riechers, Dominik and Schaye, Joop and Schouws, Sander and Smail, Ian and Weiss, Axel and Wisotzki, Lutz and Bacon, Roland and Cortes, Paulo C. and Rix, Hans-Walter and Somerville, Rachel S. 
and Swinbank, Mark and Wagg, Jeff}, issn = {1538-4357}, journal = {The Astrophysical Journal}, keywords = {Space and Planetary Science, Astronomy and Astrophysics}, number = {2}, publisher = {IOP Publishing}, title = {{The ALMA spectroscopic survey in the HUDF: Nature and physical properties of gas-mass selected galaxies using MUSE spectroscopy}}, doi = {10.3847/1538-4357/ab3102}, volume = {882}, year = {2019}, } @article{11516, abstract = {The well-known quasar SDSS J095253.83+011421.9 (J0952+0114) at z = 3.02 has one of the most peculiar spectra discovered so far, showing the presence of narrow Lyα and broad metal emission lines. Although recent studies have suggested that a proximate damped Lyα absorption (PDLA) system causes this peculiar spectrum, the origin of the gas associated with the PDLA is unknown. Here we report the results of observations with the Multi Unit Spectroscopic Explorer (MUSE) that reveal a new giant (≈100 physical kpc) Lyα nebula. The detailed analysis of the Lyα velocity, velocity dispersion, and surface brightness profiles suggests that the J0952+0114 Lyα nebula shares similar properties with other QSO nebulae previously detected with MUSE, implying that the PDLA in J0952+0114 is covering only a small fraction of the solid angle of the QSO emission. We also detected bright and spectrally narrow C iv λ1550 and He ii λ1640 extended emission around J0952+0114 with velocity centroids similar to the peak of the extended and central narrow Lyα emission. The presence of a peculiarly bright, unresolved, and relatively broad He ii λ1640 emission in the central region at exactly the same PDLA redshift hints at the possibility that the PDLA originates in a clumpy outflow with a bulk velocity of about 500 km s−1. The smaller velocity dispersion of the large-scale Lyα emission suggests that the high-speed outflow is confined to the central region. 
Lastly, the derived spatially resolved He ii/Lyα and C iv/Lyα maps show a positive gradient with the distance to the QSO, hinting at a non-homogeneous distribution of the ionization parameter.}, author = {Marino, Raffaella Anna and Cantalupo, Sebastiano and Pezzulli, Gabriele and Lilly, Simon J. and Gallego, Sofia and Mackenzie, Ruari and Matthee, Jorryt J and Brinchmann, Jarle and Bouché, Nicolas and Feltre, Anna and Muzahid, Sowgat and Schroetter, Ilane and Johnson, Sean D. and Nanayakkara, Themiya}, issn = {1538-4357}, journal = {The Astrophysical Journal}, keywords = {Space and Planetary Science, Astronomy and Astrophysics}, number = {1}, publisher = {IOP Publishing}, title = {{A giant Lyα nebula and a small-scale clumpy outflow in the system of the exotic quasar J0952+0114 unveiled by MUSE}}, doi = {10.3847/1538-4357/ab2881}, volume = {880}, year = {2019}, } @article{11515, abstract = {We present new deep ALMA and Hubble Space Telescope (HST)/WFC3 observations of MASOSA and VR7, two luminous Lyα emitters (LAEs) at z = 6.5, for which the UV continuum levels differ by a factor of four. No IR dust continuum emission is detected in either, indicating little amounts of obscured star formation and/or high dust temperatures. MASOSA, with a UV luminosity M1500 = −20.9, compact size, and very high Lyα ${\mathrm{EW}}_{0}\approx 145\,\mathring{\rm A} $, is undetected in [C ii] to a limit of L[C ii] < 2.2 × 107 L⊙, implying a metallicity Z ≲ 0.07 Z⊙. Intriguingly, our HST data indicate a red UV slope β = −1.1 ± 0.7, at odds with the low dust content. VR7, which is a bright (M1500 = −22.4) galaxy with moderate color (β = −1.4 ± 0.3) and Lyα EW0 = 34 Å, is clearly detected in [C ii] emission (S/N = 15). VR7's rest-frame UV morphology can be described by two components separated by ≈1.5 kpc and is globally more compact than the [C ii] emission. The global [C ii]/UV ratio indicates Z ≈ 0.2 Z⊙, but there are large variations in the UV/[C ii] ratio on kiloparsec scales. 
We also identify diffuse, possibly outflowing, [C ii]-emitting gas at ≈100 km s−1 with respect to the peak. VR7 appears to be assembling its components at a slightly more evolved stage than other luminous LAEs, with outflows already shaping its direct environment at z ∼ 7. Our results further indicate that the global [C ii]−UV relation steepens at SFR < 30 M⊙ yr−1, naturally explaining why the [C ii]/UV ratio is anticorrelated with Lyα EW in many, but not all, observed LAEs.}, author = {Matthee, Jorryt J and Sobral, D. and Boogaard, L. A. and Röttgering, H. and Vallini, L. and Ferrara, A. and Paulino-Afonso, A. and Boone, F. and Schaerer, D. and Mobasher, B.}, issn = {1538-4357}, journal = {The Astrophysical Journal}, keywords = {Space and Planetary Science, Astronomy and Astrophysics}, number = {2}, publisher = {IOP Publishing}, title = {{Resolved UV and [C ii] structures of luminous galaxies within the epoch of reionization}}, doi = {10.3847/1538-4357/ab2f81}, volume = {881}, year = {2019}, } @article{11517, abstract = {To understand star formation in galaxies, we investigate the star formation rate (SFR) surface density (ΣSFR) profiles for galaxies, based on a well-defined sample of 976 star-forming MaNGA galaxies. We find that the typical ΣSFR profiles within 1.5Re of normal SF galaxies can be well described by an exponential function for different stellar mass intervals, while the sSFR profile shows positive gradients, especially for more massive SF galaxies. This is due to the more pronounced central cores or bulges rather than the onset of a `quenching' process. While galaxies that lie significantly above (or below) the star formation main sequence (SFMS) show overall an elevation (or suppression) of ΣSFR at all radii, this central elevation (or suppression) is more pronounced in more massive galaxies. 
The degree of central enhancement and suppression is quite symmetric, suggesting that both the elevation and suppression of star formation are following the same physical processes. Furthermore, we find that the dispersion in ΣSFR within and across the population is found to be tightly correlated with the inferred gas depletion time, whether based on the stellar surface mass density or the orbital dynamical time. This suggests that we are seeing the response of a simple gas-regulator system to variations in the accretion rate. This is explored using a heuristic model that can quantitatively explain the dependence of σ(ΣSFR) on gas depletion timescale. Variations in accretion rate are progressively more damped out in regions of low star-formation efficiency leading to a reduced amplitude of variations in star-formation.}, author = {Wang, Enci and Lilly, Simon J. and Pezzulli, Gabriele and Matthee, Jorryt J}, issn = {1538-4357}, journal = {The Astrophysical Journal}, keywords = {Space and Planetary Science, Astronomy and Astrophysics}, number = {2}, publisher = {IOP Publishing}, title = {{On the elevation and suppression of star formation within galaxies}}, doi = {10.3847/1538-4357/ab1c5b}, volume = {877}, year = {2019}, } @article{11535, abstract = {We investigate the clustering and halo properties of ∼5000 Ly α-selected emission-line galaxies (LAEs) from the Slicing COSMOS 4K (SC4K) and from archival NB497 imaging of SA22 split in 15 discrete redshift slices between z ∼ 2.5 and 6. We measure clustering lengths of r0 ∼ 3–6 h−1 Mpc and typical halo masses of ∼1011 M⊙ for our narrowband-selected LAEs with typical LLy α ∼ 1042–43 erg s−1. The intermediate-band-selected LAEs are observed to have r0 ∼ 3.5–15 h−1 Mpc with typical halo masses of ∼1011–12 M⊙ and typical LLy α ∼ 1043–43.6 erg s−1. We find a strong, redshift-independent correlation between halo mass and Ly α luminosity normalized by the characteristic Ly α luminosity, L⋆(z). 
The faintest LAEs (L ∼ 0.1 L⋆(z)) typically identified by deep narrowband surveys are found in 1010 M⊙ haloes and the brightest LAEs (L ∼ 7 L⋆(z)) are found in ∼5 × 1012 M⊙ haloes. A dependency on the rest-frame 1500 Å UV luminosity, MUV, is also observed where the halo masses increase from 1011 to 1013 M⊙ for MUV ∼ −19 to −23.5 mag. Halo mass is also observed to increase from 109.8 to 1012 M⊙ for dust-corrected UV star formation rates from ∼0.6 to 10 M⊙ yr−1 and continues to increase up to 1013 M⊙ in halo mass, where the majority of those sources are active galactic nuclei. All the trends we observe are found to be redshift independent. Our results reveal that LAEs are the likely progenitors of a wide range of galaxies depending on their luminosity, from dwarf-like, to Milky Way-type, to bright cluster galaxies. LAEs therefore provide unique insight into the early formation and evolution of the galaxies we observe in the local Universe.}, author = {Khostovan, A A and Sobral, D and Mobasher, B and Matthee, Jorryt J and Cochrane, R K and Chartab, N and Jafariyazani, M and Paulino-Afonso, A and Santos, S and Calhau, J}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: haloes, galaxies: high-redshift, galaxies: star formation, cosmology: observations, large-scale structure of Universe}, number = {1}, pages = {555--573}, publisher = {Oxford University Press}, title = {{The clustering of typical Ly α emitters from z ∼ 2.5–6: Host halo masses depend on Ly α and UV luminosities}}, doi = {10.1093/mnras/stz2149}, volume = {489}, year = {2019}, } @article{11541, abstract = {We present new Hubble Space Telescope (HST)/WFC3 observations and re-analyse VLT data to unveil the continuum, variability, and rest-frame ultraviolet (UV) lines of the multiple UV clumps of the most luminous Lyα emitter at z = 6.6, CR7 (COSMOS Redshift 7). 
Our re-reduced, flux-calibrated X-SHOOTER spectra of CR7 reveal an He II emission line in observations obtained along the major axis of Lyα emission with the best seeing conditions. He II is spatially offset by ≈+0.8 arcsec from the peak of Lyα emission, and it is found towards clump B. Our WFC3 grism spectra detects the UV continuum of CR7’s clump A, yielding a power law with β=−2.5+0.6−0.7 and MUV=−21.87+0.25−0.20⁠. No significant variability is found for any of the UV clumps on their own, but there is tentative (≈2.2 σ) brightening of CR7 in F110W as a whole from 2012 to 2017. HST grism data fail to robustly detect rest-frame UV lines in any of the clumps, implying fluxes ≲2×10−17 erg s−1 cm−2 (3σ). We perform CLOUDY modelling to constrain the metallicity and the ionizing nature of CR7. CR7 seems to be actively forming stars without any clear active galactic nucleus activity in clump A, consistent with a metallicity of ∼0.05–0.2 Z⊙. Component C or an interclump component between B and C may host a high ionization source. 
Our results highlight the need for spatially resolved information to study the formation and assembly of early galaxies.}, author = {Sobral, David and Matthee, Jorryt J and Brammer, Gabriel and Ferrara, Andrea and Alegre, Lara and Röttgering, Huub and Schaerer, Daniel and Mobasher, Bahram and Darvish, Behnam}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: high-redshift, galaxies: ISM, cosmology: observations, dark ages, reionization, first stars, early Universe}, number = {2}, pages = {2422--2441}, publisher = {Oxford University Press}, title = {{On the nature and physical conditions of the luminous Ly α emitter CR7 and its rest-frame UV components}}, doi = {10.1093/mnras/sty2779}, volume = {482}, year = {2019}, } @article{11540, abstract = {Observations have revealed that the star formation rate (SFR) and stellar mass (Mstar) of star-forming galaxies follow a tight relation known as the galaxy main sequence. However, what physical information is encoded in this relation is under debate. Here, we use the EAGLE cosmological hydrodynamical simulation to study the mass dependence, evolution, and origin of scatter in the SFR–Mstar relation. At z = 0, we find that the scatter decreases slightly with stellar mass from 0.35 dex at Mstar ≈ 109 M⊙ to 0.30 dex at Mstar ≳ 1010.5 M⊙. The scatter decreases from z = 0 to z = 5 by 0.05 dex at Mstar ≳ 1010 M⊙ and by 0.15 dex for lower masses. We show that the scatter at z = 0.1 originates from a combination of fluctuations on short time-scales (ranging from 0.2–2 Gyr) that are presumably associated with self-regulation from cooling, star formation, and outflows, but is dominated by long time-scale (∼10 Gyr) variations related to differences in halo formation times. Shorter time-scale fluctuations are relatively more important for lower mass galaxies. 
At high masses, differences in black hole formation efficiency cause additional scatter, but also diminish the scatter caused by different halo formation times. While individual galaxies cross the main sequence multiple times during their evolution, they fluctuate around tracks associated with their halo properties, i.e. galaxies above/below the main sequence at z = 0.1 tend to have been above/below the main sequence for ≫1 Gyr.}, author = {Matthee, Jorryt J and Schaye, Joop}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: formation, galaxies: star formation, cosmology: theory}, number = {1}, pages = {915--932}, publisher = {Oxford University Press}, title = {{The origin of scatter in the star formation rate–stellar mass relation}}, doi = {10.1093/mnras/stz030}, volume = {484}, year = {2019}, } @article{11616, abstract = {We present the discovery of HD 221416 b, the first transiting planet identified by the Transiting Exoplanet Survey Satellite (TESS) for which asteroseismology of the host star is possible. HD 221416 b (HIP 116158, TOI-197) is a bright (V = 8.2 mag), spectroscopically classified subgiant that oscillates with an average frequency of about 430 μHz and displays a clear signature of mixed modes. The oscillation amplitude confirms that the redder TESS bandpass compared to Kepler has a small effect on the oscillations, supporting the expected yield of thousands of solar-like oscillators with TESS 2 minute cadence observations. Asteroseismic modeling yields a robust determination of the host star radius (R⋆ = 2.943 ± 0.064 R⊙), mass (M⋆ = 1.212 ± 0.074 M⊙), and age (4.9 ± 1.1 Gyr), and demonstrates that it has just started ascending the red-giant branch. 
Combining asteroseismology with transit modeling and radial-velocity observations, we show that the planet is a "hot Saturn" (Rp = 9.17 ± 0.33 R⊕) with an orbital period of ∼14.3 days, irradiance of F = 343 ± 24 F⊕, and moderate mass (Mp = 60.5 ± 5.7 M⊕) and density (ρp = 0.431 ± 0.062 g cm−3). The properties of HD 221416 b show that the host-star metallicity–planet mass correlation found in sub-Saturns (4–8 R⊕) does not extend to larger radii, indicating that planets in the transition between sub-Saturns and Jupiters follow a relatively narrow range of densities. With a density measured to ∼15%, HD 221416 b is one of the best characterized Saturn-size planets to date, augmenting the small number of known transiting planets around evolved stars and demonstrating the power of TESS to characterize exoplanets and their host stars using asteroseismology.}, author = {Huber, Daniel and Chaplin, William J. and Chontos, Ashley and Kjeldsen, Hans and Christensen-Dalsgaard, Jørgen and Bedding, Timothy R. and Ball, Warrick and Brahm, Rafael and Espinoza, Nestor and Henning, Thomas and Jordán, Andrés and Sarkis, Paula and Knudstrup, Emil and Albrecht, Simon and Grundahl, Frank and Andersen, Mads Fredslund and Pallé, Pere L. and Crossfield, Ian and Fulton, Benjamin and Howard, Andrew W. and Isaacson, Howard T. and Weiss, Lauren M. and Handberg, Rasmus and Lund, Mikkel N. and Serenelli, Aldo M. and Rørsted Mosumgaard, Jakob and Stokholm, Amalie and Bieryla, Allyson and Buchhave, Lars A. and Latham, David W. and Quinn, Samuel N. and Gaidos, Eric and Hirano, Teruyuki and Ricker, George R. and Vanderspek, Roland K. and Seager, Sara and Jenkins, Jon M. and Winn, Joshua N. and Antia, H. M. and Appourchaux, Thierry and Basu, Sarbani and Bell, Keaton J. and Benomar, Othman and Bonanno, Alfio and Buzasi, Derek L. and Campante, Tiago L. and Çelik Orhan, Z. and Corsaro, Enrico and Cunha, Margarida S. and Davies, Guy R. and Deheuvels, Sebastien and Grunblatt, Samuel K. 
and Hasanzadeh, Amir and Di Mauro, Maria Pia and A. García, Rafael and Gaulme, Patrick and Girardi, Léo and Guzik, Joyce A. and Hon, Marc and Jiang, Chen and Kallinger, Thomas and Kawaler, Steven D. and Kuszlewicz, James S. and Lebreton, Yveline and Li, Tanda and Lucas, Miles and Lundkvist, Mia S. and Mann, Andrew W. and Mathis, Stéphane and Mathur, Savita and Mazumdar, Anwesh and Metcalfe, Travis S. and Miglio, Andrea and F. G. Monteiro, Mário J. P. and Mosser, Benoit and Noll, Anthony and Nsamba, Benard and Joel Ong, Jia Mian and Örtel, S. and Pereira, Filipe and Ranadive, Pritesh and Régulo, Clara and Rodrigues, Thaíse S. and Roxburgh, Ian W. and Aguirre, Victor Silva and Smalley, Barry and Schofield, Mathew and Sousa, Sérgio G. and Stassun, Keivan G. and Stello, Dennis and Tayar, Jamie and White, Timothy R. and Verma, Kuldeep and Vrard, Mathieu and Yıldız, M. and Baker, David and Bazot, Michaël and Beichmann, Charles and Bergmann, Christoph and Bugnet, Lisa Annabelle and Cale, Bryson and Carlino, Roberto and Cartwright, Scott M. and Christiansen, Jessie L. and Ciardi, David R. and Creevey, Orlagh and Dittmann, Jason A. and Nascimento, Jose-Dias Do and Eylen, Vincent Van and Fürész, Gabor and Gagné, Jonathan and Gao, Peter and Gazeas, Kosmas and Giddens, Frank and Hall, Oliver J. and Hekker, Saskia and Ireland, Michael J. and Latouf, Natasha and LeBrun, Danny and Levine, Alan M. and Matzko, William and Natinsky, Eva and Page, Emma and Plavchan, Peter and Mansouri-Samani, Masoud and McCauliff, Sean and Mullally, Susan E. and Orenstein, Brendan and Soto, Aylin Garcia and Paegert, Martin and van Saders, Jennifer L. and Schnaible, Chloe and Soderblom, David R. and Szabó, Róbert and Tanner, Angelle and Tinney, C. G. and Teske, Johanna and Thomas, Alexandra and Trampedach, Regner and Wright, Duncan and Yuan, Thomas T. 
and Zohrabi, Farzaneh}, issn = {0004-6256}, journal = {The Astronomical Journal}, keywords = {Space and Planetary Science, Astronomy and Astrophysics}, number = {6}, publisher = {IOP Publishing}, title = {{A hot Saturn orbiting an oscillating late subgiant discovered by TESS}}, doi = {10.3847/1538-3881/ab1488}, volume = {157}, year = {2019}, } @article{11613, abstract = {Over 2,000 stars were observed for 1 month with a high enough cadence in order to look for acoustic modes during the survey phase of the Kepler mission. Solar-like oscillations have been detected in about 540 stars. The question of why no oscillations were detected in the remaining stars is still open. Previous works explained the non-detection of modes with the high level of magnetic activity of the stars. However, the sample of stars studied contained some classical pulsators and red giants that could have biased the results. In this work, we revisit this analysis on a cleaner sample of main-sequence solar-like stars that consists of 1,014 stars. First we compute the predicted amplitude of the modes of that sample and for the stars with detected oscillation and compare it to the noise at high frequency in the power spectrum. We find that the stars with detected modes have an amplitude to noise ratio larger than 0.94. We measure reliable rotation periods and the associated photometric magnetic index for 684 stars out of the full sample and in particular for 323 stars where the amplitude of the modes is predicted to be high enough to be detected. We find that among these 323 stars 32% of them have a level of magnetic activity larger than the Sun during its maximum activity, explaining the non-detection of acoustic modes. Interestingly, magnetic activity cannot be the primary reason responsible for the absence of detectable modes in the remaining 68% of the stars without acoustic modes detected and with reliable rotation periods. 
Thus, we investigate metallicity, inclination angle of the rotation axis, and binarity as possible causes of low mode amplitudes. Using spectroscopic observations for a subsample, we find that a low metallicity could be the reason for suppressed modes. No clear correlation with binarity nor inclination is found. We also derive the lower limit for our photometric activity index (of 20–30 ppm) below which rotation and magnetic activity are not detected. Finally, with our analysis we conclude that stars with a photometric activity index larger than 2,000 ppm have 98.3% probability of not having oscillations detected.}, author = {Mathur, Savita and García, Rafael A. and Bugnet, Lisa Annabelle and Santos, Ângela R.G. and Santiago, Netsha and Beck, Paul G.}, issn = {2296-987X}, journal = {Frontiers in Astronomy and Space Sciences}, keywords = {Astronomy and Astrophysics}, publisher = {Frontiers Media}, title = {{Revisiting the impact of stellar magnetic activity on the detectability of solar-like oscillations by Kepler}}, doi = {10.3389/fspas.2019.00046}, volume = {6}, year = {2019}, } @article{11615, abstract = {The recently published Kepler mission Data Release 25 (DR25) reported on ∼197 000 targets observed during the mission. Despite this, no wide search for red giants showing solar-like oscillations have been made across all stars observed in Kepler’s long-cadence mode. In this work, we perform this task using custom apertures on the Kepler pixel files and detect oscillations in 21 914 stars, representing the largest sample of solar-like oscillating stars to date. We measure their frequency at maximum power, νmax, down to νmax≃4μHz and obtain log (g) estimates with a typical uncertainty below 0.05 dex, which is superior to typical measurements from spectroscopy. 
Additionally, the νmax distribution of our detections show good agreement with results from a simulated model of the Milky Way, with a ratio of observed to predicted stars of 0.992 for stars with 10<νmax<270μHz. Among our red giant detections, we find 909 to be dwarf/subgiant stars whose flux signal is polluted by a neighbouring giant as a result of using larger photometric apertures than those used by the NASA Kepler science processing pipeline. We further find that only 293 of the polluting giants are known Kepler targets. The remainder comprises over 600 newly identified oscillating red giants, with many expected to belong to the Galactic halo, serendipitously falling within the Kepler pixel files of targeted stars.}, author = {Hon, Marc and Stello, Dennis and García, Rafael A and Mathur, Savita and Sharma, Sanjib and Colman, Isabel L and Bugnet, Lisa Annabelle}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, asteroseismology, methods: data analysis, techniques: image processing, stars: oscillations, stars: statistics}, number = {4}, pages = {5616--5630}, publisher = {Oxford University Press}, title = {{A search for red giant solar-like oscillations in all Kepler data}}, doi = {10.1093/mnras/stz622}, volume = {485}, year = {2019}, } @article{11614, abstract = {The NASA Transiting Exoplanet Survey Satellite (TESS) is about to provide full-frame images of almost the entire sky. The amount of stellar data to be analysed represents hundreds of millions stars, which is several orders of magnitude more than the number of stars observed by the Convection, Rotation and planetary Transits satellite (CoRoT), and NASA Kepler and K2 missions. We aim at automatically classifying the newly observed stars with near real-time algorithms to better guide the subsequent detailed studies. 
In this paper, we present a classification algorithm built to recognise solar-like pulsators among classical pulsators. This algorithm relies on the global amount of power contained in the power spectral density (PSD), also known as the flicker in spectral power density (FliPer). Because each type of pulsating star has a characteristic background or pulsation pattern, the shape of the PSD at different frequencies can be used to characterise the type of pulsating star. The FliPer classifier (FliPerClass) uses different FliPer parameters along with the effective temperature as input parameters to feed a ML algorithm in order to automatically classify the pulsating stars observed by TESS. Using noisy TESS-simulated data from the TESS Asteroseismic Science Consortium (TASC), we classify pulsators with a 98% accuracy. Among them, solar-like pulsating stars are recognised with a 99% accuracy, which is of great interest for a further seismic analysis of these stars, which are like our Sun. Similar results are obtained when we trained our classifier and applied it to 27-day subsets of real Kepler data. FliPerClass is part of the large TASC classification pipeline developed by the TESS Data for Asteroseismology (T’DA) classification working group.}, author = {Bugnet, Lisa Annabelle and García, R. A. and Mathur, S. and Davies, G. R. and Hall, O. J. and Lund, M. N. and Rendle, B. M.}, issn = {1432-0746}, journal = {Astronomy & Astrophysics}, keywords = {Space and Planetary Science, Astronomy and Astrophysics}, publisher = {EDP Science}, title = {{FliPerClass: In search of solar-like pulsators among TESS targets}}, doi = {10.1051/0004-6361/201834780}, volume = {624}, year = {2019}, } @article{11623, abstract = {Brightness variations due to dark spots on the stellar surface encode information about stellar surface rotation and magnetic activity. 
In this work, we analyze the Kepler long-cadence data of 26,521 main-sequence stars of spectral types M and K in order to measure their surface rotation and photometric activity level. Rotation-period estimates are obtained by the combination of a wavelet analysis and autocorrelation function of the light curves. Reliable rotation estimates are determined by comparing the results from the different rotation diagnostics and four data sets. We also measure the photometric activity proxy Sph using the amplitude of the flux variations on an appropriate timescale. We report rotation periods and photometric activity proxies for about 60% of the sample, including 4431 targets for which McQuillan et al. did not report a rotation period. For the common targets with rotation estimates in this study and in McQuillan et al., our rotation periods agree within 99%. In this work, we also identify potential polluters, such as misclassified red giants and classical pulsator candidates. Within the parameter range we study, there is a mild tendency for hotter stars to have shorter rotation periods. The photometric activity proxy spans a wider range of values with increasing effective temperature. The rotation period and photometric activity proxy are also related, with Sph being larger for fast rotators. Similar to McQuillan et al., we find a bimodal distribution of rotation periods.}, author = {Santos, A. R. G. and García, R. A. and Mathur, S. and Bugnet, Lisa Annabelle and van Saders, J. L. and Metcalfe, T. S. and Simonian, G. V. A. and Pinsonneault, M. H.}, issn = {0067-0049}, journal = {The Astrophysical Journal Supplement Series}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, methods: data analysis, stars: activity, stars: low-mass, stars: rotation, starspots, techniques: photometric}, number = {1}, publisher = {IOP Publishing}, title = {{Surface rotation and photometric activity for Kepler targets. I. 
M and K main-sequence stars}}, doi = {10.3847/1538-4365/ab3b56}, volume = {244}, year = {2019}, } @unpublished{11627, abstract = {For a solar-like star, the surface rotation evolves with time, allowing in principle to estimate the age of a star from its surface rotation period. Here we are interested in measuring surface rotation periods of solar-like stars observed by the NASA mission Kepler. Different methods have been developed to track rotation signals in Kepler photometric light curves: time-frequency analysis based on wavelet techniques, autocorrelation and composite spectrum. We use the learning abilities of random forest classifiers to take decisions during two crucial steps of the analysis. First, given some input parameters, we discriminate the considered Kepler targets between rotating MS stars, non-rotating MS stars, red giants, binaries and pulsators. We then use a second classifier only on the MS rotating targets to decide the best data analysis treatment.}, author = {Breton, S. N. and Bugnet, Lisa Annabelle and Santos, A. R. G. and Saux, A. Le and Mathur, S. and Palle, P. L. and Garcia, R. A.}, booktitle = {arXiv}, keywords = {asteroseismology, rotation, solar-like stars, kepler, machine learning, random forest}, title = {{Determining surface rotation periods of solar-like stars observed by the Kepler mission using machine learning techniques}}, doi = {10.48550/arXiv.1906.09609}, year = {2019}, } @unpublished{11630, abstract = {The second mission of NASA’s Kepler satellite, K2, has collected hundreds of thousands of lightcurves for stars close to the ecliptic plane. This new sample could increase the number of known pulsating stars and then improve our understanding of those stars. For the moment only a few stars have been properly classified and published. In this work, we present a method to automaticly classify K2 pulsating stars using a Machine Learning technique called Random Forest. 
The objective is to sort out the stars in four classes: red giant (RG), main-sequence Solar-like stars (SL), classical pulsators (PULS) and Other. To do this we use the effective temperatures and the luminosities of the stars as well as the FliPer features, that measures the amount of power contained in the power spectral density. The classifier now retrieves the right classification for more than 80% of the stars.}, author = {Saux, A. Le and Bugnet, Lisa Annabelle and Mathur, S. and Breton, S. N. and Garcia, R. A.}, booktitle = {arXiv}, keywords = {asteroseismology - methods, data analysis - thecniques, machine learning - stars, oscillations}, title = {{Automatic classification of K2 pulsating stars using machine learning techniques}}, doi = {10.48550/arXiv.1906.09611}, year = {2019}, } @inproceedings{11826, abstract = {The diameter, radius and eccentricities are natural graph parameters. While these problems have been studied extensively, there are no known dynamic algorithms for them beyond the ones that follow from trivial recomputation after each update or from solving dynamic All-Pairs Shortest Paths (APSP), which is very computationally intensive. This is the situation for dynamic approximation algorithms as well, and even if only edge insertions or edge deletions need to be supported. This paper provides a comprehensive study of the dynamic approximation of Diameter, Radius and Eccentricities, providing both conditional lower bounds, and new algorithms whose bounds are optimal under popular hypotheses in fine-grained complexity. Some of the highlights include: - Under popular hardness hypotheses, there can be no significantly better fully dynamic approximation algorithms than recomputing the answer after each update, or maintaining full APSP. - Nearly optimal partially dynamic (incremental/decremental) algorithms can be achieved via efficient reductions to (incremental/decremental) maintenance of Single-Source Shortest Paths. 
For instance, a nearly (3/2+epsilon)-approximation to Diameter in directed or undirected n-vertex, m-edge graphs can be maintained decrementally in total time m^{1+o(1)}sqrt{n}/epsilon^2. This nearly matches the static 3/2-approximation algorithm for the problem that is known to be conditionally optimal.}, author = {Ancona, Bertie and Henzinger, Monika H and Roditty, Liam and Williams, Virginia Vassilevska and Wein, Nicole}, booktitle = {46th International Colloquium on Automata, Languages, and Programming}, isbn = {978-3-95977-109-2}, issn = {1868-8969}, location = {Patras, Greece}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Algorithms and hardness for diameter in dynamic graphs}}, doi = {10.4230/LIPICS.ICALP.2019.13}, volume = {132}, year = {2019}, } @inproceedings{11850, abstract = {Modern networked systems are increasingly reconfigurable, enabling demand-aware infrastructures whose resources can be adjusted according to the workload they currently serve. Such dynamic adjustments can be exploited to improve network utilization and hence performance, by moving frequently interacting communication partners closer, e.g., collocating them in the same server or datacenter. However, dynamically changing the embedding of workloads is algorithmically challenging: communication patterns are often not known ahead of time, but must be learned. During the learning process, overheads related to unnecessary moves (i.e., re-embeddings) should be minimized. This paper studies a fundamental model which captures the tradeoff between the benefits and costs of dynamically collocating communication partners on l servers, in an online manner. 
Our main contribution is a distributed online algorithm which is asymptotically almost optimal, i.e., almost matches the lower bound (also derived in this paper) on the competitive ratio of any (distributed or centralized) online algorithm.}, author = {Henzinger, Monika H and Neumann, Stefan and Schmid, Stefan}, booktitle = {SIGMETRICS'19: International Conference on Measurement and Modeling of Computer Systems}, isbn = {978-1-4503-6678-6}, location = {Phoenix, AZ, United States}, pages = {43–44}, publisher = {Association for Computing Machinery}, title = {{Efficient distributed workload (re-)embedding}}, doi = {10.1145/3309697.3331503}, year = {2019}, } @inbook{11847, abstract = {This paper serves as a user guide to the Vienna graph clustering framework. We review our general memetic algorithm, VieClus, to tackle the graph clustering problem. A key component of our contribution are natural recombine operators that employ ensemble clusterings as well as multi-level techniques. Lastly, we combine these techniques with a scalable communication protocol, producing a system that is able to compute high-quality solutions in a short amount of time. 
After giving a description of the algorithms employed, we establish the connection of the graph clustering problem to protein–protein interaction networks and moreover give a description on how the software can be used, what file formats are expected, and how this can be used to find functional groups in protein–protein interaction networks.}, author = {Biedermann, Sonja and Henzinger, Monika H and Schulz, Christian and Schuster, Bernhard}, booktitle = {Protein-Protein Interaction Networks}, editor = {Canzar, Stefan and Rojas Ringeling, Francisca}, isbn = {9781493998722}, issn = {1940-6029}, pages = {215–231}, publisher = {Springer Nature}, title = {{Vienna Graph Clustering}}, doi = {10.1007/978-1-4939-9873-9_16}, volume = {2074}, year = {2019}, } @inproceedings{11853, abstract = {We present a deterministic dynamic algorithm for maintaining a (1+ε)f-approximate minimum cost set cover with O(f log(Cn)/ε^2) amortized update time, when the input set system is undergoing element insertions and deletions. Here, n denotes the number of elements, each element appears in at most f sets, and the cost of each set lies in the range [1/C, 1]. Our result, together with that of Gupta~et~al.~[STOC'17], implies that there is a deterministic algorithm for this problem with O(f log(Cn)) amortized update time and O(min(log n, f)) -approximation ratio, which nearly matches the polynomial-time hardness of approximation for minimum set cover in the static setting. Our update time is only O(log (Cn)) away from a trivial lower bound. Prior to our work, the previous best approximation ratio guaranteed by deterministic algorithms was O(f^2), which was due to Bhattacharya~et~al.~[ICALP`15]. In contrast, the only result that guaranteed O(f) -approximation was obtained very recently by Abboud~et~al.~[STOC`19], who designed a dynamic algorithm with (1+ε)f-approximation ratio and O(f^2 log n/ε) amortized update time. 
Besides the extra O(f) factor in the update time compared to our and Gupta~et~al.'s results, the Abboud~et~al.~algorithm is randomized, and works only when the adversary is oblivious and the sets are unweighted (each set has the same cost). We achieve our result via the primal-dual approach, by maintaining a fractional packing solution as a dual certificate. This approach was pursued previously by Bhattacharya~et~al.~and Gupta~et~al., but not in the recent paper by Abboud~et~al. Unlike previous primal-dual algorithms that try to satisfy some local constraints for individual sets at all time, our algorithm basically waits until the dual solution changes significantly globally, and fixes the solution only where the fix is needed.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon}, booktitle = {60th Annual Symposium on Foundations of Computer Science}, isbn = {978-1-7281-4953-0}, issn = {2575-8454}, location = {Baltimore, MD, United States}, pages = {406--423}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{A new deterministic algorithm for dynamic set cover}}, doi = {10.1109/focs.2019.00033}, year = {2019}, } @inproceedings{11851, abstract = {The minimum cut problem for an undirected edge-weighted graph asks us to divide its set of nodes into two blocks while minimizing the weighted sum of the cut edges. In this paper, we engineer the fastest known exact algorithm for the problem. State-of-the-art algorithms like the algorithm of Padberg and Rinaldi or the algorithm of Nagamochi, Ono and Ibaraki identify edges that can be contracted to reduce the graph size such that at least one minimum cut is maintained in the contracted graph. Our algorithm achieves improvements in running time over these algorithms by a multitude of techniques. First, we use a recently developed fast and parallel inexact minimum cut algorithm to obtain a better bound for the problem. 
Afterwards, we use reductions that depend on this bound to reduce the size of the graph much faster than previously possible. We use improved data structures to further lower the running time of our algorithm. Additionally, we parallelize the contraction routines of Nagamochi et al. . Overall, we arrive at a system that significantly outperforms the fastest state-of-the-art solvers for the exact minimum cut problem.}, author = {Henzinger, Monika H and Noe, Alexander and Schulz, Christian}, booktitle = {33rd International Parallel and Distributed Processing Symposium}, isbn = {978-1-7281-1247-3}, issn = {1530-2075}, location = {Rio de Janeiro, Brazil}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Shared-memory exact minimum cuts}}, doi = {10.1109/ipdps.2019.00013}, year = {2019}, } @inproceedings{11865, abstract = {We present the first sublinear-time algorithm that can compute the edge connectivity λ of a network exactly on distributed message-passing networks (the CONGEST model), as long as the network contains no multi-edge. We present the first sublinear-time algorithm for a distributed message-passing network sto compute its edge connectivity λ exactly in the CONGEST model, as long as there are no parallel edges. Our algorithm takes Õ(n1−1/353D1/353+n1−1/706) time to compute λ and a cut of cardinality λ with high probability, where n and D are the number of nodes and the diameter of the network, respectively, and Õ hides polylogarithmic factors. This running time is sublinear in n (i.e. Õ(n1−є)) whenever D is. Previous sublinear-time distributed algorithms can solve this problem either (i) exactly only when λ=O(n1/8−є) [Thurimella PODC’95; Pritchard, Thurimella, ACM Trans. Algorithms’11; Nanongkai, Su, DISC’14] or (ii) approximately [Ghaffari, Kuhn, DISC’13; Nanongkai, Su, DISC’14]. To achieve this we develop and combine several new techniques. 
First, we design the first distributed algorithm that can compute a k-edge connectivity certificate for any k=O(n1−є) in time Õ(√nk+D). The previous sublinear-time algorithm can do so only when k=o(√n) [Thurimella PODC’95]. In fact, our algorithm can be turned into the first parallel algorithm with polylogarithmic depth and near-linear work. Previous near-linear work algorithms are essentially sequential and previous polylogarithmic-depth algorithms require Ω(mk) work in the worst case (e.g. [Karger, Motwani, STOC’93]). Second, we show that by combining the recent distributed expander decomposition technique of [Chang, Pettie, Zhang, SODA’19] with techniques from the sequential deterministic edge connectivity algorithm of [Kawarabayashi, Thorup, STOC’15], we can decompose the network into a sublinear number of clusters with small average diameter and without any mincut separating a cluster (except the “trivial” ones). This leads to a simplification of the Kawarabayashi-Thorup framework (except that we are randomized while they are deterministic). This might make this framework more useful in other models of computation. Finally, by extending the tree packing technique from [Karger STOC’96], we can find the minimum cut in time proportional to the number of components. 
As a byproduct of this technique, we obtain an Õ(n)-time algorithm for computing exact minimum cut for weighted graphs.}, author = {Daga, Mohit and Henzinger, Monika H and Nanongkai, Danupon and Saranurak, Thatchaphol}, booktitle = {Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing}, isbn = {978-1-4503-6705-9}, issn = {0737-8017}, location = {Phoenix, AZ, United States}, pages = {343–354}, publisher = {Association for Computing Machinery}, title = {{Distributed edge connectivity in sublinear time}}, doi = {10.1145/3313276.3316346}, year = {2019}, } @inproceedings{11871, abstract = {Many dynamic graph algorithms have an amortized update time, rather than a stronger worst-case guarantee. But amortized data structures are not suitable for real-time systems, where each individual operation has to be executed quickly. For this reason, there exist many recent randomized results that aim to provide a guarantee stronger than amortized expected. The strongest possible guarantee for a randomized algorithm is that it is always correct (Las Vegas), and has high-probability worst-case update time, which gives a bound on the time for each individual operation that holds with high probability. In this paper we present the first polylogarithmic high-probability worst-case time bounds for the dynamic spanner and the dynamic maximal matching problem. 1. For dynamic spanner, the only known o(n) worst-case bounds were O(n3/4) high-probability worst-case update time for maintaining a 3-spanner, and O(n5/9) for maintaining a 5-spanner. We give a O(1)k log3(n) high-probability worst-case time bound for maintaining a (2k – 1)-spanner, which yields the first worst-case polylog update time for all constant k. (All the results above maintain the optimal tradeoff of stretch 2k – 1 and Õ(n1+1/k) edges.) 2. 
For dynamic maximal matching, or dynamic 2-approximate maximum matching, no algorithm with o(n) worst-case time bound was known and we present an algorithm with O(log5 (n)) high-probability worst-case time; similar worst-case bounds existed only for maintaining a matching that was (2 + ∊)-approximate, and hence not maximal. Our results are achieved using a new approach for converting amortized guarantees to worst-case ones for randomized data structures by going through a third type of guarantee, which is a middle ground between the two above: an algorithm is said to have worst-case expected update time α if for every update σ, the expected time to process σ is at most α. Although stronger than amortized expected, the worst-case expected guarantee does not resolve the fundamental problem of amortization: a worst-case expected update time of O(1) still allows for the possibility that every 1/f(n) updates requires Θ(f(n)) time to process, for arbitrarily high f(n). In this paper we present a black-box reduction that converts any data structure with worst-case expected update time into one with a high-probability worst-case update time: the query time remains the same, while the update time increases by a factor of O(log2(n)). Thus we achieve our results in two steps: (1) First we show how to convert existing dynamic graph algorithms with amortized expected polylogarithmic running times into algorithms with worst-case expected polylogarithmic running times. (2) Then we use our black-box reduction to achieve the polylogarithmic high-probability worst-case time bound. 
All our algorithms are Las-Vegas-type algorithms.}, author = {Bernstein, Aaron and Forster, Sebastian and Henzinger, Monika H}, booktitle = {30th Annual ACM-SIAM Symposium on Discrete Algorithms}, location = {San Diego, CA, United States}, pages = {1899--1918}, publisher = {Society for Industrial and Applied Mathematics}, title = {{A deamortization approach for dynamic spanner and dynamic maximal matching}}, doi = {10.1137/1.9781611975482.115}, year = {2019}, } @article{11898, abstract = {We build upon the recent papers by Weinstein and Yu (FOCS'16), Larsen (FOCS'12), and Clifford et al. (FOCS'15) to present a general framework that gives amortized lower bounds on the update and query times of dynamic data structures. Using our framework, we present two concrete results. (1) For the dynamic polynomial evaluation problem, where the polynomial is defined over a finite field of size n1+Ω(1) and has degree n, any dynamic data structure must either have an amortized update time of Ω((lgn/lglgn)2) or an amortized query time of Ω((lgn/lglgn)2). (2) For the dynamic online matrix vector multiplication problem, where we get an n×n matrix whose entires are drawn from a finite field of size nΘ(1), any dynamic data structure must either have an amortized update time of Ω((lgn/lglgn)2) or an amortized query time of Ω(n⋅(lgn/lglgn)2). For these two problems, the previous works by Larsen (FOCS'12) and Clifford et al. (FOCS'15) gave the same lower bounds, but only for worst case update and query times. 
Our bounds match the highest unconditional lower bounds known till date for any dynamic problem in the cell-probe model.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Neumann, Stefan}, issn = {0304-3975}, journal = {Theoretical Computer Science}, pages = {72--87}, publisher = {Elsevier}, title = {{New amortized cell-probe lower bounds for dynamic problems}}, doi = {10.1016/j.tcs.2019.01.043}, volume = {779}, year = {2019}, } @article{11957, abstract = {Cross-coupling reactions mediated by dual nickel/photocatalysis are synthetically attractive but rely mainly on expensive, non-recyclable noble-metal complexes as photocatalysts. Heterogeneous semiconductors, which are commonly used for artificial photosynthesis and wastewater treatment, are a sustainable alternative. Graphitic carbon nitrides, a class of metal-free polymers that can be easily prepared from bulk chemicals, are heterogeneous semiconductors with high potential for photocatalytic organic transformations. Here, we demonstrate that graphitic carbon nitrides in combination with nickel catalysis can induce selective C−O cross-couplings of carboxylic acids with aryl halides, yielding the respective aryl esters in excellent yield and selectivity. The heterogeneous organic photocatalyst exhibits a broad substrate scope, is able to harvest green light, and can be recycled multiple times. In situ FTIR was used to track the reaction progress to study this transformation at different irradiation wavelengths and reaction scales.}, author = {Pieber, Bartholomäus and Malik, Jamal A. 
and Cavedon, Cristian and Gisbertz, Sebastian and Savateev, Aleksandr and Cruz, Daniel and Heil, Tobias and Zhang, Guigang and Seeberger, Peter H.}, issn = {1521-3773}, journal = {Angewandte Chemie International Edition}, number = {28}, pages = {9575--9580}, publisher = {Wiley}, title = {{Semi‐heterogeneous dual nickel/photocatalysis using carbon nitrides: Esterification of carboxylic acids with aryl halides}}, doi = {10.1002/anie.201902785}, volume = {58}, year = {2019}, } @article{11984, abstract = {Differentially protected galactosamine building blocks are key components for the synthesis of human and bacterial oligosaccharides. The azidophenylselenylation of 3,4,6-tri-O-acetyl-d-galactal provides straightforward access to the corresponding 2-nitrogenated glycoside. Poor reproducibility and the use of azides that lead to the formation of potentially explosive and toxic species limit the scalability of this reaction and render it a bottleneck for carbohydrate synthesis. Here, we present a method for the safe, efficient, and reliable azidophenylselenylation of 3,4,6-tri-O-acetyl-d-galactal at room temperature, using continuous flow chemistry. Careful analysis of the transformation resulted in reaction conditions that produce minimal side products while the reaction time was reduced drastically when compared to batch reactions. 
The flow setup is readily scalable to process 5 mmol of galactal in 3 h, producing 1.2 mmol/h of product.}, author = {Guberman, Mónica and Pieber, Bartholomäus and Seeberger, Peter H.}, issn = {1520-586X}, journal = {Organic Process Research and Development}, number = {12}, pages = {2764--2770}, publisher = {American Chemical Society}, title = {{Safe and scalable continuous flow azidophenylselenylation of galactal to prepare galactosamine building blocks}}, doi = {10.1021/acs.oprd.9b00456}, volume = {23}, year = {2019}, } @article{11982, abstract = {A carbon nitride material can be combined with homogeneous nickel catalysts for light-mediated cross-couplings of aryl bromides with alcohols under mild conditions. The metal-free heterogeneous semiconductor is fully recyclable and couples a broad range of electron-poor aryl bromides with primary and secondary alcohols as well as water. The application for intramolecular reactions and the synthesis of active pharmaceutical ingredients was demonstrated. The catalytic protocol is applicable for the coupling of aryl iodides with thiols as well.}, author = {Cavedon, Cristian and Madani, Amiera and Seeberger, Peter H. 
and Pieber, Bartholomäus}, issn = {1523-7052}, journal = {Organic Letters}, number = {13}, pages = {5331--5334}, publisher = {American Chemical Society}, title = {{Semiheterogeneous dual nickel/photocatalytic (thio)etherification using carbon nitrides}}, doi = {10.1021/acs.orglett.9b01957}, volume = {21}, year = {2019}, } @article{170, abstract = {Upper and lower bounds, of the expected order of magnitude, are obtained for the number of rational points of bounded height on any quartic del Pezzo surface over ℚ that contains a conic defined over ℚ .}, author = {Browning, Timothy D and Sofos, Efthymios}, journal = {Mathematische Annalen}, number = {3-4}, pages = {977--1016}, publisher = {Springer Nature}, title = {{Counting rational points on quartic del Pezzo surfaces with a rational conic}}, doi = {10.1007/s00208-018-1716-6}, volume = {373}, year = {2019}, } @article{441, author = {Kalinin, Nikita and Shkolnikov, Mikhail}, issn = {2199-6768}, journal = {European Journal of Mathematics}, number = {3}, pages = {909–928}, publisher = {Springer Nature}, title = {{Tropical formulae for summation over a part of SL(2,Z)}}, doi = {10.1007/s40879-018-0218-0}, volume = {5}, year = {2019}, } @inbook{5793, abstract = {The transcription coactivator, Yes-associated protein (YAP), which is a nuclear effector of the Hippo signaling pathway, has been shown to be a mechano-transducer. By using mutant fish and human 3D spheroids, we have recently demonstrated that YAP is also a mechano-effector. YAP functions in three-dimensional (3D) morphogenesis of organ and global body shape by controlling actomyosin-mediated tissue tension. In this chapter, we present a platform that links the findings in fish embryos with human cells. 
The protocols for analyzing tissue tension-mediated global body shape/organ morphogenesis in vivo and ex vivo using medaka fish embryos and in vitro using human cell spheroids represent useful tools for unraveling the molecular mechanisms by which YAP functions in regulating global body/organ morphogenesis.}, author = {Asaoka, Yoichi and Morita, Hitoshi and Furumoto, Hiroko and Heisenberg, Carl-Philipp J and Furutani-Seiki, Makoto}, booktitle = {The hippo pathway}, editor = {Hergovich, Alexander}, isbn = {978-1-4939-8909-6}, pages = {167--181}, publisher = {Springer}, title = {{Studying YAP-mediated 3D morphogenesis using fish embryos and human spheroids}}, doi = {10.1007/978-1-4939-8910-2_14}, volume = {1893}, year = {2019}, } @article{5887, abstract = {Cryptographic security is usually defined as a guarantee that holds except when a bad event with negligible probability occurs, and nothing is guaranteed in that bad case. However, in settings where such failure can happen with substantial probability, one needs to provide guarantees even for the bad case. A typical example is where a (possibly weak) password is used instead of a secure cryptographic key to protect a session, the bad event being that the adversary correctly guesses the password. In a situation with multiple such sessions, a per-session guarantee is desired: any session for which the password has not been guessed remains secure, independently of whether other sessions have been compromised. A new formalism for stating such gracefully degrading security guarantees is introduced and applied to analyze the examples of password-based message authentication and password-based encryption. While a natural per-message guarantee is achieved for authentication, the situation of password-based encryption is more delicate: a per-session confidentiality guarantee only holds against attackers for which the distribution of password-guessing effort over the sessions is known in advance. 
In contrast, for more general attackers without such a restriction, a strong, composable notion of security cannot be achieved.}, author = {Demay, Gregory and Gazi, Peter and Maurer, Ueli and Tackmann, Bjorn}, issn = {0926227X}, journal = {Journal of Computer Security}, number = {1}, pages = {75--111}, publisher = {IOS Press}, title = {{Per-session security: Password-based cryptography revisited}}, doi = {10.3233/JCS-181131}, volume = {27}, year = {2019}, } @inproceedings{6163, abstract = {We propose a new non-orthogonal basis to express the 3D Euclidean space in terms of a regular grid. Every grid point, each represented by integer 3-coordinates, corresponds to rhombic dodecahedron centroid. Rhombic dodecahedron is a space filling polyhedron which represents the close packing of spheres in 3D space and the Voronoi structures of the face centered cubic (FCC) lattice. In order to illustrate the interest of the new coordinate system, we propose the characterization of 3D digital plane with its topological features, such as the interrelation between the thickness of the digital plane and the separability constraint we aim to obtain. A characterization of a 3D digital sphere with relevant topological features is proposed as well with the help of a 48 symmetry that comes with the new coordinate system.}, author = {Biswas, Ranita and Largeteau-Skapin, Gaëlle and Zrour, Rita and Andres, Eric}, booktitle = {21st IAPR International Conference on Discrete Geometry for Computer Imagery}, isbn = {978-3-6624-6446-5}, issn = {0302-9743}, location = {Marne-la-Vallée, France}, pages = {27--37}, publisher = {Springer Berlin Heidelberg}, title = {{Rhombic dodecahedron grid—coordinate system and 3D digital object definitions}}, doi = {10.1007/978-3-030-14085-4_3}, volume = {11414}, year = {2019}, } @article{6515, abstract = {We give non-degeneracy criteria for Riemannian simplices based on simplices in spaces of constant sectional curvature. 
It extends previous work on Riemannian simplices, where we developed Riemannian simplices with respect to Euclidean reference simplices. The criteria we give in this article are in terms of quality measures for spaces of constant curvature that we develop here. We see that simplices in spaces that have nearly constant curvature are already non-degenerate under very weak quality demands. This is of importance because it allows for sampling of Riemannian manifolds based on anisotropy of the manifold and not (absolute) curvature.}, author = {Dyer, Ramsay and Vegter, Gert and Wintraecken, Mathijs}, issn = {1920-180X}, journal = {Journal of Computational Geometry}, number = {1}, pages = {223--256}, publisher = {Carleton University}, title = {{Simplices modelled on spaces of constant curvature}}, doi = {10.20382/jocg.v10i1a9}, volume = {10}, year = {2019}, } @inproceedings{6528, abstract = {We construct a verifiable delay function (VDF) by showing how the Rivest-Shamir-Wagner time-lock puzzle can be made publicly verifiable. Concretely, we give a statistically sound public-coin protocol to prove that a tuple (N,x,T,y) satisfies y=x^(2^T) (mod N) where the prover doesn’t know the factorization of N and its running time is dominated by solving the puzzle, that is, compute x^(2^T), which is conjectured to require T sequential squarings. To get a VDF we make this protocol non-interactive using the Fiat-Shamir heuristic. The motivation for this work comes from the Chia blockchain design, which uses a VDF as a key ingredient. 
For typical parameters (T ≤ 2^40, N = 2048), our proofs are of size around 10 KB, verification cost around three RSA exponentiations and computing the proof is 8000 times faster than solving the puzzle even without any parallelism.}, author = {Pietrzak, Krzysztof Z}, booktitle = {10th Innovations in Theoretical Computer Science Conference}, isbn = {978-3-95977-095-8}, issn = {1868-8969}, location = {San Diego, CA, United States}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Simple verifiable delay functions}}, doi = {10.4230/LIPICS.ITCS.2019.60}, volume = {124}, year = {2019}, } @inproceedings{6565, abstract = {In this paper, we address the problem of synthesizing periodic switching controllers for stabilizing a family of linear systems. Our broad approach consists of constructing a finite game graph based on the family of linear systems such that every winning strategy on the game graph corresponds to a stabilizing switching controller for the family of linear systems. The construction of a (finite) game graph, the synthesis of a winning strategy and the extraction of a stabilizing controller are all computationally feasible. We illustrate our method on an example.}, author = {Kundu, Atreyee and Garcia Soto, Miriam and Prabhakar, Pavithra}, booktitle = {5th Indian Control Conference Proceedings}, isbn = {978-153866246-5}, location = {Delhi, India}, publisher = {IEEE}, title = {{Formal synthesis of stabilizing controllers for periodically controlled linear switched systems}}, doi = {10.1109/INDIANCC.2019.8715598}, year = {2019}, } @inproceedings{6628, abstract = {Fejes Tóth [5] and Schneider [9] studied approximations of smooth convex hypersurfaces in Euclidean space by piecewise flat triangular meshes with a given number of vertices on the hypersurface that are optimal with respect to Hausdorff distance. 
They proved that this Hausdorff distance decreases inversely proportional with m^(2/(d−1)), where m is the number of vertices and d is the dimension of Euclidean space. Moreover the proportionality constant can be expressed in terms of the Gaussian curvature, an intrinsic quantity. In this short note, we prove the extrinsic nature of this constant for manifolds of sufficiently high codimension. We do so by constructing a family of isometric embeddings of the flat torus in Euclidean space.}, author = {Vegter, Gert and Wintraecken, Mathijs}, booktitle = {The 31st Canadian Conference in Computational Geometry}, location = {Edmonton, Canada}, pages = {275--279}, title = {{The extrinsic nature of the Hausdorff distance of optimal triangulations of manifolds}}, year = {2019}, } @inproceedings{6648, abstract = {Various kinds of data are routinely represented as discrete probability distributions. Examples include text documents summarized by histograms of word occurrences and images represented as histograms of oriented gradients. Viewing a discrete probability distribution as a point in the standard simplex of the appropriate dimension, we can understand collections of such objects in geometric and topological terms. Importantly, instead of using the standard Euclidean distance, we look into dissimilarity measures with information-theoretic justification, and we develop the theory needed for applying topological data analysis in this setting. 
In doing so, we emphasize constructions that enable the usage of existing computational topology software in this context.}, author = {Edelsbrunner, Herbert and Virk, Ziga and Wagner, Hubert}, booktitle = {35th International Symposium on Computational Geometry}, isbn = {9783959771047}, location = {Portland, OR, United States}, pages = {31:1--31:14}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Topological data analysis in information space}}, doi = {10.4230/LIPICS.SOCG.2019.31}, volume = {129}, year = {2019}, } @article{6659, abstract = {Chemical labeling of proteins with synthetic molecular probes offers the possibility to probe the functions of proteins of interest in living cells. However, the methods for covalently labeling targeted proteins using complementary peptide tag-probe pairs are still limited, irrespective of the versatility of such pairs in biological research. Herein, we report the new CysHis tag-Ni(II) probe pair for the specific covalent labeling of proteins. A broad-range evaluation of the reactivity profiles of the probe and the CysHis peptide tag afforded a tag-probe pair with an optimized and high labeling selectivity and reactivity. In particular, the labeling specificity of this pair was notably improved compared to the previously reported one. 
This pair was successfully utilized for the fluorescence imaging of membrane proteins on the surfaces of living cells, demonstrating its potential utility in biological research.}, author = {Zenmyo, Naoki and Tokumaru, Hiroki and Uchinomiya, Shohei and Fuchida, Hirokazu and Tabata, Shigekazu and Hamachi, Itaru and Shigemoto, Ryuichi and Ojida, Akio}, issn = {0009-2673}, journal = {Bulletin of the Chemical Society of Japan}, number = {5}, pages = {995--1000}, publisher = {The Chemical Society of Japan}, title = {{Optimized reaction pair of the CysHis tag and Ni(II)-NTA probe for highly selective chemical labeling of membrane proteins}}, doi = {10.1246/bcsj.20190034}, volume = {92}, year = {2019}, } @article{6662, abstract = {In phase retrieval, we want to recover an unknown signal 𝑥∈ℂ𝑑 from n quadratic measurements of the form 𝑦𝑖=|⟨𝑎𝑖,𝑥⟩|2+𝑤𝑖, where 𝑎𝑖∈ℂ𝑑 are known sensing vectors and 𝑤𝑖 is measurement noise. We ask the following weak recovery question: What is the minimum number of measurements n needed to produce an estimator 𝑥^(𝑦) that is positively correlated with the signal 𝑥? We consider the case of Gaussian vectors 𝑎𝑖. We prove that—in the high-dimensional limit—a sharp phase transition takes place, and we locate the threshold in the regime of vanishingly small noise. For 𝑛≤𝑑−𝑜(𝑑), no estimator can do significantly better than random and achieve a strictly positive correlation. For 𝑛≥𝑑+𝑜(𝑑), a simple spectral estimator achieves a positive correlation. Surprisingly, numerical simulations with the same spectral estimator demonstrate promising performance with realistic sensing matrices. Spectral methods are used to initialize non-convex optimization algorithms in phase retrieval, and our approach can boost the performance in this setting as well. Our impossibility result is based on classical information-theoretic arguments. The spectral algorithm computes the leading eigenvector of a weighted empirical covariance matrix. 
We obtain a sharp characterization of the spectral properties of this random matrix using tools from free probability and generalizing a recent result by Lu and Li. Both the upper bound and lower bound generalize beyond phase retrieval to measurements 𝑦𝑖 produced according to a generalized linear model. As a by-product of our analysis, we compare the threshold of the proposed spectral method with that of a message passing algorithm.}, author = {Mondelli, Marco and Montanari, Andrea}, issn = {1615-3383}, journal = {Foundations of Computational Mathematics}, number = {3}, pages = {703--773}, publisher = {Springer}, title = {{Fundamental limits of weak recovery with applications to phase retrieval}}, doi = {10.1007/s10208-018-9395-y}, volume = {19}, year = {2019}, } @article{6672, abstract = {The construction of anisotropic triangulations is desirable for various applications, such as the numerical solving of partial differential equations and the representation of surfaces in graphics. To solve this notoriously difficult problem in a practical way, we introduce the discrete Riemannian Voronoi diagram, a discrete structure that approximates the Riemannian Voronoi diagram. This structure has been implemented and was shown to lead to good triangulations in $\mathbb{R}^2$ and on surfaces embedded in $\mathbb{R}^3$ as detailed in our experimental companion paper. In this paper, we study theoretical aspects of our structure. Given a finite set of points $\mathcal{P}$ in a domain $\Omega$ equipped with a Riemannian metric, we compare the discrete Riemannian Voronoi diagram of $\mathcal{P}$ to its Riemannian Voronoi diagram. Both diagrams have dual structures called the discrete Riemannian Delaunay and the Riemannian Delaunay complex. We provide conditions that guarantee that these dual structures are identical. 
It then follows from previous results that the discrete Riemannian Delaunay complex can be embedded in $\Omega$ under sufficient conditions, leading to an anisotropic triangulation with curved simplices. Furthermore, we show that, under similar conditions, the simplices of this triangulation can be straightened.}, author = {Boissonnat, Jean-Daniel and Rouxel-Labbé, Mael and Wintraecken, Mathijs}, issn = {1095-7111}, journal = {SIAM Journal on Computing}, number = {3}, pages = {1046--1097}, publisher = {Society for Industrial \& Applied Mathematics (SIAM)}, title = {{Anisotropic triangulations via discrete Riemannian Voronoi diagrams}}, doi = {10.1137/17m1152292}, volume = {48}, year = {2019}, } @inproceedings{6725, abstract = {A Valued Constraint Satisfaction Problem (VCSP) provides a common framework that can express a wide range of discrete optimization problems. A VCSP instance is given by a finite set of variables, a finite domain of labels, and an objective function to be minimized. This function is represented as a sum of terms where each term depends on a subset of the variables. To obtain different classes of optimization problems, one can restrict all terms to come from a fixed set Γ of cost functions, called a language. Recent breakthrough results have established a complete complexity classification of such classes with respect to language Γ: if all cost functions in Γ satisfy a certain algebraic condition then all Γ-instances can be solved in polynomial time, otherwise the problem is NP-hard. Unfortunately, testing this condition for a given language Γ is known to be NP-hard. We thus study exponential algorithms for this meta-problem. We show that the tractability condition of a finite-valued language Γ can be tested in O(√3^{3|D|}⋅poly(size(Γ))) time, where D is the domain of Γ and poly(⋅) is some fixed polynomial. We also obtain a matching lower bound under the Strong Exponential Time Hypothesis (SETH). 
More precisely, we prove that for any constant δ<1 there is no O(√3^{3δ|D|}) algorithm, assuming that SETH holds.}, author = {Kolmogorov, Vladimir}, booktitle = {46th International Colloquium on Automata, Languages and Programming}, isbn = {978-3-95977-109-2}, issn = {1868-8969}, location = {Patras, Greece}, pages = {77:1--77:12}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Testing the complexity of a valued CSP language}}, doi = {10.4230/LIPICS.ICALP.2019.77}, volume = {132}, year = {2019}, } @inbook{6726, abstract = {Randomness is an essential part of any secure cryptosystem, but many constructions rely on distributions that are not uniform. This is particularly true for lattice based cryptosystems, which more often than not make use of discrete Gaussian distributions over the integers. For practical purposes it is crucial to evaluate the impact that approximation errors have on the security of a scheme to provide the best possible trade-off between security and performance. Recent years have seen surprising results allowing to use relatively low precision while maintaining high levels of security. A key insight in these results is that sampling a distribution with low relative error can provide very strong security guarantees. Since floating point numbers provide guarantees on the relative approximation error, they seem a suitable tool in this setting, but it is not obvious which sampling algorithms can actually profit from them. While previous works have shown that inversion sampling can be adapted to provide a low relative error (Pöppelmann et al., CHES 2014; Prest, ASIACRYPT 2017), other works have called into question if this is possible for other sampling techniques (Zheng et al., Eprint report 2018/309). In this work, we consider all sampling algorithms that are popular in the cryptographic setting and analyze the relationship of floating point precision and the resulting relative error. 
We show that all of the algorithms either natively achieve a low relative error or can be adapted to do so.}, author = {Walter, Michael}, booktitle = {Progress in Cryptology – AFRICACRYPT 2019}, editor = {Buchmann, J and Nitaj, A and Rachidi, T}, isbn = {978-3-0302-3695-3}, issn = {0302-9743}, location = {Rabat, Morocco}, pages = {157--180}, publisher = {Springer Nature}, title = {{Sampling the integers with low relative error}}, doi = {10.1007/978-3-030-23696-0_9}, volume = {11627}, year = {2019}, } @article{6663, abstract = {Consider the problem of constructing a polar code of block length N for a given transmission channel W. Previous approaches require one to compute the reliability of the N synthetic channels and then use only those that are sufficiently reliable. However, we know from two independent works by Schürch and by Bardet et al. that the synthetic channels are partially ordered with respect to degradation. Hence, it is natural to ask whether the partial order can be exploited to reduce the computational burden of the construction problem. We show that, if we take advantage of the partial order, we can construct a polar code by computing the reliability of roughly a fraction 1/ log 3/2 N of the synthetic channels. In particular, we prove that N/ log 3/2 N is a lower bound on the number of synthetic channels to be considered and such a bound is tight up to a multiplicative factor log log N. This set of roughly N/ log 3/2 N synthetic channels is universal, in the sense that it allows one to construct polar codes for any W, and it can be identified by solving a maximum matching problem on a bipartite graph. Our proof technique consists of reducing the construction problem to the problem of computing the maximum cardinality of an antichain for a suitable partially ordered set. 
As such, this method is general, and it can be used to further improve the complexity of the construction problem, in case a refined partial order on the synthetic channels of polar codes is discovered.}, author = {Mondelli, Marco and Hassani, Hamed and Urbanke, Rudiger}, journal = {IEEE Transactions on Information Theory}, number = {5}, pages = {2782--2791}, publisher = {IEEE}, title = {{Construction of polar codes with sublinear complexity}}, doi = {10.1109/tit.2018.2889667}, volume = {65}, year = {2019}, } @inproceedings{6747, abstract = {We establish connections between the problem of learning a two-layer neural network and tensor decomposition. We consider a model with feature vectors x∈ℝ^d, r hidden units with weights {w_i}_{1≤i≤r} and output y∈ℝ, i.e., y=∑_{i=1}^{r} σ(w_i^𝖳 x), with activation functions given by low-degree polynomials. In particular, if σ(x)=a_0+a_1x+a_3x^3, we prove that no polynomial-time learning algorithm can outperform the trivial predictor that assigns to each example the response variable 𝔼(y), when d^{3/2}≪r≪d^2. Our conclusion holds for a `natural data distribution', namely standard Gaussian feature vectors x, and output distributed according to a two-layer neural network with random isotropic weights, and under a certain complexity-theoretic assumption on tensor decomposition. Roughly speaking, we assume that no polynomial-time algorithm can substantially outperform current methods for tensor decomposition based on the sum-of-squares hierarchy. We also prove generalizations of this statement for higher degree polynomial activations, and non-random weight vectors. Remarkably, several existing algorithms for learning two-layer networks with rigorous guarantees are based on tensor decomposition. Our results support the idea that this is indeed the core computational difficulty in learning such networks, under the stated generative model for the data. 
As a side result, we show that under this model learning the network requires accurate learning of its weights, a property that does not hold in a more general setting. }, author = {Mondelli, Marco and Montanari, Andrea}, booktitle = {Proceedings of the 22nd International Conference on Artificial Intelligence and Statistics}, location = {Naha, Okinawa, Japan}, pages = {1051--1060}, publisher = {Proceedings of Machine Learning Research}, title = {{On the connection between learning two-layers neural networks and tensor decomposition}}, volume = {89}, year = {2019}, } @article{6750, abstract = {Polar codes have gained extensive attention during the past few years and recently they have been selected for the next generation of wireless communications standards (5G). Successive-cancellation-based (SC-based) decoders, such as SC list (SCL) and SC flip (SCF), provide a reasonable error performance for polar codes at the cost of low decoding speed. Fast SC-based decoders, such as Fast-SSC, Fast-SSCL, and Fast-SSCF, identify the special constituent codes in a polar code graph off-line, produce a list of operations, store the list in memory, and feed the list to the decoder to decode the constituent codes in order efficiently, thus increasing the decoding speed. However, the list of operations is dependent on the code rate and as the rate changes, a new list is produced, making fast SC-based decoders not rate-flexible. In this paper, we propose a completely rate-flexible fast SC-based decoder by creating the list of operations directly in hardware, with low implementation complexity. We further propose a hardware architecture implementing the proposed method and show that the area occupation of the rate-flexible fast SC-based decoder in this paper is only 38% of the total area of the memory-based base-line decoder when 5G code rates are supported. 
}, author = {Hashemi, Seyyed Ali and Condo, Carlo and Mondelli, Marco and Gross, Warren J}, issn = {1053587X}, journal = {IEEE Transactions on Signal Processing}, number = {22}, publisher = {IEEE}, title = {{Rate-flexible fast polar decoders}}, doi = {10.1109/TSP.2019.2944738}, volume = {67}, year = {2019}, } @article{6759, abstract = {We consider the graph class Grounded-L corresponding to graphs that admit an intersection representation by L-shaped curves, where additionally the topmost points of each curve are assumed to belong to a common horizontal line. We prove that Grounded-L graphs admit an equivalent characterisation in terms of vertex ordering with forbidden patterns. We also compare this class to related intersection classes, such as the grounded segment graphs, the monotone L-graphs (a.k.a. max point-tolerance graphs), or the outer-1-string graphs. We give constructions showing that these classes are all distinct and satisfy only trivial or previously known inclusions.}, author = {Jelínek, Vít and Töpfer, Martin}, issn = {10778926}, journal = {Electronic Journal of Combinatorics}, number = {3}, publisher = {Electronic Journal of Combinatorics}, title = {{On grounded L-graphs and their relatives}}, doi = {10.37236/8096}, volume = {26}, year = {2019}, } @inproceedings{6822, abstract = {In two-player games on graphs, the players move a token through a graph to produce an infinite path, which determines the qualitative winner or quantitative payoff of the game. In bidding games, in each turn, we hold an auction between the two players to determine which player moves the token. 
Bidding games have largely been studied with concrete bidding mechanisms that are variants of a first-price auction: in each turn both players simultaneously submit bids, the higher bidder moves the token, and pays his bid to the lower bidder in Richman bidding, to the bank in poorman bidding, and in taxman bidding, the bid is split between the other player and the bank according to a predefined constant factor. Bidding games are deterministic games. They have an intriguing connection with a fragment of stochastic games called random-turn games. We study, for the first time, a combination of bidding games with probabilistic behavior; namely, we study bidding games that are played on Markov decision processes, where the players bid for the right to choose the next action, which determines the probability distribution according to which the next vertex is chosen. We study parity and mean-payoff bidding games on MDPs and extend results from the deterministic bidding setting to the probabilistic one.}, author = {Avni, Guy and Henzinger, Thomas A and Ibsen-Jensen, Rasmus and Novotny, Petr}, booktitle = {Proceedings of the 13th International Conference of Reachability Problems}, isbn = {978-303030805-6}, issn = {0302-9743}, location = {Brussels, Belgium}, pages = {1--12}, publisher = {Springer}, title = {{Bidding games on Markov decision processes}}, doi = {10.1007/978-3-030-30806-3_1}, volume = {11674}, year = {2019}, } @inproceedings{6887, abstract = {The fundamental model-checking problem, given as input a model and a specification, asks for the algorithmic verification of whether the model satisfies the specification. Two classical models for reactive systems are graphs and Markov decision processes (MDPs). 
A basic specification formalism in the verification of reactive systems is the strong fairness (aka Streett) objective, where given different types of requests and corresponding grants, the requirement is that for each type, if the request event happens infinitely often, then the corresponding grant event must also happen infinitely often. All omega-regular objectives can be expressed as Streett objectives and hence they are canonical in verification. Consider graphs/MDPs with n vertices, m edges, and a Streett objective with k pairs, and let b denote the size of the description of the Streett objective for the sets of requests and grants. The current best-known algorithm for the problem requires time O(min(n^2, m sqrt{m log n}) + b log n). In this work we present randomized near-linear time algorithms, with expected running time O~(m + b), where the O~ notation hides poly-log factors. Our randomized algorithms are near-linear in the size of the input, and hence optimal up to poly-log factors. }, author = {Chatterjee, Krishnendu and Dvorák, Wolfgang and Henzinger, Monika H and Svozil, Alexander}, booktitle = {Leibniz International Proceedings in Informatics}, location = {Amsterdam, Netherlands}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Near-linear time algorithms for Streett objectives in graphs and MDPs}}, doi = {10.4230/LIPICS.CONCUR.2019.7}, volume = {140}, year = {2019}, } @inproceedings{6888, abstract = {In this paper, we design novel liquid time-constant recurrent neural networks for robotic control, inspired by the brain of the nematode, C. elegans. In the worm's nervous system, neurons communicate through nonlinear time-varying synaptic links established amongst them by their particular wiring structure. This property enables neurons to express liquid time-constants dynamics and therefore allows the network to originate complex behaviors with a small number of neurons. 
We identify neuron-pair communication motifs as design operators and use them to configure compact neuronal network structures to govern sequential robotic tasks. The networks are systematically designed to map the environmental observations to motor actions, by their hierarchical topology from sensory neurons, through recurrently-wired interneurons, to motor neurons. The networks are then parametrized in a supervised-learning scheme by a search-based algorithm. We demonstrate that obtained networks realize interpretable dynamics. We evaluate their performance in controlling mobile and arm robots, and compare their attributes to other artificial neural network-based control agents. Finally, we experimentally show their superior resilience to environmental noise, compared to the existing machine learning-based methods.}, author = {Lechner, Mathias and Hasani, Ramin and Zimmer, Manuel and Henzinger, Thomas A and Grosu, Radu}, booktitle = {Proceedings - IEEE International Conference on Robotics and Automation}, isbn = {9781538660270}, location = {Montreal, QC, Canada}, publisher = {IEEE}, title = {{Designing worm-inspired neural networks for interpretable robotic control}}, doi = {10.1109/icra.2019.8793840}, volume = {2019-May}, year = {2019}, } @inproceedings{6886, abstract = {In two-player games on graphs, the players move a token through a graph to produce an infinite path, which determines the winner of the game. Such games are central in formal methods since they model the interaction between a non-terminating system and its environment. In bidding games the players bid for the right to move the token: in each round, the players simultaneously submit bids, and the higher bidder moves the token and pays the other player. Bidding games are known to have a clean and elegant mathematical structure that relies on the ability of the players to submit arbitrarily small bids. 
Many applications, however, require a fixed granularity for the bids, which can represent, for example, the monetary value expressed in cents. We study, for the first time, the combination of discrete-bidding and infinite-duration games. Our most important result proves that these games form a large determined subclass of concurrent games, where determinacy is the strong property that there always exists exactly one player who can guarantee winning the game. In particular, we show that, in contrast to non-discrete bidding games, the mechanism with which tied bids are resolved plays an important role in discrete-bidding games. We study several natural tie-breaking mechanisms and show that, while some do not admit determinacy, most natural mechanisms imply determinacy for every pair of initial budgets. }, author = {Aghajohari, Milad and Avni, Guy and Henzinger, Thomas A}, location = {Amsterdam, Netherlands}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Determinacy in discrete-bidding infinite-duration games}}, doi = {10.4230/LIPICS.CONCUR.2019.20}, volume = {140}, year = {2019}, }