@article{11074, author = {Hatch, Emily M. and Hetzer, Martin W.}, issn = {0960-9822}, journal = {Current Biology}, keywords = {General Agricultural and Biological Sciences, General Biochemistry, Genetics and Molecular Biology}, number = {10}, pages = {R397--R399}, publisher = {Elsevier}, title = {{Chromothripsis}}, doi = {10.1016/j.cub.2015.02.033}, volume = {25}, year = {2015}, } @article{11519, abstract = {Faint Lyα emitters become increasingly rare toward the reionization epoch (z ∼ 6–7). However, observations from a very large (∼5 deg^2) Lyα narrow-band survey at z = 6.6 show that this is not the case for the most luminous emitters, capable of ionizing their own local bubbles. Here we present follow-up observations of the two most luminous Lyα candidates in the COSMOS field: “MASOSA” and “CR7.” We used X-SHOOTER, SINFONI, and FORS2 on the Very Large Telescope, and DEIMOS on Keck, to confirm both candidates beyond any doubt. We find redshifts of z = 6.541 and z = 6.604 for “MASOSA” and “CR7,” respectively. MASOSA has a strong detection in Lyα with a line width of 386 ± 30 km s^{-1} (FWHM) and a very high EW_0 (>200 Å), but is undetected in the continuum, implying a very low stellar mass and a likely young, metal-poor stellar population. “CR7,” with an observed Lyα luminosity of 10^{43.92±0.05} erg s^{-1}, is the most luminous Lyα emitter ever found at z > 6 and is spatially extended (∼16 kpc). “CR7” reveals a narrow Lyα line with 266 ± 15 km s^{-1} FWHM, being detected in the near-infrared (NIR) (rest-frame UV; β = −2.3 ± 0.1) and in IRAC/Spitzer. We detect a narrow He II 1640 Å emission line (6σ, FWHM = 130 ± 30 km s^{-1}) in CR7, which can explain the clear excess seen in the J-band photometry (EW_0 ∼ 80 Å). We find no other emission lines from the UV to the NIR in our X-SHOOTER spectra (He II/O III] 1663 Å > 3 and He II/C III] 1908 Å > 2.5). We conclude that CR7 is best explained by a combination of a PopIII-like population, which dominates the rest-frame UV and the nebular emission, and a more normal stellar population, which presumably dominates the mass. Hubble Space Telescope/WFC3 observations show that the light is indeed spatially separated between a very blue component, coincident with the Lyα and He II emission, and two red components (∼5 kpc away), which dominate the mass. Our findings are consistent with theoretical predictions of a PopIII wave, with PopIII star formation migrating away from the original sites of star formation.}, author = {Sobral, David and Matthee, Jorryt J and Darvish, Behnam and Schaerer, Daniel and Mobasher, Bahram and Röttgering, Huub and Santos, Sérgio and Hemmati, Shoubaneh}, issn = {1538-4357}, journal = {The Astrophysical Journal}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, dark ages, reionization, first stars – early universe – galaxies: evolution}, number = {2}, pages = {139}, publisher = {IOP Publishing}, title = {{Evidence for PopIII-like stellar populations in the most luminous Lyα emitters at the epoch of reionization: Spectroscopic confirmation}}, doi = {10.1088/0004-637X/808/2/139}, volume = {808}, year = {2015}, } @article{11580, abstract = {We present results from the largest contiguous narrow-band survey in the near-infrared. We have used the wide-field infrared camera/Canada–France–Hawaii Telescope and the lowOH2 filter (1.187 ± 0.005 μm) to survey ≈10 deg^2 of contiguous extragalactic sky in the SA22 field. A total of ∼6000 candidate emission-line galaxies are found. We use deep ugrizJK data to obtain robust photometric redshifts. 
We combine our data with the High-redshift(Z) Emission Line Survey (HiZELS), explore spectroscopic surveys (VVDS, VIPERS) and obtain our own spectroscopic follow-up with KMOS, FMOS and MOSFIRE to derive large samples of high-redshift emission-line selected galaxies: 3471 Hα emitters at z = 0.8, 1343 [O III] + Hβ emitters at z = 1.4 and 572 [O II] emitters at z = 2.2. We probe comoving volumes of >10^6 Mpc^3 and find significant overdensities, including an 8.5σ (spectroscopically confirmed) overdensity of Hα emitters at z = 0.81. We derive Hα, [O III] + Hβ and [O II] luminosity functions at z = 0.8, 1.4 and 2.2, respectively, and present implications for future surveys such as Euclid. Our uniquely large volumes/areas allow us to subdivide the samples into thousands of randomized combinations of areas and provide a robust empirical measurement of sample/cosmic variance. We show that surveys for star-forming/emission-line galaxies at a depth similar to ours can only overcome cosmic variance (errors <10 per cent) if they are based on volumes >5 × 10^5 Mpc^3; errors on L* and ϕ* due to sample (cosmic) variance in surveys probing ∼10^4 and ∼10^5 Mpc^3 are typically very high: ∼300 and ∼40–60 per cent, respectively.}, author = {Sobral, D. and Matthee, Jorryt J and Best, P. N. and Smail, I. and Khostovan, A. A. and Milvang-Jensen, B. and Kim, J.-W. and Stott, J. and Calhau, J. and Nayyeri, H. and Mobasher, B.}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: formation, galaxies: luminosity function, mass function, cosmology: observations, early Universe, large-scale structure of Universe}, number = {3}, pages = {2303--2323}, publisher = {Oxford University Press}, title = {{CF-HiZELS, an ∼10 deg^2 emission-line survey with spectroscopic follow-up: Hα, [O III] + Hβ and [O II] luminosity functions at z = 0.8, 1.4 and 2.2}}, doi = {10.1093/mnras/stv1076}, volume = {451}, year = {2015}, } @article{11581, abstract = {Using wide-field narrow-band surveys, we provide a new measurement of the z = 6.6 Lyman-α emitter (LAE) luminosity function (LF), which constrains the bright end for the first time. We use a combination of archival narrow-band NB921 data in UDS and new NB921 measurements in SA22 and COSMOS/UltraVISTA, all observed with the Subaru telescope, with a total area of ∼5 deg^2. We exclude lower redshift interlopers by using broad-band optical and near-infrared photometry and also exclude three supernovae with data split over multiple epochs. Combining the UDS and COSMOS samples, we find no evolution of the bright end of the Lyα LF between z = 5.7 and 6.6, which is supported by spectroscopic follow-up, and conclude that sources with Himiko-like luminosity are not as rare as previously thought, with number densities of ∼1.5 × 10^{-5} Mpc^{-3}. Combined with our wide-field SA22 measurements, our results indicate a non-Schechter-like bright end of the LF at z = 6.6 and a different evolution of observed faint and bright LAEs, overcoming cosmic variance. This differential evolution is also seen in the spectroscopic follow-up of UV-selected galaxies and is now also confirmed for LAEs, and we argue that it may be an effect of reionization. Using a toy model, we show that such differential evolution of the LF is expected, since brighter sources are able to ionize their surroundings earlier, such that Lyα photons are able to escape. 
Our targets are excellent candidates for detailed follow-up studies and offer a unique view of the earliest stages of galaxy formation and the reionization process.}, author = {Matthee, Jorryt J and Sobral, David and Santos, Sérgio and Röttgering, Huub and Darvish, Behnam and Mobasher, Bahram}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics}, number = {1}, pages = {400--417}, publisher = {Oxford University Press}, title = {{Identification of the brightest Lyα emitters at z = 6.6: implications for the evolution of the luminosity function in the reionization era}}, doi = {10.1093/mnras/stv947}, volume = {451}, year = {2015}, } @article{11579, abstract = {CR7 is the brightest z = 6.6 Lyα emitter (LAE) known to date, and spectroscopic follow-up by Sobral et al. suggests that CR7 might host Population (Pop) III stars. We examine this interpretation using cosmological hydrodynamical simulations. Several simulated galaxies show the same ‘Pop III wave’ pattern observed in CR7. However, to reproduce the extreme CR7 Lyα/He II 1640 line luminosities (L_α/L_HeII), a top-heavy initial mass function and a massive (≳ 10^7 M⊙) Pop III burst with age ≲ 2 Myr are required. Assuming that the observed properties of Lyα and He II emission are typical for Pop III, we predict that in the COSMOS/UDS/SA22 fields, 14 out of the 30 LAEs at z = 6.6 with L_α > 10^{43.3} erg s^{-1} should also host Pop III stars producing an observable L_HeII ≳ 10^{42.7} erg s^{-1}. As an alternative explanation, we explore the possibility that CR7 is instead powered by accretion on to a direct collapse black hole. Our model predicts L_α, L_HeII, and X-ray luminosities that are in agreement with the observations. In any case, the observed properties of CR7 indicate that this galaxy is most likely powered by sources formed from pristine gas. We propose that further X-ray observations can distinguish between the two above scenarios.}, author = {Pallottini, A. and Ferrara, A. and Pacucci, F. and Gallerani, S. and Salvadori, S. and Schneider, R. and Schaerer, D. and Sobral, D. and Matthee, Jorryt J}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, black hole physics, stars: Population III, galaxies: high-redshift}, number = {3}, pages = {2465--2470}, publisher = {Oxford University Press}, title = {{The brightest Lyα emitter: Pop III or black hole?}}, doi = {10.1093/mnras/stv1795}, volume = {453}, year = {2015}, } @article{11668, abstract = {We study multiple keyword sponsored search auctions with budgets. Each keyword has multiple ad slots, each with a click-through rate. The bidders have additive valuations, which are linear in the click-through rates, and budgets, which restrict their overall payments. Additionally, the number of slots per keyword assigned to a bidder is bounded. We show the following results: (1) We give the first mechanism for multiple keywords, where click-through rates differ among slots. Our mechanism is incentive compatible in expectation, individually rational in expectation, and Pareto optimal. (2) We study the combinatorial setting, where each bidder is only interested in a subset of the keywords. We give an incentive compatible, individually rational, Pareto-optimal, and deterministic mechanism for identical click-through rates. 
(3) We give an impossibility result for incentive compatible, individually rational, Pareto-optimal, and deterministic mechanisms for bidders with diminishing marginal valuations.}, author = {Colini-Baldeschi, Riccardo and Leonardi, Stefano and Henzinger, Monika H and Starnberger, Martin}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Algorithms, Economics, Clinching ascending auction, auctions with budgets, Sponsored search auctions}, number = {1}, publisher = {Association for Computing Machinery}, title = {{On multiple keyword sponsored search auctions with budgets}}, doi = {10.1145/2818357}, volume = {4}, year = {2015}, } @article{11669, abstract = {We study individually rational, Pareto-optimal, and incentive compatible mechanisms for auctions with heterogeneous items and budget limits. We consider settings with multiunit demand and additive valuations. For single-dimensional valuations we prove a positive result for randomized mechanisms, and a negative result for deterministic mechanisms. While the positive result allows for private budgets, the negative result is for public budgets. For multidimensional valuations and public budgets we prove an impossibility result that applies to deterministic and randomized mechanisms. Taken together, this shows the power of randomization in certain settings with heterogeneous items, but it also shows its limitations.}, author = {Dütting, Paul and Henzinger, Monika H and Starnberger, Martin}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Algorithmic game theory, auction theory, Clinching auction, Pareto optimality, Budget limits}, number = {1}, publisher = {Association for Computing Machinery}, title = {{Auctions for heterogeneous items and budget limits}}, doi = {10.1145/2818351}, volume = {4}, year = {2015}, } @article{11670, abstract = {Auctions are widely used on the Web. Applications range from sponsored search to platforms such as eBay. In these and in many other applications the auctions in use are single-/multi-item auctions with unit demand. The main drawback of standard mechanisms for this type of auction, such as VCG and GSP, is the limited expressiveness that they offer to the bidders. The General Auction Mechanism (GAM) of Aggarwal et al. [2009] takes a first step toward addressing the problem of limited expressiveness by computing a bidder-optimal, envy-free outcome for linear utility functions with identical slopes and a single discontinuity per bidder-item pair. We show that in many practical situations this does not suffice to adequately model the preferences of the bidders, and we overcome this problem by presenting the first mechanism for piecewise linear utility functions with nonidentical slopes and multiple discontinuities. Our mechanism runs in polynomial time. Like GAM, it is incentive compatible for inputs that fulfill a certain nondegeneracy assumption, but our requirement is more general than that of GAM. For discontinuous utility functions that are nondegenerate, as well as for continuous utility functions, the outcome of our mechanism is a competitive equilibrium. We also show how our mechanism can be used to compute approximately bidder-optimal, envy-free outcomes for a general class of continuous utility functions via piecewise linear approximation. 
Finally, we prove hardness results for even more expressive settings.}, author = {Dütting, Paul and Henzinger, Monika H and Weber, Ingmar}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Computational Mathematics, Marketing, Economics and Econometrics, Statistics and Probability, Computer Science (miscellaneous)}, number = {1}, publisher = {Association for Computing Machinery}, title = {{An expressive mechanism for auctions on the web}}, doi = {10.1145/2716312}, volume = {4}, year = {2015}, } @inproceedings{11774, abstract = {Combinatorial auctions (CAs) are a well-studied area in algorithmic mechanism design. However, contrary to the standard model, empirical studies suggest that a bidder’s valuation often does not depend solely on the goods assigned to him. For instance, in adwords auctions an advertiser might not want his ads to be displayed next to his competitors’ ads. In this paper, we propose and analyze several natural graph-theoretic models that incorporate such negative externalities, in which bidders form a directed conflict graph with maximum out-degree Δ. We design algorithms and truthful mechanisms for social welfare maximization that attain approximation ratios depending on Δ. For CAs, our results are twofold: (1) A lottery that eliminates conflicts by discarding bidders/items independently of the bids. It allows us to apply any truthful α-approximation mechanism for conflict-free valuations and yields an O(αΔ)-approximation mechanism. (2) For fractionally sub-additive valuations, we design a rounding algorithm via a novel combination of a semi-definite program and a linear program, resulting in a cone program; the approximation ratio is O((Δ log log Δ)/log Δ). The ratios are almost optimal given existing hardness results. For adwords auctions, we present several algorithms for the most relevant scenario where the number of items is small. In particular, we design a truthful mechanism with approximation ratio o(Δ) when the number of items is only logarithmic in the number of bidders.}, author = {Cheung, Yun Kuen and Henzinger, Monika H and Hoefer, Martin and Starnberger, Martin}, booktitle = {11th International Conference on Web and Internet Economics}, isbn = {9783662489949}, issn = {0302-9743}, location = {Amsterdam, Netherlands}, pages = {230–243}, publisher = {Springer Nature}, title = {{Combinatorial auctions with conflict-based externalities}}, doi = {10.1007/978-3-662-48995-6_17}, volume = {9470}, year = {2015}, } @inproceedings{11773, abstract = {Ad exchanges are an emerging platform for trading advertisement slots on the web, with billions of dollars in revenue per year. Every time a user visits a web page, the publisher of that web page can ask an ad exchange to auction off the ad slots on this page to determine which advertisements are shown at which price. Due to the high volume of traffic, ad networks typically act as mediators for individual advertisers at ad exchanges. If multiple advertisers in an ad network are interested in the ad slots of the same auction, the ad network might use a “local” auction to resell the obtained ad slots among its advertisers. In this work we want to deepen the theoretical understanding of these new markets by analyzing them from the viewpoint of combinatorial auctions. Prior work studied mostly single-item auctions, while we allow the advertisers to express richer preferences over multiple items. 
We develop a game-theoretic model for the entanglement of the central auction at the ad exchange with the local auctions at the ad networks. We consider the incentives of all three involved parties and suggest a three-party competitive equilibrium, an extension of the Walrasian equilibrium that ensures envy-freeness for all participants. We show the existence of a three-party competitive equilibrium and give a polynomial-time algorithm to find one for gross-substitute bidder valuations.}, author = {Ben-Zwi, Oren and Henzinger, Monika H and Loitzenbauer, Veronika}, booktitle = {11th International Conference on Web and Internet Economics}, isbn = {9783662489949}, issn = {0302-9743}, location = {Amsterdam, Netherlands}, pages = {104–117}, publisher = {Springer Nature}, title = {{Ad exchange: Envy-free auctions with mediators}}, doi = {10.1007/978-3-662-48995-6_8}, volume = {9470}, year = {2015}, } @inproceedings{11785, abstract = {Recently we presented the first algorithm for maintaining the set of nodes reachable from a source node in a directed graph that is modified by edge deletions with o(mn) total update time, where m is the number of edges and n is the number of nodes in the graph [Henzinger et al. STOC 2014]. The algorithm is a combination of several different algorithms, each for a different m vs. n trade-off. For the case of m = Θ(n^1.5) the running time is O(n^2.47), just barely below mn = Θ(n^2.5). In this paper we simplify the previous algorithm using new algorithmic ideas and achieve an improved running time of Õ(min(m^{7/6} n^{2/3}, m^{3/4} n^{5/4+o(1)}, m^{2/3} n^{4/3+o(1)} + m^{3/7} n^{12/7+o(1)})). This gives, e.g., O(n^2.36) for the notorious case m = Θ(n^1.5). We obtain the same upper bounds for the problem of maintaining the strongly connected components of a directed graph undergoing edge deletions. Our algorithms are correct with high probability against an oblivious adversary.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {725 -- 736}, publisher = {Springer Nature}, title = {{Improved algorithms for decremental single-source reachability on directed graphs}}, doi = {10.1007/978-3-662-47672-7_59}, volume = {9134}, year = {2015}, } @inproceedings{11787, abstract = {We present faster algorithms for computing the 2-edge and 2-vertex strongly connected components of a directed graph. While in undirected graphs the 2-edge and 2-vertex connected components can be found in linear time, in directed graphs with m edges and n vertices only rather simple O(mn)-time algorithms were known. We use a hierarchical sparsification technique to obtain algorithms that run in time O(n^2). For 2-edge strongly connected components our algorithm gives the first running time improvement in 20 years. Additionally we present an O(m^2/log n)-time algorithm for 2-edge strongly connected components, and thus improve over the O(mn) running time also when m = O(n). 
Our approach extends to k-edge and k-vertex strongly connected components for any constant k, with a running time of O(n^2 log n) for k-edge-connectivity and O(n^3) for k-vertex-connectivity.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Loitzenbauer, Veronika}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {713 -- 724}, publisher = {Springer Nature}, title = {{Finding 2-edge and 2-vertex strongly connected components in quadratic time}}, doi = {10.1007/978-3-662-47672-7_58}, volume = {9134}, year = {2015}, } @inproceedings{11788, abstract = {Ad exchanges are becoming an increasingly popular way to sell advertisement slots on the internet. An ad exchange is basically a spot market for ad impressions. A publisher who has already signed contracts reserving advertisement impressions on his pages can choose between assigning a new ad impression for a new page view to a contracted advertiser or selling it at an ad exchange. This leads to an online revenue maximization problem for the publisher: given a new impression to sell, decide whether (a) to assign it to a contracted advertiser and, if so, to which one, or (b) to sell it at the ad exchange and, if so, at which reserve price. We make no assumptions about the distribution of the advertiser valuations that participate in the ad exchange and show that there exists a simple primal-dual based online algorithm whose lower bound for the revenue converges to R_ADX + R_A(1 − 1/e), where R_ADX is the revenue that the optimum algorithm achieves from the ad exchange and R_A is the revenue that the optimum algorithm achieves from the contracted advertisers.}, author = {Dvořák, Wolfgang and Henzinger, Monika H}, booktitle = {12th International Workshop on Approximation and Online Algorithms}, issn = {0302-9743}, location = {Wroclaw, Poland}, pages = {156–167}, publisher = {Springer Nature}, title = {{Online ad assignment with an ad exchange}}, doi = {10.1007/978-3-319-18263-6_14}, volume = {8952}, year = {2015}, } @inproceedings{11786, abstract = {In this paper, we develop a dynamic version of the primal-dual method for optimization problems, and apply it to obtain the following results. (1) For the dynamic set-cover problem, we maintain an O(f^2)-approximately optimal solution in O(f · log(m+n)) amortized update time, where f is the maximum “frequency” of an element, n is the number of sets, and m is the maximum number of elements in the universe at any point in time. (2) For the dynamic b-matching problem, we maintain an O(1)-approximately optimal solution in O(log^3 n) amortized update time, where n is the number of nodes in the graph.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Italiano, Giuseppe F.}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {206 -- 218}, publisher = {Springer Nature}, title = {{Design of dynamic algorithms via primal-dual method}}, doi = {10.1007/978-3-662-47672-7_17}, volume = {9134}, year = {2015}, } @article{11845, abstract = {Phylogenetic diversity (PD) is a measure of biodiversity based on the evolutionary history of species. Here, we discuss several optimization problems related to the use of PD, and the more general measure split diversity (SD), in conservation prioritization. 
Depending on the conservation goal and the information available about species, one can construct optimization routines that incorporate various conservation constraints. We demonstrate how this information can be used to select sets of species for conservation action. Specifically, we discuss the use of species' geographic distributions, the choice of candidates under economic pressure, and the use of predator–prey interactions between the species in a community to define viability constraints. Although such optimization problems are NP-hard, it is possible to solve them in a reasonable amount of time using integer programming. We apply integer linear programming to a variety of models for conservation prioritization that incorporate the SD measure. We show results for two exemplary data sets: the Cape region of South Africa and a Caribbean coral reef community. Finally, we provide user-friendly software at http://www.cibiv.at/software/pda.}, author = {Chernomor, Olga and Minh, Bui Quang and Forest, Félix and Klaere, Steffen and Ingram, Travis and Henzinger, Monika H and von Haeseler, Arndt}, issn = {2041-210X}, journal = {Methods in Ecology and Evolution}, number = {1}, pages = {83--91}, publisher = {Wiley}, title = {{Split diversity in constrained conservation prioritization using integer linear programming}}, doi = {10.1111/2041-210x.12299}, volume = {6}, year = {2015}, } @inproceedings{11868, abstract = {Consider the following Online Boolean Matrix-Vector Multiplication problem: We are given an n × n matrix M and will receive n column-vectors of size n, denoted by v_1, ..., v_n, one by one. After seeing each vector v_i, we have to output the product Mv_i before we can see the next vector. A naive algorithm can solve this problem using O(n^3) time in total, and its running time can be slightly improved to O(n^3/log^2 n) [Williams SODA'07]. We show that a conjecture that there is no truly subcubic (O(n^{3-ε})) time algorithm for this problem can be used to exhibit the underlying polynomial time hardness shared by many dynamic problems. For a number of problems, such as subgraph connectivity, Pagh's problem, d-failure connectivity, decremental single-source shortest paths, and decremental transitive closure, this conjecture implies tight hardness results. Thus, proving or disproving this conjecture will be very interesting as it will either imply several tight unconditional lower bounds or break through a common barrier that blocks progress with these problems. This conjecture might also be considered as strong evidence against any further improvement for these problems since refuting it will imply a major breakthrough for combinatorial Boolean matrix multiplication and other long-standing problems if the term "combinatorial algorithms" is interpreted as "Strassen-like algorithms" [Ballard et al. SPAA'11]. The conjecture also leads to hardness results for problems that were previously based on diverse problems and conjectures -- such as 3SUM, combinatorial Boolean matrix multiplication, triangle detection, and multiphase -- thus providing a uniform way to prove polynomial hardness results for dynamic algorithms; some of the new proofs are also simpler or even become trivial. 
The conjecture also leads to stronger and new, non-trivial hardness results, e.g., for the fully-dynamic densest subgraph and diameter problems.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon and Saranurak, Thatchaphol}, booktitle = {47th Annual ACM Symposium on Theory of Computing}, isbn = {978-145033536-2}, issn = {0737-8017}, location = {Portland, OR, United States}, publisher = {Association for Computing Machinery}, title = {{Unifying and strengthening hardness for dynamic problems via the online matrix-vector multiplication conjecture}}, doi = {10.1145/2746539.2746609}, year = {2015}, } @inproceedings{11869, abstract = {While in many graph mining applications it is crucial to handle a stream of updates efficiently in terms of both time and space, not much was known about how to achieve such algorithms. In this paper we study this issue for a problem that lies at the core of many graph mining applications, the densest subgraph problem. We develop an algorithm that achieves time and space efficiency for this problem simultaneously. It is one of the first of its kind for graph problems, to the best of our knowledge. Given an input graph, the densest subgraph is the subgraph that maximizes the ratio between the number of edges and the number of nodes. For any ε > 0, our algorithm can, with high probability, maintain a (4+ε)-approximate solution under edge insertions and deletions using Õ(n) space and Õ(1) amortized time per update; here, n is the number of nodes in the graph and Õ hides the O(polylog_{1+ε} n) term. The approximation ratio can be improved to (2+ε) with more time. It can be extended to a (2+ε)-approximation sublinear-time algorithm and a distributed-streaming algorithm. Our algorithm is the first streaming algorithm that can maintain the densest subgraph in one pass. Prior to this, no algorithm could do so even in the special case of an incremental stream and even when there is no time restriction. The previously best algorithm in this setting required O(log n) passes [BahmaniKV12]. The space required by our algorithm is tight up to a polylogarithmic factor.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon and Tsourakakis, Charalampos}, booktitle = {47th Annual ACM Symposium on Theory of Computing}, isbn = {978-145033536-2}, issn = {0737-8017}, location = {Portland, OR, United States}, pages = {173 -- 182}, publisher = {Association for Computing Machinery}, title = {{Space- and time-efficient algorithm for maintaining dense subgraphs on one-pass dynamic streams}}, doi = {10.1145/2746539.2746592}, year = {2015}, } @inproceedings{11837, abstract = {Online social networks allow the collection of large amounts of data about the influence between users connected by a friendship-like relationship. When distributing items among agents forming a social network, this information allows us to exploit network externalities that each agent receives from his neighbors that get the same item. In this paper we consider Friends-of-Friends (2-hop) network externalities, i.e., externalities that not only depend on the neighbors that get the same item but also on neighbors of neighbors. For these externalities we study a setting where multiple different items are assigned to unit-demand agents. Specifically, we study the problem of welfare maximization under different types of externality functions. Let n be the number of agents and m be the number of items. 
Our contributions are the following: (1) We show that welfare maximization is APX-hard; even for step functions with 2-hop (and also with 1-hop) externalities it is NP-hard to approximate social welfare better than (1-1/e). (2) On the positive side we present (i) an O(√n)-approximation algorithm for general concave externality functions, (ii) an O(log m)-approximation algorithm for linear externality functions, and (iii) a (1-1/e)/6-approximation algorithm for 2-hop step function externalities. We also improve the result from [6] for 1-hop step function externalities by giving a (1-1/e)/2-approximation algorithm.}, author = {Bhattacharya, Sayan and Dvorák, Wolfgang and Henzinger, Monika H and Starnberger, Martin}, booktitle = {32nd International Symposium on Theoretical Aspects of Computer Science}, isbn = {978-3-939897-78-1}, issn = {1868-8969}, location = {Garching, Germany}, pages = {90--102}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Welfare maximization with friends-of-friends network externalities}}, doi = {10.4230/LIPICS.STACS.2015.90}, volume = {30}, year = {2015}, } @article{11901, abstract = {We consider auctions of indivisible items to unit-demand bidders with budgets. This setting was suggested as an expressive model for single sponsored search auctions. Prior work presented mechanisms that compute bidder-optimal outcomes and are truthful for a restricted set of inputs, i.e., inputs in so-called general position. This condition is easily violated. We provide the first mechanism that is truthful in expectation for all inputs and achieves for each bidder a utility no worse than in the bidder-optimal outcome. Additionally, we give a complete characterization of the inputs for which mechanisms that compute bidder-optimal outcomes are truthful.}, author = {Henzinger, Monika H and Loitzenbauer, Veronika}, issn = {0304-3975}, journal = {Theoretical Computer Science}, pages = {1--15}, publisher = {Elsevier}, title = {{Truthful unit-demand auctions with budgets revisited}}, doi = {10.1016/j.tcs.2015.01.033}, volume = {573}, year = {2015}, } @article{11962, abstract = {One of the rare alternative reagents for the reduction of carbon–carbon double bonds is diimide (HN=NH), which can be generated in situ from hydrazine hydrate (N2H4⋅H2O) and O2. Although this selective method is extremely clean and powerful, it is rarely used, as the rate-determining oxidation of hydrazine in the absence of a catalyst is relatively slow using conventional batch protocols. A continuous high-temperature/high-pressure methodology dramatically enhances the initial oxidation step, at the same time allowing for safe and scalable processing of the hazardous reaction mixture. Simple alkenes can be selectively reduced within 10–20 min at 100–120 °C and 20 bar O2 pressure. The development of a multi-injection reactor platform for the periodic addition of N2H4⋅H2O enables the reduction of less reactive olefins even at lower reaction temperatures. This concept was utilized for the highly selective reduction of artemisinic acid to dihydroartemisinic acid, the precursor molecule for the semisynthesis of the antimalarial drug artemisinin. 
The industrially relevant reduction was achieved by using four consecutive liquid feeds (of N2H4⋅H2O) and residence time units, resulting in a highly selective reduction within approximately 40 min at 60 °C and 20 bar O2 pressure, providing dihydroartemisinic acid in ≥93 % yield and ≥95 % selectivity.}, author = {Pieber, Bartholomäus and Glasnov, Toma and Kappe, C. Oliver}, issn = {1521-3765}, journal = {Chemistry - A European Journal}, number = {11}, pages = {4368--4376}, publisher = {Wiley}, title = {{Continuous flow reduction of artemisinic acid utilizing multi-injection strategies – closing the gap towards a fully continuous synthesis of antimalarial drugs}}, doi = {10.1002/chem.201406439}, volume = {21}, year = {2015}, }