@inproceedings{11793, abstract = {We study the problem of maintaining a breadth-first spanning tree (BFS tree) in partially dynamic distributed networks modeling a sequence of either failures or additions of communication links (but not both). We show (1 + ε)-approximation algorithms whose amortized time (over some number of link changes) is sublinear in D, the maximum diameter of the network. This breaks the Θ(D) time bound of recomputing “from scratch”. Our technique also leads to a (1 + ε)-approximate incremental algorithm for single-source shortest paths (SSSP) in the sequential (usual RAM) model. Prior to our work, the state of the art was the classic exact algorithm of [9] that is optimal under some assumptions [27]. Our result is the first to show that, in the incremental setting, this bound can be beaten in certain cases if a small approximation is allowed.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {40th International Colloquium on Automata, Languages, and Programming}, isbn = {9783642392115}, issn = {1611-3349}, location = {Riga, Latvia}, pages = {607--619}, publisher = {Springer Nature}, title = {{Sublinear-time maintenance of breadth-first spanning tree in partially dynamic networks}}, doi = {10.1007/978-3-642-39212-2_53}, volume = {7966}, year = {2013}, } @inproceedings{11791, abstract = {The focus of classic mechanism design has been on truthful direct-revelation mechanisms. In the context of combinatorial auctions the truthful direct-revelation mechanism that maximizes social welfare is the VCG mechanism. For many valuation spaces computing the allocation and payments of the VCG mechanism, however, is a computationally hard problem. We thus study the performance of the VCG mechanism when bidders are forced to choose bids from a subspace of the valuation space for which the VCG outcome can be computed efficiently. We prove improved upper bounds on the welfare loss for restrictions to additive bids and upper and lower bounds for restrictions to non-additive bids. These bounds show that the welfare loss increases in expressiveness. All our bounds apply to equilibrium concepts that can be computed in polynomial time as well as to learning outcomes.}, author = {Dütting, Paul and Henzinger, Monika H and Starnberger, Martin}, booktitle = {9th International Conference on Web and Internet Economics}, isbn = {9783642450457}, issn = {1611-3349}, location = {Cambridge, MA, USA}, pages = {146--159}, publisher = {Springer Nature}, title = {{Valuation compressions in VCG-based combinatorial auctions}}, doi = {10.1007/978-3-642-45046-4_13}, volume = {8289}, year = {2013}, } @inproceedings{11792, abstract = {We study the problem of maximizing a monotone submodular function with viability constraints. This problem originates from computational biology, where we are given a phylogenetic tree over a set of species and a directed graph, the so-called food web, encoding viability constraints between these species. These food webs usually have constant depth. The goal is to select a subset of k species that satisfies the viability constraints and has maximal phylogenetic diversity. As this problem is known to be NP-hard, we investigate approximation algorithms. We present the first constant-factor approximation algorithm if the depth is constant. Its approximation ratio is (1 − 1/√e). This algorithm applies not only to phylogenetic trees with viability constraints but also to arbitrary monotone submodular set functions with viability constraints.
Second, we show that there is no (1 − 1/e + ε)-approximation algorithm for our problem setting (even for additive functions) and that there is no approximation algorithm for a slight extension of this setting.}, author = {Dvořák, Wolfgang and Henzinger, Monika H and Williamson, David P.}, booktitle = {21st Annual European Symposium on Algorithms}, isbn = {9783642404498}, issn = {1611-3349}, location = {Sophia Antipolis, France}, pages = {409--420}, publisher = {Springer Nature}, title = {{Maximizing a submodular function with viability constraints}}, doi = {10.1007/978-3-642-40450-4_35}, volume = {8125}, year = {2013}, } @inproceedings{11856, abstract = {We study dynamic (1 + ε)-approximation algorithms for the all-pairs shortest paths problem in unweighted undirected n-node m-edge graphs under edge deletions. The fastest algorithm for this problem is a randomized algorithm with a total update time of Õ(mn) and constant query time by Roditty and Zwick (FOCS 2004). The fastest deterministic algorithm is from a 1981 paper by Even and Shiloach (JACM 1981); it has a total update time of O(mn^2) and constant query time. We improve these results as follows: (1) We present an algorithm with a total update time of Õ(n^{5/2}) and constant query time that has an additive error of two in addition to the 1 + ε multiplicative error. This beats the previous Õ(mn) time when m = Ω(n^{3/2}). Note that the additive error is unavoidable since, even in the static case, an O(n^{3−δ})-time (a so-called truly subcubic) combinatorial algorithm with 1 + ε multiplicative error cannot have an additive error less than 2 − ε, unless we make a major breakthrough for Boolean matrix multiplication (Dor, Halperin and Zwick FOCS 1996) and many other long-standing problems (Vassilevska Williams and Williams FOCS 2010). The algorithm can also be turned into a (2 + ε)-approximation algorithm (without an additive error) with the same time guarantees, improving the recent (3 + ε)-approximation algorithm with Õ(n^{5/2 + O(1/√(log n))}) running time of Bernstein and Roditty (SODA 2011) in terms of both approximation and time guarantees. (2) We present a deterministic algorithm with a total update time of Õ(mn) and a query time of O(log log n). The algorithm has a multiplicative error of 1 + ε and gives the first improved deterministic algorithm since 1981. It also answers an open question raised by Bernstein in his STOC 2013 paper. In order to achieve our results, we introduce two new techniques: (1) A lazy Even-Shiloach tree algorithm which maintains a bounded-distance shortest-paths tree on a certain type of emulator called a locally persevering emulator. (2) A derandomization technique based on moving Even-Shiloach trees as a way to derandomize the standard random set argument. These techniques might be of independent interest.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {54th Annual Symposium on Foundations of Computer Science}, issn = {0272-5428}, location = {Berkeley, CA, United States}, pages = {538--547}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Dynamic approximate all-pairs shortest paths: Breaking the O(mn) barrier and derandomization}}, doi = {10.1109/focs.2013.64}, year = {2013}, } @article{11902, abstract = {We study the problem of matching bidders to items where each bidder i has general, strictly monotonic utility functions u_{i,j}(p_j) expressing his utility of being matched to item j at price p_j.
For this setting we prove that a bidder optimal outcome always exists, even when the utility functions are non-linear and non-continuous. We give sufficient conditions under which every mechanism that finds a bidder optimal outcome is incentive compatible. We also give a mechanism that finds a bidder optimal outcome if the conditions for incentive compatibility are satisfied. The running time of this mechanism is exponential in the number of items, but polynomial in the number of bidders.}, author = {Dütting, Paul and Henzinger, Monika H and Weber, Ingmar}, issn = {0304-3975}, journal = {Theoretical Computer Science}, number = {3}, pages = {22--32}, publisher = {Elsevier}, title = {{Bidder optimal assignments for general utilities}}, doi = {10.1016/j.tcs.2013.01.030}, volume = {478}, year = {2013}, } @article{11959, abstract = {No catalyst required! A highly efficient, catalyst-free process to generate diimide in situ from hydrazine monohydrate and molecular oxygen for the selective reduction of alkenes has been developed. The use of a gas–liquid segmented flow system allowed safe operating conditions and dramatically enhanced this atom-economical reaction, resulting in short processing times.}, author = {Pieber, Bartholomäus and Martinez, Sabrina Teixeira and Cantillo, David and Kappe, C. Oliver}, issn = {1521-3773}, journal = {Angewandte Chemie International Edition}, number = {39}, pages = {10241--10244}, publisher = {Wiley}, title = {{In situ generation of diimide from hydrazine and oxygen: Continuous-flow transfer hydrogenation of olefins}}, doi = {10.1002/anie.201303528}, volume = {52}, year = {2013}, } @article{11960, abstract = {It's not magic! The effects observed in microwave-irradiated chemical transformations can in most cases be rationalized by purely bulk thermal phenomena associated with rapid heating to elevated temperatures. As discussed in this Essay, the existence of so-called nonthermal or specific microwave effects is highly doubtful.}, author = {Kappe, C. Oliver and Pieber, Bartholomäus and Dallinger, Doris}, issn = {1521-3773}, journal = {Angewandte Chemie International Edition}, number = {4}, pages = {1088--1094}, publisher = {Wiley}, title = {{Microwave effects in organic synthesis: Myth or reality?}}, doi = {10.1002/anie.201204103}, volume = {52}, year = {2013}, } @article{11973, abstract = {The use of high-temperature/pressure gas–liquid continuous flow conditions dramatically enhances the iron-catalyzed aerobic oxidation of 2-benzylpyridines to their corresponding ketones. Pressurized air serves as a readily available oxygen source and propylene carbonate as a green solvent in this radically intensified preparation of synthetically valuable 2-aroylpyridines.}, author = {Pieber, Bartholomäus and Kappe, C. Oliver}, issn = {1463-9270}, journal = {Green Chemistry}, number = {2}, pages = {320--324}, publisher = {Royal Society of Chemistry}, title = {{Direct aerobic oxidation of 2-benzylpyridines in a gas-liquid continuous-flow regime using propylene carbonate as a solvent}}, doi = {10.1039/c2gc36896j}, volume = {15}, year = {2013}, } @article{12642, abstract = {Near-surface air temperature, typically measured at a height of 2 m, is the most important control on the energy exchange and the melt rate at a snow or ice surface. It is distributed in a simplistic manner in most glacier melt models by using constant linear lapse rates, which poorly represent the actual spatial and temporal variability of air temperature. 
In this paper, we test a simple thermodynamic model proposed by Greuell and Böhm in 1998 as an alternative, using a new dataset of air temperature measurements from along the flowline of Haut Glacier d’Arolla, Switzerland. The unmodified model performs little better than assuming a constant linear lapse rate. When modified to allow the ratio of the boundary layer height to the bulk heat transfer coefficient to vary along the flowline, the model matches measured air temperatures better, and a further reduction of the root-mean-square error is obtained, although there is still considerable scope for improvement. The modified model is shown to perform best under conditions favourable to the development of katabatic winds – few clouds, positive ambient air temperature, limited influence of synoptic or valley winds and a long fetch – but its performance is poor under cloudy conditions.}, author = {Petersen, Lene and Pellicciotti, Francesca and Juszak, Inge and Carenzo, Marco and Brock, Ben}, issn = {1727-5644}, journal = {Annals of Glaciology}, keywords = {Earth-Surface Processes}, number = {63}, pages = {120--130}, publisher = {International Glaciological Society}, title = {{Suitability of a constant air temperature lapse rate over an Alpine glacier: Testing the Greuell and Böhm model as an alternative}}, doi = {10.3189/2013aog63a477}, volume = {54}, year = {2013}, } @article{12643, abstract = {Parameterizations of incoming longwave radiation are increasingly receiving attention for both low and high elevation glacierized sites. In this paper, we test 13 clear-sky parameterizations combined with seven cloud corrections for all-sky atmospheric emissivity at one location on Haut Glacier d'Arolla. We also analyze the four seasons separately and conduct a cross-validation to test the parameters’ robustness. The best parameterization is the one by Dilley and O'Brien (B) for clear-sky conditions combined with the Unsworth and Monteith cloud correction. This model is also the most robust when tested in cross-validation. When validated at different sites in the southern Alps of Switzerland and north-western Italian Alps, all parameterizations show a substantial decrease in performance, except for one site, thus suggesting that it is important to recalibrate parameterizations of incoming longwave radiation for different locations. We argue that this is due to differences in the structure of the atmosphere at the sites. We also quantify the effect that the incoming longwave radiation parameterizations have on energy-balance melt modeling, and show that recalibration of model parameters is needed. Using parameters from other sites leads to a significant underestimation of melt and to an error that is larger than that associated with using different parameterizations. Once recalibrated, however, the parameters of most models seem to be stable over seasons and years at the location on Haut Glacier d'Arolla.}, author = {Juszak, I. and Pellicciotti, Francesca}, issn = {2169-897X}, journal = {Journal of Geophysical Research: Atmospheres}, keywords = {Space and Planetary Science, Earth and Planetary Sciences (miscellaneous), Atmospheric Science, Geophysics}, number = {8}, pages = {3066--3084}, publisher = {American Geophysical Union}, title = {{A comparison of parameterizations of incoming longwave radiation over melting glaciers: Model robustness and seasonal variability}}, doi = {10.1002/jgrd.50277}, volume = {118}, year = {2013}, }