@article{11668, abstract = {We study multiple keyword sponsored search auctions with budgets. Each keyword has multiple ad slots with a click-through rate. The bidders have additive valuations, which are linear in the click-through rates, and budgets, which are restricting their overall payments. Additionally, the number of slots per keyword assigned to a bidder is bounded. We show the following results: (1) We give the first mechanism for multiple keywords, where click-through rates differ among slots. Our mechanism is incentive compatible in expectation, individually rational in expectation, and Pareto optimal. (2) We study the combinatorial setting, where each bidder is only interested in a subset of the keywords. We give an incentive compatible, individually rational, Pareto-optimal, and deterministic mechanism for identical click-through rates. (3) We give an impossibility result for incentive compatible, individually rational, Pareto-optimal, and deterministic mechanisms for bidders with diminishing marginal valuations.}, author = {Colini-Baldeschi, Riccardo and Leonardi, Stefano and Henzinger, Monika H and Starnberger, Martin}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Algorithms, Economics, Clinching ascending auction, auctions with budgets, Sponsored search auctions}, number = {1}, publisher = {Association for Computing Machinery}, title = {{On multiple keyword sponsored search auctions with budgets}}, doi = {10.1145/2818357}, volume = {4}, year = {2015}, } @article{11669, abstract = {We study individual rational, Pareto-optimal, and incentive compatible mechanisms for auctions with heterogeneous items and budget limits. We consider settings with multiunit demand and additive valuations. For single-dimensional valuations we prove a positive result for randomized mechanisms, and a negative result for deterministic mechanisms. While the positive result allows for private budgets, the negative result is for public budgets. For multidimensional valuations and public budgets we prove an impossibility result that applies to deterministic and randomized mechanisms. Taken together this shows the power of randomization in certain settings with heterogeneous items, but it also shows its limitations.}, author = {Dütting, Paul and Henzinger, Monika H and Starnberger, Martin}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Algorithmic game theory, auction theory, Clinching auction, Pareto optimality, Budget limits}, number = {1}, publisher = {Association for Computing Machinery}, title = {{Auctions for heterogeneous items and budget limits}}, doi = {10.1145/2818351}, volume = {4}, year = {2015}, } @article{11670, abstract = {Auctions are widely used on the Web. Applications range from sponsored search to platforms such as eBay. In these and in many other applications the auctions in use are single-/multi-item auctions with unit demand. The main drawback of standard mechanisms for this type of auctions, such as VCG and GSP, is the limited expressiveness that they offer to the bidders. The General Auction Mechanism (GAM) of Aggarwal et al. [2009] takes a first step toward addressing the problem of limited expressiveness by computing a bidder optimal, envy-free outcome for linear utility functions with identical slopes and a single discontinuity per bidder-item pair. 
We show that in many practical situations this does not suffice to adequately model the preferences of the bidders, and we overcome this problem by presenting the first mechanism for piecewise linear utility functions with nonidentical slopes and multiple discontinuities. Our mechanism runs in polynomial time. Like GAM it is incentive compatible for inputs that fulfill a certain nondegeneracy assumption, but our requirement is more general than the requirement of GAM. For discontinuous utility functions that are nondegenerate as well as for continuous utility functions the outcome of our mechanism is a competitive equilibrium. We also show how our mechanism can be used to compute approximately bidder optimal, envy-free outcomes for a general class of continuous utility functions via piecewise linear approximation. Finally, we prove hardness results for even more expressive settings.}, author = {Dütting, Paul and Henzinger, Monika H and Weber, Ingmar}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Computational Mathematics, Marketing, Economics and Econometrics, Statistics and Probability, Computer Science (miscellaneous)}, number = {1}, publisher = {Association for Computing Machinery}, title = {{An expressive mechanism for auctions on the web}}, doi = {10.1145/2716312}, volume = {4}, year = {2015}, } @inproceedings{11774, abstract = {Combinatorial auctions (CA) are a well-studied area in algorithmic mechanism design. However, contrary to the standard model, empirical studies suggest that a bidder’s valuation often does not depend solely on the goods assigned to him. For instance, in adwords auctions an advertiser might not want his ads to be displayed next to his competitors’ ads. In this paper, we propose and analyze several natural graph-theoretic models that incorporate such negative externalities, in which bidders form a directed conflict graph with maximum out-degree Δ. We design algorithms and truthful mechanisms for social welfare maximization that attain approximation ratios depending on Δ. For CA, our results are twofold: (1) A lottery that eliminates conflicts by discarding bidders/items independent of the bids. It allows us to apply any truthful 𝛼-approximation mechanism for conflict-free valuations and yields an 𝒪(𝛼Δ)-approximation mechanism. (2) For fractionally sub-additive valuations, we design a rounding algorithm via a novel combination of a semi-definite program and a linear program, resulting in a cone program; the approximation ratio is 𝒪((Δ log log Δ)/log Δ). The ratios are almost optimal given existing hardness results. For adwords auctions, we present several algorithms for the most relevant scenario when the number of items is small. In particular, we design a truthful mechanism with approximation ratio 𝑜(Δ) when the number of items is only logarithmic in the number of bidders.}, author = {Cheung, Yun Kuen and Henzinger, Monika H and Hoefer, Martin and Starnberger, Martin}, booktitle = {11th International Conference on Web and Internet Economics}, isbn = {9783662489949}, issn = {0302-9743}, location = {Amsterdam, Netherlands}, pages = {230–243}, publisher = {Springer Nature}, title = {{Combinatorial auctions with conflict-based externalities}}, doi = {10.1007/978-3-662-48995-6_17}, volume = {9470}, year = {2015}, } @inproceedings{11773, abstract = {Ad exchanges are an emerging platform for trading advertisement slots on the web with billions of dollars revenue per year.
Every time a user visits a web page, the publisher of that web page can ask an ad exchange to auction off the ad slots on this page to determine which advertisements are shown at which price. Due to the high volume of traffic, ad networks typically act as mediators for individual advertisers at ad exchanges. If multiple advertisers in an ad network are interested in the ad slots of the same auction, the ad network might use a “local” auction to resell the obtained ad slots among its advertisers. In this work we want to deepen the theoretical understanding of these new markets by analyzing them from the viewpoint of combinatorial auctions. Prior work studied mostly single-item auctions, while we allow the advertisers to express richer preferences over multiple items. We develop a game-theoretic model for the entanglement of the central auction at the ad exchange with the local auctions at the ad networks. We consider the incentives of all three involved parties and suggest a three-party competitive equilibrium, an extension of the Walrasian equilibrium that ensures envy-freeness for all participants. We show the existence of a three-party competitive equilibrium and a polynomial-time algorithm to find one for gross-substitute bidder valuations.}, author = {Ben-Zwi, Oren and Henzinger, Monika H and Loitzenbauer, Veronika}, booktitle = {11th International Conference on Web and Internet Economics}, isbn = {9783662489949}, issn = {0302-9743}, location = {Amsterdam, Netherlands}, pages = {104–117}, publisher = {Springer Nature}, title = {{Ad exchange: Envy-free auctions with mediators}}, doi = {10.1007/978-3-662-48995-6_8}, volume = {9470}, year = {2015}, } @inproceedings{11785, abstract = {Recently we presented the first algorithm for maintaining the set of nodes reachable from a source node in a directed graph that is modified by edge deletions with 𝑜(𝑚𝑛) total update time, where 𝑚 is the number of edges and 𝑛 is the number of nodes in the graph [Henzinger et al. STOC 2014]. The algorithm is a combination of several different algorithms, each for a different 𝑚 vs. 𝑛 trade-off. For the case of 𝑚=Θ(𝑛^{1.5}) the running time is 𝑂(𝑛^{2.47}), just barely below 𝑚𝑛=Θ(𝑛^{2.5}). In this paper we simplify the previous algorithm using new algorithmic ideas and achieve an improved running time of 𝑂̃(min(𝑚^{7/6}𝑛^{2/3}, 𝑚^{3/4}𝑛^{5/4+𝑜(1)}, 𝑚^{2/3}𝑛^{4/3+𝑜(1)}+𝑚^{3/7}𝑛^{12/7+𝑜(1)})). This gives, e.g., 𝑂(𝑛^{2.36}) for the notorious case 𝑚=Θ(𝑛^{1.5}). We obtain the same upper bounds for the problem of maintaining the strongly connected components of a directed graph undergoing edge deletions. Our algorithms are correct with high probability against an oblivious adversary.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {725 -- 736}, publisher = {Springer Nature}, title = {{Improved algorithms for decremental single-source reachability on directed graphs}}, doi = {10.1007/978-3-662-47672-7_59}, volume = {9134}, year = {2015}, } @inproceedings{11787, abstract = {We present faster algorithms for computing the 2-edge and 2-vertex strongly connected components of a directed graph. While in undirected graphs the 2-edge and 2-vertex connected components can be found in linear time, in directed graphs with m edges and n vertices only rather simple O(m n)-time algorithms were known.
We use a hierarchical sparsification technique to obtain algorithms that run in time 𝑂(𝑛^2). For 2-edge strongly connected components our algorithm gives the first running time improvement in 20 years. Additionally we present an 𝑂(𝑚^2/log 𝑛)-time algorithm for 2-edge strongly connected components, and thus improve over the O(m n) running time also when 𝑚=𝑂(𝑛). Our approach extends to k-edge and k-vertex strongly connected components for any constant k with a running time of 𝑂(𝑛^2 log 𝑛) for k-edge-connectivity and 𝑂(𝑛^3) for k-vertex-connectivity.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Loitzenbauer, Veronika}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {713 -- 724}, publisher = {Springer Nature}, title = {{Finding 2-edge and 2-vertex strongly connected components in quadratic time}}, doi = {10.1007/978-3-662-47672-7_58}, volume = {9134}, year = {2015}, } @inproceedings{11788, abstract = {Ad exchanges are becoming an increasingly popular way to sell advertisement slots on the internet. An ad exchange is basically a spot market for ad impressions. A publisher who has already signed contracts reserving advertisement impressions on his pages can choose between assigning a new ad impression for a new page view to a contracted advertiser or selling it at an ad exchange. This leads to an online revenue maximization problem for the publisher. Given a new impression to sell, decide whether (a) to assign it to a contracted advertiser and if so to which one, or (b) to sell it at the ad exchange and if so at which reserve price. We make no assumptions about the distribution of the valuations of the advertisers that participate in the ad exchange and show that there exists a simple primal-dual based online algorithm, whose lower bound for the revenue converges to 𝑅_{𝐴𝐷𝑋}+𝑅_𝐴(1−1/𝑒), where 𝑅_{𝐴𝐷𝑋} is the revenue that the optimum algorithm achieves from the ad exchange and 𝑅_𝐴 is the revenue that the optimum algorithm achieves from the contracted advertisers.}, author = {Dvořák, Wolfgang and Henzinger, Monika H}, booktitle = {12th International Workshop on Approximation and Online Algorithms}, issn = {0302-9743}, location = {Wroclaw, Poland}, pages = {156–167}, publisher = {Springer Nature}, title = {{Online ad assignment with an ad exchange}}, doi = {10.1007/978-3-319-18263-6_14}, volume = {8952}, year = {2015}, } @inproceedings{11786, abstract = {In this paper, we develop a dynamic version of the primal-dual method for optimization problems, and apply it to obtain the following results. (1) For the dynamic set-cover problem, we maintain an 𝑂(𝑓^2)-approximately optimal solution in 𝑂(𝑓⋅log(𝑚+𝑛)) amortized update time, where 𝑓 is the maximum “frequency” of an element, 𝑛 is the number of sets, and 𝑚 is the maximum number of elements in the universe at any point in time.
(2) For the dynamic 𝑏-matching problem, we maintain an 𝑂(1)-approximately optimal solution in 𝑂(log^3 𝑛) amortized update time, where 𝑛 is the number of nodes in the graph.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Italiano, Giuseppe F.}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {206 -- 218}, publisher = {Springer Nature}, title = {{Design of dynamic algorithms via primal-dual method}}, doi = {10.1007/978-3-662-47672-7_17}, volume = {9134}, year = {2015}, } @article{11845, abstract = {Phylogenetic diversity (PD) is a measure of biodiversity based on the evolutionary history of species. Here, we discuss several optimization problems related to the use of PD, and the more general measure split diversity (SD), in conservation prioritization. Depending on the conservation goal and the information available about species, one can construct optimization routines that incorporate various conservation constraints. We demonstrate how this information can be used to select sets of species for conservation action. Specifically, we discuss the use of species' geographic distributions, the choice of candidates under economic pressure, and the use of predator–prey interactions between the species in a community to define viability constraints. Despite such optimization problems falling into the area of NP-hard problems, it is possible to solve them in a reasonable amount of time using integer programming. We apply integer linear programming to a variety of models for conservation prioritization that incorporate the SD measure. As examples, we show the results for two data sets: the Cape region of South Africa and a Caribbean coral reef community. Finally, we provide user-friendly software at http://www.cibiv.at/software/pda.}, author = {Chernomor, Olga and Minh, Bui Quang and Forest, Félix and Klaere, Steffen and Ingram, Travis and Henzinger, Monika H and von Haeseler, Arndt}, issn = {2041-210X}, journal = {Methods in Ecology and Evolution}, number = {1}, pages = {83--91}, publisher = {Wiley}, title = {{Split diversity in constrained conservation prioritization using integer linear programming}}, doi = {10.1111/2041-210x.12299}, volume = {6}, year = {2015}, } @inproceedings{11868, abstract = {Consider the following Online Boolean Matrix-Vector Multiplication problem: We are given an n x n matrix M and will receive n column-vectors of size n, denoted by v1, ..., vn, one by one. After seeing each vector vi, we have to output the product Mvi before we can see the next vector. A naive algorithm can solve this problem using O(n^3) time in total, and its running time can be slightly improved to O(n^3/log^2 n) [Williams SODA'07]. We show that a conjecture that there is no truly subcubic (O(n^{3-ε})) time algorithm for this problem can be used to exhibit the underlying polynomial time hardness shared by many dynamic problems. For a number of problems, such as subgraph connectivity, Pagh's problem, d-failure connectivity, decremental single-source shortest paths, and decremental transitive closure, this conjecture implies tight hardness results. Thus, proving or disproving this conjecture will be very interesting as it will either imply several tight unconditional lower bounds or break through a common barrier that blocks progress with these problems.
This conjecture might also be considered strong evidence against any further improvement for these problems since refuting it will imply a major breakthrough for combinatorial Boolean matrix multiplication and other long-standing problems if the term "combinatorial algorithms" is interpreted as "Strassen-like algorithms" [Ballard et al. SPAA'11]. The conjecture also leads to hardness results for problems that were previously based on diverse problems and conjectures -- such as 3SUM, combinatorial Boolean matrix multiplication, triangle detection, and multiphase -- thus providing a uniform way to prove polynomial hardness results for dynamic algorithms; some of the new proofs are also simpler or even become trivial. The conjecture also leads to stronger and new, non-trivial, hardness results, e.g., for the fully-dynamic densest subgraph and diameter problems.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon and Saranurak, Thatchaphol}, booktitle = {47th Annual ACM Symposium on Theory of Computing}, isbn = {978-145033536-2}, issn = {0737-8017}, location = {Portland, OR, United States}, publisher = {Association for Computing Machinery}, title = {{Unifying and strengthening hardness for dynamic problems via the online matrix-vector multiplication conjecture}}, doi = {10.1145/2746539.2746609}, year = {2015}, } @inproceedings{11869, abstract = {While in many graph mining applications it is crucial to handle a stream of updates efficiently in terms of both time and space, not much was known about achieving this type of algorithm. In this paper we study this issue for a problem that lies at the core of many graph mining applications, called the densest subgraph problem. We develop an algorithm that achieves time- and space-efficiency for this problem simultaneously. It is one of the first of its kind for graph problems to the best of our knowledge. Given an input graph, the densest subgraph is the subgraph that maximizes the ratio between the number of edges and the number of nodes. For any ε>0, our algorithm can, with high probability, maintain a (4+ε)-approximate solution under edge insertions and deletions using ~O(n) space and ~O(1) amortized time per update; here, $n$ is the number of nodes in the graph and ~O hides the O(polylog_{1+ε} n) term. The approximation ratio can be improved to (2+ε) with more time. It can be extended to a (2+ε)-approximation sublinear-time algorithm and a distributed-streaming algorithm. Our algorithm is the first streaming algorithm that can maintain the densest subgraph in one pass. Prior to this, no algorithm could do so even in the special case of an incremental stream and even when there is no time restriction. The previously best algorithm in this setting required O(log n) passes [BahmaniKV12].
The space required by our algorithm is tight up to a polylogarithmic factor.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon and Tsourakakis, Charalampos}, booktitle = {47th Annual ACM Symposium on Theory of Computing}, isbn = {978-145033536-2}, issn = {0737-8017}, location = {Portland, OR, United States}, pages = {173 -- 182}, publisher = {Association for Computing Machinery}, title = {{Space- and time-efficient algorithm for maintaining dense subgraphs on one-pass dynamic streams}}, doi = {10.1145/2746539.2746592}, year = {2015}, } @inproceedings{11837, abstract = {Online social networks allow the collection of large amounts of data about the influence between users connected by a friendship-like relationship. When distributing items among agents forming a social network, this information allows us to exploit network externalities that each agent receives from his neighbors that get the same item. In this paper we consider Friends-of-Friends (2-hop) network externalities, i.e., externalities that not only depend on the neighbors that get the same item but also on neighbors of neighbors. For these externalities we study a setting where multiple different items are assigned to unit-demand agents. Specifically, we study the problem of welfare maximization under different types of externality functions. Let n be the number of agents and m be the number of items. Our contributions are the following: (1) We show that welfare maximization is APX-hard; we show that even for step functions with 2-hop (and also with 1-hop) externalities it is NP-hard to approximate social welfare better than (1-1/e). (2) On the positive side we present (i) an O(√n)-approximation algorithm for general concave externality functions, (ii) an O(log m)-approximation algorithm for linear externality functions, and (iii) a (1-1/e)/6-approximation algorithm for 2-hop step function externalities. We also improve the result from [6] for 1-hop step function externalities by giving a (1-1/e)/2-approximation algorithm.}, author = {Bhattacharya, Sayan and Dvořák, Wolfgang and Henzinger, Monika H and Starnberger, Martin}, booktitle = {32nd International Symposium on Theoretical Aspects of Computer Science}, isbn = {978-3-939897-78-1}, issn = {1868-8969}, location = {Garching, Germany}, pages = {90--102}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Welfare maximization with friends-of-friends network externalities}}, doi = {10.4230/LIPICS.STACS.2015.90}, volume = {30}, year = {2015}, } @article{11901, abstract = {We consider auctions of indivisible items to unit-demand bidders with budgets. This setting was suggested as an expressive model for single sponsored search auctions. Prior work presented mechanisms that compute bidder-optimal outcomes and are truthful for a restricted set of inputs, i.e., inputs in so-called general position. This condition is easily violated. We provide the first mechanism that is truthful in expectation for all inputs and achieves for each bidder no worse utility than the bidder-optimal outcome.
Additionally we give a complete characterization for which inputs mechanisms that compute bidder-optimal outcomes are truthful.}, author = {Henzinger, Monika H and Loitzenbauer, Veronika}, issn = {0304-3975}, journal = {Theoretical Computer Science}, pages = {1--15}, publisher = {Elsevier}, title = {{Truthful unit-demand auctions with budgets revisited}}, doi = {10.1016/j.tcs.2015.01.033}, volume = {573}, year = {2015}, } @article{11962, abstract = {One of the rare alternative reagents for the reduction of carbon–carbon double bonds is diimide (HNNH), which can be generated in situ from hydrazine hydrate (N2H4⋅H2O) and O2. Although this selective method is extremely clean and powerful, it is rarely used, as the rate-determining oxidation of hydrazine in the absence of a catalyst is relatively slow using conventional batch protocols. A continuous high-temperature/high-pressure methodology dramatically enhances the initial oxidation step, at the same time allowing for a safe and scalable processing of the hazardous reaction mixture. Simple alkenes can be selectively reduced within 10–20 min at 100–120 °C and 20 bar O2 pressure. The development of a multi-injection reactor platform for the periodic addition of N2H4⋅H2O enables the reduction of less reactive olefins even at lower reaction temperatures. This concept was utilized for the highly selective reduction of artemisinic acid to dihydroartemisinic acid, the precursor molecule for the semisynthesis of the antimalarial drug artemisinin. The industrially relevant reduction was achieved by using four consecutive liquid feeds (of N2H4⋅H2O) and residence time units resulting in a highly selective reduction within approximately 40 min at 60 °C and 20 bar O2 pressure, providing dihydroartemisinic acid in ≥93 % yield and ≥95 % selectivity.}, author = {Pieber, Bartholomäus and Glasnov, Toma and Kappe, C. Oliver}, issn = {1521-3765}, journal = {Chemistry - A European Journal}, number = {11}, pages = {4368--4376}, publisher = {Wiley}, title = {{Continuous flow reduction of artemisinic acid utilizing multi-injection strategies-closing the gap towards a fully continuous synthesis of antimalarial drugs}}, doi = {10.1002/chem.201406439}, volume = {21}, year = {2015}, } @article{11977, abstract = {The development of a continuous flow multistep strategy for the synthesis of linear peptoids and their subsequent macrocyclization via Click chemistry is described. The central transformation of this process is an Ugi four-component reaction generating the peptidomimetic core structure. In order to avoid exposure to the often toxic and malodorous isocyanide building blocks, the continuous approach was telescoped by the dehydration of the corresponding formamide. In a concurrent operation, the highly energetic azide moiety required for the subsequent intramolecular copper-catalyzed azide–alkyne cycloaddition (Click reaction) was installed by nucleophilic substitution from a bromide precursor. All steps yielding to the linear core structures can be conveniently coupled without the need for purification steps resulting in a single process generating the desired peptidomimetics in good to excellent yields within a 25 min reaction time. The following macrocyclization was realized in a coil reactor made of copper without any additional additive. A careful process intensification study demonstrated that this transformation occurs quantitatively within 25 min at 140 °C. 
Depending on the resulting ring strain, either a dimeric or a monomeric form of the cyclic product was obtained.}, author = {Salvador, Carlos Eduardo M. and Pieber, Bartholomäus and Neu, Philipp M. and Torvisco, Ana and Kleber Z. Andrade, Carlos and Kappe, C. Oliver}, issn = {1520-6904}, journal = {The Journal of Organic Chemistry}, number = {9}, pages = {4590--4602}, publisher = {American Chemical Society}, title = {{A sequential Ugi multicomponent/Cu-catalyzed azide–alkyne cycloaddition approach for the continuous flow generation of cyclic peptoids}}, doi = {10.1021/acs.joc.5b00445}, volume = {80}, year = {2015}, } @inbook{11989, abstract = {In recent years, the high demand for sustainable processes resulted in the development of highly attractive oxidation protocols utilizing molecular oxygen or even air instead of more uneconomic and often toxic reagents. The application of these sustainable, gaseous oxidants in conventional batch reactors is often associated with severe safety risks and process challenges especially on larger scales. Continuous flow technology offers the possibility to minimize these safety hazards and concurrently allows working in high-temperature/high-pressure regimes to access highly efficient oxidation protocols. This review article critically discusses recent literature examples of flow methodologies for selective aerobic oxidations of organic compounds. Several technologies and reactor designs for biphasic gas/liquid as well as supercritical reaction media are presented in detail. © Springer International Publishing Switzerland 2015.}, author = {Pieber, Bartholomäus and Kappe, C. Oliver}, booktitle = {Organometallic Flow Chemistry}, editor = {Noël, Timothy}, isbn = {9783319332413}, issn = {1616-8534}, pages = {97–136}, publisher = {Springer Nature}, title = {{Aerobic oxidations in continuous flow}}, doi = {10.1007/3418_2015_133}, volume = {57}, year = {2015}, } @article{120, abstract = {Clustering of fine particles is of crucial importance in settings ranging from the early stages of planet formation to the coagulation of industrial powders and airborne pollutants. Models of such clustering typically focus on inelastic deformation and cohesion. However, even in charge-neutral particle systems comprising grains of the same dielectric material, tribocharging can generate large amounts of net positive or negative charge on individual particles, resulting in long-range electrostatic forces. The effects of such forces on cluster formation are not well understood and have so far not been studied in situ. Here we report the first observations of individual collide-and-capture events between charged submillimetre particles, including Kepler-like orbits. Charged particles can become trapped in their mutual electrostatic energy well and aggregate via multiple bounces. This enables the initiation of clustering at relative velocities much larger than the upper limit for sticking after a head-on collision, a long-standing issue known from pre-planetary dust aggregation. 
Moreover, Coulomb interactions together with dielectric polarization are found to stabilize characteristic molecule-like configurations, providing new insights for the modelling of clustering dynamics in a wide range of microscopic dielectric systems, such as charged polarizable ions, biomolecules and colloids.}, author = {Lee, Victor and Waitukaitis, Scott R and Miskin, Marc and Jaeger, Heinrich}, journal = {Nature Physics}, number = {9}, pages = {733 -- 737}, publisher = {Nature Publishing Group}, title = {{Direct observation of particle interactions and clustering in charged granular streams}}, doi = {10.1038/nphys3396}, volume = {11}, year = {2015}, } @article{121, abstract = {We show that the simplest building blocks of origami-based materials - rigid, degree-four vertices - are generically multistable. The existence of two distinct branches of folding motion emerging from the flat state suggests at least bistability, but we show how nonlinearities in the folding motions allow generic vertex geometries to have as many as five stable states. In special geometries with collinear folds and symmetry, more branches emerge leading to as many as six stable states. Tuning the fold energy parameters, we show how monostability is also possible. Finally, we show how to program the stability features of a single vertex into a periodic fold tessellation. The resulting metasheets provide a previously unanticipated functionality - tunable and switchable shape and size via multistability.}, author = {Waitukaitis, Scott R and Menaut, Rémi and Chen, Bryan and Van Hecke, Martin}, journal = {APS Physics, Physical Review Letters}, number = {5}, publisher = {American Physical Society}, title = {{Origami multistability: From single vertices to metasheets}}, doi = {10.1103/PhysRevLett.114.055503}, volume = {114}, year = {2015}, } @article{1311, abstract = {In this paper, we develop an energy method to study finite speed of propagation and waiting time phenomena for the stochastic porous media equation with linear multiplicative noise in up to three spatial dimensions. Based on a novel iteration technique and on stochastic counterparts of weighted integral estimates used in the deterministic setting, we formulate a sufficient criterion on the growth of initial data which locally guarantees a waiting time phenomenon to occur almost surely. Up to a logarithmic factor, this criterion coincides with the optimal criterion known from the deterministic setting. Our technique can be modified to prove finite speed of propagation as well.}, author = {Julian Fischer and Grün, Günther}, journal = {SIAM Journal on Mathematical Analysis}, number = {1}, pages = {825 -- 854}, publisher = {Society for Industrial and Applied Mathematics }, title = {{Finite speed of propagation and waiting times for the stochastic porous medium equation: A unifying approach}}, doi = {10.1137/140960578}, volume = {47}, year = {2015}, } @article{1314, abstract = {We derive a posteriori estimates for the modeling error caused by the assumption of perfect incompressibility in the incompressible Navier-Stokes equation: Real fluids are never perfectly incompressible but always feature at least some low amount of compressibility. Thus, their behavior is described by the compressible Navier-Stokes equation, the pressure being a steep function of the density. 
We rigorously estimate the difference between an approximate solution to the incompressible Navier-Stokes equation and any weak solution to the compressible Navier-Stokes equation in the sense of Lions (without assuming any additional regularity of solutions). Heuristics and numerical results suggest that our error estimates are of optimal order in the case of "well-behaved" flows and divergence-free approximations of the velocity field. Thus, we expect our estimates to justify the idealization of fluids as perfectly incompressible also in practical situations.}, author = {Fischer, Julian L}, journal = {SIAM Journal on Numerical Analysis}, number = {5}, pages = {2178 -- 2205}, publisher = {Society for Industrial and Applied Mathematics }, title = {{A posteriori modeling error estimates for the assumption of perfect incompressibility in the Navier-Stokes equation}}, doi = {10.1137/140966654}, volume = {53}, year = {2015}, } @article{1313, abstract = {We present an algorithm for the derivation of lower bounds on support propagation for a certain class of nonlinear parabolic equations. We proceed by combining the ideas in some recent papers by the author with the algorithmic construction of entropies due to Jüngel and Matthes, reducing the problem to a quantifier elimination problem. Due to its complexity, the quantifier elimination problem cannot be solved by present exact algorithms. However, by tackling the quantifier elimination problem numerically, in the case of the thin-film equation we are able to improve recent results by the author in the regime of strong slippage n ∈ (1, 2). For certain second-order doubly nonlinear parabolic equations, we are able to extend the known lower bounds on free boundary propagation to the case of irregular oscillatory initial data. Finally, we apply our method to a sixth-order quantum drift-diffusion equation, resulting in an upper bound on the time which it takes for the support to reach every point in the domain.}, author = {Julian Fischer}, journal = {Interfaces and Free Boundaries}, number = {1}, pages = {1 -- 20}, publisher = {European Mathematical Society Publishing House}, title = {{Estimates on front propagation for nonlinear higher-order parabolic equations: An algorithmic approach}}, doi = {10.4171/IFB/331}, volume = {17}, year = {2015}, } @article{1316, abstract = {In the present work we introduce the notion of a renormalized solution for reaction–diffusion systems with entropy-dissipating reactions. We establish the global existence of renormalized solutions. In the case of integrable reaction terms our notion of a renormalized solution reduces to the usual notion of a weak solution. Our existence result in particular covers all reaction–diffusion systems involving a single reversible reaction with mass-action kinetics and (possibly species-dependent) Fick-law diffusion; more generally, it covers the case of systems of reversible reactions with mass-action kinetics which satisfy the detailed balance condition. 
For such equations the existence of any kind of solution in general was an open problem, thereby motivating the study of renormalized solutions.}, author = {Julian Fischer}, journal = {Archive for Rational Mechanics and Analysis}, number = {1}, pages = {553 -- 587}, publisher = {Springer}, title = {{Global existence of renormalized solutions to entropy-dissipating reaction–diffusion systems}}, doi = {10.1007/s00205-015-0866-x}, volume = {218}, year = {2015}, } @article{1383, abstract = {In plants, vacuolar H+-ATPase (V-ATPase) activity acidifies both the trans-Golgi network/early endosome (TGN/EE) and the vacuole. This dual V-ATPase function has impeded our understanding of how the pH homeostasis within the plant TGN/EE controls exo- and endocytosis. Here, we show that the weak V-ATPase mutant deetiolated3 (det3) displayed a pH increase in the TGN/EE, but not in the vacuole, strongly impairing secretion and recycling of the brassinosteroid receptor and the cellulose synthase complexes to the plasma membrane, in contrast to mutants lacking tonoplast-localized V-ATPase activity only. The brassinosteroid insensitivity and the cellulose deficiency defects in det3 were tightly correlated with reduced Golgi and TGN/EE motility. Thus, our results provide strong evidence that acidification of the TGN/EE, but not of the vacuole, is indispensable for functional secretion and recycling in plants.}, author = {Yu, Luo and Scholl, Stefan and Doering, Anett and Yi, Zhang and Irani, Niloufer and Di Rubbo, Simone and Neumetzler, Lutz and Krishnamoorthy, Praveen and Van Houtte, Isabelle and Mylle, Evelien and Bischoff, Volker and Vernhettes, Samantha and Winne, Johan and Friml, Jirí and Stierhof, York and Schumacher, Karin and Persson, Staffan and Russinova, Eugenia}, journal = {Nature Plants}, number = {7}, publisher = {Nature Publishing Group}, title = {{V-ATPase activity in the TGN/EE is required for exocytosis and recycling in Arabidopsis}}, doi = {10.1038/nplants.2015.94}, volume = {1}, year = {2015}, } @inproceedings{1425, abstract = {In this work we aim at extending the theoretical foundations of lifelong learning. Previous work analyzing this scenario is based on the assumption that learning tasks are sampled i.i.d. from a task environment or limited to strongly constrained data distributions. Instead, we study two scenarios when lifelong learning is possible, even though the observed tasks do not form an i.i.d. sample: first, when they are sampled from the same environment, but possibly with dependencies, and second, when the task environment is allowed to change over time in a consistent way. In the first case we prove a PAC-Bayesian theorem that can be seen as a direct generalization of the analogous previous result for the i.i.d. case. For the second scenario we propose to learn an inductive bias in form of a transfer procedure. We present a generalization bound and show on a toy example how it can be used to identify a beneficial transfer algorithm.}, author = {Pentina, Anastasia and Lampert, Christoph}, location = {Montreal, Canada}, pages = {1540 -- 1548}, publisher = {Neural Information Processing Systems}, title = {{Lifelong learning with non-i.i.d. tasks}}, volume = {2015}, year = {2015}, } @inproceedings{1424, abstract = {We consider the problem of statistical computations with persistence diagrams, a summary representation of topological features in data. These diagrams encode persistent homology, a widely used invariant in topological data analysis. 
While several avenues towards a statistical treatment of the diagrams have been explored recently, we follow an alternative route that is motivated by the success of methods based on the embedding of probability measures into reproducing kernel Hilbert spaces. In fact, a positive definite kernel on persistence diagrams has recently been proposed, connecting persistent homology to popular kernel-based learning techniques such as support vector machines. However, important properties of that kernel enabling a principled use in the context of probability measure embeddings remain to be explored. Our contribution is to close this gap by proving universality of a variant of the original kernel, and to demonstrate its effective use in twosample hypothesis testing on synthetic as well as real-world data.}, author = {Kwitt, Roland and Huber, Stefan and Niethammer, Marc and Lin, Weili and Bauer, Ulrich}, location = {Montreal, Canada}, pages = {3070 -- 3078}, publisher = {Neural Information Processing Systems}, title = {{Statistical topological data analysis-A kernel perspective}}, volume = {28}, year = {2015}, } @inproceedings{1430, abstract = {Evolutionary algorithms (EAs) form a popular optimisation paradigm inspired by natural evolution. In recent years the field of evolutionary computation has developed a rigorous analytical theory to analyse their runtime on many illustrative problems. Here we apply this theory to a simple model of natural evolution. In the Strong Selection Weak Mutation (SSWM) evolutionary regime the time between occurrence of new mutations is much longer than the time it takes for a new beneficial mutation to take over the population. In this situation, the population only contains copies of one genotype and evolution can be modelled as a (1+1)-type process where the probability of accepting a new genotype (improvements or worsenings) depends on the change in fitness. We present an initial runtime analysis of SSWM, quantifying its performance for various parameters and investigating differences to the (1+1) EA. We show that SSWM can have a moderate advantage over the (1+1) EA at crossing fitness valleys and study an example where SSWM outperforms the (1+1) EA by taking advantage of information on the fitness gradient.}, author = {Paixao, Tiago and Sudholt, Dirk and Heredia, Jorge and Trubenova, Barbora}, booktitle = {Proceedings of the 2015 Annual Conference on Genetic and Evolutionary Computation}, location = {Madrid, Spain}, pages = {1455 -- 1462}, publisher = {ACM}, title = {{First steps towards a runtime comparison of natural and artificial evolution}}, doi = {10.1145/2739480.2754758}, year = {2015}, } @inproceedings{1474, abstract = {Cryptographic access control offers selective access to encrypted data via a combination of key management and functionality-rich cryptographic schemes, such as attribute-based encryption. Using this approach, publicly available meta-data may inadvertently leak information on the access policy that is enforced by cryptography, which renders cryptographic access control unusable in settings where this information is highly sensitive. We begin to address this problem by presenting rigorous definitions for policy privacy in cryptographic access control. For concreteness we set our results in the model of Role-Based Access Control (RBAC), where we identify and formalize several different flavors of privacy, however, our framework should serve as inspiration for other models of access control. 
Based on our insights we propose a new system which significantly improves on the privacy properties of state-of-the-art constructions. Our design is based on a novel type of privacy-preserving attribute-based encryption, which we introduce and show how to instantiate. We present our results in the context of a cryptographic RBAC system by Ferrara et al. (CSF'13), which uses cryptography to control read access to files, while write access is still delegated to trusted monitors. We give an extension of the construction that permits cryptographic control over write access. Our construction assumes that key management uses out-of-band channels between the policy enforcer and the users but eliminates completely the need for monitoring read/write access to the data.}, author = {Ferrara, Anna and Fuchsbauer, Georg and Liu, Bin and Warinschi, Bogdan}, location = {Verona, Italy}, pages = {46--60}, publisher = {IEEE}, title = {{Policy privacy in cryptographic access control}}, doi = {10.1109/CSF.2015.11}, year = {2015}, } @misc{1473, abstract = {In this paper we survey geometric and arithmetic techniques to study the cohomology of semiprojective hyperkähler manifolds including toric hyperkähler varieties, Nakajima quiver varieties and moduli spaces of Higgs bundles on Riemann surfaces. The resulting formulae for their Poincaré polynomials are combinatorial and representation theoretical in nature. In particular we will look at their Betti numbers and will establish some results and state some expectations on their asymptotic shape.}, author = {Tamas Hausel and Rodríguez Villegas, Fernando}, booktitle = {Asterisque}, number = {370}, pages = {113 -- 156}, publisher = {Societe Mathematique de France}, title = {{Cohomology of large semiprojective hyperkähler varieties}}, volume = {2015}, year = {2015}, } @inproceedings{1483, abstract = {Topological data analysis offers a rich source of valuable information to study vision problems. Yet, so far we lack a theoretically sound connection to popular kernel-based learning techniques, such as kernel SVMs or kernel PCA. In this work, we establish such a connection by designing a multi-scale kernel for persistence diagrams, a stable summary representation of topological features in data. We show that this kernel is positive definite and prove its stability with respect to the 1-Wasserstein distance. Experiments on two benchmark datasets for 3D shape classification/retrieval and texture recognition show considerable performance gains of the proposed method compared to an alternative approach that is based on the recently introduced persistence landscapes.}, author = {Reininghaus, Jan and Huber, Stefan and Bauer, Ulrich and Kwitt, Roland}, location = {Boston, MA, USA}, pages = {4741 -- 4748}, publisher = {IEEE}, title = {{A stable multi-scale kernel for topological machine learning}}, doi = {10.1109/CVPR.2015.7299106}, year = {2015}, } @inproceedings{1498, abstract = {Fault-tolerant distributed algorithms play an important role in many critical/high-availability applications. These algorithms are notoriously difficult to implement correctly, due to asynchronous communication and the occurrence of faults, such as the network dropping messages or computers crashing. Nonetheless there is surprisingly little language and verification support to build distributed systems based on fault-tolerant algorithms. In this paper, we present some of the challenges that a designer has to overcome to implement a fault-tolerant distributed system. 
Then we review different models that have been proposed to reason about distributed algorithms and sketch how such a model can form the basis for a domain-specific programming language. Adopting a high-level programming model can simplify the programmer's life and make the code amenable to automated verification, while still compiling to efficiently executable code. We conclude by summarizing the current status of an ongoing language design and implementation project that is based on this idea.}, author = {Dragoi, Cezara and Henzinger, Thomas A and Zufferey, Damien}, isbn = {978-3-939897-80-4 }, location = {Asilomar, CA, United States}, pages = {90 -- 102}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{The need for language support for fault-tolerant distributed systems}}, doi = {10.4230/LIPIcs.SNAPL.2015.90}, volume = {32}, year = {2015}, } @article{1497, abstract = {Detecting allelic biases from high-throughput sequencing data requires an approach that maximises sensitivity while minimizing false positives. Here, we present Allelome.PRO, an automated user-friendly bioinformatics pipeline, which uses high-throughput sequencing data from reciprocal crosses of two genetically distinct mouse strains to detect allele-specific expression and chromatin modifications. Allelome.PRO extends approaches used in previous studies that exclusively analyzed imprinted expression to give a complete picture of the ‘allelome’ by automatically categorising the allelic expression of all genes in a given cell type into imprinted, strain-biased, biallelic or non-informative. Allelome.PRO offers increased sensitivity to analyze lowly expressed transcripts, together with a robust false discovery rate empirically calculated from variation in the sequencing data. We used RNA-seq data from mouse embryonic fibroblasts from F1 reciprocal crosses to determine a biologically relevant allelic ratio cutoff, and define for the first time an entire allelome. Furthermore, we show that Allelome.PRO detects differential enrichment of H3K4me3 over promoters from ChIP-seq data validating the RNA-seq results. This approach can be easily extended to analyze histone marks of active enhancers, or transcription factor binding sites and therefore provides a powerful tool to identify candidate cis regulatory elements genome wide.}, author = {Andergassen, Daniel and Dotter, Christoph and Kulinski, Tomasz and Guenzl, Philipp and Bammer, Philipp and Barlow, Denise and Pauler, Florian and Hudson, Quanah}, journal = {Nucleic Acids Research}, number = {21}, publisher = {Oxford University Press}, title = {{Allelome.PRO, a pipeline to define allele-specific genomic features from high-throughput sequencing data}}, doi = {10.1093/nar/gkv727}, volume = {43}, year = {2015}, } @inproceedings{1499, abstract = {We consider weighted automata with both positive and negative integer weights on edges and study the problem of synchronization using adaptive strategies that may only observe whether the current weight-level is negative or nonnegative. 
We show that the synchronization problem is decidable in polynomial time for deterministic weighted automata.}, author = {Kretinsky, Jan and Larsen, Kim and Laursen, Simon and Srba, Jiří}, location = {Madrid, Spain}, pages = {142 -- 154}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Polynomial time decidability of weighted synchronization under partial observability}}, doi = {10.4230/LIPIcs.CONCUR.2015.142}, volume = {42}, year = {2015}, } @inproceedings{1495, abstract = {Motivated by biological questions, we study configurations of equal-sized disks in the Euclidean plane that neither pack nor cover. Measuring the quality by the probability that a random point lies in exactly one disk, we show that the regular hexagonal grid gives the maximum among lattice configurations. }, author = {Edelsbrunner, Herbert and Iglesias Ham, Mabel and Kurlin, Vitaliy}, booktitle = {Proceedings of the 27th Canadian Conference on Computational Geometry}, location = {Ontario, Canada}, pages = {128--135}, publisher = {Queen's University}, title = {{Relaxed disk packing}}, volume = {2015-August}, year = {2015}, } @article{1504, abstract = {Let Q = (Q1, . . . , Qn) be a random vector drawn from the uniform distribution on the set of all n! permutations of {1, 2, . . . , n}. Let Z = (Z1, . . . , Zn), where Zj is the mean zero variance one random variable obtained by centralizing and normalizing Qj , j = 1, . . . , n. Assume that Xi , i = 1, . . . ,p are i.i.d. copies of 1/√ p Z and X = Xp,n is the p × n random matrix with Xi as its ith row. Then Sn = XX is called the p × n Spearman's rank correlation matrix which can be regarded as a high dimensional extension of the classical nonparametric statistic Spearman's rank correlation coefficient between two independent random variables. In this paper, we establish a CLT for the linear spectral statistics of this nonparametric random matrix model in the scenario of high dimension, namely, p = p(n) and p/n→c ∈ (0,∞) as n→∞.We propose a novel evaluation scheme to estimate the core quantity in Anderson and Zeitouni's cumulant method in [Ann. Statist. 36 (2008) 2553-2576] to bypass the so-called joint cumulant summability. In addition, we raise a two-step comparison approach to obtain the explicit formulae for the mean and covariance functions in the CLT. Relying on this CLT, we then construct a distribution-free statistic to test complete independence for components of random vectors. Owing to the nonparametric property, we can use this test on generally distributed random variables including the heavy-tailed ones.}, author = {Bao, Zhigang and Lin, Liang and Pan, Guangming and Zhou, Wang}, journal = {Annals of Statistics}, number = {6}, pages = {2588 -- 2623}, publisher = {Institute of Mathematical Statistics}, title = {{Spectral statistics of large dimensional spearman s rank correlation matrix and its application}}, doi = {10.1214/15-AOS1353}, volume = {43}, year = {2015}, } @misc{1500, abstract = {In this poster, we present methods for randomly generating hybrid automata with affine differential equations, invariants, guards, and assignments. Selecting an arbitrary affine function from the set of all affine functions results in a low likelihood of generating hybrid automata with diverse and interesting behaviors, as there are an uncountable number of elements in the set of all affine functions. Instead, we partition the set of all affine functions into potentially interesting classes and randomly select elements from these classes. 
For example, we partition the set of all affine differential equations by using restrictions on eigenvalues such as those that yield stable, unstable, etc. equilibrium points. We partition the components describing discrete behavior (guards, assignments, and invariants) to allow either time-dependent or state-dependent switching, and in particular provide the ability to generate subclasses of piecewise-affine hybrid automata. Our preliminary experimental results with a prototype tool called HyRG (Hybrid Random Generator) illustrate the feasibility of this generation method to automatically create standard hybrid automaton examples like the bouncing ball and thermostat.}, author = {Nguyen, Luan V and Christian Schilling and Sergiy Bogomolov and Johnson, Taylor T}, booktitle = {HSCC: Hybrid Systems - Computation and Control}, pages = {289 -- 290}, publisher = {Springer}, title = {{Poster: HyRG: A random generation tool for affine hybrid automata}}, doi = {10.1145/2728606.2728650}, year = {2015}, } @article{1503, abstract = {A Herman-Avila-Bochi type formula is obtained for the average sum of the top d Lyapunov exponents over a one-parameter family of double-struck G-cocycles, where double-struck G is the group that leaves a certain, non-degenerate Hermitian form of signature (c, d) invariant. The generic example of such a group is the pseudo-unitary group U(c, d) or, in the case c = d, the Hermitian-symplectic group HSp(2d) which naturally appears for cocycles related to Schrödinger operators. In the case d = 1, the formula for HSp(2d) cocycles reduces to the Herman-Avila-Bochi formula for SL(2, ℝ) cocycles.}, author = {Sadel, Christian}, journal = {Ergodic Theory and Dynamical Systems}, number = {5}, pages = {1582 -- 1591}, publisher = {Cambridge University Press}, title = {{A Herman-Avila-Bochi formula for higher-dimensional pseudo-unitary and Hermitian-symplectic-cocycles}}, doi = {10.1017/etds.2013.103}, volume = {35}, year = {2015}, } @inproceedings{1510, abstract = {The concept of well group in a special but important case captures homological properties of the zero set of a continuous map f from K to R^n on a compact space K that are invariant with respect to perturbations of f. The perturbations are arbitrary continuous maps within L_infty distance r from f for a given r > 0. The main drawback of the approach is that the computability of well groups was shown only when dim K = n or n = 1. Our contribution to the theory of well groups is twofold: on the one hand we improve on the computability issue, but on the other hand we present a range of examples where the well groups are incomplete invariants, that is, fail to capture certain important robust properties of the zero set. For the first part, we identify a computable subgroup of the well group that is obtained by cap product with the pullback of the orientation of R^n by f. In other words, well groups can be algorithmically approximated from below. When f is smooth and dim K < 2n-2, our approximation of the (dim K-n)th well group is exact. For the second part, we find examples of maps f, f' from K to R^n with all well groups isomorphic but whose perturbations have different zero sets. We discuss on a possible replacement of the well groups of vector valued maps by an invariant of a better descriptive power and computability status. 
}, author = {Franek, Peter and Krcál, Marek}, location = {Eindhoven, Netherlands}, pages = {842 -- 856}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{On computability and triviality of well groups}}, doi = {10.4230/LIPIcs.SOCG.2015.842}, volume = {34}, year = {2015}, } @article{1505, abstract = {This paper is aimed at deriving the universality of the largest eigenvalue of a class of high-dimensional real or complex sample covariance matrices of the form W_N = Σ^{1/2}XX^∗Σ^{1/2}. Here, X = (x_{ij})_{M,N} is an M × N random matrix with independent entries x_{ij}, 1 ≤ i ≤ M, 1 ≤ j ≤ N, such that Ex_{ij} = 0, E|x_{ij}|^2 = 1/N. On dimensionality, we assume that M = M(N) and N/M → d ∈ (0, ∞) as N → ∞. For a class of general deterministic positive-definite M × M matrices Σ, under some additional assumptions on the distribution of x_{ij}'s, we show that the limiting behavior of the largest eigenvalue of W_N is universal, via pursuing a Green function comparison strategy raised in [Probab. Theory Related Fields 154 (2012) 341-407, Adv. Math. 229 (2012) 1435-1515] by Erdős, Yau and Yin for Wigner matrices and extended by Pillai and Yin [Ann. Appl. Probab. 24 (2014) 935-1001] to sample covariance matrices in the null case (Σ = I). Consequently, in the standard complex case (Ex_{ij}^2 = 0), combining this universality property and the results known for Gaussian matrices obtained by El Karoui in [Ann. Probab. 35 (2007) 663-714] (nonsingular case) and Onatski in [Ann. Appl. Probab. 18 (2008) 470-490] (singular case), we show that after an appropriate normalization the largest eigenvalue of W_N converges weakly to the type 2 Tracy-Widom distribution TW_2. Moreover, in the real case, we show that when Σ is spiked with a fixed number of subcritical spikes, the type 1 Tracy-Widom limit TW_1 holds for the normalized largest eigenvalue of W_N, which extends a result of Féral and Péché in [J. Math. Phys. 50 (2009) 073302] to the scenario of nondiagonal Σ and more generally distributed X. In summary, we establish the Tracy-Widom type universality for the largest eigenvalue of generally distributed sample covariance matrices under quite light assumptions on Σ. Applications of these limiting results to statistical signal detection and structure recognition of separable covariance matrices are also discussed.}, author = {Bao, Zhigang and Pan, Guangming and Zhou, Wang}, journal = {Annals of Statistics}, number = {1}, pages = {382 -- 421}, publisher = {Institute of Mathematical Statistics}, title = {{Universality for the largest eigenvalue of sample covariance matrices with general population}}, doi = {10.1214/14-AOS1281}, volume = {43}, year = {2015}, } @article{1508, abstract = {We consider generalized Wigner ensembles and general β-ensembles with analytic potentials for any β ≥ 1. The recent universality results in particular assert that the local averages of consecutive eigenvalue gaps in the bulk of the spectrum are universal in the sense that they coincide with those of the corresponding Gaussian β-ensembles. In this article, we show that local averaging is not necessary for this result, i.e. we prove that the single gap distributions in the bulk are universal.
In fact, with an additional step, our result can be extended to any C^4(ℝ) potential.}, author = {Erdös, László and Yau, Horng-Tzer}, journal = {Journal of the European Mathematical Society}, number = {8}, pages = {1927 -- 2036}, publisher = {European Mathematical Society}, title = {{Gap universality of generalized Wigner and β-ensembles}}, doi = {10.4171/JEMS/548}, volume = {17}, year = {2015}, } @article{1506, abstract = {Consider the square random matrix A_n = (a_ij)_{n,n}, where {a_ij := a_ij^{(n)}, i, j = 1, . . . , n} is a collection of independent real random variables with means zero and variances one. Under the additional moment condition sup_n max_{1≤i,j≤n} E a_ij^4 < ∞, we prove Girko's logarithmic law for det A_n in the sense that, as n → ∞, (log|det A_n| − (1/2) log (n−1)!) / √((1/2) log n) converges in distribution to N(0, 1).}, author = {Bao, Zhigang and Pan, Guangming and Zhou, Wang}, journal = {Bernoulli}, number = {3}, pages = {1600 -- 1628}, publisher = {Bernoulli Society for Mathematical Statistics and Probability}, title = {{The logarithmic law of random determinant}}, doi = {10.3150/14-BEJ615}, volume = {21}, year = {2015}, } @article{1513, abstract = {Insects of the order Hemiptera (true bugs) use a wide range of mechanisms of sex determination, including genetic sex determination, paternal genome elimination, and haplodiploidy. Genetic sex determination, the prevalent mode, is generally controlled by a pair of XY sex chromosomes or by an XX/X0 system, but different configurations that include additional sex chromosomes are also present. Although this diversity of sex determining systems has been extensively studied at the cytogenetic level, only the X chromosome of the model pea aphid Acyrthosiphon pisum has been analyzed at the genomic level, and little is known about X chromosome biology in the rest of the order. In this study, we take advantage of published DNA- and RNA-seq data from three additional Hemiptera species to perform a comparative analysis of the gene content and expression of the X chromosome throughout this clade. We find that, despite showing evidence of dosage compensation, the X chromosomes of these species show female-biased expression, and a deficit of male-biased genes, in direct contrast to the pea aphid X. We further detect an excess of shared gene content between these very distant species, suggesting that despite the diversity of sex determining systems, the same chromosomal element is used as the X throughout a large portion of the order. }, author = {Pal, Arka and Vicoso, Beatriz}, journal = {Genome Biology and Evolution}, number = {12}, pages = {3259 -- 3268}, publisher = {Oxford University Press}, title = {{The X chromosome of hemipteran insects: Conservation, dosage compensation and sex-biased expression}}, doi = {10.1093/gbe/evv215}, volume = {7}, year = {2015}, } @article{1517, abstract = {We study the large deviation rate functional for the empirical distribution of independent Brownian particles with drift. In one dimension, it has been shown by Adams, Dirr, Peletier and Zimmer that this functional is asymptotically equivalent (in the sense of Γ-convergence) to the Jordan-Kinderlehrer-Otto functional arising in the Wasserstein gradient flow structure of the Fokker-Planck equation. In higher dimensions, part of this statement (the lower bound) has been recently proved by Duong, Laschos and Renger, but the upper bound remained open, since the proof of Duong et al. relies on regularity properties of optimal transport maps that are restricted to one dimension.
In this note we present a new proof of the upper bound, thereby generalising the result of Adams et al. to arbitrary dimensions. }, author = {Erbar, Matthias and Maas, Jan and Renger, Michiel}, journal = {Electronic Communications in Probability}, publisher = {Institute of Mathematical Statistics}, title = {{From large deviations to Wasserstein gradient flows in multiple dimensions}}, doi = {10.1214/ECP.v20-4315}, volume = {20}, year = {2015}, } @article{1515, abstract = {Type 1 metabotropic glutamate (mGlu1) receptors play a pivotal role in different forms of synaptic plasticity in the cerebellar cortex, e.g. long-term depression at glutamatergic synapses and rebound potentiation at GABAergic synapses. These various forms of plasticity might depend on the subsynaptic arrangement of the receptor in Purkinje cells that can be regulated by protein-protein interactions. This study investigated, by means of the freeze-fracture replica immunogold labelling method, the subcellular localization of mGlu1 receptors in the rodent cerebellum and whether Homer proteins regulate their subsynaptic distribution. We observed a widespread extrasynaptic localization of mGlu1 receptors and confirmed their peri-synaptic enrichment at glutamatergic synapses. Conversely, we detected mGlu1 receptors within the main body of GABAergic synapses onto Purkinje cell dendrites. Although Homer proteins are known to interact with the mGlu1 receptor C-terminus, we could not detect Homer3, the most abundant Homer protein in the cerebellar cortex, at GABAergic synapses by pre-embedding and post-embedding immunoelectron microscopy. We then hypothesized a critical role for Homer proteins in the peri-junctional localization of mGlu1 receptors at glutamatergic synapses. To disrupt Homer-associated protein complexes, mice were tail-vein injected with the membrane-permeable dominant-negative TAT-Homer1a. Freeze-fracture replica immunogold labelling analysis showed no significant alteration in the mGlu1 receptor distribution pattern at parallel fibre-Purkinje cell synapses, suggesting that other scaffolding proteins are involved in the peri-synaptic confinement. The identification of interactors that regulate the subsynaptic localization of the mGlu1 receptor at neurochemically distinct synapses may offer new insight into its trafficking and intracellular signalling.}, author = {Mansouri, Mahnaz and Kasugai, Yu and Fukazawa, Yugo and Bertaso, Federica and Raynaud, Fabrice and Perroy, Julie and Fagni, Laurent and Kaufmann, Walter and Watanabe, Masahiko and Shigemoto, Ryuichi and Ferraguti, Francesco}, journal = {European Journal of Neuroscience}, number = {2}, pages = {157 -- 167}, publisher = {Wiley-Blackwell}, title = {{Distinct subsynaptic localization of type 1 metabotropic glutamate receptors at glutamatergic and GABAergic synapses in the rodent cerebellar cortex}}, doi = {10.1111/ejn.12779}, volume = {41}, year = {2015}, } @article{1514, abstract = {Endocannabinoids (eCBs) play key roles in brain function, acting as modulatory signals in synaptic transmission and plasticity. They are recognized as retrograde messengers that mediate long-term synaptic depression (LTD), but their ability to induce long-term potentiation (LTP) is poorly known. We show that eCBs induce the long-term enhancement of transmitter release at single hippocampal synapses through stimulation of astrocytes when coincident with postsynaptic activity.
This LTP requires the coordinated activity of the 3 elements of the tripartite synapse: 1) an eCB-evoked astrocyte calcium signal that stimulates glutamate release; 2) postsynaptic nitric oxide production; and 3) activation of protein kinase C and presynaptic group I metabotropic glutamate receptors, whose location at presynaptic sites was confirmed by immunoelectron microscopy. Hence, while eCBs act as retrograde signals to depress homoneuronal synapses, they serve as lateral messengers to induce LTP in distant heteroneuronal synapses through stimulation of astrocytes. Therefore, eCBs can trigger LTP through stimulation of astrocyte-neuron signaling, revealing novel cellular mechanisms of eCB effects on synaptic plasticity.}, author = {Gómez-Gonzalo, Marta and Navarrete, Marta and Perea, Gertrudis and Covelo, Ana and Martín-Fernández, Mario and Shigemoto, Ryuichi and Luján, Rafael and Araque, Alfonso}, journal = {Cerebral Cortex}, number = {10}, pages = {3699 -- 3712}, publisher = {Oxford University Press}, title = {{Endocannabinoids induce lateral long-term potentiation of transmitter release by stimulation of gliotransmission}}, doi = {10.1093/cercor/bhu231}, volume = {25}, year = {2015}, } @article{1519, abstract = {Evolutionary biologists have an array of powerful theoretical techniques that can accurately predict changes in the genetic composition of populations. Changes in gene frequencies and genetic associations between loci can be tracked as they respond to a wide variety of evolutionary forces. However, it is often less clear how to decompose these various forces into components that accurately reflect the underlying biology. Here, we present several issues that arise in the definition and interpretation of selection and selection coefficients, focusing on insights gained through the examination of selection coefficients in multilocus notation. Using this notation, we discuss how its flexibility, which allows different biological units to be identified as targets of selection, is reflected in the interpretation of the coefficients that the notation generates. In many situations, it can be difficult to agree on whether loci can be considered to be under "direct" versus "indirect" selection, or to quantify this selection. We present arguments for what the terms direct and indirect selection might best encompass, considering a range of issues, from viability and sexual selection to kin selection. We show how multilocus notation can discriminate between direct and indirect selection, and describe when it can do so.}, author = {Barton, Nicholas H and Servedio, Maria}, journal = {Evolution}, number = {5}, pages = {1101 -- 1112}, publisher = {Wiley}, title = {{The interpretation of selection coefficients}}, doi = {10.1111/evo.12641}, volume = {69}, year = {2015}, } @article{1525, abstract = {Based on 16 recommendations, efforts should be made to achieve the following goal: By 2025, all scholarly publication activity in Austria should be Open Access. In other words, the final versions of all scholarly publications resulting from the support of public resources must be freely accessible on the Internet without delay (Gold Open Access).
The resources required to meet this obligation shall be provided to the authors, or the cost of the publication venues shall be borne directly by the research organisations.}, author = {Bauer, Bruno and Blechl, Guido and Bock, Christoph and Danowski, Patrick and Ferus, Andreas and Graschopf, Anton and König, Thomas and Mayer, Katja and Reckling, Falk and Rieck, Katharina and Seitz, Peter and Stöger, Herwig and Welzig, Elvira}, journal = {VÖB Mitteilungen}, number = {3}, pages = {580 -- 607}, publisher = {Verein Österreichischer Bibliothekare}, title = {{Arbeitsgruppe „Nationale Strategie“ des Open Access Network Austria OANA}}, doi = {10.5281/zenodo.33178}, volume = {68}, year = {2015}, } @inproceedings{1520, abstract = {Creating mechanical automata that can walk in stable and pleasing manners is a challenging task that requires both skill and expertise. We propose to use computational design to offset the technical difficulties of this process. A simple drag-and-drop interface allows casual users to create personalized walking toys from a library of pre-defined template mechanisms. Provided with this input, our method leverages physical simulation and evolutionary optimization to refine the mechanical designs such that the resulting toys are able to walk. The optimization process is guided by an intuitive set of objectives that measure the quality of the walking motions. We demonstrate our approach on a set of simulated mechanical toys with different numbers of legs and various distinct gaits. Two fabricated prototypes showcase the feasibility of our designs.}, author = {Bharaj, Gaurav and Coros, Stelian and Thomaszewski, Bernhard and Tompkin, James and Bickel, Bernd and Pfister, Hanspeter}, isbn = {978-1-4503-3496-9}, location = {Los Angeles, CA, United States}, pages = {93 -- 100}, publisher = {ACM}, title = {{Computational design of walking automata}}, doi = {10.1145/2786784.2786803}, year = {2015}, } @article{1532, abstract = {Ammonium is the major nitrogen source in some plant ecosystems but is toxic at high concentrations, especially when available as the exclusive nitrogen source. Ammonium stress rapidly leads to various metabolic and hormonal imbalances that ultimately inhibit root and shoot growth in many plant species, including Arabidopsis thaliana (L.) Heynh. To identify molecular and genetic factors involved in seedling survival with prolonged exclusive NH4+ nutrition, a transcriptomic analysis with microarrays was used. Substantial transcriptional differences were most pronounced in (NH4)2SO4-grown seedlings, compared with plants grown on KNO3 or NH4NO3. Consistent with previous physiological analyses, major differences in the expression modules of photosynthesis-related genes, an altered mitochondrial metabolism, differential expression of the primary NH4+ assimilation, alteration of transporter gene expression and crucial changes in cell wall biosynthesis were found. A major difference in plant hormone responses, particularly of auxin but not cytokinin, was striking. The activity of the DR5::GUS reporter revealed a dramatically decreased auxin response in (NH4)2SO4-grown primary roots. The impaired root growth on (NH4)2SO4 was partially rescued by exogenous auxin or in specific mutants in the auxin pathway. 
The data suggest that NH4+-induced nutritional and metabolic imbalances can be partially overcome by elevated auxin levels.}, author = {Yang, Huaiyu and Von Der Fecht Bartenbach, Jenny and Friml, Jirí and Lohmann, Jan and Neuhäuser, Benjamin and Ludewig, Uwe}, issn = {1445-4408}, journal = {Functional Plant Biology}, number = {3}, pages = {239 -- 251}, publisher = {CSIRO}, title = {{Auxin-modulated root growth inhibition in Arabidopsis thaliana seedlings with ammonium as the sole nitrogen source}}, doi = {10.1071/FP14171}, volume = {42}, year = {2015}, } @inbook{1531, abstract = {The Heat Kernel Signature (HKS) is a scalar quantity which is derived from the heat kernel of a given shape. Due to its robustness, isometry invariance, and multiscale nature, it has been successfully applied in many geometric applications. From a more general point of view, the HKS can be considered as a descriptor of the metric of a Riemannian manifold. Given a symmetric positive definite tensor field we may interpret it as the metric of some Riemannian manifold and thereby apply the HKS to visualize and analyze the given tensor data. In this paper, we propose a generalization of this approach that enables the treatment of indefinite tensor fields, like the stress tensor, by interpreting them as a generator of a positive definite tensor field. To investigate the usefulness of this approach we consider the stress tensor from the two-point-load model example and from a mechanical work piece.}, author = {Zobel, Valentin and Reininghaus, Jan and Hotz, Ingrid}, booktitle = {Visualization and Processing of Higher Order Descriptors for Multi-Valued Data}, editor = {Hotz, Ingrid and Schultz, Thomas}, isbn = {978-3-319-15089-5}, pages = {257 -- 267}, publisher = {Springer}, title = {{Visualizing symmetric indefinite 2D tensor fields using The Heat Kernel Signature}}, doi = {10.1007/978-3-319-15090-1_13}, volume = {40}, year = {2015}, }