@article{11668, abstract = {We study multiple keyword sponsored search auctions with budgets. Each keyword has multiple ad slots with a click-through rate. The bidders have additive valuations, which are linear in the click-through rates, and budgets, which are restricting their overall payments. Additionally, the number of slots per keyword assigned to a bidder is bounded. We show the following results: (1) We give the first mechanism for multiple keywords, where click-through rates differ among slots. Our mechanism is incentive compatible in expectation, individually rational in expectation, and Pareto optimal. (2) We study the combinatorial setting, where each bidder is only interested in a subset of the keywords. We give an incentive compatible, individually rational, Pareto-optimal, and deterministic mechanism for identical click-through rates. (3) We give an impossibility result for incentive compatible, individually rational, Pareto-optimal, and deterministic mechanisms for bidders with diminishing marginal valuations.}, author = {Colini-Baldeschi, Riccardo and Leonardi, Stefano and Henzinger, Monika H and Starnberger, Martin}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Algorithms, Economics, Clinching ascending auction, auctions with budgets, Sponsored search auctions}, number = {1}, publisher = {Association for Computing Machinery}, title = {{On multiple keyword sponsored search auctions with budgets}}, doi = {10.1145/2818357}, volume = {4}, year = {2015}, } @article{11669, abstract = {We study individual rational, Pareto-optimal, and incentive compatible mechanisms for auctions with heterogeneous items and budget limits. We consider settings with multiunit demand and additive valuations. For single-dimensional valuations we prove a positive result for randomized mechanisms, and a negative result for deterministic mechanisms. 
While the positive result allows for private budgets, the negative result is for public budgets. For multidimensional valuations and public budgets we prove an impossibility result that applies to deterministic and randomized mechanisms. Taken together this shows the power of randomization in certain settings with heterogeneous items, but it also shows its limitations.}, author = {Dütting, Paul and Henzinger, Monika H and Starnberger, Martin}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Algorithmic game theory, auction theory, Clinching auction, Pareto optimality, Budget limits}, number = {1}, publisher = {Association for Computing Machinery}, title = {{Auctions for heterogeneous items and budget limits}}, doi = {10.1145/2818351}, volume = {4}, year = {2015}, } @article{11670, abstract = {Auctions are widely used on the Web. Applications range from sponsored search to platforms such as eBay. In these and in many other applications the auctions in use are single-/multi-item auctions with unit demand. The main drawback of standard mechanisms for this type of auctions, such as VCG and GSP, is the limited expressiveness that they offer to the bidders. The General Auction Mechanism (GAM) of Aggarwal et al. [2009] takes a first step toward addressing the problem of limited expressiveness by computing a bidder optimal, envy-free outcome for linear utility functions with identical slopes and a single discontinuity per bidder-item pair. We show that in many practical situations this does not suffice to adequately model the preferences of the bidders, and we overcome this problem by presenting the first mechanism for piecewise linear utility functions with nonidentical slopes and multiple discontinuities. Our mechanism runs in polynomial time. Like GAM it is incentive compatible for inputs that fulfill a certain nondegeneracy assumption, but our requirement is more general than the requirement of GAM. 
For discontinuous utility functions that are nondegenerate as well as for continuous utility functions the outcome of our mechanism is a competitive equilibrium. We also show how our mechanism can be used to compute approximately bidder optimal, envy-free outcomes for a general class of continuous utility functions via piecewise linear approximation. Finally, we prove hardness results for even more expressive settings.}, author = {Dütting, Paul and Henzinger, Monika H and Weber, Ingmar}, issn = {2167-8383}, journal = {ACM Transactions on Economics and Computation}, keywords = {Computational Mathematics, Marketing, Economics and Econometrics, Statistics and Probability, Computer Science (miscellaneous)}, number = {1}, publisher = {Association for Computing Machinery}, title = {{An expressive mechanism for auctions on the web}}, doi = {10.1145/2716312}, volume = {4}, year = {2015}, } @inproceedings{11774, abstract = {Combinatorial auctions (CA) are a well-studied area in algorithmic mechanism design. However, contrary to the standard model, empirical studies suggest that a bidder’s valuation often does not depend solely on the goods assigned to him. For instance, in adwords auctions an advertiser might not want his ads to be displayed next to his competitors’ ads. In this paper, we propose and analyze several natural graph-theoretic models that incorporate such negative externalities, in which bidders form a directed conflict graph with maximum out-degree Δ. We design algorithms and truthful mechanisms for social welfare maximization that attain approximation ratios depending on Δ. For CA, our results are twofold: (1) A lottery that eliminates conflicts by discarding bidders/items independent of the bids. It allows to apply any truthful 𝛼-approximation mechanism for conflict-free valuations and yields an 𝒪(𝛼Δ)-approximation mechanism. 
(2) For fractionally sub-additive valuations, we design a rounding algorithm via a novel combination of a semi-definite program and a linear program, resulting in a cone program; the approximation ratio is 𝒪((ΔloglogΔ)/logΔ). The ratios are almost optimal given existing hardness results. For adwords auctions, we present several algorithms for the most relevant scenario when the number of items is small. In particular, we design a truthful mechanism with approximation ratio 𝑜(Δ) when the number of items is only logarithmic in the number of bidders.}, author = {Cheung, Yun Kuen and Henzinger, Monika H and Hoefer, Martin and Starnberger, Martin}, booktitle = {11th International Conference on Web and Internet Economics}, isbn = {9783662489949}, issn = {0302-9743}, location = {Amsterdam, Netherlands}, pages = {230–243}, publisher = {Springer Nature}, title = {{Combinatorial auctions with conflict-based externalities}}, doi = {10.1007/978-3-662-48995-6_17}, volume = {9470}, year = {2015}, } @inproceedings{11773, abstract = {Ad exchanges are an emerging platform for trading advertisement slots on the web with billions of dollars revenue per year. Every time a user visits a web page, the publisher of that web page can ask an ad exchange to auction off the ad slots on this page to determine which advertisements are shown at which price. Due to the high volume of traffic, ad networks typically act as mediators for individual advertisers at ad exchanges. If multiple advertisers in an ad network are interested in the ad slots of the same auction, the ad network might use a “local” auction to resell the obtained ad slots among its advertisers. In this work we want to deepen the theoretical understanding of these new markets by analyzing them from the viewpoint of combinatorial auctions. Prior work studied mostly single-item auctions, while we allow the advertisers to express richer preferences over multiple items. 
We develop a game-theoretic model for the entanglement of the central auction at the ad exchange with the local auctions at the ad networks. We consider the incentives of all three involved parties and suggest a three-party competitive equilibrium, an extension of the Walrasian equilibrium that ensures envy-freeness for all participants. We show the existence of a three-party competitive equilibrium and a polynomial-time algorithm to find one for gross-substitute bidder valuations.}, author = {Ben-Zwi, Oren and Henzinger, Monika H and Loitzenbauer, Veronika}, booktitle = {11th International Conference on Web and Internet Economics}, isbn = {9783662489949}, issn = {0302-9743}, location = {Amsterdam, Netherlands}, pages = {104–117}, publisher = {Springer Nature}, title = {{Ad exchange: Envy-free auctions with mediators}}, doi = {10.1007/978-3-662-48995-6_8}, volume = {9470}, year = {2015}, } @inproceedings{11785, abstract = {Recently we presented the first algorithm for maintaining the set of nodes reachable from a source node in a directed graph that is modified by edge deletions with 𝑜(𝑚𝑛) total update time, where 𝑚 is the number of edges and 𝑛 is the number of nodes in the graph [Henzinger et al. STOC 2014]. The algorithm is a combination of several different algorithms, each for a different 𝑚 vs. 𝑛 trade-off. For the case of 𝑚=Θ(𝑛1.5) the running time is 𝑂(𝑛2.47), just barely below 𝑚𝑛=Θ(𝑛2.5). In this paper we simplify the previous algorithm using new algorithmic ideas and achieve an improved running time of 𝑂̃ (min(𝑚7/6𝑛2/3,𝑚3/4𝑛5/4+𝑜(1),𝑚2/3𝑛4/3+𝑜(1)+𝑚3/7𝑛12/7+𝑜(1))). This gives, e.g., 𝑂(𝑛2.36) for the notorious case 𝑚=Θ(𝑛1.5). We obtain the same upper bounds for the problem of maintaining the strongly connected components of a directed graph undergoing edge deletions. 
Our algorithms are correct with high probability against an oblivious adversary.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {725 -- 736}, publisher = {Springer Nature}, title = {{Improved algorithms for decremental single-source reachability on directed graphs}}, doi = {10.1007/978-3-662-47672-7_59}, volume = {9134}, year = {2015}, } @inproceedings{11787, abstract = {We present faster algorithms for computing the 2-edge and 2-vertex strongly connected components of a directed graph. While in undirected graphs the 2-edge and 2-vertex connected components can be found in linear time, in directed graphs with m edges and n vertices only rather simple O(m n)-time algorithms were known. We use a hierarchical sparsification technique to obtain algorithms that run in time 𝑂(𝑛2). For 2-edge strongly connected components our algorithm gives the first running time improvement in 20 years. Additionally we present an 𝑂(𝑚2/log𝑛)-time algorithm for 2-edge strongly connected components, and thus improve over the O(m n) running time also when 𝑚=𝑂(𝑛). 
Our approach extends to k-edge and k-vertex strongly connected components for any constant k with a running time of 𝑂(𝑛2log𝑛) for k-edge-connectivity and 𝑂(𝑛3) for k-vertex-connectivity.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Loitzenbauer, Veronika}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {713 -- 724}, publisher = {Springer Nature}, title = {{Finding 2-edge and 2-vertex strongly connected components in quadratic time}}, doi = {10.1007/978-3-662-47672-7_58}, volume = {9134}, year = {2015}, } @inproceedings{11788, abstract = {Ad exchanges are becoming an increasingly popular way to sell advertisement slots on the internet. An ad exchange is basically a spot market for ad impressions. A publisher who has already signed contracts reserving advertisement impressions on his pages can choose between assigning a new ad impression for a new page view to a contracted advertiser or to sell it at an ad exchange. This leads to an online revenue maximization problem for the publisher. Given a new impression to sell decide whether (a) to assign it to a contracted advertiser and if so to which one or (b) to sell it at the ad exchange and if so at which reserve price. 
We make no assumptions about the distribution of the advertiser valuations that participate in the ad exchange and show that there exists a simple primal-dual based online algorithm, whose lower bound for the revenue converges to 𝑅𝐴𝐷𝑋+𝑅𝐴(1−1/𝑒), where 𝑅𝐴𝐷𝑋 is the revenue that the optimum algorithm achieves from the ad exchange and 𝑅𝐴 is the revenue that the optimum algorithm achieves from the contracted advertisers.}, author = {Dvořák, Wolfgang and Henzinger, Monika H}, booktitle = {12th International Workshop of Approximation and Online Algorithms}, issn = {0302-9743}, location = {Wroclaw, Poland}, pages = {156–167}, publisher = {Springer Nature}, title = {{Online ad assignment with an ad exchange}}, doi = {10.1007/978-3-319-18263-6_14}, volume = {8952}, year = {2015}, } @inproceedings{11786, abstract = {In this paper, we develop a dynamic version of the primal-dual method for optimization problems, and apply it to obtain the following results. (1) For the dynamic set-cover problem, we maintain an 𝑂(𝑓2)-approximately optimal solution in 𝑂(𝑓⋅log(𝑚+𝑛)) amortized update time, where 𝑓 is the maximum “frequency” of an element, 𝑛 is the number of sets, and 𝑚 is the maximum number of elements in the universe at any point in time. (2) For the dynamic 𝑏-matching problem, we maintain an 𝑂(1)-approximately optimal solution in 𝑂(log3𝑛) amortized update time, where 𝑛 is the number of nodes in the graph.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Italiano, Giuseppe F.}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {206 -- 218}, publisher = {Springer Nature}, title = {{Design of dynamic algorithms via primal-dual method}}, doi = {10.1007/978-3-662-47672-7_17}, volume = {9134}, year = {2015}, } @article{11845, abstract = {Phylogenetic diversity (PD) is a measure of biodiversity based on the evolutionary history of species. 
Here, we discuss several optimization problems related to the use of PD, and the more general measure split diversity (SD), in conservation prioritization. Depending on the conservation goal and the information available about species, one can construct optimization routines that incorporate various conservation constraints. We demonstrate how this information can be used to select sets of species for conservation action. Specifically, we discuss the use of species' geographic distributions, the choice of candidates under economic pressure, and the use of predator–prey interactions between the species in a community to define viability constraints. Despite such optimization problems falling into the area of NP hard problems, it is possible to solve them in a reasonable amount of time using integer programming. We apply integer linear programming to a variety of models for conservation prioritization that incorporate the SD measure. We exemplarily show the results for two data sets: the Cape region of South Africa and a Caribbean coral reef community. Finally, we provide user-friendly software at http://www.cibiv.at/software/pda.}, author = {Chernomor, Olga and Minh, Bui Quang and Forest, Félix and Klaere, Steffen and Ingram, Travis and Henzinger, Monika H and von Haeseler, Arndt}, issn = {2041-210X}, journal = {Methods in Ecology and Evolution}, number = {1}, pages = {83--91}, publisher = {Wiley}, title = {{Split diversity in constrained conservation prioritization using integer linear programming}}, doi = {10.1111/2041-210x.12299}, volume = {6}, year = {2015}, } @inproceedings{11868, abstract = {Consider the following Online Boolean Matrix-Vector Multiplication problem: We are given an n x n matrix M and will receive n column-vectors of size n, denoted by v1, ..., vn, one by one. After seeing each vector vi, we have to output the product Mvi before we can see the next vector. 
A naive algorithm can solve this problem using O(n3) time in total, and its running time can be slightly improved to O(n3/log2 n) [Williams SODA'07]. We show that a conjecture that there is no truly subcubic (O(n3-ε)) time algorithm for this problem can be used to exhibit the underlying polynomial time hardness shared by many dynamic problems. For a number of problems, such as subgraph connectivity, Pagh's problem, d-failure connectivity, decremental single-source shortest paths, and decremental transitive closure, this conjecture implies tight hardness results. Thus, proving or disproving this conjecture will be very interesting as it will either imply several tight unconditional lower bounds or break through a common barrier that blocks progress with these problems. This conjecture might also be considered as strong evidence against any further improvement for these problems since refuting it will imply a major breakthrough for combinatorial Boolean matrix multiplication and other long-standing problems if the term "combinatorial algorithms" is interpreted as "Strassen-like algorithms" [Ballard et al. SPAA'11]. The conjecture also leads to hardness results for problems that were previously based on diverse problems and conjectures -- such as 3SUM, combinatorial Boolean matrix multiplication, triangle detection, and multiphase -- thus providing a uniform way to prove polynomial hardness results for dynamic algorithms; some of the new proofs are also simpler or even become trivial. 
The conjecture also leads to stronger and new, non-trivial, hardness results, e.g., for the fully-dynamic densest subgraph and diameter problems.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon and Saranurak, Thatchaphol}, booktitle = {47th Annual ACM Symposium on Theory of Computing}, isbn = {978-145033536-2}, issn = {0737-8017}, location = {Portland, OR, United States}, publisher = {Association for Computing Machinery}, title = {{Unifying and strengthening hardness for dynamic problems via the online matrix-vector multiplication conjecture}}, doi = {10.1145/2746539.2746609}, year = {2015}, } @inproceedings{11869, abstract = {While in many graph mining applications it is crucial to handle a stream of updates efficiently in terms of both time and space, not much was known about achieving such type of algorithm. In this paper we study this issue for a problem which lies at the core of many graph mining applications called densest subgraph problem. We develop an algorithm that achieves time- and space-efficiency for this problem simultaneously. It is one of the first of its kind for graph problems to the best of our knowledge. Given an input graph, the densest subgraph is the subgraph that maximizes the ratio between the number of edges and the number of nodes. For any ε>0, our algorithm can, with high probability, maintain a (4+ε)-approximate solution under edge insertions and deletions using ~O(n) space and ~O(1) amortized time per update; here, $n$ is the number of nodes in the graph and ~O hides the O(polylog_{1+ε} n) term. The approximation ratio can be improved to (2+ε) with more time. It can be extended to a (2+ε)-approximation sublinear-time algorithm and a distributed-streaming algorithm. Our algorithm is the first streaming algorithm that can maintain the densest subgraph in one pass. Prior to this, no algorithm could do so even in the special case of an incremental stream and even when there is no time restriction. 
The previously best algorithm in this setting required O(log n) passes [BahmaniKV12]. The space required by our algorithm is tight up to a polylogarithmic factor.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon and Tsourakakis, Charalampos}, booktitle = {47th Annual ACM Symposium on Theory of Computing}, isbn = {978-145033536-2}, issn = {0737-8017}, location = {Portland, OR, United States}, pages = {173 -- 182}, publisher = {Association for Computing Machinery}, title = {{Space- and time-efficient algorithm for maintaining dense subgraphs on one-pass dynamic streams}}, doi = {10.1145/2746539.2746592}, year = {2015}, } @inproceedings{11837, abstract = {Online social networks allow the collection of large amounts of data about the influence between users connected by a friendship-like relationship. When distributing items among agents forming a social network, this information allows us to exploit network externalities that each agent receives from his neighbors that get the same item. In this paper we consider Friends-of-Friends (2-hop) network externalities, i.e., externalities that not only depend on the neighbors that get the same item but also on neighbors of neighbors. For these externalities we study a setting where multiple different items are assigned to unit-demand agents. Specifically, we study the problem of welfare maximization under different types of externality functions. Let n be the number of agents and m be the number of items. Our contributions are the following: (1) We show that welfare maximization is APX-hard; we show that even for step functions with 2-hop (and also with 1-hop) externalities it is NP-hard to approximate social welfare better than (1-1/e). 
(2) On the positive side we present (i) an O(sqrt n)-approximation algorithm for general concave externality functions, (ii) an O(\log m)-approximation algorithm for linear externality functions, and (iii) an (1-1/e)\frac{1}{6}-approximation algorithm for 2-hop step function externalities. We also improve the result from [6] for 1-hop step function externalities by giving a (1-1/e)/2-approximation algorithm.}, author = {Bhattacharya, Sayan and Dvořák, Wolfgang and Henzinger, Monika H and Starnberger, Martin}, booktitle = {32nd International Symposium on Theoretical Aspects of Computer Science}, isbn = {978-3-939897-78-1}, issn = {1868-8969}, location = {Garching, Germany}, pages = {90--102}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Welfare maximization with friends-of-friends network externalities}}, doi = {10.4230/LIPICS.STACS.2015.90}, volume = {30}, year = {2015}, } @article{11901, abstract = {We consider auctions of indivisible items to unit-demand bidders with budgets. This setting was suggested as an expressive model for single sponsored search auctions. Prior work presented mechanisms that compute bidder-optimal outcomes and are truthful for a restricted set of inputs, i.e., inputs in so-called general position. This condition is easily violated. We provide the first mechanism that is truthful in expectation for all inputs and achieves for each bidder no worse utility than the bidder-optimal outcome. 
Additionally we give a complete characterization for which inputs mechanisms that compute bidder-optimal outcomes are truthful.}, author = {Henzinger, Monika H and Loitzenbauer, Veronika}, issn = {0304-3975}, journal = {Theoretical Computer Science}, pages = {1--15}, publisher = {Elsevier}, title = {{Truthful unit-demand auctions with budgets revisited}}, doi = {10.1016/j.tcs.2015.01.033}, volume = {573}, year = {2015}, } @article{11962, abstract = {One of the rare alternative reagents for the reduction of carbon–carbon double bonds is diimide (HNNH), which can be generated in situ from hydrazine hydrate (N2H4⋅H2O) and O2. Although this selective method is extremely clean and powerful, it is rarely used, as the rate-determining oxidation of hydrazine in the absence of a catalyst is relatively slow using conventional batch protocols. A continuous high-temperature/high-pressure methodology dramatically enhances the initial oxidation step, at the same time allowing for a safe and scalable processing of the hazardous reaction mixture. Simple alkenes can be selectively reduced within 10–20 min at 100–120 °C and 20 bar O2 pressure. The development of a multi-injection reactor platform for the periodic addition of N2H4⋅H2O enables the reduction of less reactive olefins even at lower reaction temperatures. This concept was utilized for the highly selective reduction of artemisinic acid to dihydroartemisinic acid, the precursor molecule for the semisynthesis of the antimalarial drug artemisinin. The industrially relevant reduction was achieved by using four consecutive liquid feeds (of N2H4⋅H2O) and residence time units resulting in a highly selective reduction within approximately 40 min at 60 °C and 20 bar O2 pressure, providing dihydroartemisinic acid in ≥93 % yield and ≥95 % selectivity.}, author = {Pieber, Bartholomäus and Glasnov, Toma and Kappe, C. 
Oliver}, issn = {1521-3765}, journal = {Chemistry - A European Journal}, number = {11}, pages = {4368--4376}, publisher = {Wiley}, title = {{Continuous flow reduction of artemisinic acid utilizing multi-injection strategies-closing the gap towards a fully continuous synthesis of antimalarial drugs}}, doi = {10.1002/chem.201406439}, volume = {21}, year = {2015}, } @article{11977, abstract = {The development of a continuous flow multistep strategy for the synthesis of linear peptoids and their subsequent macrocyclization via Click chemistry is described. The central transformation of this process is an Ugi four-component reaction generating the peptidomimetic core structure. In order to avoid exposure to the often toxic and malodorous isocyanide building blocks, the continuous approach was telescoped by the dehydration of the corresponding formamide. In a concurrent operation, the highly energetic azide moiety required for the subsequent intramolecular copper-catalyzed azide–alkyne cycloaddition (Click reaction) was installed by nucleophilic substitution from a bromide precursor. All steps yielding to the linear core structures can be conveniently coupled without the need for purification steps resulting in a single process generating the desired peptidomimetics in good to excellent yields within a 25 min reaction time. The following macrocyclization was realized in a coil reactor made of copper without any additional additive. A careful process intensification study demonstrated that this transformation occurs quantitatively within 25 min at 140 °C. Depending on the resulting ring strain, either a dimeric or a monomeric form of the cyclic product was obtained.}, author = {Salvador, Carlos Eduardo M. and Pieber, Bartholomäus and Neu, Philipp M. and Torvisco, Ana and Kleber Z. Andrade, Carlos and Kappe, C. 
Oliver}, issn = {1520-6904}, journal = {The Journal of Organic Chemistry}, number = {9}, pages = {4590--4602}, publisher = {American Chemical Society}, title = {{A sequential Ugi multicomponent/Cu-catalyzed azide–alkyne cycloaddition approach for the continuous flow generation of cyclic peptoids}}, doi = {10.1021/acs.joc.5b00445}, volume = {80}, year = {2015}, } @inbook{11989, abstract = {In recent years, the high demand for sustainable processes resulted in the development of highly attractive oxidation protocols utilizing molecular oxygen or even air instead of more uneconomic and often toxic reagents. The application of these sustainable, gaseous oxidants in conventional batch reactors is often associated with severe safety risks and process challenges especially on larger scales. Continuous flow technology offers the possibility to minimize these safety hazards and concurrently allows working in high-temperature/high-pressure regimes to access highly efficient oxidation protocols. This review article critically discusses recent literature examples of flow methodologies for selective aerobic oxidations of organic compounds. Several technologies and reactor designs for biphasic gas/liquid as well as supercritical reaction media are presented in detail. © Springer International Publishing Switzerland 2015.}, author = {Pieber, Bartholomäus and Kappe, C. Oliver}, booktitle = {Organometallic Flow Chemistry}, editor = {Noël, Timothy}, isbn = {9783319332413}, issn = {1616-8534}, pages = {97–136}, publisher = {Springer Nature}, title = {{Aerobic oxidations in continuous flow}}, doi = {10.1007/3418_2015_133}, volume = {57}, year = {2015}, } @article{120, abstract = {Clustering of fine particles is of crucial importance in settings ranging from the early stages of planet formation to the coagulation of industrial powders and airborne pollutants. Models of such clustering typically focus on inelastic deformation and cohesion. 
However, even in charge-neutral particle systems comprising grains of the same dielectric material, tribocharging can generate large amounts of net positive or negative charge on individual particles, resulting in long-range electrostatic forces. The effects of such forces on cluster formation are not well understood and have so far not been studied in situ. Here we report the first observations of individual collide-and-capture events between charged submillimetre particles, including Kepler-like orbits. Charged particles can become trapped in their mutual electrostatic energy well and aggregate via multiple bounces. This enables the initiation of clustering at relative velocities much larger than the upper limit for sticking after a head-on collision, a long-standing issue known from pre-planetary dust aggregation. Moreover, Coulomb interactions together with dielectric polarization are found to stabilize characteristic molecule-like configurations, providing new insights for the modelling of clustering dynamics in a wide range of microscopic dielectric systems, such as charged polarizable ions, biomolecules and colloids.}, author = {Lee, Victor and Waitukaitis, Scott R and Miskin, Marc and Jaeger, Heinrich}, journal = {Nature Physics}, number = {9}, pages = {733 -- 737}, publisher = {Nature Publishing Group}, title = {{Direct observation of particle interactions and clustering in charged granular streams}}, doi = {10.1038/nphys3396}, volume = {11}, year = {2015}, } @article{121, abstract = {We show that the simplest building blocks of origami-based materials - rigid, degree-four vertices - are generically multistable. The existence of two distinct branches of folding motion emerging from the flat state suggests at least bistability, but we show how nonlinearities in the folding motions allow generic vertex geometries to have as many as five stable states. 
In special geometries with collinear folds and symmetry, more branches emerge leading to as many as six stable states. Tuning the fold energy parameters, we show how monostability is also possible. Finally, we show how to program the stability features of a single vertex into a periodic fold tessellation. The resulting metasheets provide a previously unanticipated functionality - tunable and switchable shape and size via multistability.}, author = {Waitukaitis, Scott R and Menaut, Rémi and Chen, Bryan and van Hecke, Martin}, journal = {Physical Review Letters}, number = {5}, publisher = {American Physical Society}, title = {{Origami multistability: From single vertices to metasheets}}, doi = {10.1103/PhysRevLett.114.055503}, volume = {114}, year = {2015}, } @article{1311, abstract = {In this paper, we develop an energy method to study finite speed of propagation and waiting time phenomena for the stochastic porous media equation with linear multiplicative noise in up to three spatial dimensions. Based on a novel iteration technique and on stochastic counterparts of weighted integral estimates used in the deterministic setting, we formulate a sufficient criterion on the growth of initial data which locally guarantees a waiting time phenomenon to occur almost surely. Up to a logarithmic factor, this criterion coincides with the optimal criterion known from the deterministic setting. Our technique can be modified to prove finite speed of propagation as well.}, author = {Fischer, Julian and Grün, Günther}, journal = {SIAM Journal on Mathematical Analysis}, number = {1}, pages = {825 -- 854}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Finite speed of propagation and waiting times for the stochastic porous medium equation: A unifying approach}}, doi = {10.1137/140960578}, volume = {47}, year = {2015}, }