@inproceedings{11785, abstract = {Recently we presented the first algorithm for maintaining the set of nodes reachable from a source node in a directed graph that is modified by edge deletions with $o(mn)$ total update time, where $m$ is the number of edges and $n$ is the number of nodes in the graph [Henzinger et al. STOC 2014]. The algorithm is a combination of several different algorithms, each for a different $m$ vs. $n$ trade-off. For the case of $m = \Theta(n^{1.5})$ the running time is $O(n^{2.47})$, just barely below $mn = \Theta(n^{2.5})$. In this paper we simplify the previous algorithm using new algorithmic ideas and achieve an improved running time of $\tilde{O}(\min(m^{7/6} n^{2/3}, m^{3/4} n^{5/4+o(1)}, m^{2/3} n^{4/3+o(1)} + m^{3/7} n^{12/7+o(1)}))$. This gives, e.g., $O(n^{2.36})$ for the notorious case $m = \Theta(n^{1.5})$. We obtain the same upper bounds for the problem of maintaining the strongly connected components of a directed graph undergoing edge deletions. Our algorithms are correct with high probability against an oblivious adversary.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {725 -- 736}, publisher = {Springer Nature}, title = {{Improved algorithms for decremental single-source reachability on directed graphs}}, doi = {10.1007/978-3-662-47672-7_59}, volume = {9134}, year = {2015}, } @inproceedings{11787, abstract = {We present faster algorithms for computing the 2-edge and 2-vertex strongly connected components of a directed graph. While in undirected graphs the 2-edge and 2-vertex connected components can be found in linear time, in directed graphs with $m$ edges and $n$ vertices only rather simple $O(mn)$-time algorithms were known. We use a hierarchical sparsification technique to obtain algorithms that run in time $O(n^2)$. For 2-edge strongly connected components our algorithm gives the first running time improvement in 20 years. Additionally we present an $O(m^2/\log n)$-time algorithm for 2-edge strongly connected components, and thus improve over the $O(mn)$ running time also when $m = O(n)$. Our approach extends to $k$-edge and $k$-vertex strongly connected components for any constant $k$ with a running time of $O(n^2 \log n)$ for $k$-edge-connectivity and $O(n^3)$ for $k$-vertex-connectivity.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Loitzenbauer, Veronika}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {713 -- 724}, publisher = {Springer Nature}, title = {{Finding 2-edge and 2-vertex strongly connected components in quadratic time}}, doi = {10.1007/978-3-662-47672-7_58}, volume = {9134}, year = {2015}, } @inproceedings{11788, abstract = {Ad exchanges are becoming an increasingly popular way to sell advertisement slots on the internet. An ad exchange is basically a spot market for ad impressions. A publisher who has already signed contracts reserving advertisement impressions on his pages can choose between assigning a new ad impression for a new page view to a contracted advertiser and selling it at an ad exchange. This leads to an online revenue maximization problem for the publisher. Given a new impression to sell, decide whether (a) to assign it to a contracted advertiser and, if so, to which one, or (b) to sell it at the ad exchange and, if so, at which reserve price.
We make no assumptions about the distribution of the valuations of the advertisers participating in the ad exchange and show that there exists a simple primal-dual based online algorithm whose revenue converges to at least $R_{ADX} + R_A(1-1/e)$, where $R_{ADX}$ is the revenue that the optimal algorithm achieves from the ad exchange and $R_A$ is the revenue that the optimal algorithm achieves from the contracted advertisers.}, author = {Dvořák, Wolfgang and Henzinger, Monika H}, booktitle = {12th International Workshop on Approximation and Online Algorithms}, issn = {0302-9743}, location = {Wroclaw, Poland}, pages = {156 -- 167}, publisher = {Springer Nature}, title = {{Online ad assignment with an ad exchange}}, doi = {10.1007/978-3-319-18263-6_14}, volume = {8952}, year = {2015}, } @inproceedings{11786, abstract = {In this paper, we develop a dynamic version of the primal-dual method for optimization problems, and apply it to obtain the following results. (1) For the dynamic set-cover problem, we maintain an $O(f^2)$-approximately optimal solution in $O(f \cdot \log(m+n))$ amortized update time, where $f$ is the maximum “frequency” of an element, $n$ is the number of sets, and $m$ is the maximum number of elements in the universe at any point in time. (2) For the dynamic $b$-matching problem, we maintain an $O(1)$-approximately optimal solution in $O(\log^3 n)$ amortized update time, where $n$ is the number of nodes in the graph.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Italiano, Giuseppe F.}, booktitle = {42nd International Colloquium on Automata, Languages and Programming}, isbn = {9783662476710}, issn = {0302-9743}, location = {Kyoto, Japan}, pages = {206 -- 218}, publisher = {Springer Nature}, title = {{Design of dynamic algorithms via primal-dual method}}, doi = {10.1007/978-3-662-47672-7_17}, volume = {9134}, year = {2015}, } @article{11845, abstract = {Phylogenetic diversity (PD) is a measure of biodiversity based on the evolutionary history of species. Here, we discuss several optimization problems related to the use of PD, and the more general measure split diversity (SD), in conservation prioritization. Depending on the conservation goal and the information available about species, one can construct optimization routines that incorporate various conservation constraints. We demonstrate how this information can be used to select sets of species for conservation action. Specifically, we discuss the use of species' geographic distributions, the choice of candidates under economic pressure, and the use of predator–prey interactions between the species in a community to define viability constraints. Although such optimization problems are NP-hard, it is possible to solve them in a reasonable amount of time using integer programming. We apply integer linear programming to a variety of models for conservation prioritization that incorporate the SD measure. We illustrate the results for two data sets: the Cape region of South Africa and a Caribbean coral reef community.
Finally, we provide user-friendly software at http://www.cibiv.at/software/pda.}, author = {Chernomor, Olga and Minh, Bui Quang and Forest, Félix and Klaere, Steffen and Ingram, Travis and Henzinger, Monika H and von Haeseler, Arndt}, issn = {2041-210X}, journal = {Methods in Ecology and Evolution}, number = {1}, pages = {83--91}, publisher = {Wiley}, title = {{Split diversity in constrained conservation prioritization using integer linear programming}}, doi = {10.1111/2041-210x.12299}, volume = {6}, year = {2015}, } @inproceedings{11868, abstract = {Consider the following Online Boolean Matrix-Vector Multiplication problem: We are given an $n \times n$ matrix $M$ and will receive $n$ column-vectors of size $n$, denoted by $v_1, \ldots, v_n$, one by one. After seeing each vector $v_i$, we have to output the product $Mv_i$ before we can see the next vector. A naive algorithm can solve this problem using $O(n^3)$ time in total, and its running time can be slightly improved to $O(n^3/\log^2 n)$ [Williams SODA'07]. We show that a conjecture that there is no truly subcubic ($O(n^{3-\varepsilon})$) time algorithm for this problem can be used to exhibit the underlying polynomial time hardness shared by many dynamic problems. For a number of problems, such as subgraph connectivity, Pagh's problem, $d$-failure connectivity, decremental single-source shortest paths, and decremental transitive closure, this conjecture implies tight hardness results. Thus, proving or disproving this conjecture will be very interesting as it will either imply several tight unconditional lower bounds or break through a common barrier that blocks progress with these problems. This conjecture might also be considered as strong evidence against any further improvement for these problems since refuting it will imply a major breakthrough for combinatorial Boolean matrix multiplication and other long-standing problems if the term "combinatorial algorithms" is interpreted as "Strassen-like algorithms" [Ballard et al. SPAA'11]. The conjecture also leads to hardness results for problems that were previously based on diverse problems and conjectures -- such as 3SUM, combinatorial Boolean matrix multiplication, triangle detection, and multiphase -- thus providing a uniform way to prove polynomial hardness results for dynamic algorithms; some of the new proofs are also simpler or even become trivial. The conjecture also leads to stronger and new, non-trivial, hardness results, e.g., for the fully-dynamic densest subgraph and diameter problems.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon and Saranurak, Thatchaphol}, booktitle = {47th Annual ACM Symposium on Theory of Computing}, isbn = {978-145033536-2}, issn = {0737-8017}, location = {Portland, OR, United States}, publisher = {Association for Computing Machinery}, title = {{Unifying and strengthening hardness for dynamic problems via the online matrix-vector multiplication conjecture}}, doi = {10.1145/2746539.2746609}, year = {2015}, } @inproceedings{11869, abstract = {While in many graph mining applications it is crucial to handle a stream of updates efficiently in terms of both time and space, little was known about how to achieve such algorithms. In this paper we study this issue for a problem that lies at the core of many graph mining applications, the densest subgraph problem. We develop an algorithm that achieves time- and space-efficiency for this problem simultaneously. To the best of our knowledge, it is one of the first of its kind for graph problems.
Given an input graph, the densest subgraph is the subgraph that maximizes the ratio between the number of edges and the number of nodes. For any $\epsilon > 0$, our algorithm can, with high probability, maintain a $(4+\epsilon)$-approximate solution under edge insertions and deletions using $\tilde{O}(n)$ space and $\tilde{O}(1)$ amortized time per update; here, $n$ is the number of nodes in the graph and $\tilde{O}$ hides the $O(\mathrm{polylog}_{1+\epsilon} n)$ term. The approximation ratio can be improved to $(2+\epsilon)$ with more time. The algorithm can be extended to a $(2+\epsilon)$-approximation sublinear-time algorithm and a distributed-streaming algorithm. Our algorithm is the first streaming algorithm that can maintain the densest subgraph in one pass. Prior to this, no algorithm could do so even in the special case of an incremental stream and even when there is no time restriction. The best previous algorithm in this setting required $O(\log n)$ passes [BahmaniKV12]. The space required by our algorithm is tight up to a polylogarithmic factor.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon and Tsourakakis, Charalampos}, booktitle = {47th Annual ACM Symposium on Theory of Computing}, isbn = {978-145033536-2}, issn = {0737-8017}, location = {Portland, OR, United States}, pages = {173 -- 182}, publisher = {Association for Computing Machinery}, title = {{Space- and time-efficient algorithm for maintaining dense subgraphs on one-pass dynamic streams}}, doi = {10.1145/2746539.2746592}, year = {2015}, } @inproceedings{11837, abstract = {Online social networks allow the collection of large amounts of data about the influence between users connected by a friendship-like relationship. When distributing items among agents forming a social network, this information allows us to exploit network externalities that each agent receives from his neighbors that get the same item. In this paper we consider Friends-of-Friends (2-hop) network externalities, i.e., externalities that not only depend on the neighbors that get the same item but also on neighbors of neighbors. For these externalities we study a setting where multiple different items are assigned to unit-demand agents. Specifically, we study the problem of welfare maximization under different types of externality functions. Let $n$ be the number of agents and $m$ be the number of items. Our contributions are the following: (1) We show that welfare maximization is APX-hard: even for step functions with 2-hop (and also with 1-hop) externalities it is NP-hard to approximate social welfare better than $(1-1/e)$. (2) On the positive side we present (i) an $O(\sqrt{n})$-approximation algorithm for general concave externality functions, (ii) an $O(\log m)$-approximation algorithm for linear externality functions, and (iii) a $(1-1/e)/6$-approximation algorithm for 2-hop step function externalities.
We also improve the result from [6] for 1-hop step function externalities by giving a $(1-1/e)/2$-approximation algorithm.}, author = {Bhattacharya, Sayan and Dvořák, Wolfgang and Henzinger, Monika H and Starnberger, Martin}, booktitle = {32nd International Symposium on Theoretical Aspects of Computer Science}, isbn = {978-3-939897-78-1}, issn = {1868-8969}, location = {Garching, Germany}, pages = {90--102}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Welfare maximization with friends-of-friends network externalities}}, doi = {10.4230/LIPICS.STACS.2015.90}, volume = {30}, year = {2015}, } @article{11901, abstract = {We consider auctions of indivisible items to unit-demand bidders with budgets. This setting was suggested as an expressive model for single sponsored search auctions. Prior work presented mechanisms that compute bidder-optimal outcomes and are truthful for a restricted set of inputs, i.e., inputs in so-called general position. This condition is easily violated. We provide the first mechanism that is truthful in expectation for all inputs and achieves for each bidder no worse utility than the bidder-optimal outcome. Additionally we give a complete characterization of the inputs for which mechanisms that compute bidder-optimal outcomes are truthful.}, author = {Henzinger, Monika H and Loitzenbauer, Veronika}, issn = {0304-3975}, journal = {Theoretical Computer Science}, pages = {1--15}, publisher = {Elsevier}, title = {{Truthful unit-demand auctions with budgets revisited}}, doi = {10.1016/j.tcs.2015.01.033}, volume = {573}, year = {2015}, } @article{11962, abstract = {One of the rare alternative reagents for the reduction of carbon–carbon double bonds is diimide (HNNH), which can be generated in situ from hydrazine hydrate (N2H4⋅H2O) and O2. Although this selective method is extremely clean and powerful, it is rarely used, as the rate-determining oxidation of hydrazine in the absence of a catalyst is relatively slow using conventional batch protocols. A continuous high-temperature/high-pressure methodology dramatically enhances the initial oxidation step, at the same time allowing for a safe and scalable processing of the hazardous reaction mixture. Simple alkenes can be selectively reduced within 10–20 min at 100–120 °C and 20 bar O2 pressure. The development of a multi-injection reactor platform for the periodic addition of N2H4⋅H2O enables the reduction of less reactive olefins even at lower reaction temperatures. This concept was utilized for the highly selective reduction of artemisinic acid to dihydroartemisinic acid, the precursor molecule for the semisynthesis of the antimalarial drug artemisinin. The industrially relevant reduction was achieved by using four consecutive liquid feeds (of N2H4⋅H2O) and residence time units resulting in a highly selective reduction within approximately 40 min at 60 °C and 20 bar O2 pressure, providing dihydroartemisinic acid in ≥93 % yield and ≥95 % selectivity.}, author = {Pieber, Bartholomäus and Glasnov, Toma and Kappe, C.
Oliver}, issn = {1521-3765}, journal = {Chemistry - A European Journal}, number = {11}, pages = {4368--4376}, publisher = {Wiley}, title = {{Continuous flow reduction of artemisinic acid utilizing multi-injection strategies - closing the gap towards a fully continuous synthesis of antimalarial drugs}}, doi = {10.1002/chem.201406439}, volume = {21}, year = {2015}, } @article{11977, abstract = {The development of a continuous flow multistep strategy for the synthesis of linear peptoids and their subsequent macrocyclization via Click chemistry is described. The central transformation of this process is an Ugi four-component reaction generating the peptidomimetic core structure. In order to avoid exposure to the often toxic and malodorous isocyanide building blocks, the continuous approach was telescoped by the dehydration of the corresponding formamide. In a concurrent operation, the highly energetic azide moiety required for the subsequent intramolecular copper-catalyzed azide–alkyne cycloaddition (Click reaction) was installed by nucleophilic substitution from a bromide precursor. All steps yielding the linear core structures can be conveniently coupled without the need for intermediate purification, resulting in a single process that generates the desired peptidomimetics in good to excellent yields within a 25 min reaction time. The subsequent macrocyclization was realized in a coil reactor made of copper without any further additive. A careful process intensification study demonstrated that this transformation occurs quantitatively within 25 min at 140 °C. Depending on the resulting ring strain, either a dimeric or a monomeric form of the cyclic product was obtained.}, author = {Salvador, Carlos Eduardo M. and Pieber, Bartholomäus and Neu, Philipp M. and Torvisco, Ana and Kleber Z. Andrade, Carlos and Kappe, C. Oliver}, issn = {1520-6904}, journal = {The Journal of Organic Chemistry}, number = {9}, pages = {4590--4602}, publisher = {American Chemical Society}, title = {{A sequential Ugi multicomponent/Cu-catalyzed azide–alkyne cycloaddition approach for the continuous flow generation of cyclic peptoids}}, doi = {10.1021/acs.joc.5b00445}, volume = {80}, year = {2015}, } @inbook{11989, abstract = {In recent years, the high demand for sustainable processes resulted in the development of highly attractive oxidation protocols utilizing molecular oxygen or even air instead of more uneconomic and often toxic reagents. The application of these sustainable, gaseous oxidants in conventional batch reactors is often associated with severe safety risks and process challenges, especially on larger scales. Continuous flow technology offers the possibility to minimize these safety hazards and concurrently allows working in high-temperature/high-pressure regimes to access highly efficient oxidation protocols. This review article critically discusses recent literature examples of flow methodologies for selective aerobic oxidations of organic compounds. Several technologies and reactor designs for biphasic gas/liquid as well as supercritical reaction media are presented in detail.}, author = {Pieber, Bartholomäus and Kappe, C.
Oliver}, booktitle = {Organometallic Flow Chemistry}, editor = {Noël, Timothy}, isbn = {9783319332413}, issn = {1616-8534}, pages = {97--136}, publisher = {Springer Nature}, title = {{Aerobic oxidations in continuous flow}}, doi = {10.1007/3418_2015_133}, volume = {57}, year = {2015}, } @article{120, abstract = {Clustering of fine particles is of crucial importance in settings ranging from the early stages of planet formation to the coagulation of industrial powders and airborne pollutants. Models of such clustering typically focus on inelastic deformation and cohesion. However, even in charge-neutral particle systems comprising grains of the same dielectric material, tribocharging can generate large amounts of net positive or negative charge on individual particles, resulting in long-range electrostatic forces. The effects of such forces on cluster formation are not well understood and have so far not been studied in situ. Here we report the first observations of individual collide-and-capture events between charged submillimetre particles, including Kepler-like orbits. Charged particles can become trapped in their mutual electrostatic energy well and aggregate via multiple bounces. This enables the initiation of clustering at relative velocities much larger than the upper limit for sticking after a head-on collision, a long-standing issue known from pre-planetary dust aggregation. Moreover, Coulomb interactions together with dielectric polarization are found to stabilize characteristic molecule-like configurations, providing new insights for the modelling of clustering dynamics in a wide range of microscopic dielectric systems, such as charged polarizable ions, biomolecules and colloids.}, author = {Lee, Victor and Waitukaitis, Scott R and Miskin, Marc and Jaeger, Heinrich}, journal = {Nature Physics}, number = {9}, pages = {733 -- 737}, publisher = {Nature Publishing Group}, title = {{Direct observation of particle interactions and clustering in charged granular streams}}, doi = {10.1038/nphys3396}, volume = {11}, year = {2015}, } @article{121, abstract = {We show that the simplest building blocks of origami-based materials, rigid degree-four vertices, are generically multistable. The existence of two distinct branches of folding motion emerging from the flat state suggests at least bistability, but we show how nonlinearities in the folding motions allow generic vertex geometries to have as many as five stable states. In special geometries with collinear folds and symmetry, more branches emerge, leading to as many as six stable states. Tuning the fold energy parameters, we show how monostability is also possible. Finally, we show how to program the stability features of a single vertex into a periodic fold tessellation. The resulting metasheets provide a previously unanticipated functionality: tunable and switchable shape and size via multistability.}, author = {Waitukaitis, Scott R and Menaut, Rémi and Chen, Bryan and Van Hecke, Martin}, journal = {Physical Review Letters}, number = {5}, publisher = {American Physical Society}, title = {{Origami multistability: From single vertices to metasheets}}, doi = {10.1103/PhysRevLett.114.055503}, volume = {114}, year = {2015}, } @article{1311, abstract = {In this paper, we develop an energy method to study finite speed of propagation and waiting time phenomena for the stochastic porous medium equation with linear multiplicative noise in up to three spatial dimensions.
Based on a novel iteration technique and on stochastic counterparts of weighted integral estimates used in the deterministic setting, we formulate a sufficient criterion on the growth of initial data which locally guarantees a waiting time phenomenon to occur almost surely. Up to a logarithmic factor, this criterion coincides with the optimal criterion known from the deterministic setting. Our technique can be modified to prove finite speed of propagation as well.}, author = {Fischer, Julian and Grün, Günther}, journal = {SIAM Journal on Mathematical Analysis}, number = {1}, pages = {825 -- 854}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Finite speed of propagation and waiting times for the stochastic porous medium equation: A unifying approach}}, doi = {10.1137/140960578}, volume = {47}, year = {2015}, } @article{1314, abstract = {We derive a posteriori estimates for the modeling error caused by the assumption of perfect incompressibility in the incompressible Navier-Stokes equation: Real fluids are never perfectly incompressible but always feature at least some low amount of compressibility. Thus, their behavior is described by the compressible Navier-Stokes equation, the pressure being a steep function of the density. We rigorously estimate the difference between an approximate solution to the incompressible Navier-Stokes equation and any weak solution to the compressible Navier-Stokes equation in the sense of Lions (without assuming any additional regularity of solutions). Heuristics and numerical results suggest that our error estimates are of optimal order in the case of "well-behaved" flows and divergence-free approximations of the velocity field. Thus, we expect our estimates to justify the idealization of fluids as perfectly incompressible also in practical situations.}, author = {Fischer, Julian L}, journal = {SIAM Journal on Numerical Analysis}, number = {5}, pages = {2178 -- 2205}, publisher = {Society for Industrial and Applied Mathematics}, title = {{A posteriori modeling error estimates for the assumption of perfect incompressibility in the Navier-Stokes equation}}, doi = {10.1137/140966654}, volume = {53}, year = {2015}, } @article{1313, abstract = {We present an algorithm for the derivation of lower bounds on support propagation for a certain class of nonlinear parabolic equations. We proceed by combining the ideas in some recent papers by the author with the algorithmic construction of entropies due to Jüngel and Matthes, reducing the problem to a quantifier elimination problem. Due to its complexity, the quantifier elimination problem cannot be solved by present exact algorithms. However, by tackling the quantifier elimination problem numerically, in the case of the thin-film equation we are able to improve recent results by the author in the regime of strong slippage $n \in (1, 2)$. For certain second-order doubly nonlinear parabolic equations, we are able to extend the known lower bounds on free boundary propagation to the case of irregular oscillatory initial data.
Finally, we apply our method to a sixth-order quantum drift-diffusion equation, resulting in an upper bound on the time it takes for the support to reach every point in the domain.}, author = {Fischer, Julian}, journal = {Interfaces and Free Boundaries}, number = {1}, pages = {1 -- 20}, publisher = {European Mathematical Society Publishing House}, title = {{Estimates on front propagation for nonlinear higher-order parabolic equations: An algorithmic approach}}, doi = {10.4171/IFB/331}, volume = {17}, year = {2015}, } @article{1316, abstract = {In the present work we introduce the notion of a renormalized solution for reaction–diffusion systems with entropy-dissipating reactions. We establish the global existence of renormalized solutions. In the case of integrable reaction terms our notion of a renormalized solution reduces to the usual notion of a weak solution. Our existence result in particular covers all reaction–diffusion systems involving a single reversible reaction with mass-action kinetics and (possibly species-dependent) Fick-law diffusion; more generally, it covers the case of systems of reversible reactions with mass-action kinetics which satisfy the detailed balance condition. For such equations the existence of any kind of solution in general was an open problem, thereby motivating the study of renormalized solutions.}, author = {Fischer, Julian}, journal = {Archive for Rational Mechanics and Analysis}, number = {1}, pages = {553 -- 587}, publisher = {Springer}, title = {{Global existence of renormalized solutions to entropy-dissipating reaction–diffusion systems}}, doi = {10.1007/s00205-015-0866-x}, volume = {218}, year = {2015}, } @article{1383, abstract = {In plants, vacuolar H+-ATPase (V-ATPase) activity acidifies both the trans-Golgi network/early endosome (TGN/EE) and the vacuole. This dual V-ATPase function has impeded our understanding of how the pH homeostasis within the plant TGN/EE controls exo- and endocytosis. Here, we show that the weak V-ATPase mutant deetiolated3 (det3) displayed a pH increase in the TGN/EE, but not in the vacuole, strongly impairing secretion and recycling of the brassinosteroid receptor and the cellulose synthase complexes to the plasma membrane, in contrast to mutants lacking tonoplast-localized V-ATPase activity only. The brassinosteroid insensitivity and the cellulose deficiency defects in det3 were tightly correlated with reduced Golgi and TGN/EE motility. Thus, our results provide strong evidence that acidification of the TGN/EE, but not of the vacuole, is indispensable for functional secretion and recycling in plants.}, author = {Luo, Yu and Scholl, Stefan and Doering, Anett and Zhang, Yi and Irani, Niloufer and Di Rubbo, Simone and Neumetzler, Lutz and Krishnamoorthy, Praveen and Van Houtte, Isabelle and Mylle, Evelien and Bischoff, Volker and Vernhettes, Samantha and Friml, Jiří and Stierhof, York and Schumacher, Karin and Persson, Staffan and Russinova, Eugenia}, journal = {Nature Plants}, number = {7}, publisher = {Nature Publishing Group}, title = {{V-ATPase activity in the TGN/EE is required for exocytosis and recycling in Arabidopsis}}, doi = {10.1038/nplants.2015.94}, volume = {1}, year = {2015}, } @inproceedings{1425, abstract = {In this work we aim at extending the theoretical foundations of lifelong learning. Previous work analyzing this scenario is based on the assumption that learning tasks are sampled i.i.d. from a task environment or limited to strongly constrained data distributions.
Instead, we study two scenarios in which lifelong learning is possible, even though the observed tasks do not form an i.i.d. sample: first, when they are sampled from the same environment, but possibly with dependencies, and second, when the task environment is allowed to change over time in a consistent way. In the first case we prove a PAC-Bayesian theorem that can be seen as a direct generalization of the analogous previous result for the i.i.d. case. For the second scenario we propose to learn an inductive bias in the form of a transfer procedure. We present a generalization bound and show on a toy example how it can be used to identify a beneficial transfer algorithm.}, author = {Pentina, Anastasia and Lampert, Christoph}, location = {Montreal, Canada}, pages = {1540 -- 1548}, publisher = {Neural Information Processing Systems}, title = {{Lifelong learning with non-i.i.d. tasks}}, volume = {2015}, year = {2015}, }