@article{1614, abstract = {GABAergic perisoma-inhibiting fast-spiking interneurons (PIIs) effectively control the activity of large neuron populations by their wide axonal arborizations. It is generally assumed that the output of one PII to its target cells is strong and rapid. Here, we show that, unexpectedly, both strength and time course of PII-mediated perisomatic inhibition change with distance between synaptically connected partners in the rodent hippocampus. Synaptic signals become weaker due to lower contact numbers and decay more slowly with distance, very likely resulting from changes in GABAA receptor subunit composition. When distance-dependent synaptic inhibition is introduced to a rhythmically active neuronal network model, randomly driven principal cell assemblies are strongly synchronized by the PIIs, leading to higher precision in principal cell spike times than in a network with uniform synaptic inhibition. }, author = {Strüber, Michael and Jonas, Peter M and Bartos, Marlene}, journal = {PNAS}, number = {4}, pages = {1220 -- 1225}, publisher = {National Academy of Sciences}, title = {{Strength and duration of perisomatic GABAergic inhibition depend on distance between synaptically connected cells}}, doi = {10.1073/pnas.1412996112}, volume = {112}, year = {2015}, } @article{1611, abstract = {Biosensors for signaling molecules allow the study of physiological processes by bringing together the fields of protein engineering, fluorescence imaging, and cell biology. Construction of genetically encoded biosensors generally relies on the availability of a binding "core" that is both specific and stable, which can then be combined with fluorescent molecules to create a sensor. However, binding proteins with the desired properties are often not available in nature and substantial improvement to sensors can be required, particularly with regard to their durability. 
Ancestral protein reconstruction is a powerful protein-engineering tool able to generate highly stable and functional proteins. In this work, we sought to establish the utility of ancestral protein reconstruction to biosensor development, beginning with the construction of an l-arginine biosensor. l-arginine, as the immediate precursor to nitric oxide, is an important molecule in many physiological contexts including brain function. Using a combination of ancestral reconstruction and circular permutation, we constructed a Förster resonance energy transfer (FRET) biosensor for l-arginine (cpFLIPR). cpFLIPR displays high sensitivity and specificity, with a Kd of ∼14 μM and a maximal dynamic range of 35%. Importantly, cpFLIPR was highly robust, enabling accurate l-arginine measurement at physiological temperatures. We established that cpFLIPR is compatible with two-photon excitation fluorescence microscopy and report l-arginine concentrations in brain tissue.}, author = {Whitfield, Jason and Zhang, William and Herde, Michel and Clifton, Ben and Radziejewski, Johanna and Janovjak, Harald L and Henneberger, Christian and Jackson, Colin}, journal = {Protein Science}, number = {9}, pages = {1412 -- 1422}, publisher = {Wiley}, title = {{Construction of a robust and sensitive arginine biosensor through ancestral protein reconstruction}}, doi = {10.1002/pro.2721}, volume = {24}, year = {2015}, } @article{1624, abstract = {Population structure can facilitate evolution of cooperation. In a structured population, cooperators can form clusters which resist exploitation by defectors. Recently, it was observed that a shift update rule is an extremely strong amplifier of cooperation in a one dimensional spatial model. For the shift update rule, an individual is chosen for reproduction proportional to fecundity; the offspring is placed next to the parent; a random individual dies. 
Subsequently, the population is rearranged (shifted) until all individual cells are again evenly spaced out. For large population size and a one dimensional population structure, the shift update rule favors cooperation for any benefit-to-cost ratio greater than one. But every attempt to generalize shift updating to higher dimensions while maintaining its strong effect has failed. The reason is that in two dimensions the clusters are fragmented by the movements caused by rearranging the cells. Here we introduce the natural phenomenon of a repulsive force between cells of different types. After a birth and death event, the cells are being rearranged minimizing the overall energy expenditure. If the repulsive force is sufficiently high, shift becomes a strong promoter of cooperation in two dimensions.}, author = {Pavlogiannis, Andreas and Chatterjee, Krishnendu and Adlam, Ben and Nowak, Martin}, journal = {Scientific Reports}, publisher = {Nature Publishing Group}, title = {{Cellular cooperation with shift updating and repulsion}}, doi = {10.1038/srep17147}, volume = {5}, year = {2015}, } @article{1623, abstract = {Background Photosynthetic cyanobacteria are attractive for a range of biotechnological applications including biofuel production. However, due to slow growth, screening of mutant libraries using microtiter plates is not feasible. Results We present a method for high-throughput, single-cell analysis and sorting of genetically engineered l-lactate-producing strains of Synechocystis sp. PCC6803. A microfluidic device is used to encapsulate single cells in picoliter droplets, assay the droplets for l-lactate production, and sort strains with high productivity. We demonstrate the separation of low- and high-producing reference strains, as well as enrichment of a more productive l-lactate-synthesizing population after UV-induced mutagenesis. 
The droplet platform also revealed population heterogeneity in photosynthetic growth and lactate production, as well as the presence of metabolically stalled cells. Conclusions The workflow will facilitate metabolic engineering and directed evolution studies and will be useful in studies of cyanobacteria biochemistry and physiology. }, author = {Hammar, Petter and Angermayr, Andreas and Sjostrom, Staffan and Van Der Meer, Josefin and Hellingwerf, Klaas and Hudson, Elton and Joensson, Hakaan}, journal = {Biotechnology for Biofuels}, number = {1}, publisher = {BioMed Central}, title = {{Single-cell screening of photosynthetic growth and lactate production by cyanobacteria}}, doi = {10.1186/s13068-015-0380-2}, volume = {8}, year = {2015}, } @inproceedings{1625, abstract = {In recent years we have seen numerous improvements on 3D scanning and tracking of human faces, greatly advancing the creation of digital doubles for film and video games. However, despite the high-resolution quality of the reconstruction approaches available, current methods are unable to capture one of the most important regions of the face - the eye region. In this work we present the first method for detailed spatio-temporal reconstruction of eyelids. Tracking and reconstructing eyelids is extremely challenging, as this region exhibits very complex and unique skin deformation where skin is folded under while opening the eye. Furthermore, eyelids are often only partially visible and obstructed due to self-occlusion and eyelashes. Our approach is to combine a geometric deformation model with image data, leveraging multi-view stereo, optical flow, contour tracking and wrinkle detection from local skin appearance. Our deformation model serves as a prior that enables reconstruction of eyelids even under strong self-occlusions caused by rolling and folding skin as the eye opens and closes. The output is a person-specific, time-varying eyelid reconstruction with anatomically plausible deformations. 
Our high-resolution detailed eyelids couple naturally with current facial performance capture approaches. As a result, our method can largely increase the fidelity of facial capture and the creation of digital doubles.}, author = {Bermano, Amit and Beeler, Thabo and Kozlov, Yeara and Bradley, Derek and Bickel, Bernd and Gross, Markus}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {ACM}, title = {{Detailed spatio-temporal reconstruction of eyelids}}, doi = {10.1145/2766924}, volume = {34}, year = {2015}, } @inproceedings{1626, abstract = {This paper introduces "OmniAD," a novel data-driven pipeline to model and acquire the aerodynamics of three-dimensional rigid objects. Traditionally, aerodynamics are examined through elaborate wind tunnel experiments or expensive fluid dynamics computations, and are only measured for a small number of discrete wind directions. OmniAD allows the evaluation of aerodynamic forces, such as drag and lift, for any incoming wind direction using a novel representation based on spherical harmonics. Our data-driven technique acquires the aerodynamic properties of an object simply by capturing its falling motion using a single camera. Once model parameters are estimated, OmniAD enables realistic real-time simulation of rigid bodies, such as the tumbling and gliding of leaves, without simulating the surrounding air. In addition, we propose an intuitive user interface based on OmniAD to interactively design three-dimensional kites that actually fly. 
Various nontraditional kites were designed to demonstrate the physical validity of our model.}, author = {Martin, Tobias and Umetani, Nobuyuki and Bickel, Bernd}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {ACM}, title = {{OmniAD: Data-driven omni-directional aerodynamics}}, doi = {10.1145/2766919}, volume = {34}, year = {2015}, } @inproceedings{1628, abstract = {We propose a method for fabricating deformable objects with spatially varying elasticity using 3D printing. Using a single, relatively stiff printer material, our method designs an assembly of smallscale microstructures that have the effect of a softer material at the object scale, with properties depending on the microstructure used in each part of the object. We build on work in the area of metamaterials, using numerical optimization to design tiled microstructures with desired properties, but with the key difference that our method designs families of related structures that can be interpolated to smoothly vary the material properties over a wide range. To create an object with spatially varying elastic properties, we tile the object's interior with microstructures drawn from these families, generating a different microstructure for each cell using an efficient algorithm to select compatible structures for neighboring cells. We show results computed for both 2D and 3D objects, validating several 2D and 3D printed structures using standard material tests as well as demonstrating various example applications.}, author = {Schumacher, Christian and Bickel, Bernd and Rys, Jan and Marschner, Steve and Daraio, Chiara and Gross, Markus}, location = {Los Angeles, CA, USA}, number = {4}, publisher = {ACM}, title = {{Microstructures to control elasticity in 3D printing}}, doi = {10.1145/2766926}, volume = {34}, year = {2015}, } @inproceedings{1627, abstract = {We present a computational tool for fabrication-oriented design of flexible rod meshes. 
Given a deformable surface and a set of deformed poses as input, our method automatically computes a printable rod mesh that, once manufactured, closely matches the input poses under the same boundary conditions. The core of our method is formed by an optimization scheme that adjusts the cross-sectional profiles of the rods and their rest centerline in order to best approximate the target deformations. This approach allows us to locally control the bending and stretching resistance of the surface with a single material, yielding high design flexibility and low fabrication cost.}, author = {Pérez, Jesús and Thomaszewski, Bernhard and Coros, Stelian and Bickel, Bernd and Canabal, José and Sumner, Robert and Otaduy, Miguel}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {ACM}, title = {{Design and fabrication of flexible rod meshes}}, doi = {10.1145/2766998}, volume = {34}, year = {2015}, } @inproceedings{1634, abstract = {Simulating the delightful dynamics of soap films, bubbles, and foams has traditionally required the use of a fully three-dimensional many-phase Navier-Stokes solver, even though their visual appearance is completely dominated by the thin liquid surface. We depart from earlier work on soap bubbles and foams by noting that their dynamics are naturally described by a Lagrangian vortex sheet model in which circulation is the primary variable. This leads us to derive a novel circulation-preserving surface-only discretization of foam dynamics driven by surface tension on a non-manifold triangle mesh. We represent the surface using a mesh-based multimaterial surface tracker which supports complex bubble topology changes, and evolve the surface according to the ambient air flow induced by a scalar circulation field stored on the mesh. Surface tension forces give rise to a simple update rule for circulation, even at non-manifold Plateau borders, based on a discrete measure of signed scalar mean curvature. 
We further incorporate vertex constraints to enable the interaction of soap films with wires. The result is a method that is at once simple, robust, and efficient, yet able to capture an array of soap film behaviors including foam rearrangement, catenoid collapse, blowing bubbles, and double bubbles being pulled apart.}, author = {Da, Fang and Batty, Christopher and Wojtan, Christopher J and Grinspun, Eitan}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {ACM}, title = {{Double bubbles sans toil and trouble: discrete circulation-preserving vortex sheets for soap films and foams}}, doi = {10.1145/2767003}, volume = {34}, year = {2015}, } @inproceedings{1636, abstract = {Constraint Satisfaction Problem (CSP) is a fundamental algorithmic problem that appears in many areas of Computer Science. It can be equivalently stated as computing a homomorphism R→Γ between two relational structures, e.g. between two directed graphs. Analyzing its complexity has been a prominent research direction, especially for the fixed template CSPs where the right side Γ is fixed and the left side R is unconstrained. Far fewer results are known for the hybrid setting that restricts both sides simultaneously. It assumes that R belongs to a certain class of relational structures (called a structural restriction in this paper). We study which structural restrictions are effective, i.e. there exists a fixed template Γ (from a certain class of languages) for which the problem is tractable when R is restricted, and NP-hard otherwise. We provide a characterization for structural restrictions that are closed under inverse homomorphisms. The criterion is based on the chromatic number of a relational structure defined in this paper; it generalizes the standard chromatic number of a graph. As our main tool, we use the algebraic machinery developed for fixed template CSPs. To apply it to our case, we introduce a new construction called a “lifted language”. 
We also give a characterization for structural restrictions corresponding to minor-closed families of graphs, extend results to certain Valued CSPs (namely conservative valued languages), and state implications for (valued) CSPs with ordered variables and for the maximum weight independent set problem on some restricted families of graphs.}, author = {Kolmogorov, Vladimir and Rolinek, Michal and Takhanov, Rustem}, booktitle = {26th International Symposium on Algorithms and Computation}, isbn = {978-3-662-48970-3}, location = {Nagoya, Japan}, pages = {566 -- 577}, publisher = {Springer Nature}, title = {{Effectiveness of structural restrictions for hybrid CSPs}}, doi = {10.1007/978-3-662-48971-0_48}, volume = {9472}, year = {2015}, } @inproceedings{1632, abstract = {This paper presents a liquid simulation technique that enforces the incompressibility condition using a stream function solve instead of a pressure projection. Previous methods have used stream function techniques for the simulation of detailed single-phase flows, but a formulation for liquid simulation has proved elusive in part due to the free surface boundary conditions. In this paper, we introduce a stream function approach to liquid simulations with novel boundary conditions for free surfaces, solid obstacles, and solid-fluid coupling. Although our approach increases the dimension of the linear system necessary to enforce incompressibility, it provides interesting and surprising benefits. First, the resulting flow is guaranteed to be divergence-free regardless of the accuracy of the solve. Second, our free-surface boundary conditions guarantee divergence-free motion even in the un-simulated air phase, which enables two-phase flow simulation by only computing a single phase. 
We implemented this method using a variant of FLIP simulation which only samples particles within a narrow band of the liquid surface, and we illustrate the effectiveness of our method for detailed two-phase flow simulations with complex boundaries, detailed bubble interactions, and two-way solid-fluid coupling.}, author = {Ando, Ryoichi and Thuerey, Nils and Wojtan, Christopher J}, location = {Los Angeles, CA, USA}, number = {4}, publisher = {ACM}, title = {{A stream function solver for liquid simulations}}, doi = {10.1145/2766935}, volume = {34}, year = {2015}, } @inproceedings{1630, abstract = {We present a method to learn and propagate shape placements in 2D polygonal scenes from a few examples provided by a user. The placement of a shape is modeled as an oriented bounding box. Simple geometric relationships between this bounding box and nearby scene polygons define a feature set for the placement. The feature sets of all example placements are then used to learn a probabilistic model over all possible placements and scenes. With this model, we can generate a new set of placements with similar geometric relationships in any given scene. We introduce extensions that enable propagation and generation of shapes in 3D scenes, as well as the application of a learned modeling session to large scenes without additional user interaction. These concepts allow us to generate complex scenes with thousands of objects with relatively little user interaction.}, author = {Guerrero, Paul and Jeschke, Stefan and Wimmer, Michael and Wonka, Peter}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {ACM}, title = {{Learning shape placements by example}}, doi = {10.1145/2766933}, volume = {34}, year = {2015}, } @article{1640, abstract = {Auxin and cytokinin are key endogenous regulators of plant development. Although cytokinin-mediated modulation of auxin distribution is a developmentally crucial hormonal interaction, its molecular basis is largely unknown. 
Here we show a direct regulatory link between cytokinin signalling and the auxin transport machinery uncovering a mechanistic framework for cytokinin-auxin cross-talk. We show that the CYTOKININ RESPONSE FACTORS (CRFs), transcription factors downstream of cytokinin perception, transcriptionally control genes encoding PIN-FORMED (PIN) auxin transporters at a specific PIN CYTOKININ RESPONSE ELEMENT (PCRE) domain. Removal of this cis-regulatory element effectively uncouples PIN transcription from the CRF-mediated cytokinin regulation and attenuates plant cytokinin sensitivity. We propose that CRFs represent a missing cross-talk component that fine-tunes auxin transport capacity downstream of cytokinin signalling to control plant development.}, author = {Šimášková, Mária and O'Brien, José and Khan-Djamei, Mamoona and Van Noorden, Giel and Ötvös, Krisztina and Vieten, Anne and De Clercq, Inge and Van Haperen, Johanna and Cuesta, Candela and Hoyerová, Klára and Vanneste, Steffen and Marhavy, Peter and Wabnik, Krzysztof T and Van Breusegem, Frank and Nowack, Moritz and Murphy, Angus and Friml, Jiří and Weijers, Dolf and Beeckman, Tom and Benková, Eva}, journal = {Nature Communications}, publisher = {Nature Publishing Group}, title = {{Cytokinin response factors regulate PIN-FORMED auxin transporters}}, doi = {10.1038/ncomms9717}, volume = {6}, year = {2015}, } @article{1642, abstract = {The Hanani-Tutte theorem is a classical result proved for the first time in the 1930s that characterizes planar graphs as graphs that admit a drawing in the plane in which every pair of edges not sharing a vertex cross an even number of times. We generalize this result to clustered graphs with two disjoint clusters, and show that a straightforward extension to flat clustered graphs with three or more disjoint clusters is not possible. For general clustered graphs we show a variant of the Hanani-Tutte theorem in the case when each cluster induces a connected subgraph. 
Di Battista and Frati proved that clustered planarity of embedded clustered graphs whose every face is incident to at most five vertices can be tested in polynomial time. We give a new and short proof of this result, using the matroid intersection algorithm.}, author = {Fulek, Radoslav and Kynčl, Jan and Malinovič, Igor and Pálvölgyi, Dömötör}, issn = {1077-8926}, journal = {Electronic Journal of Combinatorics}, number = {4}, publisher = {Electronic Journal of Combinatorics}, title = {{Clustered planarity testing revisited}}, doi = {10.37236/5002}, volume = {22}, year = {2015}, } @article{1639, abstract = {In this paper the optimal transport and the metamorphosis perspectives are combined. For a pair of given input images geodesic paths in the space of images are defined as minimizers of a resulting path energy. To this end, the underlying Riemannian metric measures the rate of transport cost and the rate of viscous dissipation. Furthermore, the model is capable to deal with strongly varying image contrast and explicitly allows for sources and sinks in the transport equations which are incorporated in the metric related to the metamorphosis approach by Trouvé and Younes. In the non-viscous case with source term existence of geodesic paths is proven in the space of measures. The proposed model is explored on the range from merely optimal transport to strongly dissipative dynamics. For this model a robust and effective variational time discretization of geodesic paths is proposed. This requires to minimize a discrete path energy consisting of a sum of consecutive image matching functionals. These functionals are defined on corresponding pairs of intensity functions and on associated pairwise matching deformations. Existence of time discrete geodesics is demonstrated. Furthermore, a finite element implementation is proposed and applied to instructive test cases and to real images. 
In the non-viscous case this is compared to the algorithm proposed by Benamou and Brenier including a discretization of the source term. Finally, the model is generalized to define discrete weighted barycentres with applications to textures and objects.}, author = {Maas, Jan and Rumpf, Martin and Schönlieb, Carola and Simon, Stefan}, journal = {ESAIM: Mathematical Modelling and Numerical Analysis}, number = {6}, pages = {1745 -- 1769}, publisher = {EDP Sciences}, title = {{A generalized model for optimal transport of images including dissipation and density modulation}}, doi = {10.1051/m2an/2015043}, volume = {49}, year = {2015}, } @article{1638, abstract = {The mitochondrial respiratory chain, also known as the electron transport chain (ETC), is crucial to life, and energy production in the form of ATP is the main mitochondrial function. Three proton-translocating enzymes of the ETC, namely complexes I, III and IV, generate proton motive force, which in turn drives ATP synthase (complex V). The atomic structures and basic mechanisms of most respiratory complexes have previously been established, with the exception of complex I, the largest complex in the ETC. Recently, the crystal structure of the entire complex I was solved using a bacterial enzyme. 
The structure provided novel insights into the core architecture of the complex, the electron transfer and proton translocation pathways, as well as the mechanism that couples these two processes.}, author = {Sazanov, Leonid A}, journal = {Nature Reviews Molecular Cell Biology}, number = {6}, pages = {375 -- 388}, publisher = {Nature Publishing Group}, title = {{A giant molecular proton pump: structure and mechanism of respiratory complex I}}, doi = {10.1038/nrm3997}, volume = {16}, year = {2015}, } @inproceedings{1646, abstract = {A pseudorandom function (PRF) is a keyed function F : K × X → Y where, for a random key k ∈ K, the function F(k, ·) is indistinguishable from a uniformly random function, given black-box access. A key-homomorphic PRF has the additional feature that for any keys k, k' and any input x, we have F(k+k', x) = F(k, x)⊕F(k', x) for some group operations +,⊕ on K and Y, respectively. A constrained PRF for a family of sets S ⊆ P(X) has the property that, given any key k and set S ∈ S, one can efficiently compute a “constrained” key kS that enables evaluation of F(k, x) on all inputs x ∈ S, while the values F(k, x) for x /∈ S remain pseudorandom even given kS. In this paper we construct PRFs that are simultaneously constrained and key homomorphic, where the homomorphic property holds even for constrained keys. We first show that the multilinear map-based bit-fixing and circuit-constrained PRFs of Boneh and Waters (Asiacrypt 2013) can be modified to also be key-homomorphic. We then show that the LWE-based key-homomorphic PRFs of Banerjee and Peikert (Crypto 2014) are essentially already prefix-constrained PRFs, using a (non-obvious) definition of constrained keys and associated group operation. Moreover, the constrained keys themselves are pseudorandom, and the constraining and evaluation functions can all be computed in low depth. 
As an application of key-homomorphic constrained PRFs, we construct a proxy re-encryption scheme with fine-grained access control. This scheme allows storing encrypted data on an untrusted server, where each file can be encrypted relative to some attributes, so that only parties whose constrained keys match the attributes can decrypt. Moreover, the server can re-key (arbitrary subsets of) the ciphertexts without learning anything about the plaintexts, thus permitting efficient and fine-grained revocation.}, author = {Banerjee, Abhishek and Fuchsbauer, Georg and Peikert, Chris and Pietrzak, Krzysztof Z and Stevens, Sophie}, booktitle = {12th Theory of Cryptography Conference}, isbn = {978-3-662-46496-0}, location = {Warsaw, Poland}, pages = {31 -- 60}, publisher = {Springer Nature}, title = {{Key-homomorphic constrained pseudorandom functions}}, doi = {10.1007/978-3-662-46497-7_2}, volume = {9015}, year = {2015}, } @inproceedings{1648, abstract = {Generalized Selective Decryption (GSD), introduced by Panjwani [TCC’07], is a game for a symmetric encryption scheme Enc that captures the difficulty of proving adaptive security of certain protocols, most notably the Logical Key Hierarchy (LKH) multicast encryption protocol. In the GSD game there are n keys k1,..., kn, which the adversary may adaptively corrupt (learn); moreover, it can ask for encryptions Encki (kj) of keys under other keys. The adversary’s task is to distinguish keys (which it cannot trivially compute) from random. Proving the hardness of GSD assuming only IND-CPA security of Enc is surprisingly hard. Using “complexity leveraging” loses a factor exponential in n, which makes the proof practically meaningless. We can think of the GSD game as building a graph on n vertices, where we add an edge i → j when the adversary asks for an encryption of kj under ki. If restricted to graphs of depth ℓ, Panjwani gave a reduction that loses only a factor exponential in ℓ (not n). 
To date, this is the only non-trivial result known for GSD. In this paper we give almost-polynomial reductions for large classes of graphs. Most importantly, we prove the security of the GSD game restricted to trees losing only a quasi-polynomial factor n^(3 log n+5). Trees are an important special case capturing real-world protocols like the LKH protocol. Our new bound improves upon Panjwani’s on some LKH variants proposed in the literature where the underlying tree is not balanced. Our proof builds on ideas from the “nested hybrids” technique recently introduced by Fuchsbauer et al. [Asiacrypt’14] for proving the adaptive security of constrained PRFs.}, author = {Fuchsbauer, Georg and Jafargholi, Zahra and Pietrzak, Krzysztof Z}, location = {Santa Barbara, CA, USA}, pages = {601 -- 620}, publisher = {Springer}, title = {{A quasipolynomial reduction for generalized selective decryption on trees}}, doi = {10.1007/978-3-662-47989-6_29}, volume = {9215}, year = {2015}, } @inproceedings{1649, abstract = {We extend a commitment scheme based on the learning with errors over rings (RLWE) problem, and present efficient companion zero-knowledge proofs of knowledge. 
Our scheme maps elements from the ring (or equivalently, n elements from }, author = {Benhamouda, Fabrice and Krenn, Stephan and Lyubashevsky, Vadim and Pietrzak, Krzysztof Z}, location = {Vienna, Austria}, pages = {305 -- 325}, publisher = {Springer}, title = {{Efficient zero-knowledge proofs for commitments from learning with errors over rings}}, doi = {10.1007/978-3-319-24174-6_16}, volume = {9326}, year = {2015}, } @inproceedings{1644, abstract = {Increasing the computational complexity of evaluating a hash function, both for the honest users as well as for an adversary, is a useful technique employed for example in password-based cryptographic schemes to impede brute-force attacks, and also in so-called proofs of work (used in protocols like Bitcoin) to show that a certain amount of computation was performed by a legitimate user. A natural approach to adjust the complexity of a hash function is to iterate it c times, for some parameter c, in the hope that any query to the scheme requires c evaluations of the underlying hash function. However, results by Dodis et al. (Crypto 2012) imply that plain iteration falls short of achieving this goal, and designing schemes which provably have such a desirable property remained an open problem. This paper formalizes explicitly what it means for a given scheme to amplify the query complexity of a hash function. In the random oracle model, the goal of a secure query-complexity amplifier (QCA) scheme is captured as transforming, in the sense of indifferentiability, a random oracle allowing R queries (for the adversary) into one provably allowing only r < R queries. Turned around, this means that making r queries to the scheme requires at least R queries to the actual random oracle. 
Second, a new scheme, called collision-free iteration, is proposed and proven to achieve c-fold QCA for both the honest parties and the adversary, for any fixed parameter c.}, author = {Demay, Grégory and Gazi, Peter and Maurer, Ueli and Tackmann, Björn}, location = {Lugano, Switzerland}, pages = {159 -- 180}, publisher = {Springer}, title = {{Query-complexity amplification for random oracles}}, doi = {10.1007/978-3-319-17470-9_10}, volume = {9063}, year = {2015}, } @inproceedings{1647, abstract = {Round-optimal blind signatures are notoriously hard to construct in the standard model, especially in the malicious-signer model, where blindness must hold under adversarially chosen keys. This is substantiated by several impossibility results. The only construction that can be termed theoretically efficient, by Garg and Gupta (Eurocrypt’14), requires complexity leveraging, inducing an exponential security loss. We present a construction of practically efficient round-optimal blind signatures in the standard model. It is conceptually simple and builds on the recent structure-preserving signatures on equivalence classes (SPSEQ) from Asiacrypt’14. While the traditional notion of blindness follows from standard assumptions, we prove blindness under adversarially chosen keys under an interactive variant of DDH. However, we neither require non-uniform assumptions nor complexity leveraging. We then show how to extend our construction to partially blind signatures and to blind signatures on message vectors, which yield a construction of one-show anonymous credentials à la “anonymous credentials light” (CCS’13) in the standard model. 
Furthermore, we give the first SPS-EQ construction under noninteractive assumptions and show how SPS-EQ schemes imply conventional structure-preserving signatures, which allows us to apply optimality results for the latter to SPS-EQ.}, author = {Fuchsbauer, Georg and Hanser, Christian and Slamanig, Daniel}, location = {Santa Barbara, CA, United States}, pages = {233 -- 253}, publisher = {Springer}, title = {{Practical round-optimal blind signatures in the standard model}}, doi = {10.1007/978-3-662-48000-7_12}, volume = {9216}, year = {2015}, } @inproceedings{1645, abstract = {Secret-key constructions are often proved secure in a model where one or more underlying components are replaced by an idealized oracle accessible to the attacker. This model gives rise to information-theoretic security analyses, and several advances have been made in this area over the last few years. This paper provides a systematic overview of what is achievable in this model, and how existing works fit into this view.}, author = {Gazi, Peter and Tessaro, Stefano}, booktitle = {2015 IEEE Information Theory Workshop}, location = {Jerusalem, Israel}, publisher = {IEEE}, title = {{Secret-key cryptography from ideal primitives: A systematic verview}}, doi = {10.1109/ITW.2015.7133163}, year = {2015}, } @inproceedings{1654, abstract = {HMAC and its variant NMAC are the most popular approaches to deriving a MAC (and more generally, a PRF) from a cryptographic hash function. Despite nearly two decades of research, their exact security still remains far from understood in many different contexts. Indeed, recent works have re-surfaced interest for {\em generic} attacks, i.e., attacks that treat the compression function of the underlying hash function as a black box. 
Generic security can be proved in a model where the underlying compression function is modeled as a random function -- yet, to date, the question of proving tight, non-trivial bounds on the generic security of HMAC/NMAC even as a PRF remains a challenging open question. In this paper, we ask the question of whether a small modification to HMAC and NMAC can allow us to exactly characterize the security of the resulting constructions, while only incurring little penalty with respect to efficiency. To this end, we present simple variants of NMAC and HMAC, for which we prove tight bounds on the generic PRF security, expressed in terms of numbers of construction and compression function queries necessary to break the construction. All of our constructions are obtained via a (near) {\em black-box} modification of NMAC and HMAC, which can be interpreted as an initial step of key-dependent message pre-processing. While our focus is on PRF security, a further attractive feature of our new constructions is that they clearly defeat all recent generic attacks against properties such as state recovery and universal forgery. These exploit properties of the so-called ``functional graph'' which are not directly accessible in our new constructions. }, author = {Gazi, Peter and Pietrzak, Krzysztof Z and Tessaro, Stefano}, location = {Auckland, New Zealand}, pages = {85 -- 109}, publisher = {Springer}, title = {{Generic security of NMAC and HMAC with input whitening}}, doi = {10.1007/978-3-662-48800-3_4}, volume = {9453}, year = {2015}, } @inproceedings{1650, abstract = {We consider the task of deriving a key with high HILL entropy (i.e., being computationally indistinguishable from a key with high min-entropy) from an unpredictable source. Previous to this work, the only known way to transform unpredictability into a key that was ϵ indistinguishable from having min-entropy was via pseudorandomness, for example by Goldreich-Levin (GL) hardcore bits. 
This approach has the inherent limitation that from a source with k bits of unpredictability entropy one can derive a key of length (and thus HILL entropy) at most k−2log(1/ϵ) bits. In many settings, e.g. when dealing with biometric data, such a 2log(1/ϵ) bit entropy loss in not an option. Our main technical contribution is a theorem that states that in the high entropy regime, unpredictability implies HILL entropy. Concretely, any variable K with |K|−d bits of unpredictability entropy has the same amount of so called metric entropy (against real-valued, deterministic distinguishers), which is known to imply the same amount of HILL entropy. The loss in circuit size in this argument is exponential in the entropy gap d, and thus this result only applies for small d (i.e., where the size of distinguishers considered is exponential in d). To overcome the above restriction, we investigate if it’s possible to first “condense” unpredictability entropy and make the entropy gap small. We show that any source with k bits of unpredictability can be condensed into a source of length k with k−3 bits of unpredictability entropy. Our condenser simply “abuses" the GL construction and derives a k bit key from a source with k bits of unpredicatibily. The original GL theorem implies nothing when extracting that many bits, but we show that in this regime, GL still behaves like a “condenser" for unpredictability. This result comes with two caveats (1) the loss in circuit size is exponential in k and (2) we require that the source we start with has no HILL entropy (equivalently, one can efficiently check if a guess is correct). 
We leave it as an intriguing open problem to overcome these restrictions or to prove they’re inherent.}, author = {Skórski, Maciej and Golovnev, Alexander and Pietrzak, Krzysztof Z}, location = {Kyoto, Japan}, pages = {1046 -- 1057}, publisher = {Springer}, title = {{Condensed unpredictability }}, doi = {10.1007/978-3-662-47672-7_85}, volume = {9134}, year = {2015}, } @inproceedings{1651, abstract = {Cryptographic e-cash allows off-line electronic transactions between a bank, users and merchants in a secure and anonymous fashion. A plethora of e-cash constructions has been proposed in the literature; however, these traditional e-cash schemes only allow coins to be transferred once between users and merchants. Ideally, we would like users to be able to transfer coins between each other multiple times before deposit, as happens with physical cash. “Transferable” e-cash schemes are the solution to this problem. Unfortunately, the currently proposed schemes are either completely impractical or do not achieve the desirable anonymity properties without compromises, such as assuming the existence of a trusted “judge” who can trace all coins and users in the system. This paper presents the first efficient and fully anonymous transferable e-cash scheme without any trusted third parties. We start by revising the security and anonymity properties of transferable e-cash to capture issues that were previously overlooked. For our construction we use the recently proposed malleable signatures by Chase et al. to allow the secure and anonymous transfer of coins, combined with a new efficient double-spending detection mechanism. 
Finally, we discuss an instantiation of our construction.}, author = {Baldimtsi, Foteini and Chase, Melissa and Fuchsbauer, Georg and Kohlweiss, Markulf}, booktitle = {Public-Key Cryptography - PKC 2015}, isbn = {978-3-662-46446-5}, location = {Gaithersburg, MD, United States}, pages = {101 -- 124}, publisher = {Springer}, title = {{Anonymous transferable e-cash}}, doi = {10.1007/978-3-662-46447-2_5}, volume = {9020}, year = {2015}, } @inproceedings{1652, abstract = {We develop new theoretical tools for proving lower-bounds on the (amortized) complexity of certain functions in models of parallel computation. We apply the tools to construct a class of functions with high amortized memory complexity in the parallel Random Oracle Model (pROM); a variant of the standard ROM allowing for batches of simultaneous queries. In particular we obtain a new, more robust, type of Memory-Hard Functions (MHF); a security primitive which has recently been gaining acceptance in practice as an effective means of countering brute-force attacks on security relevant functions. Along the way we also demonstrate an important shortcoming of previous definitions of MHFs and give a new definition addressing the problem. The tools we develop represent an adaptation of the powerful pebbling paradigm (initially introduced by Hewitt and Paterson [HP70] and Cook [Coo73]) to a simple and intuitive parallel setting. We define a simple pebbling game Gp over graphs which aims to abstract parallel computation in an intuitive way. As a conceptual contribution we define a measure of pebbling complexity for graphs called cumulative complexity (CC) and show how it overcomes a crucial shortcoming (in the parallel setting) exhibited by more traditional complexity measures used in the past. 
As a main technical contribution we give an explicit construction of a constant in-degree family of graphs whose CC in Gp approaches maximality to within a polylogarithmic factor for any graph of equal size (analogous to the graphs of Tarjan et. al. [PTC76, LT82] for sequential pebbling games). Finally, for a given graph G and related function fG, we derive a lower-bound on the amortized memory complexity of fG in the pROM in terms of the CC of G in the game Gp.}, author = {Alwen, Joel F and Serbinenko, Vladimir}, booktitle = {Proceedings of the 47th annual ACM symposium on Theory of computing}, location = {Portland, OR, United States}, pages = {595 -- 603}, publisher = {ACM}, title = {{High parallel complexity graphs and memory-hard functions}}, doi = {10.1145/2746539.2746622}, year = {2015}, } @inproceedings{1658, abstract = {Continuous-time Markov chain (CTMC) models have become a central tool for understanding the dynamics of complex reaction networks and the importance of stochasticity in the underlying biochemical processes. When such models are employed to answer questions in applications, in order to ensure that the model provides a sufficiently accurate representation of the real system, it is of vital importance that the model parameters are inferred from real measured data. This, however, is often a formidable task and all of the existing methods fail in one case or the other, usually because the underlying CTMC model is high-dimensional and computationally difficult to analyze. The parameter inference methods that tend to scale best in the dimension of the CTMC are based on so-called moment closure approximations. However, there exists a large number of different moment closure approximations and it is typically hard to say a priori which of the approximations is the most suitable for the inference procedure. Here, we propose a moment-based parameter inference method that automatically chooses the most appropriate moment closure method. 
Accordingly, contrary to existing methods, the user is not required to be experienced in moment closure techniques. In addition to that, our method adaptively changes the approximation during the parameter inference to ensure that always the best approximation is used, even in cases where different approximations are best in different regions of the parameter space.}, author = {Bogomolov, Sergiy and Henzinger, Thomas A and Podelski, Andreas and Ruess, Jakob and Schilling, Christian}, location = {Nantes, France}, pages = {77 -- 89}, publisher = {Springer}, title = {{Adaptive moment closure for parameter inference of biochemical reaction networks}}, doi = {10.1007/978-3-319-23401-4_8}, volume = {9308}, year = {2015}, } @inproceedings{1660, abstract = {We study the pattern frequency vector for runs in probabilistic Vector Addition Systems with States (pVASS). Intuitively, each configuration of a given pVASS is assigned one of finitely many patterns, and every run can thus be seen as an infinite sequence of these patterns. The pattern frequency vector assigns to each run the limit of pattern frequencies computed for longer and longer prefixes of the run. If the limit does not exist, then the vector is undefined. We show that for one-counter pVASS, the pattern frequency vector is defined and takes one of finitely many values for almost all runs. Further, these values and their associated probabilities can be approximated up to an arbitrarily small relative error in polynomial time. For stable two-counter pVASS, we show the same result, but we do not provide any upper complexity bound. 
As a byproduct of our study, we discover counterexamples falsifying some classical results about stochastic Petri nets published in the 80s.}, author = {Brázdil, Tomáš and Kiefer, Stefan and Kučera, Antonín and Novotny, Petr}, location = {Kyoto, Japan}, pages = {44 -- 55}, publisher = {IEEE}, title = {{Long-run average behaviour of probabilistic vector addition systems}}, doi = {10.1109/LICS.2015.15}, year = {2015}, } @article{1665, abstract = {Which genetic alterations drive tumorigenesis and how they evolve over the course of disease and therapy are central questions in cancer biology. Here we identify 44 recurrently mutated genes and 11 recurrent somatic copy number variations through whole-exome sequencing of 538 chronic lymphocytic leukaemia (CLL) and matched germline DNA samples, 278 of which were collected in a prospective clinical trial. These include previously unrecognized putative cancer drivers (RPS15, IKZF3), and collectively identify RNA processing and export, MYC activity, and MAPK signalling as central pathways involved in CLL. Clonality analysis of this large data set further enabled reconstruction of temporal relationships between driver events. Direct comparison between matched pre-treatment and relapse samples from 59 patients demonstrated highly frequent clonal evolution. 
Thus, large sequencing data sets of clinically informative samples enable the discovery of novel genes associated with cancer, the network of relationships between the driver events, and their impact on disease relapse and clinical outcome.}, author = {Landau, Dan and Tausch, Eugen and Taylor Weiner, Amaro and Stewart, Chip and Reiter, Johannes and Bahlo, Jasmin and Kluth, Sandra and Božić, Ivana and Lawrence, Michael and Böttcher, Sebastian and Carter, Scott and Cibulskis, Kristian and Mertens, Daniel and Sougnez, Carrie and Rosenberg, Mara and Hess, Julian and Edelmann, Jennifer and Kless, Sabrina and Kneba, Michael and Ritgen, Matthias and Fink, Anna and Fischer, Kirsten and Gabriel, Stacey and Lander, Eric and Nowak, Martin and Döhner, Hartmut and Hallek, Michael and Neuberg, Donna and Getz, Gad and Stilgenbauer, Stephan and Wu, Catherine}, journal = {Nature}, number = {7574}, pages = {525 -- 530}, publisher = {Nature Publishing Group}, title = {{Mutations driving CLL and their evolution in progression and relapse}}, doi = {10.1038/nature15395}, volume = {526}, year = {2015}, } @article{1663, abstract = {CREB-binding protein (CBP) and p300 are transcriptional coactivators involved in numerous biological processes that affect cell growth, transformation, differentiation, and development. In this study, we provide evidence of the involvement of homeodomain-interacting protein kinase 2 (HIPK2) in the regulation of CBP activity. We show that HIPK2 interacts with and phosphorylates several regions of CBP. We demonstrate that serines 2361, 2363, 2371, 2376, and 2381 are responsible for the HIPK2-induced mobility shift of CBP C-terminal activation domain. Moreover, we show that HIPK2 strongly potentiates the transcriptional activity of CBP. However, our data suggest that HIPK2 activates CBP mainly by counteracting the repressive action of cell cycle regulatory domain 1 (CRD1), located between amino acids 977 and 1076, independently of CBP phosphorylation. 
Our findings thus highlight a complex regulation of CBP activity by HIPK2, which might be relevant for the control of specific sets of target genes involved in cellular proliferation, differentiation and apoptosis.}, author = {Kovács, Krisztián and Steinmann, Myriam and Halfon, Olivier and Magistretti, Pierre and Cardinaux, Jean}, journal = {Cellular Signalling}, number = {11}, pages = {2252 -- 2260}, publisher = {Elsevier}, title = {{Complex regulation of CREB-binding protein by homeodomain-interacting protein kinase 2}}, doi = {10.1016/j.cellsig.2015.08.001}, volume = {27}, year = {2015}, } @inproceedings{1667, abstract = {We consider parametric version of fixed-delay continuoustime Markov chains (or equivalently deterministic and stochastic Petri nets, DSPN) where fixed-delay transitions are specified by parameters, rather than concrete values. Our goal is to synthesize values of these parameters that, for a given cost function, minimise expected total cost incurred before reaching a given set of target states. We show that under mild assumptions, optimal values of parameters can be effectively approximated using translation to a Markov decision process (MDP) whose actions correspond to discretized values of these parameters. To this end we identify and overcome several interesting phenomena arising in systems with fixed delays.}, author = {Brázdil, Tomáš and Korenčiak, L'Uboš and Krčál, Jan and Novotny, Petr and Řehák, Vojtěch}, location = {Madrid, Spain}, pages = {141 -- 159}, publisher = {Springer}, title = {{Optimizing performance of continuous-time stochastic systems using timeout synthesis}}, doi = {10.1007/978-3-319-22264-6_10}, volume = {9259}, year = {2015}, } @article{1664, abstract = {Over a century of research into the origin of turbulence in wall-bounded shear flows has resulted in a puzzling picture in which turbulence appears in a variety of different states competing with laminar background flow. 
At moderate flow speeds, turbulence is confined to localized patches; it is only at higher speeds that the entire flow becomes turbulent. The origin of the different states encountered during this transition, the front dynamics of the turbulent regions and the transformation to full turbulence have yet to be explained. By combining experiments, theory and computer simulations, here we uncover a bifurcation scenario that explains the transformation to fully turbulent pipe flow and describe the front dynamics of the different states encountered in the process. Key to resolving this problem is the interpretation of the flow as a bistable system with nonlinear propagation (advection) of turbulent fronts. These findings bridge the gap between our understanding of the onset of turbulence and fully turbulent flows.}, author = {Barkley, Dwight and Song, Baofang and Vasudevan, Mukund and Lemoult, Grégoire M and Avila, Marc and Hof, Björn}, journal = {Nature}, number = {7574}, pages = {550 -- 553}, publisher = {Nature Publishing Group}, title = {{The rise of fully turbulent flow}}, doi = {10.1038/nature15701}, volume = {526}, year = {2015}, } @inproceedings{1672, abstract = {Composable notions of incoercibility aim to forbid a coercer from using anything beyond the coerced parties’ inputs and outputs to catch them when they try to deceive him. Existing definitions are restricted to weak coercion types, and/or are not universally composable. Furthermore, they often make too strong assumptions on the knowledge of coerced parties—e.g., they assume they known the identities and/or the strategies of other coerced parties, or those of corrupted parties— which makes them unsuitable for applications of incoercibility such as e-voting, where colluding adversarial parties may attempt to coerce honest voters, e.g., by offering them money for a promised vote, and use their own view to check that the voter keeps his end of the bargain. 
In this work we put forward the first universally composable notion of incoercible multi-party computation, which satisfies the above intuition and does not assume collusions among coerced parties or knowledge of the corrupted set. We define natural notions of UC incoercibility corresponding to standard coercion-types, i.e., receipt-freeness and resistance to full-active coercion. Importantly, our suggested notion has the unique property that it builds on top of the well studied UC framework by Canetti instead of modifying it. This guarantees backwards compatibility, and allows us to inherit results from the rich UC literature. We then present MPC protocols which realize our notions of UC incoercibility given access to an arguably minimal setup—namely honestly generate tamper-proof hardware performing a very simple cryptographic operation—e.g., a smart card. This is, to our knowledge, the first proposed construction of an MPC protocol (for more than two parties) that is incoercibly secure and universally composable, and therefore the first construction of a universally composable receipt-free e-voting protocol.}, author = {Alwen, Joel F and Ostrovsky, Rafail and Zhou, Hongsheng and Zikas, Vassilis}, booktitle = {Advances in Cryptology - CRYPTO 2015}, isbn = {978-3-662-47999-5}, location = {Santa Barbara, CA, United States}, pages = {763 -- 780}, publisher = {Springer}, title = {{Incoercible multi-party computation and universally composable receipt-free voting}}, doi = {10.1007/978-3-662-48000-7_37}, volume = {9216}, year = {2015}, } @inproceedings{1669, abstract = {Computational notions of entropy (a.k.a. pseudoentropy) have found many applications, including leakage-resilient cryptography, deterministic encryption or memory delegation. 
The most important tools to argue about pseudoentropy are chain rules, which quantify by how much (in terms of quantity and quality) the pseudoentropy of a given random variable X decreases when conditioned on some other variable Z (think for example of X as a secret key and Z as information leaked by a side-channel). In this paper we give a very simple and modular proof of the chain rule for HILL pseudoentropy, improving best known parameters. Our version allows for increasing the acceptable length of leakage in applications up to a constant factor compared to the best previous bounds. As a contribution of independent interest, we provide a comprehensive study of all known versions of the chain rule, comparing their worst-case strength and limitations.}, author = {Pietrzak, Krzysztof Z and Skórski, Maciej}, location = {Guadalajara, Mexico}, pages = {81 -- 98}, publisher = {Springer}, title = {{The chain rule for HILL pseudoentropy, revisited}}, doi = {10.1007/978-3-319-22174-8_5}, volume = {9230}, year = {2015}, } @inproceedings{1671, abstract = {This paper studies the concrete security of PRFs and MACs obtained by keying hash functions based on the sponge paradigm. One such hash function is KECCAK, selected as NIST’s new SHA-3 standard. In contrast to other approaches like HMAC, the exact security of keyed sponges is not well understood. Indeed, recent security analyses delivered concrete security bounds which are far from existing attacks. This paper aims to close this gap. We prove (nearly) exact bounds on the concrete PRF security of keyed sponges using a random permutation. These bounds are tight for the most relevant ranges of parameters, i.e., for messages of length (roughly) l ≤ min{2n/4, 2r} blocks, where n is the state size and r is the desired output length; and for l ≤ q queries (to the construction or the underlying permutation). Moreover, we also improve standard-model bounds. 
As an intermediate step of independent interest, we prove tight bounds on the PRF security of the truncated CBC-MAC construction, which operates as plain CBC-MAC, but only returns a prefix of the output.}, author = {Gazi, Peter and Pietrzak, Krzysztof Z and Tessaro, Stefano}, location = {Santa Barbara, CA, United States}, pages = {368 -- 387}, publisher = {Springer}, title = {{The exact PRF security of truncation: Tight bounds for keyed sponges and truncated CBC}}, doi = {10.1007/978-3-662-47989-6_18}, volume = {9215}, year = {2015}, } @article{1673, abstract = {When a new mutant arises in a population, there is a probability it outcompetes the residents and fixes. The structure of the population can affect this fixation probability. Suppressing population structures reduce the difference between two competing variants, while amplifying population structures enhance the difference. Suppressors are ubiquitous and easy to construct, but amplifiers for the large population limit are more elusive and only a few examples have been discovered. Whether or not a population structure is an amplifier of selection depends on the probability distribution for the placement of the invading mutant. First, we prove that there exist only bounded amplifiers for adversarial placement-that is, for arbitrary initial conditions. Next, we show that the Star population structure, which is known to amplify for mutants placed uniformly at random, does not amplify for mutants that arise through reproduction and are therefore placed proportional to the temperatures of the vertices. Finally, we construct population structures that amplify for all mutational events that arise through reproduction, uniformly at random, or through some combination of the two. 
}, author = {Adlam, Ben and Chatterjee, Krishnendu and Nowak, Martin}, journal = {Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences}, number = {2181}, publisher = {Royal Society of London}, title = {{Amplifiers of selection}}, doi = {10.1098/rspa.2015.0114}, volume = {471}, year = {2015}, } @inproceedings{1668, abstract = {We revisit the security (as a pseudorandom permutation) of cascading-based constructions for block-cipher key-length extension. Previous works typically considered the extreme case where the adversary is given the entire codebook of the construction, the only complexity measure being the number qe of queries to the underlying ideal block cipher, representing adversary’s secret-key-independent computation. Here, we initiate a systematic study of the more natural case of an adversary restricted to adaptively learning a number qc of plaintext/ciphertext pairs that is less than the entire codebook. For any such qc, we aim to determine the highest number of block-cipher queries qe the adversary can issue without being able to successfully distinguish the construction (under a secret key) from a random permutation. More concretely, we show the following results for key-length extension schemes using a block cipher with n-bit blocks and κ-bit keys: Plain cascades of length ℓ=2r+1 are secure whenever qcqre≪2r(κ+n), qc≪2κ and qe≪22κ. The bound for r=1 also applies to two-key triple encryption (as used within Triple DES). The r-round XOR-cascade is secure as long as qcqre≪2r(κ+n), matching an attack by Gaži (CRYPTO 2013). 
We fully characterize the security of Gaži and Tessaro’s two-call }, author = {Gazi, Peter and Lee, Jooyoung and Seurin, Yannick and Steinberger, John and Tessaro, Stefano}, location = {Istanbul, Turkey}, pages = {319 -- 341}, publisher = {Springer}, title = {{Relaxing full-codebook security: A refined analysis of key-length extension schemes}}, doi = {10.1007/978-3-662-48116-5_16}, volume = {9054}, year = {2015}, } @inproceedings{1670, abstract = {Planning in hybrid domains poses a special challenge due to the involved mixed discrete-continuous dynamics. A recent solving approach for such domains is based on applying model checking techniques on a translation of PDDL+ planning problems to hybrid automata. However, the proposed translation is limited because must behavior is only overapproximated, and hence, processes and events are not reflected exactly. In this paper, we present the theoretical foundation of an exact PDDL+ translation. We propose a schema to convert a hybrid automaton with must transitions into an equivalent hybrid automaton featuring only may transitions.}, author = {Bogomolov, Sergiy and Magazzeni, Daniele and Minopoli, Stefano and Wehrle, Martin}, location = {Jerusalem, Israel}, pages = {42 -- 46}, publisher = {AAAI Press}, title = {{PDDL+ planning with hybrid automata: Foundations of translating must behavior}}, year = {2015}, } @article{1674, abstract = {We consider N × N random matrices of the form H = W + V where W is a real symmetric Wigner matrix and V a random or deterministic, real, diagonal matrix whose entries are independent of W. We assume subexponential decay for the matrix entries of W and we choose V so that the eigenvalues of W and V are typically of the same order. For a large class of diagonal matrices V, we show that the rescaled distribution of the extremal eigenvalues is given by the Tracy-Widom distribution F1 in the limit of large N. Our proofs also apply to the complex Hermitian setting, i.e. 
when W is a complex Hermitian Wigner matrix.}, author = {Lee, Jioon and Schnelli, Kevin}, journal = {Reviews in Mathematical Physics}, number = {8}, publisher = {World Scientific Publishing}, title = {{Edge universality for deformed Wigner matrices}}, doi = {10.1142/S0129055X1550018X}, volume = {27}, year = {2015}, } @article{1679, author = {Lemoult, Grégoire M and Maier, Philipp and Hof, Björn}, journal = {Physics of Fluids}, number = {9}, publisher = {American Institute of Physics}, title = {{Taylor's Forest}}, doi = {10.1063/1.4930850}, volume = {27}, year = {2015}, } @article{1676, author = {Sixt, Michael K and Raz, Erez}, journal = {Current Opinion in Cell Biology}, number = {10}, pages = {4 -- 6}, publisher = {Elsevier}, title = {{Editorial overview: Cell adhesion and migration}}, doi = {10.1016/j.ceb.2015.09.004}, volume = {36}, year = {2015}, } @article{1687, abstract = {Guided cell movement is essential for development and integrity of animals and crucially involved in cellular immune responses. Leukocytes are professional migratory cells that can navigate through most types of tissues and sense a wide range of directional cues. The responses of these cells to attractants have been mainly explored in tissue culture settings. How leukocytes make directional decisions in situ, within the challenging environment of a tissue maze, is less understood. 
Here we review recent advances in how leukocytes sense chemical cues in complex tissue settings and make links with paradigms of directed migration in development and Dictyostelium discoideum amoebae.}, author = {Sarris, Milka and Sixt, Michael K}, journal = {Current Opinion in Cell Biology}, number = {10}, pages = {93 -- 102}, publisher = {Elsevier}, title = {{Navigating in tissue mazes: Chemoattractant interpretation in complex environments}}, doi = {10.1016/j.ceb.2015.08.001}, volume = {36}, year = {2015}, } @inproceedings{1685, abstract = {Given a graph G cellularly embedded on a surface Σ of genus g, a cut graph is a subgraph of G such that cutting Σ along G yields a topological disk. We provide a fixed parameter tractable approximation scheme for the problem of computing the shortest cut graph, that is, for any ε > 0, we show how to compute a (1 + ε) approximation of the shortest cut graph in time f(ε, g)n3. Our techniques first rely on the computation of a spanner for the problem using the technique of brick decompositions, to reduce the problem to the case of bounded tree-width. Then, to solve the bounded tree-width case, we introduce a variant of the surface-cut decomposition of Rué, Sau and Thilikos, which may be of independent interest.}, author = {Cohen Addad, Vincent and De Mesmay, Arnaud N}, location = {Patras, Greece}, pages = {386 -- 398}, publisher = {Springer}, title = {{A fixed parameter tractable approximation scheme for the optimal cut graph of a surface}}, doi = {10.1007/978-3-662-48350-3_33}, volume = {9294}, year = {2015}, } @article{1688, abstract = {We estimate the selection constant in the following geometric selection theorem by Pach: For every positive integer d, there is a constant (Formula presented.) such that whenever (Formula presented.) are n-element subsets of (Formula presented.), we can find a point (Formula presented.) and subsets (Formula presented.) 
for every i∈[d+1], each of size at least cdn, such that p belongs to all rainbowd-simplices determined by (Formula presented.) simplices with one vertex in each Yi. We show a super-exponentially decreasing upper bound (Formula presented.). The ideas used in the proof of the upper bound also help us to prove Pach’s theorem with (Formula presented.), which is a lower bound doubly exponentially decreasing in d (up to some polynomial in the exponent). For comparison, Pach’s original approach yields a triply exponentially decreasing lower bound. On the other hand, Fox, Pach, and Suk recently obtained a hypergraph density result implying a proof of Pach’s theorem with (Formula presented.). In our construction for the upper bound, we use the fact that the minimum solid angle of every d-simplex is super-exponentially small. This fact was previously unknown and might be of independent interest. For the lower bound, we improve the ‘separation’ part of the argument by showing that in one of the key steps only d+1 separations are necessary, compared to 2d separations in the original proof. We also provide a measure version of Pach’s theorem.}, author = {Karasev, Roman and Kynčl, Jan and Paták, Pavel and Patakova, Zuzana and Tancer, Martin}, journal = {Discrete & Computational Geometry}, number = {3}, pages = {610 -- 636}, publisher = {Springer}, title = {{Bounds for Pach's selection theorem and for the minimum solid angle in a simplex}}, doi = {10.1007/s00454-015-9720-z}, volume = {54}, year = {2015}, } @article{1680, abstract = {We consider the satisfiability problem for modal logic over first-order definable classes of frames.We confirm the conjecture from Hemaspaandra and Schnoor [2008] that modal logic is decidable over classes definable by universal Horn formulae. We provide a full classification of Horn formulae with respect to the complexity of the corresponding satisfiability problem. 
It turns out that, except for the trivial case of inconsistent formulae, local satisfiability is either NP-complete or PSPACE-complete, and global satisfiability is NP-complete, PSPACE-complete, or ExpTime-complete. We also show that the finite satisfiability problem for modal logic over Horn definable classes of frames is decidable. On the negative side, we show undecidability of two related problems. First, we exhibit a simple universal three-variable formula defining the class of frames over which modal logic is undecidable. Second, we consider the satisfiability problem of bimodal logic over Horn definable classes of frames, and also present a formula leading to undecidability.}, author = {Michaliszyn, Jakub and Otop, Jan and Kieroński, Emanuel}, journal = {ACM Transactions on Computational Logic}, number = {1}, publisher = {ACM}, title = {{On the decidability of elementary modal logics}}, doi = {10.1145/2817825}, volume = {17}, year = {2015}, } @article{1682, abstract = {We study the problem of robust satisfiability of systems of nonlinear equations, namely, whether for a given continuous function f:K→ ℝn on a finite simplicial complex K and α > 0, it holds that each function g: K → ℝn such that ||g - f||∞ < α, has a root in K. Via a reduction to the extension problem of maps into a sphere, we particularly show that this problem is decidable in polynomial time for every fixed n, assuming dimK ≤ 2n - 3. This is a substantial extension of previous computational applications of topological degree and related concepts in numerical and interval analysis. Via a reverse reduction, we prove that the problem is undecidable when dim K > 2n - 2, where the threshold comes from the stable range in homotopy theory. For the lucidity of our exposition, we focus on the setting when f is simplexwise linear. 
Such functions can approximate general continuous functions, and thus we get approximation schemes and undecidability of the robust satisfiability in other possible settings.}, author = {Franek, Peter and Krcál, Marek}, journal = {Journal of the ACM}, number = {4}, publisher = {ACM}, title = {{Robust satisfiability of systems of equations}}, doi = {10.1145/2751524}, volume = {62}, year = {2015}, } @article{1683, abstract = {The 1 MDa, 45-subunit proton-pumping NADH-ubiquinone oxidoreductase (complex I) is the largest complex of the mitochondrial electron transport chain. The molecular mechanism of complex I is central to the metabolism of cells, but has yet to be fully characterized. The last two years have seen steady progress towards this goal with the first atomic-resolution structure of the entire bacterial complex I, a 5 Å cryo-electron microscopy map of bovine mitochondrial complex I and a ∼3.8 Å resolution X-ray crystallographic study of mitochondrial complex I from yeast Yarrowia lipotytica. 
In this review we will discuss what we have learned from these studies and what remains to be elucidated.}, author = {Letts, James A and Sazanov, Leonid A}, journal = {Current Opinion in Structural Biology}, number = {8}, pages = {135 -- 145}, publisher = {Elsevier}, title = {{Gaining mass: The structure of respiratory complex I-from bacterial towards mitochondrial versions}}, doi = {10.1016/j.sbi.2015.08.008}, volume = {33}, year = {2015}, } @article{1686, author = {Kiermaier, Eva and Sixt, Michael K}, journal = {Science}, number = {6252}, pages = {1055 -- 1056}, publisher = {American Association for the Advancement of Science}, title = {{Fragmented communication between immune cells: Neutrophils blaze a trail with migratory cues for T cells to follow to sites of infection}}, doi = {10.1126/science.aad0867}, volume = {349}, year = {2015}, } @inproceedings{1692, abstract = {Computing an approximation of the reachable states of a hybrid system is a challenge, mainly because overapproximating the solutions of ODEs with a finite number of sets does not scale well. Using template polyhedra can greatly reduce the computational complexity, since it replaces complex operations on sets with a small number of optimization problems. However, the use of templates may make the over-approximation too conservative. Spurious transitions, which are falsely considered reachable, are particularly detrimental to performance and accuracy, and may exacerbate the state explosion problem. In this paper, we examine how spurious transitions can be avoided with minimal computational effort. To this end, detecting spurious transitions is reduced to the well-known problem of showing that two convex sets are disjoint by finding a hyperplane that separates them. We generalize this to flowpipes by considering hyperplanes that evolve with time in correspondence to the dynamics of the system. 
The approach is implemented in the model checker SpaceEx and demonstrated on examples.}, author = {Frehse, Goran and Bogomolov, Sergiy and Greitschus, Marius and Strump, Thomas and Podelski, Andreas}, booktitle = {Proceedings of the 18th International Conference on Hybrid Systems: Computation and Control}, isbn = {978-1-4503-3433-4}, location = {Seattle, WA, United States}, pages = {149 -- 158}, publisher = {ACM}, title = {{Eliminating spurious transitions in reachability with support functions}}, doi = {10.1145/2728606.2728622}, year = {2015}, } @article{1693, abstract = {Quantum interference between energetically close states is theoretically investigated, with the state structure being observed via laser spectroscopy. In this work, we focus on hyperfine states of selected hydrogenic muonic isotopes, and on how quantum interference affects the measured Lamb shift. The process of photon excitation and subsequent photon decay is implemented within the framework of nonrelativistic second-order perturbation theory. Due to its experimental interest, calculations are performed for muonic hydrogen, deuterium, and helium-3. We restrict our analysis to the case of photon scattering by incident linear polarized photons and the polarization of the scattered photons not being observed. We conclude that while quantum interference effects can be safely neglected in muonic hydrogen and helium-3, in the case of muonic deuterium there are resonances with close proximity, where quantum interference effects can induce shifts up to a few percent of the linewidth, assuming a pointlike detector. However, by taking into account the geometry of the setup used by the CREMA collaboration, this effect is reduced to less than 0.2% of the linewidth in all possible cases, which makes it irrelevant at the present level of accuracy. 
© 2015 American Physical Society.}, author = {Amaro, Pedro and Franke, Beatrice and Krauth, Julian and Diepold, Marc and Fratini, Filippo and Safari, Laleh and Machado, Jorge and Antognini, Aldo and Kottmann, Franz and Indelicato, Paul and Pohl, Randolf and Santos, José}, journal = {Physical Review A}, number = {2}, publisher = {American Physical Society}, title = {{Quantum interference effects in laser spectroscopy of muonic hydrogen, deuterium, and helium-3}}, doi = {10.1103/PhysRevA.92.022514}, volume = {92}, year = {2015}, } @inproceedings{1690, abstract = {A number of powerful and scalable hybrid systems model checkers have recently emerged. Although all of them honor roughly the same hybrid systems semantics, they have drastically different model description languages. This situation (a) makes it difficult to quickly evaluate a specific hybrid automaton model using the different tools, (b) obstructs comparisons of reachability approaches, and (c) impedes the widespread application of research results that perform model modification and could benefit many of the tools. In this paper, we present Hyst, a Hybrid Source Transformer. Hyst is a source-to-source translation tool, currently taking input in the SpaceEx model format, and translating to the formats of HyCreate, Flow∗, or dReach. Internally, the tool supports generic model-to-model transformation passes that serve to both ease the translation and potentially improve reachability results for the supported tools. Although these model transformation passes could be implemented within each tool, the Hyst approach provides a single place for model modification, generating modified input sources for the unmodified target tools. Our evaluation demonstrates Hyst is capable of automatically translating benchmarks in several classes (including affine and nonlinear hybrid automata) to the input formats of several tools. 
Additionally, we illustrate a general model transformation pass based on pseudo-invariants implemented in Hyst that illustrates the reachability improvement.}, author = {Bak, Stanley and Bogomolov, Sergiy and Johnson, Taylor}, booktitle = {Proceedings of the 18th International Conference on Hybrid Systems: Computation and Control}, location = {Seattle, WA, United States}, pages = {128 -- 133}, publisher = {ACM}, title = {{HyST: A source transformation and translation tool for hybrid automaton models}}, doi = {10.1145/2728606.2728630}, year = {2015}, } @inproceedings{1691, abstract = {We consider a case study of the problem of deploying an autonomous air vehicle in a partially observable, dynamic, indoor environment from a specification given as a linear temporal logic (LTL) formula over regions of interest. We model the motion and sensing capabilities of the vehicle as a partially observable Markov decision process (POMDP). We adapt recent results for solving POMDPs with parity objectives to generate a control policy. We also extend the existing framework with a policy minimization technique to obtain a better implementable policy, while preserving its correctness. The proposed techniques are illustrated in an experimental setup involving an autonomous quadrotor performing surveillance in a dynamic environment.}, author = {Svoreňová, Mária and Chmelik, Martin and Leahy, Kevin and Eniser, Hasan and Chatterjee, Krishnendu and Černá, Ivana and Belta, Călin}, booktitle = {Proceedings of the 18th International Conference on Hybrid Systems: Computation and Control}, location = {Seattle, WA, United States}, pages = {233 -- 238}, publisher = {ACM}, title = {{Temporal logic motion planning using POMDPs with parity objectives: Case study paper}}, doi = {10.1145/2728606.2728617}, year = {2015}, } @article{1694, abstract = { We introduce quantitative timed refinement and timed simulation (directed) metrics, incorporating zenoness checks, for timed systems. 
These metrics assign positive real numbers which quantify the timing mismatches between two timed systems, amongst non-zeno runs. We quantify timing mismatches in three ways: (1) the maximal timing mismatch that can arise, (2) the “steady-state” maximal timing mismatches, where initial transient timing mismatches are ignored; and (3) the (long-run) average timing mismatches amongst two systems. These three kinds of mismatches constitute three important types of timing differences. Our event times are the global times, measured from the start of the system execution, not just the time durations of individual steps. We present algorithms over timed automata for computing the three quantitative simulation distances to within any desired degree of accuracy. In order to compute the values of the quantitative simulation distances, we use a game theoretic formulation. We introduce two new kinds of objectives for two player games on finite-state game graphs: (1) eventual debit-sum level objectives, and (2) average debit-sum level objectives. We present algorithms for computing the optimal values for these objectives in graph games, and then use these algorithms to compute the values of the timed simulation distances over timed automata. }, author = {Chatterjee, Krishnendu and Prabhu, Vinayak}, journal = {IEEE Transactions on Automatic Control}, number = {9}, pages = {2291 -- 2306}, publisher = {IEEE}, title = {{Quantitative temporal simulation and refinement distances for timed systems}}, doi = {10.1109/TAC.2015.2404612}, volume = {60}, year = {2015}, } @article{1695, abstract = {We give a comprehensive introduction into a diagrammatic method that allows for the evaluation of Gutzwiller wave functions in finite spatial dimensions. We discuss in detail some numerical schemes that turned out to be useful in the real-space evaluation of the diagrams. The method is applied to the problem of d-wave superconductivity in a two-dimensional single-band Hubbard model. 
Here, we discuss in particular the role of long-range contributions in our diagrammatic expansion. We further reconsider our previous analysis on the kinetic energy gain in the superconducting state.}, author = {Kaczmarczyk, Jan and Schickling, Tobias and Bünemann, Jörg}, journal = {Physica Status Solidi (B): Basic Solid State Physics}, number = {9}, pages = {2059 -- 2071}, publisher = {Wiley}, title = {{Evaluation techniques for Gutzwiller wave functions in finite dimensions}}, doi = {10.1002/pssb.201552082}, volume = {252}, year = {2015}, } @article{1697, abstract = {Motion tracking is a challenge the visual system has to solve by reading out the retinal population. It is still unclear how the information from different neurons can be combined together to estimate the position of an object. Here we recorded a large population of ganglion cells in a dense patch of salamander and guinea pig retinas while displaying a bar moving diffusively. We show that the bar’s position can be reconstructed from retinal activity with a precision in the hyperacuity regime using a linear decoder acting on 100+ cells. We then took advantage of this unprecedented precision to explore the spatial structure of the retina’s population code. The classical view would have suggested that the firing rates of the cells form a moving hill of activity tracking the bar’s position. Instead, we found that most ganglion cells in the salamander fired sparsely and idiosyncratically, so that their neural image did not track the bar. Furthermore, ganglion cell activity spanned an area much larger than predicted by their receptive fields, with cells coding for motion far in their surround. As a result, population redundancy was high, and we could find multiple, disjoint subsets of neurons that encoded the trajectory with high precision. 
This organization allows for diverse collections of ganglion cells to represent high-accuracy motion information in a form easily read out by downstream neural circuits.}, author = {Marre, Olivier and Botella Soler, Vicente and Simmons, Kristina and Mora, Thierry and Tkacik, Gasper and Berry, Michael}, journal = {PLoS Computational Biology}, number = {7}, publisher = {Public Library of Science}, title = {{High accuracy decoding of dynamical motion from a large retinal population}}, doi = {10.1371/journal.pcbi.1004304}, volume = {11}, year = {2015}, } @article{1699, abstract = {By hybridization and backcrossing, alleles can surmount species boundaries and be incorporated into the genome of a related species. This introgression of genes is of particular evolutionary relevance if it involves the transfer of adaptations between populations. However, any beneficial allele will typically be associated with other alien alleles that are often deleterious and hamper the introgression process. In order to describe the introgression of an adaptive allele, we set up a stochastic model with an explicit genetic makeup of linked and unlinked deleterious alleles. Based on the theory of reducible multitype branching processes, we derive a recursive expression for the establishment probability of the beneficial allele after a single hybridization event. We furthermore study the probability that slightly deleterious alleles hitchhike to fixation. The key to the analysis is a split of the process into a stochastic phase in which the advantageous alleles establishes and a deterministic phase in which it sweeps to fixation. We thereafter apply the theory to a set of biologically relevant scenarios such as introgression in the presence of many unlinked or few closely linked deleterious alleles. 
A comparison to computer simulations shows that the approximations work well over a large parameter range.}, author = {Uecker, Hildegard and Setter, Derek and Hermisson, Joachim}, journal = {Journal of Mathematical Biology}, number = {7}, pages = {1523 -- 1580}, publisher = {Springer}, title = {{Adaptive gene introgression after secondary contact}}, doi = {10.1007/s00285-014-0802-y}, volume = {70}, year = {2015}, } @article{1696, abstract = {The recently proposed diagrammatic expansion (DE) technique for the full Gutzwiller wave function (GWF) is applied to the Anderson lattice model. This approach allows for a systematic evaluation of the expectation values with full Gutzwiller wave function in finite-dimensional systems. It introduces results extending in an essential manner those obtained by means of the standard Gutzwiller approximation (GA), which is variationally exact only in infinite dimensions. Within the DE-GWF approach we discuss the principal paramagnetic properties and their relevance to heavy-fermion systems. We demonstrate the formation of an effective, narrow f band originating from atomic f-electron states and subsequently interpret this behavior as a direct itineracy of f electrons; it represents a combined effect of both the hybridization and the correlations induced by the Coulomb repulsive interaction. Such a feature is absent on the level of GA, which is equivalent to the zeroth order of our expansion. Formation of the hybridization- and electron-concentration-dependent narrow f band rationalizes the common assumption of such dispersion of f levels in the phenomenological modeling of the band structure of CeCoIn5. Moreover, it is shown that the emerging f-electron direct itineracy leads in a natural manner to three physically distinct regimes within a single model that are frequently discussed for 4f- or 5f-electron compounds as separate model situations. 
We identify these regimes as (i) the mixed-valence regime, (ii) Kondo/almost-Kondo insulating regime, and (iii) the Kondo-lattice limit when the f-electron occupancy is very close to the f-state half filling, ⟨nˆf⟩→1. The nonstandard features of the emerging correlated quantum liquid state are stressed.}, author = {Wysokiński, Marcin and Kaczmarczyk, Jan and Spałek, Jozef}, journal = {Physical Review B}, number = {12}, publisher = {American Physical Society}, title = {{Gutzwiller wave function solution for Anderson lattice model: Emerging universal regimes of heavy quasiparticle states}}, doi = {10.1103/PhysRevB.92.125135}, volume = {92}, year = {2015}, } @article{1701, abstract = {The activity of a neural network is defined by patterns of spiking and silence from the individual neurons. Because spikes are (relatively) sparse, patterns of activity with increasing numbers of spikes are less probable, but, with more spikes, the number of possible patterns increases. This tradeoff between probability and numerosity is mathematically equivalent to the relationship between entropy and energy in statistical physics. We construct this relationship for populations of up to N = 160 neurons in a small patch of the vertebrate retina, using a combination of direct and model-based analyses of experiments on the response of this network to naturalistic movies. We see signs of a thermodynamic limit, where the entropy per neuron approaches a smooth function of the energy per neuron as N increases. The form of this function corresponds to the distribution of activity being poised near an unusual kind of critical point. We suggest further tests of criticality, and give a brief discussion of its functional significance. 
}, author = {Tkacik, Gasper and Mora, Thierry and Marre, Olivier and Amodei, Dario and Palmer, Stephanie and Berry, II, Michael and Bialek, William}, journal = {PNAS}, number = {37}, pages = {11508 -- 11513}, publisher = {National Academy of Sciences}, title = {{Thermodynamics and signatures of criticality in a network of neurons}}, doi = {10.1073/pnas.1514188112}, volume = {112}, year = {2015}, } @article{1698, abstract = {In mean-payoff games, the objective of the protagonist is to ensure that the limit average of an infinite sequence of numeric weights is nonnegative. In energy games, the objective is to ensure that the running sum of weights is always nonnegative. Multi-mean-payoff and multi-energy games replace individual weights by tuples, and the limit average (resp., running sum) of each coordinate must be (resp., remain) nonnegative. We prove finite-memory determinacy of multi-energy games and show inter-reducibility of multi-mean-payoff and multi-energy games for finite-memory strategies. We improve the computational complexity for solving both classes with finite-memory strategies: we prove coNP-completeness improving the previously known EXPSPACE bound. For memoryless strategies, we show that deciding the existence of a winning strategy for the protagonist is NP-complete. 
We present the first solution of multi-mean-payoff games with infinite-memory strategies: we show that mean-payoff-sup objectives can be decided in NP∩coNP, whereas mean-payoff-inf objectives are coNP-complete.}, author = {Velner, Yaron and Chatterjee, Krishnendu and Doyen, Laurent and Henzinger, Thomas A and Rabinovich, Alexander and Raskin, Jean}, journal = {Information and Computation}, number = {4}, pages = {177 -- 196}, publisher = {Elsevier}, title = {{The complexity of multi-mean-payoff and multi-energy games}}, doi = {10.1016/j.ic.2015.03.001}, volume = {241}, year = {2015}, } @article{1700, abstract = {We use the dual boson approach to reveal the phase diagram of the Fermi-Hubbard model with long-range dipole-dipole interactions. By using a large-scale finite-temperature calculation on a 64×64 square lattice we demonstrate the existence of a novel phase, possessing an "ultralong-range" order. The fingerprint of this phase - the density correlation function - features a nontrivial behavior on a scale of tens of lattice sites. We study the properties and the stability of the ultralong-range-ordered phase, and show that it is accessible in modern experiments with ultracold polar molecules and magnetic atoms.}, author = {Van Loon, Erik and Katsnelson, Mikhail and Lemeshko, Mikhail}, journal = {Physical Review B}, number = {8}, publisher = {American Physical Society}, title = {{Ultralong-range order in the Fermi-Hubbard model with long-range interactions}}, doi = {10.1103/PhysRevB.92.081106}, volume = {92}, year = {2015}, } @article{1704, abstract = {Given a convex function (Formula presented.) and two hermitian matrices A and B, Lewin and Sabin study in (Lett Math Phys 104:691–705, 2014) the relative entropy defined by (Formula presented.). Among other things, they prove that the so-defined quantity is monotone if and only if (Formula presented.) is operator monotone. The monotonicity is then used to properly define (Formula presented.) 
for bounded self-adjoint operators acting on an infinite-dimensional Hilbert space by a limiting procedure. More precisely, for an increasing sequence of finite-dimensional projections (Formula presented.) with (Formula presented.) strongly, the limit (Formula presented.) is shown to exist and to be independent of the sequence of projections (Formula presented.). The question whether this sequence converges to its "obvious" limit, namely (Formula presented.), has been left open. We answer this question in principle affirmatively and show that (Formula presented.). If the operators A and B are regular enough, that is (A − B), (Formula presented.) and (Formula presented.) are trace-class, the identity (Formula presented.) holds.}, author = {Deuchert, Andreas and Hainzl, Christian and Seiringer, Robert}, journal = {Letters in Mathematical Physics}, number = {10}, pages = {1449 -- 1466}, publisher = {Springer}, title = {{Note on a family of monotone quantum relative entropies}}, doi = {10.1007/s11005-015-0787-5}, volume = {105}, year = {2015}, } @article{1703, abstract = {Vegetation clearing and land-use change have depleted many natural plant communities to the point where restoration is required. A major impediment to the success of rebuilding complex vegetation communities is having regular access to sufficient quantities of high-quality seed. Seed-production areas (SPAs) can help generate this seed, but these must be underpinned by a broad genetic base to maximise the evolutionary potential of restored populations. However, genetic bottlenecks can occur at the collection, establishment and production stages in SPAs, requiring genetic evaluation. This is especially relevant for species that may take many years before a return on SPA investment is realised. Two recently established yellow box (Eucalyptus melliodora A.Cunn. ex Schauer, Myrtaceae) SPAs were evaluated to determine whether genetic bottlenecks had occurred between seed collection and SPA establishment. 
No evidence was found to suggest that a significant loss of genetic diversity had occurred at this stage, although there was a significant difference in diversity between the two SPAs. Complex population genetic structure was also observed in the seed used to source the SPAs, with up to eight groups identified. Plant survival in the SPAs was influenced by seed collection location but not by SPA location and was not associated with genetic diversity. There were also no associations between genetic diversity and plant growth. These data highlighted the importance of chance events when establishing SPAs and indicated that the two yellow box SPAs are likely to provide genetically diverse seed sources for future restoration projects, especially by pooling seed from both SPAs.}, author = {Broadhurst, Linda and Fifield, Graham and Vanzella, Bindi and Pickup, Melinda}, journal = {Australian Journal of Botany}, number = {5}, pages = {455 -- 466}, publisher = {CSIRO}, title = {{An evaluation of the genetic structure of seed sources and the maintenance of genetic diversity during establishment of two yellow box (Eucalyptus melliodora) seed-production areas}}, doi = {10.1071/BT15023}, volume = {63}, year = {2015}, } @inproceedings{1706, abstract = {We consider a problem of learning kernels for use in SVM classification in the multi-task and lifelong scenarios and provide generalization bounds on the error of a large margin classifier. Our results show that, under mild conditions on the family of kernels used for learning, solving several related tasks simultaneously is beneficial over single task learning. 
In particular, as the number of observed tasks grows, assuming that in the considered family of kernels there exists one that yields low approximation error on all tasks, the overhead associated with learning such a kernel vanishes and the complexity converges to that of learning when this good kernel is given to the learner.}, author = {Pentina, Anastasia and Ben David, Shai}, location = {Banff, AB, Canada}, pages = {194 -- 208}, publisher = {Springer}, title = {{Multi-task and lifelong learning of kernels}}, doi = {10.1007/978-3-319-24486-0_13}, volume = {9355}, year = {2015}, } @article{1712, abstract = {The majority of immune cells in Drosophila melanogaster are plasmatocytes; they carry out similar functions to vertebrate macrophages, influencing development as well as protecting against infection and cancer. Plasmatocytes, sometimes referred to with the broader term of hemocytes, migrate widely during embryonic development and cycle in the larvae between sessile and circulating positions. Here we discuss the similarities of plasmatocyte developmental migration and its functions to that of vertebrate macrophages, considering the recent controversy regarding the functions of Drosophila PDGF/VEGF related ligands. We also examine recent findings on the significance of adhesion for plasmatocyte migration in the embryo, as well as proliferation, trans-differentiation, and tumor responses in the larva. 
We spotlight parallels throughout to vertebrate immune responses.}, author = {Ratheesh, Aparna and Belyaeva, Vera and Siekhaus, Daria E}, journal = {Current Opinion in Cell Biology}, number = {10}, pages = {71 -- 79}, publisher = {Elsevier}, title = {{Drosophila immune cell migration and adhesion during embryonic development and larval immune responses}}, doi = {10.1016/j.ceb.2015.07.003}, volume = {36}, year = {2015}, } @article{1710, abstract = {We consider the hollow on the half-plane {(x, y) : y ≤ 0} ⊂ ℝ2 defined by a function u : (-1, 1) → ℝ, u(x) < 0, and a vertical flow of point particles incident on the hollow. It is assumed that u satisfies the so-called single impact condition (SIC): each incident particle is elastically reflected by graph(u) and goes away without hitting the graph of u anymore. We solve the problem: find the function u minimizing the force of resistance created by the flow. We show that the graph of the minimizer is formed by two arcs of parabolas symmetric to each other with respect to the y-axis. Assuming that the resistance of u ≡ 0 equals 1, we show that the minimal resistance equals π/2 - 2arctan(1/2) ≈ 0.6435. This result completes the previously obtained result [SIAM J. Math. Anal., 46 (2014), pp. 2730-2742] stating in particular that the minimal resistance of a hollow in higher dimensions equals 0.5. We additionally consider a similar problem of minimal resistance, where the hollow in the half-space {(x1,...,xd,y) : y ≤ 0} ⊂ ℝd+1 is defined by a radial function U satisfying the SIC, U(x) = u(|x|), with x = (x1,...,xd), u(ξ) < 0 for 0 ≤ ξ < 1, and u(ξ) = 0 for ξ ≥ 1, and the flow is parallel to the y-axis. 
The minimal resistance is greater than 0.5 (and coincides with 0.6435 when d = 1) and converges to 0.5 as d → ∞.}, author = {Akopyan, Arseniy and Plakhov, Alexander}, journal = {SIAM Journal on Mathematical Analysis}, number = {4}, pages = {2754 -- 2769}, publisher = {SIAM}, title = {{Minimal resistance of curves under the single impact assumption}}, doi = {10.1137/140993843}, volume = {47}, year = {2015}, } @article{1730, abstract = {How much cutting is needed to simplify the topology of a surface? We provide bounds for several instances of this question, for the minimum length of topologically non-trivial closed curves, pants decompositions, and cut graphs with a given combinatorial map in triangulated combinatorial surfaces (or their dual cross-metric counterpart). Our work builds upon Riemannian systolic inequalities, which bound the minimum length of non-trivial closed curves in terms of the genus and the area of the surface. We first describe a systematic way to translate Riemannian systolic inequalities to a discrete setting, and vice-versa. This implies a conjecture by Przytycka and Przytycki (Graph structure theory. Contemporary Mathematics, vol. 147, 1993), a number of new systolic inequalities in the discrete setting, and the fact that a theorem of Hutchinson on the edge-width of triangulated surfaces and Gromov’s systolic inequality for surfaces are essentially equivalent. We also discuss how these proofs generalize to higher dimensions. Then we focus on topological decompositions of surfaces. Relying on ideas of Buser, we prove the existence of pants decompositions of length O(g^(3/2)n^(1/2)) for any triangulated combinatorial surface of genus g with n triangles, and describe an O(gn)-time algorithm to compute such a decomposition. Finally, we consider the problem of embedding a cut graph (or more generally a cellular graph) with a given combinatorial map on a given surface. 
Using random triangulations, we prove (essentially) that, for any choice of a combinatorial map, there are some surfaces on which any cellular embedding with that combinatorial map has length superlinear in the number of triangles of the triangulated combinatorial surface. There is also a similar result for graphs embedded on polyhedral triangulations.}, author = {Colin De Verdière, Éric and Hubard, Alfredo and De Mesmay, Arnaud N}, journal = {Discrete & Computational Geometry}, number = {3}, pages = {587 -- 620}, publisher = {Springer}, title = {{Discrete systolic inequalities and decompositions of triangulated surfaces}}, doi = {10.1007/s00454-015-9679-9}, volume = {53}, year = {2015}, } @article{1735, abstract = {This work presents a method for efficiently simplifying the pressure projection step in a liquid simulation. We first devise a straightforward dimension reduction technique that dramatically reduces the cost of solving the pressure projection. Next, we introduce a novel change of basis that satisfies free-surface boundary conditions exactly, regardless of the accuracy of the pressure solve. When combined, these ideas greatly reduce the computational complexity of the pressure solve without compromising free surface boundary conditions at the highest level of detail. 
Our techniques are easy to parallelize, and they effectively eliminate the computational bottleneck for large liquid simulations.}, author = {Ando, Ryoichi and Thürey, Nils and Wojtan, Christopher J}, journal = {Computer Graphics Forum}, number = {2}, pages = {473 -- 480}, publisher = {Wiley}, title = {{A dimension-reduced pressure solver for liquid simulations}}, doi = {10.1111/cgf.12576}, volume = {34}, year = {2015}, } @article{1734, abstract = {Facial appearance capture is now firmly established within academic research and used extensively across various application domains, perhaps most prominently in the entertainment industry through the design of virtual characters in video games and films. While significant progress has occurred over the last two decades, no single survey currently exists that discusses the similarities, differences, and practical considerations of the available appearance capture techniques as applied to human faces. A central difficulty of facial appearance capture is the way light interacts with skin-which has a complex multi-layered structure-and the interactions that occur below the skin surface can, by definition, only be observed indirectly. In this report, we distinguish between two broad strategies for dealing with this complexity. "Image-based methods" try to exhaustively capture the exact face appearance under different lighting and viewing conditions, and then render the face through weighted image combinations. "Parametric methods" instead fit the captured reflectance data to some parametric appearance model used during rendering, allowing for a more lightweight and flexible representation but at the cost of potentially increased rendering complexity or inexact reproduction. 
The goal of this report is to provide an overview that can guide practitioners and researchers in assessing the tradeoffs between current approaches and identifying directions for future advances in facial appearance capture.}, author = {Klehm, Oliver and Rousselle, Fabrice and Papas, Marios and Bradley, Derek and Hery, Christophe and Bickel, Bernd and Jarosz, Wojciech and Beeler, Thabo}, journal = {Computer Graphics Forum}, number = {2}, pages = {709 -- 733}, publisher = {Wiley-Blackwell}, title = {{Recent advances in facial appearance capture}}, doi = {10.1111/cgf.12594}, volume = {34}, year = {2015}, } @article{1789, abstract = {Intellectual disability (ID) has an estimated prevalence of 2-3%. Due to its extreme heterogeneity, the genetic basis of ID remains elusive in many cases. Recently, whole exome sequencing (WES) studies revealed that a large proportion of sporadic cases are caused by de novo gene variants. To identify further genes involved in ID, we performed WES in 250 patients with unexplained ID and their unaffected parents and included exomes of 51 previously sequenced child-parents trios in the analysis. Exome analysis revealed de novo intragenic variants in SET domain-containing 5 (SETD5) in two patients. One patient carried a nonsense variant, and the other an 81 bp deletion located across a splice-donor site. Chromosomal microarray diagnostics further identified four de novo non-recurrent microdeletions encompassing SETD5. CRISPR/Cas9 mutation modelling of the two intragenic variants demonstrated nonsense-mediated decay of the resulting transcripts, pointing to a loss-of-function (LoF) and haploinsufficiency as the common disease-causing mechanism of intragenic SETD5 sequence variants and SETD5-containing microdeletions. 
In silico domain prediction of SETD5, a predicted SET domain-containing histone methyltransferase (HMT), substantiated the presence of a SET domain and identified a novel putative PHD domain, strengthening a functional link to well-known histone-modifying ID genes. All six patients presented with ID and certain facial dysmorphisms, suggesting that SETD5 sequence variants contribute substantially to the microdeletion 3p25.3 phenotype. The present report of two SETD5 LoF variants in 301 patients demonstrates a prevalence of 0.7% and thus SETD5 variants as a relatively frequent cause of ID.}, author = {Kuechler, Alma and Zink, Alexander and Wieland, Thomas and Lüdecke, Hermann and Cremer, Kirsten and Salviati, Leonardo and Magini, Pamela and Najafi, Kimia and Zweier, Christiane and Czeschik, Johanna and Aretz, Stefan and Endele, Sabine and Tamburrino, Federica and Pinato, Claudia and Clementi, Maurizio and Gundlach, Jasmin and Maylahn, Carina and Mazzanti, Laura and Wohlleber, Eva and Schwarzmayr, Thomas and Kariminejad, Roxana and Schlessinger, Avner and Wieczorek, Dagmar and Strom, Tim and Novarino, Gaia and Engels, Hartmut}, journal = {European Journal of Human Genetics}, number = {6}, pages = {753 -- 760}, publisher = {Nature Publishing Group}, title = {{Loss-of-function variants of SETD5 cause intellectual disability and the core phenotype of microdeletion 3p25.3 syndrome}}, doi = {10.1038/ejhg.2014.165}, volume = {23}, year = {2015}, } @article{1804, abstract = {It is known that in classical fluids turbulence typically occurs at high Reynolds numbers. But can turbulence occur at low Reynolds numbers? Here we investigate the transition to turbulence in the classic Taylor-Couette system in which the rotating fluids are manufactured ferrofluids with magnetized nanoparticles embedded in liquid carriers. 
We find that, in the presence of a magnetic field transverse to the symmetry axis of the system, turbulence can occur at Reynolds numbers that are at least one order of magnitude smaller than those in conventional fluids. This is established by extensive computational ferrohydrodynamics through a detailed investigation of transitions in the flow structure, and characterization of behaviors of physical quantities such as the energy, the wave number, and the angular momentum through the bifurcations. A finding is that, as the magnetic field is increased, onset of turbulence can be determined accurately and reliably. Our results imply that experimental investigation of turbulence may be feasible by using ferrofluids. Our study of transition to and evolution of turbulence in the Taylor-Couette ferrofluidic flow system provides insights into the challenging problem of turbulence control.}, author = {Altmeyer, Sebastian and Do, Younghae and Lai, Ying}, journal = {Scientific Reports}, publisher = {Nature Publishing Group}, title = {{Transition to turbulence in Taylor-Couette ferrofluidic flow}}, doi = {10.1038/srep10781}, volume = {5}, year = {2015}, } @article{1807, abstract = {We study a double Cahn-Hilliard type functional related to the Gross-Pitaevskii energy of two-components Bose-Einstein condensates. In the case of large but same order intercomponent and intracomponent coupling strengths, we prove Γ-convergence to a perimeter minimisation functional with an inhomogeneous surface tension. We study the asymptotic behavior of the surface tension as the ratio between the intercomponent and intracomponent coupling strengths becomes very small or very large and obtain good agreement with the physical literature. 
We obtain as a consequence, symmetry breaking of the minimisers for the harmonic potential.}, author = {Goldman, Michael and Royo-Letelier, Jimena}, journal = {ESAIM - Control, Optimisation and Calculus of Variations}, number = {3}, pages = {603 -- 624}, publisher = {EDP Sciences}, title = {{Sharp interface limit for two components Bose-Einstein condensates}}, doi = {10.1051/cocv/2014040}, volume = {21}, year = {2015}, } @article{1810, abstract = {Combining antibiotics is a promising strategy for increasing treatment efficacy and for controlling resistance evolution. When drugs are combined, their effects on cells may be amplified or weakened, that is the drugs may show synergistic or antagonistic interactions. Recent work revealed the underlying mechanisms of such drug interactions by elucidating the drugs'; joint effects on cell physiology. Moreover, new treatment strategies that use drug combinations to exploit evolutionary tradeoffs were shown to affect the rate of resistance evolution in predictable ways. High throughput studies have further identified drug candidates based on their interactions with established antibiotics and general principles that enable the prediction of drug interactions were suggested. Overall, the conceptual and technical foundation for the rational design of potent drug combinations is rapidly developing.}, author = {Bollenbach, Mark Tobias}, journal = {Current Opinion in Microbiology}, pages = {1 -- 9}, publisher = {Elsevier}, title = {{Antimicrobial interactions: Mechanisms and implications for drug discovery and resistance evolution}}, doi = {10.1016/j.mib.2015.05.008}, volume = {27}, year = {2015}, } @article{1812, abstract = {We investigate the occurrence of rotons in a quadrupolar Bose–Einstein condensate confined to two dimensions. 
Depending on the particle density, the ratio of the contact and quadrupole–quadrupole interactions, and the alignment of the quadrupole moments with respect to the confinement plane, the dispersion relation features two or four point-like roton minima or one ring-shaped minimum. We map out the entire parameter space of the roton behavior and identify the instability regions. We propose to observe the exotic rotons by monitoring the characteristic density wave dynamics resulting from a short local perturbation, and discuss the possibilities to detect the predicted effects in state-of-the-art experiments with ultracold homonuclear molecules. }, author = {Lahrz, Martin and Lemeshko, Mikhail and Mathey, Ludwig}, journal = {New Journal of Physics}, number = {4}, publisher = {IOP Publishing Ltd.}, title = {{Exotic roton excitations in quadrupolar Bose–Einstein condensates }}, doi = {10.1088/1367-2630/17/4/045005}, volume = {17}, year = {2015}, } @article{1811, abstract = {Atomic form factors are widely used for the characterization of targets and specimens, from crystallography to biology. By using recent mathematical results, here we derive an analytical expression for the atomic form factor within the independent particle model constructed from nonrelativistic screened hydrogenic wave functions. The range of validity of this analytical expression is checked by comparing the analytically obtained form factors with the ones obtained within the Hartee-Fock method. 
As an example, we apply our analytical expression for the atomic form factor to evaluate the differential cross section for Rayleigh scattering off neutral atoms.}, author = {Safari, Laleh and Santos, José and Amaro, Pedro and Jänkälä, Kari and Fratini, Filippo}, journal = {Journal of Mathematical Physics}, number = {5}, publisher = {American Institute of Physics}, title = {{Analytical evaluation of atomic form factors: Application to Rayleigh scattering}}, doi = {10.1063/1.4921227}, volume = {56}, year = {2015}, } @article{1813, abstract = {We develop a microscopic theory describing a quantum impurity whose rotational degree of freedom is coupled to a many-particle bath. We approach the problem by introducing the concept of an “angulon”—a quantum rotor dressed by a quantum field—and reveal its quasiparticle properties using a combination of variational and diagrammatic techniques. Our theory predicts renormalization of the impurity rotational structure, such as that observed in experiments with molecules in superfluid helium droplets, in terms of a rotational Lamb shift induced by the many-particle environment. 
Furthermore, we discover a rich many-body-induced fine structure, emerging in rotational spectra due to a redistribution of angular momentum within the quantum many-body system.}, author = {Schmidt, Richard and Lemeshko, Mikhail}, journal = {Physical Review Letters}, number = {20}, publisher = {American Physical Society}, title = {{Rotation of quantum impurities in the presence of a many-body environment}}, doi = {10.1103/PhysRevLett.114.203001}, volume = {114}, year = {2015}, } @article{1808, author = {Gupta, Ashutosh and Henzinger, Thomas A}, journal = {ACM Transactions on Modeling and Computer Simulation}, number = {2}, publisher = {ACM}, title = {{Guest editors' introduction to special issue on computational methods in systems biology}}, doi = {10.1145/2745799}, volume = {25}, year = {2015}, } @article{1817, abstract = {Vertebrates have a unique 3D body shape in which correct tissue and organ shape and alignment are essential for function. For example, vision requires the lens to be centred in the eye cup which must in turn be correctly positioned in the head. Tissue morphogenesis depends on force generation, force transmission through the tissue, and response of tissues and extracellular matrix to force. Although a century ago D'Arcy Thompson postulated that terrestrial animal body shapes are conditioned by gravity, there has been no animal model directly demonstrating how the aforementioned mechano-morphogenetic processes are coordinated to generate a body shape that withstands gravity. Here we report a unique medaka fish (Oryzias latipes) mutant, hirame (hir), which is sensitive to deformation by gravity. hir embryos display a markedly flattened body caused by mutation of YAP, a nuclear executor of Hippo signalling that regulates organ size. We show that actomyosin-mediated tissue tension is reduced in hir embryos, leading to tissue flattening and tissue misalignment, both of which contribute to body flattening. 
By analysing YAP function in 3D spheroids of human cells, we identify the Rho GTPase activating protein ARHGAP18 as an effector of YAP in controlling tissue tension. Together, these findings reveal a previously unrecognised function of YAP in regulating tissue shape and alignment required for proper 3D body shape. Understanding this morphogenetic function of YAP could facilitate the use of embryonic stem cells to generate complex organs requiring correct alignment of multiple tissues. }, author = {Porazinski, Sean and Wang, Huijia and Asaoka, Yoichi and Behrndt, Martin and Miyamoto, Tatsuo and Morita, Hitoshi and Hata, Shoji and Sasaki, Takashi and Krens, Gabriel and Osada, Yumi and Asaka, Satoshi and Momoi, Akihiro and Linton, Sarah and Miesfeld, Joel and Link, Brian and Senga, Takeshi and Castillo Morales, Atahualpa and Urrutia, Araxi and Shimizu, Nobuyoshi and Nagase, Hideaki and Matsuura, Shinya and Bagby, Stefan and Kondoh, Hisato and Nishina, Hiroshi and Heisenberg, Carl-Philipp J and Furutani Seiki, Makoto}, journal = {Nature}, number = {7551}, pages = {217 -- 221}, publisher = {Nature Publishing Group}, title = {{YAP is essential for tissue tension to ensure vertebrate 3D body shape}}, doi = {10.1038/nature14215}, volume = {521}, year = {2015}, } @inproceedings{1820, abstract = {We consider partially observable Markov decision processes (POMDPs) with a set of target states and every transition is associated with an integer cost. The optimization objec- tive we study asks to minimize the expected total cost till the target set is reached, while ensuring that the target set is reached almost-surely (with probability 1). We show that for integer costs approximating the optimal cost is undecidable. 
For positive costs, our results are as follows: (i) we establish matching lower and upper bounds for the optimal cost and the bound is double exponential; (ii) we show that the problem of approximating the optimal cost is decidable and present ap- proximation algorithms developing on the existing algorithms for POMDPs with finite-horizon objectives. While the worst- case running time of our algorithm is double exponential, we present efficient stopping criteria for the algorithm and show experimentally that it performs well in many examples.}, author = {Chatterjee, Krishnendu and Chmelik, Martin and Gupta, Raghav and Kanodia, Ayush}, booktitle = {Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence }, location = {Austin, TX, USA}, pages = {3496--3502}, publisher = {AAAI Press}, title = {{Optimal cost almost-sure reachability in POMDPs}}, volume = {5}, year = {2015}, } @article{1814, abstract = {We present an efficient wavefront tracking algorithm for animating bodies of water that interact with their environment. Our contributions include: a novel wavefront tracking technique that enables dispersion, refraction, reflection, and diffraction in the same simulation; a unique multivalued function interpolation method that enables our simulations to elegantly sidestep the Nyquist limit; a dispersion approximation for efficiently amplifying the number of simulated waves by several orders of magnitude; and additional extensions that allow for time-dependent effects and interactive artistic editing of the resulting animation. 
Our contributions combine to give us multitudes more wave details than similar algorithms, while maintaining high frame rates and allowing close camera zooms.}, author = {Jeschke, Stefan and Wojtan, Christopher J}, journal = {ACM Transactions on Graphics}, number = {3}, publisher = {ACM}, title = {{Water wave animation via wavefront parameter interpolation}}, doi = {10.1145/2714572}, volume = {34}, year = {2015}, } @article{1818, abstract = {Why do species not adapt to ever-wider ranges of conditions, gradually expanding their ecological niche and geographic range? Gene flow across environments has two conflicting effects: although it increases genetic variation, which is a prerequisite for adaptation, gene flow may swamp adaptation to local conditions. In 1956, Haldane proposed that, when the environment varies across space, "swamping" by gene flow creates a positive feedback between low population size and maladaptation, leading to a sharp range margin. However, current deterministic theory shows that, when variance can evolve, there is no such limit. Using simple analytical tools and simulations, we show that genetic drift can generate a sharp margin to a species' range, by reducing genetic variance below the level needed for adaptation to spatially variable conditions. Aided by separation of ecological and evolutionary timescales, the identified effective dimensionless parameters reveal a simple threshold that predicts when adaptation at the range margin fails. Two observable parameters determine the threshold: (i) the effective environmental gradient, which can be measured by the loss of fitness due to dispersal to a different environment; and (ii) the efficacy of selection relative to genetic drift. The theory predicts sharp range margins even in the absence of abrupt changes in the environment. 
Furthermore, it implies that gradual worsening of conditions across a species' habitat may lead to a sudden range fragmentation, when adaptation to a wide span of conditions within a single species becomes impossible.}, author = {Polechova, Jitka and Barton, Nicholas H}, journal = {PNAS}, number = {20}, pages = {6401 -- 6406}, publisher = {National Academy of Sciences}, title = {{Limits to adaptation along environmental gradients}}, doi = {10.1073/pnas.1421515112}, volume = {112}, year = {2015}, } @article{1819, abstract = {The sessile life style of plants creates the need to deal with an often adverse environment, in which water availability can change on a daily basis, challenging the cellular physiology and integrity. Changes in osmotic conditions disrupt the equilibrium of the plasma membrane: hypoosmotic conditions increase and hyperosmotic environment decrease the cell volume. Here, we show that short-term extracellular osmotic treatments are closely followed by a shift in the balance between endocytosis and exocytosis in root meristem cells. Acute hyperosmotic treatments (ionic and nonionic) enhance clathrin-mediated endocytosis simultaneously attenuating exocytosis, whereas hypoosmotic treatments have the opposite effects. In addition to clathrin recruitment to the plasma membrane, components of early endocytic trafficking are essential during hyperosmotic stress responses. Consequently, growth of seedlings defective in elements of clathrin or early endocytic machinery is more sensitive to hyperosmotic treatments. We also found that the endocytotic response to a change of osmotic status in the environment is dominant over the presumably evolutionary more recent regulatory effect of plant hormones, such as auxin. These results imply that osmotic perturbation influences the balance between endocytosis and exocytosis acting through clathrin-mediated endocytosis. 
We propose that tension on the plasma membrane determines the addition or removal of membranes at the cell surface, thus preserving cell integrity.}, author = {Zwiewka, Marta and Nodzyński, Tomasz and Robert, Stéphanie and Vanneste, Steffen and Friml, Jiřĺ}, journal = {Molecular Plant}, number = {8}, pages = {1175 -- 1187}, publisher = {Elsevier}, title = {{Osmotic stress modulates the balance between exocytosis and clathrin mediated endocytosis in Arabidopsis thaliana}}, doi = {10.1016/j.molp.2015.03.007}, volume = {8}, year = {2015}, } @article{1823, abstract = {Abstract Drug combinations are increasingly important in disease treatments, for combating drug resistance, and for elucidating fundamental relationships in cell physiology. When drugs are combined, their individual effects on cells may be amplified or weakened. Such drug interactions are crucial for treatment efficacy, but their underlying mechanisms remain largely unknown. To uncover the causes of drug interactions, we developed a systematic approach based on precise quantification of the individual and joint effects of antibiotics on growth of genome-wide Escherichia coli gene deletion strains. We found that drug interactions between antibiotics representing the main modes of action are highly robust to genetic perturbation. This robustness is encapsulated in a general principle of bacterial growth, which enables the quantitative prediction of mutant growth rates under drug combinations. Rare violations of this principle exposed recurring cellular functions controlling drug interactions. In particular, we found that polysaccharide and ATP synthesis control multiple drug interactions with previously unexplained mechanisms, and small molecule adjuvants targeting these functions synthetically reshape drug interactions in predictable ways. 
These results provide a new conceptual framework for the design of multidrug combinations and suggest that there are universal mechanisms at the heart of most drug interactions. Synopsis A general principle of bacterial growth enables the prediction of mutant growth rates under drug combinations. Rare violations of this principle expose cellular functions that control drug interactions and can be targeted by small molecules to alter drug interactions in predictable ways. Drug interactions between antibiotics are highly robust to genetic perturbations. A general principle of bacterial growth enables the prediction of mutant growth rates under drug combinations. Rare violations of this principle expose cellular functions that control drug interactions. Diverse drug interactions are controlled by recurring cellular functions, including LPS synthesis and ATP synthesis. A general principle of bacterial growth enables the prediction of mutant growth rates under drug combinations. Rare violations of this principle expose cellular functions that control drug interactions and can be targeted by small molecules to alter drug interactions in predictable ways.}, author = {Chevereau, Guillaume and Bollenbach, Mark Tobias}, journal = {Molecular Systems Biology}, number = {4}, publisher = {Nature Publishing Group}, title = {{Systematic discovery of drug interaction mechanisms}}, doi = {10.15252/msb.20156098}, volume = {11}, year = {2015}, } @article{1824, abstract = {Condensation phenomena arise through a collective behaviour of particles. They are observed in both classical and quantum systems, ranging from the formation of traffic jams in mass transport models to the macroscopic occupation of the energetic ground state in ultra-cold bosonic gases (Bose-Einstein condensation). Recently, it has been shown that a driven and dissipative system of bosons may form multiple condensates. Which states become the condensates has, however, remained elusive thus far. 
The dynamics of this condensation are described by coupled birth-death processes, which also occur in evolutionary game theory. Here we apply concepts from evolutionary game theory to explain the formation of multiple condensates in such driven-dissipative bosonic systems. We show that the vanishing of relative entropy production determines their selection. The condensation proceeds exponentially fast, but the system never comes to rest. Instead, the occupation numbers of condensates may oscillate, as we demonstrate for a rock-paper-scissors game of condensates.}, author = {Knebel, Johannes and Weber, Markus and Krüger, Torben H and Frey, Erwin}, journal = {Nature Communications}, publisher = {Nature Publishing Group}, title = {{Evolutionary games of condensates in coupled birth-death processes}}, doi = {10.1038/ncomms7977}, volume = {6}, year = {2015}, } @article{1831, abstract = {This paper introduces a theme issue presenting the latest developments in research on the impacts of sociality on health and fitness. The articles that follow cover research on societies ranging from insects to humans. Variation in measures of fitness (i.e. survival and reproduction) has been linked to various aspects of sociality in humans and animals alike, and variability in individual health and condition has been recognized as a key mediator of these relationships. Viewed from a broad evolutionary perspective, the evolutionary transitions from a solitary lifestyle to group living have resulted in several new health-related costs and benefits of sociality. Social transmission of parasites within groups represents a major cost of group living, but some behavioural mechanisms, such as grooming, have evolved repeatedly to reduce this cost. 
Group living also has created novel costs in terms of altered susceptibility to infectious and non-infectious disease as a result of the unavoidable physiological consequences of social competition and integration, which are partly alleviated by social buffering in some vertebrates. Here, we define the relevant aspects of sociality, summarize their health-related costs and benefits, and discuss possible fitness measures in different study systems. Given the pervasive effects of social factors on health and fitness, we propose a synthesis of existing conceptual approaches in disease ecology, ecological immunology and behavioural neurosciences by adding sociality as a key factor, with the goal to generate a broader framework for organismal integration of health-related research.}, author = {Kappeler, Peter and Cremer, Sylvia and Nunn, Charles}, journal = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences}, number = {1669}, publisher = {Royal Society}, title = {{Sociality and health: Impacts of sociality on disease susceptibility and transmission in animal and human societies}}, doi = {10.1098/rstb.2014.0116}, volume = {370}, year = {2015}, } @article{1828, abstract = {We construct a non-linear Markov process connected with a biological model of a bacterial genome recombination. 
The description of invariant measures of this process gives us the solution of one problem in elementary probability theory.}, author = {Akopyan, Arseniy and Pirogov, Sergey and Rybko, Aleksandr}, journal = {Journal of Statistical Physics}, number = {1}, pages = {163 -- 167}, publisher = {Springer}, title = {{Invariant measures of genetic recombination process}}, doi = {10.1007/s10955-015-1238-5}, volume = {160}, year = {2015}, } @inproceedings{1836, abstract = {In the standard framework for worst-case execution time (WCET) analysis of programs, the main data structure is a single instance of integer linear programming (ILP) that represents the whole program. The instance of this NP-hard problem must be solved to find an estimate forWCET, and it must be refined if the estimate is not tight.We propose a new framework for WCET analysis, based on abstract segment trees (ASTs) as the main data structure. The ASTs have two advantages. First, they allow computing WCET by solving a number of independent small ILP instances. Second, ASTs store more expressive constraints, thus enabling a more efficient and precise refinement procedure. In order to realize our framework algorithmically, we develop an algorithm for WCET estimation on ASTs, and we develop an interpolation-based counterexample-guided refinement scheme for ASTs. Furthermore, we extend our framework to obtain parametric estimates of WCET. We experimentally evaluate our approach on a set of examples from WCET benchmark suites and linear-algebra packages. 
We show that our analysis, with comparable effort, provides WCET estimates that in many cases significantly improve those computed by existing tools.}, author = {Cerny, Pavol and Henzinger, Thomas A and Kovács, Laura and Radhakrishna, Arjun and Zwirchmayr, Jakob}, location = {London, United Kingdom}, pages = {105 -- 131}, publisher = {Springer}, title = {{Segment abstraction for worst-case execution time analysis}}, doi = {10.1007/978-3-662-46669-8_5}, volume = {9032}, year = {2015}, } @inproceedings{1838, abstract = {Synthesis of program parts is particularly useful for concurrent systems. However, most approaches do not support common design tasks, like modifying a single process without having to re-synthesize or verify the whole system. Assume-guarantee synthesis (AGS) provides robustness against modifications of system parts, but thus far has been limited to the perfect information setting. This means that local variables cannot be hidden from other processes, which renders synthesis results cumbersome or even impossible to realize.We resolve this shortcoming by defining AGS under partial information. We analyze the complexity and decidability in different settings, showing that the problem has a high worstcase complexity and is undecidable in many interesting cases. Based on these observations, we present a pragmatic algorithm based on bounded synthesis, and demonstrate its practical applicability on several examples.}, author = {Bloem, Roderick and Chatterjee, Krishnendu and Jacobs, Swen and Könighofer, Robert}, location = {London, United Kingdom}, pages = {517 -- 532}, publisher = {Springer}, title = {{Assume-guarantee synthesis for concurrent reactive programs with partial information}}, doi = {10.1007/978-3-662-46681-0_50}, volume = {9035}, year = {2015}, } @inproceedings{1839, abstract = {We present MultiGain, a tool to synthesize strategies for Markov decision processes (MDPs) with multiple mean-payoff objectives. 
Our models are described in PRISM, and our tool uses the existing interface and simulator of PRISM. Our tool extends PRISM by adding novel algorithms for multiple mean-payoff objectives, and also provides features such as (i) generating strategies and exploring them for simulation, and checking them with respect to other properties; and (ii) generating an approximate Pareto curve for two mean-payoff objectives. In addition, we present a new practical algorithm for the analysis of MDPs with multiple mean-payoff objectives under memoryless strategies.}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín}, location = {London, United Kingdom}, pages = {181 -- 187}, publisher = {Springer}, title = {{Multigain: A controller synthesis tool for MDPs with multiple mean-payoff objectives}}, doi = {10.1007/978-3-662-46681-0_12}, volume = {9035}, year = {2015}, } @article{1837, abstract = {Transition to turbulence in straight pipes occurs in spite of the linear stability of the laminar Hagen-Poiseuille flow if both the amplitude of flow perturbations and the Reynolds number Re exceed a minimum threshold (subcritical transition). As the pipe curvature increases, centrifugal effects become important, modifying the basic flow as well as the most unstable linear modes. If the curvature (tube-to-coiling diameter d/D) is sufficiently large, a Hopf bifurcation (supercritical instability) is encountered before turbulence can be excited (subcritical instability). We trace the instability thresholds in the Re - d/D parameter space in the range 0.01 ≤ d/D\ ≤ 0.1 by means of laser-Doppler velocimetry and determine the point where the subcritical and supercritical instabilities meet. Two different experimental set-ups are used: a closed system where the pipe forms an axisymmetric torus and an open system employing a helical pipe. 
Implications for the measurement of friction factors in curved pipes are discussed.}, author = {Kühnen, Jakob and Braunshier, P and Schwegel, M and Kuhlmann, Hendrik and Hof, Björn}, journal = {Journal of Fluid Mechanics}, number = {5}, publisher = {Cambridge University Press}, title = {{Subcritical versus supercritical transition to turbulence in curved pipes}}, doi = {10.1017/jfm.2015.184}, volume = {770}, year = {2015}, } @article{1848, abstract = {The ability to escape apoptosis is a hallmark of cancer-initiating cells and a key factor of resistance to oncolytic therapy. Here, we identify FAM96A as a ubiquitous, evolutionarily conserved apoptosome-activating protein and investigate its potential pro-apoptotic tumor suppressor function in gastrointestinal stromal tumors (GISTs). Interaction between FAM96A and apoptotic peptidase activating factor 1 (APAF1) was identified in yeast two-hybrid screen and further studied by deletion mutants, glutathione-S-transferase pull-down, co-immunoprecipitation and immunofluorescence. Effects of FAM96A overexpression and knock-down on apoptosis sensitivity were examined in cancer cells and zebrafish embryos. Expression of FAM96A in GISTs and histogenetically related cells including interstitial cells of Cajal (ICCs), “fibroblast-like cells” (FLCs) and ICC stem cells (ICC-SCs) was investigated by Northern blotting, reverse transcription—polymerase chain reaction, immunohistochemistry and Western immunoblotting. Tumorigenicity of GIST cells and transformed murine ICC-SCs stably transduced to re-express FAM96A was studied by xeno- and allografting into immunocompromised mice. FAM96A was found to bind APAF1 and to enhance the induction of mitochondrial apoptosis. FAM96A protein or mRNA was dramatically reduced or lost in 106 of 108 GIST samples representing three independent patient cohorts. 
Whereas ICCs, ICC-SCs and FLCs, the presumed normal counterparts of GIST, were found to robustly express FAM96A protein and mRNA, FAM96A expression was much reduced in tumorigenic ICC-SCs. Re-expression of FAM96A in GIST cells and transformed ICC-SCs increased apoptosis sensitivity and diminished tumorigenicity. Our data suggest FAM96A is a novel pro-apoptotic tumor suppressor that is lost during GIST tumorigenesis.}, author = {Schwamb, Bettina and Pick, Robert and Fernández, Sara and Völp, Kirsten and Heering, Jan and Dötsch, Volker and Bösser, Susanne and Jung, Jennifer and Beinoravičiute Kellner, Rasa and Wesely, Josephine and Zörnig, Inka and Hammerschmidt, Matthias and Nowak, Matthias and Penzel, Roland and Zatloukal, Kurt and Joos, Stefan and Rieker, Ralf and Agaimy, Abbas and Söder, Stephan and Reid Lombardo, Kmarie and Kendrick, Michael and Bardsley, Michael and Hayashi, Yujiro and Asuzu, David and Syed, Sabriya and Ördög, Tamás and Zörnig, Martin}, journal = {International Journal of Cancer}, number = {6}, pages = {1318 -- 1329}, publisher = {Wiley}, title = {{FAM96A is a novel pro-apoptotic tumor suppressor in gastrointestinal stromal tumors}}, doi = {10.1002/ijc.29498}, volume = {137}, year = {2015}, } @article{1846, abstract = {Modal transition systems (MTS) is a well-studied specification formalism of reactive systems supporting a step-wise refinement methodology. Despite its many advantages, the formalism as well as its currently known extensions are incapable of expressing some practically needed aspects in the refinement process like exclusive, conditional and persistent choices. We introduce a new model called parametric modal transition systems (PMTS) together with a general modal refinement notion that overcomes many of the limitations. 
We investigate the computational complexity of modal and thorough refinement checking on PMTS and its subclasses and provide a direct encoding of the modal refinement problem into quantified Boolean formulae, allowing us to employ state-of-the-art QBF solvers for modal refinement checking. The experiments we report on show that the feasibility of refinement checking is more influenced by the degree of nondeterminism rather than by the syntactic restrictions on the types of formulae allowed in the description of the PMTS.}, author = {Beneš, Nikola and Kretinsky, Jan and Larsen, Kim and Möller, Mikael and Sickert, Salomon and Srba, Jiří}, journal = {Acta Informatica}, number = {2-3}, pages = {269 -- 297}, publisher = {Springer}, title = {{Refinement checking on parametric modal transition systems}}, doi = {10.1007/s00236-015-0215-4}, volume = {52}, year = {2015}, } @article{1845, abstract = {Based on extrapolation from excitatory synapses, it is often assumed that depletion of the releasable pool of synaptic vesicles is the main factor underlying depression at inhibitory synapses. In this issue of Neuron, using subcellular patch-clamp recording from inhibitory presynaptic terminals, Kawaguchi and Sakaba (2015) show that at Purkinje cell-deep cerebellar nuclei neuron synapses, changes in presynaptic action potential waveform substantially contribute to synaptic depression. Based on extrapolation from excitatory synapses, it is often assumed that depletion of the releasable pool of synaptic vesicles is the main factor underlying depression at inhibitory synapses. 
In this issue of Neuron, using subcellular patch-clamp recording from inhibitory presynaptic terminals, Kawaguchi and Sakaba (2015) show that at Purkinje cell-deep cerebellar nuclei neuron synapses, changes in presynaptic action potential waveform substantially contribute to synaptic depression.}, author = {Vandael, David H and Espinoza Martinez, Claudia and Jonas, Peter M}, journal = {Neuron}, number = {6}, pages = {1149 -- 1151}, publisher = {Elsevier}, title = {{Excitement about inhibitory presynaptic terminals}}, doi = {10.1016/j.neuron.2015.03.006}, volume = {85}, year = {2015}, } @article{1840, abstract = {In this paper, we present a method for reducing a regular, discrete-time Markov chain (DTMC) to another DTMC with a given, typically much smaller number of states. The cost of reduction is defined as the Kullback-Leibler divergence rate between a projection of the original process through a partition function and a DTMC on the correspondingly partitioned state space. Finding the reduced model with minimal cost is computationally expensive, as it requires an exhaustive search among all state space partitions, and an exact evaluation of the reduction cost for each candidate partition. Our approach deals with the latter problem by minimizing an upper bound on the reduction cost instead of minimizing the exact cost. The proposed upper bound is easy to compute and it is tight if the original chain is lumpable with respect to the partition. Then, we express the problem in the form of information bottleneck optimization, and propose using the agglomerative information bottleneck algorithm for searching a suboptimal partition greedily, rather than exhaustively. 
The theory is illustrated with examples and one application scenario in the context of modeling bio-molecular interactions.}, author = {Geiger, Bernhard and Petrov, Tatjana and Kubin, Gernot and Koeppl, Heinz}, issn = {0018-9286}, journal = {IEEE Transactions on Automatic Control}, number = {4}, pages = {1010 -- 1022}, publisher = {IEEE}, title = {{Optimal Kullback-Leibler aggregation via information bottleneck}}, doi = {10.1109/TAC.2014.2364971}, volume = {60}, year = {2015}, } @article{1841, abstract = {We propose a new family of message passing techniques for MAP estimation in graphical models which we call Sequential Reweighted Message Passing (SRMP). Special cases include well-known techniques such as Min-Sum Diffusion (MSD) and a faster Sequential Tree-Reweighted Message Passing (TRW-S). Importantly, our derivation is simpler than the original derivation of TRW-S, and does not involve a decomposition into trees. This allows easy generalizations. The new family of algorithms can be viewed as a generalization of TRW-S from pairwise to higher-order graphical models. We test SRMP on several real-world problems with promising results.}, author = {Kolmogorov, Vladimir}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, number = {5}, pages = {919 -- 930}, publisher = {IEEE}, title = {{A new look at reweighted message passing}}, doi = {10.1109/TPAMI.2014.2363465}, volume = {37}, year = {2015}, } @article{1849, abstract = {Cell polarity is a fundamental property of pro- and eukaryotic cells. It is necessary for coordination of cell division, cell morphogenesis and signaling processes. How polarity is generated and maintained is a complex issue governed by interconnected feed-back regulations between small GTPase signaling and membrane tension-based signaling that controls membrane trafficking, and cytoskeleton organization and dynamics. 
Here, we will review the potential role for calcium as a crucial signal that connects and coordinates the respective processes during polarization processes in plants. This article is part of a Special Issue entitled: 13th European Symposium on Calcium.}, author = {Himschoot, Ellie and Beeckman, Tom and Friml, Jiří and Vanneste, Steffen}, journal = {Biochimica et Biophysica Acta - Molecular Cell Research}, number = {9}, pages = {2168 -- 2172}, publisher = {Elsevier}, title = {{Calcium is an organizer of cell polarity in plants}}, doi = {10.1016/j.bbamcr.2015.02.017}, volume = {1853}, year = {2015}, } @article{1847, author = {Grones, Peter and Friml, Jiří}, journal = {Molecular Plant}, number = {3}, pages = {356 -- 358}, publisher = {Elsevier}, title = {{ABP1: Finally docking}}, doi = {10.1016/j.molp.2014.12.013}, volume = {8}, year = {2015}, } @article{1850, abstract = {Entomopathogenic fungi are potent biocontrol agents that are widely used against insect pests, many of which are social insects. Nevertheless, theoretical investigations of their particular life history are scarce. We develop a model that takes into account the main distinguishing features between traditionally studied diseases and obligate killing pathogens, like the (biocontrol-relevant) insect-pathogenic fungi Metarhizium and Beauveria. First, obligate killing entomopathogenic fungi produce new infectious particles (conidiospores) only after host death and not yet on the living host. Second, the killing rates of entomopathogenic fungi depend strongly on the initial exposure dosage, thus we explicitly consider the pathogen load of individual hosts. Further, we make the model applicable not only to solitary host species, but also to group living species by incorporating social interactions between hosts, like the collective disease defences of insect societies. Our results identify the optimal killing rate for the pathogen that minimises its invasion threshold. 
Furthermore, we find that the rate of contact between hosts has an ambivalent effect: dense interaction networks between individuals are considered to facilitate disease outbreaks because of increased pathogen transmission. In social insects, this is compensated by their collective disease defences, i.e., social immunity. For the type of pathogens considered here, we show that even without social immunity, high contact rates between live individuals dilute the pathogen in the host colony and hence can reduce individual pathogen loads below disease-causing levels.}, author = {Novak, Sebastian and Cremer, Sylvia}, journal = {Journal of Theoretical Biology}, number = {5}, pages = {54 -- 64}, publisher = {Elsevier}, title = {{Fungal disease dynamics in insect societies: Optimal killing rates and the ambivalent effect of high social interaction rates}}, doi = {10.1016/j.jtbi.2015.02.018}, volume = {372}, year = {2015}, } @article{1851, abstract = {We consider mating strategies for females who search for males sequentially during a season of limited length. We show that the best strategy rejects a given male type if encountered before a time-threshold but accepts him after. For frequency-independent benefits, we obtain the optimal time-thresholds explicitly for both discrete and continuous distributions of males, and allow for mistakes being made in assessing the correct male type. When the benefits are indirect (genes for the offspring) and the population is under frequency-dependent ecological selection, the benefits depend on the mating strategy of other females as well. This case is particularly relevant to speciation models that seek to explore the stability of reproductive isolation by assortative mating under frequency-dependent ecological selection. We show that the indirect benefits are to be quantified by the reproductive values of couples, and describe how the evolutionarily stable time-thresholds can be found. 
We conclude with an example based on the Levene model, in which we analyze the evolutionarily stable assortative mating strategies and the strength of reproductive isolation provided by them.}, author = {Priklopil, Tadeas and Kisdi, Eva and Gyllenberg, Mats}, issn = {1558-5646}, journal = {Evolution}, number = {4}, pages = {1015 -- 1026}, publisher = {Wiley}, title = {{Evolutionarily stable mating decisions for sequentially searching females and the stability of reproductive isolation by assortative mating}}, doi = {10.1111/evo.12618}, volume = {69}, year = {2015}, } @inproceedings{1859, abstract = {Structural support vector machines (SSVMs) are amongst the best performing models for structured computer vision tasks, such as semantic image segmentation or human pose estimation. Training SSVMs, however, is computationally costly, because it requires repeated calls to a structured prediction subroutine (called \emph{max-oracle}), which has to solve an optimization problem itself, e.g. a graph cut. In this work, we introduce a new algorithm for SSVM training that is more efficient than earlier techniques when the max-oracle is computationally expensive, as it is frequently the case in computer vision tasks. The main idea is to (i) combine the recent stochastic Block-Coordinate Frank-Wolfe algorithm with efficient hyperplane caching, and (ii) use an automatic selection rule for deciding whether to call the exact max-oracle or to rely on an approximate one based on the cached hyperplanes. We show experimentally that this strategy leads to faster convergence to the optimum with respect to the number of requires oracle calls, and that this translates into faster convergence with respect to the total runtime when the max-oracle is slow compared to the other steps of the algorithm. 
}, author = {Shah, Neel and Kolmogorov, Vladimir and Lampert, Christoph}, location = {Boston, MA, USA}, pages = {2737 -- 2745}, publisher = {IEEE}, title = {{A multi-plane block-coordinate Frank-Wolfe algorithm for training structural SVMs with a costly max-oracle}}, doi = {10.1109/CVPR.2015.7298890}, year = {2015}, } @inproceedings{1860, abstract = {Classifiers for object categorization are usually evaluated by their accuracy on a set of i.i.d. test examples. This provides us with an estimate of the expected error when applying the classifiers to a single new image. In real application, however, classifiers are rarely only used for a single image and then discarded. Instead, they are applied sequentially to many images, and these are typically not i.i.d. samples from a fixed data distribution, but they carry dependencies and their class distribution varies over time. In this work, we argue that the phenomenon of correlated data at prediction time is not a nuisance, but a blessing in disguise. We describe a probabilistic method for adapting classifiers at prediction time without having to retrain them. We also introduce a framework for creating realistically distributed image sequences, which offers a way to benchmark classifier adaptation methods, such as the one we propose. Experiments on the ILSVRC2010 and ILSVRC2012 datasets show that adapting object classification systems at prediction time can significantly reduce their error rate, even with no additional human feedback.}, author = {Royer, Amélie and Lampert, Christoph}, location = {Boston, MA, United States}, pages = {1401 -- 1409}, publisher = {IEEE}, title = {{Classifier adaptation at prediction time}}, doi = {10.1109/CVPR.2015.7298746}, year = {2015}, }