@article{1674,
abstract = {We consider N × N random matrices of the form H = W + V where W is a real symmetric Wigner matrix and V a random or deterministic, real, diagonal matrix whose entries are independent of W. We assume subexponential decay for the matrix entries of W and we choose V so that the eigenvalues of W and V are typically of the same order. For a large class of diagonal matrices V, we show that the rescaled distribution of the extremal eigenvalues is given by the Tracy-Widom distribution F1 in the limit of large N. Our proofs also apply to the complex Hermitian setting, i.e. when W is a complex Hermitian Wigner matrix.},
author = {Lee, Ji Oon and Schnelli, Kevin},
journal = {Reviews in Mathematical Physics},
number = {8},
publisher = {World Scientific Publishing},
title = {{Edge universality for deformed Wigner matrices}},
doi = {10.1142/S0129055X1550018X},
volume = {27},
year = {2015},
}
@article{1663,
abstract = {CREB-binding protein (CBP) and p300 are transcriptional coactivators involved in numerous biological processes that affect cell growth, transformation, differentiation, and development. In this study, we provide evidence of the involvement of homeodomain-interacting protein kinase 2 (HIPK2) in the regulation of CBP activity. We show that HIPK2 interacts with and phosphorylates several regions of CBP. We demonstrate that serines 2361, 2363, 2371, 2376, and 2381 are responsible for the HIPK2-induced mobility shift of CBP C-terminal activation domain. Moreover, we show that HIPK2 strongly potentiates the transcriptional activity of CBP. However, our data suggest that HIPK2 activates CBP mainly by counteracting the repressive action of cell cycle regulatory domain 1 (CRD1), located between amino acids 977 and 1076, independently of CBP phosphorylation. Our findings thus highlight a complex regulation of CBP activity by HIPK2, which might be relevant for the control of specific sets of target genes involved in cellular proliferation, differentiation and apoptosis.},
author = {Kovács, Krisztián and Steinmann, Myriam and Halfon, Olivier and Magistretti, Pierre and Cardinaux, Jean},
journal = {Cellular Signalling},
number = {11},
pages = {2252 -- 2260},
publisher = {Elsevier},
title = {{Complex regulation of CREB-binding protein by homeodomain-interacting protein kinase 2}},
doi = {10.1016/j.cellsig.2015.08.001},
volume = {27},
year = {2015},
}
@article{1664,
abstract = {Over a century of research into the origin of turbulence in wall-bounded shear flows has resulted in a puzzling picture in which turbulence appears in a variety of different states competing with laminar background flow. At moderate flow speeds, turbulence is confined to localized patches; it is only at higher speeds that the entire flow becomes turbulent. The origin of the different states encountered during this transition, the front dynamics of the turbulent regions and the transformation to full turbulence have yet to be explained. By combining experiments, theory and computer simulations, here we uncover a bifurcation scenario that explains the transformation to fully turbulent pipe flow and describe the front dynamics of the different states encountered in the process. Key to resolving this problem is the interpretation of the flow as a bistable system with nonlinear propagation (advection) of turbulent fronts. These findings bridge the gap between our understanding of the onset of turbulence and fully turbulent flows.},
author = {Barkley, Dwight and Song, Baofang and Vasudevan, Mukund and Lemoult, Grégoire M and Avila, Marc and Hof, Björn},
journal = {Nature},
number = {7574},
pages = {550 -- 553},
publisher = {Nature Publishing Group},
title = {{The rise of fully turbulent flow}},
doi = {10.1038/nature15701},
volume = {526},
year = {2015},
}
@article{1665,
abstract = {Which genetic alterations drive tumorigenesis and how they evolve over the course of disease and therapy are central questions in cancer biology. Here we identify 44 recurrently mutated genes and 11 recurrent somatic copy number variations through whole-exome sequencing of 538 chronic lymphocytic leukaemia (CLL) and matched germline DNA samples, 278 of which were collected in a prospective clinical trial. These include previously unrecognized putative cancer drivers (RPS15, IKZF3), and collectively identify RNA processing and export, MYC activity, and MAPK signalling as central pathways involved in CLL. Clonality analysis of this large data set further enabled reconstruction of temporal relationships between driver events. Direct comparison between matched pre-treatment and relapse samples from 59 patients demonstrated highly frequent clonal evolution. Thus, large sequencing data sets of clinically informative samples enable the discovery of novel genes associated with cancer, the network of relationships between the driver events, and their impact on disease relapse and clinical outcome.},
author = {Landau, Dan and Tausch, Eugen and Taylor Weiner, Amaro and Stewart, Chip and Reiter, Johannes and Bahlo, Jasmin and Kluth, Sandra and Božić, Ivana and Lawrence, Michael and Böttcher, Sebastian and Carter, Scott and Cibulskis, Kristian and Mertens, Daniel and Sougnez, Carrie and Rosenberg, Mara and Hess, Julian and Edelmann, Jennifer and Kless, Sabrina and Kneba, Michael and Ritgen, Matthias and Fink, Anna and Fischer, Kirsten and Gabriel, Stacey and Lander, Eric and Nowak, Martin and Döhner, Hartmut and Hallek, Michael and Neuberg, Donna and Getz, Gad and Stilgenbauer, Stephan and Wu, Catherine},
journal = {Nature},
number = {7574},
pages = {525 -- 530},
publisher = {Nature Publishing Group},
title = {{Mutations driving CLL and their evolution in progression and relapse}},
doi = {10.1038/nature15395},
volume = {526},
year = {2015},
}
@article{1676,
author = {Sixt, Michael K and Raz, Erez},
journal = {Current Opinion in Cell Biology},
number = {10},
pages = {4 -- 6},
publisher = {Elsevier},
title = {{Editorial overview: Cell adhesion and migration}},
doi = {10.1016/j.ceb.2015.09.004},
volume = {36},
year = {2015},
}
@article{1677,
abstract = {We consider real symmetric and complex Hermitian random matrices with the additional symmetry $h_{xy} = h_{N-y,N-x}$. The matrix elements are independent (up to the fourfold symmetry) and not necessarily identically distributed. This ensemble naturally arises as the Fourier transform of a Gaussian orthogonal ensemble. It also occurs as the flip matrix model - an approximation of the two-dimensional Anderson model at small disorder. We show that the density of states converges to the Wigner semicircle law despite the new symmetry type. We also prove the local version of the semicircle law on the optimal scale.},
author = {Alt, Johannes},
journal = {Journal of Mathematical Physics},
number = {10},
publisher = {American Institute of Physics},
title = {{The local semicircle law for random matrices with a fourfold symmetry}},
doi = {10.1063/1.4932606},
volume = {56},
year = {2015},
}
@inproceedings{1729,
abstract = {We present a computer-aided programming approach to concurrency. The approach allows programmers to program assuming a friendly, non-preemptive scheduler, and our synthesis procedure inserts synchronization to ensure that the final program works even with a preemptive scheduler. The correctness specification is implicit, inferred from the non-preemptive behavior. Let us consider sequences of calls that the program makes to an external interface. The specification requires that any such sequence produced under a preemptive scheduler should be included in the set of such sequences produced under a non-preemptive scheduler. The solution is based on a finitary abstraction, an algorithm for bounded language inclusion modulo an independence relation, and rules for inserting synchronization. We apply the approach to device-driver programming, where the driver threads call the software interface of the device and the API provided by the operating system. Our experiments demonstrate that our synthesis method is precise and efficient, and, since it does not require explicit specifications, is more practical than the conventional approach based on user-provided assertions.},
author = {Cerny, Pavol and Clarke, Edmund and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Samanta, Roopsha and Tarrach, Thorsten},
booktitle = {Computer Aided Verification},
location = {San Francisco, CA, United States},
pages = {180 -- 197},
publisher = {Springer},
title = {{From non-preemptive to preemptive scheduling using synchronization synthesis}},
doi = {10.1007/978-3-319-21668-3_11},
volume = {9207},
year = {2015},
}
@article{1730,
abstract = {How much cutting is needed to simplify the topology of a surface? We provide bounds for several instances of this question, for the minimum length of topologically non-trivial closed curves, pants decompositions, and cut graphs with a given combinatorial map in triangulated combinatorial surfaces (or their dual cross-metric counterpart). Our work builds upon Riemannian systolic inequalities, which bound the minimum length of non-trivial closed curves in terms of the genus and the area of the surface. We first describe a systematic way to translate Riemannian systolic inequalities to a discrete setting, and vice-versa. This implies a conjecture by Przytycka and Przytycki (Graph structure theory. Contemporary Mathematics, vol. 147, 1993), a number of new systolic inequalities in the discrete setting, and the fact that a theorem of Hutchinson on the edge-width of triangulated surfaces and Gromov’s systolic inequality for surfaces are essentially equivalent. We also discuss how these proofs generalize to higher dimensions. Then we focus on topological decompositions of surfaces. Relying on ideas of Buser, we prove the existence of pants decompositions of length O(g^(3/2)n^(1/2)) for any triangulated combinatorial surface of genus g with n triangles, and describe an O(gn)-time algorithm to compute such a decomposition. Finally, we consider the problem of embedding a cut graph (or more generally a cellular graph) with a given combinatorial map on a given surface. Using random triangulations, we prove (essentially) that, for any choice of a combinatorial map, there are some surfaces on which any cellular embedding with that combinatorial map has length superlinear in the number of triangles of the triangulated combinatorial surface. There is also a similar result for graphs embedded on polyhedral triangulations.},
author = {Colin De Verdière, Éric and Hubard, Alfredo and De Mesmay, Arnaud N},
journal = {Discrete & Computational Geometry},
number = {3},
pages = {587 -- 620},
publisher = {Springer},
title = {{Discrete systolic inequalities and decompositions of triangulated surfaces}},
doi = {10.1007/s00454-015-9679-9},
volume = {53},
year = {2015},
}
@inproceedings{1732,
abstract = {We consider partially observable Markov decision processes (POMDPs), that are a standard framework for robotics applications to model uncertainties present in the real world, with temporal logic specifications. All temporal logic specifications in linear-time temporal logic (LTL) can be expressed as parity objectives. We study the qualitative analysis problem for POMDPs with parity objectives that asks whether there is a controller (policy) to ensure that the objective holds with probability 1 (almost-surely). While the qualitative analysis of POMDPs with parity objectives is undecidable, recent results show that when restricted to finite-memory policies the problem is EXPTIME-complete. While the problem is intractable in theory, we present a practical approach to solve the qualitative analysis problem. We designed several heuristics to deal with the exponential complexity, and have used our implementation on a number of well-known POMDP examples for robotics applications. Our results provide the first practical approach to solve the qualitative analysis of robot motion planning with LTL properties in the presence of uncertainty.},
author = {Chatterjee, Krishnendu and Chmelik, Martin and Gupta, Raghav and Kanodia, Ayush},
booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation},
location = {Seattle, WA, United States},
pages = {325 -- 330},
publisher = {IEEE},
title = {{Qualitative analysis of POMDPs with temporal logic specifications for robotics applications}},
doi = {10.1109/ICRA.2015.7139019},
year = {2015},
}
@article{1807,
abstract = {We study a double Cahn-Hilliard type functional related to the Gross-Pitaevskii energy of two-components Bose-Einstein condensates. In the case of large but same order intercomponent and intracomponent coupling strengths, we prove Γ-convergence to a perimeter minimisation functional with an inhomogeneous surface tension. We study the asymptotic behavior of the surface tension as the ratio between the intercomponent and intracomponent coupling strengths becomes very small or very large and obtain good agreement with the physical literature. We obtain as a consequence, symmetry breaking of the minimisers for the harmonic potential.},
author = {Goldman, Michael and Royo-Letelier, Jimena},
journal = {ESAIM - Control, Optimisation and Calculus of Variations},
number = {3},
pages = {603 -- 624},
publisher = {EDP Sciences},
title = {{Sharp interface limit for two components Bose-Einstein condensates}},
doi = {10.1051/cocv/2014040},
volume = {21},
year = {2015},
}
@article{1809,
abstract = {Background: Indirect genetic effects (IGEs) occur when genes expressed in one individual alter the expression of traits in social partners. Previous studies focused on the evolutionary consequences and evolutionary dynamics of IGEs, using equilibrium solutions to predict phenotypes in subsequent generations. However, whether or not such steady states may be reached may depend on the dynamics of interactions themselves. Results: In our study, we focus on the dynamics of social interactions and indirect genetic effects and investigate how they modify phenotypes over time. Unlike previous IGE studies, we do not analyse evolutionary dynamics; rather we consider within-individual phenotypic changes, also referred to as phenotypic plasticity. We analyse iterative interactions, when individuals interact in a series of discontinuous events, and investigate the stability of steady state solutions and the dependence on model parameters, such as population size, strength, and the nature of interactions. We show that for interactions where a feedback loop occurs, the possible parameter space of interaction strength is fairly limited, affecting the evolutionary consequences of IGEs. We discuss the implications of our results for current IGE model predictions and their limitations.},
author = {Trubenova, Barbora and Novak, Sebastian and Hager, Reinmar},
journal = {PLoS One},
number = {5},
publisher = {Public Library of Science},
title = {{Indirect genetic effects and the dynamics of social interactions}},
doi = {10.1371/journal.pone.0126907},
volume = {10},
year = {2015},
}
@article{1810,
abstract = {Combining antibiotics is a promising strategy for increasing treatment efficacy and for controlling resistance evolution. When drugs are combined, their effects on cells may be amplified or weakened, that is the drugs may show synergistic or antagonistic interactions. Recent work revealed the underlying mechanisms of such drug interactions by elucidating the drugs'; joint effects on cell physiology. Moreover, new treatment strategies that use drug combinations to exploit evolutionary tradeoffs were shown to affect the rate of resistance evolution in predictable ways. High throughput studies have further identified drug candidates based on their interactions with established antibiotics and general principles that enable the prediction of drug interactions were suggested. Overall, the conceptual and technical foundation for the rational design of potent drug combinations is rapidly developing.},
author = {Bollenbach, Mark Tobias},
journal = {Current Opinion in Microbiology},
pages = {1 -- 9},
publisher = {Elsevier},
title = {{Antimicrobial interactions: Mechanisms and implications for drug discovery and resistance evolution}},
doi = {10.1016/j.mib.2015.05.008},
volume = {27},
year = {2015},
}
@article{1811,
abstract = {Atomic form factors are widely used for the characterization of targets and specimens, from crystallography to biology. By using recent mathematical results, here we derive an analytical expression for the atomic form factor within the independent particle model constructed from nonrelativistic screened hydrogenic wave functions. The range of validity of this analytical expression is checked by comparing the analytically obtained form factors with the ones obtained within the Hartee-Fock method. As an example, we apply our analytical expression for the atomic form factor to evaluate the differential cross section for Rayleigh scattering off neutral atoms.},
author = {Safari, Laleh and Santos, José and Amaro, Pedro and Jänkälä, Kari and Fratini, Filippo},
journal = {Journal of Mathematical Physics},
number = {5},
publisher = {American Institute of Physics},
title = {{Analytical evaluation of atomic form factors: Application to Rayleigh scattering}},
doi = {10.1063/1.4921227},
volume = {56},
year = {2015},
}
@article{1812,
abstract = {We investigate the occurrence of rotons in a quadrupolar Bose–Einstein condensate confined to two dimensions. Depending on the particle density, the ratio of the contact and quadrupole–quadrupole interactions, and the alignment of the quadrupole moments with respect to the confinement plane, the dispersion relation features two or four point-like roton minima or one ring-shaped minimum. We map out the entire parameter space of the roton behavior and identify the instability regions. We propose to observe the exotic rotons by monitoring the characteristic density wave dynamics resulting from a short local perturbation, and discuss the possibilities to detect the predicted effects in state-of-the-art experiments with ultracold homonuclear molecules.
},
author = {Lahrz, Martin and Lemeshko, Mikhail and Mathey, Ludwig},
journal = {New Journal of Physics},
number = {4},
publisher = {IOP Publishing Ltd.},
title = {{Exotic roton excitations in quadrupolar Bose–Einstein condensates}},
doi = {10.1088/1367-2630/17/4/045005},
volume = {17},
year = {2015},
}
@article{1813,
abstract = {We develop a microscopic theory describing a quantum impurity whose rotational degree of freedom is coupled to a many-particle bath. We approach the problem by introducing the concept of an “angulon”—a quantum rotor dressed by a quantum field—and reveal its quasiparticle properties using a combination of variational and diagrammatic techniques. Our theory predicts renormalization of the impurity rotational structure, such as that observed in experiments with molecules in superfluid helium droplets, in terms of a rotational Lamb shift induced by the many-particle environment. Furthermore, we discover a rich many-body-induced fine structure, emerging in rotational spectra due to a redistribution of angular momentum within the quantum many-body system.},
author = {Schmidt, Richard and Lemeshko, Mikhail},
journal = {Physical Review Letters},
number = {20},
publisher = {American Physical Society},
title = {{Rotation of quantum impurities in the presence of a many-body environment}},
doi = {10.1103/PhysRevLett.114.203001},
volume = {114},
year = {2015},
}
@article{1814,
abstract = {We present an efficient wavefront tracking algorithm for animating bodies of water that interact with their environment. Our contributions include: a novel wavefront tracking technique that enables dispersion, refraction, reflection, and diffraction in the same simulation; a unique multivalued function interpolation method that enables our simulations to elegantly sidestep the Nyquist limit; a dispersion approximation for efficiently amplifying the number of simulated waves by several orders of magnitude; and additional extensions that allow for time-dependent effects and interactive artistic editing of the resulting animation. Our contributions combine to give us multitudes more wave details than similar algorithms, while maintaining high frame rates and allowing close camera zooms.},
author = {Jeschke, Stefan and Wojtan, Christopher J},
journal = {ACM Transactions on Graphics},
number = {3},
publisher = {ACM},
title = {{Water wave animation via wavefront parameter interpolation}},
doi = {10.1145/2714572},
volume = {34},
year = {2015},
}
@article{1817,
abstract = {Vertebrates have a unique 3D body shape in which correct tissue and organ shape and alignment are essential for function. For example, vision requires the lens to be centred in the eye cup which must in turn be correctly positioned in the head. Tissue morphogenesis depends on force generation, force transmission through the tissue, and response of tissues and extracellular matrix to force. Although a century ago D'Arcy Thompson postulated that terrestrial animal body shapes are conditioned by gravity, there has been no animal model directly demonstrating how the aforementioned mechano-morphogenetic processes are coordinated to generate a body shape that withstands gravity. Here we report a unique medaka fish (Oryzias latipes) mutant, hirame (hir), which is sensitive to deformation by gravity. hir embryos display a markedly flattened body caused by mutation of YAP, a nuclear executor of Hippo signalling that regulates organ size. We show that actomyosin-mediated tissue tension is reduced in hir embryos, leading to tissue flattening and tissue misalignment, both of which contribute to body flattening. By analysing YAP function in 3D spheroids of human cells, we identify the Rho GTPase activating protein ARHGAP18 as an effector of YAP in controlling tissue tension. Together, these findings reveal a previously unrecognised function of YAP in regulating tissue shape and alignment required for proper 3D body shape. Understanding this morphogenetic function of YAP could facilitate the use of embryonic stem cells to generate complex organs requiring correct alignment of multiple tissues. },
author = {Porazinski, Sean and Wang, Huijia and Asaoka, Yoichi and Behrndt, Martin and Miyamoto, Tatsuo and Morita, Hitoshi and Hata, Shoji and Sasaki, Takashi and Krens, Gabriel and Osada, Yumi and Asaka, Satoshi and Momoi, Akihiro and Linton, Sarah and Miesfeld, Joel and Link, Brian and Senga, Takeshi and Castillo Morales, Atahualpa and Urrutia, Araxi and Shimizu, Nobuyoshi and Nagase, Hideaki and Matsuura, Shinya and Bagby, Stefan and Kondoh, Hisato and Nishina, Hiroshi and Heisenberg, Carl-Philipp J and Furutani Seiki, Makoto},
journal = {Nature},
number = {7551},
pages = {217 -- 221},
publisher = {Nature Publishing Group},
title = {{YAP is essential for tissue tension to ensure vertebrate 3D body shape}},
doi = {10.1038/nature14215},
volume = {521},
year = {2015},
}
@article{1818,
abstract = {Why do species not adapt to ever-wider ranges of conditions, gradually expanding their ecological niche and geographic range? Gene flow across environments has two conflicting effects: although it increases genetic variation, which is a prerequisite for adaptation, gene flow may swamp adaptation to local conditions. In 1956, Haldane proposed that, when the environment varies across space, "swamping" by gene flow creates a positive feedback between low population size and maladaptation, leading to a sharp range margin. However, current deterministic theory shows that, when variance can evolve, there is no such limit. Using simple analytical tools and simulations, we show that genetic drift can generate a sharp margin to a species' range, by reducing genetic variance below the level needed for adaptation to spatially variable conditions. Aided by separation of ecological and evolutionary timescales, the identified effective dimensionless parameters reveal a simple threshold that predicts when adaptation at the range margin fails. Two observable parameters determine the threshold: (i) the effective environmental gradient, which can be measured by the loss of fitness due to dispersal to a different environment; and (ii) the efficacy of selection relative to genetic drift. The theory predicts sharp range margins even in the absence of abrupt changes in the environment. Furthermore, it implies that gradual worsening of conditions across a species' habitat may lead to a sudden range fragmentation, when adaptation to a wide span of conditions within a single species becomes impossible.},
author = {Polechova, Jitka and Barton, Nicholas H},
journal = {PNAS},
number = {20},
pages = {6401 -- 6406},
publisher = {National Academy of Sciences},
title = {{Limits to adaptation along environmental gradients}},
doi = {10.1073/pnas.1421515112},
volume = {112},
year = {2015},
}
@article{1819,
abstract = {The sessile life style of plants creates the need to deal with an often adverse environment, in which water availability can change on a daily basis, challenging the cellular physiology and integrity. Changes in osmotic conditions disrupt the equilibrium of the plasma membrane: hypoosmotic conditions increase and hyperosmotic environment decrease the cell volume. Here, we show that short-term extracellular osmotic treatments are closely followed by a shift in the balance between endocytosis and exocytosis in root meristem cells. Acute hyperosmotic treatments (ionic and nonionic) enhance clathrin-mediated endocytosis simultaneously attenuating exocytosis, whereas hypoosmotic treatments have the opposite effects. In addition to clathrin recruitment to the plasma membrane, components of early endocytic trafficking are essential during hyperosmotic stress responses. Consequently, growth of seedlings defective in elements of clathrin or early endocytic machinery is more sensitive to hyperosmotic treatments. We also found that the endocytotic response to a change of osmotic status in the environment is dominant over the presumably evolutionary more recent regulatory effect of plant hormones, such as auxin. These results imply that osmotic perturbation influences the balance between endocytosis and exocytosis acting through clathrin-mediated endocytosis. We propose that tension on the plasma membrane determines the addition or removal of membranes at the cell surface, thus preserving cell integrity.},
author = {Zwiewka, Marta and Nodzyński, Tomasz and Robert, Stéphanie and Vanneste, Steffen and Friml, Jiřĺ},
journal = {Molecular Plant},
number = {8},
pages = {1175 -- 1187},
publisher = {Elsevier},
title = {{Osmotic stress modulates the balance between exocytosis and clathrin mediated endocytosis in Arabidopsis thaliana}},
doi = {10.1016/j.molp.2015.03.007},
volume = {8},
year = {2015},
}
@inproceedings{1820,
abstract = {We consider partially observable Markov decision processes (POMDPs) with a set of target states and every transition is associated with an integer cost. The optimization objec- tive we study asks to minimize the expected total cost till the target set is reached, while ensuring that the target set is reached almost-surely (with probability 1). We show that for integer costs approximating the optimal cost is undecidable. For positive costs, our results are as follows: (i) we establish matching lower and upper bounds for the optimal cost and the bound is double exponential; (ii) we show that the problem of approximating the optimal cost is decidable and present ap- proximation algorithms developing on the existing algorithms for POMDPs with finite-horizon objectives. While the worst- case running time of our algorithm is double exponential, we present efficient stopping criteria for the algorithm and show experimentally that it performs well in many examples.},
author = {Chatterjee, Krishnendu and Chmelik, Martin and Gupta, Raghav and Kanodia, Ayush},
booktitle = {Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence },
location = {Austin, TX, USA},
pages = {3496 -- 3502},
publisher = {AAAI Press},
title = {{Optimal cost almost-sure reachability in POMDPs}},
volume = {5},
year = {2015},
}
@article{1823,
abstract = {Abstract Drug combinations are increasingly important in disease treatments, for combating drug resistance, and for elucidating fundamental relationships in cell physiology. When drugs are combined, their individual effects on cells may be amplified or weakened. Such drug interactions are crucial for treatment efficacy, but their underlying mechanisms remain largely unknown. To uncover the causes of drug interactions, we developed a systematic approach based on precise quantification of the individual and joint effects of antibiotics on growth of genome-wide Escherichia coli gene deletion strains. We found that drug interactions between antibiotics representing the main modes of action are highly robust to genetic perturbation. This robustness is encapsulated in a general principle of bacterial growth, which enables the quantitative prediction of mutant growth rates under drug combinations. Rare violations of this principle exposed recurring cellular functions controlling drug interactions. In particular, we found that polysaccharide and ATP synthesis control multiple drug interactions with previously unexplained mechanisms, and small molecule adjuvants targeting these functions synthetically reshape drug interactions in predictable ways. These results provide a new conceptual framework for the design of multidrug combinations and suggest that there are universal mechanisms at the heart of most drug interactions. Synopsis A general principle of bacterial growth enables the prediction of mutant growth rates under drug combinations. Rare violations of this principle expose cellular functions that control drug interactions and can be targeted by small molecules to alter drug interactions in predictable ways. Drug interactions between antibiotics are highly robust to genetic perturbations. A general principle of bacterial growth enables the prediction of mutant growth rates under drug combinations. 
Rare violations of this principle expose cellular functions that control drug interactions. Diverse drug interactions are controlled by recurring cellular functions, including LPS synthesis and ATP synthesis. A general principle of bacterial growth enables the prediction of mutant growth rates under drug combinations. Rare violations of this principle expose cellular functions that control drug interactions and can be targeted by small molecules to alter drug interactions in predictable ways.},
author = {Chevereau, Guillaume and Bollenbach, Mark Tobias},
journal = {Molecular Systems Biology},
number = {4},
publisher = {Nature Publishing Group},
title = {{Systematic discovery of drug interaction mechanisms}},
doi = {10.15252/msb.20156098},
volume = {11},
year = {2015},
}
@article{1824,
author = {Knebel, Johannes and Weber, Markus and Krüger, Torben H and Frey, Erwin},
title = {{Evolutionary games of condensates in coupled birth-death processes}},
journal = {Nature Communications},
volume = {6},
publisher = {Nature Publishing Group},
doi = {10.1038/ncomms7977},
year = {2015},
abstract = {Condensation phenomena arise through a collective behaviour of particles. They are observed in both classical and quantum systems, ranging from the formation of traffic jams in mass transport models to the macroscopic occupation of the energetic ground state in ultra-cold bosonic gases (Bose-Einstein condensation). Recently, it has been shown that a driven and dissipative system of bosons may form multiple condensates. Which states become the condensates has, however, remained elusive thus far. The dynamics of this condensation are described by coupled birth-death processes, which also occur in evolutionary game theory. Here we apply concepts from evolutionary game theory to explain the formation of multiple condensates in such driven-dissipative bosonic systems. We show that the vanishing of relative entropy production determines their selection. The condensation proceeds exponentially fast, but the system never comes to rest. Instead, the occupation numbers of condensates may oscillate, as we demonstrate for a rock-paper-scissors game of condensates.},
}
@article{1827,
author = {Friedlander, Tamar and Mayo, Avraham and Tlusty, Tsvi and Alon, Uri},
title = {{Evolution of bow-tie architectures in biology}},
journal = {PLoS Computational Biology},
volume = {11},
number = {3},
publisher = {Public Library of Science},
doi = {10.1371/journal.pcbi.1004055},
year = {2015},
abstract = {Bow-tie or hourglass structure is a common architectural feature found in many biological systems. A bow-tie in a multi-layered structure occurs when intermediate layers have much fewer components than the input and output layers. Examples include metabolism where a handful of building blocks mediate between multiple input nutrients and multiple output biomass components, and signaling networks where information from numerous receptor types passes through a small set of signaling pathways to regulate multiple output genes. Little is known, however, about how bow-tie architectures evolve. Here, we address the evolution of bow-tie architectures using simulations of multi-layered systems evolving to fulfill a given input-output goal. We find that bow-ties spontaneously evolve when the information in the evolutionary goal can be compressed. Mathematically speaking, bow-ties evolve when the rank of the input-output matrix describing the evolutionary goal is deficient. The maximal compression possible (the rank of the goal) determines the size of the narrowest part of the network—that is the bow-tie. A further requirement is that a process is active to reduce the number of links in the network, such as product-rule mutations, otherwise a non-bow-tie solution is found in the evolutionary simulations. This offers a mechanism to understand a common architectural principle of biological systems, and a way to quantitate the effective rank of the goals under which they evolved.},
}
@article{1828,
author = {Akopyan, Arseniy and Pirogov, Sergey and Rybko, Aleksandr},
title = {{Invariant measures of genetic recombination process}},
journal = {Journal of Statistical Physics},
volume = {160},
number = {1},
pages = {163 -- 167},
publisher = {Springer},
doi = {10.1007/s10955-015-1238-5},
year = {2015},
abstract = {We construct a non-linear Markov process connected with a biological model of a bacterial genome recombination. The description of invariant measures of this process gives us the solution of one problem in elementary probability theory.},
}
@article{1830,
abstract = {To prevent epidemics, insect societies have evolved collective disease defences that are highly effective at curing exposed individuals and limiting disease transmission to healthy group members. Grooming is an important sanitary behaviour—either performed towards oneself (self-grooming) or towards others (allogrooming)—to remove infectious agents from the body surface of exposed individuals, but at the risk of disease contraction by the groomer. We use garden ants (Lasius neglectus) and the fungal pathogen Metarhizium as a model system to study how pathogen presence affects self-grooming and allogrooming between exposed and healthy individuals. We develop an epidemiological SIS model to explore how experimentally observed grooming patterns affect disease spread within the colony, thereby providing a direct link between the expression and direction of sanitary behaviours, and their effects on colony-level epidemiology. We find that fungus-exposed ants increase self-grooming, while simultaneously decreasing allogrooming. This behavioural modulation seems universally adaptive and is predicted to contain disease spread in a great variety of host–pathogen systems. In contrast, allogrooming directed towards pathogen-exposed individuals might both increase and decrease disease risk. Our model reveals that the effect of allogrooming depends on the balance between pathogen infectiousness and efficiency of social host defences, which are likely to vary across host–pathogen systems.},
author = {Theis, Fabian and Ugelvig, Line V and Marr, Carsten and Cremer, Sylvia},
journal = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
number = {1669},
publisher = {Royal Society},
title = {{Opposing effects of allogrooming on disease transmission in ant societies}},
doi = {10.1098/rstb.2014.0108},
volume = {370},
year = {2015},
}
@article{1831,
author = {Kappeler, Peter and Cremer, Sylvia and Nunn, Charles},
title = {{Sociality and health: Impacts of sociality on disease susceptibility and transmission in animal and human societies}},
journal = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
volume = {370},
number = {1669},
publisher = {Royal Society},
doi = {10.1098/rstb.2014.0116},
year = {2015},
abstract = {This paper introduces a theme issue presenting the latest developments in research on the impacts of sociality on health and fitness. The articles that follow cover research on societies ranging from insects to humans. Variation in measures of fitness (i.e. survival and reproduction) has been linked to various aspects of sociality in humans and animals alike, and variability in individual health and condition has been recognized as a key mediator of these relationships. Viewed from a broad evolutionary perspective, the evolutionary transitions from a solitary lifestyle to group living have resulted in several new health-related costs and benefits of sociality. Social transmission of parasites within groups represents a major cost of group living, but some behavioural mechanisms, such as grooming, have evolved repeatedly to reduce this cost. Group living also has created novel costs in terms of altered susceptibility to infectious and non-infectious disease as a result of the unavoidable physiological consequences of social competition and integration, which are partly alleviated by social buffering in some vertebrates. Here, we define the relevant aspects of sociality, summarize their health-related costs and benefits, and discuss possible fitness measures in different study systems. Given the pervasive effects of social factors on health and fitness, we propose a synthesis of existing conceptual approaches in disease ecology, ecological immunology and behavioural neurosciences by adding sociality as a key factor, with the goal to generate a broader framework for organismal integration of health-related research.},
}
@article{1734,
author = {Klehm, Oliver and Rousselle, Fabrice and Papas, Marios and Bradley, Derek and Hery, Christophe and Bickel, Bernd and Jarosz, Wojciech and Beeler, Thabo},
title = {{Recent advances in facial appearance capture}},
journal = {Computer Graphics Forum},
volume = {34},
number = {2},
pages = {709 -- 733},
publisher = {Wiley-Blackwell},
doi = {10.1111/cgf.12594},
year = {2015},
abstract = {Facial appearance capture is now firmly established within academic research and used extensively across various application domains, perhaps most prominently in the entertainment industry through the design of virtual characters in video games and films. While significant progress has occurred over the last two decades, no single survey currently exists that discusses the similarities, differences, and practical considerations of the available appearance capture techniques as applied to human faces. A central difficulty of facial appearance capture is the way light interacts with skin-which has a complex multi-layered structure-and the interactions that occur below the skin surface can, by definition, only be observed indirectly. In this report, we distinguish between two broad strategies for dealing with this complexity. "Image-based methods" try to exhaustively capture the exact face appearance under different lighting and viewing conditions, and then render the face through weighted image combinations. "Parametric methods" instead fit the captured reflectance data to some parametric appearance model used during rendering, allowing for a more lightweight and flexible representation but at the cost of potentially increased rendering complexity or inexact reproduction. The goal of this report is to provide an overview that can guide practitioners and researchers in assessing the tradeoffs between current approaches and identifying directions for future advances in facial appearance capture.},
}
@article{1735,
author = {Ando, Ryoichi and Thürey, Nils and Wojtan, Christopher J},
title = {{A dimension-reduced pressure solver for liquid simulations}},
journal = {Computer Graphics Forum},
volume = {34},
number = {2},
pages = {473 -- 480},
publisher = {Wiley},
doi = {10.1111/cgf.12576},
year = {2015},
abstract = {This work presents a method for efficiently simplifying the pressure projection step in a liquid simulation. We first devise a straightforward dimension reduction technique that dramatically reduces the cost of solving the pressure projection. Next, we introduce a novel change of basis that satisfies free-surface boundary conditions exactly, regardless of the accuracy of the pressure solve. When combined, these ideas greatly reduce the computational complexity of the pressure solve without compromising free surface boundary conditions at the highest level of detail. Our techniques are easy to parallelize, and they effectively eliminate the computational bottleneck for large liquid simulations.},
}
@article{1804,
abstract = {It is known that in classical fluids turbulence typically occurs at high Reynolds numbers. But can turbulence occur at low Reynolds numbers? Here we investigate the transition to turbulence in the classic Taylor-Couette system in which the rotating fluids are manufactured ferrofluids with magnetized nanoparticles embedded in liquid carriers. We find that, in the presence of a magnetic field transverse to the symmetry axis of the system, turbulence can occur at Reynolds numbers that are at least one order of magnitude smaller than those in conventional fluids. This is established by extensive computational ferrohydrodynamics through a detailed investigation of transitions in the flow structure, and characterization of behaviors of physical quantities such as the energy, the wave number, and the angular momentum through the bifurcations. A finding is that, as the magnetic field is increased, onset of turbulence can be determined accurately and reliably. Our results imply that experimental investigation of turbulence may be feasible by using ferrofluids. Our study of transition to and evolution of turbulence in the Taylor-Couette ferrofluidic flow system provides insights into the challenging problem of turbulence control.},
author = {Altmeyer, Sebastian and Do, Younghae and Lai, Yingcheng},
journal = {Scientific Reports},
publisher = {Nature Publishing Group},
title = {{Transition to turbulence in Taylor-Couette ferrofluidic flow}},
doi = {10.1038/srep10781},
volume = {5},
year = {2015},
}
@article{1792,
author = {Pausinger, Florian and Svane, Anne},
title = {{A Koksma-Hlawka inequality for general discrepancy systems}},
journal = {Journal of Complexity},
volume = {31},
number = {6},
pages = {773 -- 797},
publisher = {Academic Press},
doi = {10.1016/j.jco.2015.06.002},
year = {2015},
abstract = {Motivated by recent ideas of Harman (Unif. Distrib. Theory, 2010) we develop a new concept of variation of multivariate functions on a compact Hausdorff space with respect to a collection D of subsets. We prove a general version of the Koksma-Hlawka theorem that holds for this notion of variation and discrepancy with respect to D. As special cases, we obtain Koksma-Hlawka inequalities for classical notions, such as extreme or isotropic discrepancy. For extreme discrepancy, our result coincides with the usual Koksma-Hlawka theorem. We show that the space of functions of bounded D-variation contains important discontinuous functions and is closed under natural algebraic operations. Finally, we illustrate the results on concrete integration problems from integral geometry and stereology.},
}
@article{1793,
author = {Symonova, Olga and Topp, Christopher and Edelsbrunner, Herbert},
title = {{DynamicRoots: A software platform for the reconstruction and analysis of growing plant roots}},
journal = {PLoS One},
volume = {10},
number = {6},
publisher = {Public Library of Science},
doi = {10.1371/journal.pone.0127657},
year = {2015},
abstract = {We present a software platform for reconstructing and analyzing the growth of a plant root system from a time-series of 3D voxelized shapes. It aligns the shapes with each other, constructs a geometric graph representation together with the function that records the time of growth, and organizes the branches into a hierarchy that reflects the order of creation. The software includes the automatic computation of structural and dynamic traits for each root in the system enabling the quantification of growth on fine-scale. These are important advances in plant phenotyping with applications to the study of genetic and environmental influences on growth.},
}
@inproceedings{1857,
abstract = {Sharing information between multiple tasks enables algorithms to achieve good generalization performance even from small amounts of training data. However, in a realistic scenario of multi-task learning not all tasks are equally related to each other, hence it could be advantageous to transfer information only between the most related tasks. In this work we propose an approach that processes multiple tasks in a sequence with sharing between subsequent tasks instead of solving all tasks jointly. Subsequently, we address the question of curriculum learning of tasks, i.e. finding the best order of tasks to be learned. Our approach is based on a generalization bound criterion for choosing the task order that optimizes the average expected classification performance over all tasks. Our experimental results show that learning multiple related tasks sequentially can be more effective than learning them jointly, the order in which tasks are being solved affects the overall performance, and that our model is able to automatically discover the favourable order of tasks. },
author = {Pentina, Anastasia and Sharmanska, Viktoriia and Lampert, Christoph},
booktitle = {{IEEE} Conference on Computer Vision and Pattern Recognition ({CVPR})},
location = {Boston, MA, United States},
pages = {5492 -- 5500},
publisher = {IEEE},
title = {{Curriculum learning of multiple tasks}},
doi = {10.1109/CVPR.2015.7299188},
year = {2015},
}
@inproceedings{1858,
abstract = {We study the problem of predicting the future, though only in the probabilistic sense of estimating a future state of a time-varying probability distribution. This is not only an interesting academic problem, but solving this extrapolation problem also has many practical applications, e.g. for training classifiers that have to operate under time-varying conditions. Our main contribution is a method for predicting the next step of the time-varying distribution from a given sequence of sample sets from earlier time steps. For this we rely on two recent machine learning techniques: embedding probability distributions into a reproducing kernel Hilbert space, and learning operators by vector-valued regression. We illustrate the working principles and the practical usefulness of our method by experiments on synthetic and real data. We also highlight an exemplary application: training a classifier in a domain adaptation setting without having access to examples from the test time distribution at training time.},
author = {Lampert, Christoph},
booktitle = {{IEEE} Conference on Computer Vision and Pattern Recognition ({CVPR})},
location = {Boston, MA, United States},
pages = {942 -- 950},
publisher = {IEEE},
title = {{Predicting the future behavior of a time-varying probability distribution}},
doi = {10.1109/CVPR.2015.7298696},
year = {2015},
}
@inproceedings{1859,
abstract = {Structural support vector machines (SSVMs) are amongst the best performing models for structured computer vision tasks, such as semantic image segmentation or human pose estimation. Training SSVMs, however, is computationally costly, because it requires repeated calls to a structured prediction subroutine (called \emph{max-oracle}), which has to solve an optimization problem itself, e.g. a graph cut.
In this work, we introduce a new algorithm for SSVM training that is more efficient than earlier techniques when the max-oracle is computationally expensive, as it is frequently the case in computer vision tasks. The main idea is to (i) combine the recent stochastic Block-Coordinate Frank-Wolfe algorithm with efficient hyperplane caching, and (ii) use an automatic selection rule for deciding whether to call the exact max-oracle or to rely on an approximate one based on the cached hyperplanes.
We show experimentally that this strategy leads to faster convergence to the optimum with respect to the number of required oracle calls, and that this translates into faster convergence with respect to the total runtime when the max-oracle is slow compared to the other steps of the algorithm. },
author = {Shah, Neel and Kolmogorov, Vladimir and Lampert, Christoph},
booktitle = {{IEEE} Conference on Computer Vision and Pattern Recognition ({CVPR})},
location = {Boston, MA, United States},
pages = {2737 -- 2745},
publisher = {IEEE},
title = {{A multi-plane block-coordinate Frank-Wolfe algorithm for training structural SVMs with a costly max-oracle}},
doi = {10.1109/CVPR.2015.7298890},
year = {2015},
}
@inproceedings{1860,
abstract = {Classifiers for object categorization are usually evaluated by their accuracy on a set of i.i.d. test examples. This provides us with an estimate of the expected error when applying the classifiers to a single new image. In real application, however, classifiers are rarely only used for a single image and then discarded. Instead, they are applied sequentially to many images, and these are typically not i.i.d. samples from a fixed data distribution, but they carry dependencies and their class distribution varies over time. In this work, we argue that the phenomenon of correlated data at prediction time is not a nuisance, but a blessing in disguise. We describe a probabilistic method for adapting classifiers at prediction time without having to retrain them. We also introduce a framework for creating realistically distributed image sequences, which offers a way to benchmark classifier adaptation methods, such as the one we propose. Experiments on the ILSVRC2010 and ILSVRC2012 datasets show that adapting object classification systems at prediction time can significantly reduce their error rate, even with no additional human feedback.},
author = {Royer, Amélie and Lampert, Christoph},
booktitle = {{IEEE} Conference on Computer Vision and Pattern Recognition ({CVPR})},
location = {Boston, MA, United States},
pages = {1401 -- 1409},
publisher = {IEEE},
title = {{Classifier adaptation at prediction time}},
doi = {10.1109/CVPR.2015.7298746},
year = {2015},
}
@article{1861,
abstract = {Continuous-time Markov chains are commonly used in practice for modeling biochemical reaction networks in which the inherent randomness of the molecular interactions cannot be ignored. This has motivated recent research effort into methods for parameter inference and experiment design for such models. The major difficulty is that such methods usually require one to iteratively solve the chemical master equation that governs the time evolution of the probability distribution of the system. This, however, is rarely possible, and even approximation techniques remain limited to relatively small and simple systems. An alternative explored in this article is to base methods on only some low-order moments of the entire probability distribution. We summarize the theory behind such moment-based methods for parameter inference and experiment design and provide new case studies where we investigate their performance.},
author = {Ruess, Jakob and Lygeros, John},
journal = {ACM Transactions on Modeling and Computer Simulation},
number = {2},
publisher = {ACM},
title = {{Moment-based methods for parameter inference and experiment design for stochastic biochemical reaction networks}},
doi = {10.1145/2688906},
volume = {25},
year = {2015},
}
@article{1864,
abstract = {The Altshuler–Shklovskii formulas (Altshuler and Shklovskii, BZh Eksp Teor Fiz 91:200, 1986) predict, for any disordered quantum system in the diffusive regime, a universal power law behaviour for the correlation functions of the mesoscopic eigenvalue density. In this paper and its companion (Erdős and Knowles, The Altshuler–Shklovskii formulas for random band matrices I: the unimodular case, 2013), we prove these formulas for random band matrices. In (Erdős and Knowles, The Altshuler–Shklovskii formulas for random band matrices I: the unimodular case, 2013) we introduced a diagrammatic approach and presented robust estimates on general diagrams under certain simplifying assumptions. In this paper, we remove these assumptions by giving a general estimate of the subleading diagrams. We also give a precise analysis of the leading diagrams which give rise to the Altschuler–Shklovskii power laws. Moreover, we introduce a family of general random band matrices which interpolates between real symmetric (β = 1) and complex Hermitian (β = 2) models, and track the transition for the mesoscopic density–density correlation. Finally, we address the higher-order correlation functions by proving that they behave asymptotically according to a Gaussian process whose covariance is given by the Altshuler–Shklovskii formulas.},
author = {Erdős, László and Knowles, Antti},
journal = {Annales Henri Poincaré},
number = {3},
pages = {709 -- 799},
publisher = {Springer},
title = {{The Altshuler–Shklovskii formulas for random band matrices II: The general case}},
doi = {10.1007/s00023-014-0333-5},
volume = {16},
year = {2015},
}
@article{1865,
abstract = {The plant hormone auxin and its directional transport are known to play a crucial role in defining the embryonic axis and subsequent development of the body plan. Although the role of PIN auxin efflux transporters has been clearly assigned during embryonic shoot and root specification, the role of the auxin influx carriers AUX1 and LIKE-AUX1 (LAX) proteins is not well established. Here, we used chemical and genetic tools on Brassica napus microspore-derived embryos and Arabidopsis thaliana zygotic embryos, and demonstrate that AUX1, LAX1 and LAX2 are required for both shoot and root pole formation, in concert with PIN efflux carriers. Furthermore, we uncovered a positive-feedback loop between MONOPTEROS (ARF5)-dependent auxin signalling and auxin transport. This MONOPTEROS-dependent transcriptional regulation of auxin influx (AUX1, LAX1 and LAX2) and auxin efflux (PIN1 and PIN4) carriers by MONOPTEROS helps to maintain proper auxin transport to the root tip. These results indicate that auxin-dependent cell specification during embryo development requires balanced auxin transport involving both influx and efflux mechanisms, and that this transport is maintained by a positive transcriptional feedback on auxin signalling.},
author = {Robert, Hélène and Grunewald, Wim and Sauer, Michael and Cannoot, Bernard and Soriano, Mercedes and Swarup, Ranjan and Weijers, Dolf and Bennett, Malcolm and Boutilier, Kim and Friml, Jiří},
journal = {Development},
number = {4},
pages = {702 -- 711},
publisher = {Company of Biologists},
title = {{Plant embryogenesis requires AUX/LAX-mediated auxin influx}},
doi = {10.1242/dev.115832},
volume = {142},
year = {2015},
}
@article{1866,
author = {Henzinger, Thomas A and Raskin, Jean-François},
journal = {Communications of the ACM},
number = {2},
pages = {86 -- 86},
publisher = {ACM},
title = {{The equivalence problem for finite automata: Technical perspective}},
doi = {10.1145/2701001},
volume = {58},
year = {2015},
}
@article{1867,
abstract = {Cultured mammalian cells are essential model systems in basic biology research, production platforms of proteins for medical use, and testbeds in synthetic biology. Flavin cofactors, in particular flavin mononucleotide (FMN) and flavin adenine dinucleotide (FAD), are critical for cellular redox reactions and sense light in naturally occurring photoreceptors and optogenetic tools. Here, we quantified flavin contents of commonly used mammalian cell lines. We first compared three procedures for extraction of free and noncovalently protein-bound flavins and verified extraction using fluorescence spectroscopy. For separation, two CE methods with different BGEs were established, and detection was performed by LED-induced fluorescence with limit of detections (LODs 0.5-3.8 nM). We found that riboflavin (RF), FMN, and FAD contents varied significantly between cell lines. RF (3.1-14 amol/cell) and FAD (2.2-17.0 amol/cell) were the predominant flavins, while FMN (0.46-3.4 amol/cell) was found at markedly lower levels. Observed flavin contents agree with those previously extracted from mammalian tissues, yet reduced forms of RF were detected that were not described previously. Quantification of flavins in mammalian cell lines will allow a better understanding of cellular redox reactions and optogenetic tools.},
author = {Hühner, Jens and Inglés Prieto, Álvaro and Neusüß, Christian and Lämmerhofer, Michael and Janovjak, Harald L},
journal = {Electrophoresis},
number = {4},
pages = {518 -- 525},
publisher = {Wiley},
title = {{Quantification of riboflavin, flavin mononucleotide, and flavin adenine dinucleotide in mammalian model cells by CE with LED-induced fluorescence detection}},
doi = {10.1002/elps.201400451},
volume = {36},
year = {2015},
}
@article{1868,
author = {Park, Youngyong and Do, Younghae and Altmeyer, Sebastian and Lai, Yingcheng and Lee, Gyuwon},
title = {{Early effect in time-dependent, high-dimensional nonlinear dynamical systems with multiple resonances}},
journal = {Physical Review E},
volume = {91},
number = {2},
publisher = {American Physical Society},
issn = {1539-3755},
doi = {10.1103/PhysRevE.91.022906},
year = {2015},
abstract = {We investigate high-dimensional nonlinear dynamical systems exhibiting multiple resonances under adiabatic parameter variations. Our motivations come from experimental considerations where time-dependent sweeping of parameters is a practical approach to probing and characterizing the bifurcations of the system. The question is whether bifurcations so detected are faithful representations of the bifurcations intrinsic to the original stationary system. Utilizing a harmonically forced, closed fluid flow system that possesses multiple resonances and solving the Navier-Stokes equation under proper boundary conditions, we uncover the phenomenon of the early effect. Specifically, as a control parameter, e.g., the driving frequency, is adiabatically increased from an initial value, resonances emerge at frequency values that are lower than those in the corresponding stationary system. The phenomenon is established by numerical characterization of physical quantities through the resonances, which include the kinetic energy and the vorticity field, and a heuristic analysis based on the concept of instantaneous frequency. A simple formula is obtained which relates the resonance points in the time-dependent and time-independent systems. Our findings suggest that, in general, any true bifurcation of a nonlinear dynamical system can be unequivocally uncovered through adiabatic parameter sweeping, in spite of a shift in the bifurcation point, which is of value to experimental studies of nonlinear dynamical systems.},
}
@article{1871,
abstract = {The plant hormone auxin is a key regulator of plant growth and development. Differences in auxin distribution within tissues are mediated by the polar auxin transport machinery, and cellular auxin responses occur depending on changes in cellular auxin levels. Multiple receptor systems at the cell surface and in the interior operate to sense and interpret fluctuations in auxin distribution that occur during plant development. Until now, three proteins or protein complexes that can bind auxin have been identified. SCFTIR1 [a SKP1-cullin-1-F-box complex that contains transport inhibitor response 1 (TIR1) as the F-box protein] and S-phase-kinase-associated protein 2 (SKP2) localize to the nucleus, whereas auxin-binding protein 1 (ABP1), predominantly associates with the endoplasmic reticulum and cell surface. In this Cell Science at a Glance article, we summarize recent discoveries in the field of auxin transport and signaling that have led to the identification of new components of these pathways, as well as their mutual interaction.},
author = {Grones, Peter and Friml, Jiří},
journal = {Journal of Cell Science},
number = {1},
pages = {1 -- 7},
publisher = {Company of Biologists},
title = {{Auxin transporters and binding proteins at a glance}},
doi = {10.1242/jcs.159418},
volume = {128},
year = {2015},
}
@article{1873,
author = {Chatterjee, Krishnendu and Chmelik, Martin},
title = {{POMDPs under probabilistic semantics}},
journal = {Artificial Intelligence},
volume = {221},
pages = {46 -- 72},
publisher = {Elsevier},
doi = {10.1016/j.artint.2014.12.009},
year = {2015},
abstract = {We consider partially observable Markov decision processes (POMDPs) with limit-average payoff, where a reward value in the interval [0,1] is associated with every transition, and the payoff of an infinite path is the long-run average of the rewards. We consider two types of path constraints: (i) a quantitative constraint defines the set of paths where the payoff is at least a given threshold λ1ε(0,1]; and (ii) a qualitative constraint which is a special case of the quantitative constraint with λ1=1. We consider the computation of the almost-sure winning set, where the controller needs to ensure that the path constraint is satisfied with probability 1. Our main results for qualitative path constraints are as follows: (i) the problem of deciding the existence of a finite-memory controller is EXPTIME-complete; and (ii) the problem of deciding the existence of an infinite-memory controller is undecidable. For quantitative path constraints we show that the problem of deciding the existence of a finite-memory controller is undecidable. We also present a prototype implementation of our EXPTIME algorithm and experimental results on several examples.},
}
@article{1874,
abstract = {The hippocampal region, comprising the hippocampal formation and the parahippocampal region, has been one of the most intensively studied parts of the brain for decades. Better understanding of its functional diversity and complexity has led to an increased demand for specificity in experimental procedures and manipulations. In view of the complex 3D structure of the hippocampal region, precisely positioned experimental approaches require a fine-grained architectural description that is available and readable to experimentalists lacking detailed anatomical experience. In this paper, we provide the first cyto- and chemoarchitectural description of the hippocampal formation and parahippocampal region in the rat at high resolution and in the three standard sectional planes: coronal, horizontal and sagittal. The atlas uses a series of adjacent sections stained for neurons and for a number of chemical marker substances, particularly parvalbumin and calbindin. All the borders defined in one plane have been cross-checked against their counterparts in the other two planes. The entire dataset will be made available as a web-based interactive application through the Rodent Brain WorkBench (http://www.rbwb.org) which, together with this paper, provides a unique atlas resource.},
author = {Boccara, Charlotte and Kjønigsen, Lisa and Hammer, Ingvild and Bjaalie, Jan and Leergaard, Trygve and Witter, Menno},
journal = {Hippocampus},
number = {7},
pages = {838 -- 857},
publisher = {Wiley},
title = {{A three-plane architectonic atlas of the rat hippocampal region}},
doi = {10.1002/hipo.22407},
volume = {25},
year = {2015},
}
@article{1878,
abstract = {Petrocoptis is a small genus of chasmophytic plants endemic to the Iberian Peninsula, with some localized populations in the French Pyrenees. Within the genus, a dozen species have been recognized based on morphological diversity, most of them with limited distribution area, in small populations and frequently with potential threats to their survival. To date, however, a molecular evaluation of the current systematic treatments has not been carried out. The aim of the present study is to infer phylogenetic relationships among its subordinate taxa by using plastidial rps16 intron and nuclear internal transcribed spacer (ITS) DNA sequences; and evaluate the phylogenetic placement of the genus Petrocoptis within the family Caryophyllaceae. The monophyly of Petrocoptis is supported by both ITS and rps16 intron sequence analyses. Furthermore, time estimates using BEAST analyses indicate a Middle to Late Miocene diversification (10.59 Myr, 6.44–15.26 Myr highest posterior densities [HPD], for ITS; 14.30 Myr, 8.61–21.00 Myr HPD, for rps16 intron).},
author = {Cires Rodriguez, Eduardo and Prieto, José},
journal = {Journal of Plant Research},
number = {2},
pages = {223 -- 238},
publisher = {Springer},
title = {{Phylogenetic relationships of Petrocoptis A. Braun ex Endl. (Caryophyllaceae), a discussed genus from the Iberian Peninsula}},
doi = {10.1007/s10265-014-0691-6},
volume = {128},
year = {2015},
}
@article{1879,
abstract = {When electron microscopy (EM) was introduced in the 1930s it gave scientists their first look into the nanoworld of cells. Over the last 80 years EM has vastly increased our understanding of the complex cellular structures that underlie the diverse functions that cells need to maintain life. One drawback that has been difficult to overcome was the inherent lack of volume information, mainly due to the limit on the thickness of sections that could be viewed in a transmission electron microscope (TEM). For many years scientists struggled to achieve three-dimensional (3D) EM using serial section reconstructions, TEM tomography, and scanning EM (SEM) techniques such as freeze-fracture. Although each technique yielded some special information, they required a significant amount of time and specialist expertise to obtain even a very small 3D EM dataset. Almost 20 years ago scientists began to exploit SEMs to image blocks of embedded tissues and perform serial sectioning of these tissues inside the SEM chamber. Using first focused ion beams (FIB) and subsequently robotic ultramicrotomes (serial block-face, SBF-SEM) microscopists were able to collect large volumes of 3D EM information at resolutions that could address many important biological questions, and do so in an efficient manner. We present here some examples of 3D EM taken from the many diverse specimens that have been imaged in our core facility. We propose that the next major step forward will be to efficiently correlate functional information obtained using light microscopy (LM) with 3D EM datasets to more completely investigate the important links between cell structures and their functions.},
author = {Kremer, A and Lippens, Stefaan and Bartunkova, Sonia and Asselbergh, Bob and Blanpain, Cédric and Fendrych, Matyas and Goossens, A and Holt, Matthew and Janssens, Sophie and Krols, Michiel and Larsimont, Jean and Mc Guire, Conor and Nowack, Moritz and Saelens, Xavier and Schertel, Andreas and Schepens, B and Slezak, M and Timmerman, Vincent and Theunis, Clara and Van Brempt, Ronald and Visser, Y and Guérin, Christophe},
journal = {Journal of Microscopy},
number = {2},
pages = {80 -- 96},
publisher = {Wiley-Blackwell},
title = {{Developing 3D SEM in a broad biological context}},
doi = {10.1111/jmi.12211},
volume = {259},
year = {2015},
}
@article{1880,
abstract = {We investigate the relation between Bose-Einstein condensation (BEC) and superfluidity in the ground state of a one-dimensional model of interacting bosons in a strong random potential. We prove rigorously that in a certain parameter regime the superfluid fraction can be arbitrarily small while complete BEC prevails. In another regime there is both complete BEC and complete superfluidity, despite the strong disorder.},
author = {Könenberg, Martin and Moser, Thomas and Seiringer, Robert and Yngvason, Jakob},
journal = {New Journal of Physics},
publisher = {IOP Publishing Ltd.},
title = {{Superfluid behavior of a Bose-Einstein condensate in a random potential}},
doi = {10.1088/1367-2630/17/1/013022},
volume = {17},
year = {2015},
}
@inproceedings{1882,
abstract = {We provide a framework for compositional and iterative design and verification of systems with quantitative information, such as rewards, time or energy. It is based on disjunctive modal transition systems where we allow actions to bear various types of quantitative information. Throughout the design process the actions can be further refined and the information made more precise. We show how to compute the results of standard operations on the systems, including the quotient (residual), which has not been previously considered for quantitative non-deterministic systems. Our quantitative framework has close connections to the modal nu-calculus and is compositional with respect to general notions of distances between systems and the standard operations.},
author = {Fahrenberg, Uli and Kretinsky, Jan and Legay, Axel and Traonouez, Louis},
location = {Bertinoro, Italy},
pages = {306 -- 324},
publisher = {Springer},
title = {{Compositionality for quantitative specifications}},
doi = {10.1007/978-3-319-15317-9_19},
volume = {8997},
year = {2015},
}
@article{1883,
abstract = {We introduce a one-parametric family of tree growth models, in which branching probabilities decrease with branch age τ as τ^(-α). Depending on the exponent α, the scaling of tree depth with tree size n displays a transition between the logarithmic scaling of random trees and an algebraic growth. At the transition (α=1) tree depth grows as (log n)^2. This anomalous scaling is in good agreement with the trend observed in evolution of biological species, thus providing a theoretical support for age-dependent speciation and associating it to the occurrence of a critical point.
},
author = {Keller-Schmidt, Stephanie and Tugrul, Murat and Eguíluz, Víctor and Hernandez Garcia, Emilio and Klemm, Konstantin},
journal = {Physical Review E Statistical Nonlinear and Soft Matter Physics},
number = {2},
publisher = {American Physical Society},
title = {{Anomalous scaling in an age-dependent branching model}},
doi = {10.1103/PhysRevE.91.022803},
volume = {91},
year = {2015},
}
@article{1885,
abstract = {The concept of positional information is central to our understanding of how cells determine their location in a multicellular structure and thereby their developmental fates. Nevertheless, positional information has neither been defined mathematically nor quantified in a principled way. Here we provide an information-theoretic definition in the context of developmental gene expression patterns and examine the features of expression patterns that affect positional information quantitatively. We connect positional information with the concept of positional error and develop tools to directly measure information and error from experimental data. We illustrate our framework for the case of gap gene expression patterns in the early Drosophila embryo and show how information that is distributed among only four genes is sufficient to determine developmental fates with nearly single-cell resolution. Our approach can be generalized to a variety of different model systems; procedures and examples are discussed in detail. },
author = {Tkacik, Gasper and Dubuis, Julien and Petkova, Mariela and Gregor, Thomas},
journal = {Genetics},
number = {1},
pages = {39 -- 59},
publisher = {Genetics Society of America},
title = {{Positional information, positional error, and readout precision in morphogenesis: A mathematical framework}},
doi = {10.1534/genetics.114.171850},
volume = {199},
year = {2015},
}
@article{1938,
abstract = {We numerically investigate the distribution of extrema of 'chaotic' Laplacian eigenfunctions on two-dimensional manifolds. Our contribution is two-fold: (a) we count extrema on grid graphs with a small number of randomly added edges and show the behavior to coincide with the 1957 prediction of Longuet-Higgins for the continuous case and (b) we compute the regularity of their spatial distribution using discrepancy, which is a classical measure from the theory of Monte Carlo integration. The first part suggests that grid graphs with randomly added edges should behave like two-dimensional surfaces with ergodic geodesic flow; in the second part we show that the extrema are more regularly distributed in space than the grid Z^2.},
author = {Pausinger, Florian and Steinerberger, Stefan},
journal = {Physics Letters, Section A},
number = {6},
pages = {535 -- 541},
publisher = {Elsevier},
title = {{On the distribution of local extrema in quantum chaos}},
doi = {10.1016/j.physleta.2014.12.010},
volume = {379},
year = {2015},
}
@article{1939,
author = {Dereziński, Jan and Napiórkowski, Marcin M},
journal = {Annales Henri Poincare},
number = {7},
pages = {1709 -- 1711},
publisher = {Birkhäuser},
title = {{Erratum to: Excitation spectrum of interacting bosons in the Mean-Field Infinite-Volume limit}},
doi = {10.1007/s00023-014-0390-9},
volume = {16},
year = {2015},
}
@article{1940,
abstract = {We typically think of cells as responding to external signals independently by regulating their gene expression levels, yet they often locally exchange information and coordinate. Can such spatial coupling be of benefit for conveying signals subject to gene regulatory noise? Here we extend our information-theoretic framework for gene regulation to spatially extended systems. As an example, we consider a lattice of nuclei responding to a concentration field of a transcriptional regulator (the "input") by expressing a single diffusible target gene. When input concentrations are low, diffusive coupling markedly improves information transmission; optimal gene activation functions also systematically change. A qualitatively new regulatory strategy emerges where individual cells respond to the input in a nearly step-like fashion that is subsequently averaged out by strong diffusion. While motivated by early patterning events in the Drosophila embryo, our framework is generically applicable to spatially coupled stochastic gene expression models.},
author = {Sokolowski, Thomas R and Tkacik, Gasper},
journal = {Physical Review E Statistical Nonlinear and Soft Matter Physics},
number = {6},
publisher = {American Physical Society},
title = {{Optimizing information flow in small genetic networks. IV. Spatial coupling}},
doi = {10.1103/PhysRevE.91.062710},
volume = {91},
year = {2015},
}
@article{1944,
author = {Rakusová, Hana and Fendrych, Matyas and Friml, Jirí},
journal = {Current Opinion in Plant Biology},
number = {2},
pages = {116 -- 123},
publisher = {Elsevier},
title = {{Intracellular trafficking and PIN-mediated cell polarity during tropic responses in plants}},
doi = {10.1016/j.pbi.2014.12.002},
volume = {23},
year = {2015},
}
@inproceedings{1992,
abstract = {We present a method and a tool for generating succinct representations of sets of concurrent traces. We focus on trace sets that contain all correct or all incorrect permutations of events from a given trace. We represent trace sets as HB-Formulas that are Boolean combinations of happens-before constraints between events. To generate a representation of incorrect interleavings, our method iteratively explores interleavings that violate the specification and gathers generalizations of the discovered interleavings into an HB-Formula; its complement yields a representation of correct interleavings.
We claim that our trace set representations can drive diverse verification, fault localization, repair, and synthesis techniques for concurrent programs. We demonstrate this by using our tool in three case studies involving synchronization synthesis, bug summarization, and abstraction refinement based verification. In each case study, our initial experimental results have been promising.
In the first case study, we present an algorithm for inferring missing synchronization from an HB-Formula representing correct interleavings of a given trace. The algorithm applies rules to rewrite specific patterns in the HB-Formula into locks, barriers, and wait-notify constructs. In the second case study, we use an HB-Formula representing incorrect interleavings for bug summarization. While the HB-Formula itself is a concise counterexample summary, we present additional inference rules to help identify specific concurrency bugs such as data races, define-use order violations, and two-stage access bugs. In the final case study, we present a novel predicate learning procedure that uses HB-Formulas representing abstract counterexamples to accelerate counterexample-guided abstraction refinement (CEGAR). In each iteration of the CEGAR loop, the procedure refines the abstraction to eliminate multiple spurious abstract counterexamples drawn from the HB-Formula.},
author = {Gupta, Ashutosh and Henzinger, Thomas A and Radhakrishna, Arjun and Samanta, Roopsha and Tarrach, Thorsten},
isbn = {978-1-4503-3300-9},
location = {Mumbai, India},
pages = {433 -- 444},
publisher = {ACM},
title = {{Succinct representation of concurrent trace sets}},
doi = {10.1145/2676726.2677008},
year = {2015},
}
@article{1993,
abstract = {The fitness effects of symbionts on their hosts can be context-dependent, with usually benign symbionts causing detrimental effects when their hosts are stressed, or typically parasitic symbionts providing protection towards their hosts (e.g. against pathogen infection). Here, we studied the novel association between the invasive garden ant Lasius neglectus and its fungal ectosymbiont Laboulbenia formicarum for potential costs and benefits. We tested ants with different Laboulbenia levels for their survival and immunity under resource limitation and exposure to the obligate killing entomopathogen Metarhizium brunneum. While survival of L. neglectus workers under starvation was significantly decreased with increasing Laboulbenia levels, host survival under Metarhizium exposure increased with higher levels of the ectosymbiont, suggesting a symbiont-mediated anti-pathogen protection, which seems to be driven mechanistically by both improved sanitary behaviours and an upregulated immune system. Ants with high Laboulbenia levels showed significantly longer self-grooming and elevated expression of immune genes relevant for wound repair and antifungal responses (β-1,3-glucan binding protein, Prophenoloxidase), compared with ants carrying low Laboulbenia levels. This suggests that the ectosymbiont Laboulbenia formicarum weakens its ant host by either direct resource exploitation or the costs of an upregulated behavioural and immunological response, which, however, provides a prophylactic protection upon later exposure to pathogens. },
author = {Konrad, Matthias and Grasse, Anna V and Tragust, Simon and Cremer, Sylvia},
journal = {Proceedings of the Royal Society of London Series B Biological Sciences},
number = {1799},
publisher = {Royal Society},
title = {{Anti-pathogen protection versus survival costs mediated by an ectosymbiont in an ant host}},
doi = {10.1098/rspb.2014.1976},
volume = {282},
year = {2015},
}
@article{1997,
abstract = {We prove that the three-state toric homogeneous Markov chain model has Markov degree two. In algebraic terminology this means, that a certain class of toric ideals is generated by quadratic binomials. This was conjectured by Haws, Martin del Campo, Takemura and Yoshida, who proved that they are generated by degree six binomials.},
author = {Noren, Patrik},
journal = {Journal of Symbolic Computation},
number = {May-June},
pages = {285 -- 296},
publisher = {Elsevier},
title = {{The three-state toric homogeneous Markov chain model has Markov degree two}},
doi = {10.1016/j.jsc.2014.09.014},
volume = {68/Part 2},
year = {2015},
}
@phdthesis{1399,
abstract = {This thesis is concerned with the computation and approximation of intrinsic volumes. Given a smooth body M and a certain digital approximation of it, we develop algorithms to approximate various intrinsic volumes of M using only measurements taken from its digital approximations. The crucial idea behind our novel algorithms is to link the recent theory of persistent homology to the theory of intrinsic volumes via the Crofton formula from integral geometry and, in particular, via Euler characteristic computations. Our main contributions are a multigrid convergent digital algorithm to compute the first intrinsic volume of a solid body in R^n as well as an appropriate integration pipeline to approximate integral-geometric integrals defined over the Grassmannian manifold.},
author = {Pausinger, Florian},
pages = {144},
publisher = {IST Austria},
title = {{On the approximation of intrinsic volumes}},
year = {2015},
}
@article{1832,
abstract = {Linearizability of concurrent data structures is usually proved by monolithic simulation arguments relying on the identification of the so-called linearization points. Regrettably, such proofs, whether manual or automatic, are often complicated and scale poorly to advanced non-blocking concurrency patterns, such as helping and optimistic updates. In response, we propose a more modular way of checking linearizability of concurrent queue algorithms that does not involve identifying linearization points. We reduce the task of proving linearizability with respect to the queue specification to establishing four basic properties, each of which can be proved independently by simpler arguments. As a demonstration of our approach, we verify the Herlihy and Wing queue, an algorithm that is challenging to verify by a simulation proof. },
author = {Chakraborty, Soham and Henzinger, Thomas A and Sezgin, Ali and Vafeiadis, Viktor},
journal = {Logical Methods in Computer Science},
number = {1},
publisher = {International Federation of Computational Logic},
title = {{Aspect-oriented linearizability proofs}},
doi = {10.2168/LMCS-11(1:20)2015},
volume = {11},
year = {2015},
}
@phdthesis{1400,
abstract = {Cancer results from an uncontrolled growth of abnormal cells. Sequentially accumulated genetic and epigenetic alterations decrease cell death and increase cell replication. We used mathematical models to quantify the effect of driver gene mutations. The recently developed targeted therapies can lead to dramatic regressions. However, in solid cancers, clinical responses are often short-lived because resistant cancer cells evolve. We estimated that approximately 50 different mutations can confer resistance to a typical targeted therapeutic agent. We find that resistant cells are likely to be present in expanded subclones before the start of the treatment. The dominant strategy to prevent the evolution of resistance is combination therapy. Our analytical results suggest that in most patients, dual therapy, but not monotherapy, can result in long-term disease control. However, long-term control can only occur if there are no possible mutations in the genome that can cause cross-resistance to both drugs. Furthermore, we showed that simultaneous therapy with two drugs is much more likely to result in long-term disease control than sequential therapy with the same drugs. To improve our understanding of the underlying subclonal evolution we reconstruct the evolutionary history of a patient's cancer from next-generation sequencing data of spatially-distinct DNA samples. Using a quantitative measure of genetic relatedness, we found that pancreatic cancers and their metastases demonstrated a higher level of relatedness than that expected for any two cells randomly taken from a normal tissue. This minimal amount of genetic divergence among advanced lesions indicates that genetic heterogeneity, when quantitatively defined, is not a fundamental feature of the natural history of untreated pancreatic cancers. 
Our newly developed, phylogenomic tool Treeomics finds evidence for seeding patterns of metastases and can directly be used to discover rules governing the evolution of solid malignancies to transform cancer into a more predictable disease.},
author = {Reiter, Johannes},
pages = {183},
publisher = {IST Austria},
title = {{The subclonal evolution of cancer}},
year = {2015},
}
@article{1731,
abstract = {We consider two-player zero-sum games on graphs. These games can be classified on the basis of the information of the players and on the mode of interaction between them. On the basis of information the classification is as follows: (a) partial-observation (both players have partial view of the game); (b) one-sided complete-observation (one player has complete observation); and (c) complete-observation (both players have complete view of the game). On the basis of mode of interaction we have the following classification: (a) concurrent (both players interact simultaneously); and (b) turn-based (both players interact in turn). The two sources of randomness in these games are randomness in transition function and randomness in strategies. In general, randomized strategies are more powerful than deterministic strategies, and randomness in transitions gives more general classes of games. In this work we present a complete characterization for the classes of games where randomness is not helpful in: (a) the transition function (probabilistic transition can be simulated by deterministic transition); and (b) strategies (pure strategies are as powerful as randomized strategies). As consequence of our characterization we obtain new undecidability results for these games. },
author = {Chatterjee, Krishnendu and Doyen, Laurent and Gimbert, Hugo and Henzinger, Thomas A},
journal = {Information and Computation},
number = {12},
pages = {3 -- 16},
publisher = {Elsevier},
title = {{Randomness for free}},
doi = {10.1016/j.ic.2015.06.003},
volume = {245},
year = {2015},
}
@article{1856,
abstract = {The traditional synthesis question given a specification asks for the automatic construction of a system that satisfies the specification, whereas often there exists a preference order among the different systems that satisfy the given specification. Under a probabilistic assumption about the possible inputs, such a preference order is naturally expressed by a weighted automaton, which assigns to each word a value, such that a system is preferred if it generates a higher expected value. We solve the following optimal synthesis problem: given an omega-regular specification, a Markov chain that describes the distribution of inputs, and a weighted automaton that measures how well a system satisfies the given specification under the input assumption, synthesize a system that optimizes the measured value. For safety specifications and quantitative measures that are defined by mean-payoff automata, the optimal synthesis problem reduces to finding a strategy in a Markov decision process (MDP) that is optimal for a long-run average reward objective, which can be achieved in polynomial time. For general omega-regular specifications along with mean-payoff automata, the solution rests on a new, polynomial-time algorithm for computing optimal strategies in MDPs with mean-payoff parity objectives. Our algorithm constructs optimal strategies that consist of two memoryless strategies and a counter. The counter is in general not bounded. To obtain a finite-state system, we show how to construct an ε-optimal strategy with a bounded counter, for all ε > 0. Furthermore, we show how to decide in polynomial time if it is possible to construct an optimal finite-state system (i.e., a system without a counter) for a given specification. 
We have implemented our approach and the underlying algorithms in a tool that takes qualitative and quantitative specifications and automatically constructs a system that satisfies the qualitative specification and optimizes the quantitative specification, if such a system exists. We present some experimental results showing optimal systems that were automatically generated in this way.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Jobstmann, Barbara and Singh, Rohit},
journal = {Journal of the ACM},
number = {1},
publisher = {ACM},
title = {{Measuring and synthesizing systems in probabilistic environments}},
doi = {10.1145/2699430},
volume = {62},
year = {2015},
}
@inproceedings{1512,
abstract = {We show that very weak topological assumptions are enough to ensure the existence of a Helly-type theorem. More precisely, we show that for any non-negative integers b and d there exists an integer h(b,d) such that the following holds. If F is a finite family of subsets of R^d such that the ith reduced Betti number (with Z_2 coefficients in singular homology) of the intersection of any proper subfamily G of F is at most b for every non-negative integer i less or equal to (d-1)/2, then F has Helly number at most h(b,d). These topological conditions are sharp: not controlling any of these first Betti numbers allow for families with unbounded Helly number. Our proofs combine homological non-embeddability results with a Ramsey-based approach to build, given an arbitrary simplicial complex K, some well-behaved chain map from C_*(K) to C_*(R^d). Both techniques are of independent interest.},
author = {Goaoc, Xavier and Paták, Pavel and Patakova, Zuzana and Tancer, Martin and Wagner, Uli},
location = {Eindhoven, Netherlands},
pages = {507 -- 521},
publisher = {ACM},
title = {{Bounding Helly numbers via Betti numbers}},
doi = {10.4230/LIPIcs.SOCG.2015.507},
volume = {34},
year = {2015},
}
@inproceedings{1661,
abstract = {The computation of the winning set for one-pair Streett objectives and for k-pair Streett objectives in (standard) graphs as well as in game graphs are central problems in computer-aided verification, with application to the verification of closed systems with strong fairness conditions, the verification of open systems, checking interface compatibility, well-formedness of specifications, and the synthesis of reactive systems. We give faster algorithms for the computation of the winning set for (1) one-pair Streett objectives (aka parity-3 problem) in game graphs and (2) for k-pair Streett objectives in graphs. For both problems this represents the first improvement in asymptotic running time in 15 years.},
author = {Chatterjee, Krishnendu and Henzinger, Monika and Loitzenbauer, Veronika},
booktitle = {Proceedings - Symposium on Logic in Computer Science},
location = {Kyoto, Japan},
publisher = {IEEE},
title = {{Improved algorithms for one-pair and k-pair Streett objectives}},
doi = {10.1109/LICS.2015.34},
volume = {2015-July},
year = {2015},
}
@inproceedings{1657,
abstract = {We consider Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) objectives. There exist two different views: (i) the expectation semantics, where the goal is to optimize the expected mean-payoff objective, and (ii) the satisfaction semantics, where the goal is to maximize the probability of runs such that the mean-payoff value stays above a given vector. We consider optimization with respect to both objectives at once, thus unifying the existing semantics. Precisely, the goal is to optimize the expectation while ensuring the satisfaction constraint. Our problem captures the notion of optimization with respect to strategies that are risk-averse (i.e., ensure certain probabilistic guarantee). Our main results are as follows: First, we present algorithms for the decision problems, which are always polynomial in the size of the MDP. We also show that an approximation of the Pareto curve can be computed in time polynomial in the size of the MDP, and the approximation factor, but exponential in the number of dimensions. Second, we present a complete characterization of the strategy complexity (in terms of memory bounds and randomization) required to solve our problem. },
author = {Chatterjee, Krishnendu and Komárková, Zuzana and Kretinsky, Jan},
location = {Kyoto, Japan},
pages = {244 -- 256},
publisher = {IEEE},
title = {{Unifying two views on multiple mean-payoff objectives in Markov decision processes}},
doi = {10.1109/LICS.2015.32},
year = {2015},
}
@inproceedings{1610,
abstract = {The edit distance between two words w1, w2 is the minimal number of word operations (letter insertions, deletions, and substitutions) necessary to transform w1 to w2. The edit distance generalizes to languages L1,L2, where the edit distance is the minimal number k such that for every word from L1 there exists a word in L2 with edit distance at most k. We study the edit distance computation problem between pushdown automata and their subclasses. The problem of computing edit distance to pushdown automata is undecidable, and in practice, the interesting question is to compute the edit distance from a pushdown automaton (the implementation, a standard model for programs with recursion) to a regular language (the specification). In this work, we present a complete picture of decidability and complexity for deciding whether, for a given threshold k, the edit distance from a pushdown automaton to a finite automaton is at most k.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Ibsen-Jensen, Rasmus and Otop, Jan},
location = {Kyoto, Japan},
number = {Part II},
pages = {121 -- 133},
publisher = {Springer},
title = {{Edit distance for pushdown automata}},
doi = {10.1007/978-3-662-47666-6_10},
volume = {9135},
year = {2015},
}
@misc{5435,
abstract = {We consider Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) objectives.
There have been two different views: (i) the expectation semantics, where the goal is to optimize the expected mean-payoff objective, and (ii) the satisfaction semantics, where the goal is to maximize the probability of runs such that the mean-payoff value stays above a given vector.
We consider the problem where the goal is to optimize the expectation under the constraint that the satisfaction semantics is ensured, and thus consider a generalization that unifies the existing semantics. Our problem captures the notion of optimization with respect to strategies that are risk-averse (i.e., ensures certain probabilistic guarantee).
Our main results are algorithms for the decision problem which are always polynomial in the size of the MDP.
We also show that an approximation of the Pareto-curve can be computed in time polynomial in the size of the MDP, and the approximation factor, but exponential in the number of dimensions. Finally, we present a complete characterization of the strategy complexity (in terms of memory bounds and randomization) required to solve our problem.},
author = {Chatterjee, Krishnendu and Komárková, Zuzana and Kretinsky, Jan},
issn = {2664-1690},
pages = {51},
publisher = {IST Austria},
title = {{Unifying two views on multiple mean-payoff objectives in Markov decision processes}},
doi = {10.15479/AT:IST-2015-318-v2-1},
year = {2015},
}
@misc{5429,
abstract = {We consider Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) objectives.
There have been two different views: (i) the expectation semantics, where the goal is to optimize the expected mean-payoff objective, and (ii) the satisfaction semantics, where the goal is to maximize the probability of runs such that the mean-payoff value stays above a given vector.
We consider the problem where the goal is to optimize the expectation under the constraint that the satisfaction semantics is ensured, and thus consider a generalization that unifies the existing semantics.
Our problem captures the notion of optimization with respect to strategies that are risk-averse (i.e., ensures certain probabilistic guarantee).
Our main results are algorithms for the decision problem which are always polynomial in the size of the MDP. We also show that an approximation of the Pareto-curve can be computed in time polynomial in the size of the MDP, and the approximation factor, but exponential in the number of dimensions.
Finally, we present a complete characterization of the strategy complexity (in terms of memory bounds and randomization) required to solve our problem.},
author = {Chatterjee, Krishnendu and Komárková, Zuzana and Kretinsky, Jan},
issn = {2664-1690},
pages = {41},
publisher = {IST Austria},
title = {{Unifying two views on multiple mean-payoff objectives in Markov decision processes}},
doi = {10.15479/AT:IST-2015-318-v1-1},
year = {2015},
}
@misc{5438,
abstract = {The edit distance between two words w1, w2 is the minimal number of word operations (letter insertions, deletions, and substitutions) necessary to transform w1 to w2. The edit distance generalizes to languages L1, L2, where the edit distance is the minimal number k such that for every word from L1 there exists a word in L2 with edit distance at most k. We study the edit distance computation problem between pushdown automata and their subclasses.
The problem of computing edit distance to a pushdown automaton is undecidable, and in practice, the interesting question is to compute the edit distance from a pushdown automaton (the implementation, a standard model for programs with recursion) to a regular language (the specification). In this work, we present a complete picture of decidability and complexity for deciding whether, for a given threshold k, the edit distance from a pushdown automaton to a finite automaton is at most k. },
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Ibsen-Jensen, Rasmus and Otop, Jan},
issn = {2664-1690},
pages = {15},
publisher = {IST Austria},
title = {{Edit distance for pushdown automata}},
doi = {10.15479/AT:IST-2015-334-v1-1},
year = {2015},
}
@article{473,
abstract = {We prove that nonlinear Gibbs measures can be obtained from the corresponding many-body, grand-canonical, quantum Gibbs states, in a mean-field limit where the temperature T diverges and the interaction strength behaves as 1/T. We proceed by characterizing the interacting Gibbs state as minimizing a functional counting the free-energy relatively to the non-interacting case. We then perform an infinite-dimensional analogue of phase-space semiclassical analysis, using fine properties of the quantum relative entropy, the link between quantum de Finetti measures and upper/lower symbols in a coherent state basis, as well as Berezin-Lieb type inequalities. Our results cover the measure built on the defocusing nonlinear Schrödinger functional on a finite interval, as well as smoother interactions in dimensions d ≥ 2.},
author = {Lewin, Mathieu and Phan Thanh, Nam and Rougerie, Nicolas},
journal = {Journal de l'Ecole Polytechnique - Mathematiques},
pages = {65--115},
publisher = {Ecole Polytechnique},
title = {{Derivation of nonlinear Gibbs measures from many-body quantum mechanics}},
doi = {10.5802/jep.18},
volume = {2},
year = {2015},
}
@article{477,
abstract = {Dendritic cells are potent antigen-presenting cells endowed with the unique ability to initiate adaptive immune responses upon inflammation. Inflammatory processes are often associated with an increased production of serotonin, which operates by activating specific receptors. However, the functional role of serotonin receptors in regulation of dendritic cell functions is poorly understood. Here, we demonstrate that expression of serotonin receptor 5-HT7 (5-HT7TR) as well as its downstream effector Cdc42 is upregulated in dendritic cells upon maturation. Although dendritic cell maturation was independent of 5-HT7TR, receptor stimulation affected dendritic cell morphology through Cdc42-mediated signaling. In addition, basal activity of 5-HT7TR was required for the proper expression of the chemokine receptor CCR7, which is a key factor that controls dendritic cell migration. Consistent with this, we observed that 5-HT7TR enhances chemotactic motility of dendritic cells in vitro by modulating their directionality and migration velocity. Accordingly, migration of dendritic cells in murine colon explants was abolished after pharmacological receptor inhibition. Our results indicate that there is a crucial role for 5-HT7TR-Cdc42-mediated signaling in the regulation of dendritic cell morphology and motility, suggesting that 5-HT7TR could be a new target for treatment of a variety of inflammatory and immune disorders.},
author = {Holst, Katrin and Guseva, Daria and Schindler, Susann and Sixt, Michael K and Braun, Armin and Chopra, Himpriya and Pabst, Oliver and Ponimaskin, Evgeni},
journal = {Journal of Cell Science},
number = {15},
pages = {2866--2880},
publisher = {Company of Biologists},
title = {{The serotonin receptor 5-HT7R regulates the morphology and migratory properties of dendritic cells}},
doi = {10.1242/jcs.167999},
volume = {128},
year = {2015},
}
@inproceedings{1656,
abstract = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata, nor in any other known decidable formalism. In this work, we introduce nested weighted automata as a natural extension of weighted automata which makes it possible to express important quantitative properties such as average response time. In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in run-time verification. We establish an almost complete decidability picture for the basic decision problems about nested weighted automata, and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan},
booktitle = {Proceedings - Symposium on Logic in Computer Science},
location = {Kyoto, Japan},
publisher = {IEEE},
title = {{Nested weighted automata}},
doi = {10.1109/LICS.2015.72},
volume = {2015-July},
year = {2015},
}
@misc{5436,
abstract = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata, nor in any other known decidable formalism. In this work, we introduce nested weighted automata as a natural extension of weighted automata which makes it possible to express important quantitative properties such as average response time.
In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in run-time verification. We establish an almost complete decidability picture for the basic decision problems about nested weighted automata, and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan},
issn = {2664-1690},
pages = {29},
publisher = {IST Austria},
title = {{Nested weighted automata}},
doi = {10.15479/AT:IST-2015-170-v2-2},
year = {2015},
}
@article{524,
abstract = {We consider concurrent games played by two players on a finite-state graph, where in every round the players simultaneously choose a move, and the current state along with the joint moves determine the successor state. We study the most fundamental objective for concurrent games, namely, mean-payoff or limit-average objective, where a reward is associated to each transition, and the goal of player 1 is to maximize the long-run average of the rewards, and the objective of player 2 is strictly the opposite (i.e., the games are zero-sum). The path constraint for player 1 could be qualitative, i.e., the mean-payoff is the maximal reward, or arbitrarily close to it; or quantitative, i.e., a given threshold between the minimal and maximal reward. We consider the computation of the almost-sure (resp. positive) winning sets, where player 1 can ensure that the path constraint is satisfied with probability 1 (resp. positive probability). Almost-sure winning with qualitative constraint exactly corresponds to the question of whether there exists a strategy to ensure that the payoff is the maximal reward of the game. Our main results for qualitative path constraints are as follows: (1) we establish qualitative determinacy results that show that for every state either player 1 has a strategy to ensure almost-sure (resp. positive) winning against all player-2 strategies, or player 2 has a spoiling strategy to falsify almost-sure (resp. positive) winning against all player-1 strategies; (2) we present optimal strategy complexity results that precisely characterize the classes of strategies required for almost-sure and positive winning for both players; and (3) we present quadratic time algorithms to compute the almost-sure and the positive winning sets, matching the best known bound of the algorithms for much simpler problems (such as reachability objectives). 
For quantitative constraints we show that a polynomial time solution for the almost-sure or the positive winning set would imply a solution to a long-standing open problem (of solving the value problem of turn-based deterministic mean-payoff games) that is not known to be solvable in polynomial time.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus},
journal = {Information and Computation},
number = {6},
pages = {2--24},
publisher = {Elsevier},
title = {{Qualitative analysis of concurrent mean payoff games}},
doi = {10.1016/j.ic.2015.03.009},
volume = {242},
year = {2015},
}
@article{532,
abstract = {Ethylene is a gaseous phytohormone that plays vital roles in plant growth and development. Previous studies uncovered EIN2 as an essential signal transducer linking ethylene perception on ER to transcriptional regulation in the nucleus through a “cleave and shuttle” model. In this study, we report another mechanism of EIN2-mediated ethylene signaling, whereby EIN2 imposes the translational repression of EBF1 and EBF2 mRNA. We find that the EBF1/2 3′ UTRs mediate EIN2-directed translational repression and identify multiple poly-uridylates (PolyU) motifs as functional cis elements of 3′ UTRs. Furthermore, we demonstrate that ethylene induces EIN2 to associate with 3′ UTRs and target EBF1/2 mRNA to cytoplasmic processing-body (P-body) through interacting with multiple P-body factors, including EIN5 and PABs. Our study illustrates translational regulation as a key step in ethylene signaling and presents mRNA 3′ UTR functioning as a “signal transducer” to sense and relay cellular signaling in plants.},
author = {Li, Wenyang and Ma, Mengdi and Feng, Ying and Li, Hongjiang and Wang, Yichuan and Ma, Yutong and Li, Mingzhe and An, Fengying and Guo, Hongwei},
journal = {Cell},
number = {3},
pages = {670--683},
publisher = {Cell Press},
title = {{EIN2-directed translational regulation of ethylene signaling in Arabidopsis}},
doi = {10.1016/j.cell.2015.09.037},
volume = {163},
year = {2015},
}
@article{523,
abstract = {We consider two-player games played on weighted directed graphs with mean-payoff and total-payoff objectives, two classical quantitative objectives. While for single-dimensional games the complexity and memory bounds for both objectives coincide, we show that in contrast to multi-dimensional mean-payoff games that are known to be coNP-complete, multi-dimensional total-payoff games are undecidable. We introduce conservative approximations of these objectives, where the payoff is considered over a local finite window sliding along a play, instead of the whole play. For single dimension, we show that (i) if the window size is polynomial, deciding the winner takes polynomial time, and (ii) the existence of a bounded window can be decided in NP ∩ coNP, and is at least as hard as solving mean-payoff games. For multiple dimensions, we show that (i) the problem with fixed window size is EXPTIME-complete, and (ii) there is no primitive-recursive algorithm to decide the existence of a bounded window.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Randour, Mickael and Raskin, Jean},
journal = {Information and Computation},
number = {6},
pages = {25--52},
publisher = {Elsevier},
title = {{Looking at mean-payoff and total-payoff through windows}},
doi = {10.1016/j.ic.2015.03.010},
volume = {242},
year = {2015},
}
@article{5749,
abstract = {Parasitism creates selection for resistance mechanisms in host populations and is hypothesized to promote increased host evolvability. However, the influence of these traits on host evolution when parasites are no longer present is unclear. We used experimental evolution and whole-genome sequencing of Escherichia coli to determine the effects of past and present exposure to parasitic viruses (phages) on the spread of mutator alleles, resistance, and bacterial competitive fitness. We found that mutator alleles spread rapidly during adaptation to any of four different phage species, and this pattern was even more pronounced with multiple phages present simultaneously. However, hypermutability did not detectably accelerate adaptation in the absence of phages and recovery of fitness costs associated with resistance. Several lineages evolved phage resistance through elevated mucoidy, and during subsequent evolution in phage-free conditions they rapidly reverted to nonmucoid, phage-susceptible phenotypes. Genome sequencing revealed that this phenotypic reversion was achieved by additional genetic changes rather than by genotypic reversion of the initial resistance mutations. Insertion sequence (IS) elements played a key role in both the acquisition of resistance and adaptation in the absence of parasites; unlike single nucleotide polymorphisms, IS insertions were not more frequent in mutator lineages. Our results provide a genetic explanation for rapid reversion of mucoidy, a phenotype observed in other bacterial species including human pathogens. Moreover, this demonstrates that the types of genetic change underlying adaptation to fitness costs, and consequently the impact of evolvability mechanisms such as increased point-mutation rates, depend critically on the mechanism of resistance.},
author = {Wielgoss, Sébastien and Bergmiller, Tobias and Bischofberger, Anna M. and Hall, Alex R.},
issn = {0737-4038},
journal = {Molecular Biology and Evolution},
number = {3},
pages = {770--782},
publisher = {Oxford University Press (OUP)},
title = {{Adaptation to Parasites and Costs of Parasite Resistance in Mutator and Nonmutator Bacteria}},
doi = {10.1093/molbev/msv270},
volume = {33},
year = {2015},
}
@inproceedings{779,
abstract = {The concurrent memory reclamation problem is that of devising a way for a deallocating thread to verify that no other concurrent threads hold references to a memory block being deallocated. To date, in the absence of automatic garbage collection, there is no satisfactory solution to this problem; existing tracking methods like hazard pointers, reference counters, or epoch-based techniques like RCU, are either prohibitively expensive or require significant programming expertise, to the extent that implementing them efficiently can be worthy of a publication. None of the existing techniques are automatic or even semi-automated. In this paper, we take a new approach to concurrent memory reclamation: instead of manually tracking access to memory locations as done in techniques like hazard pointers, or restricting shared accesses to specific epoch boundaries as in RCU, our algorithm, called ThreadScan, leverages operating system signaling to automatically detect which memory locations are being accessed by concurrent threads. Initial empirical evidence shows that ThreadScan scales surprisingly well and requires negligible programming effort beyond the standard use of Malloc and Free.},
author = {Alistarh, Dan and Matveev, Alexander and Leiserson, William M and Shavit, Nir N},
pages = {123--132},
publisher = {ACM},
title = {{ThreadScan: Automatic and scalable memory reclamation}},
doi = {10.1145/2755573.2755600},
volume = {2015-June},
year = {2015},
}
@misc{5441,
abstract = {We study algorithmic questions for concurrent systems where the transitions are labeled from a complete, closed semiring, and path properties are algebraic with semiring operations. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural problems that arise in program analysis. We consider that each component of the concurrent system is a graph with constant treewidth, a property satisfied by the controlflow graphs of most programs. We allow for multiple possible queries, which arise naturally in demand driven dataflow analysis. The study of multiple queries allows us to consider the tradeoff between the resource usage of the one-time preprocessing and for each individual query. The traditional approach constructs the product graph of all components and applies the best-known graph algorithm on the product. In this approach, even the answer to a single query requires the transitive closure (i.e., the results of all possible queries), which provides no room for tradeoff between preprocessing and query time. Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time, each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time. Furthermore, we establish conditional optimality results showing that the worst-case running time of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms (i.e., improving the worst-case bound for the shortest path problem in general graphs). 
Preliminary experimental results show that our algorithms perform favorably on several benchmarks.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Goharshady, Amir and Pavlogiannis, Andreas},
issn = {2664-1690},
pages = {24},
publisher = {IST Austria},
title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}},
doi = {10.15479/AT:IST-2015-340-v1-1},
year = {2015},
}
@misc{5442,
abstract = {We study algorithmic questions for concurrent systems where the transitions are labeled from a complete, closed semiring, and path properties are algebraic with semiring operations. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural properties that arise in program analysis.
We consider that each component of the concurrent system is a graph with constant treewidth, and it is known that the controlflow graphs of most programs have constant treewidth. We allow for multiple possible queries, which arise naturally in demand driven dataflow analysis problems (e.g., alias analysis). The study of multiple queries allows us to consider the tradeoff between the resource usage of the one-time preprocessing and for each individual query. The traditional approaches construct the product graph of all components and apply the best-known graph algorithm on the product. In the traditional approach, even the answer to a single query requires the transitive closure computation (i.e., the results of all possible queries), which provides no room for tradeoff between preprocessing and query time.
Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time,
each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time. Furthermore, we establish conditional optimality results that show that the worst-case running times of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms (such as improving
the worst-case bounds for the shortest path problem in general graphs whose current best-known bound has not been improved in five decades). Finally, we provide a prototype implementation of our algorithms which significantly outperforms the existing algorithmic methods on several benchmarks.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Goharshady, Amir and Pavlogiannis, Andreas},
issn = {2664-1690},
pages = {22},
publisher = {IST Austria},
title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}},
year = {2015},
}
@inproceedings{1511,
abstract = {The fact that the complete graph K_5 does not embed in the plane has been generalized in two independent directions. On the one hand, the solution of the classical Heawood problem for graphs on surfaces established that the complete graph K_n embeds in a closed surface M if and only if (n-3)(n-4) is at most 6b_1(M), where b_1(M) is the first Z_2-Betti number of M. On the other hand, Van Kampen and Flores proved that the k-skeleton of the n-dimensional simplex (the higher-dimensional analogue of K_{n+1}) embeds in R^{2k} if and only if n is less or equal to 2k+2. Two decades ago, Kuhnel conjectured that the k-skeleton of the n-simplex embeds in a compact, (k-1)-connected 2k-manifold with kth Z_2-Betti number b_k only if the following generalized Heawood inequality holds: binom{n-k-1}{k+1} is at most binom{2k+1}{k+1} b_k. This is a common generalization of the case of graphs on surfaces as well as the Van Kampen--Flores theorem. In the spirit of Kuhnel's conjecture, we prove that if the k-skeleton of the n-simplex embeds in a 2k-manifold with kth Z_2-Betti number b_k, then n is at most 2b_k binom{2k+2}{k} + 2k + 5. This bound is weaker than the generalized Heawood inequality, but does not require the assumption that M is (k-1)-connected. Our proof uses a result of Volovikov about maps that satisfy a certain homological triviality condition.},
author = {Goaoc, Xavier and Mabillard, Isaac and Paták, Pavel and Patakova, Zuzana and Tancer, Martin and Wagner, Uli},
location = {Eindhoven, Netherlands},
pages = {476--490},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{On generalized Heawood inequalities for manifolds: A Van Kampen–Flores-type nonembeddability result}},
doi = {10.4230/LIPIcs.SOCG.2015.476},
volume = {34},
year = {2015},
}
@inproceedings{1637,
abstract = {An instance of the Valued Constraint Satisfaction Problem (VCSP) is given by a finite set of variables, a finite domain of labels, and a sum of functions, each function depending on a subset of the variables. Each function can take finite values specifying costs of assignments of labels to its variables or the infinite value, which indicates an infeasible assignment. The goal is to find an assignment of labels to the variables that minimizes the sum. We study, assuming that P ≠ NP, how the complexity of this very general problem depends on the set of functions allowed in the instances, the so-called constraint language. The case when all allowed functions take values in {0, ∞} corresponds to ordinary CSPs, where one deals only with the feasibility issue and there is no optimization. This case is the subject of the Algebraic CSP Dichotomy Conjecture predicting for which constraint languages CSPs are tractable (i.e. solvable in polynomial time) and for which NP-hard. The case when all allowed functions take only finite values corresponds to finite-valued CSP, where the feasibility aspect is trivial and one deals only with the optimization issue. The complexity of finite-valued CSPs was fully classified by Thapper and Zivny. An algebraic necessary condition for tractability of a general-valued CSP with a fixed constraint language was recently given by Kozik and Ochremiak. As our main result, we prove that if a constraint language satisfies this algebraic necessary condition, and the feasibility CSP (i.e. the problem of deciding whether a given instance has a feasible solution) corresponding to the VCSP with this language is tractable, then the VCSP is tractable. The algorithm is a simple combination of the assumed algorithm for the feasibility CSP and the standard LP relaxation. As a corollary, we obtain that a dichotomy for ordinary CSPs would imply a dichotomy for general-valued CSPs.},
author = {Kolmogorov, Vladimir and Krokhin, Andrei and Rolinek, Michal},
location = {Berkeley, CA, United States},
pages = {1246--1258},
publisher = {IEEE},
title = {{The complexity of general-valued CSPs}},
doi = {10.1109/FOCS.2015.80},
year = {2015},
}
@article{802,
abstract = {Glycoinositolphosphoceramides (GIPCs) are complex sphingolipids present at the plasma membrane of various eukaryotes with the important exception of mammals. In fungi, these glycosphingolipids commonly contain an alpha-mannose residue (Man) linked at position 2 of the inositol. However, several pathogenic fungi additionally synthesize zwitterionic GIPCs carrying an alpha-glucosamine residue (GlcN) at this position. In the human pathogen Aspergillus fumigatus, the GlcNalpha1,2IPC core (where IPC is inositolphosphoceramide) is elongated to Manalpha1,3Manalpha1,6GlcNalpha1,2IPC, which is the most abundant GIPC synthesized by this fungus. In this study, we identified an A. fumigatus N-acetylglucosaminyltransferase, named GntA, and demonstrate its involvement in the initiation of zwitterionic GIPC biosynthesis. Targeted deletion of the gene encoding GntA in A. fumigatus resulted in complete absence of zwitterionic GIPC; a phenotype that could be reverted by episomal expression of GntA in the mutant. The N-acetylhexosaminyltransferase activity of GntA was substantiated by production of N-acetylhexosamine-IPC in the yeast Saccharomyces cerevisiae upon GntA expression. Using an in vitro assay, GntA was furthermore shown to use UDP-N-acetylglucosamine as donor substrate to generate a glycolipid product resistant to saponification and to digestion by phosphatidylinositol-phospholipase C as expected for GlcNAcalpha1,2IPC. Finally, as the enzymes involved in mannosylation of IPC, GntA was localized to the Golgi apparatus, the site of IPC synthesis.},
author = {Engel, Jakob and Schmalhorst, Philipp S and Kruger, Anke and Muller, Christina and Buettner, Falk and Routier, Françoise},
journal = {Glycobiology},
number = {12},
pages = {1423--1430},
publisher = {Oxford University Press},
title = {{Characterization of an N-acetylglucosaminyltransferase involved in Aspergillus fumigatus zwitterionic glycoinositolphosphoceramide biosynthesis}},
doi = {10.1093/glycob/cwv059},
volume = {25},
year = {2015},
}
@inproceedings{1702,
abstract = {In this paper we present INTERHORN, a solver for recursion-free Horn clauses. The main application domain of INTERHORN lies in solving interpolation problems arising in software verification. We show how a range of interpolation problems, including path, transition, nested, state/transition and well-founded interpolation can be handled directly by INTERHORN. By detailing these interpolation problems and their Horn clause representations, we hope to encourage the emergence of a common back-end interpolation interface useful for diverse verification tools.},
author = {Gupta, Ashutosh and Popeea, Corneliu and Rybalchenko, Andrey},
booktitle = {Electronic Proceedings in Theoretical Computer Science, EPTCS},
location = {Vienna, Austria},
pages = {31--38},
publisher = {Open Publishing Association},
title = {{Generalised interpolation by solving recursion-free Horn clauses}},
doi = {10.4204/EPTCS.169.5},
volume = {169},
year = {2014},
}
@article{1761,
abstract = {Metal silicides formed by means of thermal annealing processes are employed as contact materials in microelectronics. Control of the structure of silicide/silicon interfaces becomes a critical issue when the characteristic size of the device is reduced below a few tens of nanometers. Here, we report on silicide clustering occurring within the channel of PtSi/Si/PtSi Schottky-barrier transistors. This phenomenon is investigated through atomistic simulations and low-temperature resonant-tunneling spectroscopy. Our results provide evidence for the segregation of a PtSi cluster with a diameter of a few nanometers from the silicide contact. The cluster acts as a metallic quantum dot giving rise to distinct signatures of quantum transport through its discrete energy states.},
author = {Mongillo, Massimo and Spathis, Panayotis N and Katsaros, Georgios and De Franceschi, Silvano and Gentile, Pascal and Rurali, Riccardo and Cartoixà, Xavier},
journal = {Physical Review X},
number = {4},
publisher = {American Physical Society},
title = {{PtSi clustering in silicon probed by transport spectroscopy}},
doi = {10.1103/PhysRevX.3.041025},
volume = {3},
year = {2014},
}
@article{1791,
abstract = {Acute gene inactivation using short hairpin RNA (shRNA, knockdown) in developing brain is a powerful technique to study genetic function; however, discrepancies between knockdown and knockout murine phenotypes have left unanswered questions. For example, doublecortin (Dcx) knockdown but not knockout shows a neocortical neuronal migration phenotype. Here we report that in utero electroporation of shRNA, but not siRNA or miRNA, to Dcx demonstrates a migration phenotype in Dcx knockouts akin to the effect in wild-type mice, suggesting shRNA-mediated off-target toxicity. This effect was not limited to Dcx, as it was observed in Dclk1 knockouts, as well as with a fraction of scrambled shRNAs, suggesting a sequence-dependent but not sequence-specific effect. Profiling RNAs from electroporated cells showed a defect in endogenous let7 miRNA levels, and disruption of let7 or Dicer recapitulated the migration defect. The results suggest that shRNA-mediated knockdown can produce untoward migration effects by altering endogenous miRNA pathways.},
author = {Baek, SeungTae and Kerjan, Géraldine and Bielas, Stephanie L and Lee, Jieun and Fenstermaker, Ali G and Novarino, Gaia and Gleeson, Joseph G},
journal = {Neuron},
number = {6},
pages = {1255 -- 1262},
publisher = {Elsevier},
title = {{Off-target effect of doublecortin family shRNA on neuronal migration associated with endogenous MicroRNA dysregulation}},
doi = {10.1016/j.neuron.2014.04.036},
volume = {82},
year = {2014},
}
@inproceedings{1853,
abstract = {Wireless sensor networks (WSNs) composed of low-power, low-cost sensor nodes are expected to form the backbone of future intelligent networks for a broad range of civil, industrial and military applications. These sensor nodes are often deployed through random spreading, and function in dynamic environments. Many applications of WSNs such as pollution tracking, forest fire detection, and military surveillance require knowledge of the location of constituent nodes. But the use of technologies such as GPS on all nodes is prohibitive due to power and cost constraints. So, the sensor nodes need to autonomously determine their locations. Most localization techniques use anchor nodes with known locations to determine the position of remaining nodes. Localization techniques have two conflicting requirements. On one hand, an ideal localization technique should be computationally simple and on the other hand, it must be resistant to attacks that compromise anchor nodes. In this paper, we propose a computationally light-weight game theoretic secure localization technique and demonstrate its effectiveness in comparison to existing techniques.},
author = {Jha, Susmit and Tripakis, Stavros and Seshia, Sanjit and Chatterjee, Krishnendu},
location = {Cambridge, USA},
pages = {85 -- 90},
publisher = {IEEE},
title = {{Game theoretic secure localization in wireless sensor networks}},
doi = {10.1109/IOT.2014.7030120},
year = {2014},
}
@inproceedings{1869,
abstract = {Boolean controllers for systems with complex datapaths are often very difficult to implement correctly, in particular when concurrency is involved. Yet, in many instances it is easy to formally specify correctness. For example, the specification for the controller of a pipelined processor only has to state that the pipelined processor gives the same results as a non-pipelined reference design. This makes such controllers a good target for automated synthesis. However, an efficient abstraction for the complex datapath elements is needed, as a bit-precise description is often infeasible. We present Suraq, the first controller synthesis tool which uses uninterpreted functions for the abstraction. Quantified firstorder formulas (with specific quantifier structure) serve as the specification language from which Suraq synthesizes Boolean controllers. Suraq transforms the specification into an unsatisfiable SMT formula, and uses Craig interpolation to compute its results. Using Suraq, we were able to synthesize a controller (consisting of two Boolean signals) for a five-stage pipelined DLX processor in roughly one hour and 15 minutes.},
author = {Hofferek, Georg and Gupta, Ashutosh},
booktitle = {HVC 2014},
editor = {Yahav, Eran},
location = {Haifa, Israel},
pages = {68 -- 74},
publisher = {Springer},
title = {{Suraq - a controller synthesis tool using uninterpreted functions}},
doi = {10.1007/978-3-319-13338-6_6},
volume = {8855},
year = {2014},
}
@inproceedings{1870,
abstract = {We investigate the problem of checking if a finite-state transducer is robust to uncertainty in its input. Our notion of robustness is based on the analytic notion of Lipschitz continuity - a transducer is K-(Lipschitz) robust if the perturbation in its output is at most K times the perturbation in its input. We quantify input and output perturbation using similarity functions. We show that K-robustness is undecidable even for deterministic transducers. We identify a class of functional transducers, which admits a polynomial time automata-theoretic decision procedure for K-robustness. This class includes Mealy machines and functional letter-to-letter transducers. We also study K-robustness of nondeterministic transducers. Since a nondeterministic transducer generates a set of output words for each input word, we quantify output perturbation using setsimilarity functions. We show that K-robustness of nondeterministic transducers is undecidable, even for letter-to-letter transducers. We identify a class of set-similarity functions which admit decidable K-robustness of letter-to-letter transducers.},
author = {Henzinger, Thomas A and Otop, Jan and Samanta, Roopsha},
booktitle = {Leibniz International Proceedings in Informatics, LIPIcs},
location = {Delhi, India},
pages = {431 -- 443},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Lipschitz robustness of finite-state transducers}},
doi = {10.4230/LIPIcs.FSTTCS.2014.431},
volume = {29},
year = {2014},
}
@article{1884,
abstract = {Unbiased high-throughput massively parallel sequencing methods have transformed the process of discovery of novel putative driver gene mutations in cancer. In chronic lymphocytic leukemia (CLL), these methods have yielded several unexpected findings, including the driver genes SF3B1, NOTCH1 and POT1. Recent analysis, utilizing down-sampling of existing datasets, has shown that the discovery process of putative drivers is far from complete across cancer. In CLL, while driver gene mutations affecting >10% of patients were efficiently discovered with previously published CLL cohorts of up to 160 samples subjected to whole exome sequencing (WES), this sample size has only 0.78 power to detect drivers affecting 5% of patients, and only 0.12 power for drivers affecting 2% of patients. These calculations emphasize the need to apply unbiased WES to larger patient cohorts.},
author = {Landau, Dan and Stewart, Chip and Reiter, Johannes and Lawrence, Michael and Sougnez, Carrie and Brown, Jennifer and Lopez Guillermo, Armando and Gabriel, Stacey and Lander, Eric and Neuberg, Donna and López Otín, Carlos and Campo, Elias and Getz, Gad and Wu, Catherine},
journal = {Blood},
number = {21},
pages = {1952 -- 1952},
publisher = {American Society of Hematology},
title = {{Novel putative driver gene mutations in chronic lymphocytic leukemia (CLL): results from a combined analysis of whole exome sequencing of 262 primary CLL samples}},
volume = {124},
year = {2014},
}
@article{1887,
author = {Cremer, Sylvia},
journal = {Zoologie},
pages = {23 -- 30},
publisher = {Deutsche Zoologische Gesellschaft},
title = {{Gemeinsame Krankheitsabwehr in Ameisengesellschaften}},
year = {2014},
}
@inbook{1888,
abstract = {Im Rahmen meiner Arbeit mit der kollektiven Krankheitsabwehr in Ameisengesellschaften interessiert mich vor allem, wie sich die Kolonien als Ganzes gegen Krankheiten wehren können. Warum ist dieses Thema der Krankheitsdynamik in Gruppen so wichtig? Ein Vergleich von solitär lebenden Individuen mit Individuen, die in sozialen Gruppen zusammenleben, zeigt die Kosten und die Vorteile des Gruppenlebens: Einerseits haben Individuen in sozialen Gruppen aufgrund der hohen Dichte, in der die Tiere zusammenleben, den hohen Interaktionsraten, die sie miteinander haben, und der engen Verwandtschaft, die sie verbindet, ein höheres Ansteckungsrisiko. Andererseits kann die individuelle Krankheitsabwehr durch die kollektive Abwehr in den Gruppen ergänzt werden.},
author = {Cremer, Sylvia},
booktitle = {Soziale Insekten in einer sich wandelnden Welt},
pages = {65 -- 72},
publisher = {Pfeil},
title = {{Soziale Immunität: Wie sich der Staat gegen Pathogene wehrt Bayerische Akademie der Wissenschaften}},
volume = {43},
year = {2014},
}
@inproceedings{1927,
abstract = {Constrained pseudorandom functions have recently been introduced independently by Boneh and Waters (Asiacrypt’13), Kiayias et al. (CCS’13), and Boyle et al. (PKC’14). In a standard pseudorandom function (PRF) a key k is used to evaluate the PRF on all inputs in the domain. Constrained PRFs additionally offer the functionality to delegate “constrained” keys k_S which allow to evaluate the PRF only on a subset S of the domain. The three above-mentioned papers all show that the classical GGM construction (J.ACM’86) of a PRF from a pseudorandom generator (PRG) directly yields a constrained PRF where one can compute constrained keys to evaluate the PRF on all inputs with a given prefix. This constrained PRF has already found many interesting applications. Unfortunately, the existing security proofs only show selective security (by a reduction to the security of the underlying PRG). To achieve full security, one has to use complexity leveraging, which loses an exponential factor 2^N in security, where N is the input length. The first contribution of this paper is a new reduction that only loses a quasipolynomial factor q^(log N), where q is the number of adversarial queries. For this we develop a new proof technique which constructs a distinguisher by interleaving simple guessing steps and hybrid arguments a small number of times. This approach might be of interest also in other contexts where currently the only technique to achieve full security is complexity leveraging. Our second contribution is concerned with another constrained PRF, due to Boneh and Waters, which allows for constrained keys for the more general class of bit-fixing functions. Their security proof also suffers from a 2^N loss, which we show is inherent. We construct a meta-reduction which shows that any “simple” reduction of full security from a noninteractive hardness assumption must incur an exponential security loss.},
author = {Fuchsbauer, Georg and Konstantinov, Momchil and Pietrzak, Krzysztof and Rao, Vanishree},
pages = {173 -- 192},
publisher = {Springer},
title = {{Adaptive security of constrained PRFs}},
doi = {10.1145/2591796.2591825},
volume = {8874},
year = {2014},
}
@article{1979,
abstract = {NADH-ubiquinone oxidoreductase (complex I) is the first and largest enzyme in the respiratory chain of mitochondria and many bacteria. It couples the transfer of two electrons between NADH and ubiquinone to the translocation of four protons across the membrane. Complex I is an L-shaped assembly formed by the hydrophilic (peripheral) arm, containing all the redox centres performing electron transfer and the membrane arm, containing proton-translocating machinery. Mitochondrial complex I consists of 44 subunits of about 1 MDa in total, whilst the prokaryotic enzyme is simpler and generally consists of 14 conserved “core” subunits. Recently we have determined the first atomic structure of the entire complex I, using the enzyme from Thermus thermophilus (536 kDa, 16 subunits, 9 Fe-S clusters, 64 TM helices). Structure suggests a unique coupling mechanism, with redox energy of electron transfer driving proton translocation via long-range (up to ~200 Å) conformational changes. It resembles a steam engine, with coupling elements (akin to coupling rods) linking parts of this molecular machine.},
author = {Sazanov, Leonid},
journal = {Journal of Bioenergetics and Biomembranes},
number = {4},
pages = {247 -- 253},
publisher = {Springer},
title = {{The mechanism of coupling between electron transfer and proton translocation in respiratory complex I}},
doi = {10.1007/s10863-014-9554-z},
volume = {46},
year = {2014},
}
@article{1980,
abstract = {Non-proton pumping type II NADH dehydrogenase (NDH-2) plays a central role in the respiratory metabolism of bacteria, and in the mitochondria of fungi, plants and protists. The lack of NDH-2 in mammalian mitochondria and its essentiality in important bacterial pathogens suggests these enzymes may represent a potential new drug target to combat microbial pathogens. Here, we report the first crystal structure of a bacterial NDH-2 enzyme at 2.5Å resolution from Caldalkalibacillus thermarum. The NDH-2 structure reveals a homodimeric organization that has a unique dimer interface. NDH-2 is localized to the cytoplasmic membrane by two separated C-terminal membrane-anchoring regions that are essential for membrane localization and FAD binding, but not NDH-2 dimerization. Comparison of bacterial NDH-2 with the yeast NADH dehydrogenase (Ndi1) structure revealed non-overlapping binding sites for quinone and NADH in the bacterial enzyme. The bacterial NDH-2 structure establishes a framework for the structure-based design of small-molecule inhibitors.},
author = {Heikal, Adam and Nakatani, Yoshio and Dunn, Elyse A and Weimar, Marion R and Day, Catherine and Baker, Edward N and Lott, Shaun J and Sazanov, Leonid and Cook, Gregory},
journal = {Molecular Microbiology},
number = {5},
pages = {950 -- 964},
publisher = {Wiley-Blackwell},
title = {{Structure of the bacterial type II NADH dehydrogenase: a monotopic membrane protein with an essential role in energy generation}},
doi = {10.1111/mmi.12507},
volume = {91},
year = {2014},
}
@article{1981,
abstract = {Variation in mitochondrial DNA is often assumed to be neutral and is used to construct the genealogical relationships among populations and species. However, if extant variation is the result of episodes of positive selection, these genealogies may be incorrect, although this information itself may provide biologically and evolutionary meaningful information. In fact, positive Darwinian selection has been detected in the mitochondrial-encoded subunits that comprise complex I from diverse taxa with seemingly dissimilar bioenergetic life histories, but the functional implications of the selected sites are unknown. Complex I produces roughly 40% of the proton flux that is used to synthesize ATP from ADP, and a functional model based on the high-resolution structure of complex I described a unique biomechanical apparatus for proton translocation. We reported positive selection at sites in this apparatus during the evolution of Pacific salmon, and it appeared this was also the case in published reports from other taxa, but a comparison among studies was difficult because different statistical tests were used to detect selection and oftentimes, specific sites were not reported. Here we review the literature of positive selection in mitochondrial genomes, the statistical tests used to detect selection, and the structural and functional models that are currently available to study the physiological implications of selection. We then search for signatures of positive selection among the coding mitochondrial genomes of 237 species with a common set of tests and verify that the ND5 subunit of complex I is a repeated target of positive Darwinian selection in diverse taxa. We propose a novel hypothesis to explain the results based on their bioenergetic life histories and provide a guide for laboratory and field studies to test this hypothesis.},
author = {Garvin, Michael R and Bielawski, Joseph P and Sazanov, Leonid and Gharrett, Anthony J},
journal = {Journal of Zoological Systematics and Evolutionary Research},
number = {1},
pages = {1 -- 17},
publisher = {Wiley-Blackwell},
title = {{Review and meta-analysis of natural selection in mitochondrial complex I in metazoans}},
doi = {10.1111/jzs.12079},
volume = {53},
year = {2014},
}
@article{1989,
abstract = {During animal cell division, the cleavage furrow is positioned by microtubules that signal to the actin cortex at the cell midplane. We developed a cell-free system to recapitulate cytokinesis signaling using cytoplasmic extract from Xenopus eggs. Microtubules grew out as asters from artificial centrosomes and met to organize antiparallel overlap zones. These zones blocked the interpenetration of neighboring asters and recruited cytokinesis midzone proteins, including the chromosomal passenger complex (CPC) and centralspindlin. The CPC was transported to overlap zones, which required two motor proteins, Kif4A and a Kif20A paralog. Using supported lipid bilayers to mimic the plasma membrane, we observed the recruitment of cleavage furrow markers, including an active RhoA reporter, at microtubule overlaps. This system opens further approaches to understanding the biophysics of cytokinesis signaling.},
author = {Nguyen, Phuong A and Groen, Aaron C and Loose, Martin and Ishihara, Keisuke and Wühr, Martin and Field, Christine M and Mitchison, Timothy J},
journal = {Science},
number = {6206},
pages = {244 -- 247},
publisher = {American Association for the Advancement of Science},
title = {{Spatial organization of cytokinesis signaling reconstituted in a cell-free system}},
doi = {10.1126/science.1256773},
volume = {346},
year = {2014},
}
@article{1990,
abstract = {Bacterial cytokinesis is commonly initiated by the Z-ring, a cytoskeletal structure that assembles at the site of division. Its primary component is FtsZ, a tubulin superfamily GTPase, which is recruited to the membrane by the actin-related protein FtsA. Both proteins are required for the formation of the Z-ring, but if and how they influence each other's assembly dynamics is not known. Here, we reconstituted FtsA-dependent recruitment of FtsZ polymers to supported membranes, where both proteins self-organize into complex patterns, such as fast-moving filament bundles and chirally rotating rings. Using fluorescence microscopy and biochemical perturbations, we found that these large-scale rearrangements of FtsZ emerge from its polymerization dynamics and a dual, antagonistic role of FtsA: recruitment of FtsZ filaments to the membrane and negative regulation of FtsZ organization. Our findings provide a model for the initial steps of bacterial cell division and illustrate how dynamic polymers can self-organize into large-scale structures.},
author = {Loose, Martin and Mitchison, Timothy J},
journal = {Nature Cell Biology},
number = {1},
pages = {38 -- 46},
publisher = {Nature Publishing Group},
title = {{The bacterial cell division proteins ftsA and ftsZ self-organize into dynamic cytoskeletal patterns}},
doi = {10.1038/ncb2885},
volume = {16},
year = {2014},
}
@misc{2007,
author = {Klimova, Anna and Rudas, Tamás},
publisher = {The Comprehensive R Archive Network},
title = {{gIPFrm: Generalized iterative proportional fitting for relational models}},
year = {2014},
}
@inproceedings{2012,
abstract = {The classical sphere packing problem asks for the best (infinite) arrangement of non-overlapping unit balls which cover as much space as possible. We define a generalized version of the problem, where we allow each ball a limited amount of overlap with other balls. We study two natural choices of overlap measures and obtain the optimal lattice packings in a parameterized family of lattices which contains the FCC, BCC, and integer lattice.},
author = {Iglesias Ham, Mabel and Kerber, Michael and Uhler, Caroline},
location = {Halifax, Canada},
pages = {155 -- 161},
publisher = {Unknown},
title = {{Sphere packing with limited overlap}},
year = {2014},
}