@inproceedings{2157,
  abstract  = {We show that the following algorithmic problem is decidable: given a 2-dimensional simplicial complex, can it be embedded (topologically, or equivalently, piecewise linearly) in ℝ3? By a known reduction, it suffices to decide the embeddability of a given triangulated 3-manifold X into the 3-sphere S3. The main step, which allows us to simplify X and recurse, is in proving that if X can be embedded in S3, then there is also an embedding in which X has a short meridian, i.e., an essential curve in the boundary of X bounding a disk in S3 ∖ X with length bounded by a computable function of the number of tetrahedra of X.},
  author    = {Matoušek, Jiří and Sedgwick, Eric and Tancer, Martin and Wagner, Uli},
  booktitle = {Proceedings of the Annual Symposium on Computational Geometry},
  location  = {Kyoto, Japan},
  pages     = {78 -- 84},
  publisher = {ACM},
  title     = {{Embeddability in the 3-sphere is decidable}},
  doi       = {10.1145/2582112.2582137},
  year      = {2014},
}
@article{2158,
  abstract  = {Directional guidance of migrating cells is relatively well explored in the reductionist setting of cell culture experiments. Here spatial gradients of chemical cues as well as gradients of mechanical substrate characteristics prove sufficient to attract single cells as well as their collectives. How such gradients present and act in the context of an organism is far less clear. Here we review recent advances in understanding how guidance cues emerge and operate in the physiological context.},
  author    = {Majumdar, Ritankar and Sixt, Michael K and Parent, Carole},
  journal   = {Current Opinion in Cell Biology},
  number    = {1},
  pages     = {33 -- 40},
  publisher = {Elsevier},
  title     = {{New paradigms in the establishment and maintenance of gradients during directed cell migration}},
  doi       = {10.1016/j.ceb.2014.05.010},
  volume    = {30},
  year      = {2014},
}
@inproceedings{2160,
  abstract  = {Transfer learning has received a lot of attention in the machine learning community over the last years, and several effective algorithms have been developed. However, relatively little is known about their theoretical properties, especially in the setting of lifelong learning, where the goal is to transfer information to tasks for which no data have been observed so far. In this work we study lifelong learning from a theoretical perspective. Our main result is a PAC-Bayesian generalization bound that offers a unified view on existing paradigms for transfer learning, such as the transfer of parameters or the transfer of low-dimensional representations. We also use the bound to derive two principled lifelong learning algorithms, and we show that these yield results comparable with existing methods.},
  author    = {Pentina, Anastasia and Lampert, Christoph},
  booktitle = {Proceedings of the 31st International Conference on Machine Learning},
  editor    = {Xing, Eric and Jebara, Tony},
  location  = {Beijing, China},
  pages     = {991 -- 999},
  publisher = {Omnipress},
  title     = {{A PAC-Bayesian bound for Lifelong Learning}},
  volume    = {32},
  year      = {2014},
}
@article{2161,
  abstract  = {Repeated pathogen exposure is a common threat in colonies of social insects, posing selection pressures on colony members to respond with improved disease-defense performance. We here tested whether experience gained by repeated tending of low-level fungus-exposed (Metarhizium robertsii) larvae may alter the performance of sanitary brood care in the clonal ant, Platythyrea punctata. We trained ants individually over nine consecutive trials to either sham-treated or fungus-exposed larvae. We then compared the larval grooming behavior of naive and trained ants and measured how effectively they removed infectious fungal conidiospores from the fungus-exposed larvae. We found that the ants changed the duration of larval grooming in response to both, larval treatment and their level of experience: (1) sham-treated larvae received longer grooming than the fungus-exposed larvae and (2) trained ants performed less self-grooming but longer larval grooming than naive ants, which was true for both, ants trained to fungus-exposed and also to sham-treated larvae. Ants that groomed the fungus-exposed larvae for longer periods removed a higher number of fungal conidiospores from the surface of the fungus-exposed larvae. As experienced ants performed longer larval grooming, they were more effective in fungal removal, thus making them better caretakers under pathogen attack of the colony. By studying this clonal ant, we can thus conclude that even in the absence of genetic variation between colony members, differences in experience levels of brood care may affect performance of sanitary brood care in social insects.},
  author    = {Westhus, Claudia and Ugelvig, Line V and Tourdot, Edouard and Heinze, Jürgen and Doums, Claudie and Cremer, Sylvia},
  journal   = {Behavioral Ecology and Sociobiology},
  number    = {10},
  pages     = {1701 -- 1710},
  publisher = {Springer},
  title     = {{Increased grooming after repeated brood care provides sanitary benefits in a clonal ant}},
  doi       = {10.1007/s00265-014-1778-8},
  volume    = {68},
  year      = {2014},
}
@inproceedings{2162,
  abstract  = {We study two-player (zero-sum) concurrent mean-payoff games played on a finite-state graph. We focus on the important sub-class of ergodic games where all states are visited infinitely often with probability 1. The algorithmic study of ergodic games was initiated in a seminal work of Hoffman and Karp in 1966, but all basic complexity questions have remained unresolved. Our main results for ergodic games are as follows: We establish (1) an optimal exponential bound on the patience of stationary strategies (where patience of a distribution is the inverse of the smallest positive probability and represents a complexity measure of a stationary strategy); (2) the approximation problem lies in FNP; (3) the approximation problem is at least as hard as the decision problem for simple stochastic games (for which NP ∩ coNP is the long-standing best known bound). We present a variant of the strategy-iteration algorithm by Hoffman and Karp; show that both our algorithm and the classical value-iteration algorithm can approximate the value in exponential time; and identify a subclass where the value-iteration algorithm is a FPTAS. We also show that the exact value can be expressed in the existential theory of the reals, and establish square-root sum hardness for a related class of games.},
  author    = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus},
  booktitle = {Lecture Notes in Computer Science},
  location  = {Copenhagen, Denmark},
  number    = {Part 2},
  pages     = {122 -- 133},
  publisher = {Springer},
  title     = {{The complexity of ergodic mean payoff games}},
  doi       = {10.1007/978-3-662-43951-7_11},
  volume    = {8573},
  year      = {2014},
}
@inproceedings{2163,
  abstract  = {We consider multi-player graph games with partial-observation and parity objective. While the decision problem for three-player games with a coalition of the first and second players against the third player is undecidable in general, we present a decidability result for partial-observation games where the first and third player are in a coalition against the second player, thus where the second player is adversarial but weaker due to partial-observation. We establish tight complexity bounds in the case where player 1 is less informed than player 2, namely 2-EXPTIME-completeness for parity objectives. The symmetric case of player 1 more informed than player 2 is much more complicated, and we show that already in the case where player 1 has perfect observation, memory of size non-elementary is necessary in general for reachability objectives, and the problem is decidable for safety and reachability objectives. From our results we derive new complexity results for partial-observation stochastic games.},
  author    = {Chatterjee, Krishnendu and Doyen, Laurent},
  booktitle = {Lecture Notes in Computer Science},
  location  = {Copenhagen, Denmark},
  number    = {Part 2},
  pages     = {110 -- 121},
  publisher = {Springer},
  title     = {{Games with a weak adversary}},
  doi       = {10.1007/978-3-662-43951-7_10},
  volume    = {8573},
  year      = {2014},
}
@article{2164,
  abstract  = {Neuronal ectopia, such as granule cell dispersion (GCD) in temporal lobe epilepsy (TLE), has been assumed to result from a migration defect during development. Indeed, recent studies reported that aberrant migration of neonatal-generated dentate granule cells (GCs) increased the risk to develop epilepsy later in life. On the contrary, in the present study, we show that fully differentiated GCs become motile following the induction of epileptiform activity, resulting in GCD. Hippocampal slice cultures from transgenic mice expressing green fluorescent protein in differentiated, but not in newly generated GCs, were incubated with the glutamate receptor agonist kainate (KA), which induced GC burst activity and GCD. Using real-time microscopy, we observed that KA-exposed, differentiated GCs translocated their cell bodies and changed their dendritic organization. As found in human TLE, KA application was associated with decreased expression of the extracellular matrix protein Reelin, particularly in hilar interneurons. Together these findings suggest that KA-induced motility of differentiated GCs contributes to the development of GCD and establish slice cultures as a model to study neuronal changes induced by epileptiform activity. },
  author    = {Chai, Xuejun and Münzner, Gert and Zhao, Shanting and Tinnes, Stefanie and Kowalski, Janina and Häussler, Ute and Young, Christina and Haas, Carola and Frotscher, Michael},
  journal   = {Cerebral Cortex},
  number    = {8},
  pages     = {2130 -- 2140},
  publisher = {Oxford University Press},
  title     = {{Epilepsy-induced motility of differentiated neurons}},
  doi       = {10.1093/cercor/bht067},
  volume    = {24},
  year      = {2014},
}
@inproceedings{2167,
  abstract  = {Model-based testing is a promising technology for black-box software and hardware testing, in which test cases are generated automatically from high-level specifications. Nowadays, systems typically consist of multiple interacting components and, due to their complexity, testing presents a considerable portion of the effort and cost in the design process. Exploiting the compositional structure of system specifications can considerably reduce the effort in model-based testing. Moreover, inferring properties about the system from testing its individual components allows the designer to reduce the amount of integration testing. In this paper, we study compositional properties of the ioco-testing theory. We propose a new approach to composition and hiding operations, inspired by contract-based design and interface theories. These operations preserve behaviors that are compatible under composition and hiding, and prune away incompatible ones. The resulting specification characterizes the input sequences for which the unit testing of components is sufficient to infer the correctness of component integration without the need for further tests. We provide a methodology that uses these results to minimize integration testing effort, but also to detect potential weaknesses in specifications. While we focus on asynchronous models and the ioco conformance relation, the resulting methodology can be applied to a broader class of systems.},
  author    = {Daca, Przemyslaw and Henzinger, Thomas A and Krenn, Willibald and Nickovic, Dejan},
  booktitle = {IEEE 7th International Conference on Software Testing, Verification and Validation},
  isbn      = {978-1-4799-2255-0},
  issn      = {2159-4848},
  location  = {Cleveland, USA},
  publisher = {IEEE},
  title     = {{Compositional specifications for IOCO testing}},
  doi       = {10.1109/ICST.2014.50},
  year      = {2014},
}
@article{2168,
  abstract  = {Many species have an essentially continuous distribution in space, in which there are no natural divisions between randomly mating subpopulations. Yet, the standard approach to modelling these populations is to impose an arbitrary grid of demes, adjusting deme sizes and migration rates in an attempt to capture the important features of the population. Such indirect methods are required because of the failure of the classical models of isolation by distance, which have been shown to have major technical flaws. A recently introduced model of extinction and recolonisation in two dimensions solves these technical problems, and provides a rigorous technical foundation for the study of populations evolving in a spatial continuum. The coalescent process for this model is simply stated, but direct simulation is very inefficient for large neighbourhood sizes. We present efficient and exact algorithms to simulate this coalescent process for arbitrary sample sizes and numbers of loci, and analyse these algorithms in detail.},
  author    = {Kelleher, Jerome and Etheridge, Alison and Barton, Nicholas H},
  journal   = {Theoretical Population Biology},
  pages     = {13 -- 23},
  publisher = {Academic Press},
  title     = {{Coalescent simulation in continuous space: Algorithms for large neighbourhood size}},
  doi       = {10.1016/j.tpb.2014.05.001},
  volume    = {95},
  year      = {2014},
}
@article{2169,
  author    = {Barton, Nicholas H and Novak, Sebastian and Paixao, Tiago},
  journal   = {Proceedings of the National Academy of Sciences},
  number    = {29},
  pages     = {10398 -- 10399},
  publisher = {National Academy of Sciences},
  title     = {{Diverse forms of selection in evolution and computer science}},
  doi       = {10.1073/pnas.1410107111},
  volume    = {111},
  year      = {2014},
}
@article{2170,
  abstract  = { Short-read sequencing technologies have in principle made it feasible to draw detailed inferences about the recent history of any organism. In practice, however, this remains challenging due to the difficulty of genome assembly in most organisms and the lack of statistical methods powerful enough to discriminate between recent, nonequilibrium histories. We address both the assembly and inference challenges. We develop a bioinformatic pipeline for generating outgroup-rooted alignments of orthologous sequence blocks from de novo low-coverage short-read data for a small number of genomes, and show how such sequence blocks can be used to fit explicit models of population divergence and admixture in a likelihood framework. To illustrate our approach, we reconstruct the Pleistocene history of an oak-feeding insect (the oak gallwasp Biorhiza pallida), which, in common with many other taxa, was restricted during Pleistocene ice ages to a longitudinal series of southern refugia spanning the Western Palaearctic. Our analysis of sequence blocks sampled from a single genome from each of three major glacial refugia reveals support for an unexpected history dominated by recent admixture. Despite the fact that 80% of the genome is affected by admixture during the last glacial cycle, we are able to infer the deeper divergence history of these populations. These inferences are robust to variation in block length, mutation model and the sampling location of individual genomes within refugia. This combination of de novo assembly and numerical likelihood calculation provides a powerful framework for estimating recent population history that can be applied to any organism without the need for prior genetic resources.},
  author    = {Hearn, Jack and Stone, Graham and Bunnefeld, Lynsey and Nicholls, James and Barton, Nicholas H and Lohse, Konrad},
  journal   = {Molecular Ecology},
  number    = {1},
  pages     = {198 -- 211},
  publisher = {Wiley-Blackwell},
  title     = {{Likelihood-based inference of population history from low-coverage de novo genome assemblies}},
  doi       = {10.1111/mec.12578},
  volume    = {23},
  year      = {2014},
}
@inproceedings{2171,
  abstract  = {We present LS-CRF, a new method for training cyclic Conditional Random Fields (CRFs) from large datasets that is inspired by classical closed-form expressions for the maximum likelihood parameters of a generative graphical model with tree topology. Training a CRF with LS-CRF requires only solving a set of independent regression problems, each of which can be solved efficiently in closed form or by an iterative solver. This makes LS-CRF orders of magnitude faster than classical CRF training based on probabilistic inference, and at the same time more flexible and easier to implement than other approximate techniques, such as pseudolikelihood or piecewise training. We apply LS-CRF to the task of semantic image segmentation, showing that it achieves on par accuracy to other training techniques at higher speed, thereby allowing efficient CRF training from very large training sets. For example, training a linearly parameterized pairwise CRF on 150,000 images requires less than one hour on a modern workstation.},
  author    = {Kolesnikov, Alexander and Guillaumin, Matthieu and Ferrari, Vittorio and Lampert, Christoph},
  booktitle = {Lecture Notes in Computer Science},
  editor    = {Fleet, David and Pajdla, Tomas and Schiele, Bernt and Tuytelaars, Tinne},
  location  = {Zurich, Switzerland},
  number    = {Part 3},
  pages     = {550 -- 565},
  publisher = {Springer},
  title     = {{Closed-form approximate CRF training for scalable image segmentation}},
  doi       = {10.1007/978-3-319-10578-9_36},
  volume    = {8691},
  year      = {2014},
}
@inproceedings{2172,
  abstract  = {Fisher Kernels and Deep Learning were two developments with significant impact on large-scale object categorization in the last years. Both approaches were shown to achieve state-of-the-art results on large-scale object categorization datasets, such as ImageNet. Conceptually, however, they are perceived as very different and it is not uncommon for heated debates to spring up when advocates of both paradigms meet at conferences or workshops. In this work, we emphasize the similarities between both architectures rather than their differences and we argue that such a unified view allows us to transfer ideas from one domain to the other. As a concrete example we introduce a method for learning a support vector machine classifier with Fisher kernel at the same time as a task-specific data representation. We reinterpret the setting as a multi-layer feed forward network. Its final layer is the classifier, parameterized by a weight vector, and the two previous layers compute Fisher vectors, parameterized by the coefficients of a Gaussian mixture model. We introduce a gradient descent based learning algorithm that, in contrast to other feature learning techniques, is not just derived from intuition or biological analogy, but has a theoretical justification in the framework of statistical learning theory. Our experiments show that the new training procedure leads to significant improvements in classification accuracy while preserving the modularity and geometric interpretability of a support vector machine setup.},
  author    = {Sydorov, Vladyslav and Sakurada, Mayu and Lampert, Christoph},
  booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
  location  = {Columbus, USA},
  pages     = {1402 -- 1409},
  publisher = {IEEE},
  title     = {{Deep Fisher Kernels – End to end learning of the Fisher Kernel GMM parameters}},
  doi       = {10.1109/CVPR.2014.182},
  year      = {2014},
}
@inproceedings{2173,
  abstract  = {In this work we introduce a new approach to co-classification, i.e. the task of jointly classifying multiple, otherwise independent, data samples. The method we present, named CoConut, is based on the idea of adding a regularizer in the label space to encode certain priors on the resulting labelings. A regularizer that encourages labelings that are smooth across the test set, for instance, can be seen as a test-time variant of the cluster assumption, which has been proven useful at training time in semi-supervised learning. A regularizer that introduces a preference for certain class proportions can be regarded as a prior distribution on the class labels. CoConut can build on existing classifiers without making any assumptions on how they were obtained and without the need to re-train them. The use of a regularizer adds a new level of flexibility. It allows the integration of potentially new information at test time, even in other modalities than what the classifiers were trained on. We evaluate our framework on six datasets, reporting a clear performance gain in classification accuracy compared to the standard classification setup that predicts labels for each test sample separately.},
  author    = {Khamis, Sameh and Lampert, Christoph},
  booktitle = {Proceedings of the British Machine Vision Conference 2014},
  location  = {Nottingham, UK},
  publisher = {BMVA Press},
  title     = {{CoConut: Co-classification with output space regularization}},
  year      = {2014},
}
@article{2174,
  abstract  = {When polygenic traits are under stabilizing selection, many different combinations of alleles allow close adaptation to the optimum. If alleles have equal effects, all combinations that result in the same deviation from the optimum are equivalent. Furthermore, the genetic variance that is maintained by mutation-selection balance is 2μ/S per locus, where μ is the mutation rate and S the strength of stabilizing selection. In reality, alleles vary in their effects, making the fitness landscape asymmetric and complicating analysis of the equilibria. We show that the resulting genetic variance depends on the fraction of alleles near fixation, which contribute by 2μ/S, and on the total mutational effects of alleles that are at intermediate frequency. The interplay between stabilizing selection and mutation leads to a sharp transition: alleles with effects smaller than a threshold value of 2 remain polymorphic, whereas those with larger effects are fixed. The genetic load in equilibrium is less than for traits of equal effects, and the fitness equilibria are more similar. We find that when the optimum is displaced, alleles with effects close to the threshold value sweep first, and their rate of increase is bounded. Long-term response leads in general to well-adapted traits, unlike the case of equal effects that often end up at a suboptimal fitness peak. However, the particular peaks to which the populations converge are extremely sensitive to the initial states and to the speed of the shift of the optimum trait value.},
  author    = {De Vladar, Harold and Barton, Nicholas H},
  journal   = {Genetics},
  number    = {2},
  pages     = {749 -- 767},
  publisher = {Genetics Society of America},
  title     = {{Stability and response of polygenic traits to stabilizing selection and mutation}},
  doi       = {10.1534/genetics.113.159111},
  volume    = {197},
  year      = {2014},
}
@article{2175,
  abstract  = {The cerebral cortex, the seat of our cognitive abilities, is composed of an intricate network of billions of excitatory projection and inhibitory interneurons. Postmitotic cortical neurons are generated by a diverse set of neural stem cell progenitors within dedicated zones and defined periods of neurogenesis during embryonic development. Disruptions in neurogenesis can lead to alterations in the neuronal cytoarchitecture, which is thought to represent a major underlying cause for several neurological disorders, including microcephaly, autism and epilepsy. Although a number of signaling pathways regulating neurogenesis have been described, the precise cellular and molecular mechanisms regulating the functional neural stem cell properties in cortical neurogenesis remain unclear. Here, we discuss the most up-to-date strategies to monitor the fundamental mechanistic parameters of neuronal progenitor proliferation, and recent advances deciphering the logic and dynamics of neurogenesis.},
  author    = {Postiglione, Maria P and Hippenmeyer, Simon},
  journal   = {Future Neurology},
  number    = {3},
  pages     = {323 -- 340},
  publisher = {Future Medicine Ltd.},
  title     = {{Monitoring neurogenesis in the cerebral cortex: an update}},
  doi       = {10.2217/fnl.14.18},
  volume    = {9},
  year      = {2014},
}
@article{2176,
  abstract  = {Electron microscopy (EM) allows for the simultaneous visualization of all tissue components at high resolution. However, the extent to which conventional aldehyde fixation and ethanol dehydration of the tissue alter the fine structure of cells and organelles, thereby preventing detection of subtle structural changes induced by an experiment, has remained an issue. Attempts have been made to rapidly freeze tissue to preserve native ultrastructure. Shock-freezing of living tissue under high pressure (high-pressure freezing, HPF) followed by cryosubstitution of the tissue water avoids aldehyde fixation and dehydration in ethanol; the tissue water is immobilized in approximately 50 ms, and a close-to-native fine structure of cells, organelles and molecules is preserved. Here we describe a protocol for HPF that is useful to monitor ultrastructural changes associated with functional changes at synapses in the brain but can be applied to many other tissues as well. The procedure requires a high-pressure freezer and takes a minimum of 7 d but can be paused at several points.},
  author    = {Studer, Daniel and Zhao, Shanting and Chai, Xuejun and Jonas, Peter M and Graber, Werner and Nestel, Sigrun and Frotscher, Michael},
  journal   = {Nature Protocols},
  number    = {6},
  pages     = {1480 -- 1495},
  publisher = {Nature Publishing Group},
  title     = {{Capture of activity-induced ultrastructural changes at synapses by high-pressure freezing of brain tissue}},
  doi       = {10.1038/nprot.2014.099},
  volume    = {9},
  year      = {2014},
}
@inproceedings{2177,
  abstract  = {We give evidence for the difficulty of computing Betti numbers of simplicial complexes over a finite field. We do this by reducing the rank computation for sparse matrices with m non-zero entries to computing Betti numbers of simplicial complexes consisting of at most a constant times m simplices. Together with the known reduction in the other direction, this implies that the two problems have the same computational complexity.},
  author    = {Edelsbrunner, Herbert and Parsa, Salman},
  booktitle = {Proceedings of the Annual ACM-SIAM Symposium on Discrete Algorithms},
  location  = {Portland, USA},
  pages     = {152 -- 160},
  publisher = {SIAM},
  title     = {{On the computational complexity of Betti numbers: Reductions from matrix rank}},
  doi       = {10.1137/1.9781611973402.11},
  year      = {2014},
}
@article{2178,
  abstract  = {We consider the three-state toric homogeneous Markov chain model (THMC) without loops and initial parameters. At time T, the size of the design matrix is 6 × 3 · 2T-1 and the convex hull of its columns is the model polytope. We study the behavior of this polytope for T ≥ 3 and we show that it is defined by 24 facets for all T ≥ 5. Moreover, we give a complete description of these facets. From this, we deduce that the toric ideal associated with the design matrix is generated by binomials of degree at most 6. Our proof is based on a result due to Sturmfels, who gave a bound on the degree of the generators of a toric ideal, provided the normality of the corresponding toric variety. In our setting, we established the normality of the toric variety associated to the THMC model by studying the geometric properties of the model polytope.},
  author    = {Haws, David and Martin Del Campo Sanchez, Abraham and Takemura, Akimichi and Yoshida, Ruriko},
  journal   = {Beitrage zur Algebra und Geometrie},
  number    = {1},
  pages     = {161 -- 188},
  publisher = {Springer},
  title     = {{Markov degree of the three-state toric homogeneous Markov chain model}},
  doi       = {10.1007/s13366-013-0178-y},
  volume    = {55},
  year      = {2014},
}
@article{2179,
  abstract  = {We extend the proof of the local semicircle law for generalized Wigner matrices given in MR3068390 to the case when the matrix of variances has an eigenvalue -1. In particular, this result provides a short proof of the optimal local Marchenko-Pastur law at the hard edge (i.e. around zero) for sample covariance matrices X*X, where the variances of the entries of X may vary.},
  author    = {Ajanki, Oskari H and Erdös, László and Krüger, Torben H},
  journal   = {Electronic Communications in Probability},
  publisher = {Institute of Mathematical Statistics},
  title     = {{Local semicircle law with imprimitive variance matrix}},
  doi       = {10.1214/ECP.v19-3121},
  volume    = {19},
  year      = {2014},
}
@article{2180,
  abstract  = {Weighted majority votes allow one to combine the output of several classifiers or voters. MinCq is a recent algorithm for optimizing the weight of each voter based on the minimization of a theoretical bound over the risk of the vote with elegant PAC-Bayesian generalization guarantees. However, while it has demonstrated good performance when combining weak classifiers, MinCq cannot make use of the useful a priori knowledge that one may have when using a mixture of weak and strong voters. In this paper, we propose P-MinCq, an extension of MinCq that can incorporate such knowledge in the form of a constraint over the distribution of the weights, along with general proofs of convergence that stand in the sample compression setting for data-dependent voters. The approach is applied to a vote of k-NN classifiers with a specific modeling of the voters' performance. P-MinCq significantly outperforms the classic k-NN classifier, a symmetric NN and MinCq using the same voters. We show that it is also competitive with LMNN, a popular metric learning algorithm, and that combining both approaches further reduces the error.},
  author    = {Bellet, Aurélien and Habrard, Amaury and Morvant, Emilie and Sebban, Marc},
  journal   = {Machine Learning},
  number    = {1-2},
  pages     = {129 -- 154},
  publisher = {Springer},
  title     = {{Learning a priori constrained weighted majority votes}},
  doi       = {10.1007/s10994-014-5462-z},
  volume    = {97},
  year      = {2014},
}
@article{2184,
  abstract  = {Given topological spaces X,Y, a fundamental problem of algebraic topology is understanding the structure of all continuous maps X→ Y. We consider a computational version, where X,Y are given as finite simplicial complexes, and the goal is to compute [X,Y], that is, all homotopy classes of such maps. We solve this problem in the stable range, where for some d ≥ 2, we have dim X ≤ 2d-2 and Y is (d-1)-connected; in particular, Y can be the d-dimensional sphere Sd. The algorithm combines classical tools and ideas from homotopy theory (obstruction theory, Postnikov systems, and simplicial sets) with algorithmic tools from effective algebraic topology (locally effective simplicial sets and objects with effective homology). In contrast, [X,Y] is known to be uncomputable for general X,Y, since for X = S1 it includes a well known undecidable problem: testing triviality of the fundamental group of Y. In follow-up papers, the algorithm is shown to run in polynomial time for d fixed, and extended to other problems, such as the extension problem, where we are given a subspace A ⊂ X and a map A→ Y and ask whether it extends to a map X → Y, or computing the Z2-index — everything in the stable range. Outside the stable range, the extension problem is undecidable.},
  author    = {Čadek, Martin and Krcál, Marek and Matoušek, Jiří and Sergeraert, Francis and Vokřínek, Lukáš and Wagner, Uli},
  journal   = {Journal of the ACM},
  number    = {3},
  publisher = {ACM},
  title     = {{Computing all maps into a sphere}},
  doi       = {10.1145/2597629},
  volume    = {61},
  year      = {2014},
}
@inproceedings{2185,
abstract = {We revisit the classical problem of converting an imperfect source of randomness into a usable cryptographic key. Assume that we have some cryptographic application P that expects a uniformly random m-bit key R and ensures that the best attack (in some complexity class) against P(R) has success probability at most δ. Our goal is to design a key-derivation function (KDF) h that converts any random source X of min-entropy k into a sufficiently "good" key h(X), guaranteeing that P(h(X)) has comparable security δ′ which is 'close' to δ. Seeded randomness extractors provide a generic way to solve this problem for all applications P, with resulting security δ′ = O(δ), provided that we start with entropy k ≥ m + 2 log (1/δ) - O(1). By a result of Radhakrishnan and Ta-Shma, this bound on k (called the "RT-bound") is also known to be tight in general. Unfortunately, in many situations the loss of 2 log (1/δ) bits of entropy is unacceptable. This motivates the study KDFs with less entropy waste by placing some restrictions on the source X or the application P. In this work we obtain the following new positive and negative results in this regard: - Efficient samplability of the source X does not help beat the RT-bound for general applications. This resolves the SRT (samplable RT) conjecture of Dachman-Soled et al. [DGKM12] in the affirmative, and also shows that the existence of computationally-secure extractors beating the RT-bound implies the existence of one-way functions. - We continue in the line of work initiated by Barak et al. [BDK+11] and construct new information-theoretic KDFs which beat the RT-bound for large but restricted classes of applications. Specifically, we design efficient KDFs that work for all unpredictability applications P (e.g., signatures, MACs, one-way functions, etc.) 
and can either: (1) extract all of the entropy k = m with a very modest security loss δ′ = O(δ·log (1/δ)), or alternatively, (2) achieve essentially optimal security δ′ = O(δ) with a very modest entropy loss k ≥ m + loglog (1/δ). In comparison, the best prior results from [BDK+11] for this class of applications would only guarantee δ′ = O(√δ) when k = m, and would need k ≥ m + log (1/δ) to get δ′ = O(δ). - The weaker bounds of [BDK+11] hold for a larger class of so-called "square- friendly" applications (which includes all unpredictability, but also some important indistinguishability, applications). Unfortunately, we show that these weaker bounds are tight for the larger class of applications. - We abstract out a clean, information-theoretic notion of (k,δ,δ′)- unpredictability extractors, which guarantee "induced" security δ′ for any δ-secure unpredictability application P, and characterize the parameters achievable for such unpredictability extractors. Of independent interest, we also relate this notion to the previously-known notion of (min-entropy) condensers, and improve the state-of-the-art parameters for such condensers.},
author = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Wichs, Daniel},
editor = {Nguyen, Phong and Oswald, Elisabeth},
booktitle = {Advances in Cryptology -- EUROCRYPT 2014},
location = {Copenhagen, Denmark},
pages = {93 -- 110},
publisher = {Springer},
title = {{Key derivation without entropy waste}},
doi = {10.1007/978-3-642-55220-5_6},
volume = {8441},
year = {2014},
}
@article{2186,
abstract = {We prove the existence of scattering states for the defocusing cubic Gross-Pitaevskii (GP) hierarchy in ℝ3. Moreover, we show that an exponential energy growth condition commonly used in the well-posedness theory of the GP hierarchy is, in a specific sense, necessary. In fact, we prove that without the latter, there exist initial data for the focusing cubic GP hierarchy for which instantaneous blowup occurs.},
author = {Chen, Thomas and Hainzl, Christian and Pavlović, Nataša and Seiringer, Robert},
journal = {Letters in Mathematical Physics},
number = {7},
pages = {871 -- 891},
publisher = {Springer},
title = {{On the well-posedness and scattering for the Gross-Pitaevskii hierarchy via quantum de Finetti}},
doi = {10.1007/s11005-014-0693-2},
volume = {104},
year = {2014},
}
@article{2187,
abstract = {Systems should not only be correct but also robust in the sense that they behave reasonably in unexpected situations. This article addresses synthesis of robust reactive systems from temporal specifications. Existing methods allow arbitrary behavior if assumptions in the specification are violated. To overcome this, we define two robustness notions, combine them, and show how to enforce them in synthesis. The first notion applies to safety properties: If safety assumptions are violated temporarily, we require that the system recovers to normal operation with as few errors as possible. The second notion requires that, if liveness assumptions are violated, as many guarantees as possible should be fulfilled nevertheless. We present a synthesis procedure achieving this for the important class of GR(1) specifications, and establish complexity bounds. We also present an implementation of a special case of robustness, and show experimental results.},
author = {Bloem, Roderick and Chatterjee, Krishnendu and Greimel, Karin and Henzinger, Thomas A and Hofferek, Georg and Jobstmann, Barbara and Könighofer, Bettina and Könighofer, Robert},
journal = {Acta Informatica},
number = {3-4},
pages = {193 -- 220},
publisher = {Springer},
title = {{Synthesizing robust systems}},
doi = {10.1007/s00236-013-0191-5},
volume = {51},
year = {2014},
}
@article{2188,
abstract = {Although plant and animal cells use a similar core mechanism to deliver proteins to the plasma membrane, their different lifestyle, body organization and specific cell structures resulted in the acquisition of regulatory mechanisms that vary in the two kingdoms. In particular, cell polarity regulators do not seem to be conserved, because genes encoding key components are absent in plant genomes. In plants, the broad knowledge on polarity derives from the study of auxin transporters, the PIN-FORMED proteins, in the model plant Arabidopsis thaliana. In animals, much information is provided from the study of polarity in epithelial cells that exhibit basolateral and luminal apical polarities, separated by tight junctions. In this review, we summarize the similarities and differences of the polarization mechanisms between plants and animals and survey the main genetic approaches that have been used to characterize new genes involved in polarity establishment in plants, including the frequently used forward and reverse genetics screens as well as a novel chemical genetics approach that is expected to overcome the limitation of classical genetics methods.},
author = {Kania, Urszula and Fendrych, Matyas and Friml, Jiří},
journal = {Open Biology},
number = {4},
publisher = {Royal Society},
title = {{Polar delivery in plants; commonalities and differences to animal epithelial cells}},
doi = {10.1098/rsob.140017},
volume = {4},
year = {2014},
}
@inproceedings{2189,
abstract = {En apprentissage automatique, nous parlons d'adaptation de domaine lorsque les données de test (cibles) et d'apprentissage (sources) sont générées selon différentes distributions. Nous devons donc développer des algorithmes de classification capables de s'adapter à une nouvelle distribution, pour laquelle aucune information sur les étiquettes n'est disponible. Nous attaquons cette problématique sous l'angle de l'approche PAC-Bayésienne qui se focalise sur l'apprentissage de modèles définis comme des votes de majorité sur un ensemble de fonctions. Dans ce contexte, nous introduisons PV-MinCq une version adaptative de l'algorithme (non adaptatif) MinCq. PV-MinCq suit le principe suivant. Nous transférons les étiquettes sources aux points cibles proches pour ensuite appliquer MinCq sur l'échantillon cible ``auto-étiqueté'' (justifié par une borne théorique). Plus précisément, nous définissons un auto-étiquetage non itératif qui se focalise dans les régions où les distributions marginales source et cible sont les plus similaires. Dans un second temps, nous étudions l'influence de notre auto-étiquetage pour en déduire une procédure de validation des hyperparamètres. Finalement, notre approche montre des résultats empiriques prometteurs.},
author = {Morvant, Emilie},
location = {Saint-Etienne, France},
pages = {49 -- 58},
publisher = {Elsevier},
title = {{Adaptation de domaine de vote de majorité par auto-étiquetage non itératif}},
volume = {1},
year = {2014},
}
@inproceedings{2190,
abstract = {We present a new algorithm to construct a (generalized) deterministic Rabin automaton for an LTL formula φ. The automaton is the product of a master automaton and an array of slave automata, one for each G-subformula of φ. The slave automaton for G ψ is in charge of recognizing whether FG ψ holds. As opposed to standard determinization procedures, the states of all our automata have a clear logical structure, which allows for various optimizations. Our construction subsumes former algorithms for fragments of LTL. Experimental results show improvement in the sizes of the resulting automata compared to existing methods.},
author = {Esparza, Javier and Křetínský, Jan},
booktitle = {Computer Aided Verification -- CAV 2014},
pages = {192 -- 208},
publisher = {Springer},
title = {{From LTL to deterministic automata: A safraless compositional approach}},
doi = {10.1007/978-3-319-08867-9_13},
volume = {8559},
year = {2014},
}
@article{2211,
abstract = {In two-player finite-state stochastic games of partial observation on graphs, in every state of the graph, the players simultaneously choose an action, and their joint actions determine a probability distribution over the successor states. The game is played for infinitely many rounds and thus the players construct an infinite path in the graph. We consider reachability objectives where the first player tries to ensure a target state to be visited almost-surely (i.e., with probability 1) or positively (i.e., with positive probability), no matter the strategy of the second player. We classify such games according to the information and to the power of randomization available to the players. On the basis of information, the game can be one-sided with either (a) player 1, or (b) player 2 having partial observation (and the other player has perfect observation), or two-sided with (c) both players having partial observation. On the basis of randomization, (a) the players may not be allowed to use randomization (pure strategies), or (b) they may choose a probability distribution over actions but the actual random choice is external and not visible to the player (actions invisible), or (c) they may use full randomization. Our main results for pure strategies are as follows: (1) For one-sided games with player 2 having perfect observation we show that (in contrast to full randomized strategies) belief-based (subset-construction based) strategies are not sufficient, and we present an exponential upper bound on memory both for almost-sure and positive winning strategies; we show that the problem of deciding the existence of almost-sure and positive winning strategies for player 1 is EXPTIME-complete and present symbolic algorithms that avoid the explicit exponential construction. (2) For one-sided games with player 1 having perfect observation we show that nonelementarymemory is both necessary and sufficient for both almost-sure and positive winning strategies. 
(3) We show that for the general (two-sided) case finite-memory strategies are sufficient for both positive and almost-sure winning, and at least nonelementary memory is required. We establish the equivalence of the almost-sure winning problems for pure strategies and for randomized strategies with actions invisible. Our equivalence result exhibits serious flaws in previous results of the literature: we show a nonelementary memory lower bound for almost-sure winning whereas an exponential upper bound was previously claimed.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {2},
publisher = {ACM},
title = {{Partial-observation stochastic games: How to win when belief fails}},
doi = {10.1145/2579821},
volume = {15},
year = {2014},
}
@inproceedings{2212,
abstract = {The theory of graph games is the foundation for modeling and synthesizing reactive processes. In the synthesis of stochastic processes, we use 2 1/2-player games where some transitions of the game graph are controlled by two adversarial players, the System and the Environment, and the other transitions are determined probabilistically. We consider 2 1/2-player games where the objective of the System is the conjunction of a qualitative objective (specified as a parity condition) and a quantitative objective (specified as a mean-payoff condition). We establish that the problem of deciding whether the System can ensure that the probability to satisfy the mean-payoff parity objective is at least a given threshold is in NP ∩ coNP, matching the best known bound in the special case of 2-player games (where all transitions are deterministic). We present an algorithm running in time O(d·n2d·MeanGame) to compute the set of almost-sure winning states from which the objective can be ensured with probability 1, where n is the number of states of the game, d the number of priorities of the parity objective, and MeanGame is the complexity to compute the set of almost-sure winning states in 2 1/2-player mean-payoff games. Our results are useful in the synthesis of stochastic reactive systems with both functional requirement (given as a qualitative objective) and performance requirement (given as a quantitative objective). },
author = {Chatterjee, Krishnendu and Doyen, Laurent and Gimbert, Hugo and Oualhadj, Youssouf},
booktitle = {Foundations of Software Science and Computation Structures -- FoSSaCS 2014},
location = {Grenoble, France},
pages = {210 -- 225},
publisher = {Springer},
title = {{Perfect-information stochastic mean-payoff parity games}},
doi = {10.1007/978-3-642-54830-7_14},
volume = {8412},
year = {2014},
}
@inproceedings{2213,
abstract = {We consider two-player partial-observation stochastic games on finite-state graphs where player 1 has partial observation and player 2 has perfect observation. The winning conditions we study are ω-regular conditions specified as parity objectives. The qualitative-analysis problem given a partial-observation stochastic game and a parity objective asks whether there is a strategy to ensure that the objective is satisfied with probability 1 (resp. positive probability). These qualitative-analysis problems are known to be undecidable. However in many applications the relevant question is the existence of finite-memory strategies, and the qualitative-analysis problems under finite-memory strategies were recently shown to be decidable in 2EXPTIME. We improve the complexity and show that the qualitative-analysis problems for partial-observation stochastic parity games under finite-memory strategies are EXPTIME-complete; and also establish optimal (exponential) memory bounds for finite-memory strategies required for qualitative analysis.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Nain, Sumit and Vardi, Moshe},
booktitle = {Foundations of Software Science and Computation Structures -- FoSSaCS 2014},
location = {Grenoble, France},
pages = {242 -- 257},
publisher = {Springer},
title = {{The complexity of partial-observation stochastic parity games with finite-memory strategies}},
doi = {10.1007/978-3-642-54830-7_16},
volume = {8412},
year = {2014},
}
@article{2214,
abstract = {A hallmark of immune cell trafficking is directional guidance via gradients of soluble or surface bound chemokines. Vascular endothelial cells produce, transport and deposit either their own chemokines or chemokines produced by the underlying stroma. Endothelial heparan sulfate (HS) was suggested to be a critical scaffold for these chemokine pools, but it is unclear how steep chemokine gradients are sustained between the lumenal and ablumenal aspects of blood vessels. Addressing this question by semi-quantitative immunostaining of HS moieties around blood vessels with a pan anti-HS IgM mAb, we found a striking HS enrichment in the basal lamina of resting and inflamed post capillary skin venules, as well as in high endothelial venules (HEVs) of lymph nodes. Staining of skin vessels with a glycocalyx probe further suggested that their lumenal glycocalyx contains much lower HS density than their basolateral extracellular matrix (ECM). This polarized HS pattern was observed also in isolated resting and inflamed microvascular dermal cells. Notably, progressive skin inflammation resulted in massive ECM deposition and in further HS enrichment around skin post capillary venules and their associated pericytes. Inflammation-dependent HS enrichment was not compromised in mice deficient in the main HS degrading enzyme, heparanase. Our results suggest that the blood vasculature patterns steep gradients of HS scaffolds between their lumenal and basolateral endothelial aspects, and that inflammatory processes can further enrich the HS content nearby inflamed vessels. We propose that chemokine gradients between the lumenal and ablumenal sides of vessels could be favored by these sharp HS scaffold gradients.},
author = {Stoler-Barak, Liat and Moussion, Christine and Shezen, Elias and Hatzav, Miki and Sixt, Michael K and Alon, Ronen},
journal = {PLoS One},
number = {1},
publisher = {Public Library of Science},
title = {{Blood vessels pattern heparan sulfate gradients between their apical and basolateral aspects}},
doi = {10.1371/journal.pone.0085699},
volume = {9},
year = {2014},
}
@article{2215,
abstract = {Homologous recombination is crucial for genome stability and for genetic exchange. Although our knowledge of the principle steps in recombination and its machinery is well advanced, homology search, the critical step of exploring the genome for homologous sequences to enable recombination, has remained mostly enigmatic. However, recent methodological advances have provided considerable new insights into this fundamental step in recombination that can be integrated into a mechanistic model. These advances emphasize the importance of genomic proximity and nuclear organization for homology search and the critical role of homology search mediators in this process. They also aid our understanding of how homology search might lead to unwanted and potentially disease-promoting recombination events.},
author = {Renkawitz, Jörg and Lademann, Claudio and Jentsch, Stefan},
journal = {Nature Reviews Molecular Cell Biology},
number = {6},
pages = {369 -- 383},
publisher = {Nature Publishing Group},
title = {{Mechanisms and principles of homology search during recombination}},
doi = {10.1038/nrm3805},
volume = {15},
year = {2014},
}
@inproceedings{2216,
abstract = {The edit distance between two (untimed) traces is the minimum cost of a sequence of edit operations (insertion, deletion, or substitution) needed to transform one trace to the other. Edit distances have been extensively studied in the untimed setting, and form the basis for approximate matching of sequences in different domains such as coding theory, parsing, and speech recognition. In this paper, we lift the study of edit distances from untimed languages to the timed setting. We define an edit distance between timed words which incorporates both the edit distance between the untimed words and the absolute difference in time stamps. Our edit distance between two timed words is computable in polynomial time. Further, we show that the edit distance between a timed word and a timed language generated by a timed automaton, defined as the edit distance between the word and the closest word in the language, is PSPACE-complete. While computing the edit distance between two timed automata is undecidable, we show that the approximate version, where we decide if the edit distance between two timed automata is either less than a given parameter or more than δ away from the parameter, for δ > 0, can be solved in exponential space and is EXPSPACE-hard. Our definitions and techniques can be generalized to the setting of hybrid systems, and analogous decidability results hold for rectangular automata.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Majumdar, Rupak},
booktitle = {Proceedings of the 17th International Conference on Hybrid Systems: Computation and Control},
location = {Berlin, Germany},
pages = {303 -- 312},
publisher = {ACM},
title = {{Edit distance for timed automata}},
doi = {10.1145/2562059.2562141},
year = {2014},
}
@inproceedings{2217,
abstract = {As hybrid systems involve continuous behaviors, they should be evaluated by quantitative methods, rather than qualitative methods. In this paper we adapt a quantitative framework, called model measuring, to the hybrid systems domain. The model-measuring problem asks, given a model M and a specification, what is the maximal distance such that all models within that distance from M satisfy (or violate) the specification. A distance function on models is given as part of the input of the problem. Distances, especially related to continuous behaviors are more natural in the hybrid case than the discrete case. We are interested in distances represented by monotonic hybrid automata, a hybrid counterpart of (discrete) weighted automata, whose recognized timed languages are monotone (w.r.t. inclusion) in the values of parameters.
The contributions of this paper are twofold. First, we give sufficient conditions under which the model-measuring problem can be solved. Second, we discuss the modeling of distances and applications of the model-measuring problem.},
author = {Henzinger, Thomas A and Otop, Jan},
booktitle = {Proceedings of the 17th international conference on Hybrid systems: computation and control},
location = {Berlin, Germany},
pages = {213 -- 222},
publisher = {ACM},
title = {{Model measuring for hybrid systems}},
doi = {10.1145/2562059.2562130},
year = {2014},
}
@inproceedings{2219,
abstract = {Recently, Döttling et al. (ASIACRYPT 2012) proposed the first chosen-ciphertext (IND-CCA) secure public-key encryption scheme from the learning parity with noise (LPN) assumption. In this work we give an alternative scheme which is conceptually simpler and more efficient. At the core of our construction is a trapdoor technique originally proposed for lattices by Micciancio and Peikert (EUROCRYPT 2012), which we adapt to the LPN setting. The main technical tool is a new double-trapdoor mechanism, together with a trapdoor switching lemma based on a computational variant of the leftover hash lemma.},
author = {Kiltz, Eike and Masny, Daniel and Pietrzak, Krzysztof Z},
isbn = {978-364254630-3},
pages = {1 -- 18},
publisher = {Springer},
title = {{Simple chosen-ciphertext security from low noise LPN}},
doi = {10.1007/978-3-642-54631-0_1},
volume = {8383},
year = {2014},
}
@article{2220,
abstract = {In this issue of Chemistry & Biology, Cokol and colleagues report a systematic study of drug interactions between antifungal compounds. Suppressive drug interactions occur more frequently than previously realized and come in different flavors with interesting implications.},
author = {De Vos, Marjon and Bollenbach, Mark Tobias},
issn = {10745521},
journal = {Chemistry and Biology},
number = {4},
pages = {439 -- 440},
publisher = {Cell Press},
title = {{Suppressive drug interactions between antifungals}},
doi = {10.1016/j.chembiol.2014.04.004},
volume = {21},
year = {2014},
}
@article{2222,
abstract = {Leaf venation develops complex patterns in angiosperms, but the mechanism underlying this process is largely unknown. To elucidate the molecular mechanisms governing vein pattern formation, we previously isolated vascular network defective (van) mutants that displayed venation discontinuities. Here, we report the phenotypic analysis of van4 mutants, and we identify and characterize the VAN4 gene. Detailed phenotypic analysis shows that van4 mutants are defective in procambium cell differentiation and subsequent vascular cell differentiation. Reduced shoot and root cell growth is observed in van4 mutants, suggesting that VAN4 function is important for cell growth and the establishment of venation continuity. Consistent with these phenotypes, the VAN4 gene is strongly expressed in vascular and meristematic cells. VAN4 encodes a putative TRS120, which is a known guanine nucleotide exchange factor (GEF) for Rab GTPase involved in regulating vesicle transport, and a known tethering factor that determines the specificity of membrane fusion. VAN4 protein localizes at the trans-Golgi network/early endosome (TGN/EE). Aberrant recycling of the auxin efflux carrier PIN proteins is observed in van4 mutants. These results suggest that VAN4-mediated exocytosis at the TGN plays important roles in plant vascular development and cell growth in shoot and root. Our identification of VAN4 as a putative TRS120 shows that Rab GTPases are crucial (in addition to ARF GTPases) for continuous vascular development, and provides further evidence for the importance of vesicle transport in leaf vascular formation.},
author = {Naramoto, Satoshi and Nodzyński, Tomasz and Dainobu, Tomoko and Takatsuka, Hirotomo and Okada, Teruyo and Friml, Jiří and Fukuda, Hiroo},
issn = {00320781},
journal = {Plant and Cell Physiology},
number = {4},
pages = {750 -- 763},
publisher = {Oxford University Press},
title = {{VAN4 encodes a putative TRS120 that is required for normal cell growth and vein development in arabidopsis}},
doi = {10.1093/pcp/pcu012},
volume = {55},
year = {2014},
}
@article{2223,
abstract = {Correct positioning of membrane proteins is an essential process in eukaryotic organisms. The plant hormone auxin is distributed through intercellular transport and triggers various cellular responses. Auxin transporters of the PIN-FORMED (PIN) family localize asymmetrically at the plasma membrane (PM) and mediate the directional transport of auxin between cells. A fungal toxin, brefeldin A (BFA), inhibits a subset of guanine nucleotide exchange factors for ADP-ribosylation factor small GTPases (ARF GEFs) including GNOM, which plays a major role in localization of PIN1 predominantly to the basal side of the PM. The Arabidopsis genome encodes 19 ARF-related putative GTPases. However, ARF components involved in PIN1 localization have been genetically poorly defined. Using a fluorescence imaging-based forward genetic approach, we identified an Arabidopsis mutant, bfa-visualized exocytic trafficking defective1 (bex1), in which PM localization of PIN1-green fluorescent protein (GFP) as well as development is hypersensitive to BFA. We found that in bex1 a member of the ARF1 gene family, ARF1A1C, was mutated. ARF1A1C localizes to the trans-Golgi network/early endosome and Golgi apparatus, acts synergistically to BEN1/MIN7 ARF GEF and is important for PIN recycling to the PM. Consistent with the developmental importance of PIN proteins, functional interference with ARF1 resulted in an impaired auxin response gradient and various developmental defects including embryonic patterning defects and growth arrest. Our results show that ARF1A1C is essential for recycling of PIN auxin transporters and for various auxin-dependent developmental processes.},
author = {Tanaka, Hirokazu and Nodzyński, Tomasz and Kitakura, Saeko and Feraru, Mugurel and Sasabe, Michiko and Ishikawa, Tomomi and Kleine Vehn, Jürgen and Kakimoto, Tatsuo and Friml, Jiří},
issn = {00320781},
journal = {Plant and Cell Physiology},
number = {4},
pages = {737 -- 749},
publisher = {Oxford University Press},
title = {{BEX1/ARF1A1C is required for BFA-sensitive recycling of PIN auxin transporters and auxin-mediated development in arabidopsis}},
doi = {10.1093/pcp/pct196},
volume = {55},
year = {2014},
}
@article{2224,
abstract = {This work investigates the transition between different traveling helical waves (spirals, SPIs) in the setup of differentially independent rotating cylinders. We use direct numerical simulations to consider an infinite long and periodic Taylor-Couette apparatus with fixed axial periodicity length. We find so-called mixed-cross-spirals (MCSs), that can be seen as nonlinear superpositions of SPIs, to establish stable footbridges connecting SPI states. While bridging the bifurcation branches of SPIs, the corresponding contributions within the MCS vary continuously with the control parameters. Here discussed MCSs presenting footbridge solutions start and end in different SPI branches. Therefore they differ significantly from the already known MCSs that present bypass solutions (Altmeyer and Hoffmann 2010 New J. Phys. 12 113035). The latter start and end in the same SPI branch, while they always bifurcate out of those SPI branches with the larger mode amplitude. Meanwhile, these only appear within the coexisting region of both SPIs. In contrast, the footbridge solutions can also bifurcate out of the minor SPI contribution. We also find they exist in regions where only one of the SPIs contributions exists. In addition, MCS as footbridge solution can appear either stable or unstable. The latter detected transient solutions offer similar spatio-temporal characteristics to the flow establishing stable footbridges. Such transition processes are interesting for pattern-forming systems in general because they accomplish transitions between traveling waves of different azimuthal wave numbers and have not been described in the literature yet.},
author = {Altmeyer, Sebastian},
issn = {01695983},
journal = {Fluid Dynamics Research},
number = {2},
publisher = {IOP Publishing Ltd.},
title = {{On secondary instabilities generating footbridges between spiral vortex flow}},
doi = {10.1088/0169-5983/46/2/025503},
volume = {46},
year = {2014},
}
@article{2225,
abstract = {We consider sample covariance matrices of the form X∗X, where X is an M×N matrix with independent random entries. We prove the isotropic local Marchenko-Pastur law, i.e. we prove that the resolvent (X∗X−z)−1 converges to a multiple of the identity in the sense of quadratic forms. More precisely, we establish sharp high-probability bounds on the quantity ⟨v,(X∗X−z)−1w⟩−⟨v,w⟩m(z), where m is the Stieltjes transform of the Marchenko-Pastur law and v,w∈CN. We require the logarithms of the dimensions M and N to be comparable. Our result holds down to scales Im z≥N−1+ε and throughout the entire spectrum away from 0. We also prove analogous results for generalized Wigner matrices.},
author = {Bloemendal, Alex and Erdős, László and Knowles, Antti and Yau, Horng-Tzer and Yin, Jun},
issn = {10836489},
journal = {Electronic Journal of Probability},
publisher = {Institute of Mathematical Statistics},
title = {{Isotropic local laws for sample covariance and generalized Wigner matrices}},
doi = {10.1214/EJP.v19-3054},
volume = {19},
year = {2014},
}
@article{2226,
abstract = {Coriolis force effects on shear flows are important in geophysical and astrophysical contexts. We report a study on the linear stability and the transient energy growth of the plane Couette flow with system rotation perpendicular to the shear direction. External rotation causes linear instability. At small rotation rates, the onset of linear instability scales inversely with the rotation rate and the optimal transient growth in the linearly stable region is slightly enhanced ∼Re2. The corresponding optimal initial perturbations are characterized by roll structures inclined in the streamwise direction and are twisted under external rotation. At large rotation rates, the transient growth is significantly inhibited and hence linear stability analysis is a reliable indicator for instability.},
author = {Shi, Liang and Hof, Björn and Tilgner, Andreas},
issn = {15393755},
journal = {Physical Review E - Statistical, Nonlinear, and Soft Matter Physics},
number = {1},
publisher = {American Physical Society},
title = {{Transient growth of Ekman-Couette flow}},
doi = {10.1103/PhysRevE.89.013001},
volume = {89},
year = {2014},
}
@article{2228,
abstract = {Fast-spiking, parvalbumin-expressing GABAergic interneurons, a large proportion of which are basket cells (BCs), have a key role in feedforward and feedback inhibition, gamma oscillations and complex information processing. For these functions, fast propagation of action potentials (APs) from the soma to the presynaptic terminals is important. However, the functional properties of interneuron axons remain elusive. We examined interneuron axons by confocally targeted subcellular patch-clamp recording in rat hippocampal slices. APs were initiated in the proximal axon ∼20 μm from the soma and propagated to the distal axon with high reliability and speed. Subcellular mapping revealed a stepwise increase of Na^+ conductance density from the soma to the proximal axon, followed by a further gradual increase in the distal axon. Active cable modeling and experiments with partial channel block revealed that low axonal Na^+ conductance density was sufficient for reliability, but high Na^+ density was necessary for both speed of propagation and fast-spiking AP phenotype. Our results suggest that a supercritical density of Na^+ channels compensates for the morphological properties of interneuron axons (small segmental diameter, extensive branching and high bouton density), ensuring fast AP propagation and high-frequency repetitive firing.},
author = {Hu, Hua and Jonas, Peter M},
issn = {10976256},
journal = {Nature Neuroscience},
number = {5},
pages = {686 -- 693},
publisher = {Nature Publishing Group},
title = {{A supercritical density of Na$^+$ channels ensures fast signaling in GABAergic interneuron axons}},
doi = {10.1038/nn.3678},
volume = {17},
year = {2014},
}
@article{2229,
abstract = {The distance between Ca^2+ channels and release sensors determines the speed and efficacy of synaptic transmission. Tight "nanodomain" channel-sensor coupling initiates transmitter release at synapses in the mature brain, whereas loose "microdomain" coupling appears restricted to early developmental stages. To probe the coupling configuration at a plastic synapse in the mature central nervous system, we performed paired recordings between mossy fiber terminals and CA3 pyramidal neurons in rat hippocampus. Millimolar concentrations of both the fast Ca^2+ chelator BAPTA [1,2-bis(2-aminophenoxy)ethane- N,N, N′,N′-tetraacetic acid] and the slow chelator EGTA efficiently suppressed transmitter release, indicating loose coupling between Ca^2+ channels and release sensors. Loose coupling enabled the control of initial release probability by fast endogenous Ca^2+ buffers and the generation of facilitation by buffer saturation. Thus, loose coupling provides the molecular framework for presynaptic plasticity.},
author = {Vyleta, Nicholas and Jonas, Peter M},
issn = {00368075},
journal = {Science},
number = {6171},
pages = {665 -- 670},
publisher = {American Association for the Advancement of Science},
title = {{Loose coupling between Ca$^{2+}$ channels and release sensors at a plastic hippocampal synapse}},
doi = {10.1126/science.1244811},
volume = {343},
year = {2014},
}
@article{2230,
abstract = {Intracellular electrophysiological recordings provide crucial insights into elementary neuronal signals such as action potentials and synaptic currents. Analyzing and interpreting these signals is essential for a quantitative understanding of neuronal information processing, and requires both fast data visualization and ready access to complex analysis routines. To achieve this goal, we have developed Stimfit, a free software package for cellular neurophysiology with a Python scripting interface and a built-in Python shell. The program supports most standard file formats for cellular neurophysiology and other biomedical signals through the Biosig library. To quantify and interpret the activity of single neurons and communication between neurons, the program includes algorithms to characterize the kinetics of presynaptic action potentials and postsynaptic currents, estimate latencies between pre- and postsynaptic events, and detect spontaneously occurring events. We validate and benchmark these algorithms, give estimation errors, and provide sample use cases, showing that Stimfit represents an efficient, accessible and extensible way to accurately analyze and interpret neuronal signals.},
author = {Guzmán, José and Schlögl, Alois and Schmidt-Hieber, Christoph},
issn = {16625196},
journal = {Frontiers in Neuroinformatics},
number = {FEB},
publisher = {Frontiers Research Foundation},
title = {{Stimfit: Quantifying electrophysiological data with Python}},
doi = {10.3389/fninf.2014.00016},
volume = {8},
year = {2014},
}
@article{2231,
abstract = {Based on the measurements of noise in gene expression performed during the past decade, it has become customary to think of gene regulation in terms of a two-state model, where the promoter of a gene can stochastically switch between an ON and an OFF state. As experiments are becoming increasingly precise and the deviations from the two-state model start to be observable, we ask about the experimental signatures of complex multistate promoters, as well as the functional consequences of this additional complexity. In detail, we i), extend the calculations for noise in gene expression to promoters described by state transition diagrams with multiple states, ii), systematically compute the experimentally accessible noise characteristics for these complex promoters, and iii), use information theory to evaluate the channel capacities of complex promoter architectures and compare them with the baseline provided by the two-state model. We find that adding internal states to the promoter generically decreases channel capacity, except in certain cases, three of which (cooperativity, dual-role regulation, promoter cycling) we analyze in detail.},
author = {Rieckh, Georg and Tkačik, Gašper},
issn = {00063495},
journal = {Biophysical Journal},
number = {5},
pages = {1194 -- 1204},
publisher = {Biophysical Society},
title = {{Noise and information transmission in promoters with multiple internal states}},
doi = {10.1016/j.bpj.2014.01.014},
volume = {106},
year = {2014},
}
@article{2232,
abstract = {The purpose of this contribution is to summarize and discuss recent advances regarding the onset of turbulence in shear flows. The absence of a clear-cut instability mechanism, the spatio-temporal intermittent character and extremely long lived transients are some of the major difficulties encountered in these flows and have hindered progress towards understanding the transition process. We will show for the case of pipe flow that concepts from nonlinear dynamics and statistical physics can help to explain the onset of turbulence. In particular, the turbulent structures (puffs) observed close to onset are spatially localized chaotic transients and their lifetimes increase super-exponentially with Reynolds number. At the same time fluctuations of individual turbulent puffs can (although very rarely) lead to the nucleation of new puffs. The competition between these two stochastic processes gives rise to a non-equilibrium phase transition where turbulence changes from a super-transient to a sustained state.},
author = {Song, Baofang and Hof, Björn},
issn = {17425468},
journal = {Journal of Statistical Mechanics: Theory and Experiment},
number = {2},
publisher = {IOP Publishing Ltd.},
title = {{Deterministic and stochastic aspects of the transition to turbulence}},
doi = {10.1088/1742-5468/2014/02/P02001},
volume = {2014},
year = {2014},
}
@article{2233,
abstract = { A discounted-sum automaton (NDA) is a nondeterministic finite automaton with edge weights, valuing a run by the discounted sum of visited edge weights. More precisely, the weight in the i-th position of the run is divided by λ^i, where the discount factor λ is a fixed rational number greater than 1. The value of a word is the minimal value of the automaton runs on it. Discounted summation is a common and useful measuring scheme, especially for infinite sequences, reflecting the assumption that earlier weights are more important than later weights. Unfortunately, determinization of NDAs, which is often essential in formal verification, is, in general, not possible. We provide positive news, showing that every NDA with an integral discount factor is determinizable. We complete the picture by proving that the integers characterize exactly the discount factors that guarantee determinizability: for every nonintegral rational discount factor λ, there is a nondeterminizable λ-NDA. We also prove that the class of NDAs with integral discount factors enjoys closure under the algebraic operations min, max, addition, and subtraction, which is not the case for general NDAs nor for deterministic NDAs. For general NDAs, we look into approximate determinization, which is always possible as the influence of a word's suffix decays. We show that the naive approach, of unfolding the automaton computations up to a sufficient level, is doubly exponential in the discount factor. We provide an alternative construction for approximate determinization, which is singly exponential in the discount factor, in the precision, and in the number of states. We also prove matching lower bounds, showing that the exponential dependency on each of these three parameters cannot be avoided. All our results hold equally for automata over finite words and for automata over infinite words. },
author = {Boker, Udi and Henzinger, Thomas A},
issn = {18605974},
journal = {Logical Methods in Computer Science},
number = {1},
publisher = {International Federation for Computational Logic},
title = {{Exact and approximate determinization of discounted-sum automata}},
doi = {10.2168/LMCS-10(1:10)2014},
volume = {10},
year = {2014},
}
@article{2234,
abstract = {We study Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) functions. We consider two different objectives, namely, expectation and satisfaction objectives. Given an MDP with κ limit-average functions, in the expectation objective the goal is to maximize the expected limit-average value, and in the satisfaction objective the goal is to maximize the probability of runs such that the limit-average value stays above a given vector. We show that under the expectation objective, in contrast to the case of one limit-average function, both randomization and memory are necessary for strategies even for ε-approximation, and that finite-memory randomized strategies are sufficient for achieving Pareto optimal values. Under the satisfaction objective, in contrast to the case of one limit-average function, infinite memory is necessary for strategies achieving a specific value (i.e. randomized finite-memory strategies are not sufficient), whereas memoryless randomized strategies are sufficient for ε-approximation, for all ε > 0. We further prove that the decision problems for both expectation and satisfaction objectives can be solved in polynomial time and the trade-off curve (Pareto curve) can be ε-approximated in time polynomial in the size of the MDP and 1/ε, and exponential in the number of limit-average functions, for all ε > 0. Our analysis also reveals flaws in previous work for MDPs with multiple mean-payoff functions under the expectation objective, corrects the flaws, and allows us to obtain improved results.},
author = {Brázdil, Tomáš and Brožek, Václav and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín},
issn = {18605974},
journal = {Logical Methods in Computer Science},
number = {1},
publisher = {International Federation for Computational Logic},
title = {{Markov decision processes with multiple long-run average objectives}},
doi = {10.2168/LMCS-10(1:13)2014},
volume = {10},
year = {2014},
}
@article{2235,
abstract = {Emerging infectious diseases (EIDs) pose a risk to human welfare, both directly and indirectly, by affecting managed livestock and wildlife that provide valuable resources and ecosystem services, such as the pollination of crops. Honeybees (Apis mellifera), the prevailing managed insect crop pollinator, suffer from a range of emerging and exotic high-impact pathogens, and population maintenance requires active management by beekeepers to control them. Wild pollinators such as bumblebees (Bombus spp.) are in global decline, one cause of which may be pathogen spillover from managed pollinators like honeybees or commercial colonies of bumblebees. Here we use a combination of infection experiments and landscape-scale field data to show that honeybee EIDs are indeed widespread infectious agents within the pollinator assemblage. The prevalence of deformed wing virus (DWV) and the exotic parasite Nosema ceranae in honeybees and bumblebees is linked; as honeybees have higher DWV prevalence, and sympatric bumblebees and honeybees are infected by the same DWV strains, Apis is the likely source of at least one major EID in wild pollinators. Lessons learned from vertebrates highlight the need for increased pathogen control in managed bee species to maintain wild pollinators, as declines in native pollinators may be caused by interspecies pathogen transmission originating from managed pollinators.},
author = {Fürst, Matthias and McMahon, Dino and Osborne, Juliet and Paxton, Robert and Brown, Mark},
issn = {00280836},
journal = {Nature},
number = {7488},
pages = {364 -- 366},
publisher = {Nature Publishing Group},
title = {{Disease associations between honeybees and bumblebees as a threat to wild pollinators}},
doi = {10.1038/nature12977},
volume = {506},
year = {2014},
}